#!/bin/bash
# cross_run.sh — dry-run driver for cross-validation experiments:
# prints the python commands that reconstruct data, build per-scene
# datasets, train an SVD model, and evaluate it on the held-out scene.
  1. metric="min_diff_filter"
  2. scenes="A,B,D,G,H,I"
  3. all_scenes="A,B,C,D,E,F,G,H,I"
  4. # file which contains model names we want to use for simulation
  5. file_path="results/models_comparisons.csv"
  6. stride=1
  7. for window in {"3","5","7","9"}; do
  8. echo python generate/generate_reconstructed_data.py --features ${metric} --params ${window},${window},${stride} --size 100,100 --scenes ${all_scenes}
  9. done
  10. for scene in {"A","B","D","G","H","I"}; do
  11. # remove current scene test from dataset
  12. s="${scenes//,${scene}}"
  13. s="${s//${scene},}"
  14. for zone in {10,11,12}; do
  15. for window in {"3","5","7","9"}; do
  16. for balancing in {0,1}; do
  17. OUTPUT_DATA_FILE="${metric}_nb_zones_${zone}_W${window}_S${stride}_balancing${balancing}_without_${scene}"
  18. OUTPUT_DATA_FILE_TEST="${metric}_nb_zones_${zone}_W${window}_S${stride}_balancing${balancing}_scene_${scene}"
  19. if grep -q "${OUTPUT_DATA_FILE}" "${file_path}"; then
  20. echo "SVD model ${OUTPUT_DATA_FILE} already generated"
  21. else
  22. #echo "Run computation for SVD model ${OUTPUT_DATA_FILE}"
  23. echo python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE_TEST} --features ${metric} --scenes ${scene} --params ${window},${window},${stride} --nb_zones ${zone} --random 1 --size 100,100
  24. echo python generate/generate_dataset.py --output data/${OUTPUT_DATA_FILE} --features ${metric} --scenes ${s} --params ${window},${window},${stride} --nb_zones ${zone} --random 1 --size 100,100
  25. echo python train_model.py --data data/${OUTPUT_DATA_FILE} --output ${OUTPUT_DATA_FILE} --balancing ${balancing}
  26. echo python prediction_model.py --data data/${OUTPUT_DATA_FILE_TEST}.train --model saved_models/${OUTPUT_DATA_FILE}.json
  27. fi
  28. done
  29. done
  30. done
  31. done