make_dataset.py 3.3 KB

import numpy as np
import pandas as pd
import os, sys, argparse

import modules.config as cfg


def compute_files(_n, _each_row, _each_column):
    """
    Read all scene folders and pixel files in order to compute the output dataset
    """

    output_dataset_filename = cfg.output_file_prefix + str(_n) + '_column_' + str(_each_column) + '_row_' + str(_each_row) + '.csv'
    output_dataset_filename = os.path.join(cfg.output_data_folder, output_dataset_filename)

    if not os.path.exists(cfg.output_data_folder):
        os.makedirs(cfg.output_data_folder)

    output_file = open(output_dataset_filename, 'w')
    print('Preparing to store data into', output_dataset_filename)

    scenes = os.listdir(cfg.folder_scenes_path)

    # remove min/max files from the scenes folder
    scenes = [s for s in scenes if s not in cfg.folder_and_files_filtered]
    scenes = [s for s in scenes if '.csv' not in s]  # do not keep already generated .csv files

    # skip test scenes from the dataset
    scenes = [s for s in scenes if s not in cfg.test_scenes]
    # print(scenes)

    counter = 0
    number_of_elements = len(scenes) * cfg.number_of_rows * cfg.number_of_columns
    # print(number_of_elements, ' to manage')

    for scene in scenes:

        scene_path = os.path.join(cfg.folder_scenes_path, scene)

        for id_column in range(cfg.number_of_columns):

            if id_column % int(_each_column) == 0:

                folder_path = os.path.join(scene_path, str(id_column))

                for id_row in range(cfg.number_of_rows):

                    if id_row % int(_each_row) == 0:

                        pixel_filename = scene + '_' + str(id_column) + '_' + str(id_row) + ".dat"
                        pixel_file_path = os.path.join(folder_path, pixel_filename)

                        saved_row = ''

                        # read the file content, keep the `n` first values and compute the mean over all values
                        with open(pixel_file_path, 'r') as f:
                            lines = [float(l) / 255. for l in f.readlines()]

                            pixel_values = lines[0:int(_n)]
                            mean = sum(lines) / float(len(lines))

                            saved_row += str(mean)

                            for val in pixel_values:
                                saved_row += ';' + str(val)

                            saved_row += '\n'

                        # store mean and pixel values as one .csv row
                        output_file.write(saved_row)

                        counter = counter + 1
            else:
                # whole column skipped: account for all of its rows in the progress counter
                counter += cfg.number_of_rows

            print("{0:.2f}%".format(counter / number_of_elements * 100))
            sys.stdout.write("\033[F")

    print('\n')
    output_file.close()


def main():

    parser = argparse.ArgumentParser(description="Compute .csv dataset file")

    parser.add_argument('--n', type=str, help='Number of leading pixel values to keep per pixel file', required=True)
    parser.add_argument('--each_row', type=str, help='Keep only one row out of every `each_row` rows', default='1')
    parser.add_argument('--each_column', type=str, help='Keep only one column out of every `each_column` columns', default='1')

    args = parser.parse_args()

    param_n = args.n
    param_each_row = args.each_row
    param_each_column = args.each_column

    compute_files(param_n, param_each_row, param_each_column)


if __name__ == "__main__":
    main()
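For reference, a minimal sketch of how the generated ';'-separated file could be read back with pandas (already imported above). It assumes the row layout written by compute_files, i.e. the mean in the first column followed by the `n` kept pixel values; the path below is purely illustrative, since the real filename depends on cfg.output_file_prefix and the --n / --each_row / --each_column arguments.

import pandas as pd

# Illustrative path only (hypothetical): adapt to the actual generated filename.
csv_path = 'data/output_dataset_30_column_1_row_1.csv'

# Each row has the form: mean;v1;v2;...;vn, with no header line.
df = pd.read_csv(csv_path, sep=';', header=None)
df = df.rename(columns={0: 'mean'})

print(df.shape)
print(df.head())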