# get_specific_dataset_png_with_mean.py
  1. # main imports
  2. import os, sys
  3. import argparse
  4. import json
  5. import numpy as np
  6. import shutil
  7. # PNG images
  8. from PIL import Image
  9. # others import
  10. from ipfml import utils
  11. from scipy.signal import savgol_filter
  12. '''
  13. Display progress information as progress bar
  14. '''
  15. def write_progress(progress):
  16. barWidth = 180
  17. output_str = "["
  18. pos = barWidth * progress
  19. for i in range(barWidth):
  20. if i < pos:
  21. output_str = output_str + "="
  22. elif i == pos:
  23. output_str = output_str + ">"
  24. else:
  25. output_str = output_str + " "
  26. output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
  27. print(output_str)
  28. sys.stdout.write("\033[F")
  29. def extract_index(filepath):
  30. return int(filepath.split('_')[-1].split('.')[0])
  31. def extracts_linear_indices(images_path, n_expected=50, indices_step=20, start_at=20, smooth_arr=False):
  32. # TODO : check this part
  33. default_add = start_at - indices_step
  34. # extract variance for each image path
  35. var_arr = []
  36. n_counter = 0
  37. n_images = len(images_path)
  38. for p in sorted(images_path):
  39. img = Image.open(p)
  40. var_arr.append(np.var(img))
  41. n_counter += 1
  42. write_progress((n_counter + 1) / n_images)
  43. # normalize variance values
  44. norm_arr = np.array(utils.normalize_arr_with_range(var_arr))
  45. if smooth_arr:
  46. norm_arr = utils.normalize_arr_with_range(savgol_filter(norm_arr, 201, 3)) # window size 7, polynomial order 3
  47. # get expected linear step (using n_expectec output images)
  48. linear_steps = utils.normalize_arr_with_range((1 - (np.arange(n_expected) / n_expected)))
  49. # get image indices from variance convergence and linear
  50. # => when linear step is reached we store the index found from variance values
  51. indices_found = []
  52. for i in linear_steps:
  53. find_index = 0
  54. for index, y in enumerate(norm_arr):
  55. if i <= y:
  56. find_index = index
  57. indices_found.append(find_index + 1)
  58. indices = np.array(indices_found) * indices_step
  59. # add tricks to avoid same indice
  60. # => when index is same as previous, then add number of samples expected by step
  61. # Example with step of 20 : [20, 20, 20, 100, 200] => [20, 40, 60, 100, 200]
  62. final_indices = []
  63. for index, i in enumerate(indices):
  64. value = indices[index]
  65. if index > 0:
  66. if i <= indices[index - 1]:
  67. value = indices[index - 1] + indices_step
  68. indices[index] = value
  69. final_indices.append(value)
  70. return np.array(final_indices) + default_add
  71. def main():
  72. """
  73. main function which is ran when launching script
  74. """
  75. parser = argparse.ArgumentParser(description="Compute new dataset scene")
  76. parser.add_argument('--file', type=str, help='file data extracted from `utils/extract_stats_freq_and_min.py` script', required=True)
  77. parser.add_argument('--png_folder', type=str, help='png dataset folder with scene', required=True)
  78. parser.add_argument('--users', type=int, help='min number of users required per scene', required=True, default=10)
  79. #parser.add_argument('--samples', type=int, help='expected samples to get for this dataset', required=True, default=10000)
  80. parser.add_argument('--output', type=str, help='output image folder', required=True)
  81. args = parser.parse_args()
  82. p_file = args.file
  83. p_png_folder = args.png_folder
  84. p_users = args.users
  85. #p_samples = args.samples
  86. p_output = args.output
  87. with open(p_file, 'r') as f:
  88. for line in f.readlines():
  89. data = line.split(';')
  90. scene = data[0]
  91. n_users = int(data[1])
  92. min_index = int(data[2])
  93. # remove _partX from scene name
  94. scene_parts = scene.split('_')
  95. del scene_parts[-1]
  96. scene_name = '_'.join(scene_parts)
  97. output_scene_dir = os.path.join(p_output, scene)
  98. if os.path.exists(output_scene_dir):
  99. print('Extraction of custom indices already done for', scene)
  100. continue
  101. if n_users >= p_users:
  102. print('Extract custom indices based on minimum index for', scene)
  103. png_folder_scene = os.path.join(p_png_folder, scene)
  104. if not os.path.exists(png_folder_scene):
  105. print(png_folder_scene, 'png folder does not exist')
  106. else:
  107. # get all rawls files
  108. png_files = [ os.path.join(png_folder_scene, p) for p in sorted(os.listdir(png_folder_scene)) ]
  109. # extract max samples found for this scene
  110. _, filename = os.path.split(png_files[-1])
  111. max_samples = extract_index(filename)
  112. # extract step from these files
  113. input_step = int(max_samples / len(png_files))
  114. # get indices using min index
  115. indices = extracts_linear_indices(png_files[int(min_index / input_step):], n_expected=50, indices_step=input_step, start_at=min_index, smooth_arr=True)
  116. # here add the most noisy image + mean between first predicted and most noisy image
  117. min_index = extract_index(png_files[0])
  118. if not min_index in indices:
  119. # get mean between min and next one in list
  120. mean_index = int((min_index + indices[1]) / 2)
  121. # check mean index step
  122. if mean_index % input_step != 0:
  123. mean_index = mean_index + (mean_index % input_step)
  124. if not mean_index in indices:
  125. indices = np.insert(indices, 0, mean_index)
  126. # add min index as first
  127. indices = np.insert(indices, 0, min_index)
  128. # print('Indices found are', indices)
  129. # create output directory
  130. if not os.path.exists(output_scene_dir):
  131. os.makedirs(output_scene_dir)
  132. # get expected png image and move it
  133. for index in indices:
  134. str_index = str(index)
  135. while len(str_index) < 5:
  136. str_index = "0" + str_index
  137. image_name = scene_name + '_' + str_index + '.png'
  138. png_image_path = os.path.join(png_folder_scene, image_name)
  139. # create output filepath
  140. output_img_filepath = os.path.join(output_scene_dir, image_name)
  141. # copy expected image path
  142. shutil.copy2(png_image_path, output_img_filepath)
  143. else:
  144. print('Only', n_users, 'users who passed the experiment for', scene)
  145. print('\n---------------------------------------------')
  146. if __name__ == "__main__":
  147. main()