generate_reconstructed_folder.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 11:47:42 2019

@author: jbuisine
"""

# main imports
import sys, os, argparse
import numpy as np

# image processing imports
from PIL import Image
from ipfml.processing.segmentation import divide_in_blocks

# modules imports
sys.path.insert(0, '') # trick to enable import of main folder module

import custom_config as cfg
from modules.utils.data import get_scene_image_quality
from modules.classes.Transformation import Transformation

# getting configuration information
zone_folder = cfg.zone_folder

# define all scenes values
zones = cfg.zones_indices
features_choices = cfg.features_choices_labels


def write_progress(progress):
    """Display progress information as a progress bar"""
    barWidth = 180

    output_str = "["
    pos = int(barWidth * progress) # integer position so the '>' marker is actually drawn
    for i in range(barWidth):
        if i < pos:
            output_str = output_str + "="
        elif i == pos:
            output_str = output_str + ">"
        else:
            output_str = output_str + " "

    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
    print(output_str)
    sys.stdout.write("\033[F") # move the cursor back up so the next call overwrites the bar
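
# Illustrative note (not part of the original script): calling write_progress(0.5)
# prints a 180-character bar half filled with '=', a '>' marker, then "50 %";
# the ANSI escape above then moves the cursor up one line so the bar is redrawn
# in place on the following call.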

def generate_data(transformation, _dataset_path, _output, _human_thresholds, _replace):
    """
    @brief Method which generates all reconstructed image data from scenes
    @return nothing
    """

    # path is the default dataset path
    scenes = os.listdir(_dataset_path)
    n_scenes = len(scenes)

    # go through each scene
    for id_scene, folder_scene in enumerate(scenes):

        print('Scene {0} of {1} ({2})'.format((id_scene + 1), n_scenes, folder_scene))

        scene_path = os.path.join(_dataset_path, folder_scene)
        output_scene_path = os.path.join(_output, folder_scene)

        # construct each zone folder name
        zones_folder = []
        features_folder = []

        zones_threshold = _human_thresholds[folder_scene]

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str

            current_zone = "zone" + index_str
            zones_folder.append(current_zone)
            zone_path = os.path.join(output_scene_path, current_zone)

            # custom path for feature
            feature_path = os.path.join(zone_path, transformation.getName())

            if not os.path.exists(feature_path):
                os.makedirs(feature_path)

            # custom path for interval of reconstruction and feature
            feature_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
            features_folder.append(feature_interval_path)

            if not os.path.exists(feature_interval_path):
                os.makedirs(feature_interval_path)

            # create the labels folders for each zone
            labels = [cfg.not_noisy_folder, cfg.noisy_folder]

            for label in labels:
                label_folder = os.path.join(feature_interval_path, label)

                if not os.path.exists(label_folder):
                    os.makedirs(label_folder)

        # get all images of the scene folder
        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
        number_scene_image = len(scene_images)

        # for each image
        for id_img, img_path in enumerate(scene_images):

            current_img = Image.open(img_path)
            img_blocks = divide_in_blocks(current_img, cfg.sub_image_size)

            current_quality_index = int(get_scene_image_quality(img_path))

            for id_block, block in enumerate(img_blocks):

                ##########################
                # Image computation part #
                ##########################

                label_path = features_folder[id_block]

                # get label folder for block
                if current_quality_index > zones_threshold[id_block]:
                    label_path = os.path.join(label_path, cfg.not_noisy_folder)
                else:
                    label_path = os.path.join(label_path, cfg.noisy_folder)

                # check whether it is necessary to compute the images or not
                # Data augmentation!
                rotations = [0, 90, 180, 270]

                #img_flip_labels = ['original', 'horizontal', 'vertical', 'both']
                img_flip_labels = ['original', 'horizontal']

                output_images_path = []
                check_path_exists = []

                # build the expected output paths for the rotated and flipped images
                for id, flip_label in enumerate(img_flip_labels):
                    for rotation in rotations:
                        output_reconstructed_filename = img_path.split('/')[-1].replace('.png', '') + '_' + zones_folder[id_block] + cfg.post_image_name_separator
                        output_reconstructed_filename = output_reconstructed_filename + flip_label + '_' + str(rotation) + '.png'
                        output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)

                        check_path_exists.append(os.path.exists(output_reconstructed_path))
                        output_images_path.append(output_reconstructed_path)

                # compute only if not already existing or if replacement is requested
                if _replace or not np.array(check_path_exists).all():

                    # compute image (pass block to grey level)
                    output_block = transformation.getTransformedImage(block)
                    output_block = np.array(output_block, 'uint8')

                    # current output image
                    output_block_img = Image.fromarray(output_block)

                    #horizontal_img = output_block_img.transpose(Image.FLIP_LEFT_RIGHT)
                    #vertical_img = output_block_img.transpose(Image.FLIP_TOP_BOTTOM)
                    #both_img = output_block_img.transpose(Image.TRANSPOSE)
                    #flip_images = [output_block_img, horizontal_img, vertical_img, both_img]
                    #flip_images = [output_block_img, horizontal_img]

                    # only the original (unflipped) image is currently used
                    flip_images = [output_block_img]

                    # rotate and flip image to increase dataset size
                    counter_index = 0 # current output path index
                    for id, flip in enumerate(flip_images):
                        for rotation in rotations:

                            if _replace or not check_path_exists[counter_index]:
                                rotated_output_img = flip.rotate(rotation)
                                rotated_output_img.save(output_images_path[counter_index])

                            counter_index += 1

            write_progress((id_img + 1) / number_scene_image)

        print('\n')

    print("{0}_{1} : end of data generation\n".format(transformation.getName(), transformation.getParam()))
def main():

    parser = argparse.ArgumentParser(description="Compute and prepare data of feature of all scenes using specific interval if necessary")

    parser.add_argument('--features', type=str,
                        help="list of features choice in order to compute data",
                        default='svd_reconstruction, ipca_reconstruction',
                        required=True)

    parser.add_argument('--params', type=str,
                        help="list of specific param for each feature choice (See README.md for further information in 3D mode)",
                        default='100, 200 :: 50, 25',
                        required=True)

    parser.add_argument('--folder', type=str,
                        help='folder where dataset is available',
                        required=True)

    parser.add_argument('--output', type=str,
                        help='output folder where data are saved',
                        required=True)

    parser.add_argument('--thresholds', type=str,
                        help='file which contains all thresholds',
                        required=True)

    parser.add_argument('--size', type=str,
                        help="specific size of image",
                        default='100, 100',
                        required=True)

    parser.add_argument('--replace', type=int,
                        help='replace previous pictures',
                        default=1)

    args = parser.parse_args()

    p_features = list(map(str.strip, args.features.split(',')))
    p_params = list(map(str.strip, args.params.split('::')))
    p_folder = args.folder
    p_output = args.output
    p_thresholds = args.thresholds
    p_size = args.size
    p_replace = bool(args.replace)

    # list of transformations
    transformations = []

    for id, feature in enumerate(p_features):

        if feature not in features_choices or feature == 'static':
            raise ValueError("Unknown feature, please select a correct feature (`static` excluded) : ", features_choices)

        transformations.append(Transformation(feature, p_params[id], p_size))

    human_thresholds = {}

    # retrieve human thresholds for each scene
    with open(p_thresholds) as f:
        thresholds_line = f.readlines()

        for line in thresholds_line:
            data = line.split(';')
            del data[-1] # remove unused last element (the trailing '\n')

            current_scene = data[0]
            thresholds_scene = data[1:]

            if current_scene != '50_shades_of_grey':
                human_thresholds[current_scene] = [ int(threshold) for threshold in thresholds_scene ]
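
    # Expected thresholds file format, inferred from the parsing above (the scene
    # name and values below are an illustrative assumption): one line per scene,
    # semicolon-separated, one integer threshold per zone, with a trailing
    # separator before the newline, e.g.
    #   SceneName;35;40;28;33;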

    # generate all or specific feature data
    for transformation in transformations:
        generate_data(transformation, p_folder, p_output, human_thresholds, p_replace)


if __name__ == "__main__":
    main()
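
# Example invocation (illustrative only: the folder, output and thresholds paths
# are assumptions; features, params and size reuse the defaults declared above):
#
#   python generate_reconstructed_folder.py \
#       --features "svd_reconstruction, ipca_reconstruction" \
#       --params "100, 200 :: 50, 25" \
#       --folder dataset \
#       --output data/generated \
#       --thresholds thresholds.csv \
#       --size "100, 100" \
#       --replace 0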