#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 11:47:42 2019
@author: jbuisine
"""

# main imports
import sys, os, argparse
import numpy as np

# images processing imports
from PIL import Image
from ipfml.processing.segmentation import divide_in_blocks

# modules imports
sys.path.insert(0, '')  # trick to enable import of main folder module
import custom_config as cfg
from modules.utils.data import get_scene_image_quality
from modules.classes.Transformation import Transformation

# getting configuration information
zone_folder = cfg.zone_folder

# define all scenes values
zones = cfg.zones_indices
features_choices = cfg.features_choices_labels


def write_progress(progress):
    """
    Display progress information as a progress bar
    """
    barWidth = 180

    output_str = "["
    pos = barWidth * progress

    for i in range(barWidth):
        if i < pos:
            output_str = output_str + "="
        elif i == pos:
            output_str = output_str + ">"
        else:
            output_str = output_str + " "

    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
    print(output_str)
    sys.stdout.write("\033[F")
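
# write_progress is called once per processed image below; e.g. write_progress(0.5)
# renders "[==== ... >    ] 50 %" (bar width 180) and the "\033[F" escape moves the
# cursor back up one line so the next call overwrites the bar in place.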


def generate_data(transformation, _dataset_path, _output, _human_thresholds, _replace):
    """
    @brief Method which generates all reconstructed image blocks from scenes
    @return nothing
    """

    # path is the default dataset path
    scenes = os.listdir(_dataset_path)
    n_scenes = len(scenes)

    # go through each scene
    for id_scene, folder_scene in enumerate(scenes):

        print('Scene {0} of {1} ({2})'.format((id_scene + 1), n_scenes, folder_scene))

        scene_path = os.path.join(_dataset_path, folder_scene)
        output_scene_path = os.path.join(cfg.output_data_generated, _output, folder_scene)

        # construct each zone folder name
        zones_folder = []
        features_folder = []

        if folder_scene in _human_thresholds:

            zones_threshold = _human_thresholds[folder_scene]

            # get zones list info
            for index in zones:
                index_str = str(index)

                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
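                # e.g. zone index 3 yields the folder name "zone03" (indices are zero-padded to two digits)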
                zones_folder.append(current_zone)
                zone_path = os.path.join(output_scene_path, current_zone)

                # custom path for feature
                feature_path = os.path.join(zone_path, transformation.getName())

                if not os.path.exists(feature_path):
                    os.makedirs(feature_path)

                # custom path for interval of reconstruction and feature
                feature_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
                features_folder.append(feature_interval_path)

                if not os.path.exists(feature_interval_path):
                    os.makedirs(feature_interval_path)

                # create the labels folders for each zone
                labels = [cfg.not_noisy_folder, cfg.noisy_folder]

                for label in labels:
                    label_folder = os.path.join(feature_interval_path, label)

                    if not os.path.exists(label_folder):
                        os.makedirs(label_folder)

            # get all images of the scene folder
            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
            number_scene_image = len(scene_images)

            # for each image of the scene
            for id_img, img_path in enumerate(scene_images):

                current_img = Image.open(img_path)
                img_blocks = divide_in_blocks(current_img, cfg.sub_image_size)
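                # divide_in_blocks (ipfml) splits the image into sub-images of size
                # cfg.sub_image_size; each block index corresponds to one zone defined above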

                current_quality_index = int(get_scene_image_quality(img_path))

                for id_block, block in enumerate(img_blocks):

                    ##########################
                    # Image computation part #
                    ##########################

                    label_path = features_folder[id_block]

                    # get label folder for block
                    if current_quality_index > zones_threshold[id_block]:
                        label_path = os.path.join(label_path, cfg.not_noisy_folder)
                    else:
                        label_path = os.path.join(label_path, cfg.noisy_folder)

                    # check whether the images need to be computed or not
                    # Data augmentation!
                    rotations = [0, 90, 180, 270]

                    #img_flip_labels = ['original', 'horizontal', 'vertical', 'both']
                    #img_flip_labels = ['original', 'horizontal']
                    # keep this list in sync with `flip_images` below: only the
                    # original orientation is currently generated and saved
                    img_flip_labels = ['original']

                    output_images_path = []
                    check_path_exists = []

                    # rotate and flip image to increase dataset size
                    for id, flip_label in enumerate(img_flip_labels):
                        for rotation in rotations:
                            output_reconstructed_filename = img_path.split('/')[-1].replace('.png', '') + '_' + zones_folder[id_block] + cfg.post_image_name_separator
                            output_reconstructed_filename = output_reconstructed_filename + flip_label + '_' + str(rotation) + '.png'
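                            # resulting name pattern: <image>_<zoneXX><separator><flip>_<rotation>.png,
                            # where <separator> comes from cfg.post_image_name_separator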
                            output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)

                            if os.path.exists(output_reconstructed_path):
                                check_path_exists.append(True)
                            else:
                                check_path_exists.append(False)

                            output_images_path.append(output_reconstructed_path)

                    # compute only if at least one output is missing or replacement is requested
                    if _replace or not np.array(check_path_exists).all():

                        # compute image
                        # pass block to grey level
                        output_block = transformation.getTransformedImage(block)
                        output_block = np.array(output_block, 'uint8')

                        # current output image
                        output_block_img = Image.fromarray(output_block)

                        #horizontal_img = output_block_img.transpose(Image.FLIP_LEFT_RIGHT)
                        #vertical_img = output_block_img.transpose(Image.FLIP_TOP_BOTTOM)
                        #both_img = output_block_img.transpose(Image.TRANSPOSE)

                        #flip_images = [output_block_img, horizontal_img, vertical_img, both_img]
                        #flip_images = [output_block_img, horizontal_img]

                        # only the current (non-flipped) image is used currently
                        flip_images = [output_block_img]

                        # rotate each kept image variant to increase dataset size
                        counter_index = 0  # current output path index
                        for id, flip in enumerate(flip_images):
                            for rotation in rotations:

                                if _replace or not check_path_exists[counter_index]:
                                    rotated_output_img = flip.rotate(rotation)
                                    rotated_output_img.save(output_images_path[counter_index])

                                counter_index += 1

                write_progress((id_img + 1) / number_scene_image)

            print('\n')

    print("{0}_{1} : end of data generation\n".format(transformation.getName(), transformation.getParam()))


def main():

    parser = argparse.ArgumentParser(description="Compute and prepare reconstructed feature data of all scenes, using a specific interval if necessary")

    parser.add_argument('--features', type=str,
                        help="list of feature choices in order to compute data",
                        default='svd_reconstruction, ipca_reconstruction',
                        required=True)

    parser.add_argument('--params', type=str,
                        help="list of specific params for each feature choice (see README.md for further information in 3D mode)",
                        default='100, 200 :: 50, 25',
                        required=True)

    parser.add_argument('--folder', type=str,
                        help='folder where the dataset is available',
                        required=True)

    parser.add_argument('--output', type=str,
                        help='output folder where data are saved',
                        required=True)

    parser.add_argument('--thresholds', type=str,
                        help='file which contains all thresholds',
                        required=True)

    parser.add_argument('--size', type=str,
                        help="specific size of image",
                        default='100, 100',
                        required=True)

    parser.add_argument('--replace', type=int,
                        help='replace previous pictures',
                        default=1)
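
    # Example invocation (the folder, output and thresholds paths below are
    # illustrative, not part of the repository):
    #   python generate_reconstructed_folder.py \
    #       --features "svd_reconstruction, ipca_reconstruction" \
    #       --params "100, 200 :: 50, 25" \
    #       --folder dataset/ --output reconstructed/ \
    #       --thresholds thresholds.csv --size "100, 100" --replace 0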

    args = parser.parse_args()

    p_features = list(map(str.strip, args.features.split(',')))
    p_params = list(map(str.strip, args.params.split('::')))
    p_folder = args.folder
    p_output = args.output
    p_thresholds = args.thresholds
    p_size = args.size
    p_replace = bool(args.replace)

    # list of transformations
    transformations = []

    for id, feature in enumerate(p_features):

        if feature not in features_choices or feature == 'static':
            raise ValueError("Unknown feature {0}, please select a correct feature (`static` excluded): {1}".format(feature, features_choices))

        transformations.append(Transformation(feature, p_params[id], p_size))

    human_thresholds = {}

    # retrieve human_thresholds (one line per scene in the thresholds file)
    with open(p_thresholds) as f:
        thresholds_line = f.readlines()
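
        # expected line format (inferred from the parsing below):
        #   <scene_name>;<threshold zone 0>;<threshold zone 1>;...;<threshold zone N>;
        # i.e. the scene name followed by one integer threshold per zone,
        # ';'-separated with a trailing ';' before the newline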

        for line in thresholds_line:
            data = line.split(';')
            del data[-1]  # remove unused last element `\n`

            current_scene = data[0]
            thresholds_scene = data[1:]

            if current_scene != '50_shades_of_grey':
                human_thresholds[current_scene] = [ int(threshold) for threshold in thresholds_scene ]

    # generate all or specific feature data
    for transformation in transformations:
        generate_data(transformation, p_folder, p_output, human_thresholds, p_replace)


if __name__ == "__main__":
    main()