#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 11:47:42 2019

@author: jbuisine
"""

# main imports
import sys, os, argparse
import numpy as np

# image processing imports
from PIL import Image
from ipfml.processing.segmentation import divide_in_blocks

# modules imports
sys.path.insert(0, '') # trick to enable import of main folder module

import custom_config as cfg
from modules.utils.data import get_scene_image_quality
from modules.classes.Transformation import Transformation

# getting configuration information
zone_folder = cfg.zone_folder

# define all scenes values
zones = cfg.zones_indices
features_choices = cfg.features_choices_labels

'''
Display progress information as progress bar
'''
def write_progress(progress):
    barWidth = 180

    output_str = "["
    pos = barWidth * progress

    for i in range(barWidth):
        if i < pos:
            output_str = output_str + "="
        elif i == pos:
            output_str = output_str + ">"
        else:
            output_str = output_str + " "

    output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
    print(output_str)
    sys.stdout.write("\033[F")
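
# Note (illustrative usage): write_progress expects a float in [0, 1]; for example,
# write_progress(0.25) prints a 180-character bar filled to 25 % and then moves the
# cursor back up one line ("\033[F") so the next call overwrites the previous bar.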


def generate_data(transformation, _dataset_path, _output, _human_thresholds, _replace):
    """
    @brief Method which generates all reconstructed image data from scenes
    @return nothing
    """

    # path is the default dataset path
    scenes = os.listdir(_dataset_path)
    n_scenes = len(scenes)

    # go through each scene
    for id_scene, folder_scene in enumerate(scenes):

        print('Scene {0} of {1} ({2})'.format((id_scene + 1), n_scenes, folder_scene))

        scene_path = os.path.join(_dataset_path, folder_scene)
        output_scene_path = os.path.join(cfg.output_data_generated, _output, folder_scene)

        # construct each zone folder name
        zones_folder = []
        features_folder = []

        if folder_scene in _human_thresholds:

            zones_threshold = _human_thresholds[folder_scene]

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)
                zone_path = os.path.join(output_scene_path, current_zone)

                # custom path for feature
                feature_path = os.path.join(zone_path, transformation.getName())

                if not os.path.exists(feature_path):
                    os.makedirs(feature_path)

                # custom path for interval of reconstruction and feature
                feature_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
                features_folder.append(feature_interval_path)

                if not os.path.exists(feature_interval_path):
                    os.makedirs(feature_interval_path)

                # create the labels folders for each zone
                labels = [cfg.not_noisy_folder, cfg.noisy_folder]

                for label in labels:
                    label_folder = os.path.join(feature_interval_path, label)

                    if not os.path.exists(label_folder):
                        os.makedirs(label_folder)

            # get all images of the scene folder
            scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
            number_scene_image = len(scene_images)

            # for each image
            for id_img, img_path in enumerate(scene_images):

                current_img = Image.open(img_path)
                img_blocks = divide_in_blocks(current_img, cfg.sub_image_size)

                current_quality_index = int(get_scene_image_quality(img_path))

                for id_block, block in enumerate(img_blocks):

                    ##########################
                    # Image computation part #
                    ##########################

                    label_path = features_folder[id_block]

                    # get label folder for block
                    if current_quality_index > zones_threshold[id_block]:
                        label_path = os.path.join(label_path, cfg.not_noisy_folder)
                    else:
                        label_path = os.path.join(label_path, cfg.noisy_folder)

                    # check if necessary to compute or not images
                    # Disable use of data augmentation for the moment

                    # Data augmentation!
                    # rotations = [0, 90, 180, 270]

                    # img_flip_labels = ['original', 'horizontal', 'vertical', 'both']
                    # img_flip_labels = ['original', 'horizontal']

                    # output_images_path = []
                    # check_path_exists = []

                    # # rotate and flip image to increase dataset size
                    # for id, flip_label in enumerate(img_flip_labels):
                    #     for rotation in rotations:
                    #         output_reconstructed_filename = img_path.split('/')[-1].replace('.png', '') + '_' + zones_folder[id_block] + cfg.post_image_name_separator
                    #         output_reconstructed_filename = output_reconstructed_filename + flip_label + '_' + str(rotation) + '.png'
                    #         output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)

                    #         if os.path.exists(output_reconstructed_path):
                    #             check_path_exists.append(True)
                    #         else:
                    #             check_path_exists.append(False)

                    #         output_images_path.append(output_reconstructed_path)

                    # # compute only if not exists or necessary to replace
                    # if _replace or not np.array(check_path_exists).all():

                    #     # compute image
                    #     # pass block to grey level
                    #     output_block = transformation.getTransformedImage(block)
                    #     output_block = np.array(output_block, 'uint8')

                    #     # current output image
                    #     output_block_img = Image.fromarray(output_block)

                    #     # horizontal_img = output_block_img.transpose(Image.FLIP_LEFT_RIGHT)
                    #     # vertical_img = output_block_img.transpose(Image.FLIP_TOP_BOTTOM)
                    #     # both_img = output_block_img.transpose(Image.TRANSPOSE)

                    #     # flip_images = [output_block_img, horizontal_img, vertical_img, both_img]
                    #     # flip_images = [output_block_img, horizontal_img]

                    #     # Only current image currently
                    #     flip_images = [output_block_img]

                    #     # rotate and flip image to increase dataset size
                    #     counter_index = 0 # get current path index
                    #     for id, flip in enumerate(flip_images):
                    #         for rotation in rotations:
                    #             if _replace or not check_path_exists[counter_index]:
                    #                 rotated_output_img = flip.rotate(rotation)
                    #                 rotated_output_img.save(output_images_path[counter_index])

                    #             counter_index += 1

                    if _replace:

                        _, filename = os.path.split(img_path)

                        # build the output image filename
                        filename = filename.replace('.png', '')
                        filename_parts = filename.split('_')

                        # get samples : `00XXX`
                        n_samples = filename_parts[-1]
                        del filename_parts[-1]

                        # `p3d_XXXXXX`
                        output_reconstructed = '_'.join(filename_parts)

                        output_reconstructed_filename = output_reconstructed + '_' + zones_folder[id_block] + '_' + n_samples + '.png'
                        output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)
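                        # Example with an assumed input name such as `p3d_scene_00250.png`:
                        # n_samples -> '00250', output_reconstructed -> 'p3d_scene', and the
                        # block is saved as `p3d_scene_zone04_00250.png` inside the label folder.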

                        output_block = transformation.getTransformedImage(block)
                        output_block = np.array(output_block, 'uint8')

                        # current output image
                        output_block_img = Image.fromarray(output_block)
                        output_block_img.save(output_reconstructed_path)

                write_progress((id_img + 1) / number_scene_image)

            print('\n')

    print("{0}_{1} : end of data generation\n".format(transformation.getName(), transformation.getParam()))


def main():

    parser = argparse.ArgumentParser(description="Compute and prepare feature data for all scenes, using a specific interval if necessary")

    parser.add_argument('--features', type=str,
                        help="list of feature choices used to compute data",
                        default='svd_reconstruction, ipca_reconstruction',
                        required=True)

    parser.add_argument('--params', type=str,
                        help="list of specific params for each feature choice (see README.md for further information in 3D mode)",
                        default='100, 200 :: 50, 25',
                        required=True)

    parser.add_argument('--folder', type=str,
                        help='folder where the dataset is available',
                        required=True)

    parser.add_argument('--output', type=str,
                        help='output folder where data are saved',
                        required=True)

    parser.add_argument('--thresholds', type=str,
                        help='file which contains all thresholds',
                        required=True)

    parser.add_argument('--size', type=str,
                        help="specific size of image",
                        default='100, 100',
                        required=True)

    parser.add_argument('--replace', type=int,
                        help='replace previous pictures',
                        default=1)

    args = parser.parse_args()

    p_features = list(map(str.strip, args.features.split(',')))
    p_params = list(map(str.strip, args.params.split('::')))
    p_folder = args.folder
    p_output = args.output
    p_thresholds = args.thresholds
    p_size = args.size
    p_replace = bool(args.replace)

    # list of transformations
    transformations = []

    for id, feature in enumerate(p_features):

        if feature not in features_choices or feature == 'static':
            raise ValueError("Unknown feature {0}, please select a correct feature (`static` excluded) : {1}".format(feature, features_choices))

        transformations.append(Transformation(feature, p_params[id], p_size))
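
    # Example (values are illustrative): with --features 'svd_reconstruction, ipca_reconstruction'
    # and --params '100, 200 :: 50, 25', two Transformation objects are built:
    # Transformation('svd_reconstruction', '100, 200', p_size) and
    # Transformation('ipca_reconstruction', '50, 25', p_size).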

    human_thresholds = {}

    # 3. retrieve human_thresholds
    # construct zones folder
    with open(p_thresholds) as f:
        thresholds_line = f.readlines()
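        # Each line of the thresholds file is assumed to be semicolon-separated:
        # `scene_name;threshold_1;...;threshold_N;` followed by '\n', which is why
        # the last split element is dropped below.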

        for line in thresholds_line:

            data = line.split(';')
            del data[-1] # remove unused last element `\n`

            current_scene = data[0]
            thresholds_scene = data[1:]

            if current_scene != '50_shades_of_grey':
                human_thresholds[current_scene] = [ int(threshold) for threshold in thresholds_scene ]

    # generate all or specific feature data
    for transformation in transformations:
        generate_data(transformation, p_folder, p_output, human_thresholds, p_replace)


if __name__ == "__main__":
    main()
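
# Example invocation (paths and parameter values below are illustrative only):
#
#   python generate_reconstructed_folder.py \
#       --features "svd_reconstruction, ipca_reconstruction" \
#       --params "100, 200 :: 50, 25" \
#       --folder data/scenes \
#       --output reconstructed_data \
#       --thresholds data/thresholds.csv \
#       --size "100, 100" \
#       --replace 1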