generate_reconstructed_data.py 7.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 11:47:42 2019
@author: jbuisine
"""
import sys, os, argparse
import numpy as np
import random
import time
import json
from PIL import Image
from ipfml import processing, metrics, utils
from skimage import color
from modules.utils import config as cfg
from modules.classes.Transformation import Transformation

# getting configuration information
config_filename = cfg.config_filename
zone_folder = cfg.zone_folder
min_max_filename = cfg.min_max_filename_extension

# define all scenes values (names, indices and per-zone layout of the dataset)
scenes_list = cfg.scenes_names
scenes_indexes = cfg.scenes_indices
choices = cfg.normalization_choices
path = cfg.dataset_path                     # root folder containing one sub-folder per scene
zones = cfg.zones_indices                   # indices of the zones each scene image is divided into
seuil_expe_filename = cfg.seuil_expe_filename  # per-zone human threshold file name
metric_choices = cfg.metric_choices_labels  # valid values for the --metrics CLI option
output_data_folder = cfg.output_data_folder

# suffix used for generated csv files
generic_output_file_svd = '_random.csv'
  31. def generate_data(transformation):
  32. """
  33. @brief Method which generates all .csv files from scenes
  34. @return nothing
  35. """
  36. scenes = os.listdir(path)
  37. # remove min max file from scenes folder
  38. scenes = [s for s in scenes if min_max_filename not in s]
  39. # go ahead each scenes
  40. for id_scene, folder_scene in enumerate(scenes):
  41. print(folder_scene)
  42. scene_path = os.path.join(path, folder_scene)
  43. config_file_path = os.path.join(scene_path, config_filename)
  44. with open(config_file_path, "r") as config_file:
  45. last_image_name = config_file.readline().strip()
  46. prefix_image_name = config_file.readline().strip()
  47. start_index_image = config_file.readline().strip()
  48. end_index_image = config_file.readline().strip()
  49. step_counter = int(config_file.readline().strip())
  50. # construct each zones folder name
  51. zones_folder = []
  52. metrics_folder = []
  53. zones_threshold = []
  54. # get zones list info
  55. for index in zones:
  56. index_str = str(index)
  57. if len(index_str) < 2:
  58. index_str = "0" + index_str
  59. current_zone = "zone"+index_str
  60. zones_folder.append(current_zone)
  61. zone_path = os.path.join(scene_path, current_zone)
  62. with open(os.path.join(zone_path, cfg.seuil_expe_filename)) as f:
  63. zones_threshold.append(int(f.readline()))
  64. # custom path for metric
  65. metric_path = os.path.join(zone_path, transformation.getName())
  66. if not os.path.exists(metric_path):
  67. os.makedirs(metric_path)
  68. # custom path for interval of reconstruction and metric
  69. metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
  70. metrics_folder.append(metric_interval_path)
  71. if not os.path.exists(metric_interval_path):
  72. os.makedirs(metric_interval_path)
  73. # create for each zone the labels folder
  74. labels = [cfg.not_noisy_folder, cfg.noisy_folder]
  75. for label in labels:
  76. label_folder = os.path.join(metric_interval_path, label)
  77. if not os.path.exists(label_folder):
  78. os.makedirs(label_folder)
  79. current_counter_index = int(start_index_image)
  80. end_counter_index = int(end_index_image)
  81. # for each images
  82. while(current_counter_index <= end_counter_index):
  83. current_counter_index_str = str(current_counter_index)
  84. while len(start_index_image) > len(current_counter_index_str):
  85. current_counter_index_str = "0" + current_counter_index_str
  86. img_path = os.path.join(scene_path, prefix_image_name + current_counter_index_str + ".png")
  87. current_img = Image.open(img_path)
  88. img_blocks = processing.divide_in_blocks(current_img, cfg.keras_img_size)
  89. for id_block, block in enumerate(img_blocks):
  90. ##########################
  91. # Image computation part #
  92. ##########################
  93. # pass block to grey level
  94. output_block = transformation.getTransformedImage(block)
  95. output_block = np.array(output_block, 'uint8')
  96. # current output image
  97. output_block_img = Image.fromarray(output_block)
  98. label_path = metrics_folder[id_block]
  99. # get label folder for block
  100. if current_counter_index > zones_threshold[id_block]:
  101. label_path = os.path.join(label_path, cfg.not_noisy_folder)
  102. else:
  103. label_path = os.path.join(label_path, cfg.noisy_folder)
  104. # Data augmentation!
  105. rotations = [0, 90, 180, 270]
  106. img_flip_labels = ['original', 'horizontal', 'vertical', 'both']
  107. horizontal_img = output_block_img.transpose(Image.FLIP_LEFT_RIGHT)
  108. vertical_img = output_block_img.transpose(Image.FLIP_TOP_BOTTOM)
  109. both_img = output_block_img.transpose(Image.TRANSPOSE)
  110. flip_images = [output_block_img, horizontal_img, vertical_img, both_img]
  111. # rotate and flip image to increase dataset size
  112. for id, flip in enumerate(flip_images):
  113. for rotation in rotations:
  114. rotated_output_img = flip.rotate(rotation)
  115. output_reconstructed_filename = img_path.split('/')[-1].replace('.png', '') + '_' + zones_folder[id_block] + cfg.post_image_name_separator
  116. output_reconstructed_filename = output_reconstructed_filename + img_flip_labels[id] + '_' + str(rotation) + '.png'
  117. output_reconstructed_path = os.path.join(label_path, output_reconstructed_filename)
  118. rotated_output_img.save(output_reconstructed_path)
  119. start_index_image_int = int(start_index_image)
  120. print(transformation.getName() + "_" + folder_scene + " - " + "{0:.2f}".format((current_counter_index - start_index_image_int) / (end_counter_index - start_index_image_int)* 100.) + "%")
  121. sys.stdout.write("\033[F")
  122. current_counter_index += step_counter
  123. print('\n')
  124. print("%s_%s : end of data generation\n" % (transformation.getName(), transformation.getParam()))
  125. def main():
  126. parser = argparse.ArgumentParser(description="Compute and prepare data of metric of all scenes using specific interval if necessary")
  127. parser.add_argument('--metrics', type=str,
  128. help="list of metrics choice in order to compute data",
  129. default='svd_reconstruction, ipca_reconstruction',
  130. required=True)
  131. parser.add_argument('--params', type=str,
  132. help="list of specific param for each metric choice (See README.md for further information in 3D mode)",
  133. default='100, 200 :: 50, 25',
  134. required=True)
  135. args = parser.parse_args()
  136. p_metrics = list(map(str.strip, args.metrics.split(',')))
  137. p_params = list(map(str.strip, args.params.split('::')))
  138. transformations = []
  139. for id, metric in enumerate(p_metrics):
  140. if metric not in metric_choices:
  141. raise ValueError("Unknown metric, please select a correct metric : ", metric_choices)
  142. transformations.append(Transformation(metric, p_params[id]))
  143. # generate all or specific metric data
  144. for transformation in transformations:
  145. generate_data(transformation)
# run the CLI entry point only when executed as a script, not on import
if __name__== "__main__":
    main()