# generate_dataset.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 19 11:47:42 2019

@author: jbuisine
"""

# main imports
import sys, os, argparse
import numpy as np
import random

# images processing imports
from PIL import Image
from ipfml.processing.segmentation import divide_in_blocks

# modules imports
sys.path.insert(0, '') # trick to enable import of main folder module

import custom_config as cfg
from modules.utils import data as dt
from modules.classes.Transformation import Transformation

# getting configuration information (short module-level aliases for cfg values)
config_filename = cfg.config_filename
zone_folder = cfg.zone_folder
learned_folder = cfg.learned_zones_folder
min_max_filename = cfg.min_max_filename_extension

# define all scenes values
scenes_list = cfg.scenes_names
scenes_indexes = cfg.scenes_indices
dataset_path = cfg.dataset_path
zones = cfg.zones_indices
seuil_expe_filename = cfg.seuil_expe_filename
features_choices = cfg.features_choices_labels
output_data_folder = cfg.output_data_folder

# NOTE(review): this suffix is not referenced anywhere in this file — confirm
# it is used elsewhere before removing
generic_output_file_svd = '_random.csv'
  33. def generate_data_model(_scenes_list, _filename, _transformations, _scenes, _nb_zones = 4, _random=0):
  34. output_train_filename = _filename + ".train"
  35. output_test_filename = _filename + ".test"
  36. if not '/' in output_train_filename:
  37. raise Exception("Please select filename with directory path to save data. Example : data/dataset")
  38. # create path if not exists
  39. if not os.path.exists(output_data_folder):
  40. os.makedirs(output_data_folder)
  41. zones_indices = zones
  42. train_file_data = []
  43. test_file_data = []
  44. scenes = os.listdir(dataset_path)
  45. # remove min max file from scenes folder
  46. scenes = [s for s in scenes if min_max_filename not in s]
  47. # go ahead each scenes
  48. for folder_scene in _scenes_list:
  49. scene_path = os.path.join(dataset_path, folder_scene)
  50. # shuffle list of zones (=> randomly choose zones)
  51. # only in random mode
  52. if _random:
  53. random.shuffle(zones_indices)
  54. # store zones learned
  55. learned_zones_indices = zones_indices[:_nb_zones]
  56. # write into file
  57. folder_learned_path = os.path.join(learned_folder, _filename.split('/')[1])
  58. if not os.path.exists(folder_learned_path):
  59. os.makedirs(folder_learned_path)
  60. file_learned_path = os.path.join(folder_learned_path, folder_scene + '.csv')
  61. with open(file_learned_path, 'w') as f:
  62. for i in learned_zones_indices:
  63. f.write(str(i) + ';')
  64. for id_zone, index_folder in enumerate(zones_indices):
  65. index_str = str(index_folder)
  66. if len(index_str) < 2:
  67. index_str = "0" + index_str
  68. current_zone_folder = "zone" + index_str
  69. zone_path = os.path.join(scene_path, current_zone_folder)
  70. # custom path for interval of reconstruction and metric
  71. features_path = []
  72. for transformation in _transformations:
  73. # check if it's a static content and create augmented images if necessary
  74. if transformation.getName() == 'static':
  75. # {sceneName}/zoneXX/static
  76. static_metric_path = os.path.join(zone_path, transformation.getName())
  77. # img.png
  78. image_name = transformation.getParam().split('/')[-1]
  79. # {sceneName}/zoneXX/static/img
  80. image_prefix_name = image_name.replace('.png', '')
  81. image_folder_path = os.path.join(static_metric_path, image_prefix_name)
  82. if not os.path.exists(image_folder_path):
  83. os.makedirs(image_folder_path)
  84. features_path.append(image_folder_path)
  85. # get image path to manage
  86. # {sceneName}/static/img.png
  87. transform_image_path = os.path.join(scene_path, transformation.getName(), image_name)
  88. static_transform_image = Image.open(transform_image_path)
  89. static_transform_image_block = divide_in_blocks(static_transform_image, cfg.keras_img_size)[id_zone]
  90. dt.augmented_data_image(static_transform_image_block, image_folder_path, image_prefix_name)
  91. else:
  92. metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())
  93. features_path.append(metric_interval_path)
  94. # as labels are same for each metric
  95. for label in os.listdir(features_path[0]):
  96. label_features_path = []
  97. for path in features_path:
  98. label_path = os.path.join(path, label)
  99. label_features_path.append(label_path)
  100. # getting images list for each metric
  101. features_images_list = []
  102. for index_metric, label_path in enumerate(label_features_path):
  103. if _transformations[index_metric].getName() == 'static':
  104. # by default append nothing..
  105. features_images_list.append([])
  106. else:
  107. images = sorted(os.listdir(label_path))
  108. features_images_list.append(images)
  109. # construct each line using all images path of each
  110. for index_image in range(0, len(features_images_list[0])):
  111. images_path = []
  112. # get information about rotation and flip from first transformation (need to be a not static transformation)
  113. current_post_fix = features_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]
  114. # getting images with same index and hence name for each metric (transformation)
  115. for index_metric in range(0, len(features_path)):
  116. # custom behavior for static transformation (need to check specific image)
  117. if _transformations[index_metric].getName() == 'static':
  118. # add static path with selecting correct data augmented image
  119. image_name = _transformations[index_metric].getParam().split('/')[-1].replace('.png', '')
  120. img_path = os.path.join(features_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)
  121. images_path.append(img_path)
  122. else:
  123. img_path = features_images_list[index_metric][index_image]
  124. images_path.append(os.path.join(label_features_path[index_metric], img_path))
  125. if label == cfg.noisy_folder:
  126. line = '1;'
  127. else:
  128. line = '0;'
  129. # compute line information with all images paths
  130. for id_path, img_path in enumerate(images_path):
  131. if id_path < len(images_path) - 1:
  132. line = line + img_path + '::'
  133. else:
  134. line = line + img_path
  135. line = line + '\n'
  136. if id_zone < _nb_zones and folder_scene in _scenes:
  137. train_file_data.append(line)
  138. else:
  139. test_file_data.append(line)
  140. train_file = open(output_train_filename, 'w')
  141. test_file = open(output_test_filename, 'w')
  142. random.shuffle(train_file_data)
  143. random.shuffle(test_file_data)
  144. for line in train_file_data:
  145. train_file.write(line)
  146. for line in test_file_data:
  147. test_file.write(line)
  148. train_file.close()
  149. test_file.close()
  150. def main():
  151. parser = argparse.ArgumentParser(description="Compute specific dataset for model using of metric")
  152. parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
  153. parser.add_argument('--features', type=str,
  154. help="list of features choice in order to compute data",
  155. default='svd_reconstruction, ipca_reconstruction',
  156. required=True)
  157. parser.add_argument('--params', type=str,
  158. help="list of specific param for each metric choice (See README.md for further information in 3D mode)",
  159. default='100, 200 :: 50, 25',
  160. required=True)
  161. parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
  162. parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set', choices=list(range(1, 17)))
  163. parser.add_argument('--renderer', type=str, help='Renderer choice in order to limit scenes used', choices=cfg.renderer_choices, default='all')
  164. parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
  165. args = parser.parse_args()
  166. p_filename = args.output
  167. p_features = list(map(str.strip, args.features.split(',')))
  168. p_params = list(map(str.strip, args.params.split('::')))
  169. p_scenes = args.scenes.split(',')
  170. p_nb_zones = args.nb_zones
  171. p_renderer = args.renderer
  172. p_random = args.random
  173. # create list of Transformation
  174. transformations = []
  175. for id, feature in enumerate(p_features):
  176. if feature not in features_choices:
  177. raise ValueError("Unknown metric, please select a correct metric : ", features_choices)
  178. transformations.append(Transformation(feature, p_params[id]))
  179. if transformations[0].getName() == 'static':
  180. raise ValueError("The first transformation in list cannot be static")
  181. # list all possibles choices of renderer
  182. scenes_list = dt.get_renderer_scenes_names(p_renderer)
  183. scenes_indices = dt.get_renderer_scenes_indices(p_renderer)
  184. # getting scenes from indexes user selection
  185. scenes_selected = []
  186. for scene_id in p_scenes:
  187. index = scenes_indices.index(scene_id.strip())
  188. scenes_selected.append(scenes_list[index])
  189. # create database using img folder (generate first time only)
  190. generate_data_model(scenes_list, p_filename, transformations, scenes_selected, p_nb_zones, p_random)
  191. if __name__== "__main__":
  192. main()