generate_data_model_corr_random.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generate train/test datasets from feature indices selected through
correlation matrix information.

Created on Fri Sep 14 21:02:42 2018
@author: jbuisine
"""

from __future__ import print_function

import sys, os, argparse
import numpy as np
import pandas as pd
import random
import time
import json
import subprocess

from PIL import Image
from ipfml import processing, metrics, utils

from modules.utils import config as cfg
from modules.utils import data as dt

# getting configuration information
config_filename = cfg.config_filename
learned_folder = cfg.learned_zones_folder
min_max_filename = cfg.min_max_filename_extension

# define all scenes values
all_scenes_list = cfg.scenes_names
all_scenes_indices = cfg.scenes_indices

renderer_choices = cfg.renderer_choices
normalization_choices = cfg.normalization_choices
path = cfg.dataset_path
zones = cfg.zones_indices
seuil_expe_filename = cfg.seuil_expe_filename

metric_choices = cfg.metric_choices_labels
output_data_folder = cfg.output_data_folder
custom_min_max_folder = cfg.min_max_custom_folder
min_max_ext = cfg.min_max_filename_extension

generic_output_file_svd = '_random.csv'

min_value_interval = sys.maxsize
max_value_interval = 0
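
# Note: min_value_interval / max_value_interval are module-level accumulators;
# get_min_max_value_interval() fills them, and construct_new_line() reads them
# when renormalizing features in 'svdne' mode.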


def construct_new_line(path_seuil, indices, line, choice, norm):
    """Build a dataset line: threshold label followed by the selected features."""

    # increase indices values by one to skip the label column
    f = lambda x: x + 1
    indices = f(indices)

    line_data = np.array(line.split(';'))
    seuil = line_data[0]
    metrics = line_data[indices]

    # TODO : check if it's always necessary to do that (loss of information for svd)
    if norm:
        # values are read as strings, convert them before normalizing
        metrics = [float(m) for m in metrics]

        if choice == 'svdne':
            metrics = utils.normalize_arr_with_range(metrics, min_value_interval, max_value_interval)
        if choice == 'svdn':
            metrics = utils.normalize_arr(metrics)

    with open(path_seuil, "r") as seuil_file:
        seuil_learned = int(seuil_file.readline().strip())

    if seuil_learned > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in metrics:
        line += ';' + str(val)
    line += '\n'

    return line
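
# Example (hypothetical values): with a learned threshold of 200 and an input
# line "180;0.12;0.34;...", construct_new_line returns "1;0.12;0.34;...\n"
# (label 1 because 200 > 180).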


def get_min_max_value_interval(_scenes_list, _indices, _metric):
    """Compute the global min/max over the selected feature indices."""

    global min_value_interval, max_value_interval

    # increase indices values by one to skip the label column
    f = lambda x: x + 1
    indices = f(_indices)

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    for folder_scene in scenes:
        # only take care of scenes requested by the caller
        if folder_scene in _scenes_list:
            scene_path = os.path.join(path, folder_scene)

            # create zones list
            zones_folder = []
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str
                zones_folder.append("zone" + index_str)

            for zone_folder in zones_folder:
                zone_path = os.path.join(scene_path, zone_folder)

                # custom normalization relies on svd values not already normalized
                data_filename = _metric + "_svd" + generic_output_file_svd
                data_file_path = os.path.join(zone_path, data_filename)

                with open(data_file_path) as data_file:
                    lines = data_file.readlines()

                for line in lines:
                    line_data = np.array(line.split(';'))
                    # use the shifted indices so the label column is skipped
                    metrics = [float(m) for m in line_data[indices]]

                    min_value = min(metrics)
                    max_value = max(metrics)

                    if min_value < min_value_interval:
                        min_value_interval = min_value
                    if max_value > max_value_interval:
                        max_value_interval = max_value
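
# Worked example of the 'svdne' rescaling these bounds feed (hypothetical
# values): with min_value_interval = 0.2 and max_value_interval = 0.9, a
# feature value 0.55 maps to (0.55 - 0.2) / (0.9 - 0.2) = 0.5, which is the
# min-max normalization performed by utils.normalize_arr_with_range.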


def generate_data_model(_scenes_list, _filename, _interval, _choice, _metric, _scenes, _nb_zones=4, _percent=1, _random=0, _step=1, _custom=False):

    output_train_filename = _filename + ".train"
    output_test_filename = _filename + ".test"

    if '/' not in output_train_filename:
        raise Exception("Please select filename with directory path to save data. Example : data/dataset")

    # create path if it does not exist
    if not os.path.exists(output_data_folder):
        os.makedirs(output_data_folder)

    train_file_data = []
    test_file_data = []

    for folder_scene in _scenes_list:
        scene_path = os.path.join(path, folder_scene)

        # copy the zones list so shuffling does not mutate the shared config list
        zones_indices = list(zones)

        # shuffle list of zones (=> randomly choose zones), only in random mode
        if _random:
            random.shuffle(zones_indices)

        # store zones learned
        learned_zones_indices = zones_indices[:_nb_zones]

        # write them into file
        folder_learned_path = os.path.join(learned_folder, _filename.split('/')[1])
        if not os.path.exists(folder_learned_path):
            os.makedirs(folder_learned_path)

        file_learned_path = os.path.join(folder_learned_path, folder_scene + '.csv')
        with open(file_learned_path, 'w') as f:
            for i in learned_zones_indices:
                f.write(str(i) + ';')

        for id_zone, index_folder in enumerate(zones_indices):
            index_str = str(index_folder)
            if len(index_str) < 2:
                index_str = "0" + index_str
            current_zone_folder = "zone" + index_str
            zone_path = os.path.join(scene_path, current_zone_folder)

            # if custom normalization is chosen, use svd values not already normalized
            if _custom:
                data_filename = _metric + "_svd" + generic_output_file_svd
            else:
                data_filename = _metric + "_" + _choice + generic_output_file_svd

            data_file_path = os.path.join(zone_path, data_filename)

            # read all lines and shuffle them in random mode
            with open(data_file_path) as data_file:
                lines = data_file.readlines()
            num_lines = len(lines)

            if _random:
                random.shuffle(lines)

            path_seuil = os.path.join(zone_path, seuil_expe_filename)

            counter = 0
            for data in lines:
                percent = counter / num_lines
                image_index = int(data.split(';')[0])

                # keep only images matching the requested step
                if image_index % _step == 0:
                    line = construct_new_line(path_seuil, _interval, data, _choice, _custom)

                    # a line is used for training only if its zone, scene and percent are selected
                    if id_zone < _nb_zones and folder_scene in _scenes and percent <= _percent:
                        train_file_data.append(line)
                    else:
                        test_file_data.append(line)

                counter += 1

    with open(output_train_filename, 'w') as train_file:
        for line in train_file_data:
            train_file.write(line)

    with open(output_test_filename, 'w') as test_file:
        for line in test_file_data:
            test_file.write(line)
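
# Example split (hypothetical numbers): with _nb_zones = 10 and 16 zones per
# scene, lines from the first 10 (possibly shuffled) zones of each selected
# scene are candidates for the train set; the remaining zones feed the test set.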


def main():

    # getting all params
    parser = argparse.ArgumentParser(description="Generate data for model using correlation matrix information from data")

    parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')
    parser.add_argument('--n', type=int, help='Number of features wanted')
    parser.add_argument('--highest', type=int, help='Specify if highest or lowest values are wished', choices=[0, 1])
    parser.add_argument('--label', type=int, help='Specify if label correlation is used or not', choices=[0, 1])
    parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=normalization_choices)
    parser.add_argument('--metric', type=str, help='Metric data choice', choices=metric_choices)
    parser.add_argument('--scenes', type=str, help='List of scenes to use for training data')
    parser.add_argument('--nb_zones', type=int, help='Number of zones to use for training data set')
    parser.add_argument('--random', type=int, help='Data will be randomly filled or not', choices=[0, 1])
    parser.add_argument('--percent', type=float, help='Percent of data used for train and test dataset (by default 1)')
    parser.add_argument('--step', type=int, help='Photo step to keep for building datasets', default=1)
    parser.add_argument('--renderer', type=str, help='Renderer choice in order to limit scenes used', choices=renderer_choices, default='all')
    parser.add_argument('--custom', type=str, help='Name of custom min max file if renormalization of data is used', default=False)

    args = parser.parse_args()

    p_filename = args.output
    p_n = args.n
    p_highest = args.highest
    p_label = args.label
    p_kind = args.kind
    p_metric = args.metric
    p_scenes = args.scenes.split(',')
    p_nb_zones = args.nb_zones
    p_random = args.random
    p_percent = args.percent
    p_step = args.step
    p_renderer = args.renderer
    p_custom = args.custom

    # get scenes available for the selected renderer
    scenes_list = dt.get_renderer_scenes_names(p_renderer)
    scenes_indices = dt.get_renderer_scenes_indices(p_renderer)

    # getting scenes from user index selection
    scenes_selected = []
    for scene_id in p_scenes:
        index = scenes_indices.index(scene_id.strip())
        scenes_selected.append(scenes_list[index])

    # Get indices to keep from correlation information:
    # first compute a temporary data file to extract correlation information from
    temp_filename = 'temp'
    temp_filename_path = os.path.join(cfg.output_data_folder, temp_filename)

    cmd = ['python', 'generate_data_model_random.py',
           '--output', temp_filename_path,
           '--interval', '0, 200',
           '--kind', p_kind,
           '--metric', p_metric,
           '--scenes', args.scenes,
           '--nb_zones', str(16),
           '--random', str(int(p_random)),
           '--percent', str(p_percent),
           '--step', str(p_step),
           '--each', str(1),
           '--renderer', p_renderer,
           '--custom', temp_filename + min_max_ext]

    subprocess.Popen(cmd).wait()

    temp_data_file_path = temp_filename_path + '.train'
    df = pd.read_csv(temp_data_file_path, sep=';', header=None)
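    # The temp .train file is a ';'-separated matrix: column 0 holds the label
    # and the remaining columns hold the SVD features of the '0, 200' interval
    # requested above.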

    indices = []

    # compute correlation matrix from whole data scenes of renderer (with or without label column)
    if p_label:

        # compute pearson correlation between each feature and the label
        corr = df.corr()

        features_corr = []

        for id_row, row in enumerate(corr):
            for id_col, val in enumerate(corr[row]):
                if id_col == 0 and id_row != 0:
                    features_corr.append(abs(val))

    else:
        df = df.drop(df.columns[[0]], axis=1)

        # compute pearson correlation between features using only features
        corr = df[1:200].corr()

        features_corr = []

        for id_row, row in enumerate(corr):
            correlation_score = 0
            for id_col, val in enumerate(corr[row]):
                if id_col != id_row:
                    correlation_score += abs(val)

            features_corr.append(correlation_score)

    # find `n` min or max indices to keep
    if p_highest:
        indices = utils.get_indices_of_highest_values(features_corr, p_n)
    else:
        indices = utils.get_indices_of_lowest_values(features_corr, p_n)

    indices = np.sort(indices)
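    # Example (hypothetical values): with features_corr = [0.9, 0.1, 0.5] and
    # p_n = 2, the lowest-values branch keeps indices [1, 2]; np.sort puts
    # them in ascending order so they can be used directly as column offsets.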

    # save indices found
    if not os.path.exists(cfg.correlation_indices_folder):
        os.makedirs(cfg.correlation_indices_folder)

    indices_file_path = os.path.join(cfg.correlation_indices_folder, p_filename.replace(cfg.output_data_folder + '/', '') + '.csv')

    with open(indices_file_path, 'w') as f:
        for i in indices:
            f.write(str(i) + ';')

    # find min max values if data renormalization over the `n` indices found is required
    if p_custom:
        get_min_max_value_interval(scenes_list, indices, p_metric)

        # write new min max file to save
        if not os.path.exists(custom_min_max_folder):
            os.makedirs(custom_min_max_folder)

        min_max_folder_path = os.path.join(os.path.dirname(__file__), custom_min_max_folder)
        min_max_filename_path = os.path.join(min_max_folder_path, p_custom)

        with open(min_max_filename_path, 'w') as f:
            f.write(str(min_value_interval) + '\n')
            f.write(str(max_value_interval) + '\n')

    # create dataset from the img folder (generated the first time only)
    generate_data_model(scenes_list, p_filename, indices, p_kind, p_metric, scenes_selected, p_nb_zones, p_percent, p_random, p_step, p_custom)


if __name__ == "__main__":
    main()
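
# Example invocation (hypothetical paths and parameter values):
#   python generate_data_model_corr_random.py --output data/deep_model \
#       --n 30 --highest 0 --label 1 --kind svdne --metric lab \
#       --scenes "A, D, G" --nb_zones 10 --random 1 --percent 1 \
#       --step 10 --renderer maxwell --custom min_max_deep_model_values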