# estimate_thresholds.py
# main imports
import numpy as np
import pandas as pd
import sys, os, argparse

# image processing
from PIL import Image
from ipfml import utils
from ipfml.processing import transform, segmentation
import matplotlib.pyplot as plt

# model imports
import joblib

# modules and config imports
sys.path.insert(0, '') # trick to enable import of main folder module
import custom_config as cfg
from modules.utils import data as dt
from data_attributes import get_image_features

# zone indices defined by the project configuration
zones_indices = cfg.zones_indices
  18. def write_progress(progress):
  19. barWidth = 180
  20. output_str = "["
  21. pos = barWidth * progress
  22. for i in range(barWidth):
  23. if i < pos:
  24. output_str = output_str + "="
  25. elif i == pos:
  26. output_str = output_str + ">"
  27. else:
  28. output_str = output_str + " "
  29. output_str = output_str + "] " + str(int(progress * 100.0)) + " %\r"
  30. print(output_str)
  31. sys.stdout.write("\033[F")
  32. def main():
  33. parser = argparse.ArgumentParser(description="Read and compute model on scene in order to make predictions")
  34. parser.add_argument('--folder', type=str, help='folder where scene data are stored', required=True)
  35. parser.add_argument('--model', type=str, help='model file', required=True)
  36. parser.add_argument('--solution', type=str, help='Data of solution to specify filters to use', required=True)
  37. parser.add_argument('--method', type=str, help='method name to used', choices=cfg.features_choices_labels, default=cfg.features_choices_labels[0], required=True)
  38. parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices, required=True)
  39. parser.add_argument('--n_stop', type=int, help='n consecutive prediction to stop', default=1)
  40. parser.add_argument('--custom', type=str, help='Name of custom min max file if use of renormalization of data', default='')
  41. parser.add_argument('--save', type=str, help='filename where to save input data', required=True)
  42. parser.add_argument('--label', type=str, help='label to use when saving thresholds', required=True)
  43. args = parser.parse_args()
  44. p_model = args.model
  45. p_solution = list(map(int, args.solution.split(' ')))
  46. p_method = args.method
  47. p_n_stop = args.n_stop
  48. p_folder = args.folder
  49. p_mode = args.kind
  50. p_custom = args.custom
  51. p_save = args.save
  52. p_label = args.label
  53. if len(p_custom) > 0:
  54. # need to read min_max_file
  55. with open(p_custom, 'r') as f:
  56. min_val = float(f.readline().replace('\n', ''))
  57. max_val = float(f.readline().replace('\n', ''))
  58. # 1. get scene name
  59. scene_path = p_folder
  60. # 2. load model and compile it
  61. # TODO : check kind of model
  62. model = joblib.load(p_model)
  63. # model.compile(loss='binary_crossentropy',
  64. # optimizer='rmsprop',
  65. # metrics=['accuracy'])
  66. # 3. get indices kept by solution
  67. # get indices of attributes data to use (attributes selection from solution)
  68. indices = []
  69. for index, value in enumerate(p_solution):
  70. if value == 1:
  71. indices.append(index)
  72. # 4. prepare scene to predict
  73. estimated_thresholds = []
  74. n_estimated_thresholds = []
  75. zones_list = np.arange(16)
  76. # 4. get estimated thresholds using model and specific method
  77. images_path = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
  78. number_of_images = len(images_path)
  79. image_indices = [ dt.get_scene_image_quality(img_path) for img_path in images_path ]
  80. image_counter = 0
  81. # append empty list
  82. for _ in zones_list:
  83. estimated_thresholds.append(None)
  84. n_estimated_thresholds.append(0)
  85. for img_i, img_path in enumerate(images_path):
  86. blocks = segmentation.divide_in_blocks(Image.open(img_path), (200, 200))
  87. for index, block in enumerate(blocks):
  88. if estimated_thresholds[index] is None:
  89. # check if prediction is possible
  90. data = np.array(get_image_features(p_method, np.array(block)))
  91. if p_mode == 'svdn':
  92. data = utils.normalize_arr_with_range(data)
  93. if p_mode == 'svdne':
  94. data = utils.normalize_arr_with_range(data, min_val, max_val)
  95. data = np.array(data)[indices]
  96. #data = np.expand_dims(data, axis=0)
  97. #print(data.shape)
  98. prob = model.predict(np.array(data).reshape(1, -1))[0]
  99. #print(index, ':', image_indices[img_i], '=>', prob)
  100. if prob < 0.5:
  101. n_estimated_thresholds[index] += 1
  102. # if same number of detection is attempted
  103. if n_estimated_thresholds[index] >= p_n_stop:
  104. estimated_thresholds[index] = image_indices[img_i]
  105. else:
  106. n_estimated_thresholds[index] = 0
  107. # write progress bar
  108. write_progress((image_counter + 1) / number_of_images)
  109. image_counter = image_counter + 1
  110. # default label
  111. for i, _ in enumerate(zones_list):
  112. if estimated_thresholds[i] == None:
  113. estimated_thresholds[i] = image_indices[-1]
  114. # 6. save estimated thresholds into specific file
  115. print(estimated_thresholds)
  116. print(p_save)
  117. if p_save is not None:
  118. with open(p_save, 'a') as f:
  119. f.write(p_label + ';')
  120. for t in estimated_thresholds:
  121. f.write(str(t) + ';')
  122. f.write('\n')
# script entry point
if __name__== "__main__":
    main()