@@ -1,11 +1,14 @@
 # main imports
 import numpy as np
 import pandas as pd
+import math
+import time
 
 import os, sys, argparse
 
 # image processing imports
 import matplotlib.pyplot as plt
+from PIL import Image
 
 # modules imports
 sys.path.insert(0, '') # trick to enable import of main folder module
@@ -17,6 +20,12 @@ from data_attributes import get_image_features
 learned_zones_folder = cfg.learned_zones_folder
 models_name = cfg.models_names_list
 
+# utils information
+zone_width, zone_height = (200, 200)
+scene_width, scene_height = (800, 800)
+nb_x_parts = math.floor(scene_width / zone_width) # number of zones per row of a scene
+
+
 def reconstruct_image(folder_path, model_name, p_limit):
     """
     @brief Method used to display simulation given .csv files
@@ -34,18 +43,113 @@ def reconstruct_image(folder_path, model_name, p_limit):
 
     scene_names = [f.split('_')[3] for f in data_files]
 
+    # compute zone start index
+    zones_coordinates = []
+    for index, zone_index in enumerate(cfg.zones_indices):
+        x_zone = (zone_index % nb_x_parts) * zone_width
+        y_zone = (math.floor(zone_index / nb_x_parts)) * zone_height
+
+        zones_coordinates.append((x_zone, y_zone))
+
+    print(zones_coordinates)
+
     for id, f in enumerate(data_files):
 
-        print(scene_names[id])
+        scene_name = scene_names[id]
         path_file = os.path.join(folder_path, f)
 
         # TODO : check if necessary to keep information about zone learned when displaying data
-        scenes_zones_used_file_path = os.path.join(learned_zones_folder_path, scene_names[id] + '.csv')
+        scenes_zones_used_file_path = os.path.join(learned_zones_folder_path, scene_name + '.csv')
+
+        zones_used = []
+
+        if os.path.exists(scenes_zones_used_file_path):
+            with open(scenes_zones_used_file_path, 'r') as f:
+                zones_used = [int(x) for x in f.readline().split(';') if x != '']
+
+        # 1. find estimated threshold for each zone scene using `data_files` and p_limit
+        model_thresholds = []
+        df = pd.read_csv(path_file, header=None, sep=";")
+
+        for index, row in df.iterrows():
+
+            row = np.asarray(row)
+
+            threshold = row[2]
+            start_index = row[3]
+            step_value = row[4]
+            rendering_predictions = row[5:]
+
+            nb_generated_image = 0
+            nb_not_noisy_prediction = 0
+
+            for prediction in rendering_predictions:
+
+                if int(prediction) == 0:
+                    nb_not_noisy_prediction += 1
+                else:
+                    nb_not_noisy_prediction = 0
+
+                # exit loop if limit is targeted
+                if nb_not_noisy_prediction >= p_limit:
+                    break
+
+                nb_generated_image += 1
+
+            current_threshold = start_index + step_value * nb_generated_image
+            model_thresholds.append(current_threshold)
+
+        # 2. find images for each zone which are attached to this estimated threshold by the model
+
+        zone_images_index = []
+
+        for est_threshold in model_thresholds:
+
+            str_index = str(est_threshold)
+            while len(str_index) < 5:
+                str_index = "0" + str_index
+
+            zone_images_index.append(str_index)
+
+        scene_folder = os.path.join(cfg.dataset_path, scene_name)
+
+        scenes_images = [img for img in os.listdir(scene_folder) if cfg.scene_image_extension in img]
+        scenes_images = sorted(scenes_images)
+
+        images_zones = []
+        line_images_zones = []
+        # get image using threshold by zone
+        for id, zone_index in enumerate(zone_images_index):
+            filtered_images = [img for img in scenes_images if zone_index in img]
+
+            if len(filtered_images) > 0:
+                image_name = filtered_images[0]
+            else:
+                image_name = scenes_images[-1]
+
+            #print(image_name)
+            image_path = os.path.join(scene_folder, image_name)
+            selected_image = Image.open(image_path)
+
+            x_zone, y_zone = zones_coordinates[id]
+            zone_image = np.array(selected_image)[y_zone:y_zone+zone_height, x_zone:x_zone+zone_width]
+            line_images_zones.append(zone_image)
+
+            if int(id + 1) % int(scene_width / zone_width) == 0:
+                images_zones.append(np.concatenate(line_images_zones, axis=1))
+                print(len(line_images_zones))
+                line_images_zones = []
+
+
+        # 3. reconstruct the image using these zones
+        reconstructed_image = np.concatenate(images_zones, axis=0)
+
+        # 4. Save the image with generated name based on scene, model and `p_limit`
+        reconstructed_pil_img = Image.fromarray(reconstructed_image)
+
+        output_path = os.path.join(folder_path, scene_name + '_reconstruction_limit_' + str(p_limit) + '.png')
 
-        # TODO : find estimated threshold for each zone scene using `data_files` and p_limit
-        # TODO : find images for each zone which are attached to this estimated threshold by the model
-        # TODO : reconstructed the image using these zones
-        # TODO : Save the image with generated name based on scene, model and `p_limit`
+        reconstructed_pil_img.save(output_path)
 
 
 def main():
@@ -60,7 +164,6 @@ def main():
 
     p_folder = args.folder
    p_limit = args.limit
-    p_output = args.output
 
     if args.model:
         p_model = args.model