Add example function

Vasiliki 4 years ago
Parent commit f7fb1081f2
3 changed files with 163 additions and 51 deletions
  1. expe/config.py (+35 -7)
  2. expe/expes/run.py (+63 -0)
  3. expe/views.py (+65 -44)

expe/config.py (+35 -7)

@@ -36,18 +36,46 @@ expes_configuration            = {
             'indication': "press RIGHT if you see 2 images, LEFT if not",
             'end_text': "Experience is finished. Thanks for your participation",
             'examples': {
-                'sentence': ["First example of 2 images:\n This image is cropped {0}.\n {1}% on the left originating from a low-quality image and on the right originating from high quality. \n So, press RIGHT.", 
-                             "Second example of 1 image: \n This image is cropped {0}. \n {1}% on the left originating from a high-quality image and on the right originating from high quality, too.\n So, press LEFT", 
-                             "Third example of 2 images: \n This image is cropped {0}. \n {1}% on the upper part originating from a low-quality image and on the bottom originating from high quality. \n So, press RIGHT."],
+                'sentence': ["1st example : ", 
+                             "The answer is 2 images! \n This image is cropped {0}.\n {1}% on the left originating from a low-quality image and on the right originating from high quality. \n So, press RIGHT.",
+                             "2nd example : ",
+                             "The answer is 1 image! \n This image is cropped {0} but \n {1}% on the left originating from a high-quality image and on the right originating from high quality, too.\n So, press LEFT.", 
+                             "3rd example: ",
+                             "The answer is 2 images! \n This image is cropped {0}. \n {1}% on the upper part originating from a low-quality image and on the bottom originating from high quality. \n So, press RIGHT.",
+                             "4th example: ",
+                             "The answer is 2 images! \n This image is cropped {0}. \n {1}% on the upper part originating from a low-quality image and on the bottom originating from high quality. \n So, press RIGHT.",
+                             "5th example: ",
+                             "The answer is 1 image! \n This image is cropped {0}. \n {1}% on the upper part originating from a high-quality image and on the bottom originating from a low-quality quality.\n So, press LEFT.",
+                             "6th example: ",
+                             "The answer is 2 images! \n This image is cropped {0}. \n {1}% on the left originating from a low-quality image and on the right originating from high quality. \n So, press RIGHT.",
+                             ],
                 'crop_params': [
                     [0.3, 0, 0],
                     [0.3, 0, 0],
-                    [0.7, 1, 0]
+                    [0.3, 0, 0],
+                    [0.3, 0, 0],
+                    [0.7, 1, 0],
+                    [0.7, 1, 0],
+                    [0.2, 1, 0],
+                    [0.2, 1, 0],
+                    [0.8, 1, 1],
+                    [0.8, 1, 1],
+                    [0.2, 0, 0],
+                    [0.2, 0, 0]
                 ],
                 'images': [
-                    [2, -1],
-                    [-3, -1],
-                    [7, -1]
+                    [1, -1],
+                    [1, -1],
+                    [-1, -1],
+                    [-1, -1],
+                    [7, -1],
+                    [7, -1],
+                    [15, -1],
+                    [15, -1],
+                    [20, -1],
+                    [20, -1],
+                    [30, -1],
+                    [30, -1]
                 ]
             }
         },
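
The three lists under 'examples' are parallel: entry i of 'sentence', 'crop_params' and 'images' describes the same training step, and entries come in prompt/answer pairs (an even index shows the question, the following odd index shows the same image annotated with its answer). Below is a minimal sketch of reading one entry back; the import path and the experiment name 'quest_one_image' are assumptions based on how run.py and views.py use this configuration in this commit.

# Hypothetical lookup of one example entry; import path and experiment name
# are assumptions, not part of this commit.
from expe import config as cfg

expe_name = 'quest_one_image'
example_number = 1   # odd index: the "answer" variant of the previous prompt

examples = cfg.expes_configuration[expe_name]['text']['examples']
sentence = examples['sentence'][example_number]
percentage, orientation, swap = examples['crop_params'][example_number]
noisy_index, ref_index = examples['images'][example_number]

# orientation 0 means a vertical split, anything else horizontal (see run.py)
direction = 'vertically' if orientation == 0 else 'horizontally'
print(sentence.format(direction, str(percentage * 100)))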

expe/expes/run.py (+63 -0)

@@ -22,6 +22,69 @@ from .classes.quest_plus import psychometric_fun
 from ipfml import utils
 from pprint import pprint
 
+from PIL import Image, ImageDraw
+
+def example_quest_one_image(request, expe_name, scene_name):
+    
+    example_number = request.GET.get('example')
+
+    
+    # get expected image qualities indices (load noisy and ref image)
+    params_image = cfg.expes_configuration[expe_name]['text']['examples']['images'][int(example_number)]
+    qualities = api.get_scene_qualities(scene_name)
+
+    noisy_quality = qualities[params_image[0]]
+    ref_quality = qualities[params_image[1]]
+
+    noisy_image = api.get_image(scene_name, noisy_quality)
+    ref_image = api.get_image(scene_name, ref_quality)
+
+    # get crop params from configuration
+    crop_params = cfg.expes_configuration[expe_name]['text']['examples']['crop_params'][int(example_number)]
+
+    img_merge, percentage, orientation, position = crop_images(noisy_image,     
+                                                                ref_image, 
+                                                                per=crop_params[0], 
+                                                                orien=crop_params[1], 
+                                                                swap_img=crop_params[2])
+    width, height = img_merge.size
+    if orientation==0:
+        left, top, right, bottom = percentage*width, 0, percentage*width, height   #vertical
+    else:
+        left, top, right, bottom = 0, percentage*height, width, percentage*height   #horizontal
+    if int(example_number) % 2 != 0:   # odd indices are the "answer" step of an example
+        if noisy_quality != qualities[-1]:   # only mark the boundary when two different images are merged
+            draw = ImageDraw.Draw(img_merge) 
+            draw.line((left, top, right, bottom), fill='black', width=5)
+    example_sentence = cfg.expes_configuration[expe_name]['text']['examples']['sentence'][int(example_number)]
+
+    if orientation == 0:
+        example_sentence = example_sentence.format('vertically', str(percentage*100))
+    else:
+        example_sentence = example_sentence.format('horizontally', str(percentage*100))
+    
+    
+    # Temporary save of image
+    tmp_folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)
+
+    if not os.path.exists(tmp_folder):
+        os.makedirs(tmp_folder)
+
+    # generate tmp merged image (pass as BytesIO was complicated..)
+    filepath_img = os.path.join(tmp_folder, 'example_' + scene_name + '' + expe_name + '.png')
+    
+    # replace img_merge if necessary (new iteration of expe)
+    if img_merge is not None:
+        img_merge.save(filepath_img)
+
+    data_example = {
+        'example_sentence': example_sentence,
+        'example': filepath_img
+    }
+
+    return data_example
+
+    
 
 def run_quest_one_image(request, model_filepath, output_file):
 

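For reference, a hedged sketch of driving the new helper directly, e.g. from a Django shell; the URL, scene name and example index below are placeholders, not values taken from this repository.

# Hypothetical driver for example_quest_one_image(); parameters are illustrative only.
from django.test import RequestFactory
from expe.expes import run as run_expe

request = RequestFactory().get('/indications', {'expe': 'quest_one_image',
                                                'scene': 'some_scene',
                                                'example': '1'})

data = run_expe.example_quest_one_image(request, 'quest_one_image', 'some_scene')
print(data['example_sentence'])   # formatted instruction text for this example
print(data['example'])            # path of the merged image saved under MEDIA_ROOT
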
expe/views.py (+65 -44)

@@ -26,7 +26,7 @@ from .expes import run as run_expe
 
 # image processing imports
 import io
-from PIL import Image
+from PIL import Image, ImageDraw
 
 # module imports
 from .utils import api
@@ -86,6 +86,8 @@ def indications(request):
     # get param 
     expe_name = request.GET.get('expe')
     
+
+    
     scene_name = None
     if 'scene' in request.GET:
         scene_name = request.GET.get('scene')
@@ -111,50 +113,69 @@ def indications(request):
     if (int(example_number) >= number_of_examples):
         start_experiment = True
     else:
+        # run example method matching `expe_name`
+        function_name = 'example_' + expe_name
+    
+        try:
+            run_example_method = getattr(run_expe, function_name)
+        except AttributeError:
+            raise NotImplementedError("Run expe method `{}` not implement `{}`".format(run_expe.__name__, function_name))
+    
+        data_example = run_example_method(request, expe_name, scene_name)
+        data.update(data_example)
+         
         # get expected image qualities indices (load noisy and ref image)
-        params_image = cfg.expes_configuration[expe_name]['text']['examples']['images'][int(example_number)]
-        qualities = api.get_scene_qualities(scene_name)
-
-        noisy_quality = qualities[params_image[0]]
-        ref_quality = qualities[params_image[1]]
-
-        noisy_image = api.get_image(scene_name, noisy_quality)
-        ref_image = api.get_image(scene_name, ref_quality)
-
-        # get crop params from configuration
-        crop_params = cfg.expes_configuration[expe_name]['text']['examples']['crop_params'][int(example_number)]
-
-        img_merge, percentage, orientation, position = crop_images(noisy_image,     
-                                                                    ref_image, 
-                                                                    per=crop_params[0], 
-                                                                    orien=crop_params[1], 
-                                                                    swap_img=crop_params[2])
-
-        example_sentence = cfg.expes_configuration[expe_name]['text']['examples']['sentence'][int(example_number)]
-
-        if orientation == 0:
-            example_sentence = example_sentence.format('vertically', str(percentage*100))
-        else:
-            example_sentence = example_sentence.format('horizontally', str(percentage*100))
-
-        data['example_sentence'] = example_sentence
-
-
-        # Temporary save of image
-        tmp_folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)
-
-        if not os.path.exists(tmp_folder):
-            os.makedirs(tmp_folder)
-
-        # generate tmp merged image (pass as BytesIO was complicated..)
-        filepath_img = os.path.join(tmp_folder, 'example_' + scene_name + '' + expe_name + '.png')
-        
-        # replace img_merge if necessary (new iteration of expe)
-        if img_merge is not None:
-            img_merge.save(filepath_img)
-
-        print(filepath_img)
-        data['example'] = filepath_img
+#        params_image = cfg.expes_configuration[expe_name]['text']['examples']['images'][int(example_number)]
+#        qualities = api.get_scene_qualities(scene_name)
+#
+#        noisy_quality = qualities[params_image[0]]
+#        ref_quality = qualities[params_image[1]]
+#
+#        noisy_image = api.get_image(scene_name, noisy_quality)
+#        ref_image = api.get_image(scene_name, ref_quality)
+#
+#        # get crop params from configuration
+#        crop_params = cfg.expes_configuration[expe_name]['text']['examples']['crop_params'][int(example_number)]
+#
+#        img_merge, percentage, orientation, position = crop_images(noisy_image,     
+#                                                                    ref_image, 
+#                                                                    per=crop_params[0], 
+#                                                                    orien=crop_params[1], 
+#                                                                    swap_img=crop_params[2])
+#        width, height = img_merge.size
+#        if orientation==0:
+#            left, top, right, bottom = percentage*width, 0, percentage*width, height   #vertical
+#        else:
+#            left, top, right, bottom = 0, percentage*height, width, percentage*height   #horizontal
+#        if  int(example_number) % 2 != 0 :
+#            if noisy_quality != qualities[-1]:#-noisy_quality > qualities[-1]-(10*qualities[-1])/100 :
+#                draw = ImageDraw.Draw(img_merge) 
+#                draw.line((left, top, right, bottom), fill='black', width=5)
+#        example_sentence = cfg.expes_configuration[expe_name]['text']['examples']['sentence'][int(example_number)]
+#
+#        if orientation == 0:
+#            example_sentence = example_sentence.format('vertically', str(percentage*100))
+#        else:
+#            example_sentence = example_sentence.format('horizontally', str(percentage*100))
+#
+#        data['example_sentence'] = example_sentence
+#
+#
+#        # Temporary save of image
+#        tmp_folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)
+#
+#        if not os.path.exists(tmp_folder):
+#            os.makedirs(tmp_folder)
+#
+#        # generate tmp merged image (pass as BytesIO was complicated..)
+#        filepath_img = os.path.join(tmp_folder, 'example_' + scene_name + '' + expe_name + '.png')
+#        
+#        # replace img_merge if necessary (new iteration of expe)
+#        if img_merge is not None:
+#            img_merge.save(filepath_img)
+#
+#        print(filepath_img)
+#        data['example'] = filepath_img
 
     data['start'] = start_experiment
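
The view now resolves the example builder dynamically: for an experiment named expe_name it looks up example_<expe_name> in expe/expes/run.py, so adding examples for a new experiment only requires defining a function that follows this naming convention. A toy illustration of the dispatch, with a stand-in namespace instead of the real run_expe module:

# Toy sketch of the getattr-based dispatch used above; `fake_run` stands in
# for expe.expes.run and the handler body is a placeholder.
import types

fake_run = types.SimpleNamespace(
    example_quest_one_image=lambda request, expe_name, scene_name: {
        'example_sentence': 'demo sentence',
        'example': '/tmp/example.png',
    }
)

expe_name = 'quest_one_image'
function_name = 'example_' + expe_name

try:
    run_example_method = getattr(fake_run, function_name)
except AttributeError:
    raise NotImplementedError("No `{}` defined for this experiment".format(function_name))

data = {}
data.update(run_example_method(None, expe_name, None))
print(data['example_sentence'])   # -> 'demo sentence'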