
Merge branch 'release/v0.2.2'

Jérôme BUISINE, 5 years ago
commit 9b3f715485

+ 1 - 1
DOCUMENTATION.md

@@ -94,7 +94,7 @@ Example of loading or saving Python object (need of pickle):
 ```python
 # check if necessary to construct `quest` object or if backup exists
 if not os.path.exists(model_filepath):
-    qp = QuestPlus(stim_space, [thresholds, slopes], function=psychometric_fun)
+    qp = QuestPlus(stim_space, [stim_space, slopes], function=psychometric_fun)
 else:
     print('Load `qp` model')
     filehandler = open(model_filepath, 'rb') 

+ 16 - 1
expe/config.py

@@ -34,11 +34,26 @@ expes_configuration            = {
             'question': "Do you see one image or a composition of more than one?",
             'indication': "press left if you see one image, right if not",
             'end_text': "Experiment is finished. Thanks for your participation",
+            'examples': {
+                'sentence': "This image is cropped {0} with {1}%",
+                'crop_params': [
+                    [0.3, 0, 0],
+                    [0.3, 0, 0],
+                    [0.7, 1, 0]
+                ],
+                'images': [
+                    [2, -1],
+                    [-3, -1],
+                    [7, -1]
+                ]
+            }
         },
         'params':{
             'iterations': 10
         },
-       
+        'slopes':{
+
+        },
         # if others custom session param are directly set for experiments
         'session_params': [
             'expe_data',
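
For context, the new `examples` block pairs a template sentence with per-example crop parameters and image quality indices. Below is a minimal sketch of how such an entry is read back, mirroring the lookup added in expe/views.py further down this diff; the experiment key 'quest_one_image' and the package import are assumptions, since the parent key sits outside this hunk.

```python
from expe import config as cfg  # assumes the project package is importable as `expe`

expe_name = 'quest_one_image'  # assumed key; the parent key is outside this hunk
examples_cfg = cfg.expes_configuration[expe_name]['text']['examples']

example_number = 1
noisy_idx, ref_idx = examples_cfg['images'][example_number]     # indices into the scene quality list
per, orien, swap = examples_cfg['crop_params'][example_number]  # crop percentage, orientation, swap flag

# orientation 0 is rendered as 'vertically' in expe/views.py
sentence = examples_cfg['sentence'].format('vertically', str(per * 100))
```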

+ 45 - 14
expe/expes/run.py

@@ -3,6 +3,7 @@ import os
 import time
 import numpy as np
 import pickle
+import sys
 
 # django imports
 from django.conf import settings
@@ -17,6 +18,10 @@ from .. import config as cfg
 from .classes.quest_plus import QuestPlus
 from .classes.quest_plus import psychometric_fun
 
+# other imports 
+from ipfml import utils
+from pprint import pprint
+
 
 def run_quest_one_image(request, model_filepath, output_file):
 
@@ -56,28 +61,45 @@ def run_quest_one_image(request, model_filepath, output_file):
     # 3. Load or create Quest instance
     # default params
     # TODO : add specific thresholds information for scene
-    thresholds = np.arange(50, 10000, 50)
+    #thresholds = np.arange(50, 10000, 50)
     stim_space = np.asarray(qualities)
-    slopes = np.arange(0.0001, 0.001, 0.00003)
+    #slopes = np.arange(0.0001, 0.001, 0.00003) # contemporary
+    slopes = np.arange(0.0005, 0.01, 0.0003) # bathroom
+
+    # TODO : update norm slopes
+    # stim_space = np.asarray(qualities)
+    # slopes = np.arange(0.0001, 0.001, 0.00003)
+
+    # # normalize stim_space and slopes for this current scene
+    # stim_space_norm = np.array(utils.normalize_arr_with_range(stim_space, stim_space.min(), stim_space.max()))
+    # slopes_norm = slopes * (slopes.max() - slopes.min()) 
 
     # check if necessary to construct `quest` object
     if not os.path.exists(model_filepath):
-        qp = QuestPlus(stim_space, [thresholds, slopes], function=psychometric_fun)
+        print('Creation of `qp` model')
+        #print(slopes_norm)
+        #qp = QuestPlus(stim_space_norm, [stim_space_norm, slopes_norm], function=psychometric_fun)
+        qp = QuestPlus(stim_space, [stim_space, slopes], function=psychometric_fun)
+
     else:
         print('Load `qp` model')
         filehandler = open(model_filepath, 'rb') 
         qp = pickle.load(filehandler)
+        pprint(qp)
     
     # 4. If expe started update and save experiments information and model
     # if experiments is already began
     if request.session.get('expe_started'):
 
-        # TODO : check `i` variable 
-        # update of `quest`
-        # qp.update(qualities[i], answer)
-        # Use of previous stim
-        qp.update(qualities[iteration], answer) 
+        # TODO : update norm slopes
+        #previous_stim_norm = (int(previous_stim) - stim_space.min()) / (stim_space.max() - stim_space.min() + sys.float_info.epsilon)
+
+        print(previous_stim)
+        #print(previous_stim_norm)
+
+        qp.update(int(previous_stim), answer) 
         entropy = qp.get_entropy()
+        print('chosen entropy', entropy)
 
         line = str(previous_stim) 
         line += ";" + scene_name 
@@ -92,20 +114,25 @@ def run_quest_one_image(request, model_filepath, output_file):
         output_file.write(line)
         output_file.flush()
 
-    # save `quest` model
-    file_pi = open(model_filepath, 'wb') 
-    pickle.dump(qp, file_pi)
-
     # 5. Construct new image and save it
     # construct image 
     if iteration < cfg.expes_configuration[expe_name]['params']['iterations']:
         # process `quest`
+
         next_stim = qp.next_contrast()
-        print("Next quality ", next_stim)
+        print(next_stim)
+        #next_stim_img = int(next_stim*(stim_space.max()-stim_space.min())+stim_space.min())
+    
+        print('-------------------------------------------------')
+        print('Iteration', iteration)
+        print(next_stim)
+        #print('denorm', next_stim_img)
+        print('-------------------------------------------------')
 
-        # construct new image
+        #noisy_image = api.get_image(scene_name, next_stim_img)
         noisy_image = api.get_image(scene_name, next_stim)
 
+
         # reconstruct reference image from list stored into session
         ref_image = api.get_image(scene_name, 'max')
         img_merge, percentage, orientation, position = crop_images(noisy_image, ref_image)
@@ -127,6 +154,10 @@ def run_quest_one_image(request, model_filepath, output_file):
     if img_merge is not None:
         img_merge.save(filepath_img)
 
+    # save qp model at each iteration
+    file_pi = open(model_filepath, 'wb') 
+    pickle.dump(qp, file_pi)
+
     # 6. Prepare experiments data for current iteration and data for view
     
     # here you can save whatever you need for your experiments
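
For reference, a minimal sketch of the model setup and persistence pattern this hunk converges on: the thresholds are taken from the stimulus space itself, the model is updated with the previously shown stimulus, and the pickled backup is now written at every iteration. The quality values, answer value, and file path below are illustrative, and the absolute import path is assumed from the repository layout (run.py itself uses the relative `from .classes.quest_plus import ...`).

```python
import pickle

import numpy as np

# assumed absolute import path, inferred from expe/expes/classes/quest_plus.py
from expe.expes.classes.quest_plus import QuestPlus, psychometric_fun

# illustrative quality levels; in run.py these come from api.get_scene_qualities()
stim_space = np.asarray([50, 100, 200, 500, 1000, 2000, 5000, 10000])
slopes = np.arange(0.0005, 0.01, 0.0003)  # grid used above for the 'bathroom' scene

# thresholds now come from the stimulus space itself instead of a fixed np.arange grid
qp = QuestPlus(stim_space, [stim_space, slopes], function=psychometric_fun)

# update with the previously shown stimulus and the observer's answer (values illustrative)
qp.update(200, 1)
entropy = qp.get_entropy()
next_stim = qp.next_contrast()

# the model backup is now written at every iteration, after the merged image is saved
with open('quest_model.obj', 'wb') as file_pi:  # illustrative path
    pickle.dump(qp, file_pi)
```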

+ 1 - 0
expe/templates/base.html

@@ -37,6 +37,7 @@
         if (BASE !== '') baseUrl += BASE + '/'
 
         const expeUrl  = baseUrl + 'expe'  
+        const indicationsUrl  = baseUrl + 'indications'  
 
         // EXPE variables parts
         // get access to django variables

+ 24 - 17
expe/templates/expe/expe_indications.html

@@ -12,25 +12,32 @@
     <br />
     <h5>{{indication}}</h5>
 
-    <br />
-    <br />
-
-    <div class="row">
-        <div class="col-md-4 offset-md-4">
-            <form method="GET" action="/expe">
-                <div class="form-group">
-                    <input type="hidden" name="scene" value="{{scene_name}}"/>
-                    <input type="hidden" name="expe" value="{{expe_name}}"/>
-                    <input type="hidden" name="iteration" value="0"/>
-
-                    <label for="experimentId">Select experiment identifier:</label>
-                    <input type="text" class="form-control" name="experimentId" placeholder="Enter your experiment identifier"/>
-                </div>
-            </form>
+    {% if start %}
+        <br />
+        <br />
+
+        <div class="row">
+            <div class="col-md-4 offset-md-4">
+                <form method="GET" action="/expe">
+                    <div class="form-group">
+                        <input type="hidden" name="scene" value="{{scene_name}}"/>
+                        <input type="hidden" name="expe" value="{{expe_name}}"/>
+                        <input type="hidden" name="iteration" value="0"/>
+
+                        <label for="experimentId">Select experiment identifier:</label>
+                        <input type="text" class="form-control" name="experimentId" placeholder="Enter your experiment identifier"/>
+                    </div>
+                </form>
+            </div>
         </div>
-    </div>
 
-    <p id="expeIndication"><strong>Press enter to begin experiments</strong></p>
+        <p id="expeIndication"><strong>Press enter to begin experiments</strong></p>
+    {% else %}
+        <br/>
+        <h5>{{example_sentence}}</h5>
+        <br/>
+        <img src="{{example}}" />
+    {% endif %}
 
 {% endblock %}
 

+ 2 - 0
expe/templates/expe/expe_list.html

@@ -14,6 +14,8 @@
 
             <form action="/indications" id="expeChoice">
 
+                <input type="hidden" name="example" value="0"/>
+
                 <div class="form-group">
                     <label for="scene">Select scene:</label>
                     <select class="form-control" name="scene">

+ 1 - 0
expe/utils/api.py

@@ -23,6 +23,7 @@ def get_image(scene, img_quality):
     # Make a get request to get information of scene image with quality of 200
     response = requests.get(url)
     # Parse the content of the response as JSON
+
     content_json = json.loads(response.content)
     
     # Access to link of image using 'key' (data & link) from json data

+ 1 - 1
expe/utils/processing.py

@@ -11,7 +11,7 @@ def crop_images(img1, img2, per=None, orien=None, swap_img=None):
     crop and gather reference image and a noisy one randomly
     '''
     if per is None:
-        per = random.choice([0.25, 0.5, 0.75])
+        per = random.random() * 0.7 + 0.15
     if orien is None:
         orien = random.choice([0, 1])
     if swap_img is None:
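
Since `random.random()` returns a float in [0.0, 1.0), the new expression draws the crop percentage uniformly from [0.15, 0.85) instead of picking one of three fixed values; a quick check of the bounds:

```python
import random

# the new sampling: a continuous crop percentage in [0.15, 0.85)
samples = [random.random() * 0.7 + 0.15 for _ in range(10_000)]
assert 0.15 <= min(samples) and max(samples) < 0.85
```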

+ 59 - 0
expe/views.py

@@ -58,6 +58,7 @@ def expe_list(request):
 
     # get list of experiments
     expes = cfg.expe_name_list
+    data = get_base_data()
 
     # by default user restart expe
     request.session['expe_started'] = False
@@ -73,9 +74,14 @@ def expe_list(request):
 
 def indications(request):
 
+    random.seed(10)
+
     # get param 
     expe_name = request.GET.get('expe')
     scene_name = request.GET.get('scene')
+    example_number = request.GET.get('example')
+
+    print(example_number)
 
     # get base data
     data = get_base_data()
@@ -85,6 +91,59 @@ def indications(request):
     data['question']   = cfg.expes_configuration[expe_name]['text']['question']
     data['indication'] = cfg.expes_configuration[expe_name]['text']['indication']
 
+    number_of_examples = len(cfg.expes_configuration[expe_name]['text']['examples']['images'])
+
+    start_experiment = False
+    if (int(example_number) >= number_of_examples):
+        start_experiment = True
+    else:
+        # get expected image qualities indices (load noisy and ref image)
+        params_image = cfg.expes_configuration[expe_name]['text']['examples']['images'][int(example_number)]
+        qualities = api.get_scene_qualities(scene_name)
+
+        noisy_quality = qualities[params_image[0]]
+        ref_quality = qualities[params_image[1]]
+
+        noisy_image = api.get_image(scene_name, noisy_quality)
+        ref_image = api.get_image(scene_name, ref_quality)
+
+        # get crop params from configuration
+        crop_params = cfg.expes_configuration[expe_name]['text']['examples']['crop_params'][int(example_number)]
+
+        img_merge, percentage, orientation, position = crop_images(noisy_image,     
+                                                                    ref_image, 
+                                                                    per=crop_params[0], 
+                                                                    orien=crop_params[1], 
+                                                                    swap_img=crop_params[2])
+
+        example_sentence = cfg.expes_configuration[expe_name]['text']['examples']['sentence']
+
+        if orientation == 0:
+            example_sentence = example_sentence.format('vertically', str(percentage*100))
+        else:
+            example_sentence = example_sentence.format('horizontally', str(percentage*100))
+
+        data['example_sentence'] = example_sentence
+
+
+        # Temporary save of image
+        tmp_folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)
+
+        if not os.path.exists(tmp_folder):
+            os.makedirs(tmp_folder)
+
+        # generate tmp merged image (passing it as BytesIO was complicated...)
+        filepath_img = os.path.join(tmp_folder, 'example_' + scene_name + '_' + expe_name + '.png')
+        
+        # replace img_merge if necessary (new iteration of expe)
+        if img_merge is not None:
+            img_merge.save(filepath_img)
+
+        print(filepath_img)
+        data['example'] = filepath_img
+
+    data['start'] = start_experiment
+
     return render(request, 'expe/expe_indications.html', data)
 
 

+ 2 - 1
requirements.txt

@@ -5,4 +5,5 @@ questplus
 psychopy
 pandas
 requests
-docker-compose
+docker-compose
+ipfml

+ 20 - 2
static/js/indications.js

@@ -1,11 +1,18 @@
 // Utils informations
 const KEYCODE_Q     = 81
 const KEYCODE_ENTER = 13
+const KEYCODE_LEFT_ARROW  = 37
+const KEYCODE_RIGHT_ARROW = 39
 
 urlParams = new URLSearchParams(window.location.search)
 
-const scene = urlParams.get('scene')
-const expe  = urlParams.get('expe')
+const scene    = urlParams.get('scene')
+const expe     = urlParams.get('expe')
+var example    = urlParams.get('example')
+
+if (example === null || example === ''){
+     example = 0
+}
 
 const checkKey = e => {
      if (e.keyCode === KEYCODE_Q) {
@@ -20,7 +27,18 @@ const checkKey = e => {
           console.log(expeUrl + params)
           window.location = expeUrl + params
      }
+     else if (e.keyCode === KEYCODE_LEFT_ARROW || e.keyCode === KEYCODE_RIGHT_ARROW) {
+          
+          // increment number of example
+          example = parseInt(example) + 1
+
+          console.log("I'm here")
+          // construct url with params for experiments
+          const params = `?scene=${scene}&expe=${expe}&example=${example}`
+          window.location = indicationsUrl + params
+     }
 }
 
 // implement `key` events
 document.addEventListener('keydown', checkKey)
+