# run.py
# standard library imports
import os
import pickle
import sys
import time
from pprint import pprint

# third-party imports
import numpy as np
from django.conf import settings
from ipfml import utils
from PIL import Image, ImageDraw

# module imports
from ..utils import api
from ..utils.processing import crop_images
from .. import config as cfg

# expe imports
from .classes.quest_plus import QuestPlus
from .classes.quest_plus import psychometric_fun
  20. def example_quest_one_image(request, expe_name, scene_name):
  21. example_number = request.GET.get('example')
  22. # get expected image qualities indices (load noisy and ref image)
  23. params_image = cfg.expes_configuration[expe_name]['text']['examples']['images'][int(example_number)]
  24. qualities = api.get_scene_qualities(scene_name)
  25. noisy_quality = qualities[params_image[0]]
  26. ref_quality = qualities[params_image[1]]
  27. noisy_image = api.get_image(scene_name, noisy_quality)
  28. ref_image = api.get_image(scene_name, ref_quality)
  29. # get crop params from configuration
  30. crop_params = cfg.expes_configuration[expe_name]['text']['examples']['crop_params'][int(example_number)]
  31. img_merge, percentage, orientation, position = crop_images(noisy_image,
  32. ref_image,
  33. per=crop_params[0],
  34. orien=crop_params[1],
  35. swap_img=crop_params[2])
  36. width, height = img_merge.size
  37. if orientation==0:
  38. left, top, right, bottom = percentage*width, 0, percentage*width, height #vertical
  39. else:
  40. left, top, right, bottom = 0, percentage*height, width, percentage*height #horizontal
  41. if int(example_number) % 2 != 0 :
  42. if noisy_quality != qualities[-1]:#-noisy_quality > qualities[-1]-(10*qualities[-1])/100 :
  43. draw = ImageDraw.Draw(img_merge)
  44. draw.line((left, top, right, bottom), fill='black', width=5)
  45. example_sentence = cfg.expes_configuration[expe_name]['text']['examples']['sentence'][int(example_number)]
  46. if orientation == 0:
  47. example_sentence = example_sentence.format('vertically', str(percentage*100))
  48. else:
  49. example_sentence = example_sentence.format('horizontally', str(percentage*100))
  50. # Temporary save of image
  51. tmp_folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)
  52. if not os.path.exists(tmp_folder):
  53. os.makedirs(tmp_folder)
  54. # generate tmp merged image (pass as BytesIO was complicated..)
  55. filepath_img = os.path.join(tmp_folder, 'example_' + scene_name + '' + expe_name + '.png')
  56. # replace img_merge if necessary (new iteration of expe)
  57. if img_merge is not None:
  58. img_merge.save(filepath_img)
  59. data_example = {
  60. 'example_sentence': example_sentence,
  61. 'example': filepath_img
  62. }
  63. return data_example
  64. def run_quest_one_image(request, model_filepath, output_file):
  65. # 1. get session parameters
  66. qualities = request.session.get('qualities')
  67. scene_name = request.session.get('scene')
  68. expe_name = request.session.get('expe')
  69. # by default
  70. iteration = 0
  71. # used to stop when necessary
  72. if 'iteration' in request.GET:
  73. iteration = int(request.GET.get('iteration'))
  74. else:
  75. request.session['expe_started'] = False
  76. # 2. Get expe information if started
  77. # first time only init `quest`
  78. # if experiments is started we can save data
  79. if request.session.get('expe_started'):
  80. # does not change expe parameters
  81. if request.session['expe_data']['expe_previous_iteration'] == iteration:
  82. return None
  83. else:
  84. current_expe_data = request.session['expe_data']
  85. answer = int(request.GET.get('answer'))
  86. expe_answer_time = time.time() - current_expe_data['expe_answer_time']
  87. previous_percentage = current_expe_data['expe_percentage']
  88. previous_orientation = current_expe_data['expe_orientation']
  89. previous_position = current_expe_data['expe_position']
  90. previous_stim = current_expe_data['expe_stim']
  91. print("Answer time is ", expe_answer_time)
  92. # 3. Load or create Quest instance
  93. # default params
  94. # TODO : add specific thresholds information for scene
  95. #thresholds = np.arange(50, 10000, 50)
  96. stim_space = np.asarray(qualities)
  97. slope_range = cfg.expes_configuration[expe_name]['params']['slopes'][scene_name]
  98. slopes = np.arange(slope_range[0], slope_range[1], slope_range[2])
  99. #slopes = np.arange(0.0001, 0.001, 0.00003) # contemporary
  100. #slopes = np.arange(0.0005, 0.01, 0.0003) # bathroom
  101. #slopes = np.arange(1.995,19.95,0.5985)
  102. # TODO : update norm slopes
  103. # stim_space = np.asarray(qualities)
  104. # slopes = np.arange(0.0001, 0.001, 0.00003)
  105. # # normalize stim_space and slopes for this current scene
  106. # stim_space_norm = np.array(utils.normalize_arr_with_range(stim_space, stim_space.min(), stim_space.max()))
  107. # slopes_norm = slopes * (slopes.max() - slopes.min())
  108. # check if necessary to construct `quest` object
  109. if not os.path.exists(model_filepath):
  110. print('Creation of `qp` model')
  111. #print(slopes_norm)
  112. #qp = QuestPlus(stim_space_norm, [stim_space_norm, slopes_norm], function=psychometric_fun)
  113. qp = QuestPlus(stim_space, [stim_space, slopes], function=psychometric_fun)
  114. else:
  115. print('Load `qp` model')
  116. filehandler = open(model_filepath, 'rb')
  117. qp = pickle.load(filehandler)
  118. pprint(qp)
  119. # 4. If expe started update and save experiments information and model
  120. # if experiments is already began
  121. if request.session.get('expe_started'):
  122. # TODO : update norm slopes
  123. #previous_stim_norm = (int(previous_stim) - stim_space.min()) / (stim_space.max() - stim_space.min() + sys.float_info.epsilon)
  124. print(previous_stim)
  125. #print(previous_stim_norm)
  126. qp.update(int(previous_stim), answer)
  127. entropy = qp.get_entropy()
  128. print('chosen entropy', entropy)
  129. line = str(previous_stim)
  130. line += ";" + scene_name
  131. line += ";" + str(previous_percentage)
  132. line += ";" + str(previous_orientation)
  133. line += ";" + str(previous_position)
  134. line += ";" + str(answer)
  135. line += ";" + str(expe_answer_time)
  136. line += ";" + str(entropy)
  137. line += '\n'
  138. output_file.write(line)
  139. output_file.flush()
  140. if entropy < cfg.expes_configuration[expe_name]['params']['entropy']:
  141. request.session['expe_finished'] = True
  142. return None
  143. # 5. Contruct new image and save it
  144. # construct image
  145. if iteration < cfg.expes_configuration[expe_name]['params']['iterations']:
  146. # process `quest`
  147. next_stim = qp.next_contrast()
  148. print(next_stim)
  149. #next_stim_img = int(next_stim*(stim_space.max()-stim_space.min())+stim_space.min())
  150. print('-------------------------------------------------')
  151. print('Iteration', iteration)
  152. print(next_stim)
  153. #print('denorm', next_stim_img)
  154. print('-------------------------------------------------')
  155. #noisy_image = api.get_image(scene_name, next_stim_img)
  156. noisy_image = api.get_image(scene_name, next_stim)
  157. # reconstruct reference image from list stored into session
  158. ref_image = api.get_image(scene_name, 'max')
  159. img_merge, percentage, orientation, position = crop_images(noisy_image, ref_image)
  160. else:
  161. request.session['expe_finished'] = True
  162. return None
  163. # save image using user information
  164. # create output folder for tmp files if necessary
  165. tmp_folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)
  166. if not os.path.exists(tmp_folder):
  167. os.makedirs(tmp_folder)
  168. # generate tmp merged image (pass as BytesIO was complicated..)
  169. filepath_img = os.path.join(tmp_folder, request.session.get('id') + '_' + scene_name + '' + expe_name + '.png')
  170. # replace img_merge if necessary (new iteration of expe)
  171. if img_merge is not None:
  172. img_merge.save(filepath_img)
  173. # save qp model at each iteration
  174. file_pi = open(model_filepath, 'wb')
  175. pickle.dump(qp, file_pi)
  176. # 6. Prepare experiments data for current iteration and data for view
  177. # here you can save whatever you need for you experiments
  178. data_expe = {
  179. 'image_path': filepath_img,
  180. 'expe_percentage': percentage,
  181. 'expe_orientation': orientation,
  182. 'expe_position': position,
  183. 'expe_answer_time': time.time(),
  184. 'expe_previous_iteration': iteration,
  185. 'expe_stim': str(next_stim)
  186. }
  187. # expe is now started
  188. request.session['expe_started'] = True
  189. return data_expe