views.py

# django imports
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings

# main imports
import os
import json
import base64
import random
import numpy as np
import datetime

# image processing imports
import io
from PIL import Image

# api imports
from .utils import api
from .utils import functions
from .quest.processing import crop_images
from . import config as cfg
def expe_list(request):
    # get all scenes from dataset
    scenes = api.get_scenes()

    # get list of experiences
    expes = cfg.expe_name_list

    return render(request, 'expe/expe_list.html', {'scenes': scenes, 'expes': expes})


# Create your views here.
def expe(request):
    question_sentence = "Do you see one image or a composition of more than one?"
    indication_sentence = "press left if you see one image, right if not"

    # get params
    expe_name = request.GET.get('expe')
    scene_name = request.GET.get('scene')

    # first time expe is launched
    if 'expe' not in request.session:
        request.session['expe'] = expe_name
        request.session['begin'] = True
    else:
        request.session['begin'] = False

    # update ref img at first time or when expe changed
    if 'ref_img' not in request.session or expe_name != request.session['expe']:
        request.session['begin'] = True
        request.session['qualities'] = api.get_scene_qualities(scene_name)
        request.session['id'] = functions.uniqueID()

        # TODO : add ref_image in cache
        # get reference image
        # ref_image = api.get_image(scene_name, 'max')

        # save ref image as list (can't save python object in session)
        # request.session['ref_img'] = np.array(ref_image).tolist()

    # construct new image
    quality = random.choice(request.session.get('qualities'))
    noisy_image = api.get_image(scene_name, quality)

    # reconstruct reference image from list stored into session
    # ref_image = Image.fromarray(np.array(request.session.get('ref_img')))
    ref_image = api.get_image(scene_name, 'max')

    img_merge, per, orien, swap_img = crop_images(noisy_image, ref_image)

    # create output folder for tmp files if necessary
    folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)

    if not os.path.exists(folder):
        os.makedirs(folder)

    # generate tmp merged image (passing it as BytesIO was complicated..)
    # TODO : add crontab task to erase generated img
    filepath_img = os.path.join(folder, request.session.get('id') + '_' + scene_name + '_' + expe_name + '.png')
    img_merge.save(filepath_img)

    # create output file for expe results
    timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d_%Hh%Mm%Ss")
    filename = "online_ans" + timestamp + ".csv"

    # orientation : 0 = vertical, 1 = horizontal
    # image_ref_position : 0 = right/bottom, 1 = left/up
    # answer : left = 1, right = 0
    with open(filename, "w") as f:
        f.write('stimulus' + ';' + 'name_stimulus' + ';' + 'cropping_percentage' + ';' + 'orientation' + ';'
                + 'image_ref_position' + ';' + 'answer' + ';' + 'time_reaction' + ';' + 'entropy' + '\n')
    # expe parameters
    data = {
        'expe_name': expe_name,
        'img_merged_path': filepath_img,
        'question': question_sentence,
        'indication': indication_sentence
    }

    return render(request, 'expe/expe.html', data)
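
For context, a minimal urls.py sketch (not part of this file; the route patterns and names are assumptions) showing how these two views could be wired up, with expe and scene passed as query parameters:

# urls.py (sketch, assumed layout)
from django.urls import path
from . import views

urlpatterns = [
    # list of available experiences and scenes
    path('', views.expe_list, name='expe_list'),
    # main experience view, e.g. /expe?expe=<expe_name>&scene=<scene_name>
    path('expe', views.expe, name='expe'),
]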