Browse source

Add first expe template

Jérôme BUISINE 4 years ago
Parent commit 00e235635c

+ 4 - 1
ThesisWebExpeDjango/settings.py

@@ -122,4 +122,7 @@ STATIC_URL = '/static/'
 
 STATICFILES_DIRS = (
     os.path.join(BASE_DIR, 'static'),
-)
+)
+
+MEDIA_ROOT = "media/"
+MEDIA_URL = "media/"

+ 12 - 0
expe/config.py

@@ -0,0 +1,12 @@
+# api variables
+DIRAN_DOMAIN_NAME           = "https://diran.univ-littoral.fr/"
+GET_SCENE_QUALITIES_API_URL = DIRAN_DOMAIN_NAME + "api/listSceneQualities?sceneName={0}"
+GET_SCENE_IMAGE_API_URL     = DIRAN_DOMAIN_NAME + "api/getImage?sceneName={0}&imageQuality={1}"
+GET_SCENES_API_URL          = DIRAN_DOMAIN_NAME + "api/listScenes"
+
+# folder variables
+output_expe_folder          = "expes_results/{0}/"
+output_tmp_folder           = "tmp"
+
+# expes list
+expe_name_list              = ["quest_same_image"]
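
These are plain str.format templates; a quick sketch of how they expand (the scene name "bathroom" and quality 200 below are illustrative values only):

    from expe import config as cfg

    cfg.GET_SCENE_QUALITIES_API_URL.format("bathroom")
    # -> "https://diran.univ-littoral.fr/api/listSceneQualities?sceneName=bathroom"

    cfg.GET_SCENE_IMAGE_API_URL.format("bathroom", 200)
    # -> "https://diran.univ-littoral.fr/api/getImage?sceneName=bathroom&imageQuality=200"

    cfg.output_expe_folder.format("quest_same_image")
    # -> "expes_results/quest_same_image/"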

+ 0 - 0
expe/quest/__init__.py


+ 146 - 0
expe/quest/expe.py

@@ -0,0 +1,146 @@
+# main imports 
+import numpy as np
+import os
+import time
+from datetime import datetime
+import re
+
+# image processing imports
+from .processing import crop_images
+
+# expe imports
+from .quest_plus import QuestPlus
+
+# api and psychopy imports (needed by the procedure below)
+from ..utils.api import get_image, get_scene_qualities
+from psychopy import visual, event
+
+# load `config` variables
+from .. import config as cfg
+
+# PARAMETERS of the psychometric function
+chance_level = 0 #e.g. chance_level should be 0.5 for 2AFC (Two-alternative forced choice) procedure
+threshold_prob = 1.-(1.-chance_level)/2.0 #the probability level at the threshold
+
+# quest_plus.py also comes with psychometric.py, which includes the definition of the weibull and weibull_db functions
+# here we define the logistic function using the same template, so that it works with the quest_plus implementation
+def logistic(x, params, corr_at_thresh=threshold_prob, chance_level=chance_level):
+    # unpack params
+    if len(params) == 3:
+        THRESHOLD, SLOPE, lapse = params
+    else:
+        THRESHOLD, SLOPE = params
+        lapse = 0.
+
+    b = 4 * SLOPE
+    a = -b * THRESHOLD
+
+    return chance_level + (1 - lapse - chance_level) / (1 + np.exp(-(a + b*x)))
+
+
+# a wrapper function to specify which psychometric function we want to use for the QUEST procedure
+def psychometric_fun(x, params):
+    return logistic(x, params, corr_at_thresh=threshold_prob, chance_level=chance_level)
+
+
+
+# create results directory if it does not exist
+# (output_expe_folder is a "{0}" template; we assume the first expe of cfg.expe_name_list here)
+output_folder = cfg.output_expe_folder.format(cfg.expe_name_list[0])
+if not os.path.exists(output_folder):
+    os.makedirs(output_folder)
+
+timestamp = datetime.strftime(datetime.utcnow(), "%Y-%m-%d_%Hh%Mm%Ss")
+filename = os.path.join(output_folder, "online_ans_" + timestamp + ".csv")
+f = open(filename, "w")
+
+#orientation : 0 = vertical, 1 = horizontal
+#image_ref_position : 0 = right/bottom, 1 = left/up
+#answer : left = 1, right = 0
+f.write('stimulus' + ";" + "name_stimulus" + ";" + 'cropping_percentage' + ";" + 'orientation' + ';' 
+         + 'image_ref_position' + ';' + 'answer' + ';' + 'time_reaction' + ';' + 'entropy' + '\n')
+dataset = "contemporary"
+
+# stimulus levels: the available image qualities of the scene,
+# fetched from the API (the images themselves now come from the API as well)
+dd_sam = np.array(get_scene_qualities(dataset))
+
+thresholds = np.arange(50, 10000, 50)
+stim_space = np.asarray(dd_sam)
+slopes = np.arange(0.0001, 0.001, 0.00003)
+
+# the window must exist: it is used below by img.draw() and mywin.flip()
+mywin = visual.Window([800, 600], monitor="testMonitor", screen=1, units="deg", fullscr=True)
+qp = QuestPlus(stim_space, [thresholds, slopes], function=psychometric_fun)
+answerTime = []
+r = []
+
+# fetch the reference image (highest quality) from the API
+image_ref = get_image(dataset, 10000)
+for i in range(5):
+    next_stim = qp.next_contrast()
+    print(next_stim)
+
+    current_image = get_image(dataset, next_stim)
+    crop_image, percentage, orientation, position = crop_images(image_ref, current_image)
+    img = visual.ImageStim(win=mywin, image=crop_image)
+    img.draw()
+    mywin.flip()
+    start = time.time()
+    key_answer = event.waitKeys(keyList=["left","right"])
+    end = time.time()
+    answerTime.append(end-start)
+    if key_answer == ['left']:
+        answer = 1 #one image
+    else: 
+        answer = 0  #two images
+    r.append(answer)
+    qp.update(next_stim, answer)  # update the posterior with the stimulus actually presented
+    
+    entropy = qp.get_entropy()
+
+    print(entropy)
+    
+    f.write(str(next_stim) + ";" + dataset + ";" + str(percentage) + ";" + str(orientation) + ";" 
+            + str(position) + ";" + str(answer) + ";" + str(answerTime[-1]) + ";" + str(entropy) +'\n')
+    f.flush()
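
As a sanity check of the logistic model defined above: with chance_level = 0, it should return exactly 0.5 at the threshold and approach 0 and 1 away from it. A minimal sketch (the threshold and slope values are illustrative only):

    # illustrative check of the logistic psychometric function
    threshold, slope = 5000, 0.0005

    logistic(5000, [threshold, slope])   # = 0.5 at the threshold, by construction
    logistic(1000, [threshold, slope])   # ~ 0.0003, far below the threshold
    logistic(9000, [threshold, slope])   # ~ 0.9997, far above the threshold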

+ 50 - 0
expe/quest/processing.py

@@ -0,0 +1,50 @@
+
+from PIL import Image
+import os
+import numpy as np
+import random
+
+def crop_images(img1, img2, per=None, orien=None, swap_img=None):
+    '''
+    Crop two images and merge them into a single one; the split position,
+    orientation and image order are chosen at random unless explicitly given
+    '''
+    if per is None:
+        per = random.choice([0.25, 0.5, 0.75])
+    if orien is None:
+        orien = random.choice([0, 1])
+    if swap_img is None:
+        swap_img = random.choice([0, 1])
+    
+    if swap_img:
+        img1, img2 = img2, img1
+
+    img_merge = None
+
+    # vertical split
+    if orien == 0:
+        width, height = img1.size
+        left, top, right, bottom = 0, 0, per*width, height
+        cropped1 = img1.crop((left, top, right, bottom))
+
+        left, top, right, bottom = per*width, 0, width, height
+        cropped2 = img2.crop((left, top, right, bottom))
+
+        crop1 = np.asarray(cropped1)
+        crop2 = np.asarray(cropped2)
+        img_merge = np.hstack((crop1, crop2))
+        img_merge = Image.fromarray(img_merge)
+    else:
+        # horizontal split
+        width, height = img1.size
+        left, top, right, bottom = 0, 0, width, per*height
+        cropped1 = img1.crop((left, top, right, bottom))
+
+        left, top, right, bottom = 0, per*height, width, height
+        cropped2 = img2.crop((left, top, right, bottom))
+
+        crop1 = np.asarray(cropped1)
+        crop2 = np.asarray(cropped2)
+        img_merge = np.vstack((crop1, crop2))
+        img_merge = Image.fromarray(img_merge)
+    
+    return img_merge, per, orien, swap_img
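
A minimal usage sketch for crop_images (the file paths are placeholders; both inputs must share the same dimensions for np.hstack/np.vstack to succeed):

    from PIL import Image
    from expe.quest.processing import crop_images

    ref = Image.open("ref.png")      # placeholder path
    noisy = Image.open("noisy.png")  # placeholder path, same size as ref

    merged, per, orien, swap = crop_images(ref, noisy)
    merged.save("merged.png")
    # per   -> 0.25, 0.5 or 0.75 (cropping percentage)
    # orien -> 0 = vertical split, 1 = horizontal split
    # swap  -> 1 if the two inputs were swapped before merging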

+ 138 - 0
expe/quest/quest_plus.py

@@ -0,0 +1,138 @@
+from copy import deepcopy
+from itertools import product
+
+import numpy as np
+import pandas as pd
+
+# TODO : Currently `weibull` is not used as default function
+# from psychometric import weibull
+
+def reformat_params(params):
+    '''Unroll multiple lists into array of their products.'''
+    if isinstance(params, list):
+        n_params = len(params)
+        params = np.array(list(product(*params)))
+    elif isinstance(params, np.ndarray):
+        assert params.ndim == 1
+        params = params[:, np.newaxis]
+    return params
+
+
+# TODO:
+# - [ ] highlight lowest point in entropy in plot
+class QuestPlus(object):
+    def __init__(self, stim, params, function):
+        self.function = function
+        self.stim_domain = stim
+        self.param_domain = reformat_params(params)
+
+        self._orig_params = deepcopy(params)
+        self._orig_param_shape = (list(map(len, params)) if
+                                  isinstance(params, list) else len(params))
+        self._orig_stim_shape = (list(map(len, stim)) if
+                                 isinstance(stim, list) else len(stim))
+
+        n_stim, n_param = self.stim_domain.shape[0], self.param_domain.shape[0]
+
+        # setup likelihoods for all combinations
+        # of stimulus and model parameter domains
+        self.likelihoods = np.zeros((n_stim, n_param, 2))
+        for p in range(n_param):
+            self.likelihoods[:, p, 0] = self.function(self.stim_domain,
+                                                      self.param_domain[p, :])
+
+        # assumes (correct, incorrect) responses
+        self.likelihoods[:, :, 1] = 1. - self.likelihoods[:, :, 0]
+
+        # we also assume a flat prior (so we init posterior to flat too)
+        self.posterior = np.ones(n_param)
+        self.posterior /= self.posterior.sum()
+
+        self.stim_history = list()
+        self.resp_history = list()
+        self.entropy = np.ones(n_stim)
+
+    def update(self, contrast, ifcorrect, approximate=False):
+        '''Update posterior probability with outcome of current trial.
+
+        contrast - contrast value for the given trial
+        ifcorrect   - whether response was correct or not
+                      1 - correct, 0 - incorrect
+        '''
+
+        # turn ifcorrect to response index
+        resp_idx = 1 - ifcorrect
+        contrast_idx = self._find_contrast_index(
+            contrast,  approximate=approximate)[0]
+
+        # take likelihood of such resp for whole model parameter domain
+        likelihood = self.likelihoods[contrast_idx, :, resp_idx]
+        self.posterior *= likelihood
+        self.posterior /= self.posterior.sum()
+
+        # log history of contrasts and responses
+        self.stim_history.append(contrast)
+        self.resp_history.append(ifcorrect)
+
+    def _find_contrast_index(self, contrast, approximate=False):
+        contrast = np.atleast_1d(contrast)
+        if not approximate:
+            idx = [np.nonzero(self.stim_domain == cntrst)[0][0]
+                   for cntrst in contrast]
+        else:
+            idx = np.abs(self.stim_domain[np.newaxis, :] -
+                         contrast[:, np.newaxis]).argmin(axis=1)
+        return idx
+
+    def next_contrast(self, axis=None):
+        '''Get contrast value minimizing entropy of the posterior
+        distribution.
+
+        Expected entropy is updated in self.entropy.
+
+        Returns
+        -------
+        contrast : contrast value for the next trial.'''
+        full_posterior = self.likelihoods * self.posterior[
+            np.newaxis, :, np.newaxis]
+        if axis is not None:
+            shp = full_posterior.shape
+            new_shape = [shp[0]] + self._orig_param_shape + [shp[-1]]
+            full_posterior = full_posterior.reshape(new_shape)
+            reduce_axes = np.arange(len(self._orig_param_shape)) + 1
+            reduce_axes = tuple(np.delete(reduce_axes, axis))
+            full_posterior = full_posterior.sum(axis=reduce_axes)
+
+        norm = full_posterior.sum(axis=1, keepdims=True)
+        full_posterior /= norm
+
+        H = -np.nansum(full_posterior * np.log(full_posterior), axis=1)
+        self.entropy = (norm[:, 0, :] * H).sum(axis=1)
+
+        # choose contrast with minimal entropy
+        return self.stim_domain[self.entropy.argmin()]
+
+    def get_entropy(self):
+        return self.entropy.min()
+    
+    def get_posterior(self):
+        return self.posterior.reshape(self._orig_param_shape)
+
+    def get_fit_params(self, select='mode'):
+        if select in ['max', 'mode']:
+            # parameters corresponding to maximum peak in posterior probability
+            return self.param_domain[self.posterior.argmax(), :]
+        elif select == 'mean':
+            # parameters weighted by their probability
+            return (self.posterior[:, np.newaxis] *
+                    self.param_domain).sum(axis=0)
+
+    def fit(self, contrasts, responses, approximate=False):
+        for contrast, response in zip(contrasts, responses):
+            self.update(contrast, response, approximate=approximate)
+
+    def plot(self):
+        '''Plot posterior model parameter probabilities and weibull fits.'''
+        pass
+        # TODO : implement this method
+        # return plot_quest_plus(self)
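
A sketch of a complete QUEST+ session with this class, using the psychometric_fun wrapper from expe/quest/expe.py and a simulated observer (simulate_observer is a hypothetical helper assumed here, returning 1 for a correct answer and 0 otherwise):

    import numpy as np

    stim_space = np.arange(50, 10000, 50)
    thresholds = np.arange(50, 10000, 50)
    slopes = np.arange(0.0001, 0.001, 0.00003)

    qp = QuestPlus(stim_space, [thresholds, slopes], function=psychometric_fun)

    for _ in range(20):
        stim = qp.next_contrast()         # stimulus minimizing expected entropy
        answer = simulate_observer(stim)  # hypothetical observer model
        qp.update(stim, answer)

    print(qp.get_fit_params(select='mode'))  # MAP estimate of (threshold, slope)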

+ 1 - 1
expe/templates/expe/base.html

@@ -10,6 +10,7 @@
 
     {% load staticfiles %}
     {% block stylesheets %}
+        <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">
         <link rel="stylesheet" type="text/css" href="{% static "css/expe.css" %}">
     {% endblock %}
 </head>
@@ -22,6 +23,5 @@
     </div>
 
     {% block javascripts %}
-        <script src="{% static "js/loadImg.js" %}"/>
     {% endblock %}
 </body>

+ 18 - 4
expe/templates/expe/expe.html

@@ -1,12 +1,26 @@
-{% extends 'expe/base.html' %}
+{% extends 'base.html' %}
+
+{% load staticfiles %}
 
 {% block title %}
     Expe {{ expe_name }}
 {% endblock %}
 
 {% block content %}
-    <p>Display reconstructed image here</p>
+    
+    {% if request.session.begin %}
+        <h3>{{ question }}</h3>
+        <p>{{ indication }}</p>
+    {% endif %}
+    
+    <!-- TODO : load img from bitmap with javascript `loadImg.js` -->
+    {% if not request.session.begin %}
+        <img id="expeImg" src="/{{img_merged_path}}" data-img="{{request.session.img_merged}}"/>
+    {% endif %}
 
-    <!-- TODO : Load img from bitmap with javascript `loadImg.js` -->
-    <img src="{{ link1 }}"/>
+    {% block javascripts %}
+        <script src="{% static "js/pnglib.js" %}"></script>
+        <script src="{% static "js/loadImg.js" %}"></script>
+        <script src="{% static "js/keyEvents.js" %}"></script>
+    {% endblock %}
 {% endblock %}

+ 41 - 0
expe/templates/expe/expe_list.html

@@ -0,0 +1,41 @@
+{% extends 'base.html' %}
+
+{% load staticfiles %}
+
+{% block title %}
+    Expe choice
+{% endblock %}
+
+{% block content %}
+    <p>Choose your experiment and dataset</p>
+
+    <div class="row">
+        <div class="col-md-4 col-md-offset-4">
+            <form action="/expe" id="expeChoice">
+
+                <div class="form-group">
+                    <label for="scene">Select scene:</label>
+                    <select class="form-control" name="scene">
+                        {% for scene in scenes %}
+                            <option value="{{scene}}">{{scene}}</option>
+                        {% endfor %}
+                    </select>
+                </div>
+
+                <div class="form-group">
+                    <label for="expe">Select experience:</label>
+                    <select class="form-control " name="expe">
+                        {% for expe in expes %}
+                            <option value="{{expe}}">{{expe}}</option>
+                        {% endfor %}
+                    </select>
+                </div>
+
+                <button type="submit" class="btn btn-primary">Submit</button>
+            </form>
+        </div>
+    </div>
+    {% block javascripts %}
+
+    {% endblock %}
+{% endblock %}

+ 8 - 2
expe/urls.py

@@ -1,8 +1,14 @@
 from django.contrib import admin
 from django.urls import path
+from django.conf import settings
+from django.conf.urls.static import static
 
 from . import views
 
 urlpatterns = [
-    path('', views.expe, name='expe'),
-]
+    path('', views.expe_list, name='expe_list'),
+    path('expe', views.expe, name='expe'),
+]
+
+if settings.DEBUG:
+    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

+ 0 - 0
expe/utils/__init__.py


+ 100 - 0
expe/utils/api.py

@@ -0,0 +1,100 @@
+# main imports 
+import os
+import requests
+import json
+
+# image processing imports
+from io import BytesIO
+from PIL import Image 
+
+# import config variables
+from ..config import GET_SCENE_IMAGE_API_URL, GET_SCENE_QUALITIES_API_URL, GET_SCENES_API_URL
+from ..config import DIRAN_DOMAIN_NAME
+
+def get_image(scene, img_quality):
+    '''
+    Return the image of the given scene at the given quality
+    '''
+    if not isinstance(img_quality, str):
+        img_quality = str(img_quality)
+
+    # get URL to contact
+    url = GET_SCENE_IMAGE_API_URL.format(scene, img_quality)
+    # make a GET request for the scene image at the requested quality
+    response = requests.get(url)
+    # parse the content of the response as JSON
+    content_json = json.loads(response.content)
+
+    # access the image link using the 'data' and 'link' keys of the JSON data
+    api_link = content_json['data']['link']
+    image_url = DIRAN_DOMAIN_NAME + api_link
+
+    # ask the API for access to the image
+    print("Access to image located at:", image_url)
+    response_img = requests.get(image_url)
+
+    # parse the content of the response (the whole image) using BytesIO
+    return Image.open(BytesIO(response_img.content))
+
+
+def get_scene_qualities(scene):
+    '''
+    Return all known qualities for the given scene
+    '''
+
+    # construct `url` to get qualities
+    url = GET_SCENE_QUALITIES_API_URL.format(scene)
+
+    response = requests.get(url)
+    # parse the content of the response as JSON
+    content_json = json.loads(response.content)
+
+    # return list of qualities
+    return content_json['data']
+
+
+def get_scenes():
+    '''
+    Return the list of scenes available in the dataset
+    '''
+
+    url = GET_SCENES_API_URL
+
+    # get scene list
+    response = requests.get(url)
+
+    # parse the content of the response as JSON
+    content_json = json.loads(response.content)
+
+    # return list of scenes
+    return content_json['data']
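
A round-trip sketch for these helpers (it needs network access to the diran API, and the indexing below is illustrative only):

    from expe.utils import api

    scenes = api.get_scenes()                       # list of scene names
    qualities = api.get_scene_qualities(scenes[0])  # qualities available for a scene
    img = api.get_image(scenes[0], qualities[-1])   # PIL.Image at the chosen quality
    img.save("scene_preview.png")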

+ 7 - 0
expe/utils/functions.py

@@ -0,0 +1,7 @@
+import random
+
+def uniqueID():
+    '''
+    Return a unique identifier for the current user session
+    '''
+    return str(random.uniform(0, 1))[2:15]
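
Note that slicing str(random.uniform(0, 1)) is not guaranteed unique, and the result can even be shorter than 13 characters for floats with a short repr. If collision resistance matters, a uuid-based sketch could look like this (an alternative, not what this commit uses):

    import uuid

    def uniqueID():
        '''Return a random 13-character hexadecimal identifier.'''
        return uuid.uuid4().hex[:13]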

+ 99 - 3
expe/views.py

@@ -1,9 +1,105 @@
+# django imports
 from django.shortcuts import render
 from django.http import HttpResponse
+from django.conf import settings
 
-# Create your views here.
+# main imports
+import os
+import json
+import base64
+import random
+import numpy as np
+from datetime import datetime
+
+# image processing imports
+import io
+from PIL import Image
+
+# api imports
+from .utils import api
+from .utils import functions
+
+from .quest.processing import crop_images
+from . import config as cfg
+
+
+def expe_list(request):
 
+    # get all scenes from dataset
+    scenes = api.get_scenes()
+
+    # get list of experiences
+    expes = cfg.expe_name_list
+
+    return render(request, 'expe/expe_list.html', {'scenes': scenes, 'expes': expes})
+
+
+# Create your views here.
 def expe(request):
+    
+    question_sentence  = "Do you see one image or a composition of more than one?"
+    indication_sentence = "press left if you see one image, right if not"
+
+    # get param 
+    expe_name = request.GET.get('expe')
+    scene_name = request.GET.get('scene')
+
+    # first time expe is launched
+    if 'expe' not in request.session:
+        request.session['expe'] = expe_name
+        request.session['begin'] = True
+    else:
+        request.session['begin'] = False
+
+    # update ref img at first time or expe changed
+    if 'ref_img' not in request.session or expe_name != request.session['expe']:
+        request.session['begin'] = True
+        request.session['qualities'] = api.get_scene_qualities(scene_name)
+        request.session['id'] = functions.uniqueID()
+
+        # TODO : add in cache ref_image
+        # get reference image
+        #ref_image = api.get_image(scene_name, 'max')
+        # save ref image as list (can't save python object)
+        #request.session['ref_img'] = np.array(ref_image).tolist()
+
+    # construct new image
+    quality = random.choice(request.session.get('qualities'))
+    noisy_image = api.get_image(scene_name, quality)
+
+    # reconstruct reference image from list stored into session
+    # ref_image = Image.fromarray(np.array(request.session.get('ref_img')))
+    ref_image = api.get_image(scene_name, 'max')
+    img_merge, per, orien, swap_img = crop_images(noisy_image, ref_image)
+
+    # create output folder for tmp files if necessary
+    folder = os.path.join(settings.MEDIA_ROOT, cfg.output_tmp_folder)
+
+    if not os.path.exists(folder):
+        os.makedirs(folder)
+
+    # generate tmp merged image (passing it as BytesIO was complicated..)
+    # TODO : add crontab task to erase generated img
+    filepath_img = os.path.join(folder, request.session.get('id') + '_' + scene_name + '_' + expe_name + '.png')
+    img_merge.save(filepath_img)
+
+    # create output folder for expe results (output_expe_folder is a "{0}" template, formatted with the expe name)
+    output_folder = cfg.output_expe_folder.format(expe_name)
+    if not os.path.exists(output_folder):
+        os.makedirs(output_folder)
+
+    timestamp = datetime.strftime(datetime.utcnow(), "%Y-%m-%d_%Hh%Mm%Ss")
+    filename = os.path.join(output_folder, "online_ans_" + timestamp + ".csv")
+    f = open(filename, "w")
+
+    #orientation : 0 = vertical, 1 = horizontal
+    #image_ref_position : 0 = right/bottom, 1 = left/up
+    #answer : left = 1, right = 0
+    f.write('stimulus' + ";" + "name_stimulus" + ";" + 'cropping_percentage' + ";" + 'orientation' + ';'
+            + 'image_ref_position' + ';' + 'answer' + ';' + 'time_reaction' + ';' + 'entropy' + '\n')
+    f.close()
+
+    # expe parameters
+    data = {
+        'expe_name': expe_name,
+        'img_merged_path': filepath_img,
+        'question': question_sentence,
+        'indication': indication_sentence
+    }
 
-    link1 = 'http://diran.univ-littoral.fr/api/images/bathroom/bathroom_00200.png'
-    return render(request, 'expe/expe.html', {'expe_name': 'test_expe', 'link1': link1})
+    return render(request, 'expe/expe.html', data)
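
A sketch of the expected request flow through these views, using Django's test client (this assumes expe/urls.py is mounted at the project root):

    from django.test import Client

    client = Client()

    # choice page listing scenes and experiments
    client.get('/')

    # launch an experiment (parameter values are illustrative)
    client.get('/expe', {'scene': 'bathroom', 'expe': 'quest_same_image'})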

+ 7 - 1
requirement.txt

@@ -1 +1,7 @@
-Django
+Django
+numpy
+Pillow
+questplus
+psychopy
+pandas
+psychometric

+ 1 - 1
static/css/expe.css

@@ -3,6 +3,6 @@ body {
 }
 
 .container{
-    margin-top: 5%;
+    margin-top: 2%;
     text-align: center;
 }

+ 22 - 0
static/js/keyEvents.js

@@ -0,0 +1,22 @@
+// implement `key` events
+document.onkeydown = checkKey;
+
+function checkKey(e) {
+
+    e = e || window.event;
+
+    if (e.keyCode === 81) {
+        // `q` quits the expe
+        console.log('`q` key is pressed');
+        window.location = '';
+    }
+    else if (e.keyCode === 37) {
+        // left arrow
+        console.log('left arrow is pressed');
+    }
+    else if (e.keyCode === 39) {
+        // right arrow
+        console.log('right arrow is pressed');
+    }
+
+}

+ 22 - 1
static/js/loadImg.js

@@ -2,4 +2,25 @@
 
 window.onload = function () {
     console.log('Load img here...');
-}
+
+
+    /*img_data = document.getElementById('expeImg').getAttribute('data-img');
+    img_data = JSON.parse(img_data);
+
+    var p = new PNGlib(800, 800, 256); // constructor takes width, height and color-depth
+    var background = p.color(0, 0, 0, 0); // set the background transparent
+
+    for (var i = 0; i < 800; i++){
+        for (var j = 0; j < 800; j++){
+
+            let r = img_data[i][j][0]
+            let g = img_data[i][j][1]
+            let b = img_data[i][j][2]
+
+            p.buffer[i, j] = p.color(r, g, b)
+        }
+    }
+    console.log('done')
+
+    document.getElementById('expeImg').src = "data:image/png;base64,"+p.getBase64();*/
+}

+ 214 - 0
static/js/pnglib.js

@@ -0,0 +1,214 @@
+/**
+* A handy class to calculate color values.
+*
+* @version 1.0
+* @author Robert Eisele <robert@xarg.org>
+* @copyright Copyright (c) 2010, Robert Eisele
+* @link http://www.xarg.org/2010/03/generate-client-side-png-files-using-javascript/
+* @license http://www.opensource.org/licenses/bsd-license.php BSD License
+*
+*/
+
+(function() {
+
+	// helper functions for that ctx
+	function write(buffer, offs) {
+		for (var i = 2; i < arguments.length; i++) {
+			for (var j = 0; j < arguments[i].length; j++) {
+				buffer[offs++] = arguments[i].charAt(j);
+			}
+		}
+	}
+
+	function byte2(w) {
+		return String.fromCharCode((w >> 8) & 255, w & 255);
+	}
+
+	function byte4(w) {
+		return String.fromCharCode((w >> 24) & 255, (w >> 16) & 255, (w >> 8) & 255, w & 255);
+	}
+
+	function byte2lsb(w) {
+		return String.fromCharCode(w & 255, (w >> 8) & 255);
+	}
+
+	window.PNGlib = function(width,height,depth) {
+
+		this.width   = width;
+		this.height  = height;
+		this.depth   = depth;
+
+		// pixel data and row filter identifier size
+		this.pix_size = height * (width + 1);
+
+		// deflate header, pix_size, block headers, adler32 checksum
+		this.data_size = 2 + this.pix_size + 5 * Math.floor((0xfffe + this.pix_size) / 0xffff) + 4;
+
+		// offsets and sizes of Png chunks
+		this.ihdr_offs = 0;									// IHDR offset and size
+		this.ihdr_size = 4 + 4 + 13 + 4;
+		this.plte_offs = this.ihdr_offs + this.ihdr_size;	// PLTE offset and size
+		this.plte_size = 4 + 4 + 3 * depth + 4;
+		this.trns_offs = this.plte_offs + this.plte_size;	// tRNS offset and size
+		this.trns_size = 4 + 4 + depth + 4;
+		this.idat_offs = this.trns_offs + this.trns_size;	// IDAT offset and size
+		this.idat_size = 4 + 4 + this.data_size + 4;
+		this.iend_offs = this.idat_offs + this.idat_size;	// IEND offset and size
+		this.iend_size = 4 + 4 + 4;
+		this.buffer_size  = this.iend_offs + this.iend_size;	// total PNG size
+
+		this.buffer  = new Array();
+		this.palette = new Object();
+		this.pindex  = 0;
+
+		var _crc32 = new Array();
+
+		// initialize buffer with zero bytes
+		for (var i = 0; i < this.buffer_size; i++) {
+			this.buffer[i] = "\x00";
+		}
+
+		// initialize non-zero elements
+		write(this.buffer, this.ihdr_offs, byte4(this.ihdr_size - 12), 'IHDR', byte4(width), byte4(height), "\x08\x03");
+		write(this.buffer, this.plte_offs, byte4(this.plte_size - 12), 'PLTE');
+		write(this.buffer, this.trns_offs, byte4(this.trns_size - 12), 'tRNS');
+		write(this.buffer, this.idat_offs, byte4(this.idat_size - 12), 'IDAT');
+		write(this.buffer, this.iend_offs, byte4(this.iend_size - 12), 'IEND');
+
+		// initialize deflate header
+		var header = ((8 + (7 << 4)) << 8) | (3 << 6);
+		header+= 31 - (header % 31);
+
+		write(this.buffer, this.idat_offs + 8, byte2(header));
+
+		// initialize deflate block headers
+		for (var i = 0; (i << 16) - 1 < this.pix_size; i++) {
+			var size, bits;
+			if (i + 0xffff < this.pix_size) {
+				size = 0xffff;
+				bits = "\x00";
+			} else {
+				size = this.pix_size - (i << 16) - i;
+				bits = "\x01";
+			}
+			write(this.buffer, this.idat_offs + 8 + 2 + (i << 16) + (i << 2), bits, byte2lsb(size), byte2lsb(~size));
+		}
+
+		/* Create crc32 lookup table */
+		for (var i = 0; i < 256; i++) {
+			var c = i;
+			for (var j = 0; j < 8; j++) {
+				if (c & 1) {
+					c = -306674912 ^ ((c >> 1) & 0x7fffffff);
+				} else {
+					c = (c >> 1) & 0x7fffffff;
+				}
+			}
+			_crc32[i] = c;
+		}
+
+		// compute the index into a png for a given pixel
+		this.index = function(x,y) {
+			var i = y * (this.width + 1) + x + 1;
+			var j = this.idat_offs + 8 + 2 + 5 * Math.floor((i / 0xffff) + 1) + i;
+			return j;
+		}
+
+		// convert a color and build up the palette
+		this.color = function(red, green, blue, alpha) {
+
+			alpha = alpha >= 0 ? alpha : 255;
+			var color = (((((alpha << 8) | red) << 8) | green) << 8) | blue;
+
+			if (typeof this.palette[color] == "undefined") {
+				if (this.pindex == this.depth) return "\x00";
+
+				var ndx = this.plte_offs + 8 + 3 * this.pindex;
+
+				this.buffer[ndx + 0] = String.fromCharCode(red);
+				this.buffer[ndx + 1] = String.fromCharCode(green);
+				this.buffer[ndx + 2] = String.fromCharCode(blue);
+				this.buffer[this.trns_offs+8+this.pindex] = String.fromCharCode(alpha);
+
+				this.palette[color] = String.fromCharCode(this.pindex++);
+			}
+			return this.palette[color];
+		}
+
+		// output a PNG string, Base64 encoded
+		this.getBase64 = function() {
+
+			var s = this.getDump();
+
+			// If the current browser supports the Base64 encoding
+			// function, then offload that to the browser as it
+			// will be done in native code.
+			if ((typeof window.btoa !== 'undefined') && (window.btoa !== null)) {
+				return window.btoa(s);
+			}
+
+			var ch = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
+			var c1, c2, c3, e1, e2, e3, e4;
+			var l = s.length;
+			var i = 0;
+			var r = "";
+
+			do {
+				c1 = s.charCodeAt(i);
+				e1 = c1 >> 2;
+				c2 = s.charCodeAt(i+1);
+				e2 = ((c1 & 3) << 4) | (c2 >> 4);
+				c3 = s.charCodeAt(i+2);
+				if (l < i+2) { e3 = 64; } else { e3 = ((c2 & 0xf) << 2) | (c3 >> 6); }
+				if (l < i+3) { e4 = 64; } else { e4 = c3 & 0x3f; }
+				r+= ch.charAt(e1) + ch.charAt(e2) + ch.charAt(e3) + ch.charAt(e4);
+			} while ((i+= 3) < l);
+			return r;
+		}
+
+		// output a PNG string
+		this.getDump = function() {
+
+			// compute adler32 of output pixels + row filter bytes
+			var BASE = 65521; /* largest prime smaller than 65536 */
+			var NMAX = 5552;  /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
+			var s1 = 1;
+			var s2 = 0;
+			var n = NMAX;
+
+			for (var y = 0; y < this.height; y++) {
+				for (var x = -1; x < this.width; x++) {
+					s1+= this.buffer[this.index(x, y)].charCodeAt(0);
+					s2+= s1;
+					if ((n-= 1) == 0) {
+						s1%= BASE;
+						s2%= BASE;
+						n = NMAX;
+					}
+				}
+			}
+			s1%= BASE;
+			s2%= BASE;
+			write(this.buffer, this.idat_offs + this.idat_size - 8, byte4((s2 << 16) | s1));
+
+			// compute crc32 of the PNG chunks
+			function crc32(png, offs, size) {
+				var crc = -1;
+				for (var i = 4; i < size-4; i += 1) {
+					crc = _crc32[(crc ^ png[offs+i].charCodeAt(0)) & 0xff] ^ ((crc >> 8) & 0x00ffffff);
+				}
+				write(png, offs+size-4, byte4(crc ^ -1));
+			}
+
+			crc32(this.buffer, this.ihdr_offs, this.ihdr_size);
+			crc32(this.buffer, this.plte_offs, this.plte_size);
+			crc32(this.buffer, this.trns_offs, this.trns_size);
+			crc32(this.buffer, this.idat_offs, this.idat_size);
+			crc32(this.buffer, this.iend_offs, this.iend_size);
+
+			// convert PNG to string
+			return "\211PNG\r\n\032\n"+this.buffer.join('');
+		}
+	}
+
+})();