
first commit

vuthanho 4 years ago
Current commit
d7d593d947

+ 162 - 0
.gitignore

@@ -0,0 +1,162 @@
+# Windows default autosave extension
+*.asv
+
+# OSX / *nix default autosave extension
+*.m~
+
+# Compiled MEX binaries (all platforms)
+*.mex*
+
+# Packaged app and toolbox files
+*.mlappinstall
+*.mltbx
+
+# Generated helpsearch folders
+helpsearch*/
+
+# Simulink code generation folders
+slprj/
+sccprj/
+
+# Matlab code generation folders
+codegen/
+
+# Simulink autosave extension
+*.autosave
+
+# Simulink cache files
+*.slxc
+
+# Octave session info
+octave-workspace
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+tests/
+annexes/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# IDE
+.idea
+.vscode/

+ 73 - 0
MATLAB/calib_meth/EMNeNMF/emnenmf.m

@@ -0,0 +1,73 @@
+function [ T , RMSE ] = emnenmf( W , X , G , F , Omega_G, Omega_F, Phi_G, Phi_F , InnerMinIter , InnerMaxIter , Tmax , v, F_theo, delta_measure)
+
+X0 =X;
+Omega_G = (Omega_G == 1); % Logical mask is faster than indexing in matlab.
+Omega_F = (Omega_F == 1); % Logical mask is faster than indexing in matlab.
+% nOmega_G = ~Omega_G; % Logical mask is faster than indexing in matlab.
+% nOmega_F = ~Omega_F; % Logical mask is faster than indexing in matlab.
+
+[~, num_sensor] = size(F);
+num_sensor = num_sensor-1;
+em_iter_max = round(Tmax / delta_measure) ;
+T = nan(1,em_iter_max);
+RMSE = nan(2,em_iter_max);
+
+X = G*F+W.*(X0-G*F);
+GG = G'*G;
+GX = G'*X;
+GradF = GG*F-GX;
+
+FF = F*F';
+XF = X*F';
+GradG = G*FF-XF;
+
+d = Grad_P([GradG',GradF],[G',F]);
+StoppingCritF = 1.e-3*d;
+StoppingCritG = StoppingCritF;
+
+tic
+i = 1;
+T(i) = toc;
+RMSE(:,i) = vecnorm(F(:,1:end-1)- F_theo(:,1:end-1),2,2)/sqrt(num_sensor);
+niter = 0;
+while toc<Tmax
+    
+    X = G*F+W.*(X0-G*F);
+    for j =1:v
+        FF = F*F';
+        XF = X*F' - Phi_G*FF;
+
+        G(Omega_G) = 0; % Convert G to \Delta G
+        [ G , iterG , ~ ] = MaJ_G_EM_NeNMF( G , FF , XF , InnerMaxIter , StoppingCritG , Omega_G); % Update \Delta G
+        G(Omega_G) = Phi_G(Omega_G); % Convert \Delta G to G
+        niter = niter + iterG;
+        if(iterG<=InnerMinIter)
+            StoppingCritG = 1.e-1*StoppingCritG;
+        end
+
+        GG = G'*G;
+        GX = G'*X-GG*Phi_F;
+
+        F(Omega_F) = 0; % Convert F to \Delta F
+        [ F , iterF ] = MaJ_F_EM_NeNMF( GG , GX , F , InnerMaxIter , StoppingCritF , Omega_F); % Update \Delta F
+        F(Omega_F) = Phi_F(Omega_F); % Convert \Delta F to F
+        niter = niter + iterF;
+        if(iterF<=InnerMinIter)
+            StoppingCritF = 1.e-1*StoppingCritF;
+        end
+
+        if toc - i*delta_measure >= delta_measure
+            i = i+1;
+            if i > em_iter_max
+                break
+            end
+            T(i) = toc;
+            RMSE(:,i) = vecnorm(F(:,1:end-1) - F_theo(:,1:end-1),2,2)/sqrt(num_sensor);
+        end
+    end
+    
+end
+
+end
+
+

+ 76 - 0
MATLAB/calib_meth/INCAL/IN_Cal.m

@@ -0,0 +1,76 @@
+function [ T , RMSE ] = IN_Cal( W , X , G , F , Omega_G , Omega_F , Phi_G , Phi_F , F_theo , Tmax, delta_measure )
+
+[~, num_sensor] = size(F);
+num_sensor = num_sensor-1;
+
+W2 = W.^2;
+Omega_Bar_G = ones(size(G))-Omega_G;
+Omega_Bar_F = ones(size(F))-Omega_F;
+Delta_G = G.*Omega_Bar_G;
+Delta_F = F.*Omega_Bar_F;
+
+iter_max = round(Tmax / delta_measure) ;
+T = nan(1,iter_max);
+RMSE = nan(2,iter_max);
+
+tic
+i = 1;
+T(i) = toc;
+RMSE(:,i) = vecnorm(F(:,1:end-1)-F_theo(:,1:end-1),2,2)/sqrt(num_sensor);
+
+while toc<Tmax
+    
+    % updating G
+    Delta_G = Updt_Delta_G( W2 , X , Delta_G , Phi_G , F , Omega_Bar_G );
+    G = Phi_G + Delta_G;
+    
+    % updating F
+    Delta_F = Updt_Delta_F( W2 , X , G , Delta_F , Phi_F , Omega_Bar_F );
+    F = Phi_F + Delta_F;
+    
+    if toc - i*delta_measure >= delta_measure
+        i = i+1;
+        if i > iter_max
+            break
+        end
+        T(i) = toc;
+        RMSE(:,i) = vecnorm(F(:,1:end-1) - F_theo(:,1:end-1),2,2)/sqrt(num_sensor);
+    end
+    
+end
+
+end
+
+
+
+function [ Delta_F ] = Updt_Delta_F( W , X , G , Delta_F , Phi_F , Omega_B_F )
+%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Update rule for Delta_F %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%
+Delta_F = Delta_F.*(Omega_B_F).*((G'*(W.*secu_plus(X-G*Phi_F)))./(G'*(W.*(G*Delta_F))));
+Delta_F(isnan(Delta_F))=0;
+end
+
+
+
+function [ Delta_G ] = Updt_Delta_G( W , X , Delta_G , Phi_G , F , Omega_B_G )
+%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Update rule for Delta_G %
+%%%%%%%%%%%%%%%%%%%%%%%%%%%
+Delta_G = Delta_G.*(Omega_B_G).*((W.*secu_plus(X-Phi_G*F))*F')./((W.*(Delta_G*F)*F')); % update
+Delta_G(isnan(Delta_G))=0;
+end
+
+
+
+function [toto] = secu_plus(tutu,s)
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% Goal: Safeguard step in the NMF procedure which projects negative data to
+% epsilon (a small user-defined threshold)
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+if(nargin<2)
+    s=1.e-12;
+end
+toto=max(tutu,s);
+toto(isnan(tutu)) = 0;
+end
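
As a reading aid, the multiplicative rule implemented by `Updt_Delta_F` above (and, symmetrically, `Updt_Delta_G`) can be written as follows, where the product and the fraction are entry-wise, $\bar{\Omega}_F$ masks the free entries of F, the weight matrix passed in is W2 = W.^2, and $[\cdot]_{\varepsilon}$ denotes `secu_plus` (negative entries pushed up to a small $\varepsilon$, NaNs set to 0):

$$
\Delta_F \leftarrow \Delta_F \odot \bar{\Omega}_F \odot
\frac{G^{\top}\!\left(W^{2} \odot [\,X - G\,\Phi_F\,]_{\varepsilon}\right)}
     {G^{\top}\!\left(W^{2} \odot (G\,\Delta_F)\right)},
\qquad F \leftarrow \Phi_F + \Delta_F,
$$

with any NaN produced by a zero denominator reset to 0, as done by `Delta_F(isnan(Delta_F))=0`.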

+ 134 - 0
MATLAB/calib_meth/REMNeNMF/remnenmf.m

@@ -0,0 +1,134 @@
+function [ T , RMSE ] = remnenmf( W , X , G , F , Omega_G, Omega_F, Phi_G, Phi_F , InnerMinIter , InnerMaxIter , Tmax , v, F_theo, delta_measure)
+
+r = 0;
+X0 = X;
+Omega_G = (Omega_G == 1); % Logical mask is faster than indexing in matlab.
+Omega_F = (Omega_F == 1); % Logical mask is faster than indexing in matlab.
+% nOmega_G = ~Omega_G; % Logical mask is faster than indexing in matlab.
+% nOmega_F = ~Omega_F; % Logical mask is faster than indexing in matlab.
+
+[~, num_sensor] = size(F);
+num_sensor = num_sensor-1;
+em_iter_max = round(Tmax / delta_measure) ;
+T = nan(1,em_iter_max);
+RMSE = nan(2,em_iter_max);
+
+X = G*F+W.*(X0-G*F);
+[L,R]=RSI_compression(X,r);
+% Compress left and right
+X_L = L * X;
+X_R = X * R;
+G_L = L * G;
+F_R = F * R;
+
+GG = G_L' * G_L;
+GX = G_L' * X_L;
+GradF = GG * F - GX;
+
+FF = F_R * F_R';
+XF = X_R * F_R';
+GradG = G * FF - XF;
+
+d = Grad_P([GradG',GradF],[G',F]);
+StoppingCritF = 1.e-3*d;
+StoppingCritG = StoppingCritF;
+
+tic
+i = 1;
+niter = 0;
+RMSE(:,i) = vecnorm(F(:,1:end-1)- F_theo(:,1:end-1),2,2)/sqrt(num_sensor);
+T(i) = toc;
+while toc<Tmax
+    
+    % Estimation step
+    X = G*F+W.*(X0-G*F);
+    [L,R]=RSI_compression(X,r);
+    % Compress left and right
+    X_L = L * X;
+    X_R = X * R;
+    
+    % Maximization step
+    for j =1:v
+        F_R = F * R;
+        FF = F_R * F_R';
+        XF = X_R * F_R' - Phi_G * FF;
+
+        G(Omega_G) = 0; % Convert G to \Delta G
+        [ G , iterG , ~ ] = MaJ_G_EM_NeNMF( G , FF , XF , InnerMaxIter , StoppingCritG , Omega_G); % Update \Delta G
+        G(Omega_G) = Phi_G(Omega_G); % Convert \Delta G to G
+
+        if(iterG<=InnerMinIter)
+            StoppingCritG = 1.e-1*StoppingCritG;
+        end
+        
+        G_L = L * G;
+        GG = G_L' * G_L;
+        GX = G_L' * X_L - GG * Phi_F;
+
+        F(Omega_F) = 0; % Convert F to \Delta F
+        [ F , iterF ] = MaJ_F_EM_NeNMF( GG , GX , F , InnerMaxIter , StoppingCritF , Omega_F); % Update \Delta F
+        F(Omega_F) = Phi_F(Omega_F); % Convert \Delta F to F
+
+        if(iterF<=InnerMinIter)
+            StoppingCritF = 1.e-1*StoppingCritF;
+        end
+
+        if toc - i*delta_measure >= delta_measure
+            i = i+1;
+            if i > em_iter_max
+                break
+            end
+            T(i) = toc;
+            RMSE(:,i) = vecnorm(F(:,1:end-1) - F_theo(:,1:end-1),2,2)/sqrt(num_sensor);
+        end
+    end
+    niter = niter + 1;
+end
+niter % no semicolon: displays the total number of outer iterations
+end
+
+function [ L,R ] = RSI_compression(X,r)
+%             Tepper, M., & Sapiro, G. (2016). Compressed nonnegative
+%             matrix factorization is fast and accurate. IEEE Transactions
+%             on Signal Processing, 64(9), 2269-2283.
+%             see: https://arxiv.org/pdf/1505.04650
+%             The corresponding code was originally created by the authors,
+%             then modified by F. Yahaya.
+%             Date: 13/04/2018
+%
+
+compressionLevel=10;
+[m,n]=size(X);
+
+l = min(n, max(compressionLevel, r + 10));
+
+OmegaL = randn(n,l);
+
+Y = X * OmegaL;
+
+for i=1:4
+    
+    [Y,~]=qr(Y,0);
+    S=X'*Y;
+    [Z,~]=qr(S,0);
+    Y=X* Z;
+end
+[L,~]=qr(Y,0);
+L=L';
+
+OmegaR = randn(l, m);
+Y = OmegaR * X;
+
+for i=1:4
+    [Y,~]=qr(Y',0);
+    S=X*Y;
+    [Z,~]=qr(S,0);
+    
+    Y=Z'*X;
+end
+Y=Y';
+[R,~] = qr(Y,0);
+
+
+end
+
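
For clarity, the compressed quantities formed in the loop above follow the compressed-NMF scheme of Tepper & Sapiro referenced in `RSI_compression`: with the sketching matrices L and R and the compressed data X_L = L·X, X_R = X·R, the gradients fed to the inner Nesterov solvers are the sketched approximations

$$
\nabla_G \approx G\,(F R)(F R)^{\top} - (X R)(F R)^{\top},
\qquad
\nabla_F \approx (L G)^{\top}(L G)\,F - (L G)^{\top}(L X),
$$

which is exactly what the assignments `FF = F_R*F_R'`, `XF = X_R*F_R'`, `GG = G_L'*G_L` and `GX = G_L'*X_L` compute.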

+ 3 - 0
MATLAB/calib_meth/common_nesterov/Grad_P.m

@@ -0,0 +1,3 @@
+function GradP = Grad_P( Grad , X )
+
+GradP=norm(Grad(Grad<0|X>0));

+ 30 - 0
MATLAB/calib_meth/common_nesterov/MaJ_F_EM_NeNMF.m

@@ -0,0 +1,30 @@
+function [ F1 , i ] = MaJ_F_EM_NeNMF( GG , GX , F1 , InnerMaxIter , StoppingCritF , Omega_F)
+
+Y = F1;
+
+alpha=zeros(2,1);
+alpha(1) = 1;
+
+Grad_y = GG*Y-GX;
+
+L = norm(GG);
+
+for i = 1 : InnerMaxIter
+    
+    F2 = max(Y-(1/L)*Grad_y,0);
+    F2(Omega_F) = 0; % Projection on not(Omega_F)
+    alpha(2) = (1+sqrt(4*alpha(1)^2+1))/2;
+    
+    Y = F2 + ((alpha(1)-1)/alpha(2))*(F2-F1);
+    Grad_y = GG*Y-GX;
+    
+    F1 = F2;
+    alpha(1) = alpha(2);
+    
+    if(Grad_P(Grad_y , Y)<=StoppingCritF)
+        break
+    end
+    
+end
+
+end
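
The loop above is Nesterov's accelerated projected gradient applied to the quadratic subproblem in F (NeNMF-style), with step size 1/L where L = ‖GᵀG‖₂. Writing $[\cdot]_{+,\Omega_F}$ for projection onto the nonnegative orthant followed by zeroing the fixed entries $\Omega_F$, each pass performs

$$
F_{k+1} = \big[\,Y_k - \tfrac{1}{L}\,(G^{\top}G\,Y_k - G^{\top}X)\,\big]_{+,\Omega_F},
\qquad
\alpha_{k+1} = \tfrac{1+\sqrt{4\alpha_k^{2}+1}}{2},
\qquad
Y_{k+1} = F_{k+1} + \tfrac{\alpha_k-1}{\alpha_{k+1}}\,(F_{k+1}-F_k),
$$

and stops as soon as the projected-gradient norm `Grad_P(Grad_y, Y)` drops below `StoppingCritF`. `MaJ_G_EM_NeNMF` below is the same scheme applied to G.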

+ 30 - 0
MATLAB/calib_meth/common_nesterov/MaJ_G_EM_NeNMF.m

@@ -0,0 +1,30 @@
+function [ G1 , i , GradG ] = MaJ_G_EM_NeNMF( G1 , FF , XF ,  InnerMaxIter , StoppingCritG , Omega_G)
+
+Y = G1;
+
+alpha = zeros(2,1);
+alpha(1) = 1;
+
+L = norm(FF);
+
+Grad_y = Y*FF-XF;
+
+for i = 1 : InnerMaxIter
+    
+    G2 = max(Y-(1/L)*Grad_y,0);
+    G2(Omega_G) = 0;
+    alpha(2) = (1+sqrt(4*alpha(1)^2+1))/2;
+    
+    Y = G2 + ((alpha(1)-1)/alpha(2))*(G2-G1);
+    Grad_y = Y*FF-XF;
+    
+    G1 = G2;
+    alpha(1) = alpha(2);
+    
+    if(Grad_P(Grad_y , Y)<=StoppingCritG)
+        break
+    end
+    
+end
+GradG = G1*FF-XF;
+end

+ 83 - 0
MATLAB/common/data_gen.m

@@ -0,0 +1,83 @@
+function [X, X_theo, W, F_theo, Omega_G, Omega_F, Phi_G, Phi_F, Ginit, Finit] = data_gen(s_width, s_length, run, N_Ref, N_Cpt, Mu_beta, Mu_alpha, Bound_beta, Bound_alpha, MV, RV, var_n) 
+
+rng(run+1306) % Random seed
+
+%% Scene simulation 
+
+phenLowerBound = 0.05;
+phenUpperBound = 0.15;
+n_pic = 15;
+s_n = s_width*s_length; % Total number of areas in the scene
+[xx,yy] = meshgrid((-1:2/(s_width-1):1),(-1:2/(s_length-1):1));
+xxyy = cat(2,xx(:),yy(:));
+
+g = zeros(1,s_n);
+for pic = 1:n_pic
+    mu = 2*(rand(1,2)-0.5);
+    sig = diag([ phenLowerBound + (phenUpperBound - phenLowerBound)*abs(randn()+0.5) , phenLowerBound + (phenUpperBound - phenLowerBound)*abs(randn()+0.5) ]);
+    g = g + mvnpdf(xxyy,mu,sig);
+end
+
+g = g-min(g);
+g = .5*(g/max(g))+1e-5;
+G_theo = [ones(s_n,1),g]; % Theoretical matrix G (see eq.(3) of [1])
+
+
+%% Sensors simulation
+
+F_theo = [max(Bound_beta(1),min(Bound_beta(2),Mu_beta+.5*randn(1,N_Cpt)));...
+     max(Bound_alpha(1),min(Bound_alpha(2),Mu_alpha+.5*randn(1,N_Cpt)))];
+F_theo = [F_theo,[0;1]]; % Theoretical matrix F (see eq.(3) of [1])
+
+
+%% Data simulation
+
+X_theo = G_theo*F_theo; %  Theoretical matrix X (see eq.(2) of [1])
+
+W = zeros(s_n,N_Cpt+1);
+
+idx_Ref = randperm(s_n);
+idx_Ref = idx_Ref(1:N_Ref); % Reference measurement locations
+W(idx_Ref,end) = 1;
+
+N_RV = round(N_Cpt*RV); % Nb. of sensors having a RendezVous
+idx_CptRV = randperm(N_Cpt);
+idx_CptRV = idx_CptRV(1:N_RV); % Selection of sensors having a RendezVous
+idx_RefRV = randi(N_Ref,1,N_Cpt);
+idx_RefRV = idx_Ref(idx_RefRV(1:N_RV)); % Selection of the references for each RendezVous
+
+for i = 1 : N_RV
+    W(idx_RefRV(i),idx_CptRV(i)) = 1;
+end
+
+N_data = round((1-MV)*(N_Cpt)*(100-N_Ref)); % Nb. of measurements in data matrix X
+xCpt = 1 : s_n;
+xCpt(idx_Ref) = []; % Reference free locations
+[xx,yy] = meshgrid(xCpt,1:N_Cpt); % Possibly sensed locations
+
+idx_data = randperm((s_n-N_Ref)*N_Cpt);
+for i = 1 : N_data
+    W(xx(idx_data(i)),yy(idx_data(i))) = 1; % Sensor measurement placement
+end
+
+N = var_n*randn(s_n,N_Cpt+1); % Noise simulation
+N(:,end) = 0;
+N = max(N,-X_theo);
+
+X = W.*(X_theo+N); % Data matrix X
+
+%% Calibration parameters
+
+% % Common parameters
+Omega_G = [ones(s_n,1),W(:,end)]; % Mask on known values in G (see eq.(14) of [1])
+Omega_F = [zeros(2,N_Cpt),[1;1]]; % Mask on known values in F (see eq.(15) of [1])
+Phi_G = [ones(s_n,1),X(:,end)]; % Known values in G (see eq.(14) of [1])
+Phi_F = [zeros(2,N_Cpt),[0;1]]; % Known values in F (see eq.(15) of [1])
+Ginit = abs(randn(s_n,2)+mean(Phi_G(idx_Ref,end))); % Initial matrix G : randn + mean of known ref values
+Ginit = (1-Omega_G).*Ginit+Phi_G;
+Finit = [max(Bound_beta(1),min(Bound_beta(2),Mu_beta+.5*randn(1,N_Cpt)));...
+     max(Bound_alpha(1),min(Bound_alpha(2),Mu_alpha+.5*randn(1,N_Cpt)))];
+Finit = [Finit,[0;1]]; % Initial matrix F
+Finit = (1-Omega_F).*Finit+Phi_F;
+
+end
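
Written out, the simulated data follow the affine per-sensor response that the eq. (2)/(3) comments refer to: with g the simulated phenomenon over the s_n areas,

$$
X_{\mathrm{theo}} = G_{\mathrm{theo}}\,F_{\mathrm{theo}},
\qquad
G_{\mathrm{theo}} = [\,\mathbf{1}_{s_n},\ g\,],
\qquad
(X_{\mathrm{theo}})_{ij} = (F_{\mathrm{theo}})_{1j} + g_i\,(F_{\mathrm{theo}})_{2j},
$$

and only the entries selected by the sampling mask W (plus the truncated noise N) are kept in the observed matrix X = W ⊙ (X_theo + N).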

+ 112 - 0
MATLAB/main_incal_vs_emnenmf.m

@@ -0,0 +1,112 @@
+clear
+close all
+clc
+
+%% Add every file in the path
+
+addpath(genpath(pwd))
+
+%% Simulation parameters
+
+s_width = 100;  % Scene width
+s_length = 100; % Scene length
+N_Ref = 4; % Nb. of reference measurements
+N_Cpt = 25; % Nb. of mobile sensors
+Mu_beta = .9; % Mean sensors gain
+Mu_alpha = 5; % Mean sensors offset
+Bound_beta = [.01;1.5]; % Gain boundaries
+Bound_alpha = [3.5;6.5]; % Offset boundaries
+
+MV = .5; % Missing Value prop.
+RV = 0.3; % RendezVous prop.
+var_n = 0; % Noise variance
+M_loop = 5; % Number of M loop per E step
+runs = 1; % Total number of runs
+
+%% Nesterov parameters
+InnerMinIter = 5;
+InnerMaxIter = 100;
+Tmax = 300;
+
+%% 
+delta_measure = 1;
+iter_max = round(Tmax / delta_measure);
+
+
+%% Allocation for the RMSE values
+% emnenmf
+RMSE_offset_emnenmf = nan(runs, iter_max);
+RMSE_gain_emnenmf = nan(runs, iter_max);
+% Incal
+RMSE_offset_incal = nan(runs, iter_max);
+RMSE_gain_incal = nan(runs, iter_max);
+
+for run = 1:runs
+    % data generation
+    [X, X_theo, W, F_theo, Omega_G, Omega_F, Phi_G, Phi_F, Ginit, Finit] = data_gen(s_width, s_length, run, N_Ref, N_Cpt, Mu_beta, Mu_alpha, Bound_beta, Bound_alpha, MV, RV, var_n);
+    % emnenmf
+    [T_emnenmf, RMSE] = emnenmf( W , X , Ginit , Finit, Omega_G, Omega_F, Phi_G, Phi_F , InnerMinIter , InnerMaxIter , Tmax , M_loop, F_theo, delta_measure);
+    RMSE_offset_emnenmf(run,:) = RMSE(1,:);
+    RMSE_gain_emnenmf(run,:) = RMSE(2,:);
+    % incal
+    [T_incal, RMSE] = IN_Cal( W , X , Ginit , Finit , Omega_G , Omega_F , Phi_G , Phi_F , F_theo , Tmax, delta_measure );
+    RMSE_offset_incal(run,:) = RMSE(1,:);
+    RMSE_gain_incal(run,:) = RMSE(2,:);    
+    
+end
+
+% emnenmf
+min_offset_emnenmf = min(RMSE_offset_emnenmf,[],1,'omitnan');
+med_offset_emnenmf = median(RMSE_offset_emnenmf,1,'omitnan');
+max_offset_emnenmf = max(RMSE_offset_emnenmf,[],1,'omitnan');
+
+min_gain_emnenmf = min(RMSE_gain_emnenmf,[],1,'omitnan');
+med_gain_emnenmf = median(RMSE_gain_emnenmf,1,'omitnan');
+max_gain_emnenmf = max(RMSE_gain_emnenmf,[],1,'omitnan');
+
+subplot(121)
+semilogy(T_emnenmf,min_offset_emnenmf,'b')
+hold on
+semilogy(T_emnenmf,med_offset_emnenmf,'b')
+o_e = semilogy(T_emnenmf,max_offset_emnenmf,'b');
+hold off
+
+subplot(122)
+semilogy(T_emnenmf,min_gain_emnenmf,'b')
+hold on
+semilogy(T_emnenmf,med_gain_emnenmf,'b')
+g_e = semilogy(T_emnenmf,max_gain_emnenmf,'b');
+hold off
+
+% incal
+min_offset_incal = min(RMSE_offset_incal,[],1,'omitnan');
+med_offset_incal = median(RMSE_offset_incal,1,'omitnan');
+max_offset_incal = max(RMSE_offset_incal,[],1,'omitnan');
+
+min_gain_incal = min(RMSE_gain_incal,[],1,'omitnan');
+med_gain_incal = median(RMSE_gain_incal,1,'omitnan');
+max_gain_incal = max(RMSE_gain_incal,[],1,'omitnan');
+
+subplot(121)
+hold on
+semilogy(T_incal,min_offset_incal,'r')
+semilogy(T_incal,med_offset_incal,'r')
+o_i = semilogy(T_incal,max_offset_incal,'r');
+hold off
+
+subplot(122)
+hold on
+semilogy(T_incal,min_gain_incal,'r')
+semilogy(T_incal,med_gain_incal,'r')
+g_i = semilogy(T_incal,max_gain_incal,'r');
+hold off
+
+% adding titles and legends
+subplot(121)
+title('RMSE offset')
+legend([o_e o_i],'EMNeNMF','IN\_Cal')
+
+subplot(122)
+title('RMSE gain')
+legend([g_e g_i],'EMNeNMF','IN\_Cal')
+

+ 112 - 0
MATLAB/main_incal_vs_remnenmf.m

@@ -0,0 +1,112 @@
+clear
+close all
+clc
+
+%% Add every file in the path
+
+addpath(genpath(pwd))
+
+%% Simulation parameters
+
+s_width = 100;  % Scene width
+s_length = 100; % Scene length
+N_Ref = 4; % Nb. of reference measurements
+N_Cpt = 25; % Nb. of mobile sensors
+Mu_beta = .9; % Mean sensors gain
+Mu_alpha = 5; % Mean sensors offset
+Bound_beta = [.01;1.5]; % Gain boundaries
+Bound_alpha = [3.5;6.5]; % Offset boundaries
+
+MV = .5; % Missing Value prop.
+RV = 0.3; % RendezVous prop.
+var_n = 0; % Noise variance
+M_loop = 20; % Number of M loop per E step
+runs = 1; % Total number of runs
+
+%% Nesterov parameters
+InnerMinIter = 5;
+InnerMaxIter = 100;
+Tmax = 300;
+
+%% 
+delta_measure = 1;
+iter_max = round(Tmax / delta_measure);
+
+
+%% Allocation for the RMSE values
+% remnenmf
+RMSE_offset_remnenmf = nan(runs, iter_max);
+RMSE_gain_remnenmf = nan(runs, iter_max);
+% Incal
+RMSE_offset_incal = nan(runs, iter_max);
+RMSE_gain_incal = nan(runs, iter_max);
+
+for run = 1:runs
+    % data generation
+    [X, X_theo, W, F_theo, Omega_G, Omega_F, Phi_G, Phi_F, Ginit, Finit] = data_gen(s_width, s_length, run, N_Ref, N_Cpt, Mu_beta, Mu_alpha, Bound_beta, Bound_alpha, MV, RV, var_n);
+    % remnenmf
+    [T_remnenmf, RMSE] = remnenmf( W , X , Ginit , Finit, Omega_G, Omega_F, Phi_G, Phi_F , InnerMinIter , InnerMaxIter , Tmax , M_loop, F_theo, delta_measure);
+    RMSE_offset_remnenmf(run,:) = RMSE(1,:);
+    RMSE_gain_remnenmf(run,:) = RMSE(2,:);
+    % incal
+    [T_incal, RMSE] = IN_Cal( W , X , Ginit , Finit , Omega_G , Omega_F , Phi_G , Phi_F , F_theo , Tmax, delta_measure );
+    RMSE_offset_incal(run,:) = RMSE(1,:);
+    RMSE_gain_incal(run,:) = RMSE(2,:);    
+    
+end
+
+% remnenmf
+min_offset_remnenmf = min(RMSE_offset_remnenmf,[],1,'omitnan');
+med_offset_remnenmf = median(RMSE_offset_remnenmf,1,'omitnan');
+max_offset_remnenmf = max(RMSE_offset_remnenmf,[],1,'omitnan');
+
+min_gain_remnenmf = min(RMSE_gain_remnenmf,[],1,'omitnan');
+med_gain_remnenmf = median(RMSE_gain_remnenmf,1,'omitnan');
+max_gain_remnenmf = max(RMSE_gain_remnenmf,[],1,'omitnan');
+
+subplot(121)
+semilogy(T_remnenmf,min_offset_remnenmf,'b')
+hold on
+semilogy(T_remnenmf,med_offset_remnenmf,'b')
+o_e = semilogy(T_remnenmf,max_offset_remnenmf,'b');
+hold off
+
+subplot(122)
+semilogy(T_remnenmf,min_gain_remnenmf,'b')
+hold on
+semilogy(T_remnenmf,med_gain_remnenmf,'b')
+g_e = semilogy(T_remnenmf,max_gain_remnenmf,'b');
+hold off
+
+% incal
+min_offset_incal = min(RMSE_offset_incal,[],1,'omitnan');
+med_offset_incal = median(RMSE_offset_incal,1,'omitnan');
+max_offset_incal = max(RMSE_offset_incal,[],1,'omitnan');
+
+min_gain_incal = min(RMSE_gain_incal,[],1,'omitnan');
+med_gain_incal = median(RMSE_gain_incal,1,'omitnan');
+max_gain_incal = max(RMSE_gain_incal,[],1,'omitnan');
+
+subplot(121)
+hold on
+semilogy(T_incal,min_offset_incal,'r')
+semilogy(T_incal,med_offset_incal,'r')
+o_i = semilogy(T_incal,max_offset_incal,'r');
+hold off
+
+subplot(122)
+hold on
+semilogy(T_incal,min_gain_incal,'r')
+semilogy(T_incal,med_gain_incal,'r')
+g_i = semilogy(T_incal,max_gain_incal,'r');
+hold off
+
+% adding titles and legends
+subplot(121)
+title('RMSE offset')
+legend([o_e o_i],'REMNeNMF','IN\_Cal')
+
+subplot(122)
+title('RMSE gain')
+legend([g_e g_i],'REMNeNMF','IN\_Cal')
+

+ 0 - 0
Python/__init__.py


+ 0 - 0
Python/calibrationMethods/__init__.py


+ 135 - 0
Python/calibrationMethods/emwamsgrad.py

@@ -0,0 +1,135 @@
+import numpy as np
+import time
+import matplotlib.pyplot as plt
+
+
+def emwamsgrad(data, G, F, r, Tmax):
+    tol = 1e-5
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1  #
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    # RRE = np.empty(shape=(em_iter_max + 1))
+    # RRE.fill(np.nan)
+
+    ITER_MAX = 100  # maximum inner iteration number (Default)
+    ITER_MIN = 5  # minimum inner iteration number (Default)
+
+    np.put(F, data.idxOF, data.sparsePhi_F)
+    np.put(G, data.idxOG, data.sparsePhi_G)
+    X = data.X + np.multiply(data.nW, np.dot(G, F))
+
+    FXt = np.dot(F, X.T)
+    FFt = np.dot(F, F.T)
+
+    GtX = np.dot(G.T, X)
+    GtG = np.dot(G.T, G)
+
+    GradG = np.dot(G, FFt) - FXt.T
+    GradF = np.dot(GtG, F) - GtX
+
+    init_delta = stop_rule(np.hstack((G.T, F)), np.hstack((GradG.T, GradF)))
+    tolF = tol * init_delta
+    tolG = tolF  # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        X = data.X + np.multiply(data.nW, np.dot(G.T, F))
+
+        # Maximisation step
+        # Optimize F with fixed G
+        np.put(F, data.idxOF, 0)
+        F, iterF, _ = amsgrad(F, GtG, GtX - GtG.dot(data.Phi_F), ITER_MIN, ITER_MAX, tolF, data.idxOF, False)
+        np.put(F, data.idxOF, data.sparsePhi_F)
+        # print(F[:,0:5])
+        if iterF <= ITER_MIN:
+            tolF = tolF / 10
+            # print('Tweaked F tolerance to '+str(tolF))
+        FFt = np.dot(F, F.T)
+        FXt = np.dot(F, X.T)
+
+        # Optimize G with fixed F
+        np.put(G.T, data.idxOG, 0)
+        G, iterG, _ = amsgrad(G, FFt, FXt - FFt.dot(data.Phi_G.T), ITER_MIN, ITER_MAX, tolG, data.idxOG, True)
+        np.put(G.T, data.idxOG, data.sparsePhi_G)
+        if iterG <= ITER_MIN:
+            tolG = tolG / 10
+            # print('Tweaked G tolerance to '+str(tolG))
+        GtG = np.dot(G, G.T)
+        GtX = np.dot(G, X)
+
+        if time.time() - t - k * delta_measure >= delta_measure:
+            k = k + 1
+            if k >= em_iter_max + 1:
+                break
+            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[k] = time.time() - t
+            # RRE[k] = nmf_norm_fro(data.Xtheo, G.T, F, data.W)
+            # if k%100==0:
+            #     print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k]))
+    # plt.semilogy(RRE)
+    # plt.show()
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad, 2)
+
+
+def amsgrad(Z, GtG, GtX, iterMin, iterMax, tol, idxfixed, transposed):
+
+    grad = np.dot(GtG, Z) - GtX  # Gradient
+    m = 0
+    v = 0
+    v_hat = 0
+    alpha = 1e-3
+    beta1 = 0.9
+    beta2 = 0.999
+    eps = 1e-8
+
+    for iter in range(1, iterMax + 1):
+        bias_correction1 = 1 - beta1 ** iter
+        bias_correction2 = 1 - beta2 ** iter
+        # bias_correction1 = 1 - beta1
+        # bias_correction2 = 1 - beta2
+        m = beta1 * m + (1 - beta1) * grad
+        v = beta2 * v + (1 - beta2) * np.square(grad)
+        v_hat = np.maximum(v_hat, v)
+        denom = np.sqrt(v_hat)/np.sqrt(bias_correction2) + eps
+        Z = Z - (alpha/bias_correction1) * m / denom
+        if transposed:  # If Z = G.T
+            np.put(Z.T, idxfixed, 0)
+        else:  # If Z = F
+            np.put(Z, idxfixed, 0)
+        Z = np.maximum(Z, 0)
+        # Stopping criteria
+        if iter >= iterMin:
+            # Lin's stopping criteria
+            pgn = stop_rule(Z, grad)
+            if pgn <= tol:
+                break
+        grad = np.dot(GtG, Z) - GtX
+    return Z, iter, grad
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f
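
Written out, the `amsgrad` helper above runs the AMSGrad variant of Adam on the quadratic subproblem, with the hyper-parameters hard-coded in the function (α = 1e-3, β₁ = 0.9, β₂ = 0.999, ε = 1e-8). With g the current gradient and t the inner iteration counter, each step is

$$
m \leftarrow \beta_1 m + (1-\beta_1)\,g,\qquad
v \leftarrow \beta_2 v + (1-\beta_2)\,g^{\odot 2},\qquad
\hat v \leftarrow \max(\hat v,\, v),
$$
$$
Z \leftarrow \Big[\, Z - \frac{\alpha}{1-\beta_1^{t}}\, \frac{m}{\sqrt{\hat v/(1-\beta_2^{t})}+\varepsilon} \,\Big]_{+},
$$

with the fixed entries given by `idxfixed` zeroed, matching the loop body above.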

+ 198 - 0
Python/calibrationMethods/emwfnnls.py

@@ -0,0 +1,198 @@
+import numpy as np
+import time
+from numpy import float64, sum,  nonzero, newaxis, finfo
+
+
+nu = newaxis
+
+
+def emwfnnls(data, G, F, r, Tmax):
+    tol = 0
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1  #
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    # RRE = np.empty(shape=(em_iter_max + 1))
+    # RRE.fill(np.nan)
+
+    np.put(F, data.idxOF, data.sparsePhi_F)
+    np.put(G, data.idxOG, data.sparsePhi_G)
+    X = data.X + np.multiply(data.nW, np.dot(G, F))
+
+    GtX = np.dot(G.T, X)
+    GtG = np.dot(G.T, G)
+
+    # Iterative updating
+    G = G.T
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        X = data.X + np.multiply(data.nW, np.dot(G.T, F))
+
+        # Maximisation step
+        # Optimize F with fixed G
+        np.put(F, data.idxOF, 0)
+        RhsF = GtX - GtG.dot(data.Phi_F)
+        for col in range(F.shape[1]):  # fnnls handles one right-hand side at a time
+            F[:, col] = fnnls(GtG, RhsF[:, col])
+        np.put(F, data.idxOF, data.sparsePhi_F)
+        FFt = np.dot(F, F.T)
+        FXt = np.dot(F, X.T)
+
+        # Optimize G with fixed F
+        np.put(G.T, data.idxOG, 0)
+        RhsG = FXt - FFt.dot(data.Phi_G.T)
+        for col in range(G.shape[1]):  # one column of the transposed G at a time
+            G[:, col] = fnnls(FFt, RhsG[:, col])
+        np.put(G.T, data.idxOG, data.sparsePhi_G)
+        GtG = np.dot(G, G.T)
+        GtX = np.dot(G, X)
+
+        if time.time() - t - k * delta_measure >= delta_measure:
+            k = k + 1
+            if k >= em_iter_max + 1:
+                break
+            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[k] = time.time() - t
+            # RRE[k] = nmf_norm_fro(data.Xtheo, G.T, F, data.W)
+            # if k%100==0:
+            #     print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k]))
+    # plt.semilogy(RRE)
+    # plt.show()
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad, 2)
+
+
+# machine epsilon
+eps = finfo(float64).eps
+
+
+def any(a):
+    # assuming a vector, a
+    larger_than_zero = sum(a > 0)
+    if larger_than_zero:
+        return True
+    else:
+        return False
+
+
+def find_nonzero(a):
+    # returns indices of nonzero elements in a
+    return nonzero(a)[0]
+
+
+def fnnls(AtA, Aty, epsilon=None, iter_max=None):
+    """
+    Given a matrix A and vector y, find x which minimizes the objective function
+    f(x) = ||Ax - y||^2.
+    This algorithm is similar to the widespread Lawson-Hanson method, but
+    implements the optimizations described in the paper
+    "A Fast Non-Negativity-Constrained Least Squares Algorithm" by
+    Rasmus Bro and Sijmen de Jong.
+    Note that the inputs are not A and y, but are
+    A^T * A and A^T * y
+    This is to avoid incurring the overhead of computing these products
+    many times in cases where we need to call this routine many times.
+    :param AtA:       A^T * A. See above for definitions. If A is an (m x n)
+                      matrix, this should be an (n x n) matrix.
+    :type AtA:        numpy.ndarray
+    :param Aty:       A^T * y. See above for definitions. If A is an (m x n)
+                      matrix and y is an m dimensional vector, this should be an
+                      n dimensional vector.
+    :type Aty:        numpy.ndarray
+    :param epsilon:   Anything less than this value is considered 0 in the code.
+                      Use this to prevent issues with floating point precision.
+                      Defaults to the machine precision for doubles.
+    :type epsilon:    float
+    :param iter_max:  Maximum number of inner loop iterations. Defaults to
+                      30 * [number of cols in A] (the same value that is used
+                      in the publication this algorithm comes from).
+    :type iter_max:   int, optional
+    """
+    if epsilon is None:
+        epsilon = np.finfo(np.float64).eps
+
+    n = AtA.shape[0]
+
+    if iter_max is None:
+        iter_max = 30 * n
+
+    if Aty.ndim != 1 or Aty.shape[0] != n:
+        raise ValueError('Invalid dimension; got Aty vector of size {}, ' \
+                         'expected {}'.format(Aty.shape, n))
+
+    # Represents passive and active sets.
+    # If sets[j] is 0, then index j is in the active set (R in literature).
+    # Else, it is in the passive set (P).
+    sets = np.zeros(n, dtype=bool)  # builtin bool: np.bool is deprecated
+    # The set of all possible indices. Construct P, R by using `sets` as a mask
+    ind = np.arange(n, dtype=int)
+    P = ind[sets]
+    R = ind[~sets]
+
+    x = np.zeros(n, dtype=np.float64)
+    w = Aty
+    s = np.zeros(n, dtype=np.float64)
+
+    i = 0
+    # While R not empty and max_(n \in R) w_n > epsilon
+    while not np.all(sets) and np.max(w[R]) > epsilon and i < iter_max:
+        # Find index of maximum element of w which is in active set.
+        j = np.argmax(w[R])
+        # We have the index in MASKED w.
+        # The real index is stored in the j-th position of R.
+        m = R[j]
+
+        # Move index from active set to passive set.
+        sets[m] = True
+        P = ind[sets]
+        R = ind[~sets]
+
+        # Get the rows, cols in AtA corresponding to P
+        AtA_in_p = AtA[P][:, P]
+        # Do the same for Aty
+        Aty_in_p = Aty[P]
+
+        # Update s. Solve (AtA)^p * s^p = (Aty)^p
+        s[P] = np.linalg.lstsq(AtA_in_p, Aty_in_p, rcond=None)[0]
+        s[R] = 0.
+
+        while np.any(s[P] <= epsilon):
+            i += 1
+
+            mask = (s[P] <= epsilon)
+            alpha = np.min(x[P][mask] / (x[P][mask] - s[P][mask]))
+            x += alpha * (s - x)
+
+            # Move all indices j in P such that x[j] = 0 to R
+            # First get all indices where x == 0 in the MASKED x
+            zero_mask = (x[P] < epsilon)
+            # These correspond to indices in P
+            zeros = P[zero_mask]
+            # Finally, update the passive/active sets.
+            sets[zeros] = False
+            P = ind[sets]
+            R = ind[~sets]
+
+            # Get the rows, cols in AtA corresponding to P
+            AtA_in_p = AtA[P][:, P]
+            # Do the same for Aty
+            Aty_in_p = Aty[P]
+
+            # Update s. Solve (AtA)^p * s^p = (Aty)^p
+            s[P] = np.linalg.lstsq(AtA_in_p, Aty_in_p, rcond=None)[0]
+            s[R] = 0.
+
+        x = s.copy()
+        w = Aty - AtA.dot(x)
+
+    return x
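
A minimal usage sketch of the `fnnls` routine above, assuming it can be imported from this module; `A` and `y` below are made-up data. The routine takes the normal-equation products AᵀA and Aᵀy for a single right-hand side and returns the nonnegative least-squares solution:

```python
import numpy as np
from calibrationMethods.emwfnnls import fnnls  # assuming the Python/ folder is on the path

rng = np.random.default_rng(0)
A = np.abs(rng.standard_normal((8, 3)))   # hypothetical design matrix
y = rng.standard_normal(8)                # hypothetical observations

x = fnnls(A.T @ A, A.T @ y)               # argmin_{x >= 0} ||A x - y||^2
print(x, bool(np.all(x >= 0)))
```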

+ 102 - 0
Python/calibrationMethods/emwmunmf.py

@@ -0,0 +1,102 @@
+import numpy as np
+import time
+import matplotlib.pyplot as plt
+
+
+def emwmunmf(data, G, F, r, Tmax):
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1  #
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    # RRE = np.empty(shape=(em_iter_max + 1))
+    # RRE.fill(np.nan)
+    secu = 1e-12
+    M_loop = 5
+    np.put(F, data.idxOF, data.sparsePhi_F)
+    np.put(G, data.idxOG, data.sparsePhi_G)
+
+    delta_G = G
+    delta_F = F
+
+    # Iterative updating
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        X = data.X + np.multiply(data.nW, np.dot(G, F))
+
+        # Maximisation step
+        for _ in range(M_loop):
+            # Optimize G with fixed F
+            np.put(delta_G, data.idxOG, 0)
+            delta_G = np.divide(
+                np.multiply(
+                    delta_G,
+                    np.dot(
+                        secu_plus(X - data.Phi_G.dot(F), secu),
+                        F.T
+                    )
+                ),
+                np.dot(
+                    delta_G.dot(F),
+                    F.T
+                )
+            )
+            delta_G[np.isnan(delta_G)] = 0
+            G = delta_G
+            np.put(G, data.idxOG, data.sparsePhi_G)
+
+            # Optimize F with fixed G
+            np.put(F, data.idxOF, 0)
+            delta_F = np.divide(
+                np.multiply(
+                    delta_F,
+                    np.dot(
+                        G.T,
+                        secu_plus(X - G.dot(data.Phi_F), secu)
+                    )
+                ),
+                np.dot(
+                    G.T,
+                    G.dot(delta_F)
+                )
+            )
+            delta_F[np.isnan(delta_F)] = 0
+            F = delta_F
+            np.put(F, data.idxOF, data.sparsePhi_F)
+
+        if time.time() - t - k * delta_measure >= delta_measure:
+            k = k + 1
+            if k >= em_iter_max + 1:
+                break
+            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[k] = time.time() - t
+            # RRE[k] = nmf_norm_fro(data.Xtheo, G.T, F, data.W)
+            # if k%100==0:
+            #     print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k]))
+    # plt.semilogy(RRE)
+    # plt.show()
+    return {'RMSE': RMSE, 'T': T}
+
+
+def secu_plus(tutu, s):
+    toto = np.maximum(tutu, s)
+    toto[np.isnan(tutu)] = 0
+    return toto
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f

+ 118 - 0
Python/calibrationMethods/emwnenmf.py

@@ -0,0 +1,118 @@
+import numpy as np
+import time
+
+
+def emwnenmf(data, G, F, r, Tmax):
+    tol = 1e-3
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1  #
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    # RRE = np.empty(shape=(em_iter_max + 1))
+    # RRE.fill(np.nan)
+    M_loop = 2  # Number of passes over the M step
+    ITER_MAX = 100  # maximum inner iteration number (Default)
+    ITER_MIN = 5  # minimum inner iteration number (Default)
+
+    X = data.X + np.multiply(data.nW, np.dot(G, F))
+
+    FXt = np.dot(F, X.T)
+    FFt = np.dot(F, F.T)
+
+    GtX = np.dot(G.T, X)
+    GtG = np.dot(G.T, G)
+
+    GradG = np.dot(G, FFt) - FXt.T
+    GradF = np.dot(GtG, F) - GtX
+
+    init_delta = stop_rule(np.hstack((G.T, F)), np.hstack((GradG.T, GradF)))
+    tolF = tol * init_delta
+    tolG = tolF  # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    GtX = np.dot(G, X) - GtG.dot(data.Phi_F)
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        if (k+1) % M_loop:
+            X = data.X + np.multiply(data.nW, np.dot(G.T, F))
+
+        # Maximisation step
+        # Optimize F with fixed G
+        np.put(F, data.idxOF, 0)
+        F, iterF, _ = NNLS(F, GtG, GtX, ITER_MAX, tolF, data.idxOF, False)
+        np.put(F, data.idxOF, data.sparsePhi_F)
+        # print(F[:,0:5])
+        if iterF <= ITER_MIN:
+            tolF = tolF / 10
+            # print('Tweaked F tolerance to '+str(tolF))
+        FFt = np.dot(F, F.T)
+        FXt = np.dot(F, X.T) - FFt.dot(data.Phi_G.T)
+
+        # Optimize G with fixed F
+        np.put(G.T, data.idxOG, 0)
+        G, iterG, _ = NNLS(G, FFt, FXt, ITER_MAX, tolG, data.idxOG, True)
+        np.put(G.T, data.idxOG, data.sparsePhi_G)
+        if iterG <= ITER_MIN:
+            tolG = tolG / 10
+            # print('Tweaked G tolerance to '+str(tolG))
+        GtG = np.dot(G, G.T)
+        GtX = np.dot(G, X) - GtG.dot(data.Phi_F)
+
+        if time.time() - t - k * delta_measure >= delta_measure:
+            k = k + 1
+            if k >= em_iter_max + 1:
+                break
+            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[k] = time.time() - t
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad, 2)
+
+
+def NNLS(Z, GtG, GtX, iterMax, tol, idxfixed, transposed):
+    L = np.linalg.norm(GtG, 2)  # Lipschitz constant
+    H0 = Z  # Initialization
+    Grad = np.dot(GtG, Z) - GtX  # Gradient
+    alpha1 = 1
+
+    for iter in range(1, iterMax + 1):
+        H = np.maximum(Z - (1 / L) * Grad, 0)  # Calculate sequence 'Y'
+
+        if transposed:  # If Z = G.T
+            np.put(H.T, idxfixed, 0)
+        else:  # If Z = F
+            np.put(H, idxfixed, 0)
+        alpha2 = 0.5 * (1 + np.sqrt(1 + 4 * alpha1 ** 2))
+        Z = H + ((alpha1 - 1) / alpha2) * (H - H0)
+        alpha1 = alpha2
+        Grad = np.dot(GtG, Z) - GtX
+        H0 = H
+        # Lin's stopping criteria
+        pgn = stop_rule(Z, Grad)
+        if pgn <= tol:
+            break
+    return H, iter, Grad
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f

+ 125 - 0
Python/calibrationMethods/emwnenmf_break.py

@@ -0,0 +1,125 @@
+import numpy as np
+import time
+import matplotlib.pyplot as plt
+
+
+def emwnenmf_break(data, G, F, r, Tmax):
+    tol = 1e-5
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1  #
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    ITER_MAX = 200  # maximum inner iteration number (Default)
+    ITER_MIN = 10  # minimum inner iteration number (Default)
+
+    np.put(F, data.idxOF, data.sparsePhi_F)
+    np.put(G, data.idxOG, data.sparsePhi_G)
+    X = data.X + np.multiply(data.nW, np.dot(G, F))
+
+    FXt = np.dot(F, X.T)
+    FFt = np.dot(F, F.T)
+
+    GtX = np.dot(G.T, X)
+    GtG = np.dot(G.T, G)
+
+    GradG = np.dot(G, FFt) - FXt.T
+    GradF = np.dot(GtG, F) - GtX
+
+    init_delta = stop_rule(np.hstack((G.T, F)), np.hstack((GradG.T, GradF)))
+    tolF = tol * init_delta
+    tolG = tolF  # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        X = data.X + np.multiply(data.nW, np.dot(G.T, F))
+
+        # Maximisation step
+        # Optimize F with fixed G
+        np.put(F, data.idxOF, 0)
+        F, iterF, _ = NNLS(F, GtG, GtX - GtG.dot(data.Phi_F), ITER_MIN, ITER_MAX, tolF, data.idxOF, False)
+        np.put(F, data.idxOF, data.sparsePhi_F)
+        # print(F[:,0:5])
+        if iterF <= ITER_MIN:
+            tolF = tolF / 10
+            # print('Tweaked F tolerance to '+str(tolF))
+        FFt = np.dot(F, F.T)
+        FXt = np.dot(F, X.T)
+
+        # Optimize G with fixed F
+        np.put(G.T, data.idxOG, 0)
+        G, iterG, _ = NNLS(G, FFt, FXt - FFt.dot(data.Phi_G.T), ITER_MIN, ITER_MAX, tolG, data.idxOG, True)
+        np.put(G.T, data.idxOG, data.sparsePhi_G)
+        if iterG <= ITER_MIN:
+            tolG = tolG / 10
+            # print('Tweaked G tolerance to '+str(tolG))
+        GtG = np.dot(G, G.T)
+        GtX = np.dot(G, X)
+
+        if time.time() - t - k * delta_measure >= delta_measure:
+            k = k + 1
+            if k >= em_iter_max + 1:
+                break
+            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[k] = time.time() - t
+            # if k%100==0:
+            #     print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k]))
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad, 2)
+
+
+def NNLS(Z, GtG, GtX, iterMin, iterMax, tol, idxfixed, transposed):
+    L = np.linalg.norm(GtG, 2)  # Lipschitz constant
+    H = Z  # Initialization
+    Grad = np.dot(GtG, Z) - GtX  # Gradient
+    alpha1 = np.ones(shape=(2, 1))
+
+    for iter in range(1, iterMax + 1):
+        H0 = H
+        H = np.maximum(Z - Grad / L, 0)  # Calculate sequence 'Y'
+        grad_scheme = np.greater(Grad.dot(H.T - H0.T), 0)
+        if np.any(grad_scheme[:, 0]):
+            break
+        if np.any(grad_scheme[:, 1]):
+            break
+        if transposed:  # If Z = G.T
+            np.put(H.T, idxfixed, 0)
+        else:  # If Z = F
+            np.put(H, idxfixed, 0)
+        alpha2 = 0.5 * (1 + np.sqrt(1 + 4 * alpha1 ** 2))
+        Z = H + ((alpha1 - 1) / alpha2) * (H - H0)
+        alpha1 = alpha2
+        Grad = np.dot(GtG, Z) - GtX
+
+        # Stopping criteria
+        if iter >= iterMin:
+            # Lin's stopping criteria
+            pgn = stop_rule(Z, Grad)
+            if pgn <= tol:
+                break
+    return H, iter, Grad
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f

+ 170 - 0
Python/calibrationMethods/emwnenmf_comp.py

@@ -0,0 +1,170 @@
+import numpy as np
+import time
+
+
+def emwnenmf_comp(data, G, F, r, Tmax):
+    OutIter = 50
+    tol = 1e-5
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1  #
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    ITER_MAX = 200  # maximum inner iteration number (Default)
+    ITER_MIN = 10  # minimum inner iteration number (Default)
+
+    np.put(F, data.idxOF, data.sparsePhi_F)
+    np.put(G, data.idxOG, data.sparsePhi_G)
+    Xcomp = data.X + np.multiply(data.nW, np.dot(G, F))
+
+    FXt = np.dot(F, Xcomp.T)
+    FFt = np.dot(F, F.T)
+
+    GtX = np.dot(G.T, Xcomp)
+    GtG = np.dot(G.T, G)
+
+    GradG = np.dot(G, FFt) - FXt.T
+    GradF = np.dot(GtG, F) - GtX
+
+    init_delta = stop_rule(np.hstack((G.T, F)), np.hstack((GradG.T, GradF)))
+    tolF = tol * init_delta
+    tolG = tolF  # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    to_comp = False
+    trigger_comp = 1e-5
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        Xcomp = data.X + np.multiply(data.nW, np.dot(G.T, F))
+        if to_comp:
+            # Compress left and right
+            L, R = RSI_compression(Xcomp, r)
+            X_L = np.dot(L, Xcomp)
+            X_R = np.dot(Xcomp, R)
+        # Maximisation step
+        for _ in range(OutIter):
+            # Optimize F with fixed G
+            np.put(F, data.idxOF, 0)
+            F, iterF, _ = NNLS(F, GtG, GtX - GtG.dot(data.Phi_F), ITER_MIN, ITER_MAX, tolF, data.idxOF, False)
+            np.put(F, data.idxOF, data.sparsePhi_F)
+            if iterF <= ITER_MIN:
+                tolF = tolF / 10
+                # print('Tweaked F tolerance to '+str(tolF))
+            if to_comp:
+                F_comp = F.dot(R)
+                FFt = np.dot(F_comp, F_comp.T)
+                FXt = np.dot(F_comp, X_R.T)
+            else:
+                FFt = np.dot(F, F.T)
+                FXt = np.dot(F, Xcomp.T)
+
+            # Optimize G with fixed F
+            np.put(G.T, data.idxOG, 0)
+            G, iterG, _ = NNLS(G, FFt, FXt - FFt.dot(data.Phi_G.T), ITER_MIN, ITER_MAX, tolG, data.idxOG, True)
+            np.put(G.T, data.idxOG, data.sparsePhi_G)
+            if iterG <= ITER_MIN:
+                tolG = tolG / 10
+                # print('Tweaked G tolerance to '+str(tolG))
+            if to_comp:
+                G_comp = G.dot(L.T)
+                GtG = np.dot(G_comp, G_comp.T)
+                GtX = np.dot(G_comp, X_L)
+            else:
+                GtG = np.dot(G, G.T)
+                GtX = np.dot(G, Xcomp)
+
+            if time.time() - t - k * delta_measure >= delta_measure:
+                k = k + 1
+                if k >= em_iter_max + 1:
+                    break
+                RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+                T[k] = time.time() - t
+                # if k%100==0:
+                #     print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k]))
+            if not to_comp:
+                if nmf_norm_fro(Xcomp, G.T, F) > trigger_comp:
+                    break
+                else:
+                    to_comp = True
+                    print(time.time()-t)
+                    break
+
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad, 2)
+
+
+def NNLS(Z, GtG, GtX, iterMin, iterMax, tol, idxfixed, transposed):
+    L = np.linalg.norm(GtG, 2)  # Lipschitz constant
+    H = Z  # Initialization
+    Grad = np.dot(GtG, Z) - GtX  # Gradient
+    alpha1 = 1
+
+    for iter in range(1, iterMax + 1):
+        H0 = H
+        H = np.maximum(Z - Grad / L, 0)  # Calculate sequence 'Y'
+
+        if transposed:  # If Z = G.T
+            np.put(H.T, idxfixed, 0)
+        else:  # If Z = F
+            np.put(H, idxfixed, 0)
+        alpha2 = 0.5 * (1 + np.sqrt(1 + 4 * alpha1 ** 2))
+        Z = H + ((alpha1 - 1) / alpha2) * (H - H0)
+        alpha1 = alpha2
+        Grad = np.dot(GtG, Z) - GtX
+
+        # Stopping criteria
+        if iter >= iterMin:
+            # Lin's stopping criteria
+            pgn = stop_rule(Z, Grad)
+            if pgn <= tol:
+                break
+    return H, iter, Grad
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f
+
+
+def RSI_compression(X, r):
+    compressionLevel = 20
+    m, n = X.shape
+    l = min(n, max(compressionLevel, r + 10))
+
+    OmegaL = np.random.randn(n, l)
+    Y = np.dot(X, OmegaL)
+    for _ in range(4):
+        Y = np.linalg.qr(Y, mode='reduced')[0]
+        S = np.dot(X.T, Y)
+        Z = np.linalg.qr(S, mode='reduced')[0]
+        Y = np.dot(X, Z)
+    L = np.linalg.qr(Y, mode='reduced')[0].T
+
+    OmegaR = np.random.randn(l, m)
+    Y = np.dot(OmegaR, X)
+    for _ in range(4):
+        Y = np.linalg.qr(Y.T, mode='reduced')[0]
+        S = np.dot(X, Y)
+        Z = np.linalg.qr(S, mode='reduced')[0]
+        Y = np.dot(Z.T, X)
+    R = np.linalg.qr(Y.T, mode='reduced')[0]
+    return L, R
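
A small shape-check sketch for `RSI_compression` above (illustrative; the dimensions are made up and only roughly match the calibration setting, e.g. 100 areas × 26 columns and rank r = 2). L and R are the left/right sketching matrices with orthonormal rows/columns, so that L·X and X·R are the compressed data used in the loop:

```python
import numpy as np
from calibrationMethods.emwnenmf_comp import RSI_compression  # assuming the Python/ folder is on the path

X = np.abs(np.random.randn(100, 26))       # hypothetical data matrix
L, R = RSI_compression(X, 2)

print(L.shape, R.shape)                    # (20, 100) and (26, 20): l = max(20, r + 10) = 20 here
print(np.allclose(L @ L.T, np.eye(20)))    # rows of L are orthonormal
print(np.allclose(R.T @ R, np.eye(20)))    # columns of R are orthonormal
```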

+ 138 - 0
Python/calibrationMethods/emwnenmf_m.py

@@ -0,0 +1,138 @@
+import numpy as np
+import time
+
+
+def emwnenmf_m(data, G, F, r, Tmax):
+    tol = 1e-3
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1  #
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    # RRE = np.empty(shape=(em_iter_max + 1))
+    # RRE.fill(np.nan)
+    M_loop = 2  # Number of passes over the M step
+    ITER_MAX = 100  # maximum inner iteration number (Default)
+    ITER_MIN = 5  # minimum inner iteration number (Default)
+
+    dataX = data.X
+    dataF = data.F
+    dataidxOF = data.idxOF
+    dataidxOG = data.idxOG
+    datanW = data.nW
+    dataPhi_F = data.Phi_F
+    dataPhi_G = data.Phi_G
+    datasparsePhi_F = data.sparsePhi_F
+    datasparsePhi_G = data.sparsePhi_G
+
+
+    X = dataX + np.multiply(datanW, np.dot(G, F))
+
+    XFt = np.dot(X, F.T)
+    FFt = np.dot(F, F.T)
+
+    GtX = np.dot(G.T, X)
+    GtG = np.dot(G.T, G)
+
+    GradG = np.dot(G, FFt) - XFt
+    GradF = np.dot(GtG, F) - GtX
+
+    init_delta = stop_rule(np.hstack((G.T, F)), np.hstack((GradG.T, GradF)))
+    tolF = tol * init_delta
+    tolG = tolF  # Stopping tolerance
+
+    # Iterative updating
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - dataF[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    niter = 0
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        X = dataX + np.multiply(datanW, np.dot(G, F))
+
+        # Maximisation step
+        for _ in range(M_loop):
+            FFt = F.dot(F.T)
+            XFt = X.dot(F.T) - dataPhi_G.dot(FFt)
+            np.put(G, dataidxOG, 0)
+            G, iterG = maj_G(G, FFt, XFt, ITER_MAX, tolG, dataidxOG)
+            np.put(G, dataidxOG, datasparsePhi_G)
+            if iterG <= ITER_MIN:
+                tolG = 0.1 * tolG
+            niter = niter + iterG
+
+            GtG = G.T.dot(G)
+            GtX = G.T.dot(X) - GtG.dot(dataPhi_F)
+            np.put(F, dataidxOF, 0)
+            F, iterF = maj_F(F, GtG, GtX, ITER_MAX, tolF, dataidxOF)
+            np.put(F, dataidxOF, datasparsePhi_F)
+            if iterF <= ITER_MIN:
+                tolF = 0.1 * tolF
+            niter = niter + iterF
+
+            if time.time() - t - k * delta_measure >= delta_measure:
+                k = k + 1
+                if k >= em_iter_max + 1:
+                    break
+                RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - dataF[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+                T[k] = time.time() - t
+
+    print(niter)
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad)
+
+
+def maj_G(G1, FFt, XFt, ITER_MAX, tolG, idxOG):
+    Y = G1
+    alpha1 = 1
+    L = np.linalg.norm(FFt)
+    Grad_y = Y.dot(FFt) - XFt
+
+    for i in range(1, ITER_MAX + 1):
+        G2 = np.maximum(Y - (1 / L) * Grad_y, 0)
+        np.put(G2, idxOG, 0)
+        alpha2 = (1 + np.sqrt(4 * alpha1 ** 2 + 1)) / 2
+
+        Y = G2 + ((alpha1 - 1) / alpha2) * (G2 - G1)
+        Grad_y = Y.dot(FFt) - XFt
+
+        G1 = G2
+        alpha1 = alpha2
+
+        if stop_rule(Y, Grad_y) <= tolG:
+            break
+
+    return G1, i
+
+
+def maj_F(F1, GtG, GtX, ITER_MAX, tolF, idxOF):
+    Y = F1
+    alpha1 = 1
+    L = np.linalg.norm(GtG)
+    Grad_y = GtG.dot(Y) - GtX
+
+    for i in range(1, ITER_MAX + 1):
+        F2 = np.maximum(Y - (1 / L) * Grad_y, 0)
+        np.put(F2, idxOF, 0)
+        alpha2 = (1 + np.sqrt(4 * alpha1 ** 2 + 1)) / 2
+
+        Y = F2 + ((alpha1 - 1) / alpha2) * (F2 - F1)
+        Grad_y = GtG.dot(Y) - GtX
+
+        F1 = F2
+        alpha1 = alpha2
+
+        if stop_rule(Y, Grad_y) <= tolF:
+            break
+
+    return F1, i

+ 147 - 0
Python/calibrationMethods/emwnenmf_old_10-03.py

@@ -0,0 +1,147 @@
+import numpy as np
+import time
+
+def emwnenmf(data,G,F,r,Tmax):
+    MinIter = 10
+    tol     = 1e-5
+    em_iter_max = round(Tmax/0.05)+1 # 
+    T       = np.empty(shape=(em_iter_max+1))
+    T.fill(np.nan)
+    RMSE    = np.empty(shape=(2,em_iter_max+1))
+    RMSE.fill(np.nan)
+
+    ITER_MAX=100  # maximum inner iteration number (Default)
+    ITER_MIN=15   # minimum inner iteration number (Default)
+
+    np.put(F,data.idxOF,data.sparsePhi_F)
+    np.put(G,data.idxOG,data.sparsePhi_G)
+    Xcomp = data.X + np.multiply(data.nW,np.dot(G,F))
+
+    FXt = np.dot(F,Xcomp.T)
+    FFt = np.dot(F,F.T)
+
+    GtX = np.dot(G.T,Xcomp)
+    GtG = np.dot(G.T,G)
+
+    GradG = np.dot(G,FFt)-FXt.T
+    GradF = np.dot(GtG,F)-GtX
+
+    init_delta = stop_rule(np.hstack((G.T,F)),np.hstack((GradG.T,GradF)))
+    tolF = max(tol,1e-3)*init_delta
+    tolG = tolF # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    k = 0
+    RMSE[:,k] = np.linalg.norm(F[:,0:-1]-data.F[:,0:-1],2,axis=1)/np.sqrt(F.shape[1]-1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time()-t <= Tmax+0.05:
+
+        # Estimation step
+        Xcomp = data.X + np.multiply(data.nW,np.dot(G.T,F))
+        # Compress left and right
+        L,R = RSI_compression(Xcomp,r)
+        X_L = np.dot(L,Xcomp)
+        X_R = np.dot(Xcomp,R)
+
+        # Maximisation step
+        # Optimize F with fixed G
+        F,iterF,_ = NNLS(F,GtG,GtX,ITER_MIN,ITER_MAX,tolF)
+        np.put(F,data.idxOF,data.sparsePhi_F)
+        # print(F[:,0:5])
+        if iterF<=ITER_MIN:
+            tolF = tolF/10
+            print('Tweaked F tolerance to '+str(tolF))
+        F_comp = np.dot(F,R)
+        FFt = np.dot(F_comp,F_comp.T)
+        FXt = np.dot(F_comp,X_R.T)
+        
+        # Optimize G with fixed F
+        G,iterG,GradG = NNLS(G,FFt,FXt,ITER_MIN,ITER_MAX,tolG)
+        np.put(G.T,data.idxOG,data.sparsePhi_G)
+        if iterG<=ITER_MIN:
+            tolG = tolG/10
+            print('Tweaked G tolerance to '+str(tolG))
+        G_comp = np.dot(G,L.T)
+        GtG = np.dot(G_comp,G_comp.T)
+        GtX = np.dot(G_comp,X_L)
+        GradF = np.dot(GtG,F) - GtX
+
+        # Stopping condition
+        # delta = stop_rule(np.hstack((G,F)),np.hstack((GradG,GradF)))
+        # if (delta<=tol*init_delta and k>=MinIter):
+        #     print('break before Tmax')
+        #     break
+
+        if time.time()-t - k*0.05 >= 0.05:
+            k = k+1
+            RMSE[:,k] = np.linalg.norm(F[:,0:-1]-data.F[:,0:-1],2,axis=1)/np.sqrt(F.shape[1]-1)
+            T[k] = time.time()-t
+            if k%100==0:
+                print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k])+'   '+str(nmf_norm_fro(Xcomp,G.T,F)))
+
+    # return {'G' : G.T, 'F' : F, 'RMSE' : RMSE, 'T': T, 'RMSEb' : RMSEb}
+    return {'G' : G.T, 'F' : F, 'RMSE' : RMSE, 'T': T}
+
+def stop_rule(X,GradX):
+    # Stopping criterion
+    pGrad = GradX[np.any(np.dstack((X>0,GradX<0)),2)]
+    return np.linalg.norm(pGrad,2)
+
+def NNLS(Z,GtG,GtX,iterMin,iterMax,tol):
+    L = np.linalg.norm(GtG,2) # Lipschitz constant
+    H = Z # Initialization
+    Grad = np.dot(GtG,Z)-GtX #Gradient
+    alpha1 = 1
+
+    for iter in range(1,iterMax+1):
+        H0 = H
+        H = np.maximum(Z-Grad/L,0) # Calculate sequence 'Y'
+        alpha2 = 0.5*(1+np.sqrt(1+4*alpha1**2))
+        Z = H + ((alpha1-1)/alpha2)*(H-H0)
+        alpha1 = alpha2
+        Grad=np.dot(GtG,Z)-GtX
+
+        # Stopping criteria
+        if iter>=iterMin:
+            # Lin's stopping criteria
+            pgn = stop_rule(Z,Grad)
+            if pgn <= tol:
+                break
+    return H,iter,Grad
+
+
+def nmf_norm_fro(X,G,F,*args):
+    W = args
+    if len(W)==0:
+        f = np.square(np.linalg.norm(X-np.dot(G,F),'fro'))/np.square(np.linalg.norm(X,'fro'))
+    else:
+        W=W[0]
+        f = np.square(np.linalg.norm(X-np.multiply(W,np.dot(G,F)),'fro'))/np.square(np.linalg.norm(X,'fro'))
+    return f
+
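+# RSI_compression: randomized subspace iteration (a Gaussian sketch refined by a few
+# QR-stabilized power iterations) returning orthonormal compression matrices
+# L (applied on the left of X) and R (applied on the right), so the factor updates
+# can work on the much smaller X_L = L X and X_R = X R.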
+def RSI_compression(X,r):
+    compressionLevel = 20
+    m,n = X.shape
+    l = min(n,max(compressionLevel,r+10))
+
+    OmegaL = np.random.randn(n,l)
+    Y = np.dot(X,OmegaL)
+    for _ in range(4):
+        Y = np.linalg.qr(Y,mode='reduced')[0]
+        S = np.dot(X.T,Y)
+        Z = np.linalg.qr(S,mode='reduced')[0]
+        Y = np.dot(X,Z)
+    L = np.linalg.qr(Y,mode='reduced')[0].T
+
+    OmegaR = np.random.randn(l,m)
+    Y = np.dot(OmegaR,X)
+    for _ in range(4):
+        Y = np.linalg.qr(Y.T,mode='reduced')[0]
+        S = np.dot(X,Y)
+        Z = np.linalg.qr(S,mode='reduced')[0]
+        Y = np.dot(Z.T,X)
+    R = np.linalg.qr(Y.T,mode='reduced')[0]
+    return L,R

+ 127 - 0
Python/calibrationMethods/emwnenmf_restart.py

@@ -0,0 +1,127 @@
+import numpy as np
+import time
+import matplotlib.pyplot as plt
+
+
+def emwnenmf_restart(data, G, F, r, Tmax):
+    tol = 1e-5
+    delta_measure = 1
+    em_iter_max = round(Tmax / delta_measure) + 1
+    T = np.empty(shape=(em_iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, em_iter_max + 1))
+    RMSE.fill(np.nan)
+
+    ITER_MAX = 200  # maximum inner iteration number (Default)
+    ITER_MIN = 10  # minimum inner iteration number (Default)
+
+    np.put(F, data.idxOF, data.sparsePhi_F)
+    np.put(G, data.idxOG, data.sparsePhi_G)
+    X = data.X + np.multiply(data.nW, np.dot(G, F))
+
+    FXt = np.dot(F, X.T)
+    FFt = np.dot(F, F.T)
+
+    GtX = np.dot(G.T, X)
+    GtG = np.dot(G.T, G)
+
+    GradG = np.dot(G, FFt) - FXt.T
+    GradF = np.dot(GtG, F) - GtX
+
+    init_delta = stop_rule(np.hstack((G.T, F)), np.hstack((GradG.T, GradF)))
+    tolF = tol * init_delta
+    tolG = tolF  # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    k = 0
+    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
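+        # EM structure without compression (descriptive note): the estimation step
+        # completes the missing entries of X with the current model, then each factor
+        # is updated by NNLS on its free entries only; the contribution of the fixed
+        # calibration entries (Phi_F / Phi_G) is moved to the right-hand side
+        # (GtX - GtG.dot(data.Phi_F), FXt - FFt.dot(data.Phi_G.T)) and the fixed
+        # values are written back after each solve.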
+        # Estimation step
+        X = data.X + np.multiply(data.nW, np.dot(G.T, F))
+
+        # Maximisation step
+        # Optimize F with fixed G
+        np.put(F, data.idxOF, 0)
+        F, iterF, _ = NNLS(F, GtG, GtX - GtG.dot(data.Phi_F), ITER_MIN, ITER_MAX, tolF, data.idxOF, False)
+        np.put(F, data.idxOF, data.sparsePhi_F)
+        # print(F[:,0:5])
+        if iterF <= ITER_MIN:
+            tolF = tolF / 10
+            # print('Tweaked F tolerance to '+str(tolF))
+        FFt = np.dot(F, F.T)
+        FXt = np.dot(F, X.T)
+
+        # Optimize G with fixed F
+        np.put(G.T, data.idxOG, 0)
+        G, iterG, _ = NNLS(G, FFt, FXt - FFt.dot(data.Phi_G.T), ITER_MIN, ITER_MAX, tolG, data.idxOG, True)
+        np.put(G.T, data.idxOG, data.sparsePhi_G)
+        if iterG <= ITER_MIN:
+            tolG = tolG / 10
+            # print('Tweaked G tolerance to '+str(tolG))
+        GtG = np.dot(G, G.T)
+        GtX = np.dot(G, X)
+
+        if time.time() - t - k * delta_measure >= delta_measure:
+            k = k + 1
+            if k >= em_iter_max + 1:
+                break
+            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[k] = time.time() - t
+            # if k%100==0:
+            #     print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k]))
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion: norm of the projected gradient
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad, 2)
+
+
+def NNLS(Z, GtG, GtX, iterMin, iterMax, tol, idxfixed, transposed):
+    L = np.linalg.norm(GtG, 2)  # Lipschitz constant
+    H = Z  # Initialization
+    Grad = np.dot(GtG, Z) - GtX  # Gradient
+    alpha1 = np.ones(shape=(2, 1))
+
+    for iter in range(1, iterMax + 1):
+        H0 = H
+        H = np.maximum(Z - Grad / L, 0)  # Calculate sequence 'Y'
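+        # The block below appears to implement a gradient-based adaptive restart:
+        # alpha1 holds one momentum coefficient per factor row, and a row's momentum
+        # is reset to 1 when the inner product between the gradient and the last
+        # update is positive (the usual restart test for accelerated schemes).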
+        grad_scheme = np.greater(Grad.dot(H.T - H0.T), 0)
+        if np.any(grad_scheme[:, 0]):
+            alpha1[0] = 1
+            # break
+        if np.any(grad_scheme[:, 1]):
+            alpha1[1] = 1
+            # break
+        if transposed:  # If Z = G.T
+            np.put(H.T, idxfixed, 0)
+        else:  # If Z = F
+            np.put(H, idxfixed, 0)
+        alpha2 = 0.5 * (1 + np.sqrt(1 + 4 * alpha1 ** 2))
+        Z = H + ((alpha1 - 1) / alpha2) * (H - H0)
+        alpha1 = alpha2
+        Grad = np.dot(GtG, Z) - GtX
+
+        # Stopping criteria
+        if iter >= iterMin:
+            # Lin's stopping criteria
+            pgn = stop_rule(Z, Grad)
+            if pgn <= tol:
+                break
+    return H, iter, Grad
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f

+ 151 - 0
Python/calibrationMethods/emwnenmf_updateinsideNNLS.py

@@ -0,0 +1,151 @@
+import numpy as np
+import time
+
+def emwnenmf(data,G,F,r,Tmax):
+    MinIter = 10
+    tol     = 1e-5
+    em_iter_max = round(Tmax/0.05)+1  # one RMSE/time sample every 0.05 s
+    T       = np.empty(shape=(em_iter_max+1))
+    T.fill(np.nan)
+    RMSE    = np.empty(shape=(2,em_iter_max+1))
+    RMSE.fill(np.nan)
+
+    ITER_MAX=100  # maximum inner iteration number (Default)
+    ITER_MIN=10   # minimum inner iteration number (Default)
+
+    np.put(F,data.idxOF,data.sparsePhi_F)
+    np.put(G,data.idxOG,data.sparsePhi_G)
+    Xcomp = data.X + np.multiply(data.nW,np.dot(G,F))
+
+    FXt = np.dot(F,Xcomp.T)
+    FFt = np.dot(F,F.T)
+
+    GtX = np.dot(G.T,Xcomp)
+    GtG = np.dot(G.T,G)
+
+    GradG = np.dot(G,FFt)-FXt.T
+    GradF = np.dot(GtG,F)-GtX
+
+    init_delta = stop_rule(np.hstack((G.T,F)),np.hstack((GradG.T,GradF)))
+    tolF = max(tol,1e-3)*init_delta
+    tolG = tolF # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    k = 0
+    RMSE[:,k] = np.linalg.norm(F[:,0:-1]-data.F[:,0:-1],2,axis=1)/np.sqrt(F.shape[1]-1)
+    T[k] = 0
+    t = time.time()
+    # Main loop
+    while time.time()-t <= Tmax+0.05:
+
+        # Estimation step
+        Xcomp = data.X + np.multiply(data.nW,np.dot(G.T,F))
+        # Compress left and right
+        L,R = RSI_compression(Xcomp,r)
+        X_L = np.dot(L,Xcomp)
+        X_R = np.dot(Xcomp,R)
+
+        # Maximisation step
+        # Optimize F with fixed G
+        F,iterF,_ = NNLS(F,GtG,GtX,ITER_MIN,ITER_MAX,tolF,data.idxOF,data.sparsePhi_F,False)
+        # np.put(F,data.idxOF,data.sparsePhi_F)
+        # print(F[:,0:5])
+        if iterF<=ITER_MIN:
+            tolF = tolF/10
+            print('Tweaked F tolerance to '+str(tolF))
+        F_comp = np.dot(F,R)
+        FFt = np.dot(F_comp,F_comp.T)
+        FXt = np.dot(F_comp,X_R.T)
+        
+        # Optimize G with fixed F
+        G,iterG,GradG = NNLS(G,FFt,FXt,ITER_MIN,ITER_MAX,tolG,data.idxOG,data.sparsePhi_G,True)
+        # np.put(G.T,data.idxOG,data.sparsePhi_G)
+        if iterG<=ITER_MIN:
+            tolG = tolG/10
+            print('Tweaked G tolerance to '+str(tolG))
+        G_comp = np.dot(G,L.T)
+        GtG = np.dot(G_comp,G_comp.T)
+        GtX = np.dot(G_comp,X_L)
+        GradF = np.dot(GtG,F) - GtX
+
+        # Stopping condition
+        # delta = stop_rule(np.hstack((G,F)),np.hstack((GradG,GradF)))
+        # if (delta<=tol*init_delta and k>=MinIter):
+        #     print('break before Tmax')
+        #     break
+
+        if time.time()-t - k*0.05 >= 0.05:
+            k = k+1
+            if k >= em_iter_max+1: # guard against indexing past the preallocated T/RMSE buffers
+                break
+            RMSE[:,k] = np.linalg.norm(F[:,0:-1]-data.F[:,0:-1],2,axis=1)/np.sqrt(F.shape[1]-1)
+            T[k] = time.time()-t
+            if k%100==0:
+                print(str(k)+'   '+str(RMSE[0,k])+'   '+str(RMSE[1,k])+'   '+str(nmf_norm_fro(Xcomp,G.T,F)))
+
+    # return {'G' : G.T, 'F' : F, 'RMSE' : RMSE, 'T': T, 'RMSEb' : RMSEb}
+    return {'G' : G.T, 'F' : F, 'RMSE' : RMSE, 'T': T}
+
+def stop_rule(X,GradX):
+    # Stopping criterion: norm of the projected gradient
+    pGrad = GradX[np.any(np.dstack((X>0,GradX<0)),2)]
+    return np.linalg.norm(pGrad,2)
+
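+# Variant of the accelerated NNLS solver in which the fixed calibration entries
+# (idxfixed / fixed) are re-imposed inside every inner iteration, right after the
+# projection onto the non-negative orthant, instead of only once per outer EM
+# iteration (hence the "updateinsideNNLS" name of this file).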
+def NNLS(Z,GtG,GtX,iterMin,iterMax,tol,idxfixed,fixed,transposed):
+    L = np.linalg.norm(GtG,2) # Lipschitz constant
+    H = Z # Initialization
+    Grad = np.dot(GtG,Z)-GtX # Gradient
+    alpha1 = 1
+
+    for iter in range(1,iterMax+1):
+        H0 = H
+        H = np.maximum(Z-Grad/L,0) # Calculate sequence 'Y'
+        if transposed: # If Z = G.T
+            np.put(H.T,idxfixed,fixed)
+        else: # If Z = F
+            np.put(H,idxfixed,fixed)
+        alpha2 = 0.5*(1+np.sqrt(1+4*alpha1**2))
+        Z = H + ((alpha1-1)/alpha2)*(H-H0)
+        alpha1 = alpha2
+        Grad=np.dot(GtG,Z)-GtX
+
+        # Stopping criteria
+        if iter>=iterMin:
+            # Lin's stopping criteria
+            pgn = stop_rule(Z,Grad)
+            if pgn <= tol:
+                break
+    return H,iter,Grad
+
+
+def nmf_norm_fro(X,G,F,*args):
+    W = args
+    if len(W)==0:
+        f = np.square(np.linalg.norm(X-np.dot(G,F),'fro'))/np.square(np.linalg.norm(X,'fro'))
+    else:
+        W=W[0]
+        f = np.square(np.linalg.norm(X-np.multiply(W,np.dot(G,F)),'fro'))/np.square(np.linalg.norm(X,'fro'))
+    return f
+
+def RSI_compression(X,r):
+    compressionLevel = 20
+    m,n = X.shape
+    l = min(n,max(compressionLevel,r+10))
+
+    OmegaL = np.random.randn(n,l)
+    Y = np.dot(X,OmegaL)
+    for _ in range(4):
+        Y = np.linalg.qr(Y,mode='reduced')[0]
+        S = np.dot(X.T,Y)
+        Z = np.linalg.qr(S,mode='reduced')[0]
+        Y = np.dot(X,Z)
+    L = np.linalg.qr(Y,mode='reduced')[0].T
+
+    OmegaR = np.random.randn(l,m)
+    Y = np.dot(OmegaR,X)
+    for _ in range(4):
+        Y = np.linalg.qr(Y.T,mode='reduced')[0]
+        S = np.dot(X,Y)
+        Z = np.linalg.qr(S,mode='reduced')[0]
+        Y = np.dot(Z.T,X)
+    R = np.linalg.qr(Y.T,mode='reduced')[0]
+    return L,R

+ 111 - 0
Python/calibrationMethods/incal.py

@@ -0,0 +1,111 @@
+import numpy as np
+import time
+import matplotlib.pyplot as plt
+
+
+def incal(data, G, F, r, Tmax):
+    W2 = np.square(data.W)
+    delta_measure = 1
+    iter_max = round(Tmax / delta_measure) + 1
+    secu = 1e-12
+    T = np.empty(shape=(iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, iter_max + 1))
+    RMSE.fill(np.nan)
+
+    # RRE = np.empty(shape=(iter_max + 1))
+    # RRE.fill(np.nan)
+
+    dataX = data.X
+    dataF = data.F
+    dataPhi_G = data.Phi_G
+    dataPhi_F = data.Phi_F
+    dataidxOF = data.idxOF
+    dataidxOG = data.idxOG
+    datasparsePhi_F = data.sparsePhi_F
+    datasparsePhi_G = data.sparsePhi_G
+
+    delta_G = G
+    delta_F = F
+    t = time.time()
+    T[0] = time.time() - t
+    RMSE[:, 0] = np.linalg.norm(F[:, 0:-1] - dataF[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    i = 0
+    niter = 0
+    while time.time() - t <= Tmax + delta_measure:
+
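+        # Weighted multiplicative updates: each factor is multiplied elementwise by the
+        # usual numerator/denominator ratio of the weighted least-squares gradient
+        # (W2 = W**2 weights the squared residual), with the fixed calibration entries
+        # zeroed before the update and written back afterwards; secu_plus floors its
+        # argument at a small positive value and zeroes NaNs to keep the ratio defined.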
+        # Updating G
+        np.put(delta_G, dataidxOG, 0)
+        delta_G = np.divide(
+            np.multiply(
+                delta_G,
+                np.dot(
+                    np.multiply(
+                        W2,
+                        secu_plus(dataX - dataPhi_G.dot(F), secu)
+                    ),
+                    F.T
+                )
+            ),
+            np.dot(
+                np.multiply(
+                    W2,
+                    delta_G.dot(F)
+                ),
+                F.T
+            )
+        )
+        delta_G[np.isnan(delta_G)] = 0
+        G = delta_G
+        np.put(G, dataidxOG, datasparsePhi_G)
+
+        # Updating F
+        np.put(F, dataidxOF, 0)
+        delta_F = np.divide(
+            np.multiply(
+                delta_F,
+                np.dot(
+                    G.T,
+                    np.multiply(
+                        W2,
+                        secu_plus(dataX - G.dot(dataPhi_F), secu)
+                    )
+                )
+            ),
+            np.dot(
+                G.T,
+                np.multiply(
+                    W2,
+                    G.dot(delta_F)
+                )
+            )
+        )
+        delta_F[np.isnan(delta_F)] = 0
+        F = delta_F
+        np.put(F, dataidxOF, datasparsePhi_F)
+
+        # Saving results for this iteration
+        if time.time() - t - i * delta_measure >= delta_measure:
+            i = i + 1
+            RMSE[:, i] = np.linalg.norm(F[:, 0:-1] - dataF[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[i] = time.time() - t
+
+        niter = niter + 1
+    print('incal: ' + str(niter) + ' iterations')
+    return {'RMSE': RMSE, 'T': T}
+
+
+def secu_plus(tutu, s):
+    toto = np.maximum(tutu, s)
+    toto[np.isnan(tutu)] = 0
+    return toto
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f

+ 200 - 0
Python/calibrationMethods/muem.py

@@ -0,0 +1,200 @@
+import numpy as np
+import time
+
+def muem(data, G, F, r, Tmax):
+    delta_measure = 1
+    iter_max = round(Tmax / delta_measure) + 1
+    T = np.empty(shape=(iter_max + 1))
+    T.fill(np.nan)
+    RMSE = np.empty(shape=(2, iter_max + 1))
+    RMSE.fill(np.nan)
+
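+    # Hybrid scheme: spend the first 5% of the time budget (mu_rate) on weighted
+    # multiplicative updates to get a rough factorization, then hand G, F and the
+    # partially filled T / RMSE buffers over to the EM + NNLS solver for the
+    # remaining share of Tmax.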
+    mu_rate = 0.05
+    mu_res = mu(data, G, F, r, mu_rate * Tmax, T, RMSE, delta_measure)
+    return em(data, mu_res['G'], mu_res['F'], r, (1 - mu_rate) * Tmax, mu_res['T'], mu_res['RMSE'], mu_res['mu_state'], delta_measure)
+
+
+def mu(data, G, F, r, Tmax, T, RMSE, delta_measure):
+    W2 = np.square(data.W)
+    secu = 1e-12
+    # RRE = np.empty(shape=(iter_max + 1))
+    # RRE.fill(np.nan)
+    delta_G = G
+    delta_F = F
+    t = time.time()
+    T[0] = time.time() - t
+    RMSE[:, 0] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+    i = 0
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Updating G
+        np.put(delta_G, data.idxOG, 0)
+        delta_G = np.divide(
+            np.multiply(
+                delta_G,
+                np.dot(
+                    np.multiply(
+                        W2,
+                        secu_plus(data.X - data.Phi_G.dot(F), secu)
+                    ),
+                    F.T
+                )
+            ),
+            np.dot(
+                np.multiply(
+                    W2,
+                    delta_G.dot(F)
+                ),
+                F.T
+            )
+        )
+        delta_G[np.isnan(delta_G)] = 0
+        G = delta_G
+        np.put(G, data.idxOG, data.sparsePhi_G)
+
+        # Updating F
+        np.put(F, data.idxOF, 0)
+        delta_F = np.divide(
+            np.multiply(
+                delta_F,
+                np.dot(
+                    G.T,
+                    np.multiply(
+                        W2,
+                        secu_plus(data.X - G.dot(data.Phi_F), secu)
+                    )
+                )
+            ),
+            np.dot(
+                G.T,
+                np.multiply(
+                    W2,
+                    G.dot(delta_F)
+                )
+            )
+        )
+        delta_F[np.isnan(delta_F)] = 0
+        F = delta_F
+        np.put(F, data.idxOF, data.sparsePhi_F)
+
+        # Saving results for this iteration
+        if time.time() - t - i * delta_measure >= delta_measure:
+            i = i + 1
+            RMSE[:, i] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[i] = time.time() - t
+    return {'G': G, 'F': F, 'T': T, 'RMSE': RMSE, 'mu_state': i}
+
+
+def secu_plus(tutu, s):
+    toto = np.maximum(tutu, s)
+    toto[np.isnan(tutu)] = 0
+    return toto
+
+
+def em(data, G, F, r, Tmax, T, RMSE, mu_state, delta_measure):
+
+    tol = 1e-5
+    em_iter_max = round(Tmax / delta_measure) + 1
+    M_loop = 5  # Number of passage over M step
+
+    ITER_MAX = 3  # maximum inner iteration number (Default)
+    ITER_MIN = 1  # minimum inner iteration number (Default)
+
+    np.put(F, data.idxOF, data.sparsePhi_F)
+    np.put(G, data.idxOG, data.sparsePhi_G)
+    X = data.X + np.multiply(data.nW, np.dot(G, F))
+
+    FXt = np.dot(F, X.T)
+    FFt = np.dot(F, F.T)
+
+    GtX = np.dot(G.T, X)
+    GtG = np.dot(G.T, G)
+
+    tolF = tol * stop_rule(F, np.dot(GtG, F) - GtX)
+    tolG = tol * stop_rule(G.T, (np.dot(G, FFt) - FXt.T).T)  # Stopping tolerance
+
+    # Iterative updating
+    G = G.T
+    k = mu_state
+    t = time.time()
+
+    # Main loop
+    while time.time() - t <= Tmax + delta_measure:
+
+        # Estimation step
+        X = data.X + np.multiply(data.nW, np.dot(G.T, F))
+
+        # Maximisation step
+        for _ in range(M_loop):
+            # Optimize F with fixed G
+            np.put(F, data.idxOF, 0)
+            F, iterF, _ = NNLS(F, GtG, GtX - GtG.dot(data.Phi_F), ITER_MIN, ITER_MAX, tolF, data.idxOF, False)
+            np.put(F, data.idxOF, data.sparsePhi_F)
+            # print(F[:,0:5])
+            if iterF <= ITER_MIN:
+                tolF = tolF / 10
+                # print('Tweaked F tolerance to '+str(tolF))
+            FFt = np.dot(F, F.T)
+            FXt = np.dot(F, X.T)
+
+            # Optimize G with fixed F
+            np.put(G.T, data.idxOG, 0)
+            G, iterG, _ = NNLS(G, FFt, FXt - FFt.dot(data.Phi_G.T), ITER_MIN, ITER_MAX, tolG, data.idxOG, True)
+            np.put(G.T, data.idxOG, data.sparsePhi_G)
+            if iterG <= ITER_MIN:
+                tolG = tolG / 10
+                # print('Tweaked G tolerance to '+str(tolG))
+            GtG = np.dot(G, G.T)
+            GtX = np.dot(G, X)
+
+        if time.time() - t - (k-mu_state) * delta_measure >= delta_measure:
+            k = k + 1
+            if (k-mu_state) >= em_iter_max:
+                break
+            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
+            T[k] = T[mu_state] + time.time() - t
+    return {'RMSE': RMSE, 'T': T}
+
+
+def stop_rule(X, GradX):
+    # Stopping criterion: norm of the projected gradient
+    pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
+    return np.linalg.norm(pGrad, 2)
+
+
+def NNLS(Z, GtG, GtX, iterMin, iterMax, tol, idxfixed, transposed):
+    L = np.linalg.norm(GtG, 2)  # Lipschitz constant
+    H = Z  # Initialization
+    Grad = np.dot(GtG, Z) - GtX  # Gradient
+    alpha1 = 1
+
+    for iter in range(1, iterMax + 1):
+        H0 = H
+        H = np.maximum(Z - Grad / L, 0)  # Calculate sequence 'Y'
+
+        if transposed:  # If Z = G.T
+            np.put(H.T, idxfixed, 0)
+        else:  # If Z = F
+            np.put(H, idxfixed, 0)
+        alpha2 = 0.5 * (1 + np.sqrt(1 + 4 * alpha1 ** 2))
+        Z = H + ((alpha1 - 1) / alpha2) * (H - H0)
+        alpha1 = alpha2
+        Grad = np.dot(GtG, Z) - GtX
+
+        # Stopping criteria
+        if iter >= iterMin:
+            # Lin's stopping criteria
+            pgn = stop_rule(Z, Grad)
+            if pgn <= tol:
+                break
+    return H, iter, Grad
+
+
+def nmf_norm_fro(X, G, F, *args):
+    W = args
+    if len(W) == 0:
+        f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    else:
+        W = W[0]
+        f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
+    return f

+ 15 - 0
Python/calibrationStatistics.py

@@ -0,0 +1,15 @@
+import numpy as np
+
+def calibrationStatistics(data,res):
+    F = res['F']
+    
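+    # F rows: 0 = gains, 1 = offsets; the last column (the reference, fixed to
+    # gain 1 / offset 0) is excluded from the RMSE / NRMSE computations.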
+    Stat = {
+        'rmseGain': rms(F[0,0:-1]-data.F[0,0:-1]),
+        'nrmseGain': rms(F[0,0:-1]-data.F[0,0:-1])/rms(data.F[0,0:-1]),
+        'rmseOffset': rms(F[1,0:-1]-data.F[1,0:-1]),
+        'nrmseOffset': rms(F[1,0:-1]-data.F[1,0:-1])/rms(data.F[1,0:-1])
+    }  
+    return Stat
+
+def rms(x):
+    return np.sqrt(np.mean(x ** 2))

+ 19 - 0
Python/config.json

@@ -0,0 +1,19 @@
+{
+    "sceneWidth" : 50,
+    "sceneLength" : 50,
+    "numSensor" : 25,
+    "numRef" : 4,
+    "rdvR" : 0.3,
+    "mvR" : 0.5,
+    "calibrationMethods" : ["emwnenmf_m"],
+    "numRuns" : 1,
+    "phenLowerBound" : 0.05,
+    "phenUpperBound" : 0.15,
+    "Mu_beta" : 5,
+    "Mu_alpha" : 0.9,
+    "Bound_beta" : [3.5,6.5],
+    "Bound_alpha" : [0.01,1.5],
+    "Tmax" : 180,
+    "r" : 15,
+    "save2dat" : 1
+}

+ 141 - 0
Python/dataCreator.py

@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+import numpy as np
+from scipy.stats import multivariate_normal
+import matplotlib.pyplot as plt
+
+
+class dataCreator:
+    '''
+    Generates a pollution scenario
+    '''
+
+    def __init__(self, sceneWidth, sceneLength, numSensor, numRef, rdvR, mvR, phenLowerBound, phenUpperBound, Mu_beta,
+                 Mu_alpha, Bound_beta, Bound_alpha):
+        self.sceneWidth = sceneWidth  # Width of the scene
+        self.sceneLength = sceneLength  # Length of the scene
+        self.numArea = self.sceneWidth * self.sceneLength  # Number of sampled area in the scene
+        self.numSensor = numSensor  # Number of sensors in the scene
+        self.numRef = numRef  # Number of references in the scene
+        self.rdvR = rdvR  # Rendez-vous rate : number of rendez-vous/number of sensors
+        self.mvR = mvR  # missing value rate
+        self.phenLowerBound = phenLowerBound  # lower bound on the phenomena (air pollution concentrations) standard deviation (normal distribution)
+        self.phenUpperBound = phenUpperBound  # upper bound on the phenomena standard deviation
+        self.Mu_beta = Mu_beta  # Mean sensors offset
+        self.Mu_alpha = Mu_alpha  # Mean sensors gain
+        self.Bound_beta = Bound_beta  # Offset boundaries
+        self.Bound_alpha = Bound_alpha  # Gain boundaries
+        self.numRdv = round(self.numSensor * self.rdvR)  # Number of sensors having a rendez-vous in the scene
+        # self.numAreaVisited = self.numSensor+self.numRef-self.numRdv # Number of not empty areas 
+
+    def create_scene(self, randSeed):
+        np.random.seed(randSeed)  # Random seed
+        # Creation of the map
+        self.S = np.zeros(shape=self.numArea)
+        x = np.linspace(-1, 1, num=self.sceneLength)
+        y = np.linspace(-1, 1, num=self.sceneWidth)
+        # Create meshgrid
+        xx, yy = np.meshgrid(x, y)
+        coord = np.vstack((xx.ravel(), yy.ravel())).T
+        # number of pollution peaks fixed to 10
+
+        # MUTED FOR DEBUGGING
+        # for _ in range(10):
+        #     mean = np.squeeze(np.array([2 * (np.random.rand(1) - 0.5), 2 * (np.random.rand(1) - 0.5)]))
+        #     cxy = 0
+        #     cov = np.squeeze(np.array([[self.phenLowerBound + (self.phenUpperBound - self.phenLowerBound) * np.absolute(
+        #         np.random.randn(1) + 0.5), cxy],
+        #                                [cxy,
+        #                                 self.phenLowerBound + (self.phenUpperBound - self.phenLowerBound) * np.absolute(
+        #                                     np.random.randn(1) + 0.5)]]))
+        #     z = multivariate_normal.pdf(coord, mean=mean, cov=cov)
+        #     self.S = self.S + z
+
+        # REMOVE THIS BLOCK AFTER DEBUG
+        z = multivariate_normal.pdf(coord, mean=[0, 0], cov=[[1, 0], [0, 1]])
+        z = z - np.min(z)
+        z = 0.5 * (z / np.max(z)) + 1e-5
+        self.S = z
+
+        # Random locations for the mobile sensors and the references
+        posRef = np.random.permutation(self.numArea)[0:self.numRef]  # Selection of the references
+        idxSenRdv = np.random.permutation(self.numSensor)[
+                    0:self.numRdv]  # Selection of the sensors having a rendez-vous
+        idxRefRdv = np.random.randint(self.numRef,
+                                      size=self.numRdv)  # Selection of the references corresponding to idxSenRdv
+
+        # idxSen = np.arange(self.numSensor)
+        # idxSen = np.delete(idxSen,idxSenRdv) # Still available sensors
+        # freePos = np.arange(self.numArea) 
+        # freePos = np.delete(freePos,posRef) # Still available positions
+        # posSen = np.random.choice(freePos,size=(self.numSensor-self.numRdv)) # Selection of the positions
+        # self.posAll = np.unique(np.concatenate((posRef,posSen))) # All unique positions of sensors and references
+        # self.posEmpty = np.arange(self.numArea)
+        # self.posEmpty = np.delete(self.posEmpty,self.posAll)
+
+        # Computation of W,G,F and X
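+        # Observation model: X = W .* (G F), where each row of G is [concentration, 1]
+        # for one area, each column of F is [gain; offset] for one sensor (the last
+        # column is the reference, fixed to gain 1 and offset 0), and W flags which
+        # (area, sensor) pairs were actually measured.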
+        self.W = np.zeros(shape=[self.numArea, self.numSensor + 1])
+
+        # The references
+        self.W[posRef, -1] = 1
+
+        # The sensors having a rendez-vous
+        self.W[posRef[idxRefRdv], idxSenRdv] = 1  # np.put(self.W,(self.numSensor+1)*posRef[idxRefRdv]+idxSenRdv,1)
+
+        # The other sensors
+        Ndata = round((1 - self.mvR) * self.numSensor * (self.numArea - self.numRef))
+        posSen = np.delete(np.arange(self.numArea), posRef)
+        xx, yy = np.meshgrid(posSen, np.arange(self.numSensor))
+        idx_mesh_sensor = np.random.permutation((self.numArea - self.numRef) * self.numSensor)[0:Ndata]
+        self.W[xx.flat[idx_mesh_sensor], yy.flat[idx_mesh_sensor]] = 1
+
+        # The areas that are not measured
+        self.nW = 1 - self.W
+
+        self.G = np.ones([self.numArea, 2])  # The last column of G is only composed of ones
+        self.G[:, 0] = self.S.flat  # The first column of G is composed of the true concentration for each area
+
+        self.F = np.squeeze([np.maximum(self.Bound_alpha[0], np.minimum(self.Bound_alpha[1],
+                                                                        self.Mu_alpha + 0.5 * np.random.randn(1,
+                                                                                                              self.numSensor + 1))),
+                             np.maximum(self.Bound_beta[0], np.minimum(self.Bound_beta[1],
+                                                                       self.Mu_beta + 0.5 * np.random.randn(1,
+                                                                                                            self.numSensor + 1)))])
+        self.F[:, -1] = [1, 0]
+
+        self.Xtheo = np.dot(self.G, self.F)
+        self.X = np.multiply(self.Xtheo, self.W)
+
+        # Computation of omega and phi 
+        self.Omega_G = np.vstack((self.W[:, -1], np.ones(shape=self.numArea))).T
+        self.Omega_F = np.hstack((np.zeros(shape=(2, self.numSensor)), np.ones(shape=(2, 1))))
+        self.Phi_G = np.vstack((self.X[:, -1], np.ones(shape=self.numArea))).T
+        self.Phi_F = np.zeros(shape=(2, self.numSensor + 1))
+        self.Phi_F[0, -1] = 1
+
+        # Faster access to the fixed values, done to avoid time consuming Hadamard product
+        self.idxOG = np.argwhere(self.Omega_G.flat)
+        self.sparsePhi_G = self.Phi_G.flat[self.idxOG]
+        self.idxOF = np.argwhere(self.Omega_F.flat)
+        self.sparsePhi_F = self.Phi_F.flat[self.idxOF]
+
+        # Initial F and G
+        self.Finit = np.squeeze([np.maximum(self.Bound_alpha[0], np.minimum(self.Bound_alpha[1],
+                                                                            self.Mu_alpha + 0.5 * np.random.randn(1,
+                                                                                                                  self.numSensor + 1))),
+                                 np.maximum(self.Bound_beta[0], np.minimum(self.Bound_beta[1],
+                                                                           self.Mu_beta + 0.5 * np.random.randn(1,
+                                                                                                                self.numSensor + 1)))])
+        self.Finit[:, -1] = [1, 0]
+        self.Ginit = np.absolute(np.mean(self.Phi_G[posRef, 0]) + np.random.randn(self.numArea, 2))
+        np.put(self.Ginit, self.idxOG, self.sparsePhi_G)
+
+    def show_scene(self):
+        plt.imshow(self.S.reshape((self.sceneWidth, self.sceneLength)))
+        plt.show()
+
+    # def show_measured_scene(self):
+    #     obs = np.zeros(shape=(self.sceneWidth,self.sceneLength))
+    #     np.put(obs,self.posAll,1)
+    #     plt.imshow(np.multiply(obs,self.S.reshape((self.sceneWidth,self.sceneLength))))
+    #     plt.show()

+ 58 - 0
Python/display_scenes.py

@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+__author__ = "Olivier Vu thanh"
+__email__ = "olivier.vu-thanh@grenoble-inp.org"
+
+'''
+Run "python main.py --config_file=config.json"
+
+Available calibration methods are   : emwnenmf (EM-W-NeNMF from [quote paper])
+                                    : coming soon...
+'''
+
+import numpy as np
+import argparse
+import json
+import matplotlib.pyplot as plt
+from dataCreator import dataCreator
+from calibrationStatistics import calibrationStatistics
+from calibrationMethods.emwnenmf import emwnenmf
+
+
+print('Work in progress')
+
+'''
+Get the config (json) file, see "config.json" for default one
+'''
+parser = argparse.ArgumentParser(description='Parse location of config file (json).')
+parser.add_argument('--config_file', type=str, default='config.json',
+                    help='path to json config file, see config.json for default')
+
+args = parser.parse_args()
+with open(args.config_file) as json_data_file:
+    config = json.load(json_data_file)
+
+'''
+Main loop
+'''
+
+data = dataCreator(config['sceneWidth'],
+        config['sceneLength'],
+        config['numSensor'],
+        config['numRef'],
+        config['rdvR'],
+        config['mvR'],
+        config['phenLowerBound'],
+        config['phenUpperBound'],
+        config['Mu_beta'],
+        config['Mu_alpha'],
+        config['Bound_beta'],
+        config['Bound_alpha'])
+m = data.numArea
+n = data.numSensor+1
+Res = {}
+
+for run in range(config['numRuns']):
+    data.create_scene(run)
+    data.show_scene()
+    # print(data.W)

+ 86 - 0
Python/main.py

@@ -0,0 +1,86 @@
+
+__author__ = "Olivier Vu thanh"
+__email__ = "olivier.vu-thanh@grenoble-inp.org"
+
+'''
+Run "python main.py --config_file=config.json"
+
+Available calibration methods are   : emwnenmf (EM-W-NeNMF from [quote paper])
+                                    : coming soon...
+'''
+
+import numpy as np
+import argparse
+import json
+from dataCreator import dataCreator
+# from calibrationStatistics import calibrationStatistics
+# from calibrationMethods.emwnenmf_updateinsideNNLS import emwnenmf
+# from calibrationMethods.emwnenmf_seprestart import emwnenmf
+from calibrationMethods.incal import incal
+from calibrationMethods.emwnenmf import emwnenmf
+from calibrationMethods.emwnenmf_comp import emwnenmf_comp
+from calibrationMethods.emwnenmf_restart import emwnenmf_restart
+from calibrationMethods.emwnenmf_break import emwnenmf_break
+from calibrationMethods.emwamsgrad import emwamsgrad
+from calibrationMethods.muem import muem
+from calibrationMethods.emwfnnls import emwfnnls
+from calibrationMethods.emwmunmf import emwmunmf
+from calibrationMethods.emwnenmf_m import emwnenmf_m
+from save2dat import save2dat
+
+
+print('Work in progress')
+
+'''
+Get the config (json) file, see "config.json" for default one
+'''
+parser = argparse.ArgumentParser(description='Parse location of config file (json).')
+parser.add_argument('--config_file', type=str, default='config.json',
+                    help='path to json config file, see config.json for default')
+
+args = parser.parse_args()
+with open(args.config_file) as json_data_file:
+    config = json.load(json_data_file)
+
+'''
+Main loop
+'''
+
+data = dataCreator(config['sceneWidth'],
+                   config['sceneLength'],
+                   config['numSensor'],
+                   config['numRef'],
+                   config['rdvR'],
+                   config['mvR'],
+                   config['phenLowerBound'],
+                   config['phenUpperBound'],
+                   config['Mu_beta'],
+                   config['Mu_alpha'],
+                   config['Bound_beta'],
+                   config['Bound_alpha'])
+m = data.numArea
+n = data.numSensor + 1
+RMSE = {}
+T = {}
+for method in config['calibrationMethods']:
+    RMSE.update({method: []})
+    T.update({method: []})
+
+for run in range(config['numRuns']):
+    data.create_scene(run+5)
+    # ONLY EMWNENMF AND INCAL HAVE BEEN CODED FOR NOW
+    print('run : ' + str(run))
+    for method in config['calibrationMethods']:
+        print('method : ' + method)
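+        # Dispatch by name: each entry of config['calibrationMethods'] must match one
+        # of the functions imported above (e.g. 'incal', 'emwnenmf'); the function is
+        # looked up in locals() and called with the common signature
+        # (data, Ginit, Finit, r, Tmax).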
+        calMethod = locals()[method]
+        res = calMethod(data, data.Ginit, data.Finit, config['r'], config['Tmax'])
+        if run == 0:
+            RMSE[method] = res['RMSE']
+            T[method] = res['T']
+        else:
+            RMSE[method] = np.vstack((RMSE[method], res['RMSE']))
+            T[method] = np.vstack((T[method], res['T']))
+        print('RMSE : ' + str(res['RMSE'][0][-1]) + '   ' + str(res['RMSE'][1][-1]))
+
+if config['save2dat']:
+    save2dat(RMSE, T, config['calibrationMethods'], config['numRuns'])

+ 36 - 0
Python/save2dat.py

@@ -0,0 +1,36 @@
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+
+def save2dat(RMSE,T,calibrationMethods,numRuns):
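+    # Note: despite its name, this routine currently only plots the min / max / median
+    # RMSE curves (gain rows RMSE[::2], offset rows RMSE[1::2]) against time on a log
+    # scale; nothing is written to a .dat file yet.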
+    k = 0
+    for method in calibrationMethods:
+        min_gain    = np.min(RMSE[method][::2],axis=0)
+        min_offset  = np.min(RMSE[method][1::2],axis=0)
+        max_gain    = np.max(RMSE[method][::2],axis=0)
+        max_offset  = np.max(RMSE[method][1::2],axis=0)
+        med_gain    = np.median(RMSE[method][::2],axis=0)
+        med_offset  = np.median(RMSE[method][1::2],axis=0)
+        if numRuns > 1:
+            k = k+1
+            plt.subplot(len(calibrationMethods),2,k)
+            plt.semilogy(T[method][0],min_gain)
+            plt.semilogy(T[method][0],max_gain)
+            plt.semilogy(T[method][0],med_gain)
+            k = k+1
+            plt.subplot(len(calibrationMethods),2,k)
+            plt.semilogy(T[method][0],min_offset)
+            plt.semilogy(T[method][0],max_offset)
+            plt.semilogy(T[method][0],med_offset)
+        else:
+            k = k+1
+            plt.subplot(len(calibrationMethods),2,k)
+            plt.semilogy(T[method][:],min_gain)
+            plt.semilogy(T[method][:],max_gain)
+            plt.semilogy(T[method][:],med_gain)
+            k = k+1
+            plt.subplot(len(calibrationMethods),2,k)
+            plt.semilogy(T[method][:],min_offset)
+            plt.semilogy(T[method][:],max_offset)
+            plt.semilogy(T[method][:],med_offset)
+    plt.show()

+ 0 - 0
README.md