% Non-negative Matrix Factorization via Nesterov's Optimal Gradient Method
% (NeNMF), improved via Randomized Subspace Iterations (RSI)
%
% Reference:
% F. Yahaya, M. Puigt, G. Delmaire, G. Roussel, "Faster-than-fast NMF using
% random projections and Nesterov iterations," to appear in the Proceedings
% of iTWIST: international Traveling Workshop on Interactions between
% low-complexity data models and Sensing Techniques, Marseille, France,
% November 21-23, 2018.
%
% <Inputs>
% X    : input data matrix (m x n)
% W    : initial basis matrix (m x r)
% H    : initial coefficient matrix (r x n)
% r    : target low rank
% Tmax : CPU-time budget in seconds; the main loop stops once it is exceeded
%
% <Internal parameters>
% ITER_MAX : maximum number of inner (NNLS) iterations. Default is 500.
% ITER_MIN : minimum number of inner (NNLS) iterations. Default is 10.
% tol      : stopping tolerance. Default is 1e-5. To obtain a more accurate
%            solution, decrease tol and increase Tmax at the same time.
%
% <Outputs>
% W   : obtained basis matrix (m x r)
% H   : obtained coefficient matrix (r x n)
% RRE : relative reconstruction error, sampled every 0.05 s
% T   : elapsed CPU time in seconds at each sample point
%
% Note: the file 'stop_rule.m' must be located in the same directory as
% this code.
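%
% <Usage>
% A minimal usage sketch. The sizes, rank, and time budget below are
% illustrative assumptions, not values from the reference; Tmax = 15 s
% matches the 301-slot preallocation of T and RRE (one sample per 0.05 s):
%
%   m = 500; n = 400; r = 10;            % hypothetical problem sizes
%   X  = rand(m, n);                     % nonnegative data matrix
%   W0 = rand(m, r);                     % nonnegative initial basis
%   H0 = rand(r, n);                     % nonnegative initial coefficients
%   [W, H, RRE, T] = RSI_NeNMF(X, W0, H0, r, 15);
%   k = find(T, 1, 'last');              % index of the last recorded sample
%   semilogy(T(1:k), RRE(1:k));          % error vs. CPU time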
function [W,H,RRE,T] = RSI_NeNMF(X,W,H,r,Tmax)
MinIter = 10;        % minimum number of outer iterations
tol = 1e-5;          % stopping tolerance
T = zeros(1,301);    % elapsed time per sample (enough for Tmax <= 15 s at one sample per 0.05 s)
RRE = zeros(1,301);  % relative reconstruction error per sample
ITER_MAX = 500;      % maximum inner iteration number (default)
ITER_MIN = 10;       % minimum inner iteration number (default)

% Build the left (l x m) and right (n x l) compression matrices
[L,R] = RSI_compression(X,r);
% Compress the data and the factors on the left and on the right
X_L = L*X;
X_R = X*R;
H_comp = H*R;
W_comp = L*W;

% Gram and cross-product matrices of the compressed subproblems
HVt = H_comp*X_R';
HHt = H_comp*H_comp';
WtV = W_comp'*X_L;
WtW = W_comp'*W_comp;

% Gradients at the initial point, used to scale the stopping rule
GradW = W*HHt - HVt';
GradH = WtW*H - WtV;
init_delta = stop_rule([W',H],[GradW',GradH]);
tolH = max(tol,1e-3)*init_delta;
tolW = tolH;         % stopping tolerance of the inner solvers

% Iterative updating: W is stored transposed (r x m) inside the loop
W = W';
k = 1;
RRE(k) = nmf_norm_fro(X,W',H);
T(k) = 0;
tic
% Main loop: alternate NNLS updates of H and W until the time budget
% (plus half a sampling period) is exhausted
while toc <= Tmax + 0.05
    % Optimize H with W fixed
    [H,iterH] = NNLS(H,WtW,WtV,ITER_MIN,ITER_MAX,tolH);
    if iterH <= ITER_MIN
        tolH = tolH/10;  % tighten the tolerance if it was met too easily
    end
    H_comp = H*R;
    HHt = H_comp*H_comp';
    HVt = H_comp*X_R';
    % Optimize W with H fixed
    [W,iterW,GradW] = NNLS(W,HHt,HVt,ITER_MIN,ITER_MAX,tolW);
    if iterW <= ITER_MIN
        tolW = tolW/10;
    end
    W_comp = W*L';
    WtW = W_comp*W_comp';
    WtV = W_comp*X_L;
    GradH = WtW*H - WtV;
    % Stopping condition on the projected gradient norm
    delta = stop_rule([W,H],[GradW,GradH]);
    if delta <= tol*init_delta && k >= MinIter
        break;
    end
    % Record the error once every 0.05 s of elapsed time
    if toc - (k-1)*0.05 >= 0.05
        k = k+1;
        RRE(k) = nmf_norm_fro(X,W',H);
        T(k) = toc;
    end
end % end of main loop
W = W';
return;
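% NNLS solves the non-negative least squares subproblem
%   min_{H >= 0} 0.5*||V - W*H||_F^2
% with Nesterov's optimal gradient method (the core of NeNMF), given the
% Gram matrix WtW = W'*W and the cross product WtV = W'*V. As a hedged
% illustration only (W, X, and H0 below are hypothetical, and the original
% pipeline calls it on the compressed matrices instead), a direct call on
% an uncompressed subproblem would look like:
%
%   [H, iterH] = NNLS(H0, W'*W, W'*X, 10, 500, 1e-4);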
function [H,iter,Grad] = NNLS(Z,WtW,WtV,iterMin,iterMax,tol)
if ~issparse(WtW)
    L = norm(WtW);          % Lipschitz constant of the gradient
else
    L = norm(full(WtW));
end
H = Z;                      % initialization
Grad = WtW*Z - WtV;         % gradient at the extrapolated point Z
alpha1 = 1;
for iter = 1:iterMax
    H0 = H;
    H = max(Z - Grad/L,0);  % projected gradient step (sequence 'Y')
    alpha2 = 0.5*(1 + sqrt(1 + 4*alpha1^2));  % Nesterov momentum coefficient
    Z = H + ((alpha1-1)/alpha2)*(H - H0);     % extrapolation
    alpha1 = alpha2;
    Grad = WtW*Z - WtV;
    % Stopping criterion
    if iter >= iterMin
        % Lin's projected-gradient stopping condition
        pgn = stop_rule(Z,Grad);
        if pgn <= tol
            break;
        end
    end
end
Grad = WtW*H - WtV;         % gradient at the returned iterate
return;
function f = nmf_norm_fro(X, W, H)
% Author : F. Yahaya
% Date: 13/04/2018
% Contact: farouk.yahaya@univ-littoral.fr
% Goal: compute the normalized reconstruction error of the data matrix X:
% the squared Frobenius norm of the error X - W*H divided by the squared
% Frobenius norm of X.
% Note: to express the error in dB, compute 10*log10(f).
%
f = norm(X - W*H,'fro')^2/norm(X,'fro')^2;
return;
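% RSI_compression builds the left (l x m) and right (n x l) projection
% matrices used above via randomized subspace iterations: a Gaussian sketch
% of X is refined by a few power iterations, with thin QR factorizations in
% between to keep the iterates orthonormal and numerically stable.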
function [L,R] = RSI_compression(X,r)
% Tepper, M., & Sapiro, G. (2016). Compressed nonnegative matrix
% factorization is fast and accurate. IEEE Transactions on Signal
% Processing, 64(9), 2269-2283.
% See: https://arxiv.org/pdf/1505.04650
% The corresponding code was originally created by the authors,
% then modified by F. Yahaya.
% Date: 13/04/2018
%
compressionLevel = 20;
[m,n] = size(X);
l = min(n, max(compressionLevel, r + 10));  % compressed dimension

% Left projection: capture the column space of X
OmegaL = randn(n,l);
Y = X*OmegaL;                % Gaussian sketch of the columns of X
for i = 1:4                  % power (subspace) iterations
    [Y,~] = qr(Y,0);
    S = X'*Y;
    [Z,~] = qr(S,0);
    Y = X*Z;
end
[L,~] = qr(Y,0);
L = L';                      % L is l x m

% Right projection: capture the row space of X
OmegaR = randn(l,m);
Y = OmegaR*X;                % Gaussian sketch of the rows of X
for i = 1:4                  % power (subspace) iterations
    [Y,~] = qr(Y',0);
    S = X*Y;
    [Z,~] = qr(S,0);
    Y = Z'*X;
end
Y = Y';
[R,~] = qr(Y,0);             % R is n x l
return
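
% <Note on stop_rule>
% stop_rule.m is not included in this file. From the way it is called
% above, it is assumed to return the norm of the projected gradient in the
% sense of C.-J. Lin, "Projected Gradient Methods for Nonnegative Matrix
% Factorization," Neural Computation 19(10), 2007. A minimal sketch
% consistent with that assumption (hypothetical; kept commented out so the
% actual stop_rule.m on the path is used):
%
%   function pgn = stop_rule(Z, Grad)
%   % Projected gradient norm: keep Grad entries at free variables (Z > 0)
%   % and negative Grad entries at the bound (Z == 0).
%   pgn = norm(Grad(Grad < 0 | Z > 0));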