% RPI_NeNMF.m
% Randomized Power Iterations NeNMF (RPI-NeNMF)
% Reference:
% N. Guan, D. Tao, Z. Luo, and B. Yuan, "NeNMF: An Optimal Gradient Method
% for Non-negative Matrix Factorization", IEEE Transactions on Signal
% Processing, Vol. 60, No. 6, PP. 2882-2898, Jun. 2012. (DOI:
% 10.1109/TSP.2012.2190406)
% Modified by: F. Yahaya
% Date: 06/09/2018
% Contact: farouk.yahaya@univ-littoral.fr
% Reference:
% F. Yahaya, M. Puigt, G. Delmaire, G. Roussel, "Faster-than-fast NMF using
% random projections and Nesterov iterations", in Proceedings of iTWIST:
% international Traveling Workshop on Interactions between low-complexity
% data models and Sensing Techniques, Marseille, France, November 21-23,
% 2018.
%
% <Inputs>
%   X    : Input data matrix (m x n)
%   W    : Initial basis matrix (m x r)
%   H    : Initial coefficients matrix (r x n)
%   r    : Target low rank
%   Tmax : Time budget in seconds; the main loop stops once exceeded
% <Outputs>
%   W   : Obtained basis matrix (m x r)
%   H   : Obtained coefficients matrix (r x n)
%   RRE : Relative reconstruction error at each outer iteration
%   T   : Elapsed CPU time (seconds) at each outer iteration
% Note: another file 'stop_rule.m' should be included under the same
% directory as this code.
  31. function [W,H,RRE,T]=RPI_NeNMF( X,W,H,r,Tmax)
  32. MinIter=10;
  33. % tol=1e-5;
  34. tol= 1e-5;
  35. T=zeros(1,5000);
  36. RRE=zeros(1,5000);
  37. ITER_MAX=500; % maximum inner iteration number (Default)
  38. ITER_MIN=10; % minimum inner iteration number (Default)
  39. [L,R]=compression(X,r);
  40. X_L = L * X;
  41. X_R = X* R;
  42. %initialization
  43. % W=W0; H=H0;
  44. H_comp= H* R;
  45. W_comp = L*W;
  46. HVt=H_comp*X_R';
  47. HHt=H_comp*H_comp';
  48. WtV=W_comp'*X_L;
  49. WtW=W_comp'*W_comp;
  50. GradW=W*HHt-HVt';
  51. GradH=WtW*H-WtV;
  52. init_delta=stop_rule([W',H],[GradW',GradH]);
  53. tolH=max(tol,1e-3)*init_delta;
  54. tolW=tolH; % Stopping tolerance
  55. % Iterative updating
  56. W=W';
  57. k=1;
  58. RRE(k) = nmf_norm_fro( X, W', H);
  59. T(k) =0;
  60. tic
  61. % main loop
  62. while (toc<= Tmax)
  63. k = k+1;
  64. % Optimize H with W fixed
  65. [H,iterH]=NNLS(H,WtW,WtV,ITER_MIN,ITER_MAX,tolH);
  66. if iterH<=ITER_MIN
  67. tolH=tolH/10;
  68. end
  69. H_comp=H*R;
  70. HHt=H_comp*H_comp'; HVt=H_comp*X_R';
  71. % Optimize W with H fixed
  72. [W,iterW,GradW]=NNLS(W,HHt,HVt,ITER_MIN,ITER_MAX,tolW);
  73. if iterW<=ITER_MIN
  74. tolW=tolW/10;
  75. end
  76. W_comp=W * L';
  77. WtW=W_comp*W_comp';
  78. WtV=W_comp*X_L;
  79. GradH=WtW*H-WtV;
  80. % HIS.niter=niter+iterH+iterW;
  81. delta=stop_rule([W,H],[GradW,GradH]);
  82. % Stopping condition
  83. if (delta<=tol*init_delta && k>=MinIter)
  84. break;
  85. end
  86. RRE(k) = nmf_norm_fro( X, W', H);
  87. T(k) = toc;
  88. end %end of loop
  89. W=W';
  90. end
  91. function [H,iter,Grad]=NNLS(Z,WtW,WtV,iterMin,iterMax,tol)
  92. if ~issparse(WtW)
  93. L=norm(WtW); % Lipschitz constant
  94. else
  95. L=norm(full(WtW));
  96. end
  97. H=Z; % Initialization
  98. Grad=WtW*Z-WtV; % Gradient
  99. alpha1=1;
  100. for iter=1:iterMax
  101. H0=H;
  102. H=max(Z-Grad/L,0); % Calculate sequence 'Y'
  103. alpha2=0.5*(1+sqrt(1+4*alpha1^2));
  104. Z=H+((alpha1-1)/alpha2)*(H-H0);
  105. alpha1=alpha2;
  106. Grad=WtW*Z-WtV;
  107. % Stopping criteria
  108. if iter>=iterMin
  109. % Lin's stopping condition
  110. pgn=stop_rule(Z,Grad);
  111. if pgn<=tol
  112. break;
  113. end
  114. end
  115. end
  116. Grad=WtW*H-WtV;
  117. end
  118. function f = nmf_norm_fro(X, W, H)
  119. % Author : F. Yahaya
  120. % Date: 13/04/2018
  121. % Contact: farouk.yahaya@univ-littoral.fr
  122. % Goal: compute a normalized error reconstruction of the mixing matrix V
  123. % "Normalized" means that we divide the squared Frobenius norm of the error
  124. % by the squared Frobenius norm of the matrix V
  125. % Note: To express the error in dB, you have to compute 10*log10(f)
  126. %
  127. f = norm(X - W * H,'fro')^2/norm(X,'fro')^2;
  128. end
  129. function [ L,R ] = compression(X, r)
  130. % Tepper, M., & Sapiro, G. (2016). Compressed nonnegative
  131. % matrix factorization is fast and accurate. IEEE Transactions
  132. % on Signal Processing, 64(9), 2269-2283.
  133. % see: https://arxiv.org/pdf/1505.04650
  134. % The corresponding code is originally created by the authors
  135. % Then, it is modified by F. Yahaya.
  136. % Date: 13/04/2018
  137. %
  138. compressionLevel=20;
  139. [m,n]=size(X);
  140. l = min(n, max(compressionLevel, r + 10));
  141. OmegaL = randn(n,l);
  142. B = X * OmegaL;
  143. for j = 1:4
  144. B = X * (X' * B);
  145. end
  146. [L,~] = qr(B, 0);
  147. L = L';
  148. OmegaR = randn(l, m);
  149. B = OmegaR * X;
  150. for j = 1:4
  151. B = (B * X') * X;
  152. end
  153. [R,~] = qr(B', 0);
  154. end