# emwnenmf_restart.py
import time

import matplotlib.pyplot as plt
import numpy as np
def emwnenmf_restart(data, G, F, r, Tmax):
    """Expectation-Maximisation weighted NMF with momentum-restarted NNLS inner solves.

    Alternately re-estimates the missing entries of the data matrix (E-step)
    and updates the factors F and G with the accelerated NNLS solver (M-step),
    recording the per-row RMSE of F against the reference data.F at regular
    wall-clock intervals until the time budget Tmax is exhausted.

    Parameters
    ----------
    data : object
        Problem container. The code reads data.X (observed matrix with missing
        entries zeroed), data.nW (mask selecting the missing entries),
        data.idxOF / data.sparsePhi_F and data.idxOG / data.sparsePhi_G
        (flat indices and values of entries of F and G fixed by constraints),
        data.Phi_F / data.Phi_G (dense constraint matrices) and data.F
        (ground-truth F used only for RMSE reporting).
        NOTE(review): schema inferred from usage here — confirm against caller.
    G, F : ndarray
        Initial factors; X is approximated by G @ F.
    r : int
        Factorisation rank (unused in this body; kept for interface parity).
    Tmax : float
        Wall-clock time budget in seconds.

    Returns
    -------
    dict with keys 'RMSE' (2 x (em_iter_max+1) array, NaN where never reached)
    and 'T' (measurement timestamps, seconds since start).
    """
    tol = 1e-5
    delta_measure = 1  # seconds between successive RMSE/T measurements
    em_iter_max = round(Tmax / delta_measure) + 1  # number of measurement slots
    T = np.empty(shape=(em_iter_max + 1))
    T.fill(np.nan)
    RMSE = np.empty(shape=(2, em_iter_max + 1))
    RMSE.fill(np.nan)

    ITER_MAX = 200  # maximum inner iteration number (Default)
    ITER_MIN = 10  # minimum inner iteration number (Default)

    # Clamp the constrained entries of F and G to their prescribed values,
    # then fill the unobserved entries of X with the current model G @ F.
    np.put(F, data.idxOF, data.sparsePhi_F)
    np.put(G, data.idxOG, data.sparsePhi_G)
    X = data.X + np.multiply(data.nW, np.dot(G, F))

    # Cached Gram/cross products reused by the NNLS subproblems.
    FXt = np.dot(F, X.T)
    FFt = np.dot(F, F.T)
    GtX = np.dot(G.T, X)
    GtG = np.dot(G.T, G)
    GradG = np.dot(G, FFt) - FXt.T
    GradF = np.dot(GtG, F) - GtX
    # Scale the inner-solver tolerances by the initial projected-gradient norm.
    init_delta = stop_rule(np.hstack((G.T, F)), np.hstack((GradG.T, GradF)))
    tolF = tol * init_delta
    tolG = tolF  # Stopping tolerance

    # Iterative updating: G is kept transposed (r x m) from here on.
    G = G.T
    k = 0
    # RMSE over all but the last column; presumably the last column is held
    # out (e.g. an offset/abundance column) — confirm against data generator.
    RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
    T[k] = 0
    t = time.time()
    # Main loop
    while time.time() - t <= Tmax + delta_measure:
        # Estimation step: re-impute the missing entries from the current model.
        X = data.X + np.multiply(data.nW, np.dot(G.T, F))
        # Maximisation step
        # Optimize F with fixed G: zero the fixed entries, solve, restore them.
        np.put(F, data.idxOF, 0)
        F, iterF, _ = NNLS(F, GtG, GtX - GtG.dot(data.Phi_F), ITER_MIN, ITER_MAX, tolF, data.idxOF, False)
        np.put(F, data.idxOF, data.sparsePhi_F)
        # If the solver hit the minimum iteration count, tighten its tolerance.
        if iterF <= ITER_MIN:
            tolF = tolF / 10
        FFt = np.dot(F, F.T)
        FXt = np.dot(F, X.T)
        # Optimize G with fixed F (same zero/solve/restore pattern on G.T).
        np.put(G.T, data.idxOG, 0)
        G, iterG, _ = NNLS(G, FFt, FXt - FFt.dot(data.Phi_G.T), ITER_MIN, ITER_MAX, tolG, data.idxOG, True)
        np.put(G.T, data.idxOG, data.sparsePhi_G)
        if iterG <= ITER_MIN:
            tolG = tolG / 10
        GtG = np.dot(G, G.T)
        GtX = np.dot(G, X)
        # Record RMSE/T once per delta_measure seconds of elapsed wall clock.
        if time.time() - t - k * delta_measure >= delta_measure:
            k = k + 1
            if k >= em_iter_max + 1:
                break
            RMSE[:, k] = np.linalg.norm(F[:, 0:-1] - data.F[:, 0:-1], 2, axis=1) / np.sqrt(F.shape[1] - 1)
            T[k] = time.time() - t
    return {'RMSE': RMSE, 'T': T}
  65. def stop_rule(X, GradX):
  66. # Stopping Criterions
  67. pGrad = GradX[np.any(np.dstack((X > 0, GradX < 0)), 2)]
  68. return np.linalg.norm(pGrad, 2)
  69. def NNLS(Z, GtG, GtX, iterMin, iterMax, tol, idxfixed, transposed):
  70. L = np.linalg.norm(GtG, 2) # Lipschitz constant
  71. H = Z # Initialization
  72. Grad = np.dot(GtG, Z) - GtX # Gradient
  73. alpha1 = np.ones(shape=(2, 1))
  74. for iter in range(1, iterMax + 1):
  75. H0 = H
  76. H = np.maximum(Z - Grad / L, 0) # Calculate squence 'Y'
  77. grad_scheme = np.greater(Grad.dot(H.T - H0.T), 0)
  78. if np.any(grad_scheme[:, 0]):
  79. alpha1[0] = 1
  80. # break
  81. if np.any(grad_scheme[:, 1]):
  82. alpha1[1] = 1
  83. # break
  84. if transposed: # If Z = G.T
  85. np.put(H.T, idxfixed, 0)
  86. else: # If Z = F
  87. np.put(H, idxfixed, 0)
  88. alpha2 = 0.5 * (1 + np.sqrt(1 + 4 * alpha1 ** 2))
  89. Z = H + ((alpha1 - 1) / alpha2) * (H - H0)
  90. alpha1 = alpha2
  91. Grad = np.dot(GtG, Z) - GtX
  92. # Stopping criteria
  93. if iter >= iterMin:
  94. # Lin's stopping criteria
  95. pgn = stop_rule(Z, Grad)
  96. if pgn <= tol:
  97. break
  98. return H, iter, Grad
  99. def nmf_norm_fro(X, G, F, *args):
  100. W = args
  101. if len(W) == 0:
  102. f = np.square(np.linalg.norm(X - np.dot(G, F), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
  103. else:
  104. W = W[0]
  105. f = np.square(np.linalg.norm(X - np.multiply(W, np.dot(G, F)), 'fro')) / np.square(np.linalg.norm(X, 'fro'))
  106. return f