prediction_scene.py 4.1 KB

# main imports
import sys, os, argparse
import numpy as np
import json
import pandas as pd

# models imports
from sklearn.externals import joblib
from sklearn.metrics import accuracy_score

from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras import backend as K
from keras.models import model_from_json
from keras.wrappers.scikit_learn import KerasClassifier

# modules imports
sys.path.insert(0, '') # trick to enable import of main folder module

import custom_config as cfg

# parameters and variables
output_model_folder = cfg.saved_models_folder
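
# Example invocation (a hypothetical sketch: the file names below and the scene
# id 'A' are only placeholders; valid --scene values come from cfg.scenes_indices,
# and the --data file is passed straight to pd.read_csv inside main()):
#
#   python prediction_scene.py --data data/my_scene_dataset.test \
#                              --model saved_models/my_model.json \
#                              --output results/my_scene_performance.txt \
#                              --scene A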


def main():

    parser = argparse.ArgumentParser(description="Give model performance on specific scene")

    parser.add_argument('--data', type=str, help='dataset filename prefix of specific scene (without .train and .test)')
    parser.add_argument('--model', type=str, help='saved model (Keras or SKlearn) filename with extension')
    parser.add_argument('--output', type=str, help="filename used to store predictions and model performance obtained on the scene")
    parser.add_argument('--scene', type=str, help="scene index to predict", choices=cfg.scenes_indices)

    args = parser.parse_args()

    p_data_file = args.data
    p_model_file = args.model
    p_output = args.output
    p_scene = args.scene

    # deduce the kind of model from the file extension
    if '.joblib' in p_model_file:
        kind_model = 'sklearn'
        model_ext = '.joblib'

    elif '.json' in p_model_file:
        kind_model = 'keras'
        model_ext = '.json'

    if not os.path.exists(output_model_folder):
        os.makedirs(output_model_folder)
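
    # Expected dataset layout (as read below): ';'-separated, no header row;
    # column 0 holds the label (1 = noisy, 0 = not noisy) and the remaining
    # columns hold the feature vector.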

    dataset = pd.read_csv(p_data_file, header=None, sep=";")

    y_dataset = dataset.iloc[:, 0]
    x_dataset = dataset.iloc[:, 1:]

    # separate noisy (label 1) from not noisy (label 0) samples
    noisy_dataset = dataset[dataset.iloc[:, 0] == 1]
    not_noisy_dataset = dataset[dataset.iloc[:, 0] == 0]

    y_noisy_dataset = noisy_dataset.iloc[:, 0]
    x_noisy_dataset = noisy_dataset.iloc[:, 1:]

    y_not_noisy_dataset = not_noisy_dataset.iloc[:, 0]
    x_not_noisy_dataset = not_noisy_dataset.iloc[:, 1:]

    if kind_model == 'keras':
        # load the model architecture from JSON and its weights from the matching .h5 file
        with open(p_model_file, 'r') as f:
            json_model = json.load(f)
            model = model_from_json(json_model)
            model.load_weights(p_model_file.replace('.json', '.h5'))

            model.compile(loss='binary_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

        _, vector_size = np.array(x_dataset).shape

        # reshape all data to (samples, vector_size, 1) as expected by the Keras model
        x_dataset = np.array(x_dataset).reshape(len(x_dataset), vector_size, 1)
        x_noisy_dataset = np.array(x_noisy_dataset).reshape(len(x_noisy_dataset), vector_size, 1)
        x_not_noisy_dataset = np.array(x_not_noisy_dataset).reshape(len(x_not_noisy_dataset), vector_size, 1)

    if kind_model == 'sklearn':
        model = joblib.load(p_model_file)

    # get predictions on the whole scene and on each subset
    if kind_model == 'keras':
        y_pred = model.predict_classes(x_dataset)
        y_noisy_pred = model.predict_classes(x_noisy_dataset)
        y_not_noisy_pred = model.predict_classes(x_not_noisy_dataset)

    if kind_model == 'sklearn':
        y_pred = model.predict(x_dataset)
        y_noisy_pred = model.predict(x_noisy_dataset)
        y_not_noisy_pred = model.predict(x_not_noisy_dataset)

    # accuracy on the full scene, on noisy samples only and on not noisy samples only
    accuracy_global = accuracy_score(y_dataset, y_pred)
    accuracy_noisy = accuracy_score(y_noisy_dataset, y_noisy_pred)
    accuracy_not_noisy = accuracy_score(y_not_noisy_dataset, y_not_noisy_pred)

    if p_scene:
        print(p_scene + " | " + str(accuracy_global) + " | " + str(accuracy_noisy) + " | " + str(accuracy_not_noisy))
    else:
        print(str(accuracy_global) + " \t | " + str(accuracy_noisy) + " \t | " + str(accuracy_not_noisy))

    # save model performance and all its predictions into the output file
    with open(p_output, 'w') as f:
        f.write("Global accuracy found %s\n" % str(accuracy_global))
        f.write("Noisy accuracy found %s\n" % str(accuracy_noisy))
        f.write("Not noisy accuracy found %s\n" % str(accuracy_not_noisy))

        for prediction in y_pred:
            f.write(str(prediction) + '\n')


if __name__ == "__main__":
    main()