prediction_scene.py

# Evaluate a saved (joblib-serialized) model on a scene dataset and report
# global, noisy and not-noisy accuracy scores.
import joblib
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from keras.models import Sequential
from keras.layers import Conv1D, MaxPooling1D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras import backend as K
from keras.models import model_from_json
import sys, os, getopt
import json

from modules.utils import config as cfg

output_model_folder = cfg.saved_models_folder

def main():

    p_scene = None  # scene name is optional and only used to label the printed output

    if len(sys.argv) <= 1:
        print('Run with default parameters...')
        print('python prediction_scene.py --data xxxx.csv --model xxxx.joblib --output xxxx --scene xxxx')
        sys.exit(2)

    try:
        opts, args = getopt.getopt(sys.argv[1:], "hd:m:o:s:", ["help", "data=", "model=", "output=", "scene="])
    except getopt.GetoptError:
        # print help information and exit
        print('python prediction_scene.py --data xxxx.csv --model xxxx.joblib --output xxxx --scene xxxx')
        sys.exit(2)

    for o, a in opts:
        if o in ("-h", "--help"):
            print('python prediction_scene.py --data xxxx.csv --model xxxx.joblib --output xxxx --scene xxxx')
            sys.exit()
        elif o in ("-d", "--data"):
            p_data_file = a
        elif o in ("-m", "--model"):
            p_model_file = a
        elif o in ("-o", "--output"):
            p_output = a
        elif o in ("-s", "--scene"):
            p_scene = a
        else:
            assert False, "unhandled option"

    if not os.path.exists(output_model_folder):
        os.makedirs(output_model_folder)
    # each row: label in column 0 (1 = noisy, 0 = not noisy), features in the remaining columns
    dataset = pd.read_csv(p_data_file, header=None, sep=";")

    y_dataset = dataset.iloc[:, 0]
    x_dataset = dataset.iloc[:, 1:]

    # split samples by label so accuracy can also be measured per class
    noisy_dataset = dataset[dataset.iloc[:, 0] == 1]
    not_noisy_dataset = dataset[dataset.iloc[:, 0] == 0]

    y_noisy_dataset = noisy_dataset.iloc[:, 0]
    x_noisy_dataset = noisy_dataset.iloc[:, 1:]

    y_not_noisy_dataset = not_noisy_dataset.iloc[:, 0]
    x_not_noisy_dataset = not_noisy_dataset.iloc[:, 1:]

    # load the serialized model and predict on the whole scene and on each class
    model = joblib.load(p_model_file)

    y_pred = model.predict(x_dataset)
    y_noisy_pred = model.predict(x_noisy_dataset)
    y_not_noisy_pred = model.predict(x_not_noisy_dataset)

    accuracy_global = accuracy_score(y_dataset, y_pred)
    accuracy_noisy = accuracy_score(y_noisy_dataset, y_noisy_pred)
    accuracy_not_noisy = accuracy_score(y_not_noisy_dataset, y_not_noisy_pred)
    if p_scene:
        print(p_scene + " | " + str(accuracy_global) + " | " + str(accuracy_noisy) + " | " + str(accuracy_not_noisy))
    else:
        print(str(accuracy_global) + " \t | " + str(accuracy_noisy) + " \t | " + str(accuracy_not_noisy))

    # write the accuracy summary followed by the per-sample predictions
    with open(p_output, 'w') as f:
        f.write("Global accuracy found %s\n" % str(accuracy_global))
        f.write("Noisy accuracy found %s\n" % str(accuracy_noisy))
        f.write("Not noisy accuracy found %s\n" % str(accuracy_not_noisy))

        for prediction in y_pred:
            f.write(str(prediction) + '\n')


if __name__ == "__main__":
    main()
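
A typical invocation follows the usage string the script prints; the data, model, output and scene names below are placeholders for illustration, not files shipped with the project:

python prediction_scene.py --data scene_samples.csv --model saved_models/model.joblib --output scene_predictions.txt --scene SceneA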