ensemble_model_train_v2.py

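"""Train and save a soft-voting ensemble for noisy / not-noisy classification.

The script reads a `;`-separated CSV whose first column is a binary label
(1 = noisy, 0 = not noisy), balances the two classes, fits five classifiers
plus a soft-voting ensemble over them, reports train/test accuracy and dumps
the fitted ensemble with joblib.

Usage: python ensemble_model_train_v2.py --data xxxx --output xxxx
"""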
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import GradientBoostingClassifier
import sklearn.svm as svm
from sklearn.utils import shuffle
from sklearn.metrics import accuracy_score

import joblib  # standalone package; sklearn.externals.joblib is removed in recent scikit-learn
import numpy as np
import pandas as pd

import sys, os, getopt

current_dirpath = os.getcwd()
output_model_folder = os.path.join(current_dirpath, 'saved_models')

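# Hyper-parameter search for the SVM member of the ensemble.
# probability=True is needed so the fitted SVC exposes predict_proba,
# which the soft-voting ensemble built in main() relies on.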
def get_best_model(X_train, y_train):

    # wider grid kept for reference; only `parameters` below is actually searched
    #Cs = [0.001, 0.01, 0.1, 1, 10, 20, 30]
    #gammas = [0.001, 0.01, 0.1, 1, 5, 10]
    #param_grid = {'kernel': ['rbf'], 'C': Cs, 'gamma': gammas}

    parameters = {'kernel': ['rbf'], 'C': np.arange(1, 20)}

    svc = svm.SVC(gamma="scale", probability=True, max_iter=10000)
    clf = GridSearchCV(svc, parameters, cv=5, scoring='accuracy', verbose=10)

    clf.fit(X_train, y_train)

    model = clf.best_estimator_
    return model

def main():

    if len(sys.argv) <= 1:
        print('Missing arguments, expected usage:')
        print('python ensemble_model_train_v2.py --data xxxx --output xxxx')
        sys.exit(2)

    try:
        # -d/--data and -o/--output both take a value, hence the trailing colons
        opts, args = getopt.getopt(sys.argv[1:], "hd:o:", ["help", "data=", "output="])
    except getopt.GetoptError:
        # print help information and exit
        print('python ensemble_model_train_v2.py --data xxxx --output xxxx')
        sys.exit(2)

    for o, a in opts:
        if o in ("-h", "--help"):
            print('python ensemble_model_train_v2.py --data xxxx --output xxxx')
            sys.exit()
        elif o in ("-d", "--data"):
            p_data_file = a
        elif o in ("-o", "--output"):
            p_output = a
        else:
            assert False, "unhandled option"

    if not os.path.exists(output_model_folder):
        os.makedirs(output_model_folder)

    # get and split data
    dataset = pd.read_csv(p_data_file, header=None, sep=";")

    # default first shuffle of data
    dataset = shuffle(dataset)

    # balance the classes: keep as many `not noisy` rows as there are `noisy` ones
    noisy_df = dataset[dataset.iloc[:, 0] == 1]
    not_noisy_df = dataset[dataset.iloc[:, 0] == 0]
    nb_noisy = len(noisy_df.index)

    final_df = pd.concat([not_noisy_df.iloc[0:nb_noisy], noisy_df])
    #final_df = pd.concat([not_noisy_df, noisy_df])

    # shuffle data another time
    final_df = shuffle(final_df)
    print(len(final_df.index))

    # first column is the label, remaining columns are the features
    y_dataset = final_df.iloc[:, 0]
    x_dataset = final_df.iloc[:, 1:]

    X_train, X_test, y_train, y_test = train_test_split(x_dataset, y_dataset, test_size=0.5, random_state=42)

    svm_model = get_best_model(X_train, y_train)
    knc_model = KNeighborsClassifier(n_neighbors=2)
    gbc_model = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
    lr_model = LogisticRegression(solver='liblinear', multi_class='ovr', random_state=1)
    rf_model = RandomForestClassifier(n_estimators=100, random_state=1)

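    # voting='soft' averages the per-class probabilities (predict_proba) of
    # all five estimators, equally weighted here, and predicts the class with
    # the highest averaged probability.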
    ensemble_model = VotingClassifier(estimators=[
        ('lr', lr_model),
        ('knc', knc_model),
        ('gbc', gbc_model),
        ('svm', svm_model),
        ('rf', rf_model)],
        voting='soft', weights=[1, 1, 1, 1, 1])

    ensemble_model.fit(X_train, y_train)

    y_train_model = ensemble_model.predict(X_train)
    print("**Train :** " + str(accuracy_score(y_train, y_train_model)))

    y_pred = ensemble_model.predict(X_test)
    print("**Test :** " + str(accuracy_score(y_test, y_pred)))

    joblib.dump(ensemble_model, os.path.join(output_model_folder, p_output + '.joblib'))


if __name__ == "__main__":
    main()
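
# Example invocation (file and model names below are placeholders):
#   python ensemble_model_train_v2.py --data dataset.csv --output ensemble_v2
# writes the fitted ensemble to saved_models/ensemble_v2.joblib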