run_method_openML.py

import os, argparse

import numpy as np
import pandas as pd

from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import roc_auc_score, accuracy_score
import sklearn.svm as svm

from methods import features_selection_list, features_selection_method

def train_model(X_train, y_train):

    print('Creating model...')

    # SVM with RBF kernel, tuned by grid search cross-validation over C and gamma
    Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
    gammas = [0.001, 0.01, 0.1, 10, 100, 1000]
    param_grid = {'kernel': ['rbf'], 'C': Cs, 'gamma': gammas}

    svc = svm.SVC(probability=True, class_weight='balanced')
    clf = GridSearchCV(svc, param_grid, cv=2, verbose=1, n_jobs=-1)
    clf.fit(X_train, y_train)

    model = clf.best_estimator_

    return model

def loadDataset(filename):

    ########################
    # 1. Get and prepare data
    ########################
    dataset = pd.read_csv(filename, sep=',')

    # map the two class labels to 0 and 1 (binary classification assumed)
    min_label_value = min(dataset.iloc[:, -1])
    max_label_value = max(dataset.iloc[:, -1])
    dataset.iloc[:, -1] = dataset.iloc[:, -1].map({min_label_value: 0, max_label_value: 1})

    X_dataset = dataset.iloc[:, :-1]
    y_dataset = dataset.iloc[:, -1]

    problem_size = len(X_dataset.columns)

    # min/max normalisation over each feature
    scaler = MinMaxScaler()

    # fit and transform the data (returns a numpy array)
    X_dataset = scaler.fit_transform(X_dataset)

    # prepare train and test datasets
    X_train, X_test, y_train, y_test = train_test_split(X_dataset, y_dataset, test_size=0.3, shuffle=True)

    return X_train, y_train, X_test, y_test, problem_size

def main():

    parser = argparse.ArgumentParser(description="Evaluate a feature selection method over an OpenML dataset")
    parser.add_argument('--data', type=str, help='OpenML dataset filename', required=True)
    parser.add_argument('--method', type=str, help='feature selection method name to use', choices=features_selection_list, required=True)
    parser.add_argument('--params', type=str, help='params used for the selected method', required=True)
    parser.add_argument('--ntrain', type=int, help='number of training runs used to average the scores', default=1)
    parser.add_argument('--output', type=str, help='output file for the feature selection results', required=True)

    args = parser.parse_args()

    p_data_file = args.data
    p_method = args.method
    p_params = args.params
    p_ntrain = args.ntrain
    p_output = args.output
    # load data from file and get problem size
    X_train, y_train, X_test, y_test, problem_size = loadDataset(p_data_file)

    # extract indices of the selected features
    features_indices = features_selection_method(p_method, p_params, X_train, y_train, problem_size)

    print(f'Selected {len(features_indices)} features over {problem_size}')

    auc_scores = []
    acc_scores = []

    for i in range(p_ntrain):

        # new split of the dataset for each run
        X_train, y_train, X_test, y_test, problem_size = loadDataset(p_data_file)

        # reduce the dataset to the selected features
        X_train_reduced = X_train[:, features_indices]
        X_test_reduced = X_test[:, features_indices]

        # train a model over the reduced dataset
        model = train_model(X_train_reduced, y_train)

        # predicted probabilities of the positive class over the test dataset,
        # thresholded at 0.5 for the hard labels
        y_test_proba = model.predict_proba(X_test_reduced)[:, 1]
        y_test_predict = [1 if x > 0.5 else 0 for x in y_test_proba]

        test_roc_auc = roc_auc_score(y_test, y_test_proba)
        test_acc = accuracy_score(y_test, y_test_predict)

        print(f'Run {i}: {test_roc_auc} (AUC ROC)')

        # append scores of this run
        auc_scores.append(test_roc_auc)
        acc_scores.append(test_acc)

    mean_auc_score = sum(auc_scores) / len(auc_scores)
    mean_acc_score = sum(acc_scores) / len(acc_scores)
    var_acc_score = np.var(acc_scores)
    var_auc_score = np.var(auc_scores)
    std_acc_score = np.std(acc_scores)
    std_auc_score = np.std(auc_scores)

    print(f'Model performance using {p_method} (params: {p_params}): {mean_auc_score:.2f} (mean AUC ROC)')
    # save the aggregated scores and the selected feature indices
    header_line = 'dataset;method;params;ntrain;n_features;acc_test;auc_test;var_acc_test;var_auc_test;std_acc_test;std_auc_test;features_indices\n'
    data_line = f'{p_data_file};{p_method};{p_params};{p_ntrain};{len(features_indices)};{mean_acc_score};{mean_auc_score};{var_acc_score};{var_auc_score};{std_acc_score};{std_auc_score};{" ".join(map(str, features_indices))}\n'

    output_folder, _ = os.path.split(p_output)

    if len(output_folder) > 0 and not os.path.exists(output_folder):
        os.makedirs(output_folder)

    # write the header once, then append one result line per invocation
    if not os.path.exists(p_output):
        with open(p_output, 'w') as f:
            f.write(header_line)

    with open(p_output, 'a') as f:
        f.write(data_line)


if __name__ == "__main__":
    main()
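
Example usage (a hypothetical invocation: the dataset path and the method name
"mRMR" are placeholders, since the valid method names come from
features_selection_list in the local methods module, which is not shown here):

    python run_method_openML.py --data data/openml_dataset.csv --method mRMR \
        --params "50" --ntrain 10 --output results/openml_results.csv

Each invocation appends one semicolon-separated line of mean/variance/std
scores and the selected feature indices to the output file, writing the header
line only if the file does not exist yet.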