find_best_filters.py 5.3 KB

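"""Find the best combination of noise filters for a given model.

An Iterated Local Search (ILS) explores binary vectors of length 13 (one bit
per filter); the fitness of a candidate is the ROC AUC obtained on the test
set by the chosen model trained only on the selected filters' features.
"""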
# main imports
import os
import sys
import argparse
import pandas as pd
import numpy as np
import logging

# model imports
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
import sklearn.svm as svm
from sklearn.utils import shuffle
import joblib  # `sklearn.externals.joblib` was removed from recent scikit-learn releases
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import cross_val_score

# modules and config imports
sys.path.insert(0, '')  # trick to enable import of main folder module
import custom_config as cfg
import models as mdl

from optimization.algorithms.IteratedLocalSearch import IteratedLocalSearch as ILS
from optimization.solutions.BinarySolution import BinarySolution
from optimization.operators.mutators.SimpleMutation import SimpleMutation
from optimization.operators.mutators.SimpleBinaryMutation import SimpleBinaryMutation
from optimization.operators.crossovers.SimpleCrossover import SimpleCrossover
from optimization.operators.policies.RandomPolicy import RandomPolicy

# variables and parameters
models_list = cfg.models_names_list
number_of_values = 26  # 13 filters, 2 feature columns each
ils_iteration = 10000
ls_iteration = 20

# default validator: a candidate solution must enable at least 5 filters
def validator(solution):

    if list(solution.data).count(1) < 5:
        return False

    return True

# init solution (13 filters)
def init():
    return BinarySolution([], 13).random(validator)
def loadDataset(filename):

    ########################
    # 1. Get and prepare data
    ########################
    dataset_train = pd.read_csv(filename + '.train', header=None, sep=";")
    dataset_test = pd.read_csv(filename + '.test', header=None, sep=";")

    # default first shuffle of data
    dataset_train = shuffle(dataset_train)
    dataset_test = shuffle(dataset_test)

    # get dataset with equal number of class occurrences
    noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 1]
    not_noisy_df_train = dataset_train[dataset_train.iloc[:, 0] == 0]
    nb_noisy_train = len(noisy_df_train.index)

    noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 1]
    not_noisy_df_test = dataset_test[dataset_test.iloc[:, 0] == 0]
    nb_noisy_test = len(noisy_df_test.index)

    final_df_train = pd.concat([not_noisy_df_train[0:nb_noisy_train], noisy_df_train])
    final_df_test = pd.concat([not_noisy_df_test[0:nb_noisy_test], noisy_df_test])

    # shuffle data another time
    final_df_train = shuffle(final_df_train)
    final_df_test = shuffle(final_df_test)

    # use of the whole data set for training
    x_dataset_train = final_df_train.iloc[:, 1:]
    x_dataset_test = final_df_test.iloc[:, 1:]

    y_dataset_train = final_df_train.iloc[:, 0]
    y_dataset_test = final_df_test.iloc[:, 0]

    return x_dataset_train, y_dataset_train, x_dataset_test, y_dataset_test
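# Note (inferred from the parsing above, not part of the original file): the
# `.train` / `.test` files are expected to be semicolon-separated, with the
# class label in column 0 (1 = noisy, 0 = not noisy) and the filter features
# in the remaining columns. Illustrative call, with a hypothetical path:
#   x_train, y_train, x_test, y_test = loadDataset('data/my_dataset')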
def main():

    parser = argparse.ArgumentParser(description="Train and find best filters to use for model")

    parser.add_argument('--data', type=str, help='dataset filename prefix (without .train and .test)')
    parser.add_argument('--choice', type=str, help='model choice from list of choices', choices=models_list)

    args = parser.parse_args()

    p_data_file = args.data
    p_choice = args.choice

    # load data from file
    x_train, y_train, x_test, y_test = loadDataset(p_data_file)

    # create `logs` folder if necessary
    if not os.path.exists(cfg.logs_folder):
        os.makedirs(cfg.logs_folder)

    logging.basicConfig(format='%(asctime)s %(message)s', filename='logs/%s.log' % p_data_file.split('/')[-1], level=logging.DEBUG)

    # define evaluate function here (it needs access to the loaded data)
    def evaluate(solution):

        # get indices of the filter columns to use (each selected filter
        # contributes two consecutive feature columns)
        indices = []

        for index, value in enumerate(solution.data):
            if value == 1:
                indices.append(index * 2)
                indices.append(index * 2 + 1)

        # keep only selected filters from solution
        x_train_filters = x_train.iloc[:, indices]
        y_train_filters = y_train
        x_test_filters = x_test.iloc[:, indices]

        model = mdl.get_trained_model(p_choice, x_train_filters, y_train_filters)

        y_test_model = model.predict(x_test_filters)
        test_roc_auc = roc_auc_score(y_test, y_test_model)

        return test_roc_auc
    # prepare optimization algorithm
    updators = [SimpleBinaryMutation(), SimpleMutation(), SimpleCrossover()]
    policy = RandomPolicy(updators)

    algo = ILS(init, evaluate, updators, policy, validator, True)
    bestSol = algo.run(ils_iteration, ls_iteration)

    # print best solution found
    print("Found ", bestSol)

    # save model information into .csv file
    if not os.path.exists(cfg.results_information_folder):
        os.makedirs(cfg.results_information_folder)

    filename_path = os.path.join(cfg.results_information_folder, cfg.optimization_filters_result_filename)

    line_info = p_data_file + ';' + str(ils_iteration) + ';' + str(ls_iteration) + ';' + str(bestSol.data) + ';' + str(list(bestSol.data).count(1)) + ';' + str(bestSol.fitness())
    with open(filename_path, 'a') as f:
        f.write(line_info + '\n')

    print('Result saved into %s' % filename_path)


if __name__ == "__main__":
    main()
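# Example invocation (dataset path and model name are illustrative; valid
# model names come from cfg.models_names_list):
#   python find_best_filters.py --data data/my_dataset --choice svm_model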