Browse the source

enable feature selection comparisons

Jérôme BUISINE, 3 years ago
Parent commit ef014958dc

+ 64 - 0
features_selection/methods.py

@@ -0,0 +1,64 @@
+from sklearn.feature_selection import VarianceThreshold
+from sklearn.feature_selection import SelectKBest
+from sklearn.feature_selection import chi2
+from sklearn.svm import LinearSVC
+from sklearn.feature_selection import SelectFromModel
+from sklearn.svm import SVC
+from sklearn.model_selection import StratifiedKFold
+from sklearn.feature_selection import RFECV
+from sklearn.ensemble import ExtraTreesClassifier
+
+features_selection_list = [
+    "variance_threshold",
+    "kbest",
+    "linearSVC",
+    "tree",
+    "rfecv"
+]
+
+def features_selection_method(name, params, X_train, y_train, problem_size):
+    """Return the indices of the features kept by the selected method."""
+
+    indices = []
+
+    if name == "variance_threshold":
+        threshold_param = float(params)
+        # keep features whose variance over the training set exceeds the threshold
+        sel = VarianceThreshold(threshold=threshold_param)
+        sel.fit(X_train)
+
+        indices = sel.get_support(indices=True)
+
+    if name == "kbest":
+        k_param = int(float(params) * problem_size) # params is the fraction of features to keep
+        model = SelectKBest(chi2, k=k_param).fit(X_train, y_train) # fit (not fit_transform) so get_support is available
+
+        indices = model.get_support(indices=True)
+
+    if name == "linearSVC":
+        C_param = float(params)
+        lsvc = LinearSVC(C=C_param, penalty="l1", dual=False).fit(X_train, y_train)
+        model = SelectFromModel(lsvc, prefit=True)
+
+        indices = model.get_support(indices=True)
+
+    if name == "tree":
+        n_estimators_param = int(params)
+        clf = ExtraTreesClassifier(n_estimators=n_estimators_param)
+        clf = clf.fit(X_train, y_train)
+        model = SelectFromModel(clf, prefit=True)
+
+        indices = model.get_support(indices=True)
+
+    if name == "rfecv":
+        cv_param = int(params)
+        # create the RFE object and compute a cross-validated score,
+        # ranking feature subsets by ROC AUC
+        svc = SVC(kernel="linear")
+        rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(cv_param),
+                      scoring='roc_auc')
+        rfecv.fit(X_train, y_train)
+
+        indices = rfecv.get_support(indices=True)
+
+    return indices
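
A quick way to sanity-check `features_selection_method` outside the commit (a minimal sketch; `make_classification` and the rescale to [0, 1] are assumptions so that `chi2` accepts the input):

from sklearn.datasets import make_classification
from sklearn.preprocessing import MinMaxScaler
from methods import features_selection_method

X, y = make_classification(n_samples=200, n_features=20, n_informative=5)
X = MinMaxScaler().fit_transform(X)  # chi2 requires non-negative features

# keep the best 60% of features according to chi2
indices = features_selection_method("kbest", "0.6", X, y, problem_size=X.shape[1])
X_reduced = X[:, indices]  # indices are column indices into X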

+ 71 - 0
features_selection/run_all_openML.py

@@ -0,0 +1,71 @@
+import os, argparse
+
+params = {
+    "variance_threshold": [
+        "0.001",
+        "0.01",
+        "0.05",
+        "0.1",
+    ],
+    "kbest": [
+        "0.9",
+        "0.8",
+        "0.7",
+        "0.6",
+    ],
+    "linearSVC": [
+        "0.1",
+        "1",
+        "10",
+        "100"
+    ],
+    "tree": [
+        "10",
+        "50",
+        "100",
+        "200",
+    ],
+    "rfecv": [
+        "3",
+        "4",
+        "5"
+    ]
+}
+
+open_ml_problems_folder = 'OpenML_datasets'
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Run every feature selection method and parameter setting over the OpenML datasets")
+
+    parser.add_argument('--ntrain', type=int, help='number of training runs used to average the scores', default=1)
+    parser.add_argument('--output', type=str, help='output features selection results', required=True)
+
+    args = parser.parse_args()
+
+    p_ntrain    = args.ntrain
+    p_output    = args.output
+
+    open_ml_problems = os.listdir(open_ml_problems_folder)
+
+    for ml_problem in open_ml_problems:
+
+        ml_problem_name = ml_problem.replace('.csv', '')
+        ml_problem_path = os.path.join(open_ml_problems_folder, ml_problem)
+
+        for key, values in params.items():
+
+            for param in values:
+
+                print(f'Run features selection for OpenML `{ml_problem_name}` problem with {{method: {key}, params: {param}, ntrain: {p_ntrain}}}')
+                command_str = f'python features_selection/run_method_openML.py ' \
+                            f'--data {ml_problem_path} ' \
+                            f'--method {key} ' \
+                            f'--params {param} ' \
+                            f'--ntrain {p_ntrain} ' \
+                            f'--output {p_output}'
+                             
+                os.system(command_str)
+
+if __name__ == "__main__":
+    main()
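
Note on the inner loop: `os.system` re-parses the whole command through the shell, so a dataset path containing spaces would break it. A safer variant using `subprocess` (a sketch, not part of the commit):

import subprocess

subprocess.run([
    'python', 'features_selection/run_method_openML.py',
    '--data', ml_problem_path,
    '--method', key,
    '--params', param,
    '--ntrain', str(p_ntrain),
    '--output', p_output,
], check=True)  # check=True stops the sweep if one run fails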

+ 137 - 0
features_selection/run_method_openML.py

@@ -0,0 +1,137 @@
+import os, argparse
+
+import numpy as np
+import pandas as pd
+
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import MinMaxScaler
+from sklearn.model_selection import GridSearchCV
+from sklearn.metrics import roc_auc_score, accuracy_score
+import sklearn.svm as svm
+
+from methods import features_selection_list, features_selection_method
+
+
+def train_model(X_train, y_train):
+
+    print('Creating model...')
+    # SVM with grid-searched hyperparameters
+    Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
+    gammas = [0.001, 0.01, 0.1, 10, 100, 1000]
+    param_grid = {'kernel': ['rbf'], 'C': Cs, 'gamma': gammas}
+
+    svc = svm.SVC(probability=True, class_weight='balanced')
+    clf = GridSearchCV(svc, param_grid, cv=2, verbose=1, n_jobs=-1)
+
+    clf.fit(X_train, y_train)
+
+    model = clf.best_estimator_
+
+    return model
+
+def loadDataset(filename):
+
+    ########################
+    # 1. Get and prepare data
+    ########################
+    dataset = pd.read_csv(filename, sep=',')
+
+    # remap the two class labels to {0, 1}
+    min_label_value = min(dataset.iloc[:, -1])
+    max_label_value = max(dataset.iloc[:, -1])
+
+    dataset.iloc[:, -1] = dataset.iloc[:, -1].replace(min_label_value, 0)
+    dataset.iloc[:, -1] = dataset.iloc[:, -1].replace(max_label_value, 1)
+
+    X_dataset = dataset.iloc[:, :-1]
+    y_dataset = dataset.iloc[:, -1]
+
+    problem_size = len(X_dataset.columns)
+
+    # min/max normalisation over each feature
+    # create a scaler object
+    scaler = MinMaxScaler()
+    # fit and transform the data
+    X_dataset = np.array(pd.DataFrame(scaler.fit_transform(X_dataset), columns=X_dataset.columns))
+
+    # prepare train, validation and test datasets
+    X_train, X_test, y_train, y_test = train_test_split(X_dataset, y_dataset, test_size=0.3, shuffle=True)
+
+    return X_train, y_train, X_test, y_test, problem_size
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Run one feature selection method with given params on an OpenML dataset")
+
+    parser.add_argument('--data', type=str, help='OpenML dataset CSV file', required=True)
+    parser.add_argument('--method', type=str, help='method name to use', choices=features_selection_list, required=True)
+    parser.add_argument('--params', type=str, help='params used for the current selected method', required=True)
+    parser.add_argument('--ntrain', type=int, help='number of training runs used to average the scores', default=1)
+    parser.add_argument('--output', type=str, help='output file for features selection results', required=True)
+
+    args = parser.parse_args()
+
+    p_data_file = args.data
+    p_method    = args.method
+    p_params    = args.params
+    p_ntrain    = args.ntrain
+    p_output    = args.output
+
+    # load data from file and get problem size
+    X_train, y_train, X_test, y_test, problem_size = loadDataset(p_data_file)
+
+    features_indices = features_selection_method(p_method, p_params, X_train, y_train, problem_size)
+
+    print(f'Selected {len(features_indices)} features out of {problem_size}')
+
+    # get reduced dataset
+    X_train_reduced = X_train[:, features_indices]
+    X_test_reduced = X_test[:, features_indices]
+
+
+    auc_scores = []
+    acc_scores = []
+    
+    for i in range(p_ntrain):
+
+        # train model on the reduced dataset
+        model = train_model(X_train_reduced, y_train)
+
+        # get predicted labels over the test dataset
+        # (predict already returns class labels in {0, 1}, so no thresholding is needed)
+        y_test_predict = model.predict(X_test_reduced)
+        test_roc_auc = roc_auc_score(y_test, y_test_predict)
+        test_acc = accuracy_score(y_test, y_test_predict)
+
+        print(f'Run #{i}: {test_roc_auc} (ROC AUC)')
+
+        # append score into list of run
+        auc_scores.append(test_roc_auc)
+        acc_scores.append(test_acc)
+
+    mean_auc_score = sum(auc_scores) / len(auc_scores)
+    mean_acc_score = sum(acc_scores) / len(acc_scores)
+
+    print(f'Mean test ROC AUC using {p_method} (params: {p_params}): {mean_auc_score:.2f}')
+
+    # append the results of this method/params run to the output CSV
+    header_line = 'dataset;method;params;ntrain;n_features;acc_test;auc_test;features_indices\n'
+    data_line = f'{p_data_file};{p_method};{p_params};{p_ntrain};{len(features_indices)};{mean_acc_score};{mean_auc_score};{" ".join(list(map(str, features_indices)))}\n'
+
+    output_folder, _ = os.path.split(p_output)
+
+    if len(output_folder) > 0:
+        # create the output folder if it does not exist yet
+        os.makedirs(output_folder, exist_ok=True)
+
+    if not os.path.exists(p_output):
+        with open(p_output, 'w') as f:
+            f.write(header_line)
+
+    with open(p_output, 'a') as f:
+        f.write(data_line)
+    
+
+if __name__ == "__main__":
+    main()
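
Since the output file is a `;`-separated CSV with one row per (dataset, method, params) run, the comparison announced in the commit message can be read back directly with pandas (a sketch; `results.csv` stands for whatever path was passed to `--output`):

import pandas as pd

results = pd.read_csv('results.csv', sep=';')

# best mean test AUC reached by each selection method
print(results.groupby('method')['auc_test'].max())

# winning (method, params) pair per dataset
best = results.loc[results.groupby('dataset')['auc_test'].idxmax()]
print(best[['dataset', 'method', 'params', 'auc_test']])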

+ 0 - 2
find_best_attributes_surrogate_openML.py

@@ -77,8 +77,6 @@ def train_model(X_train, y_train):
 
 def loadDataset(filename):
 
-    # TODO : load data using DL RNN 
-
     ########################
     # 1. Get and prepare data
     ########################