#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
A dummy model, with 3 strategies:
 * stratified;
 * most_frequent;
 * uniform.

The doc is here:
http://scikit-learn.org/dev/modules/generated/sklearn.dummy.DummyClassifier.html#sklearn.dummy.DummyClassifier

--------------------------------------------------------------------------------
Script output
-------------

.. runblock:: console

   $ python DummyClassifier.py

Results
-------

Submitting the result to Kaggle gives ??.??% (not done yet).
--------------------------------------------------------------------------------
"""

__author__ = 'Lilian BESSON (mailto:lilian.besson[AT]normale.fr)'

import csv
import numpy as np

from KaggleModel import *  # provides train_data, test_data, number_passengers, ...

################################################################################
# Beginning to learn
from sklearn.dummy import DummyClassifier
from sklearn.utils import shuffle

################################################################################
# OK, let's use this 'cross validation' process to find the best
# meta parameter: strategy
strategy_quality = {}
list_strategy = ['stratified', 'most_frequent', 'uniform']  #: Search space
Number_try = 100  #: Number of runs used for meta-learning
proportion_train = 0.75  #: Proportion of passengers used for meta-learning.

print("Finding the best value for the meta parameter strategy, with %i runs for each..." % Number_try)
print("Searching in the range: %s..." % str(list_strategy))
print("""Using the first part (%2.2f%%, %i passengers) of the training dataset as training,
and the second part (%2.2f%%, %i passengers) as testing!""" % (
    100.0 * proportion_train, int(number_passengers * proportion_train),
    100.0 * (1 - proportion_train), number_passengers - int(number_passengers * proportion_train)))

#: Index where the first (training) part stops and the second (testing) part starts
split = int(number_passengers * proportion_train)

for strategy in list_strategy:
    # train_data = shuffle(train_data)
    Dummy = DummyClassifier(strategy=strategy)
    print("For the strategy %s, learning from the first part of the dataset..." % strategy)
    quality = []
    for nb_essais in range(Number_try):
        # train_data = shuffle(train_data)
        Dummy = Dummy.fit(train_data[:split, 1:], train_data[:split, 0])
        # Predict on the second part only (the original slice started at
        # number_passengers - split, which overlapped the training part)
        Output = Dummy.predict(train_data[split:, 1:])
        quality.append(100.0 * Output[Output == train_data[split:, 0]].size / Output.size)
    strategy_quality[strategy] = np.mean(quality)
    print("... this value of strategy seems to have a (mean) quality = %2.2f%%..." % np.mean(quality))

#: The best value found for the meta parameter strategy
best_strategy = max(strategy_quality, key=strategy_quality.get)
print("After trying each of the following strategies (%s), %i times each, the best one is %s (for a quality = %2.2f%%)."
      % (str(list_strategy), Number_try, best_strategy, strategy_quality[best_strategy]))

################################################################################
# And then use this 'best' value of strategy to predict on the test dataset
print("Creating the DummyClassifier with the best meta parameters.")
Dummy = DummyClassifier(strategy=best_strategy)

print("Learning...")
Dummy = Dummy.fit(train_data[:, 1:], train_data[:, 0])
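################################################################################
# Optional cross-check (an illustrative sketch, not part of the original
# pipeline): the chosen strategy can also be evaluated with scikit-learn's
# built-in k-fold cross-validation instead of the manual split above.
# It only assumes train_data as loaded by KaggleModel (first column = label,
# remaining columns = features).
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(DummyClassifier(strategy=best_strategy),
                            train_data[:, 1:], train_data[:, 0], cv=5)
print("5-fold cross-validation of the strategy %s: mean quality = %2.2f%%"
      % (best_strategy, 100.0 * np.mean(cv_scores)))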
#: The score for this classifier.
score = 100.0 * Dummy.score(train_data[:, 1:], train_data[:, 0])
print(" Proportion of perfect fitting for the training dataset = %2.2f%%" % score)  # ~ must be < 95%

################################################################################
# Predict on the testing set
print("Predicting for the testing dataset...")
Output = Dummy.predict(test_data)

# Write the output: the prediction is inserted at the start of each row of test.csv
with open('test.csv', 'r', newline='') as test_file, \
        open('csv/Dummy_best.csv', 'w', newline='') as result_file:
    test_file_object = csv.reader(test_file)
    header = next(test_file_object)  # Skip the header row of test.csv
    open_file_object = csv.writer(result_file)
    for z, row in enumerate(test_file_object):
        row.insert(0, int(Output[z]))  # Insert the prediction at the start of the row
        open_file_object.writerow(row)  # Write the row to the file

print("Prediction: wrote to the file csv/Dummy_best.csv.")
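################################################################################
# Quick sanity check on the predictions (an illustrative sketch added here,
# not in the original script): print how many passengers fall in each
# predicted class, using only the Output array computed above.
values, counts = np.unique(Output, return_counts=True)
for value, count in zip(values, counts):
    print("Predicted class %i for %i passengers (%2.2f%%)."
          % (int(value), count, 100.0 * count / Output.size))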