Source code of AdaBoost

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
""" A AdaBoost model.

The documentation is here: http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.AdaBoostClassifier.html

Overfitting
-----------
I check for overfitting via the *score* function,
which tells how many passengers of the training dataset
are classified in the right category by the classifier.

This score should be < 95%, otherwise it is "suspicious".

This paragraph applies to all the other classifiers presented
afterwards, but it is not repeated there.
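
A minimal sketch of this check (``clf``, ``X_train`` and ``y_train`` are
placeholder names for a fitted classifier and its training arrays; the script
below uses ``AdaBoost`` and slices of ``train_data``)::

    score = 100.0 * clf.score(X_train, y_train)  # % of training passengers classified correctly
    if score > 95.0:
        print("Suspiciously high training score: the model is probably overfitting.")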

--------------------------------------------------------------------------------

Script output
-------------
.. runblock:: console

    $ python AdaBoost.py

Results
-------
Submitting this result to Kaggle gives 75.12%.

--------------------------------------------------------------------------------
"""
__author__	= 'Lilian BESSON (mailto:lilian.besson[AT]normale.fr)'

from KaggleModel import *

################################################################################
# Beginning to learn

from sklearn.ensemble import AdaBoostClassifier
from sklearn.utils import shuffle
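# Note: shuffle is only used by the (currently commented-out) shuffling of
# train_data in the meta-learning loop below.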

################################################################################
# OK, let's use this 'cross-validation' process to find the best
# meta-parameter: n_estimators
n_estimators_quality = {}
list_n_estimators = [1,5,10,20,30,40,50,60,75,82,100]	#: Search space
Number_try = 5		#: Number of runs used for meta-learning
proportion_train = 0.67 #: Proportion of passengers used for meta-learning.
print("Find the best value for the meta-parameter n_estimators, with %i runs for each..." % Number_try)
print("Searching in the range : %s..." % str(list_n_estimators))

print("""Using the first part (%2.2f%%, %i passengers) of the training dataset as training, 
and the second part (%2.2f%%, %i passengers) as testing !"""
 % ( 100.0*proportion_train, int(number_passengers*proportion_train),
     100.0*(1-proportion_train), number_passengers - int(number_passengers*proportion_train) ))
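# For each candidate value of n_estimators, fit the classifier Number_try times
# on the first part of the training data, measure its accuracy on the held-out
# second part, and keep the mean accuracy as the "quality" of that value.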

for n_estimators in list_n_estimators:
#	train_data = shuffle(train_data)
	AdaBoost = AdaBoostClassifier(n_estimators = n_estimators)
	print("For %i random tree(s), learning from the first part of the dataset..." % n_estimators)
	quality=[]
	for nb_essais in xrange(Number_try):
#		train_data = shuffle(train_data)
		AdaBoost = AdaBoost.fit(train_data[0:int(number_passengers*proportion_train),1::],
		 train_data[0:int(number_passengers*proportion_train),0])
		Output = AdaBoost.predict(train_data[int(number_passengers*proportion_train)::,1::])
		quality.append(100.0 * Output[Output == train_data[int(number_passengers*proportion_train)::,0]].size / Output.size)
	n_estimators_quality[n_estimators] = np.mean(quality)
	print("... this value of n_estimators seems to have a (mean) quality = %2.2f%%..." % np.mean(quality))

val = n_estimators_quality.values()
#: The optimal value found for the meta-parameter n_estimators
best_n_estimators = max(n_estimators_quality, key=n_estimators_quality.get)
print("After trying each of the following n_estimators values (%s), %i times each, the best one is %i (for a quality = %2.2f%%)."
 % (str(list_n_estimators), Number_try, best_n_estimators, np.max(val)))


################################################################################
# And then use this 'best' value of n_estimators to predict on the test dataset
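# The classifier is now refit on the whole training dataset with this best value.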
print("Creating the adaboost classifier (of %i estimators)..." % n_estimators)
AdaBoost = AdaBoostClassifier(n_estimators = best_n_estimators)
print("Learning...")
AdaBoost = AdaBoost.fit(train_data[0::,1::],train_data[0::,0])
#: The score for this classifier.
score = (100.0*AdaBoost.score(train_data[0::,1::], train_data[0::,0]) )
print(" Proportion of perfect fitting for the training dataset = %2.2f%%" % 
  score)
# ~ must be < 95%

# Predict on the testing set
test_file_object = csv.reader(open('test.csv', 'rb'))
header = test_file_object.next()

print("Predicting for the testing dataset")
Output = AdaBoost.predict(test_data) 
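# Output contains one predicted class (0 or 1) per row of test_data,
# assumed to follow the same row order as test.csv (as built in KaggleModel).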

# Write the output
open_file_object = csv.writer(open("csv/AdaBoost_best.csv", "wb"))
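# Each output row is the original test.csv row with the predicted class
# prepended; note that no header row is written.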

z = 0
for row in test_file_object:
	row.insert(0, int(Output[z])) # Insert the prediction at the start of the row
	open_file_object.writerow(row) # Write the row to the file
	z += 1

print("Prediction: wrote in the file csv/AdaBoost_best.csv.")