Source code of GradientBoosting

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
""" A Gradient Boosting model.

Seems to perform well, but is **very** slow!

The documentation is here: http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html#sklearn.ensemble.GradientBoostingClassifier

--------------------------------------------------------------------------------

Script output
-------------
.. runblock:: console

    $ python GradientBoosting.py

Results
-------
Submitting this prediction to Kaggle gives a score of 75.59%.

--------------------------------------------------------------------------------
"""
__author__ = 'Lilian BESSON (mailto:lilian.besson[AT]normale.fr)'

from KaggleModel import *  # Provides train_data, test_data, number_passengers, np, csv, etc.

################################################################################
# Beginning to learn

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.utils import shuffle

################################################################################
# OK, let's use this hold-out 'cross-validation' process to find the best
# meta-parameter: n_estimators.
# (A sketch of the same search with scikit-learn's grid search follows this block.)
n_estimators_quality = {}
#: Search space
list_n_estimators = [1, 2, 5, 7, 10, 15, 20, 50, 100]
Number_try = 10	#: Number of runs used for meta-learning (kept small because the algorithm is slow)
proportion_train = 0.68	#: Proportion of the passengers used for meta-learning.
print("Find the best value for the meta-parameter n_estimators, with %i runs for each..." % Number_try)
print("Searching in the range: %s..." % str(list_n_estimators))

print("""Using the first part (%2.2f%%, %i passengers) of the training dataset as training, 
and the second part (%2.2f%%, %i passengers) as testing !"""
 % ( 100.0*proportion_train, int(number_passengers*proportion_train),
     100.0*(1-proportion_train), number_passengers - int(number_passengers*proportion_train) ))

n_train = int(number_passengers * proportion_train)  #: Number of passengers in the training part
for n_estimators in list_n_estimators:
	GradientBoosting = GradientBoostingClassifier(n_estimators=n_estimators)
	print("For %i estimator(s), learning from the first part of the dataset..." % n_estimators)
	quality = []
	for nb_essais in xrange(Number_try):
		train_data = shuffle(train_data)
		# Fit on the first n_train rows: column 0 is the label, the other columns are the features
		GradientBoosting = GradientBoosting.fit(train_data[:n_train, 1:], train_data[:n_train, 0])
		# Evaluate on the remaining rows, which were not seen during training
		Output = GradientBoosting.predict(train_data[n_train:, 1:])
		quality.append(100.0 * np.mean(Output == train_data[n_train:, 0]))
	n_estimators_quality[n_estimators] = np.mean(quality)
	print("... this value of n_estimators seems to have a (mean) quality = %2.2f%%..." % np.mean(quality))

#: The best value found for the parameter n_estimators
best_n_estimators = max(n_estimators_quality, key=n_estimators_quality.get)
print("After trying each of the following n_estimators values (%s), %i times each, the best one is %i (for a quality = %2.2f%%)."
 % (str(list_n_estimators), Number_try, best_n_estimators, n_estimators_quality[best_n_estimators]))
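
################################################################################
# A minimal sketch of the same search with scikit-learn's built-in grid
# search, using real k-fold cross-validation instead of one hold-out split.
# Assumption: GridSearchCV lives in sklearn.grid_search in this scikit-learn
# version (modern releases moved it to sklearn.model_selection). Kept
# commented out, as a sketch, so the script's behaviour is unchanged:
# from sklearn.grid_search import GridSearchCV
# grid = GridSearchCV(GradientBoostingClassifier(),
#                     param_grid={'n_estimators': list_n_estimators},
#                     cv=5)  # 5-fold cross-validation
# grid.fit(train_data[:, 1:], train_data[:, 0].astype(int))
# print("Best n_estimators: %i (mean CV accuracy = %2.2f%%)"
#       % (grid.best_params_['n_estimators'], 100.0 * grid.best_score_))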


################################################################################
# OK, now use the same hold-out process to find the best
# meta-parameter: max_depth.
# (A joint search over both meta-parameters is sketched after this block.)
max_depth_quality = {}
#: Search space
list_max_depth = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 1000]
Number_try = 10
proportion_train = 0.67
print("Find the best value for the meta parameter max_depth, with %i run for each..." % Number_try)
print("Searching in the range : %s..." % str(list_max_depth))

print("""Using the first part (%2.2f%%, %i passengers) of the training dataset as training, 
and the second part (%2.2f%%, %i passengers) as testing !"""
 % ( 100.0*proportion_train, int(number_passengers*proportion_train),
     100.0*(1-proportion_train), number_passengers - int(number_passengers*proportion_train) ))

n_train = int(number_passengers * proportion_train)  # Recomputed: proportion_train changed above
for max_depth in list_max_depth:
	GradientBoosting = GradientBoostingClassifier(n_estimators=best_n_estimators, max_depth=max_depth)
	print("For trees with depth <= %i, learning from the first part of the dataset..." % max_depth)
	quality = []
	for nb_essais in xrange(Number_try):
		train_data = shuffle(train_data)
		GradientBoosting = GradientBoosting.fit(train_data[:n_train, 1:], train_data[:n_train, 0])
		Output = GradientBoosting.predict(train_data[n_train:, 1:])
		quality.append(100.0 * np.mean(Output == train_data[n_train:, 0]))
	max_depth_quality[max_depth] = np.mean(quality)
	print("... this value of max_depth seems to have a (mean) quality = %2.2f%%..." % np.mean(quality))

#: The best value found for the parameter max_depth
best_max_depth = max(max_depth_quality, key=max_depth_quality.get)
print("After trying each of the following max_depth values (%s), %i times each, the best one is %i (for a quality = %2.2f%%)."
 % (str(list_max_depth), Number_try, best_max_depth, max_depth_quality[best_max_depth]))
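
################################################################################
# Searching n_estimators first and max_depth second can miss interactions
# between the two meta-parameters. A minimal sketch of a joint search over
# the Cartesian product of both search spaces (hypothetical, not what this
# script does), kept commented out so the script's behaviour is unchanged:
# best_quality, best_pair = -1.0, (None, None)
# for n_est in list_n_estimators:
#     for depth in list_max_depth:
#         clf = GradientBoostingClassifier(n_estimators=n_est, max_depth=depth)
#         clf = clf.fit(train_data[:n_train, 1:], train_data[:n_train, 0])
#         q = 100.0 * clf.score(train_data[n_train:, 1:], train_data[n_train:, 0])
#         if q > best_quality:
#             best_quality, best_pair = q, (n_est, depth)
# print("Best (n_estimators, max_depth) pair: %s (quality = %2.2f%%)" % (str(best_pair), best_quality))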


################################################################################
# Then use these 'best' values of n_estimators and max_depth to predict on the test dataset
print("Creating the Gradient Boosting classifier with the best meta-parameters.")
GradientBoosting = GradientBoostingClassifier(n_estimators=best_n_estimators, max_depth=best_max_depth)
print("Learning...")
GradientBoosting = GradientBoosting.fit(train_data[:, 1:], train_data[:, 0].astype(int))
#: The score of this classifier on the training dataset.
score = 100.0 * GradientBoosting.score(train_data[:, 1:], train_data[:, 0])
print("Proportion of perfect fitting for the training dataset = %2.2f%%" % score)
# This should stay below ~95%: a higher value suggests overfitting.
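
################################################################################
# The training-set score above is optimistic. A minimal sketch of a less
# biased estimate with k-fold cross-validation. Assumption: cross_val_score
# lives in sklearn.cross_validation in this scikit-learn version (modern
# releases moved it to sklearn.model_selection). Kept commented out:
# from sklearn.cross_validation import cross_val_score
# cv_scores = cross_val_score(GradientBoosting, train_data[:, 1:],
#                             train_data[:, 0].astype(int), cv=5)
# print("5-fold CV accuracy = %2.2f%% (+/- %2.2f%%)"
#       % (100.0 * cv_scores.mean(), 100.0 * cv_scores.std()))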

# Predict on the testing dataset
test_file_object = csv.reader(open('test.csv', 'rb'))
header = next(test_file_object)  # Skip the header row

print("Predicting for the testing dataset...")
Output = GradientBoosting.predict(test_data)

# Write the output
open_file_object = csv.writer(open("csv/GradientBoosting_best.csv", "wb"))

for z, row in enumerate(test_file_object):
	row.insert(0, int(Output[z]))  # Insert the prediction at the start of the row
	open_file_object.writerow(row)  # Write the full row to the file

print("Prediction: wrote in the file csv/GradientBoosting_best.csv.")