#!/usr/bin/env python
# -*- encoding: utf-8 -*-
""" A Decision Tree model.
The doc is here : http://scikit-learn.org/dev/modules/generated/sklearn.tree.DecisionTreeClassifier.html#sklearn.tree.DecisionTreeClassifier
--------------------------------------------------------------------------------
Sortie du script
----------------
.. runblock:: console
$ python DecisionTree.py
Résultats
---------
La soumission du résultat à Kaggle donne 76.07%.
--------------------------------------------------------------------------------
"""
__author__ = 'Lilian BESSON (mailto:lilian.besson[AT]normale.fr)'
from KaggleModel import *  # Provides np, csv, train_data, test_data and number_passengers
################################################################################
# Beginning to learn
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import shuffle
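# Note: sklearn.utils.shuffle is only used by the commented-out lines below.
# Without it, the Number_try repeated runs below all see the same train/test
# split, so repetition adds little beyond the tree's own (small) randomness.
# To actually vary the split between runs, one could uncomment those lines,
# optionally with a seed for reproducibility (random_state is a real parameter
# of sklearn.utils.shuffle):
# train_data = shuffle(train_data, random_state=42)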
################################################################################
# OK, let's use a simple hold-out validation process (train on one part of the
# dataset, test on the other, repeated several times) to find the best
# meta-parameter: max_depth
max_depth_quality = {}
list_max_depth = range(1, 30)  #: Search space
Number_try = 10  #: Number of runs used for meta-learning
proportion_train = 0.67  #: Proportion of passengers used for meta-learning.
print("Find the best value for the meta parameter max_depth, with %i run for each..." % Number_try)
print("Searching in the range : %s..." % str(list_max_depth))
print("""Using the first part (%2.2f%%, %i passengers) of the training dataset as training,
and the second part (%2.2f%%, %i passengers) as testing !"""
% ( 100.0*proportion_train, int(number_passengers*proportion_train),
100.0*(1-proportion_train), number_passengers - int(number_passengers*proportion_train) ))
for max_depth in list_max_depth:
# train_data = shuffle(train_data)
    DecisionTree = DecisionTreeClassifier(
        max_depth=max_depth,
        criterion='entropy')  # 'gini' or 'entropy'
print("For max_depth=%s, learning from the first part of the dataset..." % max_depth)
    quality = []
    for nb_essais in range(Number_try):
        # train_data = shuffle(train_data)
        # Train on the first part of the dataset...
        DecisionTree = DecisionTree.fit(train_data[:int(number_passengers * proportion_train), 1:],
                                        train_data[:int(number_passengers * proportion_train), 0])
        # ... and evaluate on the second, disjoint part.
        Output = DecisionTree.predict(train_data[int(number_passengers * proportion_train):, 1:])
        quality.append(100.0 * Output[Output == train_data[int(number_passengers * proportion_train):, 0]].size / Output.size)
max_depth_quality[max_depth] = np.mean(quality)
print("... this value of max_depth seems to have a (mean) quality = %2.2f%%..." % np.mean(quality))
#: The optimal value found for the max_depth parameter
best_max_depth = max(max_depth_quality, key=max_depth_quality.get)
print("After trying each max_depth in %s, %i times each, the best one is %s (for a quality = %2.2f%%)."
      % (str(list(list_max_depth)), Number_try, best_max_depth, max_depth_quality[best_max_depth]))
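################################################################################
# Side note: scikit-learn can automate this kind of search. A minimal sketch
# with cross_val_score (assuming a recent scikit-learn where the
# model_selection module exists); it is left commented out so it does not
# change what this script computes:
# from sklearn.model_selection import cross_val_score
# cv_quality = {d: 100.0 * cross_val_score(
#                      DecisionTreeClassifier(max_depth=d, criterion='entropy'),
#                      train_data[:, 1:], train_data[:, 0], cv=5).mean()
#               for d in list_max_depth}
# best_max_depth = max(cv_quality, key=cv_quality.get)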
################################################################################
# Same hold-out validation process to find the best
# meta-parameter: min_samples_split
min_samples_split_quality = {}
list_min_samples_split = range(2, 10)  #: Search space (min_samples_split must be >= 2)
Number_try = 10
proportion_train = 0.67
print("Find the best value for the meta parameter min_samples_split, with %i run for each..." % Number_try)
print("Searching in the range : %s..." % str(list_min_samples_split))
print("""Using the first part (%2.2f%%, %i passengers) of the training dataset as training,
and the second part (%2.2f%%, %i passengers) as testing !"""
% ( 100.0*proportion_train, int(number_passengers*proportion_train),
100.0*(1-proportion_train), number_passengers - int(number_passengers*proportion_train) ))
for min_samples_split in list_min_samples_split:
# train_data = shuffle(train_data)
    DecisionTree = DecisionTreeClassifier(
        max_depth=best_max_depth,
        min_samples_split=min_samples_split,
        criterion='entropy')  # 'gini' or 'entropy'
print("For min_samples_split=%s, learning from the first part of the dataset..." % min_samples_split)
    quality = []
    for nb_essais in range(Number_try):
        # train_data = shuffle(train_data)
        DecisionTree = DecisionTree.fit(train_data[:int(number_passengers * proportion_train), 1:],
                                        train_data[:int(number_passengers * proportion_train), 0])
        # Evaluate on the second, disjoint part of the dataset.
        Output = DecisionTree.predict(train_data[int(number_passengers * proportion_train):, 1:])
        quality.append(100.0 * Output[Output == train_data[int(number_passengers * proportion_train):, 0]].size / Output.size)
min_samples_split_quality[min_samples_split] = np.mean(quality)
print("... this value of min_samples_split seems to have a (mean) quality = %2.2f%%..." % np.mean(quality))
#: The optimal value found for the min_samples_split parameter
best_min_samples_split = max(min_samples_split_quality, key=min_samples_split_quality.get)
print("After trying each min_samples_split in %s, %i times each, the best one is %s (for a quality = %2.2f%%)."
      % (str(list(list_min_samples_split)), Number_try, best_min_samples_split, min_samples_split_quality[best_min_samples_split]))
################################################################################
# Same hold-out validation process to find the best
# meta-parameter: min_samples_leaf
min_samples_leaf_quality = {}
list_min_samples_leaf = range(1, 10)  #: Search space
Number_try = 10
proportion_train = 0.67
print("Find the best value for the meta parameter min_samples_leaf, with %i run for each..." % Number_try)
print("Searching in the range : %s..." % str(list_min_samples_leaf))
print("""Using the first part (%2.2f%%, %i passengers) of the training dataset as training,
and the second part (%2.2f%%, %i passengers) as testing !"""
% ( 100.0*proportion_train, int(number_passengers*proportion_train),
100.0*(1-proportion_train), number_passengers - int(number_passengers*proportion_train) ))
for min_samples_leaf in list_min_samples_leaf:
# train_data = shuffle(train_data)
    DecisionTree = DecisionTreeClassifier(
        max_depth=best_max_depth,
        min_samples_split=best_min_samples_split,
        min_samples_leaf=min_samples_leaf,
        criterion='entropy')  # 'gini' or 'entropy'
print("For min_samples_leaf=%s, learning from the first part of the dataset..." % min_samples_leaf)
    quality = []
    for nb_essais in range(Number_try):
        # train_data = shuffle(train_data)
        DecisionTree = DecisionTree.fit(train_data[:int(number_passengers * proportion_train), 1:],
                                        train_data[:int(number_passengers * proportion_train), 0])
        # Evaluate on the second, disjoint part of the dataset.
        Output = DecisionTree.predict(train_data[int(number_passengers * proportion_train):, 1:])
        quality.append(100.0 * Output[Output == train_data[int(number_passengers * proportion_train):, 0]].size / Output.size)
min_samples_leaf_quality[min_samples_leaf] = np.mean(quality)
print("... this value of min_samples_leaf seems to have a (mean) quality = %2.2f%%..." % np.mean(quality))
#: The optimal value found for the min_samples_leaf parameter
best_min_samples_leaf = max(min_samples_leaf_quality, key=min_samples_leaf_quality.get)
print("After trying each min_samples_leaf in %s, %i times each, the best one is %s (for a quality = %2.2f%%)."
      % (str(list(list_min_samples_leaf)), Number_try, best_min_samples_leaf, min_samples_leaf_quality[best_min_samples_leaf]))
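################################################################################
# Side note: tuning the three meta-parameters one after the other, as done
# above, can miss interactions between them. A joint search over the whole grid
# avoids that; here is a minimal sketch (assuming a recent scikit-learn with
# the model_selection module), commented out to keep this script's behaviour:
# from sklearn.model_selection import GridSearchCV
# param_grid = {'max_depth': list(list_max_depth),
#               'min_samples_split': list(list_min_samples_split),
#               'min_samples_leaf': list(list_min_samples_leaf)}
# grid = GridSearchCV(DecisionTreeClassifier(criterion='entropy'),
#                     param_grid, cv=5)
# grid.fit(train_data[:, 1:], train_data[:, 0])
# print(grid.best_params_, grid.best_score_)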
################################################################################
print("Creating the classifier, with optimal parameters.")
DecisionTree = DecisionTreeClassifier(
    max_depth=best_max_depth,
    min_samples_split=best_min_samples_split,
    min_samples_leaf=best_min_samples_leaf,
    criterion='entropy')  # 'gini' or 'entropy'
print("Learning...")
DecisionTree = DecisionTree.fit(train_data[:, 1:], train_data[:, 0])
#: The score for this classifier, measured on the training data itself.
score = 100.0 * DecisionTree.score(train_data[:, 1:], train_data[:, 0])
print(" Proportion of perfect fitting for the training dataset = %2.2f%%" % score)
# Note: this is measured on the very data the tree was trained on, so it
# over-estimates the real quality; it should stay roughly below 95%, and a
# near-perfect fit would be a sign of overfitting.
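# For a less biased estimate, one could score a copy of the tree on a split it
# was not trained on; a minimal sketch (train_test_split and clone are assumed
# to live where recent scikit-learn puts them), commented out:
# from sklearn.model_selection import train_test_split
# from sklearn.base import clone
# X_tr, X_te, y_tr, y_te = train_test_split(train_data[:, 1:], train_data[:, 0],
#                                           test_size=1 - proportion_train)
# held_out_tree = clone(DecisionTree).fit(X_tr, y_tr)
# print("Held-out quality = %2.2f%%" % (100.0 * held_out_tree.score(X_te, y_te)))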
# Predict on the testing set
test_file_object = csv.reader(open('test.csv', 'r'))
header = next(test_file_object)  # Skip the header row
print("Predicting for the testing dataset")
Output = DecisionTree.predict(test_data)
# Write the output
open_file_object = csv.writer(open("csv/DecisionTree_best.csv", "w", newline=''))
z = 0
for row in test_file_object:
row.insert(0, int(Output[z])) # Insert the prediction at the start of the row
open_file_object.writerow(row) # Write the row to the file
z += 1
print("Prediction: wrote in the file csv/DecisionTree_best.csv.")