Commit 76a8859d authored by PauTheu's avatar PauTheu

stuff

parent 20201f95
%% Cell type:code id:b23a2e7d-3719-4a6a-8142-8904143d239e tags:
``` python
import warnings
warnings.filterwarnings("ignore")
import os
import pandas as pd
from tqdm import tqdm
import numpy as np
from sklearn.metrics import f1_score
# Parameter optimization
from skopt.space import Integer, Real, Categorical, Identity
from skopt.utils import use_named_args
from skopt import gp_minimize
from skopt.plots import plot_convergence
# Model
from sklearn import svm
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pickle as pkl
```
%% Cell type:code id:14ed4e25-21ca-4d3c-9f6e-2e2c06894a0d tags:
``` python
df_train = pkl.load(open("train_ten_best_features.pkl", "rb"))
df_train.dropna(inplace=True)
```
%% Cell type:code id:af4e895b-9fb4-4251-bf00-710564d7addc tags:
``` python
df_test = pkl.load(open("test_ten_best_features.pkl", "rb"))
df_test.dropna(inplace=True)
```
%% Cell type:code id:0fcf1140-299b-44fb-b81b-a999453f833a tags:
``` python
y_train = df_train["label"]
y_test = df_test["label"]
X_train = df_train.drop(["label"], axis=1)
X_test = df_test.drop(["label"], axis=1)
```
%% Cell type:code id:90971468-651b-482f-9b3c-d0454900953c tags:
``` python
# drop the region column
X_train.drop(["region"], axis=1, inplace=True)
X_test.drop(["region"], axis=1, inplace=True)
```
%% Cell type:markdown id:e67dda33-9761-435f-84fa-d03652fb4fa9 tags:
### LightGBM
%% Cell type:code id:23dccf02-779d-4210-9723-1dbca8d44c08 tags:
``` python
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import json
from sklearn.metrics import accuracy_score
```
%% Cell type:code id:f66fc2b3-2666-46ef-80d2-d0d260b8a963 tags:
``` python
# rough sanity check: .all() reduces each column to a boolean,
# so this is only a crude similarity measure between train and test
df_train.all().corr(df_test.all())
```
%%%% Output: execute_result
0.7745966692414835
%% Cell type:code id:47d34961-594d-47bf-8fed-7c2bd15e2e4f tags:
``` python
X_train.shape
```
%%%% Output: execute_result
(33264, 10)
%% Cell type:code id:4efa2b64-9bda-4f0e-858a-815915b44732 tags:
``` python
# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
print('Starting training...')
# train
gbm = lgb.LGBMRegressor(num_leaves=31,
                        learning_rate=0.01,
                        n_estimators=40,
                        boosting_type="dart")
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1')
print('Starting predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
# eval
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
# feature importances
print('Feature importances:', list(gbm.feature_importances_))
```
%%%% Output: stream
Starting training...
[1] valid_0's l1: 0.499821 valid_0's l2: 0.249944
[2] valid_0's l1: 0.499748 valid_0's l2: 0.249875
[3] valid_0's l1: 0.499677 valid_0's l2: 0.249817
[4] valid_0's l1: 0.499604 valid_0's l2: 0.249767
[5] valid_0's l1: 0.499532 valid_0's l2: 0.249727
[6] valid_0's l1: 0.499463 valid_0's l2: 0.249698
[7] valid_0's l1: 0.499383 valid_0's l2: 0.249668
[8] valid_0's l1: 0.499407 valid_0's l2: 0.249674
[9] valid_0's l1: 0.499337 valid_0's l2: 0.249659
[10] valid_0's l1: 0.499259 valid_0's l2: 0.249645
[11] valid_0's l1: 0.499191 valid_0's l2: 0.249648
[12] valid_0's l1: 0.499215 valid_0's l2: 0.249646
[13] valid_0's l1: 0.499134 valid_0's l2: 0.249643
[14] valid_0's l1: 0.499028 valid_0's l2: 0.249618
[15] valid_0's l1: 0.498922 valid_0's l2: 0.249602
[16] valid_0's l1: 0.498855 valid_0's l2: 0.249633
[17] valid_0's l1: 0.498754 valid_0's l2: 0.249634
[18] valid_0's l1: 0.498699 valid_0's l2: 0.249677
[19] valid_0's l1: 0.498611 valid_0's l2: 0.249685
[20] valid_0's l1: 0.498533 valid_0's l2: 0.249706
[21] valid_0's l1: 0.498559 valid_0's l2: 0.249707
[22] valid_0's l1: 0.498515 valid_0's l2: 0.249782
[23] valid_0's l1: 0.498423 valid_0's l2: 0.249816
[24] valid_0's l1: 0.498287 valid_0's l2: 0.249791
[25] valid_0's l1: 0.498189 valid_0's l2: 0.249816
[26] valid_0's l1: 0.497964 valid_0's l2: 0.249686
[27] valid_0's l1: 0.497722 valid_0's l2: 0.249549
[28] valid_0's l1: 0.497758 valid_0's l2: 0.249534
[29] valid_0's l1: 0.497639 valid_0's l2: 0.249548
[30] valid_0's l1: 0.497405 valid_0's l2: 0.249437
[31] valid_0's l1: 0.497418 valid_0's l2: 0.249439
[32] valid_0's l1: 0.497203 valid_0's l2: 0.249346
[33] valid_0's l1: 0.49695 valid_0's l2: 0.249219
[34] valid_0's l1: 0.496893 valid_0's l2: 0.24932
[35] valid_0's l1: 0.49693 valid_0's l2: 0.24928
[36] valid_0's l1: 0.49701 valid_0's l2: 0.24925
[37] valid_0's l1: 0.496884 valid_0's l2: 0.249278
[38] valid_0's l1: 0.496692 valid_0's l2: 0.249215
[39] valid_0's l1: 0.496433 valid_0's l2: 0.24908
[40] valid_0's l1: 0.4964 valid_0's l2: 0.249091
Starting predicting...
The rmse of prediction is: 0.4990904333616724
Feature importances: [9, 40, 144, 352, 6, 134, 307, 161, 40, 7]
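%% Cell type:markdown id:3f2a9c41-6b7d-4e2a-9c1f-5d8e0a7b1c2d tags:
Side note (a sketch, not part of the original run): since an `eval_set` is already wired up, LightGBM can stop training once the validation l1 stops improving, instead of using a fixed `n_estimators`. The `lgb.early_stopping` callback below exists in recent lightgbm releases; older versions used an `early_stopping_rounds` argument to `fit` instead. Note that dart does not support early stopping, so the sketch falls back to the default gbdt booster.
%% Cell type:code id:9d4e2b6a-1c3f-4a5b-8e7d-2f1a0c9b8d7e tags:
``` python
# Hypothetical variant: same data, but let validation l1 decide when to stop.
gbm_es = lgb.LGBMRegressor(num_leaves=31, learning_rate=0.01, n_estimators=200)
gbm_es.fit(X_train, y_train,
           eval_set=[(X_test, y_test)],
           eval_metric='l1',
           callbacks=[lgb.early_stopping(stopping_rounds=5)])
print('Best iteration:', gbm_es.best_iteration_)
```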
%% Cell type:code id:356b6771-39fe-42ee-90f5-ec5b63b40bee tags:
``` python
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
    'learning_rate': [0.01, 0.05, 0.1],
    'n_estimators': [20, 30, 40, 200],
    'num_leaves': [20, 31, 50],
    'boosting_type': ['gbdt', 'dart', 'goss', 'rf']
}
gbm = GridSearchCV(estimator, param_grid, cv=3)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
```
%%%% Output: stream
Best parameters found by grid search are: {'boosting_type': 'dart', 'learning_rate': 0.01, 'n_estimators': 30, 'num_leaves': 50}
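%% Cell type:markdown id:5b8c1d2e-3f4a-4b6c-9d0e-7a8b9c0d1e2f tags:
A possible follow-up (sketch, assuming the `GridSearchCV` object `gbm` from the cell above is still in scope): refit a fresh regressor on the winning parameters and score it on the held-out test set.
%% Cell type:code id:0a1b2c3d-4e5f-4a6b-8c9d-0e1f2a3b4c5d tags:
``` python
# Hypothetical follow-up: best_params_ is a plain dict of keyword arguments,
# so it can be splatted straight into a new estimator.
best_gbm = lgb.LGBMRegressor(**gbm.best_params_)
best_gbm.fit(X_train, y_train)
print('Tuned rmse:', mean_squared_error(y_test, best_gbm.predict(X_test)) ** 0.5)
```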
%% Cell type:code id:afe897d8-3050-4f9a-9b0c-9dac793baa3b tags:
``` python
## yikes
```
%% Cell type:markdown id:fbf4bd8c-fbd0-4cc7-9aa3-c98969c9a55c tags:
### EASY NN
%% Cell type:code id:cb955265-8cf0-430a-88ab-4708876a8147 tags:
``` python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
```
%% Cell type:code id:f90bf39c-5781-4b27-98e2-dcd183f925ef tags:
``` python
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
```
%% Cell type:code id:f88fdd4c-d42b-4981-9729-a949c6780507 tags:
``` python
EPOCHS = 50
BATCH_SIZE = 64
LEARNING_RATE = 0.001
```
%% Cell type:code id:122bf29f-83bb-45ae-b78b-4808959fa58f tags:
``` python
class trainData(Dataset):
    def __init__(self, X_data, y_data):
        self.X_data = X_data
        self.y_data = y_data

    def __getitem__(self, index):
        return self.X_data[index], self.y_data[index]

    def __len__(self):
        return len(self.X_data)

# .values: the Series index is no longer contiguous after dropna,
# so convert to a plain array before building the tensor
train_data = trainData(torch.FloatTensor(X_train),
                       torch.FloatTensor(y_train.values))
```
%% Cell type:code id:c073a012-37fb-4424-ba66-fe344d00e34e tags:
``` python
class testData(Dataset):
    def __init__(self, X_data):
        self.X_data = X_data

    def __getitem__(self, index):
        return self.X_data[index]

    def __len__(self):
        return len(self.X_data)

test_data = testData(torch.FloatTensor(X_test))
```
%% Cell type:code id:6d8bdcf5-e60c-4379-8024-a13a3ebf4b75 tags:
``` python
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=1)
```
%% Cell type:code id:a3dba65f-67a5-4cdf-bf3b-1b09668470cd tags:
``` python
class binaryClassification(nn.Module):
    def __init__(self):
        super(binaryClassification, self).__init__()
        # Number of input features is 10.
        self.layer_1 = nn.Linear(10, 64)
        self.layer_2 = nn.Linear(64, 64)
        self.layer_3 = nn.Linear(64, 64)
        self.layer_out = nn.Linear(64, 1)

        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.1)
        self.batchnorm1 = nn.BatchNorm1d(64)
        self.batchnorm2 = nn.BatchNorm1d(64)
        self.batchnorm3 = nn.BatchNorm1d(64)

    def forward(self, inputs):
        x = self.relu(self.layer_1(inputs))
        x = self.batchnorm1(x)
        x = self.relu(self.layer_2(x))
        x = self.batchnorm2(x)
        x = self.relu(self.layer_3(x))
        x = self.batchnorm3(x)
        x = self.dropout(x)
        x = self.layer_out(x)
        return x
```
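%% Cell type:markdown id:6c7d8e9f-0a1b-4c2d-8e3f-4a5b6c7d8e9f tags:
A quick smoke test (not from the original notebook): one forward pass on random data to confirm the layer shapes line up.
%% Cell type:code id:1e2f3a4b-5c6d-4e7f-9a8b-7c6d5e4f3a2b tags:
``` python
# Hypothetical sanity check: batch of 4 samples with 10 features each.
# eval() so the BatchNorm layers use running stats rather than batch stats.
_check = binaryClassification()
_check.eval()
print(_check(torch.randn(4, 10)).shape)  # expected: torch.Size([4, 1])
```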
%% Cell type:code id:3896791a-41eb-4787-a0c3-93bd21282c19 tags:
``` python
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
```
%%%% Output: stream
cuda:0
%% Cell type:code id:7680c582-2d92-484c-bbd4-f22889f63647 tags:
``` python
model = binaryClassification()
model.to(device)
print(model)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
```
%%%% Output: stream
binaryClassification(
(layer_1): Linear(in_features=10, out_features=64, bias=True)
(layer_2): Linear(in_features=64, out_features=64, bias=True)
(layer_3): Linear(in_features=64, out_features=64, bias=True)
(layer_out): Linear(in_features=64, out_features=1, bias=True)
(relu): ReLU()
(dropout): Dropout(p=0.1, inplace=False)
(batchnorm1): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(batchnorm2): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(batchnorm3): BatchNorm1d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
%% Cell type:code id:6746f086-fa63-4c1c-a49e-310d7e2ec4b7 tags:
``` python
def binary_acc(y_pred, y_test):
    y_pred_tag = torch.round(torch.sigmoid(y_pred))

    correct_results_sum = (y_pred_tag == y_test).sum().float()
    acc = correct_results_sum / y_test.shape[0]
    acc = torch.round(acc * 100)

    return acc
```
%% Cell type:code id:93f8739a-7dfa-4e34-9f66-81e942942d34 tags:
``` python
model.train()
for e in range(1, EPOCHS + 1):
    epoch_loss = 0
    epoch_acc = 0
    for X_batch, y_batch in train_loader:
        X_batch, y_batch = X_batch.to(device), y_batch.to(device)
        optimizer.zero_grad()

        y_pred = model(X_batch)

        loss = criterion(y_pred, y_batch.unsqueeze(1))
        acc = binary_acc(y_pred, y_batch.unsqueeze(1))

        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()
        epoch_acc += acc.item()

    print(f'Epoch {e:03}: | Loss: {epoch_loss/len(train_loader):.5f} | Acc: {epoch_acc/len(train_loader):.3f}')
```
%%%% Output: stream
Epoch 001: | Loss: 0.57255 | Acc: 68.510
Epoch 002: | Loss: 0.51966 | Acc: 72.412
Epoch 003: | Loss: 0.50320 | Acc: 72.960
Epoch 004: | Loss: 0.48726 | Acc: 73.865
Epoch 005: | Loss: 0.48121 | Acc: 74.381
Epoch 006: | Loss: 0.47483 | Acc: 74.637
Epoch 007: | Loss: 0.46612 | Acc: 75.467
Epoch 008: | Loss: 0.45879 | Acc: 75.863
Epoch 009: | Loss: 0.45398 | Acc: 75.887
Epoch 010: | Loss: 0.45117 | Acc: 76.433
Epoch 011: | Loss: 0.44418 | Acc: 76.796
Epoch 012: | Loss: 0.44035 | Acc: 76.696
Epoch 013: | Loss: 0.43966 | Acc: 76.646
Epoch 014: | Loss: 0.43620 | Acc: 76.937
Epoch 015: | Loss: 0.43335 | Acc: 77.183
Epoch 016: | Loss: 0.43053 | Acc: 77.279
Epoch 017: | Loss: 0.42891 | Acc: 77.277
Epoch 018: | Loss: 0.42899 | Acc: 77.281
Epoch 019: | Loss: 0.42436 | Acc: 77.512
Epoch 020: | Loss: 0.42328 | Acc: 77.627
Epoch 021: | Loss: 0.42370 | Acc: 77.792
Epoch 022: | Loss: 0.42102 | Acc: 77.644
Epoch 023: | Loss: 0.42057 | Acc: 77.800
Epoch 024: | Loss: 0.41770 | Acc: 77.879
Epoch 025: | Loss: 0.41552 | Acc: 78.081
Epoch 026: | Loss: 0.41711 | Acc: 77.954
Epoch 027: | Loss: 0.41543 | Acc: 77.835
Epoch 028: | Loss: 0.40957 | Acc: 78.260
Epoch 029: | Loss: 0.41276 | Acc: 78.279
Epoch 030: | Loss: 0.40988 | Acc: 78.279
Epoch 031: | Loss: 0.41333 | Acc: 78.271
Epoch 032: | Loss: 0.40900 | Acc: 78.362
Epoch 033: | Loss: 0.40603 | Acc: 78.381
Epoch 034: | Loss: 0.40833 | Acc: 78.404
Epoch 035: | Loss: 0.40930 | Acc: 78.338
Epoch 036: | Loss: 0.40871 | Acc: 78.440
Epoch 037: | Loss: 0.40228 | Acc: 78.644
Epoch 038: | Loss: 0.39928 | Acc: 78.852
Epoch 039: | Loss: 0.40197 | Acc: 79.058
Epoch 040: | Loss: 0.39958 | Acc: 78.588
Epoch 041: | Loss: 0.40289 | Acc: 78.994
Epoch 042: | Loss: 0.39919 | Acc: 78.838
Epoch 043: | Loss: 0.39836 | Acc: 78.873
Epoch 044: | Loss: 0.39842 | Acc: 78.838
Epoch 045: | Loss: 0.39995 | Acc: 78.815
Epoch 046: | Loss: 0.39698 | Acc: 78.896
Epoch 047: | Loss: 0.39460 | Acc: 78.965
Epoch 048: | Loss: 0.39443 | Acc: 79.200
Epoch 049: | Loss: 0.39654 | Acc: 79.104
Epoch 050: | Loss: 0.39124 | Acc: 79.223
%% Cell type:code id:13260656-39d3-4bc7-91fe-e7f5fe09c6d2 tags:
``` python
y_pred_list = []
model.eval()
with torch.no_grad():
    for X_batch in test_loader:
        X_batch = X_batch.to(device)
        y_test_pred = model(X_batch)
        y_test_pred = torch.sigmoid(y_test_pred)
        y_pred_tag = torch.round(y_test_pred)
        y_pred_list.append(y_pred_tag.cpu().numpy())

y_pred_list = [a.squeeze().tolist() for a in y_pred_list]
```
%% Cell type:code id:48dcf1ae-bfb5-4672-88f4-ec49fb42d7c5 tags:
``` python
accuracy_score(y_test, y_pred_list)
```
%%%% Output: execute_result
0.5237489113686609
%% Cell type:code id:1bd71613-25f7-4342-aa58-2abebce0f5f9 tags:
``` python
print(classification_report(y_test, y_pred_list))
```
%%%% Output: stream
              precision    recall  f1-score   support

         0.0       0.52      0.66      0.58      7533
         1.0       0.53      0.38      0.44      7394

    accuracy                           0.52     14927
   macro avg       0.52      0.52      0.51     14927
weighted avg       0.52      0.52      0.51     14927
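%% Cell type:markdown id:2b3c4d5e-6f7a-4b8c-9d0e-1f2a3b4c5d6e tags:
One more view of the same predictions (a sketch using the `confusion_matrix` import from earlier): raw counts make the under-prediction of class 1 directly visible.
%% Cell type:code id:7f8e9d0c-1b2a-4c3d-8e4f-5a6b7c8d9e0f tags:
``` python
# Hypothetical extra check: rows are true labels, columns are predictions.
print(confusion_matrix(y_test, y_pred_list))
```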
%% Cell type:markdown id:55bbaee2-335c-40ed-a5ad-3a616c91773f tags:
### TPOT
%% Cell type:code id:5b393d8b-f625-4a26-b4e3-fa8b9e15dba6 tags:
``` python
from tpot import TPOTClassifier
```
%% Cell type:code id:5efe5da7-3642-4c68-a8bf-c8e093c937a6 tags:
``` python
df_train = pkl.load(open("./data/train", "rb"))
df_test = pkl.load(open("./data/test", "rb"))
```
%% Cell type:code id:1a5c3412-48fb-4b65-862d-263a1e0f10df tags:
``` python
y_train = df_train["ret"]
y_test = df_test["ret"]
X_train = df_train.drop(["ret"], axis=1)
X_test = df_test.drop(["ret"], axis=1)
```
%% Cell type:code id:a17b4a46-ddc4-451a-985b-4cf9f4e54ae7 tags:
``` python
tpot = TPOTClassifier(generations=5, population_size=50, verbosity=2, random_state=42, n_jobs=-1)
tpot.fit(X_train, y_train)
print(tpot.score(X_test, y_test))
```
%%%% Output: stream
Generation 1 - Current best internal CV score: 0.6669664268585132
Generation 2 - Current best internal CV score: 0.66810551558753
Generation 3 - Current best internal CV score: 0.6792565947242207
Generation 4 - Current best internal CV score: 0.6792565947242207
Generation 5 - Current best internal CV score: 0.6792565947242207
Best pipeline: MLPClassifier(ExtraTreesClassifier(input_matrix, bootstrap=False, criterion=entropy, max_features=0.15000000000000002, min_samples_leaf=4, min_samples_split=17, n_estimators=100), alpha=0.1, learning_rate_init=0.001)
0.512618507143811
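%% Cell type:markdown id:4d5e6f7a-8b9c-4d0e-9f1a-2b3c4d5e6f7a tags:
A possible next step (sketch, standard TPOT API): export the winning pipeline as a standalone script so it can be rerun without repeating the search.
%% Cell type:code id:8e9f0a1b-2c3d-4e4f-8a5b-6c7d8e9f0a1b tags:
``` python
# tpot.export writes the best pipeline as plain sklearn code;
# the filename here is illustrative.
tpot.export('tpot_best_pipeline.py')
```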
%% Cell type:markdown id:22625cc8-5113-40b1-8d46-bc7289d34487 tags:
### Other data
%% Cell type:code id:7e09669c-1904-48a4-aa97-3325582b77b0 tags:
``` python
from tpot import TPOTClassifier

df_test = pkl.load(open("test_ten_best_features.pkl", "rb"))
df_test.dropna(inplace=True)
df_train = pkl.load(open("train_ten_best_features.pkl", "rb"))
df_train.dropna(inplace=True)

y_train = df_train["label"]
y_test = df_test["label"]
X_train = df_train.drop(["label"], axis=1)
X_test = df_test.drop(["label"], axis=1)
```
%% Cell type:code id:2adbd233-18f1-4fc6-97b0-0d75ae7f5544 tags:
``` python
tpot = TPOTClassifier(generations=100, population_size=50, verbosity=2, random_state=42, n_jobs=-1, max_time_mins=120)
tpot.fit(X_train, y_train)
print(tpot.score(X_test, y_test))
```
%%%% Output: stream
Generation 1 - Current best internal CV score: 0.6320305182449035
Generation 2 - Current best internal CV score: 0.6351871562198599
Generation 3 - Current best internal CV score: 0.6382534647018571
Generation 4 - Current best internal CV score: 0.6421921613992991
Generation 5 - Current best internal CV score: 0.6421921613992991
Generation 6 - Current best internal CV score: 0.6438173782411489
Generation 7 - Current best internal CV score: 0.6466120971925099
Generation 8 - Current best internal CV score: 0.6466120971925099
Generation 9 - Current best internal CV score: 0.6466120971925099
Generation 10 - Current best internal CV score: 0.6466120971925099