Commit b8a85cb2 authored by Florian RICHOUX's avatar Florian RICHOUX

Remove old files, add experimental process in PROCESS

parent bca39ff2
1. Train on the training set, validating on the validation set, and save the weights. (output: mirror-double or mirror-medium)
2. Load these weights and test them on the validation set => gives the validation performance measurements. (output: mirror-double_test-val or mirror-medium_test-val)
3. Run spot_min_loss.py on step 1's output to see when to stop the final training. This gives you an epoch number E (see the sketch after this list).
4. Retrain using the training AND validation sets as the training set, use the test set for testing, and stop at epoch E => gives the final performance measurements. (output: test-mirror-double_train-val or test-mirror-medium_train-val)
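
spot_min_loss.py itself is not part of this commit; the following is a minimal sketch of what it presumably does, assuming the result-file format used below ("E: train_loss=..., val_loss=..." lines). It is illustrative only, not the repository's actual script.

import re
import sys

def spot_min_loss(path):
    # Scan a result file for the epoch with the lowest validation loss.
    best_epoch, best_val_loss = None, float('inf')
    line_re = re.compile(r'^(\d+): train_loss=[^,]+, val_loss=(\S+)')
    with open(path) as result_file:
        for line in result_file:
            match = line_re.match(line.strip())
            if match and float(match.group(2)) < best_val_loss:
                best_epoch = int(match.group(1))
                best_val_loss = float(match.group(2))
    return best_epoch, best_val_loss

if __name__ == '__main__':
    epoch, val_loss = spot_min_loss(sys.argv[1])
    print('Stop final training at epoch {} (val_loss={})'.format(epoch, val_loss))
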
import keras
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Mail_Callback(keras.callbacks.Callback):
    @staticmethod
    def get_data():
        # Read SMTP credentials from a local file, one value per line:
        # source address, password, destination address, SMTP host, port.
        with open('./private_mail_data.txt', 'r') as file:
            source = file.readline().strip()
            password = file.readline().strip()
            dest = file.readline().strip()
            smtphost = file.readline().strip()
            port = int(file.readline())
        return source, password, dest, smtphost, port

    def on_train_end(self, logs=None):
        # Send a notification mail once training is over.
        source, password, dest, smtphost, port = self.get_data()
        s = smtplib.SMTP(host=smtphost, port=port)
        s.starttls()
        s.login(source, password)
        msg = MIMEMultipart()
        msg['From'] = source
        msg['To'] = dest
        msg['Subject'] = "{} training complete.".format(self.model.__class__.__name__)
        msg.attach(MIMEText("", 'plain'))
        s.send_message(msg)
        del msg
        s.quit()
        return
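
A minimal usage sketch for this callback, assuming a private_mail_data.txt file with the five values listed above and that the class lives in a module named mail_callback (the module name, model, and data below are hypothetical placeholders, not the repository's actual training driver):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from mail_callback import Mail_Callback  # hypothetical module name

# Tiny dummy model and data, illustrative only: on_train_end fires once
# fit() finishes and sends the notification mail.
model = Sequential([Dense(1, activation='sigmoid', input_shape=(4,))])
model.compile(loss='binary_crossentropy', optimizer='adam')
x = np.random.rand(32, 4)
y = np.random.randint(0, 2, size=(32, 1))
model.fit(x, y, epochs=1, verbose=0, callbacks=[Mail_Callback()])
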
from keras.models import Model
from keras import layers
from keras import Input
import numpy as np
import tensorflow as tf
from models.abstract_model import AbstractModel
class FC2_2Dense(AbstractModel):
    def __init__(self):
        super().__init__()

    def get_model(self):
        # Branch 1: flatten the fixed-length protein (1166 positions x 20
        # amino-acid channels) and feed it through two dense blocks.
        input1 = Input(shape=(1166, 20,), dtype=np.float32, name='protein1')
        protein1 = layers.Flatten()(input1)
        protein1 = layers.Dense(1000, activation='relu')(protein1)
        protein1 = layers.BatchNormalization()(protein1)
        protein1 = layers.Dense(1000, activation='relu')(protein1)
        protein1 = layers.BatchNormalization()(protein1)

        # Branch 2: same architecture, with its own (unshared) weights.
        input2 = Input(shape=(1166, 20,), dtype=np.float32, name='protein2')
        protein2 = layers.Flatten()(input2)
        protein2 = layers.Dense(1000, activation='relu')(protein2)
        protein2 = layers.BatchNormalization()(protein2)
        protein2 = layers.Dense(1000, activation='relu')(protein2)
        protein2 = layers.BatchNormalization()(protein2)

        # Merge both branches and classify the protein pair.
        head = layers.concatenate([protein1, protein2], axis=-1)
        head = layers.Dense(100, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(1)(head)
        output = layers.Activation(tf.nn.sigmoid)(head)

        model = Model(inputs=[input1, input2], outputs=output)
        return model
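
A minimal sketch of compiling and fitting this model, assuming the module path models.fc2_2dense and dummy data (both hypothetical; the repository's real training driver is not part of this commit):

import numpy as np
from models.fc2_2dense import FC2_2Dense  # hypothetical module path

model = FC2_2Dense().get_model()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])

# Dummy fixed-length protein pairs (1166 positions x 20 amino-acid channels).
protein1 = np.random.rand(8, 1166, 20)
protein2 = np.random.rand(8, 1166, 20)
labels = np.random.randint(0, 2, size=(8, 1))
model.fit([protein1, protein2], labels, epochs=1, verbose=0)
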
from keras.models import Model
from keras import layers
from keras import Input
import numpy as np
import tensorflow as tf
from models.abstract_model import AbstractModel
class LSTM32_3Conv3_2Dense(AbstractModel):
    def __init__(self):
        super().__init__()

    def get_model(self):
        # Branch 1: three Conv1D/MaxPooling1D/BatchNorm stages followed by an
        # LSTM, over variable-length proteins (20 channels per position).
        input1 = Input(shape=(None, 20,), dtype=np.float32, name='protein1')
        protein1 = layers.Conv1D(5, 20, activation='relu')(input1)
        protein1 = layers.MaxPooling1D(3)(protein1)
        protein1 = layers.BatchNormalization()(protein1)
        protein1 = layers.Conv1D(5, 20, activation='relu')(protein1)
        protein1 = layers.MaxPooling1D(3)(protein1)
        protein1 = layers.BatchNormalization()(protein1)
        protein1 = layers.Conv1D(5, 20, activation='relu')(protein1)
        protein1 = layers.MaxPooling1D(3)(protein1)
        protein1 = layers.BatchNormalization()(protein1)
        protein1 = layers.LSTM(32)(protein1)

        # Branch 2: same architecture, with its own (unshared) weights.
        input2 = Input(shape=(None, 20,), dtype=np.float32, name='protein2')
        protein2 = layers.Conv1D(5, 20, activation='relu')(input2)
        protein2 = layers.MaxPooling1D(3)(protein2)
        protein2 = layers.BatchNormalization()(protein2)
        protein2 = layers.Conv1D(5, 20, activation='relu')(protein2)
        protein2 = layers.MaxPooling1D(3)(protein2)
        protein2 = layers.BatchNormalization()(protein2)
        protein2 = layers.Conv1D(5, 20, activation='relu')(protein2)
        protein2 = layers.MaxPooling1D(3)(protein2)
        protein2 = layers.BatchNormalization()(protein2)
        protein2 = layers.LSTM(32)(protein2)

        # Merge both branches and classify the protein pair.
        head = layers.concatenate([protein1, protein2], axis=-1)
        # head = layers.Dense(50, activation='relu')(head)
        # head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(1)(head)
        output = layers.Activation(tf.nn.sigmoid)(head)

        model = Model(inputs=[input1, input2], outputs=output)
        return model
from keras.models import Model
from keras import layers
from keras import Input
import numpy as np
import tensorflow as tf
def make_model(x_train, y_train, x_val, y_val, params):
    # The layers below are instantiated once and applied to both inputs,
    # so here the two protein branches share their weights (siamese setup),
    # unlike the per-branch layers in the model classes above.
    conv1 = layers.Conv1D(params['filters'], 20, activation='relu')
    pool1 = layers.MaxPooling1D(3)
    batchnorm1 = layers.BatchNormalization()
    conv2 = layers.Conv1D(params['filters'], 20, activation='relu')
    pool2 = layers.MaxPooling1D(3)
    batchnorm2 = layers.BatchNormalization()
    conv3 = layers.Conv1D(params['filters'], 20, activation='relu')
    pool3 = layers.MaxPooling1D(3)
    batchnorm3 = layers.BatchNormalization()
    lstm = layers.LSTM(params['lstm_size'])

    input1 = Input(shape=(None, 20,), dtype=np.float32, name='protein1')
    protein1 = conv1(input1)
    protein1 = pool1(protein1)
    protein1 = batchnorm1(protein1)
    protein1 = conv2(protein1)
    protein1 = pool2(protein1)
    protein1 = batchnorm2(protein1)
    protein1 = conv3(protein1)
    protein1 = pool3(protein1)
    protein1 = batchnorm3(protein1)
    protein1 = lstm(protein1)

    input2 = Input(shape=(None, 20,), dtype=np.float32, name='protein2')
    protein2 = conv1(input2)
    protein2 = pool1(protein2)
    protein2 = batchnorm1(protein2)
    protein2 = conv2(protein2)
    protein2 = pool2(protein2)
    protein2 = batchnorm2(protein2)
    protein2 = conv3(protein2)
    protein2 = pool3(protein2)
    protein2 = batchnorm3(protein2)
    protein2 = lstm(protein2)

    # Merge both branches and classify the protein pair.
    head = layers.concatenate([protein1, protein2], axis=-1)
    # head = layers.Dense(50, activation='relu')(head)
    # head = layers.BatchNormalization()(head)
    head = layers.Dense(params['dense'], activation='relu')(head)
    head = layers.BatchNormalization()(head)
    head = layers.Dense(1)(head)
    output = layers.Activation(tf.nn.sigmoid)(head)

    model = Model(inputs=[input1, input2], outputs=output)
    model.compile(loss='binary_crossentropy',
                  optimizer=params['optimizer'](lr=params['lr']),
                  metrics=['acc'])
    history = model.fit(x_train, y_train,
                        validation_data=(x_val, y_val),
                        batch_size=1024,
                        epochs=50,
                        verbose=0)
    return history, model
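
The (x_train, y_train, x_val, y_val, params) signature and the (history, model) return value match the model-function convention of hyperparameter scanners such as Talos. The sketch below assumes Talos is the intended driver (an assumption; this commit does not say) and uses dummy data. Sequences must be long enough to survive three Conv1D(kernel 20)/MaxPooling1D(3) stages, hence length 600 here.

import numpy as np
import talos
from keras.optimizers import Adam

# Dummy variable-length protein pairs, illustrative only.
x = [np.random.rand(64, 600, 20), np.random.rand(64, 600, 20)]
y = np.random.randint(0, 2, size=(64, 1))

# Hypothetical grid; the keys match those read by make_model above.
p = {'filters': [5],
     'lstm_size': [32],
     'dense': [25],
     'optimizer': [Adam],
     'lr': [0.001]}

# Exact Scan keyword names vary across Talos versions (older releases used
# dataset_name/experiment_no instead of experiment_name).
scan = talos.Scan(x=x, y=y, params=p, model=make_model,
                  experiment_name='ppi_lstm32')
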
File fc2_100_2dense_2019-01-05_15:30_gpu-3-1_adam_0.001_2048_2_mirror-double.txt
fc2_100_2dense, epochs=2, batch=2048, optimizer=adam, learning rate=0.001, patience=2
Number of training samples: 91036
Loss
0: train_loss=0.3872924578609405, val_loss=0.24862531490007936
1: train_loss=0.1984376458212091, val_loss=0.21774172615131798
///////////////////////////////////////////
Accuracy
0: train_acc=0.8374159675139137, val_acc=0.9012474013808516
1: train_acc=0.9260292632298319, val_acc=0.9217175760022092
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 6003
Number of 1 predicted: 6503
Validation precision: 0.9532326878897276
Validation recall: 0.8932800246040289
Validation F1-score: 0.9222830832737953
File fc2_100_2dense_2019-01-05_15:31_gpu-3-1_adam_0.001_2048_6_mirror-double.txt
fc2_100_2dense, epochs=6, batch=2048, optimizer=adam, learning rate=0.001, patience=6
Number of training samples: 91036
Loss
0: train_loss=0.38646583883528435, val_loss=0.24874810088773203
1: train_loss=0.19818387037146631, val_loss=0.2137485619937632
2: train_loss=0.13475485231030557, val_loss=0.19514386956165186
3: train_loss=0.08879141405691628, val_loss=0.18851766600792605
4: train_loss=0.05506245791028184, val_loss=0.18956206869777825
5: train_loss=0.0320371428593876, val_loss=0.18431813973685027
///////////////////////////////////////////
Accuracy
0: train_acc=0.8395140386826178, val_acc=0.9008475929837064
1: train_acc=0.9256338152751766, val_acc=0.9199584202729815
2: train_acc=0.9516674725047377, val_acc=0.9304333922864151
3: train_acc=0.9714838086555689, val_acc=0.9365104754104931
4: train_acc=0.9846654069240873, val_acc=0.9378698221897099
5: train_acc=0.9925963353951486, val_acc=0.9437869821341347
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 5969
Number of 1 predicted: 6537
Validation precision: 0.9786675418444372
Validation recall: 0.9123451124368976
Validation F1-score: 0.9443432824004433
File fc2_100_2dense_2019-01-05_15:33_gpu-3-1_adam_0.001_2048_50_mirror-double.txt
fc2_100_2dense, epochs=50, batch=2048, optimizer=adam, learning rate=0.001, patience=50
Number of training samples: 91036
Loss
0: train_loss=0.39084005259719556, val_loss=0.24820902741529188
1: train_loss=0.19868336829688596, val_loss=0.21982846556235253
2: train_loss=0.13446721005543993, val_loss=0.19419807280023135
3: train_loss=0.09127538339409418, val_loss=0.19119984586868366
4: train_loss=0.057243844823147136, val_loss=0.19539317698139735
5: train_loss=0.03417560099190948, val_loss=0.19053472828030033
6: train_loss=0.019209828702688315, val_loss=0.19376062100531863
7: train_loss=0.010719036995995322, val_loss=0.20701293355843248
8: train_loss=0.006273768865402091, val_loss=0.2096985401429311
9: train_loss=0.003392524909270808, val_loss=0.21952877723082112
10: train_loss=0.0021173997586649914, val_loss=0.22810169825187096
11: train_loss=0.0015779878676677997, val_loss=0.23237065726635878
12: train_loss=0.0012973357506271587, val_loss=0.24036148083738074
13: train_loss=0.000980192218761744, val_loss=0.2449238352988713
14: train_loss=0.0008160841430854868, val_loss=0.2500674303352366
15: train_loss=0.0007044590684325056, val_loss=0.2538989233757121
16: train_loss=0.0006136990582911797, val_loss=0.25692077814894027
17: train_loss=0.0005446311450516558, val_loss=0.26114509836037064
18: train_loss=0.0004909603822186533, val_loss=0.26408119393638013
19: train_loss=0.00043652642148861887, val_loss=0.26690373849376914
20: train_loss=0.00040427474553936076, val_loss=0.2688997212589483
21: train_loss=0.0003737249798131803, val_loss=0.2716394969840517
22: train_loss=0.00033937537942469294, val_loss=0.2734848748878843
23: train_loss=0.00031484651955307534, val_loss=0.27757503865586075
24: train_loss=0.0002823829447792764, val_loss=0.27911913659922244
25: train_loss=0.0002650592014468279, val_loss=0.2811659785932147
26: train_loss=0.00024356591878407297, val_loss=0.28317420343084565
27: train_loss=0.0002288777369707194, val_loss=0.28445635900180205
28: train_loss=0.00021498552894707454, val_loss=0.2877035659398153
29: train_loss=0.00019978455341870238, val_loss=0.2894007458641837
30: train_loss=0.00018547798961860652, val_loss=0.290796727375776
31: train_loss=0.00017899703322312122, val_loss=0.29315332840313707
32: train_loss=0.00016441875571085525, val_loss=0.29486377897328153
33: train_loss=0.00015179964051989555, val_loss=0.29581815520992244
34: train_loss=0.00014454797966356397, val_loss=0.2977618077040367
35: train_loss=0.00013634766936709145, val_loss=0.2998111712137604
36: train_loss=0.00012988271101178333, val_loss=0.3010743594962693
37: train_loss=0.00012321842486698288, val_loss=0.302838659092996
38: train_loss=0.00011716578809035263, val_loss=0.30434125327747497
39: train_loss=0.00010814161058372393, val_loss=0.3050017132056383
40: train_loss=0.0001035789430454219, val_loss=0.3069154392828393
41: train_loss=9.777023387633345e-05, val_loss=0.30693599115118836
42: train_loss=9.248107107998815e-05, val_loss=0.30841674197297775
43: train_loss=8.866932920930427e-05, val_loss=0.3102477753894454
44: train_loss=8.531291696624557e-05, val_loss=0.3118808348760707
45: train_loss=7.980685583184382e-05, val_loss=0.31308416464358124
46: train_loss=7.750269896423334e-05, val_loss=0.3145050155902965
47: train_loss=7.23596472968019e-05, val_loss=0.31575389449718033
48: train_loss=6.908433984412263e-05, val_loss=0.316218740569674
49: train_loss=6.617149797810436e-05, val_loss=0.3175697931857485
///////////////////////////////////////////
Accuracy
0: train_acc=0.8329452084287137, val_acc=0.8998880540487691
1: train_acc=0.9255898765637882, val_acc=0.922757076917792
2: train_acc=0.9521288283313639, val_acc=0.9325123943654171
3: train_acc=0.9700338326516011, val_acc=0.9372301293662428
4: train_acc=0.9837426951267927, val_acc=0.9371501677478197
5: train_acc=0.992167933444272, val_acc=0.9415480569422044
6: train_acc=0.9962981679516123, val_acc=0.9424276348687773
7: train_acc=0.9983852542820586, val_acc=0.9439469059238468
8: train_acc=0.9993409200755745, val_acc=0.9436270594501541
9: train_acc=0.9998681840151149, val_acc=0.9443467132819856
10: train_acc=0.9999450766729645, val_acc=0.9444266752054381
11: train_acc=0.9999450766729645, val_acc=0.9453062530080929
12: train_acc=0.9999450766729645, val_acc=0.9441867907791162
13: train_acc=1.0, val_acc=0.9448264837265015
14: train_acc=1.0, val_acc=0.9449064453449246
15: train_acc=1.0, val_acc=0.9443467140159625
16: train_acc=1.0, val_acc=0.944586598871232
17: train_acc=1.0, val_acc=0.9445066372528088
18: train_acc=1.0, val_acc=0.9443467140159625
19: train_acc=1.0, val_acc=0.9443467137109331
20: train_acc=1.0, val_acc=0.9443467134059038
21: train_acc=1.0, val_acc=0.9438669436953648
22: train_acc=1.0, val_acc=0.9441867901690575
23: train_acc=1.0, val_acc=0.9441068285506343
24: train_acc=1.0, val_acc=0.9441068285506343
25: train_acc=1.0, val_acc=0.9441867901690575
26: train_acc=1.0, val_acc=0.9441867901690575
27: train_acc=1.0, val_acc=0.9445865982611733
28: train_acc=1.0, val_acc=0.9441867901690575
29: train_acc=1.0, val_acc=0.9442667517874807
30: train_acc=1.0, val_acc=0.9441867901690575
31: train_acc=1.0, val_acc=0.9438669436953648
32: train_acc=1.0, val_acc=0.9438669436953648
33: train_acc=1.0, val_acc=0.9438669436953648
34: train_acc=1.0, val_acc=0.9435470976506197
35: train_acc=1.0, val_acc=0.943627058535066
36: train_acc=1.0, val_acc=0.9435470976506197
37: train_acc=1.0, val_acc=0.9435470976506197
38: train_acc=1.0, val_acc=0.9433871744137734
39: train_acc=1.0, val_acc=0.9435470976506197
40: train_acc=1.0, val_acc=0.9434671352982197
41: train_acc=1.0, val_acc=0.9434671352982197
42: train_acc=1.0, val_acc=0.9433871736797965
43: train_acc=1.0, val_acc=0.9432272504429502
44: train_acc=1.0, val_acc=0.9429074047032345
45: train_acc=1.0, val_acc=0.9429074047032345
46: train_acc=1.0, val_acc=0.9428274430848114
47: train_acc=1.0, val_acc=0.9428274423508345
48: train_acc=1.0, val_acc=0.9430673272061039
49: train_acc=1.0, val_acc=0.9430673279400809
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 5950
Number of 1 predicted: 6556
Validation precision: 0.9794880210042665
Validation recall: 0.910463697376449
Validation F1-score: 0.9437154150197627
File fc2_2_2dense_2019-01-05_15:49_gpu-1-1_adam_0.001_2048_50_mirror-double.txt
fc2_2_2dense, epochs=50, batch=2048, optimizer=adam, learning rate=0.001, patience=50
Number of training samples: 91036
Loss
0: train_loss=0.5868324072545157, val_loss=0.5400454728446654
1: train_loss=0.4309438548710755, val_loss=0.48030461853987577
2: train_loss=0.36232180356995086, val_loss=0.4169283351354097
3: train_loss=0.32342582868772324, val_loss=0.3920299895054128
4: train_loss=0.29736668314470327, val_loss=0.3665638723345579
5: train_loss=0.2755200032010073, val_loss=0.37876797349152136
6: train_loss=0.25787431544702144, val_loss=0.3569301744899463
7: train_loss=0.2413620188720323, val_loss=0.3434009244875509
8: train_loss=0.2262995223476229, val_loss=0.3141578554716298
9: train_loss=0.21541131924250365, val_loss=0.32120865183068753
10: train_loss=0.2075656501239033, val_loss=0.2911624203686445
11: train_loss=0.1987251827544134, val_loss=0.298379240799728
12: train_loss=0.1924263693372962, val_loss=0.29917733652695033
13: train_loss=0.18572623092109747, val_loss=0.2895961823309781
14: train_loss=0.18101410088165557, val_loss=0.27934024445194255
15: train_loss=0.17685751623043858, val_loss=0.2754808639013994
16: train_loss=0.17138945412497522, val_loss=0.2794833493227008
17: train_loss=0.16770235678331888, val_loss=0.2785316342909432
18: train_loss=0.1657604711938289, val_loss=0.27257961836430084
19: train_loss=0.1615844426918147, val_loss=0.2741673314226316
20: train_loss=0.15936266384674105, val_loss=0.27736032493835483
21: train_loss=0.15749476990801278, val_loss=0.2780449430303499
22: train_loss=0.15461868640447712, val_loss=0.27725645385512193
23: train_loss=0.1513382743609811, val_loss=0.27558052124699534
24: train_loss=0.1469705086658927, val_loss=0.2748024411274303
25: train_loss=0.14310727268555623, val_loss=0.2728943998920771
26: train_loss=0.14118252259533992, val_loss=0.27296533424263664
27: train_loss=0.13834196010849126, val_loss=0.27264594629480804
28: train_loss=0.13667267574315498, val_loss=0.2706256294448757
29: train_loss=0.13597924177071202, val_loss=0.2764187418907409
30: train_loss=0.1344959570110575, val_loss=0.27054289224508876
31: train_loss=0.13359275488593342, val_loss=0.27949437384183706
32: train_loss=0.1326593114817319, val_loss=0.2876403712185406
33: train_loss=0.13127935183145673, val_loss=0.278278353379933
34: train_loss=0.12796957842772244, val_loss=0.2820234261673583
35: train_loss=0.1251422458643931, val_loss=0.2798856218614598
36: train_loss=0.12393653346632784, val_loss=0.2786651059148713
37: train_loss=0.12330459573824736, val_loss=0.2780863390185443
38: train_loss=0.12253441867118267, val_loss=0.2825432524440118
39: train_loss=0.121954147646206, val_loss=0.28174191819140193
40: train_loss=0.12142228508745673, val_loss=0.2890833710768157
41: train_loss=0.11949331372330732, val_loss=0.29112275923497577
42: train_loss=0.11951255439609398, val_loss=0.2896069054810997
43: train_loss=0.11814817820492222, val_loss=0.28583111281435375
44: train_loss=0.11899578665143404, val_loss=0.2917038509412211
45: train_loss=0.11775911522924369, val_loss=0.2893431853260514
46: train_loss=0.11660375264358885, val_loss=0.28850454747362286
47: train_loss=0.11487829206794797, val_loss=0.2831823964481561
48: train_loss=0.11426390091712606, val_loss=0.2938180812907833
49: train_loss=0.11412857218727641, val_loss=0.2880550358088783
///////////////////////////////////////////
Accuracy
0: train_acc=0.7033371414685271, val_acc=0.7130177514506935
1: train_acc=0.8209279846226354, val_acc=0.7461218611061259
2: train_acc=0.8561448217239692, val_acc=0.8221653608937998
3: train_acc=0.8731051454477441, val_acc=0.8344794495781002
4: train_acc=0.8835186080860544, val_acc=0.8495921957651063
5: train_acc=0.8914165821968606, val_acc=0.8353590277525095
6: train_acc=0.898644492540184, val_acc=0.844474652004913
7: train_acc=0.9057076318652973, val_acc=0.854469854612837
8: train_acc=0.9143415792131158, val_acc=0.8683831757895187
9: train_acc=0.9179884879108975, val_acc=0.8619862467446138
10: train_acc=0.9219649367620752, val_acc=0.880777227436278
11: train_acc=0.9252273828489164, val_acc=0.874220373810491
12: train_acc=0.9283580122961353, val_acc=0.8766991845916676
13: train_acc=0.931060239622246, val_acc=0.8846953462528722
14: train_acc=0.9323015072977529, val_acc=0.891971853281543
15: train_acc=0.9334548968721753, val_acc=0.8958899730132253
16: train_acc=0.9353881981304846, val_acc=0.8982088602525262
17: train_acc=0.9367393122165001, val_acc=0.8934111630232185
18: train_acc=0.9368491587055773, val_acc=0.8998880539915761
19: train_acc=0.9391559380979843, val_acc=0.8987685910286225
20: train_acc=0.9393426775801365, val_acc=0.8992483605580504
21: train_acc=0.9396392635461279, val_acc=0.9011674397052354
22: train_acc=0.9409793925084204, val_acc=0.9003678230920563
23: train_acc=0.9421657365661885, val_acc=0.8985287066023007
24: train_acc=0.9439891913616101, val_acc=0.902526787094511
25: train_acc=0.9448020560438989, val_acc=0.9023668642866122
26: train_acc=0.9455380290137784, val_acc=0.902446825600006
27: train_acc=0.9461421857080695, val_acc=0.9034063653261132
28: train_acc=0.946966035225997, val_acc=0.9064449059682981
29: train_acc=0.9469550505370193, val_acc=0.903806172684252
30: train_acc=0.9476690538382403, val_acc=0.9062849834654286
31: train_acc=0.9469660351552854, val_acc=0.904285942394791
32: train_acc=0.9471747438923023, val_acc=0.903006556928968
33: train_acc=0.9481523794539967, val_acc=0.9040460581495803
34: train_acc=0.9495584163825422, val_acc=0.9038861343026752
35: train_acc=0.9503163580782589, val_acc=0.9065248677106394
36: train_acc=0.9507886990547977, val_acc=0.907164560963054
37: train_acc=0.9508106681184791, val_acc=0.9070046377262077
38: train_acc=0.9506458984752171, val_acc=0.905645290213014
39: train_acc=0.9505250667707537, val_acc=0.9056452899079847
40: train_acc=0.9508106682625213, val_acc=0.9055653288996203
41: train_acc=0.9515686099346673, val_acc=0.9050855584551044
42: train_acc=0.950645898524977, val_acc=0.9067647525659088
43: train_acc=0.9519420886789801, val_acc=0.9064449060922163
44: train_acc=0.9513269476099852, val_acc=0.9058851753733129
45: train_acc=0.9513599014857348, val_acc=0.9046057896024605
46: train_acc=0.9519201196152987, val_acc=0.9056452900890959
47: train_acc=0.9524803374541592, val_acc=0.9084439464288769
48: train_acc=0.952029966196039, val_acc=0.9044458663656142
49: train_acc=0.9521947360566737, val_acc=0.9065248675867212
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 6311
Number of 1 predicted: 6195
Validation precision: 0.9123728257302265
Validation recall: 0.897497982243745
Validation F1-score: 0.9048742778094232
File fc2_2_2dense_2019-01-05_15:57_gpu-1-1_adam_0.001_2048_50_mirror-double_8.txt
fc2_2_2dense, epochs=50, batch=2048, optimizer=adam, learning rate=0.001, patience=50
Number of training samples: 91036
Loss
0: train_loss=0.5344712591096886, val_loss=0.4224936847455708
1: train_loss=0.3352715382271038, val_loss=0.3303721829135952
2: train_loss=0.265630046778883, val_loss=0.2911959946126952
3: train_loss=0.2304129916605775, val_loss=0.2702348824121581
4: train_loss=0.2058606543521941, val_loss=0.2569241955454552
5: train_loss=0.18724231594012383, val_loss=0.24502089602002025
6: train_loss=0.17198682377672483, val_loss=0.24615953491150847
7: train_loss=0.16164105220057118, val_loss=0.23904192374264663
8: train_loss=0.15091986672675245, val_loss=0.23973764366442427
9: train_loss=0.1429571703222369, val_loss=0.23734093756651317
10: train_loss=0.1345246459530414, val_loss=0.2301928297092719
11: train_loss=0.12652427807343572, val_loss=0.24034710654769348
12: train_loss=0.12080476483355256, val_loss=0.23924446400977623
13: train_loss=0.11523661155043642, val_loss=0.24012877862300563
14: train_loss=0.11042115571806964, val_loss=0.23530416168938556
15: train_loss=0.10533447246493582, val_loss=0.23887275232518365
16: train_loss=0.0976653429856147, val_loss=0.2523558064544561
17: train_loss=0.0917442775743881, val_loss=0.24487323217557447
18: train_loss=0.08795939981374334, val_loss=0.25438885899537966
19: train_loss=0.08546736741169166, val_loss=0.25288930759799017
20: train_loss=0.08312876693951214, val_loss=0.24905624935651005
21: train_loss=0.07815345962555967, val_loss=0.25687587441357923
22: train_loss=0.07408902098693707, val_loss=0.2740216440493329
23: train_loss=0.07154272803608293, val_loss=0.2787691712303198
24: train_loss=0.07024392430358782, val_loss=0.2798366623507792
25: train_loss=0.06869826948288126, val_loss=0.281157335791038
26: train_loss=0.0667276821113585, val_loss=0.27862836226794735
27: train_loss=0.06371263384405237, val_loss=0.29192081437422795
28: train_loss=0.06238483640975968, val_loss=0.30587797373290176
29: train_loss=0.060621403661203846, val_loss=0.29185343444509426
30: train_loss=0.05874740919336828, val_loss=0.2931189947789447
31: train_loss=0.058398200451982026, val_loss=0.3075554228755506
32: train_loss=0.05715898022247464, val_loss=0.3047667787720561
33: train_loss=0.054853411908701584, val_loss=0.3077406221897949
34: train_loss=0.05346448494844835, val_loss=0.31392198477900163
35: train_loss=0.05189913797838296, val_loss=0.3210133680263253
36: train_loss=0.050160847446155746, val_loss=0.3348484638117417
37: train_loss=0.05036856989656597, val_loss=0.33308192605726744
38: train_loss=0.048813870030912095, val_loss=0.3284035862188005
39: train_loss=0.048609034830090596, val_loss=0.342963143337179
40: train_loss=0.047596907799904636, val_loss=0.3380984117293499
41: train_loss=0.0461191867458427, val_loss=0.3428330183715491
42: train_loss=0.045473129607239794, val_loss=0.35261960535188225
43: train_loss=0.04435583245394006, val_loss=0.3419255777662055
44: train_loss=0.04380223080561693, val_loss=0.36403623463991985
45: train_loss=0.04360189446335235, val_loss=0.3504986384192143
46: train_loss=0.045412008543756, val_loss=0.35818024268250226
47: train_loss=0.04324922773446406, val_loss=0.37607159407524304
48: train_loss=0.042739301859701324, val_loss=0.3817920437359379
49: train_loss=0.042162007083543035, val_loss=0.3707650050761584
///////////////////////////////////////////
Accuracy
0: train_acc=0.745298563344569, val_acc=0.8065728449962548
1: train_acc=0.8749176149727815, val_acc=0.8615864380996322
2: train_acc=0.9015664135148997, val_acc=0.8821365749494716
3: train_acc=0.9151654292155488, val_acc=0.8927714694657746
4: train_acc=0.9251504899003634, val_acc=0.8993283220525553
5: train_acc=0.9316534116511301, val_acc=0.9056452900890959
6: train_acc=0.9387934441657397, val_acc=0.9044458660605849
7: train_acc=0.9424293687297609, val_acc=0.9096433707052244
8: train_acc=0.9470648972644209, val_acc=0.9118023348315971
9: train_acc=0.9492398608919345, val_acc=0.9112426039315825
10: train_acc=0.9528867703649249, val_acc=0.917959379326262
11: train_acc=0.955709828890042, val_acc=0.9162002241499001
12: train_acc=0.9576980532318244, val_acc=0.914680953218749
13: train_acc=0.9593896917045166, val_acc=0.9191588037265276
14: train_acc=0.961443824329445, val_acc=0.9206780741715381
15: train_acc=0.9636078033884525, val_acc=0.9219574603713381
16: train_acc=0.9663759393146019, val_acc=0.9208379977134138
17: train_acc=0.9690671824598109, val_acc=0.9225971533187233
18: train_acc=0.9701107255765835, val_acc=0.9209979209502601
19: train_acc=0.9716595629616191, val_acc=0.9201183427186579
20: train_acc=0.9718243332098582, val_acc=0.9233168077606135
21: train_acc=0.9739224044728444, val_acc=0.9230769232103734
22: train_acc=0.9750757944610606, val_acc=0.9202782663844518
23: train_acc=0.9756360120327885, val_acc=0.9197185349315715
24: train_acc=0.9767125090488813, val_acc=0.9229969615919502
25: train_acc=0.9769761412124537, val_acc=0.9217175760022092
26: train_acc=0.977887868561714, val_acc=0.923636654110388
27: train_acc=0.978316270585921, val_acc=0.9215576518502747
28: train_acc=0.9789643659418406, val_acc=0.9189189184423106
29: train_acc=0.9798431390277468, val_acc=0.924276346876662
30: train_acc=0.9807768356371096, val_acc=0.9218774984478855
31: train_acc=0.9804472954077641, val_acc=0.9217975374395211
32: train_acc=0.9804912341191525, val_acc=0.9244362701135084
33: train_acc=0.9817874248781325, val_acc=0.9241963855632682
34: train_acc=0.9818862868667964, val_acc=0.9225971533187233
35: train_acc=0.9825563512628268, val_acc=0.9229969617158684
36: train_acc=0.9833033083900374, val_acc=0.9205981128581444
37: train_acc=0.9833582318375444, val_acc=0.9228370386601333
38: train_acc=0.9838854958975565, val_acc=0.9238765396996343
39: train_acc=0.9840832198513136, val_acc=0.9199584194818116
40: train_acc=0.9842919285909495, val_acc=0.9219574605524492
41: train_acc=0.985181686585825, val_acc=0.9226771148132282
42: train_acc=0.9849949470801023, val_acc=0.9197984966739128
43: train_acc=0.9856540272219004, val_acc=0.9247561174450961