Commit e90fea4f authored by Florian RICHOUX

New results to analyse

parent da2599ac
@@ -111,6 +111,7 @@ pytorch/model_weights/*
 pytorch/weights/*
 pytorch/models/new_data*
 pytorch/models/redo_*
+keras/weights/fc2_2dense*
 *_error
 private_mail_data.txt
 clean.sh
\ No newline at end of file
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
protein1 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
protein2 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, None, 5) 2005 protein1[0][0]
protein2[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, None, 5) 0 conv1d_1[0][0]
conv1d_1[1][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, None, 5) 20 max_pooling1d_1[0][0]
max_pooling1d_1[1][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, None, 5) 505 batch_normalization_1[0][0]
batch_normalization_1[1][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, None, 5) 0 conv1d_2[0][0]
conv1d_2[1][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, None, 5) 20 max_pooling1d_2[0][0]
max_pooling1d_2[1][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, None, 5) 505 batch_normalization_2[0][0]
batch_normalization_2[1][0]
__________________________________________________________________________________________________
max_pooling1d_3 (MaxPooling1D) (None, None, 5) 0 conv1d_3[0][0]
conv1d_3[1][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, None, 5) 20 max_pooling1d_3[0][0]
max_pooling1d_3[1][0]
__________________________________________________________________________________________________
lstm_1 (LSTM) (None, 32) 4864 batch_normalization_3[0][0]
batch_normalization_3[1][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 64) 0 lstm_1[0][0]
lstm_1[1][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 25) 1625 concatenate_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 25) 100 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 25) 650 batch_normalization_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 25) 100 dense_2[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 1) 26 batch_normalization_5[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 1) 0 dense_3[0][0]
==================================================================================================
Total params: 10,440
Trainable params: 10,310
Non-trainable params: 130
__________________________________________________________________________________________________
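The summary above is consistent with a siamese Conv-LSTM network: one stack of Conv1D/MaxPooling1D/BatchNormalization layers and a single LSTM are shared between the two protein inputs (hence the two inbound nodes listed for each shared layer), and the two 32-dim LSTM outputs are concatenated before the dense head. Below is a minimal Keras sketch, assuming ReLU activations and a sigmoid output for binary interaction prediction; kernel size 20 follows from the parameter counts (20*20*5+5 = 2005 and 20*5*5+5 = 505), but the pooling size cannot be read off the summary, so pool_size=3 is a placeholder.

# Minimal sketch of lstm32_3conv3_3dense_shared, reconstructed from the
# summary above. Activations, pool size, and the sigmoid output are
# assumptions; layer widths and kernel size match the parameter counts.
from keras.layers import (Input, Conv1D, MaxPooling1D, BatchNormalization,
                          LSTM, Concatenate, Dense, Activation)
from keras.models import Model

def shared_branch():
    # Each shared layer is instantiated once and applied to both inputs,
    # which is why conv1d_1 lists protein1[0][0] and protein2[0][0].
    layers = []
    for _ in range(3):
        layers += [Conv1D(5, 20, activation='relu'),  # kernel 20: from param counts
                   MaxPooling1D(3),                   # pool size: assumption
                   BatchNormalization()]
    layers.append(LSTM(32))
    return layers

protein1 = Input(shape=(None, 20), name='protein1')
protein2 = Input(shape=(None, 20), name='protein2')

branch = shared_branch()
x1, x2 = protein1, protein2
for layer in branch:          # applied to protein1 first (node [0][0])
    x1 = layer(x1)
for layer in branch:          # then to protein2 (node [1][0])
    x2 = layer(x2)

x = Concatenate()([x1, x2])   # (None, 64)
x = Dense(25, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(25, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(1)(x)
out = Activation('sigmoid')(x)

model = Model(inputs=[protein1, protein2], outputs=out)

The three other summaries below differ from this sketch only in the LSTM width (32 vs 64 units) and in how many Dense(25)+BatchNormalization blocks precede the final one-unit layer.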
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
protein1 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
protein2 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, None, 5) 2005 protein1[0][0]
protein2[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, None, 5) 0 conv1d_1[0][0]
conv1d_1[1][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, None, 5) 20 max_pooling1d_1[0][0]
max_pooling1d_1[1][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, None, 5) 505 batch_normalization_1[0][0]
batch_normalization_1[1][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, None, 5) 0 conv1d_2[0][0]
conv1d_2[1][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, None, 5) 20 max_pooling1d_2[0][0]
max_pooling1d_2[1][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, None, 5) 505 batch_normalization_2[0][0]
batch_normalization_2[1][0]
__________________________________________________________________________________________________
max_pooling1d_3 (MaxPooling1D) (None, None, 5) 0 conv1d_3[0][0]
conv1d_3[1][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, None, 5) 20 max_pooling1d_3[0][0]
max_pooling1d_3[1][0]
__________________________________________________________________________________________________
lstm_1 (LSTM) (None, 32) 4864 batch_normalization_3[0][0]
batch_normalization_3[1][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 64) 0 lstm_1[0][0]
lstm_1[1][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 25) 1625 concatenate_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 25) 100 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 26 batch_normalization_4[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 1) 0 dense_2[0][0]
==================================================================================================
Total params: 9,690
Trainable params: 9,610
Non-trainable params: 80
__________________________________________________________________________________________________
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
protein1 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
protein2 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, None, 5) 2005 protein1[0][0]
protein2[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, None, 5) 0 conv1d_1[0][0]
conv1d_1[1][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, None, 5) 20 max_pooling1d_1[0][0]
max_pooling1d_1[1][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, None, 5) 505 batch_normalization_1[0][0]
batch_normalization_1[1][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, None, 5) 0 conv1d_2[0][0]
conv1d_2[1][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, None, 5) 20 max_pooling1d_2[0][0]
max_pooling1d_2[1][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, None, 5) 505 batch_normalization_2[0][0]
batch_normalization_2[1][0]
__________________________________________________________________________________________________
max_pooling1d_3 (MaxPooling1D) (None, None, 5) 0 conv1d_3[0][0]
conv1d_3[1][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, None, 5) 20 max_pooling1d_3[0][0]
max_pooling1d_3[1][0]
__________________________________________________________________________________________________
lstm_1 (LSTM) (None, 64) 17920 batch_normalization_3[0][0]
batch_normalization_3[1][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 128) 0 lstm_1[0][0]
lstm_1[1][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 25) 3225 concatenate_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 25) 100 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 26 batch_normalization_4[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 1) 0 dense_2[0][0]
==================================================================================================
Total params: 24,346
Trainable params: 24,266
Non-trainable params: 80
__________________________________________________________________________________________________
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
protein1 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
protein2 (InputLayer) (None, None, 20) 0
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, None, 5) 2005 protein1[0][0]
protein2[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, None, 5) 0 conv1d_1[0][0]
conv1d_1[1][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, None, 5) 20 max_pooling1d_1[0][0]
max_pooling1d_1[1][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, None, 5) 505 batch_normalization_1[0][0]
batch_normalization_1[1][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, None, 5) 0 conv1d_2[0][0]
conv1d_2[1][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, None, 5) 20 max_pooling1d_2[0][0]
max_pooling1d_2[1][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, None, 5) 505 batch_normalization_2[0][0]
batch_normalization_2[1][0]
__________________________________________________________________________________________________
max_pooling1d_3 (MaxPooling1D) (None, None, 5) 0 conv1d_3[0][0]
conv1d_3[1][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, None, 5) 20 max_pooling1d_3[0][0]
max_pooling1d_3[1][0]
__________________________________________________________________________________________________
lstm_1 (LSTM) (None, 64) 17920 batch_normalization_3[0][0]
batch_normalization_3[1][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 128) 0 lstm_1[0][0]
lstm_1[1][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 25) 3225 concatenate_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 25) 100 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 25) 650 batch_normalization_4[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 25) 100 dense_2[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 1) 26 batch_normalization_5[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 1) 0 dense_3[0][0]
==================================================================================================
Total params: 25,096
Trainable params: 24,966
Non-trainable params: 130
__________________________________________________________________________________________________
File lstm32_3conv3_3dense_shared_2019-01-03_14:16_gpu-1-1_nadam_0.002_1024_300_mirror-double.txt
lstm32_3conv3_3dense_shared, epochs=300, batch=1024, optimizer=nadam, learning rate=0.002, patience=30
Number of training samples: 91036
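The header line encodes the full training configuration: nadam at learning rate 0.002, batch size 1024, up to 300 epochs, early-stopping patience 30. A minimal sketch of that setup, building on the model sketch above, might look as follows; binary cross-entropy and monitoring val_loss are assumptions, since the log does not record the loss function or the monitored quantity.

# Sketch of the recorded training configuration; x1_train, x2_train,
# y_train and the validation arrays are hypothetical placeholders.
from keras.optimizers import Nadam
from keras.callbacks import EarlyStopping

model.compile(optimizer=Nadam(lr=0.002),
              loss='binary_crossentropy',   # assumption: sigmoid output
              metrics=['accuracy'])

history = model.fit([x1_train, x2_train], y_train,
                    validation_data=([x1_val, x2_val], y_val),
                    epochs=300, batch_size=1024,
                    callbacks=[EarlyStopping(monitor='val_loss',
                                             patience=30)])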
Loss
0: train_loss=0.6039336638889244, val_loss=0.6273381451063493
1: train_loss=0.41170846658000426, val_loss=0.5016812535165768
2: train_loss=0.3607003718694456, val_loss=0.5301711452440054
3: train_loss=0.3390789602507218, val_loss=0.3850829799413948
4: train_loss=0.3260613660440173, val_loss=0.3582970602320191
5: train_loss=0.31654184167160604, val_loss=0.3864629761316329
6: train_loss=0.30750783664558257, val_loss=0.42712563624215016
7: train_loss=0.30207876725106536, val_loss=0.38379292039704976
8: train_loss=0.29541020426710796, val_loss=0.3652430268910986
9: train_loss=0.29186083338760244, val_loss=0.6023856339579713
10: train_loss=0.2701138617404814, val_loss=0.3934694708975986
11: train_loss=0.26223521872623573, val_loss=0.35725733974295276
12: train_loss=0.2599461512773767, val_loss=0.3845965575737208
13: train_loss=0.25777224427241896, val_loss=0.3279669900789235
14: train_loss=0.2560980514093256, val_loss=0.3255616294113747
15: train_loss=0.2546365893121773, val_loss=0.32440474425413773
16: train_loss=0.25303083952588173, val_loss=0.3213861753144456
17: train_loss=0.25229052538072255, val_loss=0.3754474795543345
18: train_loss=0.2507886689507724, val_loss=0.3200061753637673
19: train_loss=0.24950519872822688, val_loss=0.3874696589757324
20: train_loss=0.24816064191590537, val_loss=0.32231860274355756
21: train_loss=0.2467752906655358, val_loss=0.3317528529423019
22: train_loss=0.24657398291932944, val_loss=0.31534173814903044
23: train_loss=0.24456967330208362, val_loss=0.332342599451723
24: train_loss=0.24324068681123864, val_loss=0.3440952748036396
25: train_loss=0.24334608110781686, val_loss=0.3189489142657966
26: train_loss=0.24241342452735337, val_loss=0.3213114890073483
27: train_loss=0.2412438439027282, val_loss=0.38010728075163813
28: train_loss=0.2307073104180688, val_loss=0.3206881761036166
29: train_loss=0.2278157494760229, val_loss=0.33174907263233133
30: train_loss=0.22740931756307656, val_loss=0.3435221865532514
31: train_loss=0.22655404129715112, val_loss=0.3209876411774817
32: train_loss=0.2254060024174693, val_loss=0.32638889079797023
33: train_loss=0.22124628920070924, val_loss=0.31979566167664114
34: train_loss=0.2207155048243531, val_loss=0.320847476346577
35: train_loss=0.22077053758414403, val_loss=0.3186844421085045
36: train_loss=0.2202299660047751, val_loss=0.3242415607299649
37: train_loss=0.21950386047462803, val_loss=0.32280963989099504
38: train_loss=0.21803327770401604, val_loss=0.31967152219742906
39: train_loss=0.21801988621724763, val_loss=0.3199921718372758
40: train_loss=0.2182475784816821, val_loss=0.3198738051198605
41: train_loss=0.21824278588155446, val_loss=0.3203683970271159
42: train_loss=0.2177342780916839, val_loss=0.32027969765182723
43: train_loss=0.21746683113067114, val_loss=0.3201550900526243
44: train_loss=0.2173610409905304, val_loss=0.32032974453500873
45: train_loss=0.21730611181931178, val_loss=0.3207389225096926
46: train_loss=0.2172060294760264, val_loss=0.3204191847122442
47: train_loss=0.2169257669369446, val_loss=0.32045806566658125
48: train_loss=0.21713739978279253, val_loss=0.320483356561734
49: train_loss=0.2171480797191586, val_loss=0.3204245232409064
50: train_loss=0.2172945270683839, val_loss=0.32055589457559486
51: train_loss=0.21746991524611392, val_loss=0.3205040182978482
52: train_loss=0.21684331344452445, val_loss=0.3204596007888305
53: train_loss=0.21735272372529493, val_loss=0.32072337735524736
54: train_loss=0.2167686520501093, val_loss=0.3205913948131984
55: train_loss=0.21696590877104646, val_loss=0.32051483797546615
56: train_loss=0.21674755941879206, val_loss=0.32071883082580477
57: train_loss=0.21689422924112964, val_loss=0.32053959354884565
58: train_loss=0.21683800103160616, val_loss=0.32040061033021805
59: train_loss=0.2170616070413334, val_loss=0.3205232015280233
60: train_loss=0.217077007497822, val_loss=0.3206612102683784
61: train_loss=0.21704670339606705, val_loss=0.32042804370398487
///////////////////////////////////////////
Accuracy
0: train_acc=0.6809943319074121, val_acc=0.7446825522223454
1: train_acc=0.8242783072810951, val_acc=0.8216056293170013
2: train_acc=0.852948284014556, val_acc=0.8171277786853045
3: train_acc=0.8635704558979591, val_acc=0.8474332316959264
4: train_acc=0.8693703589919602, val_acc=0.8505517356723247
5: train_acc=0.8739949029083542, val_acc=0.8495921959462174
6: train_acc=0.87911375708496, val_acc=0.8364784906487379
7: train_acc=0.8813326595940936, val_acc=0.8475931557906679
8: train_acc=0.8840019334111074, val_acc=0.8516711978441083
9: train_acc=0.8853969858418587, val_acc=0.7778666244112891
10: train_acc=0.8975240562574857, val_acc=0.8361586442989635
11: train_acc=0.9014565665936961, val_acc=0.8555893168418136
12: train_acc=0.9020826927392731, val_acc=0.8357588354728708
13: train_acc=0.9031152513608702, val_acc=0.8754997604964314
14: train_acc=0.9045432573792868, val_acc=0.8777386850783029
15: train_acc=0.9053671077928945, val_acc=0.8766192224203787
16: train_acc=0.9058394482351679, val_acc=0.8771789544833177
17: train_acc=0.9057296013401537, val_acc=0.8431153043678011
18: train_acc=0.9067511751758722, val_acc=0.8801375336309977
19: train_acc=0.9063227736833116, val_acc=0.8379177999042728
20: train_acc=0.9076189637404135, val_acc=0.8802174955544502
21: train_acc=0.9085965994225795, val_acc=0.8770190310653603
22: train_acc=0.9091787862282205, val_acc=0.8790980333254735
23: train_acc=0.9100685442257149, val_acc=0.8662242119681753
24: train_acc=0.9105738390282434, val_acc=0.8618263238699898
25: train_acc=0.910694670468193, val_acc=0.8797377262728587
26: train_acc=0.9113867041217071, val_acc=0.8796577647783538
27: train_acc=0.911309811487428, val_acc=0.8431153043678011
28: train_acc=0.9180104576031266, val_acc=0.8798176875862526
29: train_acc=0.9194824024769735, val_acc=0.8715816410125856
30: train_acc=0.9196801262604991, val_acc=0.8645450188963771
31: train_acc=0.9197570194028541, val_acc=0.8847753078045701
32: train_acc=0.9208225315597377, val_acc=0.8758995679784884
33: train_acc=0.9228986333714381, val_acc=0.8789381100886272
34: train_acc=0.9233490050145436, val_acc=0.8803774189152146
35: train_acc=0.9233050662560143, val_acc=0.8846153846916419
36: train_acc=0.923568698346256, val_acc=0.8754997601914021
37: train_acc=0.924008084745167, val_acc=0.8776587238888273
38: train_acc=0.9246781491176269, val_acc=0.8834159602913765
39: train_acc=0.9246012565566784, val_acc=0.8822964980623997
40: train_acc=0.9246891340737372, val_acc=0.8822964974523411
41: train_acc=0.9240959619715227, val_acc=0.8840556532387617
42: train_acc=0.9249198119006251, val_acc=0.8839756913153092
43: train_acc=0.9254800295954435, val_acc=0.8822964977573704
44: train_acc=0.9249967048020369, val_acc=0.8826963061545156
45: train_acc=0.9249637507089146, val_acc=0.8807772270073304
46: train_acc=0.9253811681384262, val_acc=0.8815768428865326
47: train_acc=0.9250626126242478, val_acc=0.8819766512836777
48: train_acc=0.9255788916574379, val_acc=0.8817367661233789
49: train_acc=0.9252383672471908, val_acc=0.8821365745205241
50: train_acc=0.9247989805575765, val_acc=0.8816568045049558
51: train_acc=0.9250296585075549, val_acc=0.8818167280468314
52: train_acc=0.9253042750222605, val_acc=0.8821365745205241
53: train_acc=0.9250406429791598, val_acc=0.8814169199547157
54: train_acc=0.9255898767078303, val_acc=0.8843754997124543
55: train_acc=0.9251724592311776, val_acc=0.882856228657385
56: train_acc=0.9253811678241524, val_acc=0.8813369583362926
57: train_acc=0.9250296586280266, val_acc=0.8819766512836777
58: train_acc=0.9255569228111291, val_acc=0.8824564209942167
59: train_acc=0.9250076895617261, val_acc=0.8839756916203385
60: train_acc=0.9253152599050404, val_acc=0.8817367661233789
61: train_acc=0.9253701834494485, val_acc=0.8823764593757936
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 6485
Number of 1 predicted: 6021
Validation precision: 0.8733180177223498
Validation recall: 0.8839063278525162
Validation F1-score: 0.8785802723895997
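The validation metrics above could be reproduced from the model's sigmoid outputs along the following lines; the 0.5 decision threshold and the array names are assumptions, not recorded in the log.

# Sketch of the validation-metric computation; x1_val, x2_val, y_val
# are hypothetical placeholders for the validation arrays.
import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score

y_prob = model.predict([x1_val, x2_val]).ravel()
y_pred = (y_prob >= 0.5).astype(int)   # threshold: assumption

print('Validation precision:', precision_score(y_val, y_pred))
print('Validation recall:   ', recall_score(y_val, y_pred))
print('Validation F1-score: ', f1_score(y_val, y_pred))

The same computation applies verbatim to the validation blocks of the other result files below.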
File lstm32_3conv4_2dense_shared_2019-01-03_14:19_gpu-3-1_nadam_0.002_1024_300_mirror-double.txt
lstm32_3conv4_2dense_shared, epochs=300, batch=1024, optimizer=nadam, learning rate=0.002, patience=30
Number of training samples: 91036
Loss
0: train_loss=0.540706068785435, val_loss=1.012402835396334
1: train_loss=0.397409923211296, val_loss=0.4587385101154787
2: train_loss=0.35769293991305967, val_loss=0.6782202844655592
3: train_loss=0.33677666748867463, val_loss=0.610840970680997
4: train_loss=0.3237247462156677, val_loss=0.4034239396693591
5: train_loss=0.31464631682964606, val_loss=0.4129713659722691
6: train_loss=0.30862561815929695, val_loss=0.37640536830422594
7: train_loss=0.3033370911326024, val_loss=0.36013990326078726
8: train_loss=0.29860609831759294, val_loss=0.3957767732692836
9: train_loss=0.2940380821732332, val_loss=0.5257355810813135
10: train_loss=0.29145091725462186, val_loss=0.3513203753712805
11: train_loss=0.2865272850928279, val_loss=0.4092977430159695
12: train_loss=0.2841496652684504, val_loss=0.39472373137088196
13: train_loss=0.28179327168889556, val_loss=0.38427407385424844
14: train_loss=0.2804572769787762, val_loss=0.34485204716348733
15: train_loss=0.2764784047426985, val_loss=0.3352301340474523
16: train_loss=0.27359566557747317, val_loss=0.3622937026537458
17: train_loss=0.27211200099370825, val_loss=0.348379367011348
18: train_loss=0.2698206067245022, val_loss=0.4319396269296658
19: train_loss=0.268365480233917, val_loss=0.33695861215345885
20: train_loss=0.26589450816696664, val_loss=0.34522944650191717
21: train_loss=0.2462007520421066, val_loss=0.3217131851263929
22: train_loss=0.23912904365231688, val_loss=0.33289574094053537
23: train_loss=0.23809885349954713, val_loss=0.3254096528663266
24: train_loss=0.23664912401849406, val_loss=0.3160373292149648
25: train_loss=0.23525202229407546, val_loss=0.3529351597239871
26: train_loss=0.234264134690267, val_loss=0.33024803743388353
27: train_loss=0.23261836508732645, val_loss=0.31398244519319873
28: train_loss=0.2319036264207735, val_loss=0.3237813672751746
29: train_loss=0.23110376423547838, val_loss=0.31227173021978594
30: train_loss=0.23074354099428052, val_loss=0.3078861051648212
31: train_loss=0.22991192900716506, val_loss=0.31121736637653397
32: train_loss=0.2292798466810553, val_loss=0.3218274454765085
33: train_loss=0.22803459876979756, val_loss=0.31800182822626455
34: train_loss=0.22709858533530397, val_loss=0.3095262022747072
35: train_loss=0.2273507057066107, val_loss=0.30917397581366146
36: train_loss=0.21668430954562304, val_loss=0.3050325785029436
37: train_loss=0.21408424495254796, val_loss=0.3118411808533991
38: train_loss=0.21405949212489267, val_loss=0.30490872564171667
39: train_loss=0.21296010041581007, val_loss=0.30476488679880986
40: train_loss=0.21269247030126562, val_loss=0.30398779001290105
41: train_loss=0.21245714470012791, val_loss=0.3023668414932927
42: train_loss=0.2118234015253842, val_loss=0.30899348997524523
43: train_loss=0.212146253383685, val_loss=0.3075811568401574
44: train_loss=0.21136765213511133, val_loss=0.31090247517163594
45: train_loss=0.21156837547674995, val_loss=0.30627659466025275
46: train_loss=0.2109655449105103, val_loss=0.3049920093969366
47: train_loss=0.20673600074523016, val_loss=0.3051556218638909
48: train_loss=0.2059053069934772, val_loss=0.30310122471188955
49: train_loss=0.2057941625097461, val_loss=0.3050608505311249
50: train_loss=0.20600238768087184, val_loss=0.3042892423006662
51: train_loss=0.2052230535196113, val_loss=0.30743451223227763
52: train_loss=0.2044975384811226, val_loss=0.3045583028574476
53: train_loss=0.2044774793594875, val_loss=0.3042504315698088
54: train_loss=0.2039413015838195, val_loss=0.30432385138544144
55: train_loss=0.20409222590668746, val_loss=0.30443052276694904
56: train_loss=0.2040319830163501, val_loss=0.3044096794954666
57: train_loss=0.20344605906508637, val_loss=0.30434476900783214
58: train_loss=0.20361539546019417, val_loss=0.30426929719292145
59: train_loss=0.20379730307567182, val_loss=0.3042271177939905
60: train_loss=0.203617948818916, val_loss=0.3045275818104365
61: train_loss=0.20372611566447663, val_loss=0.30436118954890024
62: train_loss=0.20324618526183164, val_loss=0.30451253917757537
63: train_loss=0.20320838866844984, val_loss=0.3043856308800725
64: train_loss=0.20331009581381923, val_loss=0.30448396928821697
65: train_loss=0.2033669164779028, val_loss=0.304381954351578
66: train_loss=0.20354686546181117, val_loss=0.3045264283752472
67: train_loss=0.20371558377267585, val_loss=0.30447255267765966
68: train_loss=0.20333858683336714, val_loss=0.30445448260449914
69: train_loss=0.2032762206503228, val_loss=0.30445426712984774
70: train_loss=0.20307337417180468, val_loss=0.30455477663217384
71: train_loss=0.20315873982868735, val_loss=0.30434536538790574
///////////////////////////////////////////
Accuracy
0: train_acc=0.7209455602365668, val_acc=0.5755637295242693
1: train_acc=0.8318137877739304, val_acc=0.7908204067197586
2: train_acc=0.8533437320923019, val_acc=0.6960658879827618
3: train_acc=0.8640647654772444, val_acc=0.7380457381410597
4: train_acc=0.8708423039129484, val_acc=0.8331201027988835
5: train_acc=0.8750604153821305, val_acc=0.8330401402653722
6: train_acc=0.8788720946162356, val_acc=0.8428754195125316
7: train_acc=0.880124346472644, val_acc=0.852230929659211
8: train_acc=0.8831670988873096, val_acc=0.8300815609937742
9: train_acc=0.8856276639123086, val_acc=0.7941787943313089
10: train_acc=0.8857155410679527, val_acc=0.8559091636777286
11: train_acc=0.8871435475708745, val_acc=0.8287222131755511
12: train_acc=0.8881980753766249, val_acc=0.8335998725094224
13: train_acc=0.8898457755491035, val_acc=0.8475931557334748
14: train_acc=0.8901643307751975, val_acc=0.857348473238293
15: train_acc=0.8923722483282208, val_acc=0.8687829843677751
16: train_acc=0.8937563159757121, val_acc=0.8534303530204702
17: train_acc=0.8936794236085658, val_acc=0.8569486642882821
18: train_acc=0.8948108441926373, val_acc=0.830241483925591
19: train_acc=0.8965134670897933, val_acc=0.8643850956595308
20: train_acc=0.8962827892838571, val_acc=0.8587877814548217
21: train_acc=0.9072345008151991, val_acc=0.8737406042810633
22: train_acc=0.9098818052018787, val_acc=0.8659843275990464
23: train_acc=0.9107605779997007, val_acc=0.8778986090491262
24: train_acc=0.9107495935752368, val_acc=0.8789381096596797
25: train_acc=0.9117821523906361, val_acc=0.8519110824515415
26: train_acc=0.9120238145450867, val_acc=0.8671837521804231
27: train_acc=0.9122654775716471, val_acc=0.8771789544833177
28: train_acc=0.913341974684641, val_acc=0.8711018711781284
29: train_acc=0.9129025882150185, val_acc=0.8754997604964314
30: train_acc=0.9135067445452757, val_acc=0.8782184552177894
31: train_acc=0.9127488029464602, val_acc=0.8774188393385872
32: train_acc=0.9135836373000265, val_acc=0.8698224855883873
33: train_acc=0.9140779474607182, val_acc=0.8721413722176294
34: train_acc=0.9152642908166082, val_acc=0.8782184552177894
35: train_acc=0.9140340087021889, val_acc=0.8811770346704987
36: train_acc=0.9197130805002826, val_acc=0.8807772273123597
37: train_acc=0.9211630565985324, val_acc=0.8745402210753536
38: train_acc=0.9207126850025679, val_acc=0.883415960720324
39: train_acc=0.9213168419613728, val_acc=0.8830961135126544
40: train_acc=0.9212399492328115, val_acc=0.8818966896652546
41: train_acc=0.9212838874596945, val_acc=0.8845354233782482
42: train_acc=0.9212838878472989, val_acc=0.8812569970228987
43: train_acc=0.9209213934986415, val_acc=0.8830161518942313
44: train_acc=0.9218001667102572, val_acc=0.8808571889307829
45: train_acc=0.9215145653834834, val_acc=0.8800575727465514
46: train_acc=0.9216134277099916, val_acc=0.8830961135126544
47: train_acc=0.9239751310160784, val_acc=0.8833359983679239
48: train_acc=0.9247220881904301, val_acc=0.8830161518942313
49: train_acc=0.9243486094932585, val_acc=0.8816568043810376
50: train_acc=0.9239531616119337, val_acc=0.8826963061545156
51: train_acc=0.9241618698434936, val_acc=0.8809371498152292
52: train_acc=0.9250955665968985, val_acc=0.8829361902758082
53: train_acc=0.9251504897537024, val_acc=0.8837358071940166
54: train_acc=0.9250406432934336, val_acc=0.8832560367495007
55: train_acc=0.9251724592783187, val_acc=0.8838157688124397
56: train_acc=0.9249967047758474, val_acc=0.8830161518942313
57: train_acc=0.9253262445704474, val_acc=0.8842955385229787
58: train_acc=0.9254251068498146, val_acc=0.8835758832231934
59: train_acc=0.9251065514561078, val_acc=0.8838957304308629
60: train_acc=0.9255898767340198, val_acc=0.882856228657385
61: train_acc=0.9256557843388581, val_acc=0.8834959223387471
62: train_acc=0.9257436620497191, val_acc=0.883415959986347
63: train_acc=0.9252932902835229, val_acc=0.8841356152861324
64: train_acc=0.9254141219172748, val_acc=0.8834959216047702
65: train_acc=0.9254580605789031, val_acc=0.8838957304308629
66: train_acc=0.9251614745395811, val_acc=0.8839756913153092
67: train_acc=0.9253372292594251, val_acc=0.8836558448416165
68: train_acc=0.9258535083161857, val_acc=0.8837358064600397
69: train_acc=0.925721692669145, val_acc=0.8838957304308629
70: train_acc=0.9256008611322942, val_acc=0.883415959986347
71: train_acc=0.9258205546080487, val_acc=0.884455461759825
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 6517
Number of 1 predicted: 5989
Validation precision: 0.8728257302264523
Validation recall: 0.8881282350976791
Validation F1-score: 0.8804104940825954
File lstm64_3conv3_2dense_shared_2019-01-03_14:05_gpu-0-1_nadam_0.002_1024_300_mirror-double.txt
lstm64_3conv3_2dense_shared, epochs=300, batch=1024, optimizer=nadam, learning rate=0.002, patience=30
Number of training samples: 91036
Loss
0: train_loss=0.5598276521635891, val_loss=3.6223008606732416
1: train_loss=0.39814387729022427, val_loss=0.9746763653208422
2: train_loss=0.35994307381432455, val_loss=1.4511137930618254
3: train_loss=0.3390712891507481, val_loss=1.123965091768616
4: train_loss=0.3241116351512778, val_loss=1.0023232816162824
5: train_loss=0.3176022208770406, val_loss=0.6603359667449956
6: train_loss=0.30693594047094963, val_loss=1.0091243786524338
7: train_loss=0.3017313441512004, val_loss=1.0468030666020056
8: train_loss=0.2970293005833051, val_loss=1.3134075897933006
9: train_loss=0.29249740409811265, val_loss=1.0636314347364912
10: train_loss=0.28856905170246283, val_loss=0.7580047829159161
11: train_loss=0.2639896145219035, val_loss=0.7914771836852142
12: train_loss=0.2561728531918106, val_loss=1.0397672197369983
13: train_loss=0.25253071618774686, val_loss=0.6762389207237075
14: train_loss=0.25103316383829244, val_loss=0.8660456809867159
15: train_loss=0.24926900044702321, val_loss=0.723956717923224
16: train_loss=0.23787957282570021, val_loss=0.3161277543749215
17: train_loss=0.23494032855543837, val_loss=0.3112490950642291
18: train_loss=0.23364027859998707, val_loss=0.3198958601348022
19: train_loss=0.23295574701886756, val_loss=0.31704450159306985
20: train_loss=0.2318038955056514, val_loss=0.3156413483501491
21: train_loss=0.23124244829054702, val_loss=0.3195062053312249
22: train_loss=0.23014309913544617, val_loss=0.3176927847631946
23: train_loss=0.2254560434591467, val_loss=0.315604208231773
24: train_loss=0.22470003406802141, val_loss=0.3153240891879756
25: train_loss=0.22428052074654964, val_loss=0.3149345259408693
26: train_loss=0.2242646146320313, val_loss=0.31605022810619965
27: train_loss=0.2238685596189053, val_loss=0.3163682206733578
28: train_loss=0.22232424616983892, val_loss=0.3162072031579551
29: train_loss=0.22225306683548032, val_loss=0.3162627021646416
30: train_loss=0.2219797481019843, val_loss=0.3162584736473786
31: train_loss=0.22200062376129023, val_loss=0.31629264693097764
32: train_loss=0.22163542279763865, val_loss=0.3157223263311897
33: train_loss=0.22150309552398495, val_loss=0.3160493769884758
34: train_loss=0.2209954405566595, val_loss=0.31614531302821935
35: train_loss=0.2211001434220733, val_loss=0.3164487395041774
36: train_loss=0.22131289360817488, val_loss=0.3163346526442805
37: train_loss=0.22180279811769835, val_loss=0.3165078797131448
38: train_loss=0.2209589396545666, val_loss=0.31650429622813947
39: train_loss=0.22085488358099792, val_loss=0.31657334378618596
40: train_loss=0.22099626663712332, val_loss=0.31648031177376623
41: train_loss=0.22108017566637875, val_loss=0.3165688355236307
42: train_loss=0.2208708370670534, val_loss=0.31648326601654975
43: train_loss=0.22063234338829935, val_loss=0.31661002782369907
44: train_loss=0.22091900643803092, val_loss=0.31646203002928924
45: train_loss=0.22089913097865824, val_loss=0.31647516093825256
46: train_loss=0.22056073361014256, val_loss=0.3164807155849875
47: train_loss=0.22068494629569746, val_loss=0.316523175258246
48: train_loss=0.2209242222137205, val_loss=0.31643129192923003
49: train_loss=0.22065489750473827, val_loss=0.3165567576284697
50: train_loss=0.22104893211512516, val_loss=0.3164417333561345
51: train_loss=0.22073641148545975, val_loss=0.3164759232304715
52: train_loss=0.22081090687783575, val_loss=0.3165286467129872
53: train_loss=0.22119011288761092, val_loss=0.3165570976408924
54: train_loss=0.22092072145175398, val_loss=0.31646492986245933
55: train_loss=0.22077523875547642, val_loss=0.31652152014506946
56: train_loss=0.22093081080152477, val_loss=0.31646693738464277
57: train_loss=0.2207320540700815, val_loss=0.31643664112915404
58: train_loss=0.2208106055953307, val_loss=0.316489587068634
59: train_loss=0.2206688923048813, val_loss=0.3164707831377108
60: train_loss=0.2207044794140028, val_loss=0.3164777787336441
61: train_loss=0.22093188952014758, val_loss=0.3164279398518945
62: train_loss=0.2211018678888903, val_loss=0.31648062713123715
63: train_loss=0.22100850969606683, val_loss=0.316511006597784
64: train_loss=0.22061595880501803, val_loss=0.31650961138392647
65: train_loss=0.22097780216300136, val_loss=0.3165088234978372
66: train_loss=0.2210378787003605, val_loss=0.3164361038246899
///////////////////////////////////////////
Accuracy
0: train_acc=0.7094885538974585, val_acc=0.5127138969003344
1: train_acc=0.8316600025053723, val_acc=0.5187909806344713
2: train_acc=0.8532448701507791, val_acc=0.5522149371925438
3: train_acc=0.8631969769127031, val_acc=0.49728130497361267
4: train_acc=0.8700184542012186, val_acc=0.5762833837850483
5: train_acc=0.8738081638635664, val_acc=0.6713577486907084
6: train_acc=0.878158091194543, val_acc=0.6338557491069142
7: train_acc=0.8797288983006161, val_acc=0.5698864543111958
8: train_acc=0.8828705128479876, val_acc=0.567087798219251
9: train_acc=0.8849026757544973, val_acc=0.5505357423477625
10: train_acc=0.8854738785494682, val_acc=0.705341436453825
11: train_acc=0.8979085194734033, val_acc=0.6593635050121468
12: train_acc=0.9014345972864525, val_acc=0.620662081762064
13: train_acc=0.9029394964236536, val_acc=0.7054213977672187
14: train_acc=0.9030822972913184, val_acc=0.6561650405802498
15: train_acc=0.9033239596657606, val_acc=0.6765552531542367
16: train_acc=0.9104749768457774, val_acc=0.8786982244993808
17: train_acc=0.9115624587446499, val_acc=0.8805373420281428
18: train_acc=0.9110791337810117, val_acc=0.8823764596808229
19: train_acc=0.9113317807449115, val_acc=0.8790980325914965
20: train_acc=0.9125291091538129, val_acc=0.880137533936027
21: train_acc=0.912759787370924, val_acc=0.8845354226442712
22: train_acc=0.91290258828573, val_acc=0.8821365743966059
23: train_acc=0.9151654291186476, val_acc=0.8826163438021155
24: train_acc=0.9153411839825338, val_acc=0.8833359983679239
25: train_acc=0.915681708343021, val_acc=0.881416920259745
26: train_acc=0.9158464784210284, val_acc=0.8830961142466314
27: train_acc=0.9164066959691858, val_acc=0.8817367667334377
28: train_acc=0.9166922975840439, val_acc=0.8836558448416165
29: train_acc=0.9169669143423118, val_acc=0.8830961135126544
30: train_acc=0.9169559298209469, val_acc=0.8826963054205387
31: train_acc=0.9170657761371734, val_acc=0.8849352314703639
32: train_acc=0.9172964539143013, val_acc=0.8849352314703639
33: train_acc=0.9168021441909737, val_acc=0.8851751163256334
34: train_acc=0.9179884879816091, val_acc=0.8851751155916565
35: train_acc=0.9175930403647982, val_acc=0.8846953458811175
36: train_acc=0.9177248563523022, val_acc=0.8852550779440566
37: train_acc=0.9173184232477344, val_acc=0.8847753074995407
38: train_acc=0.9175820552175046, val_acc=0.8846953458811175
39: train_acc=0.9177248558913673, val_acc=0.884455461025848
40: train_acc=0.9177797795798176, val_acc=0.8847753074995407
41: train_acc=0.9180104576031266, val_acc=0.8845354226442712
42: train_acc=0.9171866072130892, val_acc=0.8842955377890018
43: train_acc=0.9179225799420255, val_acc=0.8845354226442712
44: train_acc=0.9178786415946709, val_acc=0.8851751155916565
45: train_acc=0.9179775033895325, val_acc=0.8851751155916565
46: train_acc=0.9178456875984496, val_acc=0.8848552691179639
47: train_acc=0.9179665186743655, val_acc=0.884935230736387
48: train_acc=0.9178347027392404, val_acc=0.8850951539732334
49: train_acc=0.9178237183645366, val_acc=0.884935230736387
50: train_acc=0.9178127333822367, val_acc=0.8850151923548102
51: train_acc=0.9176479634508904, val_acc=0.8845354233782482
52: train_acc=0.9178786417151426, val_acc=0.8850951539732334