Commit c01f6aee authored by Florian RICHOUX

Last results

parent 186060a3
@@ -189,14 +189,14 @@ if __name__ == '__main__':
     if test_set:
         #callbacks_list = []
-        callbacks_list = [ callbacks.ReduceLROnPlateau( monitor='loss', factor=0.5, patience=5, min_lr=0.0003, cooldown=1, verbose=1 ) ]
+        callbacks_list = [ callbacks.ReduceLROnPlateau( monitor='loss', factor=0.9, patience=5, min_lr=0.0008, cooldown=1, verbose=1 ) ]
         callbacks_list.append( callbacks.EarlyStopping(monitor='acc', patience=patience, verbose=1 ) )
         if save_weights:
             callbacks_list.append( callbacks.ModelCheckpoint( filepath='weights/' + file_name + '.h5',
                                                               monitor='loss', save_best_only=True, verbose=1) )
     else:
         #callbacks_list = []
-        callbacks_list = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.5, patience=5, min_lr=0.0003, cooldown=1, verbose=1 ) ]
+        callbacks_list = [ callbacks.ReduceLROnPlateau( monitor='val_loss', factor=0.9, patience=5, min_lr=0.0008, cooldown=1, verbose=1 ) ]
         callbacks_list.append( callbacks.EarlyStopping(monitor='val_acc', patience=patience, verbose=1) )
         #callbacks_list = [ callbacks.EarlyStopping(monitor='val_loss', patience=patience,) ]
         if save_weights:
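For context, a minimal runnable sketch of how this callback configuration wires into a Keras training loop. The model, data, `patience`, and `file_name` below are illustrative placeholders, not this repository's actual setup; only the callback hyper-parameters mirror the commit.

```python
# Minimal sketch of the updated callback wiring. Assumptions: toy model,
# random data, patience=10, file_name -- none of these come from the repo.
import os
import numpy as np
from tensorflow.keras import callbacks, layers, models

patience = 10                 # placeholder value
file_name = 'example_run'     # placeholder value
os.makedirs('weights', exist_ok=True)

model = models.Sequential([
    layers.Dense(32, activation='relu', input_shape=(16,)),
    layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

# Settings from this commit: gentler decay (factor 0.9, was 0.5) and a
# higher learning-rate floor (min_lr 8e-4, was 3e-4).
callbacks_list = [
    callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.9, patience=5,
                                min_lr=0.0008, cooldown=1, verbose=1),
    callbacks.EarlyStopping(monitor='val_acc', patience=patience, verbose=1),
    callbacks.ModelCheckpoint(filepath='weights/' + file_name + '.h5',
                              monitor='val_loss', save_best_only=True,
                              verbose=1),
]

x = np.random.rand(512, 16)
y = np.random.randint(0, 2, size=(512, 1))
model.fit(x, y, validation_split=0.2, epochs=30, batch_size=64,
          callbacks=callbacks_list, verbose=0)
```

With the runs' base learning rate of 0.002, factor 0.9 needs nine plateau-triggered reductions to reach the new 8e-4 floor (0.002 × 0.9^9 ≈ 7.7e-4), whereas the old factor 0.5 hit its 3e-4 floor after three, so this change makes the decay schedule much gentler.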
File lstm32_3conv3_2dense_shared_2019-01-04_02:54_gpu-2-1_adam_0.002_1024_300_mirror-double.txt
lstm32_3conv3_2dense_shared, epochs=300, batch=1024, optimizer=adam, learning rate=0.002, patience=10
Number of training samples: 91036
Loss
0: train_loss=0.5885286999859737, val_loss=0.6154925547446057
1: train_loss=0.4027363874891939, val_loss=0.9512303109438195
2: train_loss=0.3595120599993849, val_loss=0.4653214673109099
3: train_loss=0.33830590912587477, val_loss=0.6638718610970512
4: train_loss=0.32599860647660295, val_loss=0.4116446550047228
5: train_loss=0.3161366324893221, val_loss=0.4325444793083487
6: train_loss=0.3080826078322772, val_loss=0.3598606665954578
7: train_loss=0.3012759554595131, val_loss=0.3558256012924801
8: train_loss=0.29635871098337896, val_loss=0.40121119235508657
9: train_loss=0.29170714336888504, val_loss=0.34488259370987
10: train_loss=0.2857905901522276, val_loss=0.3734693540042445
11: train_loss=0.28399176681192045, val_loss=0.3352573125024576
12: train_loss=0.28251170255271607, val_loss=0.3589011989897315
13: train_loss=0.2763605011573308, val_loss=0.33956361231604865
14: train_loss=0.2755154468863253, val_loss=0.3380558968314777
15: train_loss=0.27263037797866135, val_loss=0.371787341789076
16: train_loss=0.2707374692565554, val_loss=0.3362676775657557
17: train_loss=0.2543421921348618, val_loss=0.3126062105869877
18: train_loss=0.2493016455867409, val_loss=0.3846107774851857
19: train_loss=0.24829999098742572, val_loss=0.3147588491763913
20: train_loss=0.24579898191805732, val_loss=0.4020364743822883
21: train_loss=0.24521192457435095, val_loss=0.3132884950375111
22: train_loss=0.24398135479595776, val_loss=0.31585469918691045
23: train_loss=0.2341294334512681, val_loss=0.3181782974093776
24: train_loss=0.23133271290828644, val_loss=0.30342814784783584
25: train_loss=0.23039746648584153, val_loss=0.31145713988655227
26: train_loss=0.23050446457988355, val_loss=0.3009875660159579
27: train_loss=0.2280946879154287, val_loss=0.3176396800273326
28: train_loss=0.22876980961000276, val_loss=0.2954558077219599
29: train_loss=0.2277072578540266, val_loss=0.2956306379393885
30: train_loss=0.22658980950262045, val_loss=0.29892994727723105
31: train_loss=0.22570338022313954, val_loss=0.29732988918473885
32: train_loss=0.22613922559899152, val_loss=0.3180553596219539
33: train_loss=0.22502286819407788, val_loss=0.3058977581092344
34: train_loss=0.2183941429941436, val_loss=0.29933012160952904
35: train_loss=0.21732162777731387, val_loss=0.31140872818955073
36: train_loss=0.21672284170992423, val_loss=0.2959829348526858
37: train_loss=0.21684105791587907, val_loss=0.2965661634755253
38: train_loss=0.21581031962573788, val_loss=0.29852897480223134
39: train_loss=0.21227001352479155, val_loss=0.3007199825448226
40: train_loss=0.21177652348711085, val_loss=0.29460831204071514
41: train_loss=0.21121445031193103, val_loss=0.29369834509159726
42: train_loss=0.21117714229056012, val_loss=0.29448551791735733
43: train_loss=0.21085494990636167, val_loss=0.29595839204205604
44: train_loss=0.21041300448001524, val_loss=0.2963680713229726
45: train_loss=0.21004706971323434, val_loss=0.2972312726422384
46: train_loss=0.2102985677222459, val_loss=0.29446672075903935
47: train_loss=0.2083883184876277, val_loss=0.2939011870423031
48: train_loss=0.20760304455027187, val_loss=0.2969445344802381
49: train_loss=0.2073733599267969, val_loss=0.29529532596204405
50: train_loss=0.20740291541396033, val_loss=0.2951782221662966
51: train_loss=0.2070943440372325, val_loss=0.2952990284513973
52: train_loss=0.206350079129312, val_loss=0.2956064274296577
///////////////////////////////////////////
Accuracy
0: train_acc=0.688288149834622, val_acc=0.6846313764910573
1: train_acc=0.8315501559508214, val_acc=0.6664001280434433
2: train_acc=0.8534316094391291, val_acc=0.8148888531883449
3: train_acc=0.86465793767636, val_acc=0.710139133254185
4: train_acc=0.8701832240854239, val_acc=0.8197665122839121
5: train_acc=0.8741377036817369, val_acc=0.82440428608573
6: train_acc=0.87850960058709, val_acc=0.8539101234649861
7: train_acc=0.88070653368946, val_acc=0.855509355652338
8: train_acc=0.8828595283790016, val_acc=0.8272829044728818
9: train_acc=0.8849905531510847, val_acc=0.8628658244233502
10: train_acc=0.8877257347405494, val_acc=0.8483128094413882
11: train_acc=0.888857155518423, val_acc=0.8657444424387476
12: train_acc=0.8893294957433237, val_acc=0.852790660073085
13: train_acc=0.8914165823644733, val_acc=0.8647049413992465
14: train_acc=0.8919877847954104, val_acc=0.8651047496152805
15: train_acc=0.8927237576212477, val_acc=0.8432752276046475
16: train_acc=0.8944263806388754, val_acc=0.8723812570728989
17: train_acc=0.9025660175824397, val_acc=0.8715016792702442
18: train_acc=0.9046421194412811, val_acc=0.8430353431783255
19: train_acc=0.9056087701909074, val_acc=0.8770190306364127
20: train_acc=0.9064985278715091, val_acc=0.8225651691670267
21: train_acc=0.9065534513687762, val_acc=0.8756596831232191
22: train_acc=0.9072674548114203, val_acc=0.8735806816542757
23: train_acc=0.911815106410428, val_acc=0.8774988005280628
24: train_acc=0.9139681002985715, val_acc=0.8824564211753279
25: train_acc=0.9141438551624576, val_acc=0.8765392615359324
26: train_acc=0.9138912080518966, val_acc=0.8846153848727532
27: train_acc=0.9153301989059519, val_acc=0.8790980325914965
28: train_acc=0.914748011639376, val_acc=0.8890132734570793
29: train_acc=0.9151544444270511, val_acc=0.8889333119625743
30: train_acc=0.9162089720154286, val_acc=0.8892531581312375
31: train_acc=0.9169010061063071, val_acc=0.888293619015189
32: train_acc=0.9164066960425163, val_acc=0.8777386856883617
33: train_acc=0.9169559298209469, val_acc=0.883335999225819
34: train_acc=0.9208774549836742, val_acc=0.8870142331204185
35: train_acc=0.9215585046265182, val_acc=0.8795778021781173
36: train_acc=0.9215914583556069, val_acc=0.8882936188912708
37: train_acc=0.9209982863764828, val_acc=0.8896529665283827
38: train_acc=0.9218660752343463, val_acc=0.8875739642015442
39: train_acc=0.9241179316166106, val_acc=0.8862146172412163
40: train_acc=0.9243486093963573, val_acc=0.8899728125731278
41: train_acc=0.9242497471667503, val_acc=0.8942107786545844
42: train_acc=0.9247660265613552, val_acc=0.8943707025014894
43: train_acc=0.9241618701341968, val_acc=0.8909323519942056
44: train_acc=0.9245792875637084, val_acc=0.8927714701330264
45: train_acc=0.9253042751427322, val_acc=0.8886934275934454
46: train_acc=0.9248648888171518, val_acc=0.8914121223148033
47: train_acc=0.9264137261786168, val_acc=0.8931712779201127
48: train_acc=0.9259413856132529, val_acc=0.8890932356855611
49: train_acc=0.9265125884815545, val_acc=0.8915720455516496
50: train_acc=0.9269409901914878, val_acc=0.8930913166067189
51: train_acc=0.9267981900257011, val_acc=0.8916520074751021
52: train_acc=0.9275451468857789, val_acc=0.8933312014619884
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 6640
Number of 1 predicted: 5866
Validation precision: 0.871841155234657
Validation recall: 0.905727923627685
Validation F1-score: 0.8884615384615384
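As a sanity check on blocks like the one above: F1 is the harmonic mean of precision and recall, so the three summary lines are redundant by construction. A small sketch of the computation (the `y_true`/`y_pred` arrays are made-up stand-ins, not this run's predictions):

```python
# Sketch: precision, recall, and F1 from binary predictions.
# The arrays are placeholder data, not the run's actual output.
import numpy as np

def binary_metrics(y_true, y_pred):
    tp = np.sum((y_pred == 1) & (y_true == 1))  # true positives
    fp = np.sum((y_pred == 1) & (y_true == 0))  # false positives
    fn = np.sum((y_pred == 0) & (y_true == 1))  # false negatives
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)  # harmonic mean
    return precision, recall, f1

y_true = np.array([1, 0, 1, 1, 0, 0, 1, 0])
y_pred = np.array([1, 0, 1, 0, 0, 1, 1, 0])
print(binary_metrics(y_true, y_pred))  # (0.75, 0.75, 0.75)
```

For this run, 2 × 0.871841 × 0.905728 / (0.871841 + 0.905728) indeed gives the reported 0.888462.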
File lstm32_3conv3_2dense_shared_2019-01-04_03:10_gpu-2-1_adam_0.001_1024_300_mirror-double_without-plateau.txt
lstm32_3conv3_2dense_shared, epochs=300, batch=1024, optimizer=adam, learning rate=0.001, patience=10
Number of training samples: 91036
Loss
0: train_loss=0.6621972043126961, val_loss=1.2100498182343118
1: train_loss=0.46967956728081256, val_loss=0.4804315954689863
2: train_loss=0.38243821032583947, val_loss=0.4319734521652248
3: train_loss=0.3555133739317169, val_loss=0.4679481047173605
4: train_loss=0.3396266147234385, val_loss=0.3729723867236872
5: train_loss=0.33026132715901246, val_loss=0.3905164589457525
6: train_loss=0.32027716185596244, val_loss=0.38997876368654877
7: train_loss=0.3137813354581902, val_loss=0.3584065768845688
8: train_loss=0.30798197180599735, val_loss=0.35529606103630196
9: train_loss=0.30173858287590155, val_loss=0.366284138700132
10: train_loss=0.29880862899088284, val_loss=0.36162536687685476
11: train_loss=0.29580603168380404, val_loss=0.3737135404408426
12: train_loss=0.2928701788953069, val_loss=0.33768885224871725
13: train_loss=0.28846538519287335, val_loss=0.4073143110510904
14: train_loss=0.2870450377335966, val_loss=0.34447195799507446
15: train_loss=0.283791245563393, val_loss=0.33084667946707813
16: train_loss=0.2814367565262679, val_loss=0.347865950402576
17: train_loss=0.28054741935317445, val_loss=0.33140444192679697
18: train_loss=0.2765060409984059, val_loss=0.339317726722893
19: train_loss=0.27585279037366933, val_loss=0.3772429551446074
20: train_loss=0.27262103037332586, val_loss=0.3546922607325028
21: train_loss=0.2712882637671183, val_loss=0.33853379565392533
22: train_loss=0.26952599190210563, val_loss=0.32809665173830727
23: train_loss=0.26657230694854744, val_loss=0.3229723312598161
24: train_loss=0.26636502537467976, val_loss=0.33547435541944504
25: train_loss=0.26428056923700627, val_loss=0.351044250401081
26: train_loss=0.26398380981675823, val_loss=0.3225776283822593
27: train_loss=0.262213282639042, val_loss=0.32133176131151625
///////////////////////////////////////////
Accuracy
0: train_acc=0.6236873323450483, val_acc=0.5257476407033089
1: train_acc=0.7843929872586567, val_acc=0.7833040145879819
2: train_acc=0.8417329409508081, val_acc=0.8044938428600598
3: train_acc=0.8558592203474352, val_acc=0.7916200225989608
4: train_acc=0.8631530383715463, val_acc=0.8434351508414938
5: train_acc=0.8685245399022753, val_acc=0.8318407159890249
6: train_acc=0.8715343380771573, val_acc=0.8421557652517526
7: train_acc=0.8750604157225937, val_acc=0.8533503917070764
8: train_acc=0.8772134099459626, val_acc=0.8560690864284344
9: train_acc=0.8800474540321671, val_acc=0.8491923879780198
10: train_acc=0.8829913443350782, val_acc=0.8562290098463919
11: train_acc=0.8839250406799273, val_acc=0.8431952662912536
12: train_acc=0.8842326114317974, val_acc=0.8605469370601312
13: train_acc=0.8869787776369092, val_acc=0.8281624819705072
14: train_acc=0.887099608715444, val_acc=0.857428433941628
15: train_acc=0.8885276153126481, val_acc=0.8656644809442426
16: train_acc=0.8896041124020715, val_acc=0.8587078207514867
17: train_acc=0.8894393428583295, val_acc=0.8692627536493664
18: train_acc=0.8911309810638889, val_acc=0.8616663997180554
19: train_acc=0.8912298428587505, val_acc=0.8393571090358897
20: train_acc=0.8933718528540766, val_acc=0.8596673599914534
21: train_acc=0.894459334512006, val_acc=0.8684631378940825
22: train_acc=0.8941737334549839, val_acc=0.8679034061361728
23: train_acc=0.8950634915703309, val_acc=0.869022868794097
24: train_acc=0.8953381078676639, val_acc=0.8637454017970575
25: train_acc=0.8966233136207736, val_acc=0.8569486645361185
26: train_acc=0.8958214335803214, val_acc=0.8666240203653205
27: train_acc=0.8975130717623102, val_acc=0.8675035987208409
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 6447
Number of 1 predicted: 6059
Validation precision: 0.8611749261568756
Validation recall: 0.8661495296253507
Validation F1-score: 0.8636550645931045
File lstm32_3conv3_2dense_shared_2019-01-04_03:48_gpu-2-1_adam_0.002_1024_300_mirror-double.txt
lstm32_3conv3_2dense_shared, epochs=300, batch=1024, optimizer=adam, learning rate=0.002, patience=10
Number of training samples: 91036
Loss
0: train_loss=0.5921013652505761, val_loss=2.2464406868562343
1: train_loss=0.40590896845005314, val_loss=1.9764086339600693
2: train_loss=0.3631615540221169, val_loss=1.7334498346624003
3: train_loss=0.3382549509900416, val_loss=0.7333316202928557
4: train_loss=0.3263070829930585, val_loss=1.991849724243264
5: train_loss=0.3163740841946077, val_loss=1.8620599827994428
6: train_loss=0.3102656695845257, val_loss=0.49888201783547187
7: train_loss=0.3048958603343937, val_loss=0.7521477661742109
8: train_loss=0.29692151562427666, val_loss=0.360443313126943
9: train_loss=0.29124534504475097, val_loss=0.35366929880947034
10: train_loss=0.2893249388410806, val_loss=0.5543489066025132
11: train_loss=0.2848139834181905, val_loss=0.36476571547895476
12: train_loss=0.2809245757451873, val_loss=1.0038570070884407
13: train_loss=0.27879398672943917, val_loss=0.33545640464136356
14: train_loss=0.2750121281830968, val_loss=0.40225195916160744
15: train_loss=0.27248914308495387, val_loss=0.9415893567460605
16: train_loss=0.26938117419543556, val_loss=0.342087967093994
17: train_loss=0.2697448901695591, val_loss=0.4719999421029859
18: train_loss=0.26484179344997083, val_loss=0.4664249164703921
19: train_loss=0.24932199394699245, val_loss=0.37605386808474084
20: train_loss=0.24369765898324341, val_loss=0.552079412902086
21: train_loss=0.24361100891881074, val_loss=0.3499735429127579
22: train_loss=0.24133087508306755, val_loss=0.34718002495471334
23: train_loss=0.23844700103259903, val_loss=0.328943780621738
24: train_loss=0.23881028044768002, val_loss=0.3321157734374628
25: train_loss=0.2389647230382198, val_loss=0.3193102795401326
26: train_loss=0.23788250725515184, val_loss=0.3629160424661354
27: train_loss=0.23724537579572455, val_loss=0.4730293992832339
28: train_loss=0.23373836697947137, val_loss=0.6899830622042387
29: train_loss=0.23359117201150964, val_loss=0.30596411183855354
30: train_loss=0.2337391003773184, val_loss=0.30708683240552836
31: train_loss=0.2315825478766043, val_loss=0.4479179660758838
32: train_loss=0.23199406022781216, val_loss=0.31065418620443563
33: train_loss=0.22968121241656844, val_loss=0.37605953904298633
34: train_loss=0.22975721954906536, val_loss=0.4819465330051991
35: train_loss=0.22017089136442772, val_loss=0.4423965324778643
36: train_loss=0.21512713035777048, val_loss=0.3510244092797539
37: train_loss=0.2144585311638041, val_loss=0.3556953524086698
38: train_loss=0.21320801476462925, val_loss=0.30425933986668696
39: train_loss=0.2125027924841551, val_loss=0.3832450781071756
40: train_loss=0.21391636231273584, val_loss=0.4133679147541056
41: train_loss=0.21175788939876525, val_loss=0.30750796404739733
42: train_loss=0.21115466491519957, val_loss=0.30071040613830863
43: train_loss=0.21069161942692474, val_loss=0.2989862054495855
44: train_loss=0.21082051841276875, val_loss=0.4556543374803187
45: train_loss=0.2090666273676651, val_loss=0.3032538813056134
46: train_loss=0.20866852216911974, val_loss=0.34666311815226153
47: train_loss=0.20846847604888674, val_loss=0.2992089907779477
48: train_loss=0.20908041337346292, val_loss=0.3035134168009024
49: train_loss=0.20183114845619465, val_loss=0.3048422867377129
50: train_loss=0.2002237568309015, val_loss=0.3043474513074026
51: train_loss=0.20015729478291905, val_loss=0.3098965045711088
52: train_loss=0.19922447712272975, val_loss=0.2988176824387676
53: train_loss=0.19897722951166608, val_loss=0.29534203443225243
54: train_loss=0.1975906608863834, val_loss=0.31062013525667714
55: train_loss=0.19866455426583932, val_loss=0.3064167444446307
56: train_loss=0.1974330963935598, val_loss=0.2974526270301785
57: train_loss=0.19713480532787414, val_loss=0.3027408043184453
58: train_loss=0.19532792510406596, val_loss=0.31380692581434205
59: train_loss=0.19603507896237332, val_loss=0.303816743799158
60: train_loss=0.1956547173750443, val_loss=0.3143124228932277
61: train_loss=0.19500264403959786, val_loss=0.32443301717357675
62: train_loss=0.19505456606233637, val_loss=0.29883840148227414
63: train_loss=0.1943898201172096, val_loss=0.30625177333398035
///////////////////////////////////////////
Accuracy
0: train_acc=0.6823564303707503, val_acc=0.5163921322628877
1: train_acc=0.8281668790263887, val_acc=0.5151927070714523
2: train_acc=0.8513005843003933, val_acc=0.5245482173420497
3: train_acc=0.8635375014669924, val_acc=0.6911882301168444
4: train_acc=0.8693593740829909, val_acc=0.514553014124067
5: train_acc=0.874115734591866, val_acc=0.5177514791660226
6: train_acc=0.8762028206080386, val_acc=0.7871421715383166
7: train_acc=0.8794652663544165, val_acc=0.6996641607546109
8: train_acc=0.8828485436638345, val_acc=0.8563889335121858
9: train_acc=0.8861329584503232, val_acc=0.8558292023071419
10: train_acc=0.8864844679607229, val_acc=0.7582760276593115
11: train_acc=0.8879234585712159, val_acc=0.850791619612506
12: train_acc=0.8896041123785009, val_acc=0.60738845310382
13: train_acc=0.8912298430054116, val_acc=0.864065249004727
14: train_acc=0.8943604726202433, val_acc=0.8389573003337152
15: train_acc=0.8947119818896997, val_acc=0.6238605469851308
16: train_acc=0.8949316751009404, val_acc=0.8706221010386419
17: train_acc=0.8942066872102619, val_acc=0.8020949940023356
18: train_acc=0.8968430070991471, val_acc=0.8172877024178236
19: train_acc=0.9038622084383209, val_acc=0.8510315048967231
20: train_acc=0.9072894239746219, val_acc=0.7605949142885536
21: train_acc=0.9067951142486754, val_acc=0.8554293936049673
22: train_acc=0.9082231202906624, val_acc=0.8582280506787253
23: train_acc=0.9098708204893305, val_acc=0.873020949839173
24: train_acc=0.9094863569093791, val_acc=0.8664640972523924
25: train_acc=0.9087064458828483, val_acc=0.8749400289863581
26: train_acc=0.9104200533249397, val_acc=0.8504717732627316
27: train_acc=0.9100136207755889, val_acc=0.7980169519488952
28: train_acc=0.9117052593216116, val_acc=0.7192547572015591
29: train_acc=0.9114635969733589, val_acc=0.8806173038944023
30: train_acc=0.9124192625730726, val_acc=0.8805373424570904
31: train_acc=0.9127707719132405, val_acc=0.8072924995048703
32: train_acc=0.9128586490924552, val_acc=0.874460259094708
33: train_acc=0.9142646866521672, val_acc=0.8564688952545271
34: train_acc=0.9139790852546817, val_acc=0.8044938424883052
35: train_acc=0.9187244608545875, val_acc=0.8190468575941855
36: train_acc=0.9206907156481832, val_acc=0.8576683196547926
37: train_acc=0.9210092709449889, val_acc=0.8650247886069161
38: train_acc=0.9218880445887311, val_acc=0.8814968812681094
39: train_acc=0.9227009093679209, val_acc=0.8448744600970288
40: train_acc=0.9212289643264612, val_acc=0.8303214458490437
41: train_acc=0.9233050660857826, val_acc=0.8889333123915218
42: train_acc=0.9227887865707061, val_acc=0.8885335044233242
43: train_acc=0.922810756312695, val_acc=0.8878138495524864
44: train_acc=0.9222175844776133, val_acc=0.8126499278153035
45: train_acc=0.9236126367669412, val_acc=0.8794178794369437
46: train_acc=0.9224482621840294, val_acc=0.8619062850022724
47: train_acc=0.9239311920873173, val_acc=0.8870142333682548
48: train_acc=0.9235577136808489, val_acc=0.8883735804525009
49: train_acc=0.926919020787343, val_acc=0.8825363829843944
50: train_acc=0.927435300231708, val_acc=0.8891731972467912
51: train_acc=0.9275451467181662, val_acc=0.8762993767473882
52: train_acc=0.9282371809268973, val_acc=0.8888533508970168
53: train_acc=0.9280174874013828, val_acc=0.8917319684262736
54: train_acc=0.9287095214922612, val_acc=0.8814968812109165
55: train_acc=0.9279845335753931, val_acc=0.8832560371212553
56: train_acc=0.9289841378603058, val_acc=0.8890932354472569
57: train_acc=0.9283580120525732, val_acc=0.884215576418415
58: train_acc=0.9293576169686523, val_acc=0.8906924673867725
59: train_acc=0.9290610310262314, val_acc=0.8891731967606508
60: train_acc=0.9292038312182076, val_acc=0.8781384931132257
61: train_acc=0.9293356471769032, val_acc=0.8725411806814998
62: train_acc=0.9294015551457753, val_acc=0.8913321610681347
63: train_acc=0.9295992794609472, val_acc=0.8866943870184804
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 6671
Number of 1 predicted: 5835
Validation precision: 0.8624876928126025
Validation recall: 0.900771208226221
Validation F1-score: 0.8812138486042417
File lstm32_3conv3_2dense_shared_2019-01-04_05:58_gpu-2-1_adam_0.0012_1024_300_test-mirror-double_train-val.txt
lstm32_3conv3_2dense_shared, epochs=300, batch=1024, optimizer=adam, learning rate=0.0012, patience=300
Number of training samples: 103542
Test loss: 1.2903765810860528
Test accuracy: 0.7333333333333333
Number of 0 predicted: 572
Number of 1 predicted: 148
Test precision: 0.4153846153846154
Test recall: 0.7297297297297297
Test F1-score: 0.5294117647058824
File lstm32_3conv3_2dense_shared_2019-01-04_05:59_gpu-1-1_adam_0.0012_512_300_test-mirror-double_train-val.txt
lstm32_3conv3_2dense_shared, epochs=300, batch=512, optimizer=adam, learning rate=0.0012, patience=300
Number of training samples: 103542
Test loss: 1.2712968932257758
Test accuracy: 0.7361111111111112
Number of 0 predicted: 526
Number of 1 predicted: 194
Test precision: 0.5076923076923077
Test recall: 0.6804123711340206
Test F1-score: 0.5814977973568282
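Taking the "Number of 0/1 predicted" lines as ground-truth class counts, the aggregates above are mutually consistent: precision and recall pin down the whole confusion matrix, which reproduces the printed accuracy and F1. A quick check for the batch=512 run (the same pattern works for the batch=1024 one):

```python
# Consistency check: reconstruct the confusion matrix of the batch=512 run
# from its printed aggregates, assuming the "Number of 0/1 predicted" lines
# are ground-truth class counts in the 720-sample test set.
n_pos, n_neg = 194, 526
precision = 0.5076923076923077
recall = 0.6804123711340206

tp = round(recall * n_pos)       # 132 true positives
fp = round(tp / precision - tp)  # 128 false positives
fn = n_pos - tp                  # 62 false negatives
tn = n_neg - fp                  # 398 true negatives

accuracy = (tp + tn) / (n_pos + n_neg)
f1 = 2 * precision * recall / (precision + recall)
print(accuracy)  # ~0.736111, matches the printed test accuracy
print(f1)        # ~0.581498, matches the printed F1-score
```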