Commit 52447dd5 authored by Florian RICHOUX

Add conv only model

parent 2096e00e
......@@ -25,7 +25,7 @@ from models.fc2_2dense import FC2_2Dense
from models.fc2_100_2dense import FC2_100_2Dense
from models.fc2_20_2dense import FC2_20_2Dense
from models.fc2_2_2dense import FC2_2_2Dense
from models.3conv3_2dense_shared import 3Conv3_2Dense_S
from models.conv3_3_2dense_shared import Conv3_3_2Dense_S
import tensorflow as tf
from keras import callbacks
......@@ -106,8 +106,8 @@ def factory_model( model_name ):
return FC2_20_2Dense(), 'fc2_20_2dense'
elif model_name == 'fc2_2_2dense':
return FC2_2_2Dense(), 'fc2_2_2dense'
elif model_name == '3conv3_2dense_shared':
return 3Conv3_2Dense_S(), '3conv3_2dense_shared'
elif model_name == 'conv3_3_2dense_shared':
return Conv3_3_2Dense_S(), 'conv3_3_2dense_shared'
else:
print("Model unknown. Terminating.")
sys.exit(1)
......@@ -139,7 +139,7 @@ def make_parser():
parser.add_argument('-train', type=str, help='File containing the training set')
parser.add_argument('-val', type=str, help='File containing the validation set')
parser.add_argument('-test', type=str, help='File containing the test set')
parser.add_argument('-model', type=str, help='choose among: lstm32_3conv3_2dense, lstm32_2conv3_2dense_shared, lstm32_3conv3_2dense_shared, lstm32_2conv3_4dense_shared, lstm32_3conv4_2dense_shared, lstm64_3conv3_2dense_shared, lstm64drop_3conv3_3dense_shared, lstm64x2_3conv3_10dense_shared, lstm64x2_embed2_10dense_shared, lstm64x2_embed4_10dense_shared, fc6_embed3_2dense, fc2_2dense, fc2_100_2dense, fc2_20_2dense, fc2_2_2dense', '3conv3_2dense_shared')
parser.add_argument('-model', type=str, help='choose among: lstm32_3conv3_2dense, lstm32_2conv3_2dense_shared, lstm32_3conv3_2dense_shared, lstm32_2conv3_4dense_shared, lstm32_3conv4_2dense_shared, lstm64_3conv3_2dense_shared, lstm64drop_3conv3_3dense_shared, lstm64x2_3conv3_10dense_shared, lstm64x2_embed2_10dense_shared, lstm64x2_embed4_10dense_shared, fc6_embed3_2dense, fc2_2dense, fc2_100_2dense, fc2_20_2dense, fc2_2_2dense, conv3_3_2dense_shared')
parser.add_argument('-epochs', type=int, default=50, help='Number of epochs [default: 50]')
parser.add_argument('-batch', type=int, default=64, help='Batch size [default: 64]')
parser.add_argument('-patience', type=int, default=0, help='Number of epochs before triggering the early stopping criterion [default: infinite patience]')
......
......@@ -6,7 +6,7 @@ import tensorflow as tf
from models.abstract_model import AbstractModel
class 3Conv3_2Dense_S(AbstractModel):
class Conv3_3_2Dense_S(AbstractModel):
def __init__(self):
super().__init__()
......@@ -21,7 +21,7 @@ class 3Conv3_2Dense_S(AbstractModel):
pool3 = layers.MaxPooling1D(3)
batchnorm3 = layers.BatchNormalization()
input1 = Input(shape=(None,24,), dtype=np.float32, name='protein1')
input1 = Input(shape=(1166,24,), dtype=np.float32, name='protein1')
protein1 = conv1(input1)
protein1 = pool1(protein1)
protein1 = batchnorm1(protein1)
......@@ -32,7 +32,7 @@ class 3Conv3_2Dense_S(AbstractModel):
protein1 = pool3(protein1)
protein1 = batchnorm3(protein1)
input2 = Input(shape=(None,24,), dtype=np.float32, name='protein2')
input2 = Input(shape=(1166,24,), dtype=np.float32, name='protein2')
protein2 = conv1(input2)
protein2 = pool1(protein2)
protein2 = batchnorm1(protein2)
......
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
protein1 (InputLayer) (None, 1166, 24) 0
__________________________________________________________________________________________________
protein2 (InputLayer) (None, 1166, 24) 0
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, 1147, 5) 2405 protein1[0][0]
protein2[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, 382, 5) 0 conv1d_1[0][0]
conv1d_1[1][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 382, 5) 20 max_pooling1d_1[0][0]
max_pooling1d_1[1][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, 363, 5) 505 batch_normalization_1[0][0]
batch_normalization_1[1][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, 121, 5) 0 conv1d_2[0][0]
conv1d_2[1][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 121, 5) 20 max_pooling1d_2[0][0]
max_pooling1d_2[1][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, 102, 5) 505 batch_normalization_2[0][0]
batch_normalization_2[1][0]
__________________________________________________________________________________________________
max_pooling1d_3 (MaxPooling1D) (None, 34, 5) 0 conv1d_3[0][0]
conv1d_3[1][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 34, 5) 20 max_pooling1d_3[0][0]
max_pooling1d_3[1][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 34, 10) 0 batch_normalization_3[0][0]
batch_normalization_3[1][0]
__________________________________________________________________________________________________
flatten_1 (Flatten) (None, 340) 0 concatenate_1[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 25) 8525 flatten_1[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 25) 100 dense_1[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 26 batch_normalization_4[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 1) 0 dense_2[0][0]
==================================================================================================
Total params: 12,126
Trainable params: 12,046
Non-trainable params: 80
__________________________________________________________________________________________________
File conv3_3_2dense_shared_2019-01-10_06:20_gpu-0-1_adam_0.001_2048_30_AA24_mirror-double.txt
conv3_3_2dense_shared, epochs=30, batch=2048, optimizer=adam, learning rate=0.001, patience=30
Number of training samples: 91036
Loss
0: train_loss=0.6929796497784828, val_loss=0.7130014644838916
1: train_loss=0.5330452509245693, val_loss=1.293326859281251
2: train_loss=0.45042111691452663, val_loss=0.5734953243179968
3: train_loss=0.4061468544712521, val_loss=1.0551703082796442
4: train_loss=0.37864003882445224, val_loss=0.4184950811041654
5: train_loss=0.35830279173046053, val_loss=0.4448342591347779
6: train_loss=0.34319442840199504, val_loss=0.40512562409107655
7: train_loss=0.33075865000726007, val_loss=0.3872245987443834
8: train_loss=0.32151784554396906, val_loss=0.38603681824444847
9: train_loss=0.3138805034591798, val_loss=0.37247626766784087
10: train_loss=0.30709861031870905, val_loss=0.3663379636865872
11: train_loss=0.3010973330750669, val_loss=0.36556457906994805
12: train_loss=0.2962078469067981, val_loss=0.3592261939635567
13: train_loss=0.2917862361580758, val_loss=0.35561102589626
14: train_loss=0.2872506275179202, val_loss=0.3605492151910241
15: train_loss=0.28286845355687773, val_loss=0.35405079916288773
16: train_loss=0.2795693141511918, val_loss=0.36137981510974493
17: train_loss=0.2754319892511452, val_loss=0.34162419683624795
18: train_loss=0.2725806994806186, val_loss=0.3480636268382299
19: train_loss=0.2710435802878204, val_loss=0.34697027513261375
20: train_loss=0.2670528202040333, val_loss=0.33843076221469726
21: train_loss=0.26373235412667784, val_loss=0.37995304787099865
22: train_loss=0.26208886712641566, val_loss=0.3401984741415917
23: train_loss=0.2599339051549184, val_loss=0.33166138018281405
24: train_loss=0.25753018033201214, val_loss=0.3669357815923909
25: train_loss=0.256214698513644, val_loss=0.33457668854125344
26: train_loss=0.254699833513359, val_loss=0.33037221467954414
27: train_loss=0.252283466861272, val_loss=0.3299542251165897
28: train_loss=0.25033039702090676, val_loss=0.3309670161642343
29: train_loss=0.2489701264918314, val_loss=0.37809131396840556
///////////////////////////////////////////
Accuracy
0: train_acc=0.6074629817561465, val_acc=0.5921957456034201
1: train_acc=0.7396524452598502, val_acc=0.5176715172425701
2: train_acc=0.7959159016137781, val_acc=0.7325283863459437
3: train_acc=0.8207632145446279, val_acc=0.5356628815688916
4: train_acc=0.8368008261830239, val_acc=0.8124100431411453
5: train_acc=0.8477745070216096, val_acc=0.8000959539039791
6: train_acc=0.8565292849372679, val_acc=0.8208060132566879
7: train_acc=0.8620435872859017, val_acc=0.8296817526538219
8: train_acc=0.8676128126708678, val_acc=0.8308012153117462
9: train_acc=0.871105935859148, val_acc=0.8387174153545275
10: train_acc=0.87424755047985, val_acc=0.8425555733438684
11: train_acc=0.8775539344295403, val_acc=0.8399168395069567
12: train_acc=0.8777736280519559, val_acc=0.8466336160645607
13: train_acc=0.8809591809938231, val_acc=0.8479929637016724
14: train_acc=0.8828924823961745, val_acc=0.8462338077913336
15: train_acc=0.885298123588681, val_acc=0.8504717735677609
16: train_acc=0.8861549277339964, val_acc=0.8471933475174409
17: train_acc=0.8882090603327352, val_acc=0.8546297781547126
18: train_acc=0.8896150972377103, val_acc=0.8539900850834092
19: train_acc=0.8891757106947571, val_acc=0.8549496241994577
20: train_acc=0.8920866468338342, val_acc=0.8577482808442682
21: train_acc=0.89421767187305, val_acc=0.8403166484569676
22: train_acc=0.8941847184346647, val_acc=0.858068127441879
23: train_acc=0.8952502305417882, val_acc=0.865504557345174
24: train_acc=0.8967551297523199, val_acc=0.8453542304176265
25: train_acc=0.8964255900808104, val_acc=0.8613465537972285
26: train_acc=0.8972933784070273, val_acc=0.8650247884258049
27: train_acc=0.8985675997382921, val_acc=0.8648648651889586
28: train_acc=0.8988202470897964, val_acc=0.8663041744444936
29: train_acc=0.8995781887619424, val_acc=0.8459139615654774
///////////////////////////////////////////
Validation metrics
Number of 0 predicted: 5363
Number of 1 predicted: 7143
Validation precision: 0.9279619297669839
Validation recall: 0.7916841663166737
Validation F1-score: 0.8544232076754553
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment