Commit f99812f0 authored by Florian RICHOUX

Add LSTM 64x2 models


Former-commit-id: 52a9b362
parent 6b28fd2a
@@ -15,6 +15,8 @@ from models.lstm32_3conv4_2dense_shared import LSTM32_3Conv4_2Dense_S
from models.lstm32_3conv3_3dense_shared import LSTM32_3Conv3_3Dense_S
from models.lstm64_3conv3_2dense_shared import LSTM64_3Conv3_2Dense_S
from models.lstm64drop_3conv3_3dense_shared import LSTM64Drop_3Conv3_3Dense_S
from models.lstm64x2_3conv3_10dense_shared import LSTM64x2_3Conv3_10Dense_S
from models.lstm64x2_embed2_10dense_shared import LSTM64x2_Embed2_10Dense_S
from models.fc6_embed3_2dense import FC6_Embed3_2Dense
from models.fc2_2dense import FC2_2Dense
@@ -77,6 +79,10 @@ def factory_model( model_name ):
return LSTM64_3Conv3_2Dense_S(), 'lstm64_3conv3_2dense_shared'
elif model_name == 'lstm64drop_3conv3_3dense_shared':
return LSTM64Drop_3Conv3_3Dense_S(), 'lstm64drop_3conv3_3dense_shared'
elif model_name == 'lstm64x2_3conv3_10dense_shared':
return LSTM64x2_3Conv3_10Dense_S(), 'lstm64x2_3conv3_10dense_shared'
elif model_name == 'lstm64x2_embed2_10dense_shared':
return LSTM64x2_Embed2_10Dense_S(), 'lstm64x2_embed2_10dense_shared'
elif model_name == 'fc6_embed3_2dense':
return FC6_Embed3_2Dense(), 'fc6_embed3_2dense'
elif model_name == 'fc2_2dense':
@@ -112,7 +118,7 @@ def make_parser():
parser.add_argument('-train', type=str, help='File containing the training set')
parser.add_argument('-val', type=str, help='File containing the validation set')
parser.add_argument('-test', type=str, help='File containing the test set')
parser.add_argument('-model', type=str, help='choose among: lstm32_3conv3_2dense, lstm32_3conv3_2dense_shared, lstm32_3conv4_2dense_shared, lstm64_3conv3_2dense_shared, lstm64drop_3conv3_3dense_shared, fc6_embed3_2dense, fc2_2dense')
parser.add_argument('-model', type=str, help='choose among: lstm32_3conv3_2dense, lstm32_3conv3_2dense_shared, lstm32_3conv4_2dense_shared, lstm64_3conv3_2dense_shared, lstm64drop_3conv3_3dense_shared, lstm64x2_3conv3_10dense_shared, lstm64x2_embed2_10dense_shared, fc6_embed3_2dense, fc2_2dense')
parser.add_argument('-epochs', type=int, default=50, help='Number of epochs [default: 50]')
parser.add_argument('-batch', type=int, default=64, help='Batch size [default: 64]')
parser.add_argument('-patience', type=int, default=0, help='Number of epochs before triggering the early stopping criterion [default: infinite patience]')
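For reference, a minimal sketch of how one of the new model names flows through factory_model once it is passed to the -model option; the optimizer, loss, and metrics below are illustrative assumptions and are not taken from this commit.

# Hypothetical usage sketch, assuming factory_model from the training script
# above is in scope; the compile settings are illustrative assumptions.
model_wrapper, model_name = factory_model('lstm64x2_3conv3_10dense_shared')
model = model_wrapper.get_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()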
# models/lstm64x2_3conv3_10dense_shared.py
from keras.models import Model
from keras import layers
from keras import Input
import numpy as np
import tensorflow as tf

from models.abstract_model import AbstractModel


class LSTM64x2_3Conv3_10Dense_S(AbstractModel):
    def __init__(self):
        super().__init__()

    def get_model(self):
        # Shared (siamese) encoder: three Conv1D/MaxPooling1D/BatchNormalization
        # blocks followed by a two-layer LSTM stack, applied to both proteins.
        conv1 = layers.Conv1D(5, 20, activation='relu')
        pool1 = layers.MaxPooling1D(3)
        batchnorm1 = layers.BatchNormalization()
        conv2 = layers.Conv1D(5, 20, activation='relu')
        pool2 = layers.MaxPooling1D(3)
        batchnorm2 = layers.BatchNormalization()
        conv3 = layers.Conv1D(5, 20, activation='relu')
        pool3 = layers.MaxPooling1D(3)
        batchnorm3 = layers.BatchNormalization()
        lstm1 = layers.LSTM(64, return_sequences=True)
        lstm2 = layers.LSTM(64)

        # First protein: variable-length sequence of 20-dimensional residue encodings.
        input1 = Input(shape=(None, 20,), dtype=np.float32, name='protein1')
        protein1 = conv1(input1)
        protein1 = pool1(protein1)
        protein1 = batchnorm1(protein1)
        protein1 = conv2(protein1)
        protein1 = pool2(protein1)
        protein1 = batchnorm2(protein1)
        protein1 = conv3(protein1)
        protein1 = pool3(protein1)
        protein1 = batchnorm3(protein1)
        protein1 = lstm1(protein1)
        protein1 = lstm2(protein1)

        # Second protein: same layer objects, hence shared weights.
        input2 = Input(shape=(None, 20,), dtype=np.float32, name='protein2')
        protein2 = conv1(input2)
        protein2 = pool1(protein2)
        protein2 = batchnorm1(protein2)
        protein2 = conv2(protein2)
        protein2 = pool2(protein2)
        protein2 = batchnorm2(protein2)
        protein2 = conv3(protein2)
        protein2 = pool3(protein2)
        protein2 = batchnorm3(protein2)
        protein2 = lstm1(protein2)
        protein2 = lstm2(protein2)

        # Classification head: ten Dense layers (100, 100, 50x3, 25x4, 1), each
        # hidden layer followed by batch normalization, sigmoid output.
        head = layers.concatenate([protein1, protein2], axis=-1)
        head = layers.Dense(100, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(100, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(50, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(50, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(50, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(1)(head)
        output = layers.Activation(tf.nn.sigmoid)(head)

        model = Model(inputs=[input1, input2], outputs=output)
        return model
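Below is a minimal smoke test for the shared-encoder model above: it builds the model and runs a forward pass on random inputs. The batch size and sequence lengths are arbitrary assumptions, chosen long enough to survive the three convolution/pooling blocks.

# Hypothetical smoke test; input shapes follow the (None, 20) Input definitions,
# batch size and sequence lengths are arbitrary assumptions.
import numpy as np
from models.lstm64x2_3conv3_10dense_shared import LSTM64x2_3Conv3_10Dense_S

model = LSTM64x2_3Conv3_10Dense_S().get_model()
p1 = np.random.rand(2, 600, 20).astype(np.float32)
p2 = np.random.rand(2, 500, 20).astype(np.float32)
print(model.predict([p1, p2]).shape)  # expected: (2, 1)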
# models/lstm64x2_embed2_10dense_shared.py
from keras.models import Model
from keras import layers
from keras import Input
import numpy as np
import tensorflow as tf

from models.abstract_model import AbstractModel


class LSTM64x2_Embed2_10Dense_S(AbstractModel):
    def __init__(self):
        super().__init__()

    def get_model(self):
        # Shared (siamese) encoder: a 2-dimensional residue embedding followed
        # by a two-layer LSTM stack, applied to both proteins.
        embed = layers.Embedding(21, 2, embeddings_initializer='glorot_uniform')
        lstm1 = layers.LSTM(64, return_sequences=True)
        lstm2 = layers.LSTM(64)

        # The Embedding layer expects integer-encoded residues (21 symbols:
        # 20 amino acids plus padding), so each protein is a variable-length
        # sequence of integer indices rather than one-hot vectors.
        input1 = Input(shape=(None,), dtype=np.int32, name='protein1')
        protein1 = embed(input1)
        protein1 = lstm1(protein1)
        protein1 = lstm2(protein1)

        input2 = Input(shape=(None,), dtype=np.int32, name='protein2')
        protein2 = embed(input2)
        protein2 = lstm1(protein2)
        protein2 = lstm2(protein2)

        # Classification head: ten Dense layers (100, 100, 50x3, 25x4, 1), each
        # hidden layer followed by batch normalization, sigmoid output.
        head = layers.concatenate([protein1, protein2], axis=-1)
        head = layers.Dense(100, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(100, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(50, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(50, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(50, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(25, activation='relu')(head)
        head = layers.BatchNormalization()(head)
        head = layers.Dense(1)(head)
        output = layers.Activation(tf.nn.sigmoid)(head)

        model = Model(inputs=[input1, input2], outputs=output)
        return model
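And a matching smoke test for the embedding variant, assuming residues are integer-encoded in [0, 20] (20 amino acids plus one padding symbol), which is what the Embedding(21, 2) layer expects.

# Hypothetical smoke test; the integer residue encoding is an assumption.
import numpy as np
from models.lstm64x2_embed2_10dense_shared import LSTM64x2_Embed2_10Dense_S

model = LSTM64x2_Embed2_10Dense_S().get_model()
p1 = np.random.randint(0, 21, size=(2, 300)).astype(np.int32)
p2 = np.random.randint(0, 21, size=(2, 250)).astype(np.int32)
print(model.predict([p1, p2]).shape)  # expected: (2, 1)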