Python tensorflow.keras.layers.GRU Examples
The following are 11 code examples of tensorflow.keras.layers.GRU().
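Before the examples, a minimal standalone sketch of the layer's basic call signature may help; the shapes below are chosen arbitrarily for illustration.

import numpy as np
from tensorflow.keras.layers import GRU

# Arbitrary shapes: a batch of 4 sequences, 10 timesteps, 8 features each.
x = np.random.rand(4, 10, 8).astype("float32")

# Default return_sequences=False: one output vector per sequence.
out = GRU(16)(x)                         # shape (4, 16)

# return_sequences=True: one output vector per timestep.
seq = GRU(16, return_sequences=True)(x)  # shape (4, 10, 16)

# return_state=True additionally returns the final hidden state.
seq, state = GRU(16, return_sequences=True, return_state=True)(x)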
Example #1
Source File: RNN.py From nn_builder with MIT License
def create_and_append_layer(self, layer, rnn_hidden_layers, activation,
                            output_layer=False):
    layer_type_name = layer[0].lower()
    hidden_size = layer[1]
    if output_layer and self.return_final_seq_only:
        return_sequences = False
    else:
        return_sequences = True
    if layer_type_name == "lstm":
        rnn_hidden_layers.extend([LSTM(units=hidden_size,
                                       kernel_initializer=self.initialiser_function,
                                       return_sequences=return_sequences)])
    elif layer_type_name == "gru":
        rnn_hidden_layers.extend([GRU(units=hidden_size,
                                      kernel_initializer=self.initialiser_function,
                                      return_sequences=return_sequences)])
    elif layer_type_name == "linear":
        rnn_hidden_layers.extend(
            [Dense(units=hidden_size, activation=activation,
                   kernel_initializer=self.initialiser_function)])
    else:
        raise ValueError("Wrong layer names")
    input_dim = hidden_size
    return input_dim
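This method relies on class attributes such as initialiser_function and return_final_seq_only. As a rough standalone sketch of the same spec-driven pattern, with those dependencies replaced by plain arguments (all names below are illustrative, not from nn_builder):

import tensorflow as tf
from tensorflow.keras.layers import Dense, GRU, LSTM

def build_rnn_stack(spec, return_final_seq_only=True):
    """Build Keras layers from (type, size) pairs, mirroring the loop above."""
    built = []
    for i, (name, size) in enumerate(spec):
        is_output = i == len(spec) - 1
        return_sequences = not (is_output and return_final_seq_only)
        if name == "gru":
            built.append(GRU(size, return_sequences=return_sequences))
        elif name == "lstm":
            built.append(LSTM(size, return_sequences=return_sequences))
        else:
            built.append(Dense(size))
    return tf.keras.Sequential(built)

model = build_rnn_stack([("gru", 32), ("lstm", 16), ("linear", 4)])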
Example #2
Source File: seqtoseq.py From deepchem with MIT License
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(n_layers):
        if dropout > 0.0:
            prev_layer = Dropout(rate=dropout)(prev_layer)
        prev_layer = GRU(
            self._embedding_dimension, return_sequences=True)(prev_layer)
    prev_layer = Lambda(lambda x: tf.gather_nd(x[0], x[1]))(
        [prev_layer, gather_indices])
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer)
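The final Lambda selects a single timestep from each sequence: gather_indices holds one (batch_index, time_index) pair per sequence. A toy illustration of the tf.gather_nd semantics this relies on (shapes and values invented):

import tensorflow as tf

# Toy GRU output: 2 sequences, 3 timesteps, 4 features.
seq_out = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))

# One (batch_index, time_index) pair per sequence, e.g. the position of
# each sequence's last real (non-padding) timestep.
indices = tf.constant([[0, 2], [1, 1]], dtype=tf.int32)

picked = tf.gather_nd(seq_out, indices)
print(picked.shape)  # (2, 4): one feature vector per sequence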
Example #3
Source File: seqtoseq.py From deepchem with MIT License
def _create_decoder(self, n_layers, dropout):
    """Create the decoder as a tf.keras.Model."""
    input = Input(shape=(self._embedding_dimension,))
    prev_layer = layers.Stack()(self._max_output_length * [input])
    for i in range(n_layers):
        if dropout > 0.0:
            prev_layer = Dropout(dropout)(prev_layer)
        prev_layer = GRU(
            self._embedding_dimension, return_sequences=True)(prev_layer)
    output = Dense(
        len(self._output_tokens), activation=tf.nn.softmax)(prev_layer)
    return tf.keras.Model(inputs=input, outputs=output)
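Here layers.Stack() is a DeepChem layer that tiles the fixed-size embedding across all output timesteps. In plain Keras the same effect can be sketched with RepeatVector (sizes below are illustrative, not DeepChem's defaults):

import tensorflow as tf
from tensorflow.keras.layers import Dense, GRU, Input, RepeatVector

embedding_dimension = 8  # illustrative sizes
max_output_length = 5
n_output_tokens = 12

inp = Input(shape=(embedding_dimension,))
x = RepeatVector(max_output_length)(inp)  # (batch, 5, 8), like Stack()
x = GRU(embedding_dimension, return_sequences=True)(x)
out = Dense(n_output_tokens, activation="softmax")(x)
decoder = tf.keras.Model(inp, out)
decoder.summary()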
Example #4
Source File: seqtoseq.py From deepchem with MIT License
def __init__(self,
             num_tokens,
             max_output_length,
             embedding_dimension=196,
             filter_sizes=[9, 9, 10],
             kernel_sizes=[9, 9, 11],
             decoder_dimension=488,
             **kwargs):
    """
    Parameters
    ----------
    filter_sizes: list of int
        Number of filters for each 1D convolution in the encoder
    kernel_sizes: list of int
        Kernel size for each 1D convolution in the encoder
    decoder_dimension: int
        Number of channels for the GRU Decoder
    """
    if len(filter_sizes) != len(kernel_sizes):
        raise ValueError("Must have same number of layers and kernels")
    self._filter_sizes = filter_sizes
    self._kernel_sizes = kernel_sizes
    self._decoder_dimension = decoder_dimension
    super(AspuruGuzikAutoEncoder, self).__init__(
        input_tokens=num_tokens,
        output_tokens=num_tokens,
        max_output_length=max_output_length,
        embedding_dimension=embedding_dimension,
        variational=True,
        reverse_input=False,
        **kwargs)
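Note that, judging from the super().__init__ call, num_tokens is forwarded as the input_tokens/output_tokens arguments of the SeqToSeq base class, i.e. the token lists themselves. A hypothetical instantiation sketch (the toy alphabet and lengths are placeholders, and the import path may vary across deepchem versions):

from deepchem.models.seqtoseq import AspuruGuzikAutoEncoder  # assumed path

tokens = list("CNOc1()=#")  # toy SMILES alphabet, illustrative only
model = AspuruGuzikAutoEncoder(num_tokens=tokens, max_output_length=120)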
Example #5
Source File: seqtoseq.py From deepchem with MIT License
def _create_decoder(self, n_layers, dropout):
    """Create the decoder as a tf.keras.Model."""
    input = Input(shape=(self._embedding_dimension,))
    prev_layer = Dense(
        self._embedding_dimension, activation=tf.nn.relu)(input)
    prev_layer = layers.Stack()(self._max_output_length * [prev_layer])
    for i in range(3):
        if dropout > 0.0:
            prev_layer = Dropout(dropout)(prev_layer)
        prev_layer = GRU(
            self._decoder_dimension, return_sequences=True)(prev_layer)
    output = Dense(
        len(self._output_tokens), activation=tf.nn.softmax)(prev_layer)
    return tf.keras.Model(inputs=input, outputs=output)
Example #6
Source File: train.py From stacks-usecase with Apache License 2.0
def _rnn(dim=1000, classes=10, dropout=0.6):
    """recurrent model"""
    _model = Sequential()
    _model.add(Embedding(dim, 64))
    _model.add(GRU(64))
    _model.add(Dense(64, activation="relu"))
    _model.add(Dropout(dropout))
    # use the classes argument rather than the hardcoded 10 in the original
    _model.add(Dense(classes, activation="sigmoid"))
    return _model
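A minimal usage sketch for this model; the hyperparameters are illustrative, and since the output activation is sigmoid, a multi-label setup with binary cross-entropy is assumed.

model = _rnn(dim=1000, classes=10, dropout=0.6)
model.compile(optimizer="adam", loss="binary_crossentropy",
              metrics=["accuracy"])
model.build(input_shape=(None, 100))  # e.g. batches of 100 token ids
model.summary()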
Example #7
Source File: models.py From medaka with Mozilla Public License 2.0
def build_majority(feature_len, num_classes, gru_size=128,
                   classify_activation='softmax', time_steps=None,
                   allow_cudnn=True):
    """Build a mock model that simply sums counts.

    :param feature_len: int, number of features for each pileup column.
    :param num_classes: int, number of output class labels.
    :param gru_size: int, size of each GRU layer.
    :param classify_activation: str, activation to use in classification layer.
    :param time_steps: int, number of pileup columns in a sample.
    :param allow_cudnn: bool, opt-in to cudnn when using a GPU.

    :returns: `keras.models.Sequential` object.
    """
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Lambda, Activation

    def sum_counts(f):
        """Sum forward and reverse counts."""
        # TODO write to handle multiple dtypes
        # acgtACGTdD
        # sum base counts
        b = f[:, :, 0:4] + f[:, :, 4:8]
        # sum deletion counts (indexing in this way retains correct shape)
        d = f[:, :, 8:9] + f[:, :, 9:10]
        return tf.concat([d, b], axis=-1)

    model = Sequential()
    model.add(Lambda(sum_counts, output_shape=(time_steps, num_classes)))
    model.add(Activation('softmax'))
    return model
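The mock model assumes a fixed acgtACGTdD feature layout. A toy check of the summing logic (values invented; with this layout num_classes works out to 5):

import numpy as np
import tensorflow as tf

# Toy pileup counts: 1 sample, 2 columns, 10 features in acgtACGTdD order.
f = tf.constant(np.arange(20, dtype="float32").reshape(1, 2, 10))
b = f[:, :, 0:4] + f[:, :, 4:8]   # forward + reverse base counts, (1, 2, 4)
d = f[:, :, 8:9] + f[:, :, 9:10]  # forward + reverse deletion counts, (1, 2, 1)
print(tf.concat([d, b], axis=-1).shape)  # (1, 2, 5): (batch, columns, classes)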
Example #8
Source File: bilstm_gru_siamese_network.py From DeepPavlov with Apache License 2.0
def create_model(self) -> Model:
    input = []
    if self.use_matrix:
        for i in range(self.num_context_turns + 1):
            input.append(Input(shape=(self.max_sequence_length,)))
        context = input[:self.num_context_turns]
        response = input[-1]
        emb_layer = self.embedding_layer()
        emb_c = [emb_layer(el) for el in context]
        emb_r = emb_layer(response)
    else:
        for i in range(self.num_context_turns + 1):
            input.append(Input(shape=(self.max_sequence_length,
                                      self.embedding_dim,)))
        context = input[:self.num_context_turns]
        response = input[-1]
        emb_c = context
        emb_r = response
    lstm_layer = self.lstm_layer()
    lstm_c = [lstm_layer(el) for el in emb_c]
    lstm_r = lstm_layer(emb_r)
    pooling_layer = GlobalMaxPooling1D(name="pooling")
    lstm_c = [pooling_layer(el) for el in lstm_c]
    lstm_r = pooling_layer(lstm_r)
    lstm_c = [Lambda(lambda x: K.expand_dims(x, 1))(el) for el in lstm_c]
    lstm_c = Lambda(lambda x: K.concatenate(x, 1))(lstm_c)
    gru_layer = GRU(2 * self.hidden_dim, name="gru")
    gru_c = gru_layer(lstm_c)
    if self.triplet_mode:
        dist = Lambda(self._pairwise_distances)([gru_c, lstm_r])
    else:
        dist = Lambda(self._diff_mult_dist)([gru_c, lstm_r])
    dist = Dense(1, activation='sigmoid', name="score_model")(dist)
    model = Model(context + [response], dist)
    return model
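The expand_dims/concatenate pair is what turns the per-turn pooled vectors into a sequence the GRU can read. A toy sketch of just that step (shapes invented):

import tensorflow as tf
from tensorflow.keras import backend as K

# Toy pooled vectors for 3 context turns: batch size 2, hidden size 4.
turns = [tf.random.normal((2, 4)) for _ in range(3)]

expanded = [K.expand_dims(t, 1) for t in turns]  # each (2, 1, 4)
sequence = K.concatenate(expanded, axis=1)       # (2, 3, 4)
print(sequence.shape)  # the GRU reads the turns as a length-3 sequence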
Example #9
Source File: basic.py From autokeras with MIT License
def build(self, hp, inputs=None):
    inputs = nest.flatten(inputs)
    utils.validate_num_inputs(inputs, 1)
    input_node = inputs[0]
    shape = input_node.shape.as_list()
    if len(shape) != 3:
        raise ValueError(
            'Expect the input tensor to have '
            'at least 3 dimensions for rnn models, '
            'but got {shape}'.format(shape=input_node.shape))
    feature_size = shape[-1]
    output_node = input_node

    bidirectional = self.bidirectional
    if bidirectional is None:
        bidirectional = hp.Boolean('bidirectional', default=True)
    layer_type = self.layer_type or hp.Choice('layer_type',
                                              ['gru', 'lstm'],
                                              default='lstm')
    num_layers = self.num_layers or hp.Choice('num_layers',
                                              [1, 2, 3],
                                              default=2)
    rnn_layers = {
        'gru': layers.GRU,
        'lstm': layers.LSTM
    }
    in_layer = rnn_layers[layer_type]
    for i in range(num_layers):
        return_sequences = True
        if i == num_layers - 1:
            return_sequences = self.return_sequences
        if bidirectional:
            output_node = layers.Bidirectional(
                in_layer(feature_size,
                         return_sequences=return_sequences))(output_node)
        else:
            output_node = in_layer(
                feature_size,
                return_sequences=return_sequences)(output_node)
    return output_node
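For intuition, a sketch of the network this search space might materialize for layer_type='gru', num_layers=2, bidirectional=True (input sizes are illustrative):

import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(20, 8))  # (timesteps, feature_size)
feature_size = 8
x = inputs
for i in range(2):
    return_sequences = i < 1  # the last layer collapses the sequence here
    x = layers.Bidirectional(
        layers.GRU(feature_size, return_sequences=return_sequences))(x)
model = tf.keras.Model(inputs, x)
model.summary()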
Example #10
Source File: chemnet_models.py From deepchem with MIT License
def __init__(self,
             char_to_idx,
             n_tasks=10,
             max_seq_len=270,
             embedding_dim=50,
             n_classes=2,
             use_bidir=True,
             use_conv=True,
             filters=192,
             kernel_size=3,
             strides=1,
             rnn_sizes=[224, 384],
             rnn_types=["GRU", "GRU"],
             mode="regression",
             **kwargs):
    """
    Parameters
    ----------
    char_to_idx: dict
        char_to_idx contains character to index mapping for SMILES characters
    embedding_dim: int, default 50
        Size of character embeddings used.
    use_bidir: bool, default True
        Whether to use BiDirectional RNN Cells
    use_conv: bool, default True
        Whether to use a conv-layer
    kernel_size: int, default 3
        Kernel size for convolutions
    filters: int, default 192
        Number of filters
    strides: int, default 1
        Strides used in convolution
    rnn_sizes: list[int], default [224, 384]
        Number of hidden units in the RNN cells
    mode: str, default regression
        Whether to use model for regression or classification
    """
    self.char_to_idx = char_to_idx
    self.n_classes = n_classes
    self.max_seq_len = max_seq_len
    self.embedding_dim = embedding_dim
    self.use_bidir = use_bidir
    self.use_conv = use_conv
    if use_conv:
        self.kernel_size = kernel_size
        self.filters = filters
        self.strides = strides
    self.rnn_types = rnn_types
    self.rnn_sizes = rnn_sizes
    assert len(rnn_sizes) == len(rnn_types), \
        "Should have same number of hidden units as RNNs"
    self.n_tasks = n_tasks
    self.mode = mode

    model, loss, output_types = self._build_graph()
    super(Smiles2Vec, self).__init__(
        model=model, loss=loss, output_types=output_types, **kwargs)
Example #11
Source File: models.py From medaka with Mozilla Public License 2.0
def build_model(feature_len, num_classes, gru_size=128,
                classify_activation='softmax', time_steps=None,
                allow_cudnn=True):
    """Build a bidirectional GRU model with CuDNNGRU support.

    The CuDNNGRU implementation is claimed to give a speed-up on GPU of 7x.
    The function will build a model capable of running on GPU with CuDNNGRU
    provided a) a GPU is present, and b) the option has been allowed by the
    `allow_cudnn` argument; otherwise a compatible (but not CuDNNGRU
    accelerated) model is built.

    :param feature_len: int, number of features for each pileup column.
    :param num_classes: int, number of output class labels.
    :param gru_size: int, size of each GRU layer.
    :param classify_activation: str, activation to use in classification layer.
    :param time_steps: int, number of pileup columns in a sample.
    :param allow_cudnn: bool, opt-in to cudnn when using a GPU.

    :returns: `keras.models.Sequential` object.
    """
    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, GRU, CuDNNGRU, Bidirectional

    # Determine whether to use CuDNNGRU or not
    cudnn = False
    if tf.test.is_gpu_available(cuda_only=True) and allow_cudnn:
        cudnn = True

    logger.info("Building model with cudnn optimization: {}".format(cudnn))

    model = Sequential()
    input_shape = (time_steps, feature_len)
    for i in [1, 2]:
        name = 'gru{}'.format(i)
        # Options here are to be mutually compatible: train with CuDNNGRU
        # but allow inference with GRU (on cpu).
        # https://gist.github.com/bzamecnik/bd3786a074f8cb891bc2a397343070f1
        if cudnn:
            gru = CuDNNGRU(gru_size, return_sequences=True, name=name)
        else:
            gru = GRU(
                gru_size, reset_after=True, recurrent_activation='sigmoid',
                return_sequences=True, name=name)
        model.add(Bidirectional(gru, input_shape=input_shape))

    # see keras #10417 for why we specify input shape
    model.add(Dense(
        num_classes, activation=classify_activation, name='classify',
        input_shape=(time_steps, 2 * gru_size)))

    return model
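This example targets TF 1.x, where CuDNNGRU was a separate layer. In TF 2.x that class is gone: tf.keras.layers.GRU transparently dispatches to the fused cuDNN kernel on GPU whenever its arguments satisfy the cuDNN constraints, so a single layer definition covers both devices.

from tensorflow.keras.layers import GRU

# TF 2.x: the defaults (activation='tanh', recurrent_activation='sigmoid',
# reset_after=True, recurrent_dropout=0, unroll=False, use_bias=True)
# already satisfy the cuDNN constraints, so this layer runs the fused
# cuDNN kernel on GPU and a generic implementation on CPU.
gru = GRU(128, return_sequences=True, name='gru1')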