Python tensorflow.python.ops.math_ops.tanh() Examples
The following are 23 code examples of tensorflow.python.ops.math_ops.tanh().
You can go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module tensorflow.python.ops.math_ops, or try the search function.
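
Before the project examples, a minimal standalone sketch of the op itself may be useful. This is not from any of the projects below; it assumes a TensorFlow 1.x graph-mode environment, which is what the examples target:

import tensorflow as tf
from tensorflow.python.ops import math_ops

# math_ops.tanh is the internal op behind tf.tanh: an element-wise
# hyperbolic tangent that squashes values into (-1, 1).
x = tf.constant([-2.0, 0.0, 2.0])
y = math_ops.tanh(x)

with tf.Session() as sess:
    print(sess.run(y))  # approximately [-0.964  0.     0.964]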
Example #1
Source File: rnn_cell.py From auto-alt-text-lambda-api with MIT License
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
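
The line s = reduce_sum(v * tanh(hidden_features + y), [2, 3]) is the additive (Bahdanau-style) attention score that most of the _attention variants below share. Here is a standalone sketch of just that scoring step, with made-up shapes and tensors standing in for the ones built inside _attention, assuming TF 1.x:

import tensorflow as tf
from tensorflow.python.ops import math_ops

batch, attn_length, attn_vec_size = 2, 5, 4

# hypothetical stand-ins for hidden_features, y, and v above
hidden_features = tf.random_normal([batch, attn_length, 1, attn_vec_size])
y = tf.random_normal([batch, 1, 1, attn_vec_size])
v = tf.random_normal([attn_vec_size])

# tanh bounds the summed features before the learned vector v scores them;
# reducing over the last two axes leaves one score per attention slot.
s = math_ops.reduce_sum(v * math_ops.tanh(hidden_features + y), [2, 3])
# s has shape [batch, attn_length]; softmax(s) would give the weights a.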
Example #2
Source File: ConvLSTMCell.py From Conv3D_BICLSTM with MIT License
def __init__(self, num_units, input_size=None, use_peepholes=False,
             cell_clip=None, initializer=None, num_proj=None, proj_clip=None,
             num_unit_shards=1, num_proj_shards=1, forget_bias=1.0,
             state_is_tuple=False, activation=tanh):
  # if not state_is_tuple:
  #   logging.warn(
  #       "%s: Using a concatenated state is slower and will soon be "
  #       "deprecated.  Use state_is_tuple=True." % self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated." % self)
  #self._use_peepholes = use_peepholes
  #self._cell_clip = cell_clip
  #self._initializer = initializer
  #self._num_proj = num_proj
  #self._num_unit_shards = num_unit_shards
  #self._num_proj_shards = num_proj_shards
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
Example #3
Source File: rnn_cell.py From ROLO with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=True, activation=tanh):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`.  If False, they are concatenated
      along the column axis.  The latter behavior will soon be deprecated.
    activation: Activation function of the inner states.
  """
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated.  Use state_is_tuple=True.", self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
Example #4
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    if self._linear3 is None:
      self._linear3 = _Linear(query, self._attn_vec_size, True)
    y = self._linear3(query)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #5
Source File: convrnn.py From audio-super-res with MIT License
def __init__(self, shape, filter_size, num_features, forget_bias=1.0,
             input_size=None, state_is_tuple=False, activation=tf.nn.tanh):
  """Initialize the basic Conv LSTM cell.

  Args:
    shape: int tuple that is the height and width of the cell
    filter_size: int tuple that is the height and width of the filter
    num_features: int that is the depth of the cell
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`.  If False, they are concatenated
      along the column axis.  The latter behavior will soon be deprecated.
    activation: Activation function of the inner states.
  """
  # if not state_is_tuple:
  #   logging.warn("%s: Using a concatenated state is slower and will soon be "
  #                "deprecated.  Use state_is_tuple=True.", self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self.shape = shape
  self.filter_size = filter_size
  self.num_features = num_features
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
Example #6
Source File: convrnn.py From audio-super-res with MIT License
def call(self, inputs, state, scope=None):
  cell, hidden = state
  new_hidden = _conv([inputs, hidden], self._kernel_shape,
                     4 * self._output_channels, self._use_bias)
  gates = array_ops.split(value=new_hidden, num_or_size_splits=4,
                          axis=self._conv_ndims + 1)

  input_gate, new_input, forget_gate, output_gate = gates
  new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
  new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
  output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)

  if self._skip_connection:
    output = array_ops.concat([output, inputs], axis=-1)
  new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
  return output, new_state
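
The two math_ops.tanh calls above play different roles: the first bounds the candidate values written into the cell, the second bounds the cell state before the output gate scales it. A reduced sketch of the same gate arithmetic on plain tensors, with hypothetical shapes and assuming TF 1.x, outside any cell class:

import tensorflow as tf
from tensorflow.python.ops import math_ops

units = 4
cell = tf.zeros([1, units])  # previous cell state
input_gate, new_input, forget_gate, output_gate = (
    tf.random_normal([1, units]) for _ in range(4))

# standard LSTM update: sigmoid gates in (0, 1), tanh contents in (-1, 1)
new_cell = math_ops.sigmoid(forget_gate + 1.0) * cell
new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)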
Example #7
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def __init__(self, num_units, initializer=None, forget_bias=1.0,
             activation=math_ops.tanh, reuse=None):
  """Initialize the parameters for an UGRNN cell.

  Args:
    num_units: int, The number of units in the UGRNN cell
    initializer: (optional) The initializer to use for the weight matrices.
    forget_bias: (optional) float, default 1.0, The initial bias of the
      forget gate, used to reduce the scale of forgetting at the beginning
      of the training.
    activation: (optional) Activation function of the inner states.
      Default is `tf.tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope.  If not `True`, and the existing scope already
      has the given variables, an error is raised.
  """
  super(UGRNNCell, self).__init__(_reuse=reuse)
  self._num_units = num_units
  self._initializer = initializer
  self._forget_bias = forget_bias
  self._activation = activation
  self._reuse = reuse
  self._linear = None
Example #8
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=True, activation=tanh):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`.  If False, they are concatenated
      along the column axis.  The latter behavior will soon be deprecated.
    activation: Activation function of the inner states.
  """
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated.  Use state_is_tuple=True.", self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
Example #9
Source File: rnn_cell.py From Multiview2Novelview with MIT License
def call(self, inputs, state, scope=None):
  cell, hidden = state
  new_hidden = _conv([inputs, hidden], self._kernel_shape,
                     4 * self._output_channels, self._use_bias)
  gates = array_ops.split(value=new_hidden, num_or_size_splits=4,
                          axis=self._conv_ndims + 1)

  input_gate, new_input, forget_gate, output_gate = gates
  new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
  new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
  output = math_ops.tanh(new_cell) * self._activation(output_gate)

  if self._skip_connection:
    output = array_ops.concat([output, inputs], axis=-1)
  new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
  return output, new_state
Example #10
Source File: rnn_cell.py From deep_image_model with Apache License 2.0
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("Attention"):
    k = vs.get_variable("AttnW",
                        [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("AttnV", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #11
Source File: rnn_cell.py From ecm with Apache License 2.0
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=True, activation=tanh):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`.  If False, they are concatenated
      along the column axis.  The latter behavior will soon be deprecated.
    activation: Activation function of the inner states.
  """
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated.  Use state_is_tuple=True.", self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
Example #12
Source File: rnn_cell.py From lambda-packs with MIT License
def _attention(self, query, attn_states):
  conv2d = nn_ops.conv2d
  reduce_sum = math_ops.reduce_sum
  softmax = nn_ops.softmax
  tanh = math_ops.tanh

  with vs.variable_scope("attention"):
    k = vs.get_variable(
        "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
    v = vs.get_variable("attn_v", [self._attn_vec_size])
    hidden = array_ops.reshape(attn_states,
                               [-1, self._attn_length, 1, self._attn_size])
    hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
    y = _linear(query, self._attn_vec_size, True)
    y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
    s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
    a = softmax(s)
    d = reduce_sum(
        array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
    new_attns = array_ops.reshape(d, [-1, self._attn_size])
    new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
    return new_attns, new_attn_states
Example #13
Source File: rnn_cell_impl.py From lambda-packs with MIT License
def __init__(self, num_units, forget_bias=1.0, state_is_tuple=True,
             activation=None, reuse=None):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`.  If False, they are concatenated
      along the column axis.  The latter behavior will soon be deprecated.
    activation: Activation function of the inner states.  Default: `tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope.  If not `True`, and the existing scope already
      has the given variables, an error is raised.
  """
  super(BasicLSTMCell, self).__init__(_reuse=reuse)
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated.  Use state_is_tuple=True.", self)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation or math_ops.tanh
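
Note the activation or math_ops.tanh idiom above: passing activation=None selects tanh by default. A small sketch of exercising that default through the public TF 1.x cell this file mirrors; tf.nn.rnn_cell.BasicLSTMCell is assumed available (TF >= 1.2) and the shapes are made up:

import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=8)   # activation=None -> tanh
inputs = tf.random_normal([3, 5, 2])               # [batch, time, features]
outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [3, 5, 8]; every value lies in (-1, 1) because of the tanh default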
Example #14
Source File: seq2seq_model.py From DeepAffinity with GNU General Public License v3.0
def __init__(self, num_units, activation=None, reuse=None,
             kernel_initializer=None, bias_initializer=None):
  super(GRUCell, self).__init__(_reuse=reuse)
  self._num_units = num_units
  self._activation = activation or math_ops.tanh
  self._kernel_initializer = kernel_initializer
  self._bias_initializer = bias_initializer
Example #15
Source File: attention_decoder.py From ccm with Apache License 2.0
def _attn_add_fun(v, keys, query):
  return math_ops.reduce_sum(v * math_ops.tanh(keys + query), [2])
Example #16
Source File: fops.py From shuttleNet with GNU General Public License v3.0
def __init__(self, batch_size, num_mem, num_round, input_offset, cell=None,
             echocell=None, mem_size=2, mem_dim=1024, activation=tanh,
             dummy_value=0.0):
  """
  args:
    num_mem: number of cells
    mem_size: number of memory lines, only works for MemGrid
    mem_dim: length of memory line, only works for MemGrid
    num_round: the round number of processing in the cell
  """
  self._batch_size = batch_size
  self._num_mem = num_mem
  self._mem_dim = mem_dim
  self._num_round = num_round
  self._input_offset = input_offset
  if cell is None:
    self.check = True
    self._mem_cells = [MemGrid(batch_size, mem_size, mem_dim, "Mem_%d" % i,
                               activation=activation, dummy_value=dummy_value)
                       for i in xrange(num_mem)]
  else:
    self.check = False
    self._mem_cells = [cell] * num_mem
  self.echocell = echocell
Example #17
Source File: gru_cell.py From NMT_GAN with Apache License 2.0
def __init__(self, num_units, scope, input_size=None,
             activation=math_ops.tanh, init_device="/cpu:0",
             prefix='gru_layer', precision='float32', reuse_var=False):
  if input_size is not None:
    self._num_units = num_units
    self._activation = activation
    self._input_size = input_size
    self._scope = scope
    self._precision = precision
    if reuse_var == False:
      with vs.variable_scope(self._scope or "gru_layer"):
        with tf.device(init_device):
          embDim = self._input_size
          dim = self._num_units
          W = numpy.concatenate([norm_weight(embDim, dim),
                                 norm_weight(embDim, dim)], axis=1)
          W = tf.get_variable('W', initializer=tf.constant(W))
          b = numpy.zeros((2 * dim,)).astype(self._precision)
          b = tf.get_variable('b', initializer=tf.constant(b))
          U = numpy.concatenate([ortho_weight(dim),
                                 ortho_weight(dim)], axis=1)
          U = tf.get_variable('U', initializer=tf.constant(U))
          Wx = norm_weight(embDim, dim)
          Wx = tf.get_variable('Wx', initializer=tf.constant(Wx))
          bx = numpy.zeros((dim,)).astype(self._precision)
          bx = tf.get_variable('bx', initializer=tf.constant(bx))
          Ux = ortho_weight(dim)
          Ux = tf.get_variable('Ux', initializer=tf.constant(Ux))
Example #18
Source File: gru_cell.py From NMT_GAN with Apache License 2.0
def __call__(self, inputs, state=None):
  """Gated recurrent unit (GRU) with nunits cells."""
  embs = inputs[0]
  if len(inputs) == 1:
    mask_slice = None
  else:
    mask_slice = inputs[1]
  tf.get_variable_scope().reuse_variables()
  W = tf.get_variable('W', dtype=self._precision)
  b = tf.get_variable('b', dtype=self._precision)
  U = tf.get_variable('U', dtype=self._precision)
  Wx = tf.get_variable('Wx', dtype=self._precision)
  bx = tf.get_variable('bx', dtype=self._precision)

  # graph build
  emb2hidden = math_ops.matmul(embs, Wx) + bx
  emb2gates = math_ops.matmul(embs, W) + b
  nsamples = tf.shape(embs)[0]
  if state is None:
    state = tf.zeros([nsamples, self._num_units], dtype=self._precision)
  if mask_slice is None:
    mask_slice = tf.ones([nsamples, self._num_units])  # for decoding

  # gates input for first gru layer
  state2gates = math_ops.matmul(state, U)
  gates = emb2gates + state2gates
  gates = math_ops.sigmoid(gates)
  r, u = array_ops.split(gates, 2, 1)

  h = r * emb2hidden
  h += u * state
  h = math_ops.tanh(h)
  h = mask_slice * h + (1. - mask_slice) * state
  new_h = h
  return new_h, new_h
Example #19
Source File: gru_cell.py From NMT_GAN with Apache License 2.0
def __init__(self, num_units, scope, input_size=None,
             activation=math_ops.tanh, init_device="/cpu:0",
             prefix='gru_layer', precision='float32', reuse_var=False):
  if input_size is not None:
    self._num_units = num_units
    self._activation = activation
    self._input_size = input_size
    self._scope = scope
    self._precision = precision
    if reuse_var == False:
      with vs.variable_scope(self._scope or "ran_layer"):
        with tf.device(init_device):
          embDim = self._input_size
          dim = self._num_units
          W = numpy.concatenate([norm_weight(embDim, dim),
                                 norm_weight(embDim, dim)], axis=1)
          W = tf.get_variable('W', initializer=tf.constant(W))
          b = numpy.zeros((2 * dim,)).astype(self._precision)
          b = tf.get_variable('b', initializer=tf.constant(b))
          U = numpy.concatenate([ortho_weight(dim),
                                 ortho_weight(dim)], axis=1)
          U = tf.get_variable('U', initializer=tf.constant(U))
          Wx = norm_weight(embDim, dim)
          Wx = tf.get_variable('Wx', initializer=tf.constant(Wx))
          bx = numpy.zeros((dim,)).astype(self._precision)
          bx = tf.get_variable('bx', initializer=tf.constant(bx))
Example #20
Source File: utils.py From MIMN with MIT License
def __init__(self, num_units, activation=None, reuse=None,
             kernel_initializer=None, bias_initializer=None):
  super(VecAttGRUCell, self).__init__(_reuse=reuse)
  self._num_units = num_units
  self._activation = activation or math_ops.tanh
  self._kernel_initializer = kernel_initializer
  self._bias_initializer = bias_initializer
  self._gate_linear = None
  self._candidate_linear = None
Example #21
Source File: mod_core_rnn_cell_impl.py From RGAN with MIT License
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=True, activation=tanh, reuse=None):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`.  If False, they are concatenated
      along the column axis.  The latter behavior will soon be deprecated.
    activation: Activation function of the inner states.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope.  If not `True`, and the existing scope already
      has the given variables, an error is raised.
  """
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated.  Use state_is_tuple=True.", self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
  self._reuse = reuse
Example #22
Source File: mod_core_rnn_cell_impl.py From RGAN with MIT License
def __init__(self, num_units, input_size=None, activation=tanh, reuse=None):
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._activation = activation
  self._reuse = reuse
Example #23
Source File: rnn_cell.py From ecm with Apache License 2.0
def __init__(self, num_units, input_size=None, activation=tanh):
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._activation = activation