Python tensorflow.sigmoid() Examples

The following are 30 code examples of tensorflow.sigmoid(), drawn from open-source projects. The source file and originating project are listed above each example. You may also want to check out all available functions and classes of the tensorflow module.
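As a quick orientation before the project code: tf.sigmoid() applies the logistic function 1 / (1 + exp(-x)) elementwise, mapping arbitrary real-valued logits into (0, 1). A minimal self-contained sketch (assumes TensorFlow 2.x eager execution):

import tensorflow as tf

logits = tf.constant([-2.0, 0.0, 2.0])
probs = tf.sigmoid(logits)              # elementwise 1 / (1 + exp(-x))
print(probs.numpy())                    # ~[0.119, 0.5, 0.881]

# Sanity check against the closed form.
manual = 1.0 / (1.0 + tf.exp(-logits))
print(tf.reduce_max(tf.abs(probs - manual)).numpy())  # ~0.0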
Example #1
Source File: model_utils.py    From lm with MIT License
def __call__(self, inputs, state, scope=None):
        num_proj = self._num_units if self._num_proj is None else self._num_proj

        c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
        m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])

        input_size = inputs.get_shape().with_rank(2)[1]
        if input_size.value is None:
            raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
        with tf.variable_scope(type(self).__name__,
                               initializer=self._initializer):  # "LSTMCell"
            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            cell_inputs = tf.concat(1, [inputs, m_prev])
            lstm_matrix = tf.nn.bias_add(tf.matmul(cell_inputs, self._concat_w), self._b)
            i, j, f, o = tf.split(1, 4, lstm_matrix)

            c = tf.sigmoid(f + 1.0) * c_prev + tf.sigmoid(i) * tf.tanh(j)
            m = tf.sigmoid(o) * tf.tanh(c)

            if self._num_proj is not None:
                m = tf.matmul(m, self._concat_w_proj)

        new_state = tf.concat(1, [c, m])
        return m, new_state 
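Note the f + 1.0 inside the forget-gate sigmoid above: adding a constant bias of 1 keeps the forget gate close to open early in training, a widely used trick for stabilizing LSTM optimization.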
Example #2
Source File: common_layers.py    From fine-lm with MIT License
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
    return tf.sigmoid(g[2]) * tf.tanh(new_cell) 
Example #3
Source File: model.py    From PathCon with MIT License
def _build_model(self):
        # define initial relation features
        if self.use_context or (self.use_path and self.path_type == 'rnn'):
            self._build_relation_feature()

        self.scores = 0.0

        if self.use_context:
            edges_list, mask_list = self._get_neighbors_and_masks(self.labels, self.entity_pairs, self.train_edges)
            self.aggregators = self._get_neighbor_aggregators()  # define aggregators for each layer
            self.aggregated_neighbors = self._aggregate_neighbors(edges_list, mask_list)  # [batch_size, n_relations]
            self.scores += self.aggregated_neighbors

        if self.use_path:
            if self.path_type == 'embedding':
                self.W, self.b = self._get_weight_and_bias(self.n_paths, self.n_relations)  # [batch_size, n_relations]
                self.scores += tf.sparse_tensor_dense_matmul(self.path_features, self.W) + self.b

            elif self.path_type == 'rnn':
                rnn_output = self._rnn(self.path_ids)  # [batch_size, path_samples, n_relations]
                self.scores += self._aggregate_paths(rnn_output)

        # squash the scores into [0, 1] to make ranking-based metrics easier to compute
        self.scores_normalized = tf.sigmoid(self.scores) 
Example #4
Source File: common_layers.py    From fine-lm with MIT License
def gated_linear_unit_layer(x, name=None):
  """Gated linear unit layer.

  Paper: Language Modeling with Gated Convolutional Networks.
  Link: https://arxiv.org/abs/1612.08083
  x = Wx * sigmoid(W'x).

  Args:
    x: A tensor
    name: A string

  Returns:
    A tensor of the same shape as x.
  """
  with tf.variable_scope(name, default_name="glu_layer", values=[x]):
    depth = shape_list(x)[-1]
    x = tf.layers.dense(x, depth * 2, activation=None)
    x, gating_x = tf.split(x, 2, axis=-1)
    return x * tf.nn.sigmoid(gating_x) 
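The layer doubles the channel depth with a dense projection, then halves it again by gating: the first half of the channels is multiplied elementwise by the sigmoid of the second half. A framework-free sketch of just the gating step (shapes are illustrative assumptions):

import numpy as np

x = np.random.randn(2, 5, 8)              # [batch, length, 2 * depth] after the dense layer
a, b = np.split(x, 2, axis=-1)            # values and gates, each of depth 4
glu = a * (1.0 / (1.0 + np.exp(-b)))      # x = Wx * sigmoid(W'x), post-projection
print(glu.shape)                          # (2, 5, 4)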
Example #5
Source File: blocks_lstm.py    From DOTA_models with Apache License 2.0
def _Apply(self, *args):
    xtransform = self._TransformInputs(*args)
    depth_axis = len(self._output_shape) - 1

    if self.hidden is not None:
      htransform = self._TransformHidden(self.hidden)
      f, i, j, o = tf.split(
          value=htransform + xtransform, num_or_size_splits=4, axis=depth_axis)
    else:
      f, i, j, o = tf.split(
          value=xtransform, num_or_size_splits=4, axis=depth_axis)

    if self.cell is not None:
      self.cell = tf.sigmoid(f) * self.cell + tf.sigmoid(i) * tf.tanh(j)
    else:
      self.cell = tf.sigmoid(i) * tf.tanh(j)

    self.hidden = tf.sigmoid(o) * tf.tanh(self.cell)
    return self.hidden 
Example #6
Source File: loss.py    From tensorflow_constrained_optimization with Apache License 2.0
def is_normalized(self):
    """Returns true only if the associated loss is normalized.

    We call a classification loss "normalized" if there exists a random variable
    Z such that, for any values of the predictions and weights:

    > loss(predictions, weights) = E[zero-one-loss(predictions + Z, weights)]

    where the expectation is taken over Z.

    Intuitively, a normalized loss can be interpreted as a smoothed zero-one
    loss (e.g. a ramp or a sigmoid), while a non-normalized loss will typically
    be some unbounded relaxation (e.g. a hinge).

    Returns:
      True if the loss is normalized. False otherwise.
    """ 
Example #7
Source File: models.py    From tf2-yolo3 with Apache License 2.0
def yolo_boxes(pred, anchors, num_classes, training=True):
    # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
    grid_size = tf.shape(pred)[1:3][::-1]
    grid_y, grid_x = tf.shape(pred)[1], tf.shape(pred)[2]

    box_xy, box_wh, objectness, class_probs = tf.split(pred, (2, 2, 1, num_classes), axis=-1)
    box_xy = tf.sigmoid(box_xy)

    objectness = tf.sigmoid(objectness)
    class_probs = tf.nn.softmax(class_probs)
    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss

    # !!! grid[x][y] == (y, x)
    grid = tf.meshgrid(tf.range(grid_x), tf.range(grid_y))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]

    box_xy = (box_xy + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
    box_wh = tf.exp(box_wh) * anchors

    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)

    return bbox, objectness, class_probs, pred_box 
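The sigmoid calls above serve two roles: they squash the raw x/y outputs into (0, 1) so each predicted center stays inside its grid cell before the cell index is added, and they turn the objectness logit into a probability. A toy check of the center arithmetic (all numbers are hypothetical):

import tensorflow as tf

raw_xy = tf.constant([1.5, -0.3])          # raw network outputs for one anchor
cell = tf.constant([4.0, 2.0])             # the cell's (x, y) index
grid = tf.constant([13.0, 13.0])           # grid size

offset = tf.sigmoid(raw_xy)                # in (0, 1): stays inside the cell
center = (offset + cell) / grid            # normalized image coordinates
print(offset.numpy(), center.numpy())      # ~[0.818 0.426] ~[0.371 0.187]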
Example #8
Source File: post_processing_builder.py    From object_detector_app with MIT License
def _build_score_converter(score_converter_config):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
  the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return tf.identity
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return tf.sigmoid
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return tf.nn.softmax
  raise ValueError('Unknown score converter.') 
Example #9
Source File: post_processing_builder.py    From DOTA_models with Apache License 2.0
def _build_score_converter(score_converter_config):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
  the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return tf.identity
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return tf.sigmoid
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return tf.nn.softmax
  raise ValueError('Unknown score converter.') 
Example #10
Source File: post_processing_builder.py    From vehicle_counting_tensorflow with MIT License
def _build_score_converter(score_converter_config, logit_scale):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid, tf.softmax] score converters based on
  the config.

  Args:
    score_converter_config: post_processing_pb2.PostProcessing.score_converter.
    logit_scale: temperature to use for SOFTMAX score_converter.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == post_processing_pb2.PostProcessing.IDENTITY:
    return _score_converter_fn_with_logit_scale(tf.identity, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SIGMOID:
    return _score_converter_fn_with_logit_scale(tf.sigmoid, logit_scale)
  if score_converter_config == post_processing_pb2.PostProcessing.SOFTMAX:
    return _score_converter_fn_with_logit_scale(tf.nn.softmax, logit_scale)
  raise ValueError('Unknown score converter.') 
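The helper _score_converter_fn_with_logit_scale is not shown in this snippet. Judging from its name and the logit_scale ("temperature") argument, a plausible sketch is a wrapper that divides the logits by the scale before applying the converter; this is an assumption, not the verbatim helper:

def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale):
  # Hypothetical sketch: rescale logits by a temperature, then convert.
  def score_converter_fn(logits):
    scaled_logits = tf.divide(logits, logit_scale)
    return tf_score_converter_fn(scaled_logits)
  return score_converter_fn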
Example #11
Source File: box_predictor_builder.py    From vehicle_counting_tensorflow with MIT License
def build_score_converter(score_converter_config, is_training):
  """Builds score converter based on the config.

  Builds one of [tf.identity, tf.sigmoid] score converters based on the config
  and whether the BoxPredictor is for training or inference.

  Args:
    score_converter_config:
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.score_converter.
    is_training: Indicates whether the BoxPredictor is in training mode.

  Returns:
    Callable score converter op.

  Raises:
    ValueError: On unknown score converter.
  """
  if score_converter_config == (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.IDENTITY):
    return tf.identity
  if score_converter_config == (
      box_predictor_pb2.WeightSharedConvolutionalBoxPredictor.SIGMOID):
    return tf.identity if is_training else tf.sigmoid
  raise ValueError('Unknown score converter.') 
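Note that the SIGMOID branch returns tf.identity while training: losses such as sigmoid cross-entropy are computed directly on the raw logits for numerical stability, so the squashing is only applied at inference time.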
Example #12
Source File: lstm_cell.py    From RDPG with MIT License
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = self._linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = tf.split(1, 4, concat)

      new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

      return new_h, tf.concat(1, [new_c, new_h]) 
Example #13
Source File: distributions.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def __init__(self, logits):
        self.logits = logits
        self.ps = tf.sigmoid(logits) 
Example #14
Source File: model.py    From DexiNed with MIT License
def pre_process_binary_cross_entropy(bc_loss, input, label, arg, use_tf_loss=False):
    # preprocess data
    y = label
    loss = 0
    w_loss = 1.0
    preds = []
    for tmp_p in input:
        # tmp_p = input[i]

        # loss processing
        tmp_y = tf.cast(y, dtype=tf.float32)
        mask = tf.dtypes.cast(tmp_y > 0., tf.float32)
        b, h, w, c = mask.get_shape()
        positives = tf.math.reduce_sum(mask, axis=[1, 2, 3], keepdims=True)
        # positives = tf.math.reduce_sum(mask)
        negatives = h*w*c-positives
        # negatives = tf.math.reduce_sum(1. - tmp_y)

        beta2 = positives / (negatives + positives)  # negatives in hed
        beta = negatives / (positives + negatives)  # positives in hed
        # pos_w = beta/(1-beta)
        pos_w = tf.where(tf.greater(y, 0.0), beta, beta2)
        # pos_w = tf.where(tf.equal(mask, 0.0), beta, beta2)
        probs = tf.sigmoid(tmp_p)  # map raw predictions to probabilities

        l_cost = bc_loss(y_true=tmp_y, y_pred=probs,
                         sample_weight=pos_w)

        # cost = tf.math.reduce_mean(cost * (1 - beta))
        # l_cost= tf.where(tf.equal(positives, 0.0), 0.0, cost)

        preds.append(probs)
        loss += (l_cost*1.0)


    # mask[mask != 0] = negatives / (positives + negatives)
    # mask[mask == 0] = positives / (positives + negatives)

    return preds, loss 
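The weighting above follows the class-balancing scheme popularized by HED-style edge detection: each pixel is weighted by the frequency of the opposite class, which up-weights the rare positive (edge) pixels. A toy illustration of the beta computation (hypothetical numbers):

import numpy as np

mask = np.array([[0., 1., 0., 0.]])           # 1 positive, 3 negatives
positives = mask.sum()
negatives = mask.size - positives

beta = negatives / (positives + negatives)    # weight for positives: 0.75
beta2 = positives / (positives + negatives)   # weight for negatives: 0.25
weights = np.where(mask > 0, beta, beta2)
print(weights)                                # [[0.25 0.75 0.25 0.25]]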
Example #15
Source File: distributions.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def __init__(self, logits):
        self.logits = logits
        self.ps = tf.sigmoid(logits) 
Example #16
Source File: distributions.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def __init__(self, logits):
        self.logits = logits
        self.ps = tf.sigmoid(logits) 
Example #17
Source File: enas_common_ops.py    From deep_architect with MIT License
def lstm(x, prev_c, prev_h, w):
    ifog = tf.matmul(tf.concat([x, prev_h], axis=1), w)
    i, f, o, g = tf.split(ifog, 4, axis=1)
    i = tf.sigmoid(i)
    f = tf.sigmoid(f)
    o = tf.sigmoid(o)
    g = tf.tanh(g)
    next_c = i * g + f * prev_c
    next_h = o * tf.tanh(next_c)
    return next_c, next_h 
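A minimal usage sketch for the cell above (assumes TensorFlow 2.x); the shapes are assumptions (batch 2, input size 8, hidden size 16), which force the weight matrix to be [input_size + hidden, 4 * hidden]:

import tensorflow as tf

batch, input_size, hidden = 2, 8, 16
x = tf.random.normal([batch, input_size])
prev_c = tf.zeros([batch, hidden])
prev_h = tf.zeros([batch, hidden])
w = tf.random.normal([input_size + hidden, 4 * hidden])

next_c, next_h = lstm(x, prev_c, prev_h, w)
print(next_c.shape, next_h.shape)             # (2, 16) (2, 16)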
Example #18
Source File: model_deploy_test.py    From ctw-baseline with MIT License
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
  with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
                         reuse=reuse):
    predictions = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid,
                                       scope='fully_connected')
    slim.losses.log_loss(predictions, labels)
    return predictions 
Example #19
Source File: model_deploy_test.py    From ctw-baseline with MIT License
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
  with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
                         reuse=reuse):
    inputs = slim.batch_norm(inputs, decay=0.1)
    predictions = slim.fully_connected(inputs, 1,
                                       activation_fn=tf.sigmoid,
                                       scope='fully_connected')
    slim.losses.log_loss(predictions, labels)
    return predictions 
Example #20
Source File: model.py    From BetaElephant with MIT License
def get_model(name):
    name = functools.partial('{}-{}'.format, name)

    self_pos = tf.placeholder(Config.dtype, Config.data_shape, name='self_pos')
    self_ability = tf.placeholder(Config.dtype, Config.data_shape, name='self_ability')
    enemy_pos = tf.placeholder(Config.dtype, Config.data_shape, name='enemy_pos')
    input_label = tf.placeholder(Config.dtype, Config.label_shape, name='input_label')

    x = tf.concat(3, [self_pos, self_ability, enemy_pos], name=name('input_concat'))
    y = input_label

    nl = tf.nn.tanh

    def conv_pip(name, x):
        name = functools.partial('{}_{}'.format, name)

        x = conv2d(name('0'), x, Config.data_shape[3]*2, kernel=3, stride=1, nl=nl)
        x = conv2d(name('1'), x, Config.data_shape[3], kernel=3, stride=1, nl=nl)
        return x

    pred = conv_pip(name('conv0'), x)
    for layer in range(5):
        pred_branch = tf.concat(3, [pred,x], name=name('concate%d'%layer))
        pred += conv_pip(name('conv%d'%(layer+1)), pred_branch)

    pred = tf.sigmoid(5*pred, name=name('control_tanh'))  # a scaled sigmoid, despite the 'control_tanh' scope name

    # an alternative formulation of y*log(y)
    loss = -tf.reduce_sum(tf.mul(pred, y), reduction_indices=[1,2,3])
    pred = tf.mul(pred, self_ability)
    return Model([self_pos, self_ability, enemy_pos], input_label, loss, pred) 
Example #21
Source File: filters.py    From exposure with MIT License
def filter_param_regressor(self, features):
    return tf.sigmoid(features) 
Example #22
Source File: filters.py    From exposure with MIT License
def filter_param_regressor(self, features):
    return tf.sigmoid(features) 
Example #23
Source File: filters.py    From exposure with MIT License
def filter_param_regressor(self, features):
    return tf.sigmoid(features) 
Example #24
Source File: filters.py    From exposure with MIT License
def get_mask(self, img, mask_parameters):
    with tf.name_scope(name='mask'):
      # Five parameters for one filter
      filter_input_range = 5
      assert mask_parameters.shape[1] == self.get_num_mask_parameters()
      mask_parameters = tanh_range(
          l=-filter_input_range, r=filter_input_range,
          initial=0)(mask_parameters)
      size = list(map(int, img.shape[1:3]))
      grid = np.zeros(shape=[1] + size + [2], dtype=np.float32)

      shorter_edge = min(size[0], size[1])
      for i in range(size[0]):
        for j in range(size[1]):
          grid[0, i, j,
               0] = (i + (shorter_edge - size[0]) / 2.0) / shorter_edge - 0.5
          grid[0, i, j,
               1] = (j + (shorter_edge - size[1]) / 2.0) / shorter_edge - 0.5
      grid = tf.constant(grid)
      # (Ax)^2 + (By)^2 + C
      inp = (grid[:, :, :, 0, None] * mask_parameters[:, None, None, 0, None]) ** 2 + \
            (grid[:, :, :, 1, None] * mask_parameters[:, None, None, 1, None]) ** 2 + \
            mask_parameters[:, None, None, 2, None] - filter_input_range
      # Sharpness and inversion
      inp *= self.cfg.maximum_sharpness * mask_parameters[:, None, None, 3,
                                                          None] / filter_input_range
      mask = tf.sigmoid(inp)
      # Strength
      mask *= mask_parameters[:, None, None, 4,
                              None] / filter_input_range * 0.5 + 0.5
      if not self.use_masking():
        print('* Masking Disabled')
        mask = mask * 0 + 1
      else:
        print('* Masking Enabled')
      print('mask', mask.shape)
    return mask 
Example #25
Source File: filters.py    From exposure with MIT License
def get_num_mask_parameters(self):
    return 5

  # Input: no need for tanh or sigmoid
  # Values closer to 1 are applied more strongly by the filter
  # no additional TF variables inside 
Example #26
Source File: filters.py    From exposure with MIT License
def filter_param_regressor(self, features):
    return tf.sigmoid(features) 
Example #27
Source File: filters.py    From exposure with MIT License
def get_mask(self, img, mask_parameters):
    if not self.use_masking():
      print('* Masking Disabled')
      return tf.ones(shape=(1, 1, 1, 1), dtype=tf.float32)
    else:
      print('* Masking Enabled')
    with tf.name_scope(name='mask'):
      # Six parameters for one filter
      filter_input_range = 5
      assert mask_parameters.shape[1] == self.get_num_mask_parameters()
      mask_parameters = tanh_range(
          l=-filter_input_range, r=filter_input_range,
          initial=0)(mask_parameters)
      size = list(map(int, img.shape[1:3]))
      grid = np.zeros(shape=[1] + size + [2], dtype=np.float32)

      shorter_edge = min(size[0], size[1])
      for i in range(size[0]):
        for j in range(size[1]):
          grid[0, i, j,
               0] = (i + (shorter_edge - size[0]) / 2.0) / shorter_edge - 0.5
          grid[0, i, j,
               1] = (j + (shorter_edge - size[1]) / 2.0) / shorter_edge - 0.5
      grid = tf.constant(grid)
      # Ax + By + C * L + D
      inp = grid[:, :, :, 0, None] * mask_parameters[:, None, None, 0, None] + \
            grid[:, :, :, 1, None] * mask_parameters[:, None, None, 1, None] + \
            mask_parameters[:, None, None, 2, None] * (rgb2lum(img) - 0.5) + \
            mask_parameters[:, None, None, 3, None] * 2
      # Sharpness and inversion
      inp *= self.cfg.maximum_sharpness * mask_parameters[:, None, None, 4,
                                                          None] / filter_input_range
      mask = tf.sigmoid(inp)
      # Strength
      mask = mask * (
          mask_parameters[:, None, None, 5, None] / filter_input_range * 0.5 +
          0.5) * (1 - self.cfg.minimum_strength) + self.cfg.minimum_strength
      print('mask', mask.shape)
    return mask 
Example #28
Source File: filters.py    From exposure with MIT License
def get_num_mask_parameters(self):
    return 6

  # Input: no need for tanh or sigmoid
  # Values closer to 1 are applied more strongly by the filter
  # no additional TF variables inside 
Example #29
Source File: activation.py    From icme2019 with MIT License
def call(self, inputs, **kwargs):

        inputs_normed = self.bn(inputs)
        # tf.layers.batch_normalization(
        # inputs, axis=self.axis, epsilon=self.epsilon, center=False, scale=False)
        x_p = tf.sigmoid(inputs_normed)
        return self.alphas * (1.0 - x_p) * inputs + x_p * inputs 
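This looks like the Dice activation introduced with Deep Interest Network: with p = sigmoid(BN(x)), the output is p * x + alpha * (1 - p) * x, a data-adaptive blend between the identity and a PReLU-style leaky branch. A standalone numeric sketch (skipping batch norm and fixing alpha, both simplifying assumptions):

import numpy as np

def dice(x, alpha=0.25):
    p = 1.0 / (1.0 + np.exp(-x))              # soft gate in (0, 1), BN omitted
    return p * x + alpha * (1.0 - p) * x      # blend identity and leaky branch

x = np.array([-2.0, 0.0, 2.0])
print(dice(x))                                # negatives damped, positives ~unchanged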
Example #30
Source File: model.py    From BetaElephant with MIT License
def get_model(name):
    name = functools.partial('{}-{}'.format, name)

    self_pos = tf.placeholder(Config.dtype, Config.data_shape, name='self_pos')
    self_ability = tf.placeholder(Config.dtype, Config.data_shape, name='self_ability')
    enemy_pos = tf.placeholder(Config.dtype, Config.data_shape, name='enemy_pos')
    input_label = tf.placeholder(Config.dtype, Config.label_shape, name='input_label')

    x = tf.concat(3, [self_pos, self_ability, enemy_pos], name=name('input_concat'))
    y = input_label

    nl = tf.nn.tanh

    def conv_pip(name, x, nl):
        name = functools.partial('{}_{}'.format, name)

        x = conv2d(name('0'), x, Config.data_shape[3]*2, kernel=3, stride=1, nl=nl)
        x = conv2d(name('1'), x, Config.data_shape[3], kernel=3, stride=1, nl=nl)
        return x

    for layer in range(5):
        x_branch = conv_pip(name('conv%d'%layer), x, nl)
        x = tf.concat(3, [x,x_branch], name=name('concate%d'%layer))

    x = conv_pip(name('conv5'), x, nl=None)
    pred = tf.sigmoid(x)

    # an alternative formulation of y*log(y)
    loss = -tf.log(tf.reduce_sum(tf.mul(x, y), reduction_indices=[1,2,3]))
    loss += - 0.1 * tf.log(tf.reduce_sum(tf.mul(x, self_ability), reduction_indices=[1,2,3]))
    pred = tf.mul(pred, self_ability)

    return Model([self_pos, self_ability, enemy_pos], input_label, loss, pred)