Python tensorflow.select() Examples

The following are 30 code examples of tensorflow.select(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
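Note that tf.select was replaced by tf.where in the TensorFlow 1.0 API, which keeps the same element-wise (condition, x, y) semantics; several examples below already branch on hasattr(tf, 'select') for exactly that reason. As a minimal sketch of the selection pattern these examples rely on (the constants are illustrative only):

import tensorflow as tf

cond = tf.constant([True, False, True])
a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([-1.0, -2.0, -3.0])

# tf.select only exists in very old TensorFlow; fall back to tf.where,
# which keeps the same (condition, true_values, false_values) signature.
select_fn = tf.select if hasattr(tf, 'select') else tf.where
result = select_fn(cond, a, b)  # -> [1.0, -2.0, 3.0]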
Example #1
Source File: nn.py    From DualLearning with MIT License
def sample_from_discretized_mix_logistic(l,nr_mix):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix*3])
    # sample mixture indicator from softmax
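    # (Gumbel-max trick: adding -log(-log(U)) noise to the logits and taking the
    # argmax draws an index distributed according to the softmax of the logits)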
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
    sel = tf.reshape(sel, xs[:-1] + [1,nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:,:,:,:,:nr_mix]*sel,4)
    log_scales = tf.maximum(tf.reduce_sum(l[:,:,:,:,nr_mix:2*nr_mix]*sel,4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(l[:,:,:,:,2*nr_mix:3*nr_mix])*sel,4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
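    # (inverse-CDF sampling: for U ~ Uniform(0,1), mean + scale*(log(U) - log(1-U))
    # is a draw from a logistic distribution with that mean and scale)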
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales)*(tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:,:,:,0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(x[:,:,:,1] + coeffs[:,:,:,0]*x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(x[:,:,:,2] + coeffs[:,:,:,1]*x0 + coeffs[:,:,:,2]*x1, -1.), 1.)
    return tf.concat([tf.reshape(x0,xs[:-1]+[1]), tf.reshape(x1,xs[:-1]+[1]), tf.reshape(x2,xs[:-1]+[1])],3) 
Example #2
Source File: loss.py    From tensorflow-litterbox with Apache License 2.0
def _compute_huber(predictions, labels, delta=1.0):
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    delta = tf.to_float(delta)

    diff = predictions - labels
    diff_abs = tf.abs(diff)
    delta_fact = 0.5 * tf.square(delta)
    condition = tf.less(diff_abs, delta)
    left_opt = 0.5 * tf.square(diff)
    right_opt = delta * diff_abs - delta_fact
    losses_val = tf.select(condition, left_opt, right_opt)
    return losses_val


# Returns non-reduced tensor of unweighted losses with batch dimension matching inputs 
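A hedged usage sketch (the tensors below are illustrative, not from the tensorflow-litterbox source; like the example itself, it assumes a pre-1.0 TensorFlow where tf.select and tf.to_float exist): the per-element Huber values returned by _compute_huber are typically reduced to a scalar loss.

predictions = tf.constant([0.2, 1.5, -3.0])
labels = tf.constant([0.0, 1.0, 0.0])
per_element = _compute_huber(predictions, labels, delta=1.0)
loss = tf.reduce_mean(per_element)  # scalar Huber loss over the batch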
Example #3
Source File: nn.py    From weightnorm with MIT License
def sample_from_discretized_mix_logistic(l,nr_mix):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix*3])
    # sample mixture indicator from softmax
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
    sel = tf.reshape(sel, xs[:-1] + [1,nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:,:,:,:,:nr_mix]*sel,4)
    log_scales = tf.maximum(tf.reduce_sum(l[:,:,:,:,nr_mix:2*nr_mix]*sel,4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(l[:,:,:,:,2*nr_mix:3*nr_mix])*sel,4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales)*(tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:,:,:,0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(x[:,:,:,1] + coeffs[:,:,:,0]*x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(x[:,:,:,2] + coeffs[:,:,:,1]*x0 + coeffs[:,:,:,2]*x1, -1.), 1.)
    return tf.concat(3,[tf.reshape(x0,xs[:-1]+[1]), tf.reshape(x1,xs[:-1]+[1]), tf.reshape(x2,xs[:-1]+[1])]) 
Example #4
Source File: util.py    From keras-rl2 with MIT License
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity since TensorFlow does have problems
        # if we compare `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    import tensorflow as tf
    if hasattr(tf, 'select'):
        return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
    else:
        return tf.where(condition, squared_loss, linear_loss)  # condition, true, false 
Example #5
Source File: utils.py    From lang2program with Apache License 2.0
def broadcast(tensor, target_tensor):
    """Broadcast a tensor to match the shape of a target tensor.

    Args:
        tensor (Tensor): tensor to be tiled
        target_tensor (Tensor): tensor whose shape is to be matched
    """
    rank = lambda t: t.get_shape().ndims
    assert rank(tensor) == rank(target_tensor)  # TODO: assert that tensors have no overlapping non-unity dimensions

    orig_shape = tf.shape(tensor)
    target_shape = tf.shape(target_tensor)

    # if dim == 1, set it to target_dim
    # else, set it to 1
    tiling_factor = tf.select(tf.equal(orig_shape, 1), target_shape, tf.ones([rank(tensor)], dtype=tf.int32))
    broadcasted = tf.tile(tensor, tiling_factor)

    # Add static shape information
    broadcasted.set_shape(target_tensor.get_shape())

    return broadcasted 
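A minimal usage sketch for broadcast (shapes are illustrative): a [batch_size, 1] mask tiled against a [batch_size, seq_length] target gets its singleton dimension expanded to match.

mask = tf.ones([4, 1])             # [batch_size, 1]
target = tf.zeros([4, 7])          # [batch_size, seq_length]
tiled = broadcast(mask, target)    # tiling factor [1, 7] -> shape [4, 7]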
Example #6
Source File: nn.py    From gradient-checkpointing with MIT License
def sample_from_discretized_mix_logistic(l,nr_mix):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix*3])
    # sample mixture indicator from softmax
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
    sel = tf.reshape(sel, xs[:-1] + [1,nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:,:,:,:,:nr_mix]*sel,4)
    log_scales = tf.maximum(tf.reduce_sum(l[:,:,:,:,nr_mix:2*nr_mix]*sel,4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(l[:,:,:,:,2*nr_mix:3*nr_mix])*sel,4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales)*(tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:,:,:,0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(x[:,:,:,1] + coeffs[:,:,:,0]*x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(x[:,:,:,2] + coeffs[:,:,:,1]*x0 + coeffs[:,:,:,2]*x1, -1.), 1.)
    return tf.concat([tf.reshape(x0,xs[:-1]+[1]), tf.reshape(x1,xs[:-1]+[1]), tf.reshape(x2,xs[:-1]+[1])],3) 
Example #7
Source File: utils.py    From lang2program with Apache License 2.0
def broadcast(tensor, target_tensor):
    """Broadcast a tensor to match the shape of a target tensor.

    Args:
        tensor (Tensor): tensor to be tiled
        target_tensor (Tensor): tensor whose shape is to be matched
    """
    rank = lambda t: t.get_shape().ndims
    assert rank(tensor) == rank(target_tensor)  # TODO: assert that tensors have no overlapping non-unity dimensions

    orig_shape = tf.shape(tensor)
    target_shape = tf.shape(target_tensor)

    # if dim == 1, set it to target_dim
    # else, set it to 1
    tiling_factor = tf.select(tf.equal(orig_shape, 1), target_shape, tf.ones([rank(tensor)], dtype=tf.int32))
    broadcasted = tf.tile(tensor, tiling_factor)

    # Add static shape information
    broadcasted.set_shape(target_tensor.get_shape())

    return broadcasted 
Example #8
Source File: seq_batch.py    From lang2program with Apache License 2.0
def change_pad_value(values, mask, pad_val):
    """Given a set of values and a pad mask, change the value of all pad entries.

    Args:
        values (Tensor): of shape [batch_size, seq_length, :, ..., :].
        mask (Tensor): binary float tensor of shape [batch_size, seq_length]
        pad_val (float): value to set all pad entries to

    Returns:
        Tensor: a new Tensor of same shape as values
    """
    # broadcast the mask to match shape of values
    mask = expand_dims_for_broadcast(mask, values)  # (batch_size, seq_length, 1, ..., 1)
    mask = broadcast(mask, values)
    mask = tf.cast(mask, tf.bool)  # cast to bool

    # broadcast val
    broadcast_val = pad_val * tf.ones(tf.shape(values))

    new_values = tf.select(mask, values, broadcast_val)
    return new_values 
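A short usage sketch (tensors are illustrative): entries of values whose mask is 0 are overwritten with pad_val, everything else is kept.

values = tf.random_uniform([2, 3, 4])               # [batch_size, seq_length, hidden]
mask = tf.constant([[1., 1., 0.], [1., 0., 0.]])    # 1 = real token, 0 = padding
padded = change_pad_value(values, mask, pad_val=0.0)  # pad positions become 0.0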
Example #9
Source File: seq_batch.py    From lang2program with Apache License 2.0
def change_pad_value(values, mask, pad_val):
    """Given a set of values and a pad mask, change the value of all pad entries.

    Args:
        values (Tensor): of shape [batch_size, seq_length, :, ..., :].
        mask (Tensor): binary float tensor of shape [batch_size, seq_length]
        pad_val (float): value to set all pad entries to

    Returns:
        Tensor: a new Tensor of same shape as values
    """
    # broadcast the mask to match shape of values
    mask = expand_dims_for_broadcast(mask, values)  # (batch_size, seq_length, 1, ..., 1)
    mask = broadcast(mask, values)
    mask = tf.cast(mask, tf.bool)  # cast to bool

    # broadcast val
    broadcast_val = pad_val * tf.ones(tf.shape(values))

    new_values = tf.select(mask, values, broadcast_val)
    return new_values 
Example #10
Source File: model.py    From Action_Recognition_Zoo with MIT License
def compute_first_or_last(self, select, first=True):
    #perform first or last operation on row select with probabilistic row selection
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
      if (first):
        current = tf.slice(select, [0, i], [self.batch_size, 1])
      else:
        current = tf.slice(select, [0, self.max_elements - 1 - i],
                           [self.batch_size, 1])
      curr_prob = current * (1 - running_sum)
      curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
      running_sum += curr_prob
      temp_ans = []
      curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
      for i_ans in range(self.max_elements):
        if (not (first) and i_ans == self.max_elements - 1 - i):
          temp_ans.append(curr_prob)
        elif (first and i_ans == i):
          temp_ans.append(curr_prob)
        else:
          temp_ans.append(tf.zeros_like(curr_prob))
      temp_ans = tf.transpose(tf.concat(0, temp_ans))
      answer += temp_ans
    return answer 
Example #11
Source File: keras_extensions.py    From onto-lstm with Apache License 2.0
def switch(condition, then_tensor, else_tensor):
    """
    Keras' implementation of switch for tensorflow uses tf.switch which accepts only scalar conditions.
    It should use tf.select instead.
    """
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        condition_shape = condition.get_shape()
        input_shape = then_tensor.get_shape()
        if condition_shape[-1] != input_shape[-1] and condition_shape[-1] == 1:
            # This means the last dim is an embedding dim. Keras does not mask this dimension. But tf wants
            # the condition and the then and else tensors to be the same shape.
            condition = K.dot(tf.cast(condition, tf.float32), tf.ones((1, input_shape[-1])))
        return tf.select(tf.cast(condition, dtype=tf.bool), then_tensor, else_tensor)
    else:
        import theano.tensor as T
        return T.switch(condition, then_tensor, else_tensor) 
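A hedged usage sketch (shapes are illustrative, assuming the TensorFlow backend): a (batch, time, 1) mask is broadcast across the embedding dimension before selecting between the two tensors.

condition = K.ones((2, 5, 1))      # mask without the embedding dimension
then_tensor = K.zeros((2, 5, 8))
else_tensor = K.ones((2, 5, 8))
masked = switch(condition, then_tensor, else_tensor)  # condition expanded to (2, 5, 8)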
Example #12
Source File: model.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def compute_first_or_last(self, select, first=True):
    #perform first or last operation on row select with probabilistic row selection
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
      if (first):
        current = tf.slice(select, [0, i], [self.batch_size, 1])
      else:
        current = tf.slice(select, [0, self.max_elements - 1 - i],
                           [self.batch_size, 1])
      curr_prob = current * (1 - running_sum)
      curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
      running_sum += curr_prob
      temp_ans = []
      curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
      for i_ans in range(self.max_elements):
        if (not (first) and i_ans == self.max_elements - 1 - i):
          temp_ans.append(curr_prob)
        elif (first and i_ans == i):
          temp_ans.append(curr_prob)
        else:
          temp_ans.append(tf.zeros_like(curr_prob))
      temp_ans = tf.transpose(tf.concat(0, temp_ans))
      answer += temp_ans
    return answer 
Example #13
Source File: ternary.py    From ternarynet with Apache License 2.0
def p_ternarize(x, p):

    x = tf.tanh(x)
    shape = x.get_shape()

    thre = tf.get_variable('T', trainable=False, collections=[tf.GraphKeys.VARIABLES, 'thresholds'],
            initializer=0.05)
    flat_x = tf.reshape(x, [-1])
    k = int(flat_x.get_shape().dims[0].value * (1 - p))
    topK, _ = tf.nn.top_k(tf.abs(flat_x), k)
    update_thre = thre.assign(topK[-1])
    tf.add_to_collection('update_thre_op', update_thre)

    mask = tf.zeros(shape)
    mask = tf.select((x > thre) | (x < -thre), tf.ones(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask)

    tf.histogram_summary(w.name, w)
    return w 
Example #14
Source File: ternary.py    From ternarynet with Apache License 2.0
def tw_ternarize(x, thre):

    shape = x.get_shape()

    thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thre)

    w_p = tf.get_variable('Wp', collections=[tf.GraphKeys.VARIABLES, 'positives'], initializer=1.0)
    w_n = tf.get_variable('Wn', collections=[tf.GraphKeys.VARIABLES, 'negatives'], initializer=1.0)

    tf.scalar_summary(w_p.name, w_p)
    tf.scalar_summary(w_n.name, w_n)

    mask = tf.ones(shape)
    mask_p = tf.select(x > thre_x, tf.ones(shape) * w_p, mask)
    mask_np = tf.select(x < -thre_x, tf.ones(shape) * w_n, mask_p)
    mask_z = tf.select((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask_z)

    w = w * mask_np

    tf.histogram_summary(w.name, w)
    return w 
Example #15
Source File: crbm_backup.py    From Convolutional_Deep_Belief_Network with MIT License
def draw_samples(self, mean_activation, method ='forward'):
    """INTENT : Draw samples from distribution of specified parameter
    ------------------------------------------------------------------------------------------------------------------------------------------
    PARAMETERS :
    mean_activation         :        parameter of the distribution to draw samples from
    method                  :        which direction to draw samples in, i.e. forward or backward
    ------------------------------------------------------------------------------------------------------------------------------------------
    REMARK : If FORWARD then samples for HIDDEN layer (BERNOULLI)
             If BACKWARD then samples for VISIBLE layer (BERNOULLI OR GAUSSIAN if self.gaussian_unit = True)"""

    if self.gaussian_unit and method == 'backward': 
      'In this case mean_activation is the mean of the normal distribution, variance being self.variance^2'
      mu = tf.reshape(mean_activation, [-1])
      dist = tf.contrib.distributions.MultivariateNormalDiag(mu, self.sigma)
      samples = dist.sample()
      return tf.reshape(samples,[self.batch_size,self.visible_height,self.visible_width,self.visible_channels])
    elif method == 'forward':
      height   =  self.hidden_height
      width    =  self.hidden_width
      channels =  self.filter_number  
    elif method == 'backward':
      height   =  self.visible_height
      width    =  self.visible_width
      channels =  self.visible_channels
    return tf.select(tf.random_uniform([self.batch_size,height,width,channels]) - mean_activation < 0, tf.ones([self.batch_size,height,width,channels]), tf.zeros([self.batch_size,height,width,channels])) 
Example #16
Source File: nn.py    From fast-pixel-cnn with MIT License
def sample_from_discretized_mix_logistic(l,nr_mix,seed=None):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix*3])
    # sample mixture indicator from softmax
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5, seed=seed))), 3), depth=nr_mix, dtype=tf.float32)    
    sel = tf.reshape(sel, xs[:-1] + [1,nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:,:,:,:,:nr_mix]*sel,4)
    log_scales = tf.maximum(tf.reduce_sum(l[:,:,:,:,nr_mix:2*nr_mix]*sel,4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(l[:,:,:,:,2*nr_mix:3*nr_mix])*sel,4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5, seed=(seed + 1 if seed is not None else None))
    
    x = means + tf.exp(log_scales)*(tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:,:,:,0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(x[:,:,:,1] + coeffs[:,:,:,0]*x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(x[:,:,:,2] + coeffs[:,:,:,1]*x0 + coeffs[:,:,:,2]*x1, -1.), 1.)
    return tf.concat([tf.reshape(x0,xs[:-1]+[1]), tf.reshape(x1,xs[:-1]+[1]), tf.reshape(x2,xs[:-1]+[1])], 3) 
Example #17
Source File: tensorflow_backend.py    From KerasNeuralFingerprint with MIT License
def random_binomial(shape, p=0.0, dtype=_FLOATX, seed=None):
    if seed is None:
        seed = np.random.randint(10e6)
    return tf.select(tf.random_uniform(shape, dtype=dtype, seed=seed) <= p,
                     tf.ones(shape, dtype=dtype),
                     tf.zeros(shape, dtype=dtype)) 
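A brief usage sketch (names are illustrative): drawing a Bernoulli(p) mask, e.g. as a dropout-style keep mask.

activations = tf.ones((32, 128))
keep_mask = random_binomial((32, 128), p=0.5, seed=42)  # ~half ones, half zeros
dropped = activations * keep_mask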
Example #18
Source File: model.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def error_computation(self):
    #computes the error of each example in a batch
    math_error = 0.5 * tf.square(tf.sub(self.scalar_output, self.batch_answer))
    #scale math error
    math_error = math_error / self.rows
    math_error = tf.minimum(math_error, self.utility.FLAGS.max_math_error *
                            tf.ones(tf.shape(math_error), self.data_type))
    self.init_print_error = tf.select(
        self.batch_gold_select, -1 * tf.log(self.batch_lookup_answer + 1e-300 +
                                            self.invert_select_full_mask), -1 *
        tf.log(1 - self.batch_lookup_answer)) * self.select_full_mask
    print_error_1 = self.init_print_error * tf.cast(
        tf.equal(self.batch_print_answer, 0.0), self.data_type)
    print_error = tf.reduce_sum(tf.reduce_sum((print_error_1), 1), 1)
    for val in range(1, 58):
      print_error += self.compute_lookup_error(val + 0.0)
    print_error = print_error * self.utility.FLAGS.print_cost / self.num_entries
    if (self.mode == "train"):
      error = tf.select(
          tf.logical_and(
              tf.not_equal(self.batch_answer, 0.0),
              tf.not_equal(
                  tf.reduce_sum(tf.reduce_sum(self.batch_print_answer, 1), 1),
                  0.0)),
          self.soft_min(math_error, print_error),
          tf.select(
              tf.not_equal(self.batch_answer, 0.0), math_error, print_error))
    else:
      error = tf.select(
          tf.logical_and(
              tf.equal(self.scalar_output, 0.0),
              tf.equal(
                  tf.reduce_sum(tf.reduce_sum(self.batch_lookup_answer, 1), 1),
                  0.0)),
          tf.ones_like(math_error),
          tf.select(
              tf.equal(self.scalar_output, 0.0), print_error, math_error))
    return error 
Example #19
Source File: model.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def one_pass(self, select, question_embedding, hidden_vectors, hprev,
               prev_select_1, curr_pass):
    #Performs one timestep which involves selecting an operation and a column
    attention_vector = self.perform_attention(
        hprev, hidden_vectors, self.question_length,
        self.batch_question_attention_mask)  #batch_size * embedding_dims
    controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["controller_prev"]) + tf.matmul(
            tf.concat(1, [question_embedding, attention_vector]), self.params[
                "controller"]))
    column_controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["column_controller_prev"]) + tf.matmul(
            tf.concat(1, [question_embedding, attention_vector]), self.params[
                "column_controller"]))
    controller_vector = nn_utils.apply_dropout(
        controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.operation_logits = tf.matmul(controller_vector,
                                      tf.transpose(self.params_unit))
    softmax = tf.nn.softmax(self.operation_logits)
    soft_softmax = softmax
    #compute column softmax: bs * max_columns
    weighted_op_representation = tf.transpose(
        tf.matmul(tf.transpose(self.params_unit), tf.transpose(softmax)))
    column_controller_vector = tf.nn.relu(
        tf.matmul(
            tf.concat(1, [
                column_controller_vector, weighted_op_representation
            ]), self.params["break_conditional"]))
    full_column_softmax = self.compute_column_softmax(column_controller_vector,
                                                      curr_pass)
    soft_column_softmax = full_column_softmax
    if (self.mode == "test"):
      full_column_softmax = self.make_hard_softmax(full_column_softmax)
      softmax = self.make_hard_softmax(softmax)
    output, select = self.perform_operations(softmax, full_column_softmax,
                                             select, prev_select_1, curr_pass)
    return output, select, softmax, soft_softmax, full_column_softmax, soft_column_softmax 
Example #20
Source File: model.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def compute_max_or_min(self, select, maxi=True):
    #computes the argmax and argmin of a column with probabilistic row selection
    answer = tf.zeros([
        self.batch_size, self.num_cols + self.num_word_cols, self.max_elements
    ], self.data_type)
    sum_prob = tf.zeros([self.batch_size, self.num_cols + self.num_word_cols],
                        self.data_type)
    for j in range(self.max_elements):
      if (maxi):
        curr_pos = j
      else:
        curr_pos = self.max_elements - 1 - j
      select_index = tf.slice(self.full_processed_sorted_index_column,
                              [0, 0, curr_pos], [self.batch_size, -1, 1])
      select_mask = tf.equal(
          tf.tile(
              tf.expand_dims(
                  tf.tile(
                      tf.expand_dims(tf.range(self.max_elements), 0),
                      [self.batch_size, 1]), 1),
              [1, self.num_cols + self.num_word_cols, 1]), select_index)
      curr_prob = tf.expand_dims(select, 1) * tf.cast(
          select_mask, self.data_type) * self.select_bad_number_mask
      curr_prob = curr_prob * tf.expand_dims((1 - sum_prob), 2)
      curr_prob = curr_prob * tf.expand_dims(
          tf.cast((1 - sum_prob) > 0.0, self.data_type), 2)
      answer = tf.select(select_mask, curr_prob, answer)
      sum_prob += tf.reduce_sum(curr_prob, 2)
    return answer 
Example #21
Source File: model.py    From ECO-pytorch with BSD 2-Clause "Simplified" License
def make_hard_softmax(self, softmax):
    #converts soft selection to hard selection. used at test time
    cond = tf.equal(
        softmax, tf.reshape(tf.reduce_max(softmax, 1), [self.batch_size, 1]))
    softmax = tf.select(
        cond, tf.fill(tf.shape(softmax), 1.0), tf.fill(tf.shape(softmax), 0.0))
    softmax = tf.cast(softmax, self.data_type)
    return softmax 
Example #22
Source File: relation_kd_utils.py    From BERT with Apache License 2.0
def huber_loss(labels, predictions, delta=1.0):
	residual = tf.abs(predictions - labels)
	condition = tf.less(residual, delta) # < 1 is true
	small_res = 0.5 * tf.square(residual)
	large_res = delta * residual - 0.5 * tf.square(delta)
	loss = tf.cast(condition, tf.float32) * small_res + (1-tf.cast(condition, tf.float32)) * large_res
	return loss
	# return tf.select(condition, small_res, large_res) 
Example #23
Source File: symbolic_functions.py    From DDRL with Apache License 2.0
def huber_loss(x, delta=1, name=None):
    if name is None:
        name = 'huber_loss'
    sqrcost = tf.square(x)
    abscost = tf.abs(x)
    return tf.reduce_sum(
            tf.select(abscost < delta,
                sqrcost * 0.5,
                abscost * delta - 0.5 * delta ** 2),
            name=name) 
Example #24
Source File: resnet_model.py    From Action_Recognition_Zoo with MIT License
def _relu(self, x, leakiness=0.0):
    """Relu, with optional leaky support."""
    return tf.select(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu') 
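On TensorFlow 1.0 and later the same leaky ReLU can be written with tf.where (tf.select's replacement), or, from TF 1.4 on, with tf.nn.leaky_relu; a minimal equivalent sketch:

def _relu_where(x, leakiness=0.0):
    # tf.where keeps tf.select's (condition, true_values, false_values) signature
    return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')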
Example #25
Source File: model.py    From Action_Recognition_Zoo with MIT License
def error_computation(self):
    #computes the error of each example in a batch
    math_error = 0.5 * tf.square(tf.sub(self.scalar_output, self.batch_answer))
    #scale math error
    math_error = math_error / self.rows
    math_error = tf.minimum(math_error, self.utility.FLAGS.max_math_error *
                            tf.ones(tf.shape(math_error), self.data_type))
    self.init_print_error = tf.select(
        self.batch_gold_select, -1 * tf.log(self.batch_lookup_answer + 1e-300 +
                                            self.invert_select_full_mask), -1 *
        tf.log(1 - self.batch_lookup_answer)) * self.select_full_mask
    print_error_1 = self.init_print_error * tf.cast(
        tf.equal(self.batch_print_answer, 0.0), self.data_type)
    print_error = tf.reduce_sum(tf.reduce_sum((print_error_1), 1), 1)
    for val in range(1, 58):
      print_error += self.compute_lookup_error(val + 0.0)
    print_error = print_error * self.utility.FLAGS.print_cost / self.num_entries
    if (self.mode == "train"):
      error = tf.select(
          tf.logical_and(
              tf.not_equal(self.batch_answer, 0.0),
              tf.not_equal(
                  tf.reduce_sum(tf.reduce_sum(self.batch_print_answer, 1), 1),
                  0.0)),
          self.soft_min(math_error, print_error),
          tf.select(
              tf.not_equal(self.batch_answer, 0.0), math_error, print_error))
    else:
      error = tf.select(
          tf.logical_and(
              tf.equal(self.scalar_output, 0.0),
              tf.equal(
                  tf.reduce_sum(tf.reduce_sum(self.batch_lookup_answer, 1), 1),
                  0.0)),
          tf.ones_like(math_error),
          tf.select(
              tf.equal(self.scalar_output, 0.0), print_error, math_error))
    return error 
Example #26
Source File: model.py    From Action_Recognition_Zoo with MIT License
def one_pass(self, select, question_embedding, hidden_vectors, hprev,
               prev_select_1, curr_pass):
    #Performs one timestep which involves selecting an operation and a column
    attention_vector = self.perform_attention(
        hprev, hidden_vectors, self.question_length,
        self.batch_question_attention_mask)  #batch_size * embedding_dims
    controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["controller_prev"]) + tf.matmul(
            tf.concat(1, [question_embedding, attention_vector]), self.params[
                "controller"]))
    column_controller_vector = tf.nn.relu(
        tf.matmul(hprev, self.params["column_controller_prev"]) + tf.matmul(
            tf.concat(1, [question_embedding, attention_vector]), self.params[
                "column_controller"]))
    controller_vector = nn_utils.apply_dropout(
        controller_vector, self.utility.FLAGS.dropout, self.mode)
    self.operation_logits = tf.matmul(controller_vector,
                                      tf.transpose(self.params_unit))
    softmax = tf.nn.softmax(self.operation_logits)
    soft_softmax = softmax
    #compute column softmax: bs * max_columns
    weighted_op_representation = tf.transpose(
        tf.matmul(tf.transpose(self.params_unit), tf.transpose(softmax)))
    column_controller_vector = tf.nn.relu(
        tf.matmul(
            tf.concat(1, [
                column_controller_vector, weighted_op_representation
            ]), self.params["break_conditional"]))
    full_column_softmax = self.compute_column_softmax(column_controller_vector,
                                                      curr_pass)
    soft_column_softmax = full_column_softmax
    if (self.mode == "test"):
      full_column_softmax = self.make_hard_softmax(full_column_softmax)
      softmax = self.make_hard_softmax(softmax)
    output, select = self.perform_operations(softmax, full_column_softmax,
                                             select, prev_select_1, curr_pass)
    return output, select, softmax, soft_softmax, full_column_softmax, soft_column_softmax 
Example #27
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compare(self, c, x, y, use_gpu):
    np_ans = np.where(c, x, y)
    with self.test_session(use_gpu=use_gpu):
      out = tf.select(c, x, y)
      tf_ans = out.eval()
    self.assertAllEqual(np_ans, tf_ans)
    self.assertShapeEqual(np_ans, out) 
Example #28
Source File: model.py    From Action_Recognition_Zoo with MIT License
def make_hard_softmax(self, softmax):
    #converts soft selection to hard selection. used at test time
    cond = tf.equal(
        softmax, tf.reshape(tf.reduce_max(softmax, 1), [self.batch_size, 1]))
    softmax = tf.select(
        cond, tf.fill(tf.shape(softmax), 1.0), tf.fill(tf.shape(softmax), 0.0))
    softmax = tf.cast(softmax, self.data_type)
    return softmax 
Example #29
Source File: tensorflow_grad_inverter.py    From ddpg-aigym with MIT License
def __init__(self, action_bounds):

        self.sess = tf.InteractiveSession()       
        
        self.action_size = len(action_bounds[0])
        
        self.action_input = tf.placeholder(tf.float32, [None, self.action_size])
        self.pmax = tf.constant(action_bounds[0], dtype = tf.float32)
        self.pmin = tf.constant(action_bounds[1], dtype = tf.float32)
        self.prange = tf.constant([x - y for x, y in zip(action_bounds[0],action_bounds[1])], dtype = tf.float32)
        self.pdiff_max = tf.div(-self.action_input+self.pmax, self.prange)
        self.pdiff_min = tf.div(self.action_input - self.pmin, self.prange)
        self.zeros_act_grad_filter = tf.zeros([self.action_size])
        self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])
        self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter), tf.mul(self.act_grad, self.pdiff_max), tf.mul(self.act_grad, self.pdiff_min)) 
Example #30
Source File: util.py    From keras-rl with MIT License
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity since TensorFlow does have problems
        # if we compare `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        if hasattr(tf, 'select'):
            return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
        else:
            return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
    elif K.backend() == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))