Python tensorflow.add() Examples

The following are 30 code examples of tensorflow.add(), collected from open-source projects. The source file, project, and license are noted above each example so you can trace it back to its original context. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
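Before the project examples, here is a minimal, self-contained sketch of what tf.add() does (TensorFlow 1.x graph style; the tensor values are my own illustration): it adds two tensors elementwise, broadcasting compatible shapes, and gives the same result as the + operator on tensors.

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([10.0, 20.0])     # broadcast across the rows of a
c = tf.add(a, b, name='sum')      # same result as a + b

with tf.Session() as sess:
    print(sess.run(c))            # [[11. 22.] [13. 24.]]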
Example #1
Source File: tf_utils.py    From reinforcement_learning with MIT License
def conv2d(x, n_kernel, k_sz, stride=1):
  """convolutional layer with relu activation wrapper
  Args:
    x:          4d tensor [batch, height, width, channels]
    n_kernel:   number of kernels (output size)
    k_sz:       2d array, kernel size. e.g. [8,8]
    stride:     stride
  Returns
    a conv2d layer
  """
  W = tf.Variable(tf.random_normal([k_sz[0], k_sz[1], int(x.get_shape()[3]), n_kernel]))
  b = tf.Variable(tf.random_normal([n_kernel]))
  # - strides[0] and strides[3] (the batch and channel strides) must be 1
  # - padding can be 'VALID'(without padding) or 'SAME'(zero padding)
  #     - http://stackoverflow.com/questions/37674306/what-is-the-difference-between-same-and-valid-padding-in-tf-nn-max-pool-of-t
  conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
  conv = tf.nn.bias_add(conv, b)  # add bias term
  # rectified linear unit: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
  return tf.nn.relu(conv) 
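A hedged usage sketch of this helper (the input shape and hyperparameters below are assumptions for illustration, not taken from the project):

x = tf.placeholder(tf.float32, [None, 84, 84, 4])      # e.g. a stack of 4 grayscale frames
h = conv2d(x, n_kernel=16, k_sz=[8, 8], stride=4)      # SAME padding: 84/4 = 21, so h is [None, 21, 21, 16]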
Example #2
Source File: dnn.py    From deep_architect with MIT License
def affine(h_num_hidden, h_W_init_fn, h_b_init_fn):

    def compile_fn(di, dh):
        m = dh['num_hidden']
        shape = di['in'].get_shape().as_list()
        n = np.prod(shape[1:])  # np.prod; the np.product alias is deprecated
        W = tf.Variable(dh['W_init_fn']([n, m]))
        b = tf.Variable(dh['b_init_fn']([m]))

        def forward_fn(di):
            In = di['in']
            if len(shape) > 2:
                In = tf.reshape(In, [-1, n])
            return {'out': tf.add(tf.matmul(In, W), b)}

        return forward_fn

    return siso_tensorflow_module(
        'Affine', compile_fn, {
            'num_hidden': h_num_hidden,
            'W_init_fn': h_W_init_fn,
            'b_init_fn': h_b_init_fn
        }) 
Example #3
Source File: cifar10.py    From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
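The 'losses' collection is how the weight decay reaches the objective: elsewhere the data (cross-entropy) loss is added to the same collection and everything is summed with tf.add_n. A minimal sketch of that pattern (variable names assumed):

tf.add_to_collection('losses', cross_entropy_mean)                      # data loss joins the weight-decay terms
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')   # data loss + all weight-decay terms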
Example #4
Source File: component.py    From DOTA_models with Apache License 2.0
def add_regularizer(self, cost):
    """Adds L2 regularization for parameters which have it turned on.

    Args:
      cost: float cost before regularization.

    Returns:
      Updated cost optionally including regularization.
    """
    if self.network is None:
      return cost
    regularized_weights = self.network.get_l2_regularized_weights()
    if not regularized_weights:
      return cost
    l2_coeff = self.master.hyperparams.l2_regularization_coefficient
    if l2_coeff == 0.0:
      return cost
    tf.logging.info('[%s] Regularizing parameters: %s', self.name,
                    [w.name for w in regularized_weights])
    l2_costs = [tf.nn.l2_loss(p) for p in regularized_weights]
    return tf.add(cost, l2_coeff * tf.add_n(l2_costs), name='regularizer') 
Example #5
Source File: svdpp.py    From tf-recsys with MIT License
def _create_prediction(self, mu, b_u, b_i, p_u, q_i, y_u, g_i=None):
        with tf.variable_scope('prediction'):
            if g_i is None:
                pred = tf.reduce_sum(
                    tf.multiply(tf.add(p_u, y_u), q_i),
                    axis=1)
            else:
                pred = tf.reduce_sum(
                    tf.multiply(tf.add(p_u, y_u), tf.add(q_i, g_i)),
                    axis=1)

            pred = tf.add_n([b_u, b_i, pred])

            pred = tf.add(pred, mu, name='pred')

        return pred 
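For reference, this computes an SVD++-style prediction, r_hat = \mu + b_u + b_i + (p_u + y_u) . q_i, or, with the optional item-side term, (p_u + y_u) . (q_i + g_i), where y_u is presumably the already-normalized sum of implicit-feedback item factors for user u.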
Example #6
Source File: deep_cnn.py    From DOTA_models with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #7
Source File: svd.py    From tf-recsys with MIT License
def _create_prediction(self, mu, b_u, b_i, p_u, q_i):
        """Returns the tensor of prediction.

           Note that the prediction 
            r_hat = \mu + b_u + b_i + p_u * q_i
        """
        with tf.variable_scope('prediction'):
            pred = tf.reduce_sum(
                tf.multiply(p_u, q_i),
                axis=1)

            pred = tf.add_n([b_u, b_i, pred])

            pred = tf.add(pred, mu, name='pred')

        return pred 
Example #8
Source File: svd.py    From tf-recsys with MIT License
def _create_optimizer(self, loss):
        """Returns the optimizer.

           The objective function is defined as the sum of
            loss and regularizers' losses.
        """
        with tf.variable_scope('optimizer'):
            objective = tf.add(
                loss,
                tf.add_n(tf.get_collection(
                    tf.GraphKeys.REGULARIZATION_LOSSES)),
                name='objective')

            try:
                optimizer = tf.contrib.keras.optimizers.Nadam(
                ).minimize(objective, name='optimizer')
            except:
                optimizer = tf.train.AdamOptimizer().minimize(objective, name='optimizer')

        return optimizer 
Example #9
Source File: tf_utils.py    From DOTA_models with Apache License 2.0
def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs to compute AP for action prediction.
  
  Args:
    outputs        : List of scalar ops to summarize.
    names          : Name of the scalar ops.
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to.
    N              : number of outputs to process.
  """
  outs = []
  if N >= 0:
    outputs = outputs[:N]
  for i in range(len(outputs[0])):
    scalar = np.array([x[i] for x in outputs])  # list comprehension so this also works under Python 3, where map() returns an iterator
    assert(scalar.ndim == 1)
    add_value_to_summary(metric_summary, names[i], np.mean(scalar),
                         tag_str='{:>27s}:  [{:s}]: %f'.format(names[i], ''))
    outs.append(np.mean(scalar))
  return outs 
Example #10
Source File: losses.py    From DOTA_models with Apache License 2.0
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for name_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.name_scope(scope, 'L1L2Regularizer', [tensor]):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.multiply(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.multiply(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer 
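A hedged usage sketch (the variable shape and coefficients are my own placeholders):

weights = tf.get_variable('weights', shape=[128, 64])
reg_fn = l1_l2_regularizer(weight_l1=0.001, weight_l2=0.001)
reg_loss = reg_fn(weights)   # scalar: 0.001 * sum(|w|) + 0.001 * l2_loss(w)
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, reg_loss)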
Example #11
Source File: Autoencoder.py    From DOTA_models with Apache License 2.0
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer = tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init) 
Example #12
Source File: DenoisingAutoencoder.py    From DOTA_models with Apache License 2.0
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 scale = 0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
                self.weights['w1']),
                self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init) 
Example #13
Source File: tf_atari_wrappers.py    From fine-lm with MIT License
def simulate(self, action):
    with tf.name_scope("environment/simulate"):  # Do we need this?
      initializer = (tf.zeros(self.old_shape, dtype=tf.float32),
                     tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

      def not_done_step(a, _):
        reward, done = self._batch_env.simulate(action)
        with tf.control_dependencies([reward, done]):
          r0 = self._batch_env.observ + 0
          r1 = tf.add(a[1], reward)
          r2 = tf.logical_or(a[2], done)
          return (r0, r1, r2)

      simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                             initializer=initializer, parallel_iterations=1,
                             infer_shape=False)
      observations, rewards, dones = simulate_ret
      split_observations = tf.split(observations, self.skip, axis=0)
      split_observations = [tf.squeeze(o, axis=0) for o in split_observations]
      observation = tf.concat(split_observations, axis=-1)
      with tf.control_dependencies([self._observ.assign(observation)]):
        return tf.identity(rewards[-1, ...]), tf.identity(dones[-1, ...]) 
Example #14
Source File: DenoisingAutoencoder.py    From DOTA_models with Apache License 2.0
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
                 dropout_probability = 0.95):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.dropout_probability = dropout_probability
        self.keep_prob = tf.placeholder(tf.float32)

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']),
                                           self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init) 
Example #15
Source File: tf_utils.py    From reinforcement_learning with MIT License
def conv2d(x, n_kernel, k_sz, stride=1):
  """convolutional layer with relu activation wrapper
  Args:
    x:          4d tensor [batch, height, width, channels]
    n_kernel:   number of kernels (output size)
    k_sz:       2d array, kernel size. e.g. [8,8]
    stride:     stride
  Returns
    a conv2d layer
  """
  W = tf.Variable(tf.random_normal([k_sz[0], k_sz[1], int(x.get_shape()[3]), n_kernel]))
  b = tf.Variable(tf.random_normal([n_kernel]))
  # - strides[0] and strides[3] (the batch and channel strides) must be 1
  # - padding can be 'VALID'(without padding) or 'SAME'(zero padding)
  #     - http://stackoverflow.com/questions/37674306/what-is-the-difference-between-same-and-valid-padding-in-tf-nn-max-pool-of-t
  conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
  conv = tf.nn.bias_add(conv, b) # add bias term
  return tf.nn.relu(conv) # rectified linear unit: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) 
Example #16
Source File: tf_utils.py    From reinforcement_learning with MIT License
def fc(x, n_output, scope="fc", activation_fn=None, initializer=None):
  """fully connected layer with relu activation wrapper
  Args
    x:          2d tensor [batch, n_input]
    n_output    output size
  """
  with tf.variable_scope(scope):
    if initializer is None:
      # default initialization
      W = tf.Variable(tf.random_normal([int(x.get_shape()[1]), n_output]))
      b = tf.Variable(tf.random_normal([n_output]))
    else:
      W = tf.get_variable("W", shape=[int(x.get_shape()[1]), n_output], initializer=initializer)
      b = tf.get_variable("b", shape=[n_output],
                          initializer=tf.constant_initializer(.0, dtype=tf.float32))
    fc1 = tf.add(tf.matmul(x, W), b)
    if activation_fn is not None:
      fc1 = activation_fn(fc1)
  return fc1 
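A hedged usage sketch (input size and layer widths are assumptions):

x = tf.placeholder(tf.float32, [None, 128])
h = fc(x, n_output=64, scope='hidden', activation_fn=tf.nn.relu)   # relu(x W + b)
logits = fc(h, n_output=10, scope='logits')                        # no activation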
Example #17
Source File: tf_utils.py    From reinforcement_learning with MIT License
def conv2d(x, n_kernel, k_sz, stride=1):
  """convolutional layer with relu activation wrapper
  Args:
    x:          4d tensor [batch, height, width, channels]
    n_kernel:   number of kernels (output size)
    k_sz:       2d array, kernel size. e.g. [8,8]
    stride:     stride
  Returns
    a conv2d layer
  """
  W = tf.Variable(tf.random_normal([k_sz[0], k_sz[1], int(x.get_shape()[3]), n_kernel]))
  b = tf.Variable(tf.random_normal([n_kernel]))
  # - strides[0] and strides[3] (the batch and channel strides) must be 1
  # - padding can be 'VALID'(without padding) or 'SAME'(zero padding)
  #     - http://stackoverflow.com/questions/37674306/what-is-the-difference-between-same-and-valid-padding-in-tf-nn-max-pool-of-t
  conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
  conv = tf.nn.bias_add(conv, b) # add bias term
  return tf.nn.relu(conv) # rectified linear unit: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) 
Example #18
Source File: tf_utils.py    From reinforcement_learning with MIT License
def conv2d(x, n_kernel, k_sz, stride=1):
  """convolutional layer with relu activation wrapper
  Args:
    x:          4d tensor [batch, height, width, channels]
    n_kernel:   number of kernels (output size)
    k_sz:       2d array, kernel size. e.g. [8,8]
    stride:     stride
  Returns
    a conv2d layer
  """
  W = tf.Variable(tf.random_normal([k_sz[0], k_sz[1], int(x.get_shape()[3]), n_kernel]))
  b = tf.Variable(tf.random_normal([n_kernel]))
  # - strides[0] and strides[3] (the batch and channel strides) must be 1
  # - padding can be 'VALID'(without padding) or 'SAME'(zero padding)
  #     - http://stackoverflow.com/questions/37674306/what-is-the-difference-between-same-and-valid-padding-in-tf-nn-max-pool-of-t
  conv = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')
  conv = tf.nn.bias_add(conv, b) # add bias term
  return tf.nn.relu(conv) # rectified linear unit: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) 
Example #19
Source File: tf_utils.py    From reinforcement_learning with MIT License
def fc(x, n_output, scope="fc", activation_fn=None, initializer=None):
  """fully connected layer with relu activation wrapper
  Args
    x:          2d tensor [batch, n_input]
    n_output    output size
  """
  with tf.variable_scope(scope):
    if initializer is None:
      # default initialization
      W = tf.Variable(tf.random_normal([int(x.get_shape()[1]), n_output]))
      b = tf.Variable(tf.random_normal([n_output]))
    else:
      W = tf.get_variable("W", shape=[int(x.get_shape()[1]), n_output], initializer=initializer)
      b = tf.get_variable("b", shape=[n_output], initializer=tf.constant_initializer(.0, dtype=tf.float32))
    fc1 = tf.add(tf.matmul(x, W), b)
    if activation_fn is not None:
      fc1 = activation_fn(fc1)
  return fc1 
Example #20
Source File: pcr_model.py    From pointnet-registration-framework with MIT License
def get_loss_b(self,predicted_transformation,batch_size,template_pointclouds_pl,source_pointclouds_pl):	
		with tf.variable_scope('loss') as LossEvaluation:
			predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
			predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

			# with tf.variable_scope('quat_normalization') as norm:
			norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
			norm_predicted_quat = tf.sqrt(norm_predicted_quat)
			norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
			const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
			norm_predicted_quat = tf.add(norm_predicted_quat,const)
			predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)
	
			transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat, predicted_position)

			# Use 1024 Points to find loss.
			#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
			loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
			# loss = 0
		return loss 
Example #21
Source File: ipcr_model.py    From pointnet-registration-framework with MIT License
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
	with tf.variable_scope('loss') as LossEvaluation:
		predicted_position = tf.slice(predicted_transformation,[0,0],[batch_size,3])
		predicted_quat = tf.slice(predicted_transformation,[0,3],[batch_size,4])

		# with tf.variable_scope('quat_normalization') as norm:
		norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat),1)
		norm_predicted_quat = tf.sqrt(norm_predicted_quat)
		norm_predicted_quat = tf.reshape(norm_predicted_quat,(batch_size,1))
		const = tf.constant(0.0000001,shape=(batch_size,1),dtype=tf.float32)
		norm_predicted_quat = tf.add(norm_predicted_quat,const)
		predicted_norm_quat = tf.divide(predicted_quat,norm_predicted_quat)

		transformed_predicted_point_cloud = helper.transformation_quat_tensor(source_pointclouds_pl, predicted_norm_quat,predicted_position)

		#loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
		loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
	return loss 
Example #22
Source File: fcn8s_tensorflow.py    From fcn8s_tensorflow with GNU General Public License v3.0
def _build_optimizer(self):
        '''
        Builds the training-relevant part of the graph.
        '''

        with tf.name_scope('optimizer'):
            # Create a training step counter.
            global_step = tf.Variable(0, trainable=False, name='global_step')
            # Create placeholder for the learning rate.
            learning_rate = tf.placeholder(dtype=tf.float32, shape=[], name='learning_rate')
            # Compute the regularization loss.
            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) # This is a list of the individual loss values, so we still need to sum them up.
            regularization_loss = tf.add_n(regularization_losses, name='regularization_loss') # Scalar
            # Compute the total loss.
            approximation_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=self.fcn8s_output), name='approximation_loss') # Scalar
            total_loss = tf.add(approximation_loss, regularization_loss, name='total_loss')
            # Compute the gradients and apply them.
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name='adam_optimizer')
            train_op = optimizer.minimize(total_loss, global_step=global_step, name='train_op')

        return total_loss, train_op, learning_rate, global_step 
Example #23
Source File: expert_utils.py    From fine-lm with MIT License
def add_scope(scope=None, scope_fn=None):
  """Return a decorator which add a TF name/variable scope to a function.

  Note that the function returned by the decorator accept an additional 'name'
  parameter, which can overwrite the name scope given when the function is
  created.

  Args:
    scope (str): name of the scope. If None, the function name is used.
    scope_fn (fct): Either tf.name_scope or tf.variable_scope

  Returns:
    fct: the add_scope decorator
  """
  def decorator(f):

    @functools.wraps(f)
    def decorated(*args, **kwargs):
      name = kwargs.pop("name", None)  # Python 2 hack for keyword only args
      with scope_fn(name or scope or f.__name__):
        return f(*args, **kwargs)

    return decorated

  return decorator 
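A hedged usage sketch of the decorator (the decorated function is my own illustration, not from the project):

@add_scope(scope_fn=tf.name_scope)
def scaled_sum(x, y):
  return tf.add(x, y)

out = scaled_sum(tf.constant(1.0), tf.constant(2.0), name='my_sum')   # ops are created under the 'my_sum' name scope
out_default = scaled_sum(tf.constant(1.0), tf.constant(2.0))          # falls back to the function name, 'scaled_sum'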
Example #24
Source File: tf_atari_wrappers.py    From fine-lm with MIT License
def simulate(self, action):
    with tf.name_scope("environment/simulate"):  # Do we need this?
      initializer = (tf.zeros_like(self._observ),
                     tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

      def not_done_step(a, _):
        reward, done = self._batch_env.simulate(action)
        with tf.control_dependencies([reward, done]):
          # TODO(piotrmilos): possibly ignore envs with done
          r0 = tf.maximum(a[0], self._batch_env.observ)
          r1 = tf.add(a[1], reward)
          r2 = tf.logical_or(a[2], done)

          return (r0, r1, r2)

      simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                             initializer=initializer, parallel_iterations=1,
                             infer_shape=False)
      simulate_ret = [ret[-1, ...] for ret in simulate_ret]

      with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
        return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2]) 
Example #25
Source File: bisenet_v2.py    From lanenet-lane-detection with Apache License 2.0
def __call__(self, *args, **kwargs):
        """

        :param args:
        :param kwargs:
        :return:
        """
        input_tensor = kwargs['input_tensor']
        name_scope = kwargs['name']
        output_channels = input_tensor.get_shape().as_list()[-1]
        if 'padding' in kwargs:
            self._padding = kwargs['padding']
        with tf.variable_scope(name_or_scope=name_scope):
            result = tf.reduce_mean(input_tensor, axis=[1, 2], keepdims=True, name='global_avg_pooling')
            result = self.layerbn(result, self._is_training, 'bn')
            result = self._conv_block(
                input_tensor=result,
                k_size=1,
                output_channels=output_channels,
                stride=1,
                name='conv_block_1',
                padding=self._padding,
                use_bias=False,
                need_activate=True
            )
            result = tf.add(result, input_tensor, name='fused_features')
            result = self.conv2d(
                inputdata=result,
                out_channel=output_channels,
                kernel_size=3,
                padding=self._padding,
                stride=1,
                use_bias=False,
                name='final_conv_block'
            )
        return result 
Example #26
Source File: functions.py    From tangent with Apache License 2.0
def tfe_add_bcast(s, t):
  return tf.add(s, t) 
Example #27
Source File: functions.py    From tangent with Apache License 2.0
def tfe_add(t1, t2):
  return tf.add(t1, t2) 
Example #28
Source File: functions.py    From tangent with Apache License 2.0
def saxpy_call(a, b, c):
  return add(mul(a, b), c) 
Example #29
Source File: tf_mnist.py    From MachineLearningSamples-tf with MIT License
def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out

# Store layers weight & bias 
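The snippet stops where the weight and bias dictionaries would be defined. Below is a sketch consistent with the shapes conv_net expects; the kernel sizes, filter counts, and hidden width are assumptions, and only the 'wd1' input size is forced by the two 2x2 max-pools on a 28x28 image:

weights = {
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),       # 5x5 conv, 1 input channel, 32 filters
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),      # 5x5 conv, 32 -> 64 filters
    'wd1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),  # fully connected: flattened 7x7x64 conv2 output
    'out': tf.Variable(tf.random_normal([1024, 10]))           # 10 output classes
}
biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([10]))
}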
Example #30
Source File: argmax_matcher.py    From vehicle_counting_tensorflow with MIT License
def _set_values_using_indicator(self, x, indicator, val):
    """Set the indicated fields of x to val.

    Args:
      x: tensor.
      indicator: boolean with same shape as x.
      val: scalar with value to set.

    Returns:
      modified tensor.
    """
    indicator = tf.cast(indicator, x.dtype)
    return tf.add(tf.multiply(x, 1 - indicator), val * indicator)