Python tensorflow.mul() Examples

The following are 29 code examples of tensorflow.mul(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow, or try the search function. Note that tf.mul was removed in TensorFlow 1.0 in favor of tf.multiply; several of the examples below handle both APIs.
Example #1
Source File: cost.py    From LapSRN-tensorflow with Apache License 2.0
def cosine_similarity(v1, v2):
    """Cosine similarity [-1, 1], `wiki <https://en.wikipedia.org/wiki/Cosine_similarity>`_.

    Parameters
    -----------
    v1, v2 : tensor of [batch_size, n_feature], with the same number of features.

    Returns
    -----------
    a tensor of [batch_size, ]
    """
    try:  ## TF 1.0+: tf.multiply and positional axis argument
        cost = tf.reduce_sum(tf.multiply(v1, v2), 1) / (tf.sqrt(tf.reduce_sum(tf.multiply(v1, v1), 1)) * tf.sqrt(tf.reduce_sum(tf.multiply(v2, v2), 1)))
    except AttributeError:  ## TF 0.12: tf.mul and reduction_indices
        cost = tf.reduce_sum(tf.mul(v1, v2), reduction_indices=1) / (tf.sqrt(tf.reduce_sum(tf.mul(v1, v1), reduction_indices=1)) * tf.sqrt(tf.reduce_sum(tf.mul(v2, v2), reduction_indices=1)))
    return cost
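
A minimal usage sketch (assuming the TF 1.x branch above; the shapes and data are illustrative):

import numpy as np
import tensorflow as tf

v1 = tf.placeholder(tf.float32, shape=(None, 4))
v2 = tf.placeholder(tf.float32, shape=(None, 4))
sim = cosine_similarity(v1, v2)  # tensor of shape [batch_size]

with tf.Session() as sess:
    batch = np.random.rand(8, 4).astype(np.float32)
    print(sess.run(sim, feed_dict={v1: batch, v2: batch}))  # ~1.0 everywhere: each vector vs. itself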


Example #2
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def get_component_samples(self, latent_dim, batchSize):
        a_inv = tf.pow(self.kumar_a,-1)
        b_inv = tf.pow(self.kumar_b,-1)

        # compose into stick segments using pi = v \prod (1-v)
        v_means = tf.mul(self.kumar_b, beta_fn(1.+a_inv, self.kumar_b))
        components = tf.to_int32(tf.argmax(tf.concat(1, self.compose_stick_segments(v_means)), 1))
        components = tf.concat(1, [tf.expand_dims(tf.range(0,batchSize),1), tf.expand_dims(components,1)])

        # sample a z
        all_z = []
        for d in xrange(latent_dim):
            temp_z = tf.concat(1, [tf.expand_dims(self.z[k][:, d],1) for k in xrange(self.K)])
            all_z.append(tf.expand_dims(tf.gather_nd(temp_z, components),1))

        return tf.concat(1, all_z) 
Example #3
Source File: test_computations.py    From ngraph-python with Apache License 2.0
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.mul(c, a)
        e = tf.div(d, b)
        f = tf.sub(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val}) 
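
tf_obj_shape is a repo helper that is not shown here; judging from its use with np.random.rand, a minimal stand-in (an assumption, not the project's actual implementation) could be:

def tf_obj_shape(t):
    # hypothetical stand-in: static shape of a TF tensor as a tuple of ints,
    # suitable for unpacking into np.random.rand(*shape)
    return tuple(int(d) for d in t.get_shape())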
Example #4
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def f_prop(self):

        # init variational params
        self.mu = []
        self.sigma = []
        self.kumar_a = []
        self.kumar_b = []
        self.z = []
        x_recon_linear = []

        h1 = mlp(self.X, self.encoder_params['base'])
        
        for k in xrange(self.K):
            self.mu.append(mlp(h1, self.encoder_params['mu'][k]))
            self.sigma.append(tf.exp(mlp(h1, self.encoder_params['sigma'][k])))
            self.z.append(self.mu[-1] + tf.mul(self.sigma[-1], tf.random_normal(tf.shape(self.sigma[-1]))))  # reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I)
            x_recon_linear.append(mlp(self.z[-1], self.decoder_params))

        self.kumar_a = tf.exp(mlp(h1, self.encoder_params['kumar_a']))
        self.kumar_b = tf.exp(mlp(h1, self.encoder_params['kumar_b']))

        return x_recon_linear 
Example #5
Source File: cifar10.py    From ml with Apache License 2.0
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd:
    # weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
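
A short sketch of the usual pattern around this helper (assumes _variable_on_cpu and the function above are in scope; the data-loss tensor is a stand-in for the model's real loss):

import tensorflow as tf

w = _variable_with_weight_decay('w1', shape=[10, 5], stddev=0.04, wd=0.004)
data_loss = tf.constant(1.0, name='data_loss')  # stand-in for the model's data loss
tf.add_to_collection('losses', data_loss)
# total loss = data loss + all accumulated weight-decay terms
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')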
Example #6
Source File: qrnn.py    From tensorflow_end2end_speech_recognition with MIT License
def _step(self, f, z, o):
        """
        Args:
            f:
            z:
            o:
        Returns:
            h:
        """
        with tf.variable_scope("fo-Pool"):
            # f,z,o is batch_size x size
            f = tf.sigmoid(f)
            z = tf.tanh(z)
            o = tf.sigmoid(o)
            self.c = tf.mul(f, self.c) + tf.mul(1 - f, z)
            self.h = tf.mul(o, self.c)  # h is size vector

        return self.h 
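
The update implements fo-pooling from the QRNN paper: c_t = f * c_{t-1} + (1 - f) * z and h_t = o * c_t, all elementwise. A NumPy restatement of one step (a sketch for checking the math, not part of the project):

import numpy as np

def fo_pool_step(f, z, o, c_prev):
    # same update as _step above: squash the gates, then c = f*c_prev + (1-f)*z, h = o*c
    f = 1.0 / (1.0 + np.exp(-f))
    o = 1.0 / (1.0 + np.exp(-o))
    z = np.tanh(z)
    c = f * c_prev + (1.0 - f) * z
    return o * c, c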
Example #7
Source File: general.py    From qrn with MIT License
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var 
Example #8
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def compute_kumar2beta_kld(a, b, alpha, beta):
    # precompute some terms
    ab = tf.mul(a,b)
    a_inv = tf.pow(a, -1)
    b_inv = tf.pow(b, -1)

    # compute taylor expansion for E[log (1-v)] term
    kl = tf.mul(tf.pow(1+ab,-1), beta_fn(a_inv, b))
    for idx in xrange(10):
        kl += tf.mul(tf.pow(idx+2+ab,-1), beta_fn(tf.mul(idx+2., a_inv), b))
    kl = tf.mul(tf.mul(beta-1,b), kl)

    kl += tf.mul(tf.div(a-alpha,a), -0.57721 - tf.digamma(b) - b_inv)  # 0.57721 ~ Euler-Mascheroni constant
    # add normalization constants
    kl += tf.log(ab) + tf.log(beta_fn(alpha, beta))

    # final term                                                                                                  
    kl += tf.div(-(b-1),b)

    return kl 
Example #9
Source File: losses.py    From inception_v3 with Apache License 2.0
def l1_regularizer(weight=1.0, scope=None):
  """Define a L1 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1Regularizer'):
      l1_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l1_weight, tf.reduce_sum(tf.abs(tensor)), name='value')
  return regularizer 
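
A minimal usage sketch (assumes TF <= 0.12, where tf.op_scope and tf.mul still exist):

import tensorflow as tf

reg_fn = l1_regularizer(weight=0.01)
w = tf.Variable(tf.ones([3, 3]))
l1_term = reg_fn(w)  # scalar tensor: 0.01 * sum(|w|) = 0.09 here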
Example #10
Source File: losses.py    From inception_v3 with Apache License 2.0
def l2_regularizer(weight=1.0, scope=None):
  """Define a L2 regularizer.

  Args:
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L2Regularizer'):
      l2_weight = tf.convert_to_tensor(weight,
                                       dtype=tensor.dtype.base_dtype,
                                       name='weight')
      return tf.mul(l2_weight, tf.nn.l2_loss(tensor), name='value')
  return regularizer 
Example #11
Source File: losses.py    From inception_v3 with Apache License 2.0
def l1_l2_regularizer(weight_l1=1.0, weight_l2=1.0, scope=None):
  """Define a L1L2 regularizer.

  Args:
    weight_l1: scale the L1 loss by this factor.
    weight_l2: scale the L2 loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    a regularizer function.
  """
  def regularizer(tensor):
    with tf.op_scope([tensor], scope, 'L1L2Regularizer'):
      weight_l1_t = tf.convert_to_tensor(weight_l1,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l1')
      weight_l2_t = tf.convert_to_tensor(weight_l2,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight_l2')
      reg_l1 = tf.mul(weight_l1_t, tf.reduce_sum(tf.abs(tensor)),
                      name='value_l1')
      reg_l2 = tf.mul(weight_l2_t, tf.nn.l2_loss(tensor),
                      name='value_l2')
      return tf.add(reg_l1, reg_l2, name='value')
  return regularizer 
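
The combined regularizer simply adds the two weighted terms. A quick numeric check (same TF <= 0.12 assumption; for a 3x3 tensor of ones, sum(|w|) = 9 and tf.nn.l2_loss = sum(w^2)/2 = 4.5):

import tensorflow as tf

reg_fn = l1_l2_regularizer(weight_l1=1.0, weight_l2=0.5)
w = tf.Variable(tf.ones([3, 3]))
reg_term = reg_fn(w)  # 1.0*9 + 0.5*4.5 = 11.25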
Example #12
Source File: LSPModels.py    From deeppose with GNU General Public License v3.0
def loss(logits, labels):
    """Calculates Mean Pixel Error.
    
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]
    
    Returns:
      Loss tensor of type float.
    """
    
    labelValidity = tf.sign(labels, name='label_validity')  # 0 for missing (zero) labels, 1 otherwise, assuming non-negative labels
    
    minop = tf.sub(logits, labels, name='Diff_Op')
    
    absop = tf.abs(minop, name='Abs_Op')
    
    lossValues = tf.mul(labelValidity, absop, name='lossValues')
    
    loss_mean = tf.reduce_mean(lossValues, name='MeanPixelError')
    
    tf.add_to_collection('losses', loss_mean)
    
    return tf.add_n(tf.get_collection('losses'), name='total_loss'), loss_mean 
Example #13
Source File: LSPModels.py    From deeppose with GNU General Public License v3.0
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    
    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.
    
    Returns:
      Variable Tensor
    """
    var = tf.Variable(tf.random_normal(shape, stddev=stddev), name=name)
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var 
Example #14
Source File: LSPModels.py    From deeppose with GNU General Public License v3.0
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.
    
    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.
    
    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.
    
    Returns:
      Variable Tensor
    """
    var = tf.Variable(tf.random_normal(shape, stddev=stddev), name=name)
    '''if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)'''
    return var 
Example #15
Source File: dg_mnist.py    From deligan with MIT License
def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False, name='MD'):
    num_inputs=df_dim*4  # df_dim (and batchsize below) are defined elsewhere in dg_mnist.py
    theta = tf.get_variable(name+"/theta",[num_inputs, num_kernels, dim_per_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name+"/lws",[num_kernels, dim_per_kernel], initializer=tf.constant_initializer(0.0))
    W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)),0))
    W = tf.reshape(W,[-1,num_kernels*dim_per_kernel])
    x = input
    x=tf.reshape(x, [batchsize,num_inputs])
    activation = tf.matmul(x, W)
    activation = tf.reshape(activation,[-1,num_kernels,dim_per_kernel])
    abs_dif = tf.mul(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation,3),tf.expand_dims(tf.transpose(activation,[1,2,0]),0))),2),
                                                1-tf.expand_dims(tf.constant(np.eye(batchsize),dtype=np.float32),1))
    f = tf.reduce_sum(tf.exp(-abs_dif),2)/tf.reduce_sum(tf.exp(-abs_dif))
    print(f.get_shape())
    print(input.get_shape())
    return tf.concat(1,[x, f]) 
Example #16
Source File: losses.py    From inception_v3 with Apache License 2.0
def l2_loss(tensor, weight=1.0, scope=None):
  """Define a L2Loss, useful for regularize, i.e. weight decay.

  Args:
    tensor: tensor to regularize.
    weight: an optional weight to modulate the loss.
    scope: Optional scope for op_scope.

  Returns:
    the L2 loss op.
  """
  with tf.op_scope([tensor], scope, 'L2Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.nn.l2_loss(tensor), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss 
Example #17
Source File: losses.py    From inception_v3 with Apache License 2.0
def l1_loss(tensor, weight=1.0, scope=None):
  """Define a L1Loss, useful for regularize, i.e. lasso.

  Args:
    tensor: tensor to regularize.
    weight: scale the loss by this factor.
    scope: Optional scope for op_scope.

  Returns:
    the L1 loss op.
  """
  with tf.op_scope([tensor], scope, 'L1Loss'):
    weight = tf.convert_to_tensor(weight,
                                  dtype=tensor.dtype.base_dtype,
                                  name='loss_weight')
    loss = tf.mul(weight, tf.reduce_sum(tf.abs(tensor)), name='value')
    tf.add_to_collection(LOSSES_COLLECTION, loss)
    return loss 
Example #18
Source File: nnlib.py    From rec-attend-public with MIT License
def weight_variable(shape,
                    initializer=None,
                    init_val=None,
                    wd=None,
                    name=None,
                    trainable=True):
  """Initialize weights.
  Args:
    shape: shape of the weights, list of int
    wd: weight decay
  """
  log = logger.get()
  if initializer is None:
    initializer = tf.truncated_normal_initializer(stddev=0.01)
  if init_val is None:
    var = tf.Variable(initializer(shape), name=name, trainable=trainable)
  else:
    var = tf.Variable(init_val, name=name, trainable=trainable)
  if wd:
    weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Example #19
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def log_normal_pdf(x, mu, sigma):
    d = mu - x
    d2 = tf.mul(-1., tf.mul(d,d))
    s2 = tf.mul(2., tf.mul(sigma,sigma))
    return tf.reduce_sum(tf.div(d2,s2) - tf.log(tf.mul(sigma, 2.506628)), reduction_indices=1, keep_dims=True)  # 2.506628 ~ sqrt(2*pi)
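
For reference, the same per-row quantity in NumPy/SciPy (a sanity-check sketch, not project code):

import numpy as np
from scipy.stats import norm

x = np.array([[0.5, -1.0]])
mu = np.zeros((1, 2))
sigma = np.ones((1, 2))
# sum of per-dimension Gaussian log densities, keeping the row dimension
print(norm.logpdf(x, loc=mu, scale=sigma).sum(axis=1, keepdims=True))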
Example #20
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def log_kumar_pdf(v, a, b):
    return tf.reduce_sum(tf.mul(a-1, tf.log(v)) + tf.mul(b-1, tf.log(1-tf.pow(v,a))) + tf.log(a) + tf.log(b), reduction_indices=1, keep_dims=True) 
Example #21
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def gauss_cross_entropy(mu_post, sigma_post, mu_prior, sigma_prior):
    d = (mu_post - mu_prior)
    d = tf.mul(d,d)
    return tf.reduce_sum(-tf.div(d + tf.mul(sigma_post,sigma_post),(2.*sigma_prior*sigma_prior)) - tf.log(sigma_prior*2.506628), reduction_indices=1, keep_dims=True) 
Example #22
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def compose_stick_segments(self, v):

        segments = []
        self.remaining_stick = [tf.ones((tf.shape(v)[0],1))]
        for i in xrange(self.K-1):
            curr_v = tf.expand_dims(v[:,i],1)
            segments.append( tf.mul(curr_v, self.remaining_stick[-1]) )
            self.remaining_stick.append( tf.mul(1-curr_v, self.remaining_stick[-1]) )
        segments.append(self.remaining_stick[-1])

        return segments 
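
This is standard stick-breaking: pi_k = v_k * prod_{j<k} (1 - v_j), with the last segment taking whatever stick remains, so the K pieces sum to 1. A NumPy sketch of the same construction (illustrative values):

import numpy as np

v = np.array([[0.5, 0.25]])  # batch of 1, K-1 = 2 break fractions (K = 3)
remaining = np.ones((1, 1))
segments = []
for i in range(v.shape[1]):
    curr_v = v[:, i:i+1]
    segments.append(curr_v * remaining)
    remaining = (1 - curr_v) * remaining
segments.append(remaining)
print(np.hstack(segments))  # [[0.5 0.125 0.375]], sums to 1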
Example #23
Source File: siamese_network.py    From deep-siamese-text-similarity with MIT License
def contrastive_loss(self, y, d, batch_size):
        tmp = y * tf.square(d)
        # tmp = tf.mul(y, tf.square(d))
        tmp2 = (1 - y) * tf.square(tf.maximum((1 - d), 0))
        return tf.reduce_sum(tmp + tmp2) / batch_size / 2
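
This is the standard contrastive loss, L = y*d^2 + (1-y)*max(1-d, 0)^2 with margin 1, summed and then averaged over the batch (the trailing /2 halves it). A quick scalar check in plain Python:

y, d = 1.0, 0.2
print(y * d**2 + (1 - y) * max(1 - d, 0)**2)  # 0.04: a similar pair at distance 0.2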
Example #24
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def get_ELBO(self):
        a_inv = tf.pow(self.kumar_a,-1)
        b_inv = tf.pow(self.kumar_b,-1)

        # compute Kumaraswamy means
        v_means = tf.mul(self.kumar_b, beta_fn(1.+a_inv, self.kumar_b))

        # compute Kumaraswamy samples
        uni_samples = tf.random_uniform(tf.shape(v_means), minval=1e-8, maxval=1-1e-8) 
        v_samples = tf.pow(1-tf.pow(uni_samples, b_inv), a_inv)

        # compose into stick segments using pi = v \prod (1-v)
        self.pi_means = self.compose_stick_segments(v_means)
        self.pi_samples = self.compose_stick_segments(v_samples)
    
        # compose elbo
        elbo = tf.mul(self.pi_means[0], -compute_nll(self.X, self.x_recons_linear[0]) + gauss_cross_entropy(self.mu[0], self.sigma[0], self.prior['mu'][0], self.prior['sigma'][0]))
        for k in xrange(self.K-1):
            elbo += tf.mul(self.pi_means[k+1], -compute_nll(self.X, self.x_recons_linear[k+1]) \
                               + gauss_cross_entropy(self.mu[k+1], self.sigma[k+1], self.prior['mu'][k+1], self.prior['sigma'][k+1]))
            elbo -= compute_kumar2beta_kld(tf.expand_dims(self.kumar_a[:,k],1), tf.expand_dims(self.kumar_b[:,k],1), \
                                               self.prior['dirichlet_alpha'], (self.K-1-k)*self.prior['dirichlet_alpha'])

        elbo += mcMixtureEntropy(self.pi_samples, self.z, self.mu, self.sigma, self.K)

        return tf.reduce_mean(elbo) 
Example #25
Source File: gaussMMVAE_collapsed.py    From mixture_density_VAEs with MIT License
def init_mlp(layer_sizes, std=.01, bias_init=0.):
    params = {'w':[], 'b':[]}
    for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        params['w'].append(tf.Variable(tf.random_normal([n_in, n_out], stddev=std)))
        params['b'].append(tf.Variable(tf.mul(bias_init, tf.ones([n_out,]))))
    return params 
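
A small usage sketch (layer sizes are illustrative):

params = init_mlp([784, 256, 10])
# two layers: weight shapes [784, 256] and [256, 10], biases [256] and [10],
# with biases initialized via tf.mul(bias_init, tf.ones(...)) as above
print(len(params['w']), len(params['b']))  # 2 2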
Example #26
Source File: processor_sdc.py    From tensorflow-litterbox with Apache License 2.0
def process_example(self, tensors, mode='eval', thread_id=0):
        train = (mode == 'train')
        image, image_timestamp, camera_id = tensors[:3]

        #FIXME push single/multi image handling into image_process_sdc if we want to share random augmentations
        if self.num_input_images > 1:
            assert(len(image.get_shape()) > 0)
            print('Multi image', image.get_shape())
            split_image = tf.unpack(image)
            split_processed = []
            for i, x in enumerate(split_image):
                suffix = '%d' % i
                xp, _ = image_preprocess_sdc(
                    x, camera_id,
                    height=self.height, width=self.width, image_fmt=self.image_fmt,
                    normalize=self.standardize_input, train=train, summary_suffix=suffix, thread_id=thread_id)
                split_processed.append(xp)
            processed_image = tf.pack(split_processed)
            #FIXME need to sort out flip across mult-images
            flip_coeff = tf.constant(1.0, dtype=tf.float32)
        else:
            print('Single image')
            processed_image, flip_coeff = image_preprocess_sdc(
                image, camera_id,
                height=self.height, width=self.width, image_fmt=self.image_fmt,
                normalize=self.standardize_input, train=train, thread_id=thread_id)

        if mode != 'pred':
            steering_angle, gps_coord = tensors[-2:]
            if steering_angle is not None:
                steering_angle = tf.mul(steering_angle, flip_coeff)
                if self.standardize_labels:
                    steering_angle /= STEERING_STD
                elif self.mu_law_steering:
                    print("Encode mu-law angles")
                    steering_angle = mu_law_steering_enc(steering_angle)
            if gps_coord is not None and self.standardize_labels:
                gps_coord = (gps_coord - GPS_MEAN) / GPS_STD
            return processed_image, image_timestamp, steering_angle, gps_coord
        else:
            return processed_image, image_timestamp, tf.zeros((1,)), tf.zeros((2,)) 
Example #27
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_b_res(net, ver=2, res_scale=None, scope='BlockB', activation_fn=tf.nn.relu):
    # 17 x 17 grid

    # configure branch filter numbers
    if ver == 1:
        br1_num = 128
        br2_num = 128
        br2_inc = 0
    else:
        br1_num = 192
        br2_num = 128
        br2_inc = 32

    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        shortcut = tf.identity(net, name='Shortcut')
        if res_scale:
            shortcut = tf.mul(shortcut, res_scale)  # scale residual
        with tf.variable_scope('Br1_1x1'):
            br1 = layers.conv2d(net, br1_num, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_7x7'):
            br2 = layers.conv2d(net, br2_num, [1, 1], scope='Conv1_1x1')
            br2 = layers.conv2d(br2, br2_num + 1*br2_inc, [1, 7], scope='Conv2_1x7')
            br2 = layers.conv2d(br2, br2_num + 2*br2_inc, [7, 1], scope='Conv3_7x1')
        net = tf.concat(3, [br1, br2], name='Concat1')
        net = layers.conv2d(net, shortcut.get_shape()[-1], [1, 1], activation_fn=None, scope='Conv4_1x1')
        # 17 x 17 x 896 res-v1, 1152 res-v2. Typo in paper, 1152, not 1154
        net = activation_fn(tf.add(shortcut, net, name='Sum1'))
    return net 
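
The res_scale pattern used by these blocks, isolated (a sketch mirroring the code here, where the identity shortcut is scaled before the branch output is added back; assumes TF <= 0.12 for tf.mul):

import tensorflow as tf

def scaled_residual_sum(shortcut, branch_out, res_scale=0.2, activation_fn=tf.nn.relu):
    # mirrors _block_*_res above: optionally scale the shortcut, add the branch, then activate
    if res_scale:
        shortcut = tf.mul(shortcut, res_scale)
    return activation_fn(tf.add(shortcut, branch_out))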
Example #28
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_a_res(net, ver=2, res_scale=None, scope='BlockA', activation_fn=tf.nn.relu):
    # 35x35 grid

    # configure branch filter numbers
    br3_num = 32
    if ver == 1:
        br3_inc = 0
    else:
        br3_inc = 16

    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        shortcut = tf.identity(net, name='Shortcut')
        if res_scale:
            shortcut = tf.mul(shortcut, res_scale)  # scale residual
        with tf.variable_scope('Br1_1x1'):
            br1 = layers.conv2d(net, 32, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_3x3'):
            br2 = layers.conv2d(net, 32, [1, 1], scope='Conv1_1x1')
            br2 = layers.conv2d(br2, 32, [3, 3], scope='Conv2_3x3')
        with tf.variable_scope('Br3_3x3Dbl'):
            br3 = layers.conv2d(net, br3_num, [1, 1], scope='Conv1_1x1')
            br3 = layers.conv2d(br3, br3_num + 1*br3_inc, [3, 3], scope='Conv2_3x3')
            br3 = layers.conv2d(br3, br3_num + 2*br3_inc, [3, 3], scope='Conv3_3x3')
        net = tf.concat(3, [br1, br2, br3], name='Concat1')
        net = layers.conv2d(net, shortcut.get_shape()[-1], [1, 1], activation_fn=None, scope='Conv4_1x1')
        net = activation_fn(tf.add(shortcut, net, name='Sum1'))
        # 35 x 35 x 256 res-v1, 384 res-v2
    return net 
Example #29
Source File: build_inception_v4.py    From tensorflow-litterbox with Apache License 2.0
def _block_c_res(net, ver=2, res_scale=None, scope='BlockC', activation_fn=tf.nn.relu):
    # 8 x 8 grid

    # configure branch filter numbers
    br2_num = 192
    if ver == 1:
        br2_inc = 0
    else:
        br2_inc = 32

    # default padding = SAME
    # default stride = 1
    with tf.variable_scope(scope):
        shortcut = tf.identity(net, name='Shortcut')
        if res_scale:
            shortcut = tf.mul(shortcut, res_scale)  # scale residual
        with tf.variable_scope('Br1_1x1'):
            br1 = layers.conv2d(net, 192, [1, 1], scope='Conv1_1x1')
        with tf.variable_scope('Br2_3x3'):
            br2 = layers.conv2d(net, br2_num, [1, 1], scope='Conv1_1x1')
            br2 = layers.conv2d(br2, br2_num + 1*br2_inc, [1, 3], scope='Conv2_1x3')
            br2 = layers.conv2d(br2, br2_num + 2*br2_inc, [3, 1], scope='Conv3_3x1')
        net = tf.concat(3, [br1, br2], name='Concat1')
        net = layers.conv2d(net, shortcut.get_shape()[-1], [1, 1], activation_fn=None, scope='Conv4_1x1')
        # 1792 res-1, 2144 (2048?) res-2
        net = activation_fn(tf.add(shortcut, net, name='Sum1'))
    return net