Python tensorflow.reciprocal() Examples

The following are 26 code examples of tensorflow.reciprocal(), collected from open-source projects. Each example lists the source file and project it was taken from. tf.reciprocal() computes the element-wise reciprocal 1/x; note that this is the TensorFlow 1.x name of the op, which lives at tf.math.reciprocal() in TensorFlow 2.x. Because the reciprocal of zero is inf, most of the examples below guard the denominator with tf.maximum, tf.where, or a small epsilon.
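A minimal usage sketch (TF 1.x graph mode; the constant values are just for illustration):

import tensorflow as tf  # TF 1.x; in TF 2.x use tf.math.reciprocal

x = tf.constant([1.0, 2.0, 4.0])
y = tf.reciprocal(x)  # element-wise 1 / x

with tf.Session() as sess:
    print(sess.run(y))  # [1.   0.5  0.25]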
Example #1
Source File: attention.py    From glas with Apache License 2.0
def _get_filter(self, data, grid, scope=None):
        """ Generate an attention filter """
        with tf.variable_scope(scope, 'filter', [data]):
            x_offset, y_offset, log_stride, log_scale, log_gamma = tf.split(
                layers.linear(data, 5, scope='parameters'), 5, axis=1)

            center = self._get_center(grid, (x_offset, y_offset), tf.exp(log_stride))

            scale = tf.expand_dims(tf.maximum(tf.exp(log_scale), self.epsilon), -1)
            filter_x = 1 + tf.square((self.data_x - center[0]) / tf.maximum(scale, self.epsilon))
            filter_y = 1 + tf.square((self.data_y - center[1]) / tf.maximum(scale, self.epsilon))

            filter_x = tf.reciprocal(tf.maximum(pi * scale * filter_x, self.epsilon))
            filter_y = tf.reciprocal(tf.maximum(pi * scale * filter_y, self.epsilon))

            return filter_x, filter_y, tf.exp(log_gamma) 
Example #2
Source File: messaging_cell_helpers.py    From mac-graph with The Unlicense
def layer_normalize(tensor):
	'''Apologies if I've abused this term'''

	in_shape = tf.shape(tensor)
	axes = list(range(1, len(tensor.shape)))

	# Keep batch axis
	t = tf.reduce_sum(tensor, axis=axes )
	t += EPSILON
	t = tf.reciprocal(t)
	t = tf.check_numerics(t, "1/sum")

	# Scale each batch element by the reciprocal of its sum; the einsum assumes rank-3 (batch, rows, cols) input
	tensor = tf.einsum('brc,b->brc', tensor, t)

	tensor = dynamic_assert_shape(tensor, in_shape, "layer_normalize_tensor")
	return tensor 
Example #3
Source File: messaging_cell_helpers.py    From shortest-path with The Unlicense
def layer_normalize(tensor):
	'''Apologies if I've abused this term'''

	in_shape = tf.shape(tensor)
	axes = list(range(1, len(tensor.shape)))

	# Keep batch axis
	t = tf.reduce_sum(tensor, axis=axes )
	t += EPSILON
	t = tf.reciprocal(t)
	t = tf.check_numerics(t, "1/sum")

	# Scale each batch element by the reciprocal of its sum; the einsum assumes rank-3 (batch, rows, cols) input
	tensor = tf.einsum('brc,b->brc', tensor, t)

	tensor = dynamic_assert_shape(tensor, in_shape, "layer_normalize_tensor")
	return tensor 
Example #4
Source File: sparse.py    From sparse_convolution with MIT License
def sparse_conv(tensor,binary_mask = None,filters=32,kernel_size=3,strides=2,l2_scale=0.0):

    if binary_mask is None: #first layer has no binary mask
        b,h,w,c = tensor.get_shape()
        channels=tf.split(tensor,c,axis=3)
        #assume that if one channel has no information, all channels have no information
        binary_mask = tf.where(tf.equal(channels[0], 0), tf.zeros_like(channels[0]), tf.ones_like(channels[0])) #mask should only have the size of (B,H,W,1)

    features = tf.multiply(tensor,binary_mask)
    features = tf.layers.conv2d(
        features, filters=filters, kernel_size=kernel_size,
        strides=(strides, strides), trainable=True, use_bias=False,
        padding="same",
        kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=l2_scale))

    norm = tf.layers.conv2d(
        binary_mask, filters=filters, kernel_size=kernel_size,
        strides=(strides, strides), kernel_initializer=tf.ones_initializer(),
        trainable=False, use_bias=False, padding="same")
    norm = tf.where(tf.equal(norm,0),tf.zeros_like(norm),tf.reciprocal(norm))
    _,_,_,bias_size = norm.get_shape()

    bias = tf.Variable(tf.constant(0.0, shape=[bias_size]), trainable=True)
    feature = tf.multiply(features, norm) + bias
    mask = tf.layers.max_pooling2d(binary_mask,strides = strides,pool_size=kernel_size,padding="same")

    return feature,mask 
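The mask normalization above divides each output by the count of valid inputs under the kernel, via tf.reciprocal. A minimal usage sketch, assuming TF 1.x graph mode; the depth placeholder and layer sizes are hypothetical:

# Hypothetical sparse depth map: zeros mark missing measurements.
depth = tf.placeholder(tf.float32, shape=[None, 64, 64, 1])
feat, mask = sparse_conv(depth, filters=16, kernel_size=3, strides=2)
feat, mask = sparse_conv(feat, binary_mask=mask, filters=32, kernel_size=3, strides=2)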
Example #5
Source File: modules.py    From PlaneNet with MIT License
def planeDepthsModule(plane_parameters, width, height, info):
    urange = (tf.range(width, dtype=tf.float32) / (width + 1) * (info[16] + 1) - info[2]) / info[0]
    urange = tf.tile(tf.reshape(urange, [1, -1]), [height, 1])
    vrange = (tf.range(height, dtype=tf.float32) / (height + 1) * (info[17] + 1) - info[6]) / info[5]
    vrange = tf.tile(tf.reshape(vrange, [-1, 1]), [1, width])
            
    ranges = tf.stack([urange, np.ones([height, width]), -vrange], axis=2)
    ranges = tf.reshape(ranges, [-1, 3])
            
    planesD = tf.norm(plane_parameters, axis=1, keep_dims=True)
    planesD = tf.clip_by_value(planesD, 1e-5, 10)
    planesNormal = tf.div(tf.negative(plane_parameters), tf.tile(planesD, [1, 3]))

    normalXYZ = tf.matmul(ranges, planesNormal, transpose_b=True)
    normalXYZ = tf.multiply(tf.sign(normalXYZ), tf.clip_by_value(tf.abs(normalXYZ), 1e-4, 1000000))
    normalXYZ = tf.reciprocal(normalXYZ)
    plane_depths = tf.negative(normalXYZ) * tf.reshape(planesD, [-1])
    plane_depths = tf.reshape(plane_depths, [height, width, -1])

    plane_depths = tf.clip_by_value(plane_depths, 0, 10)
    
    return plane_depths 
Example #6
Source File: modules.py    From PlaneNet with MIT License
def planeDepthsModule(plane_parameters, width, height):
    focalLength = 517.97
    urange = (tf.range(width, dtype=tf.float32) / (width + 1) - 0.5) / focalLength * 641
    urange = tf.tile(tf.reshape(urange, [1, -1]), [height, 1])
    vrange = (tf.range(height, dtype=tf.float32) / (height + 1) - 0.5) / focalLength * 481
    vrange = tf.tile(tf.reshape(vrange, [-1, 1]), [1, width])
            
    ranges = tf.stack([urange, np.ones([height, width]), -vrange], axis=2)
    ranges = tf.reshape(ranges, [-1, 3])
            
    planesD = tf.norm(plane_parameters, axis=1, keep_dims=True)
    planesD = tf.clip_by_value(planesD, 1e-5, 10)
    planesNormal = tf.div(tf.negative(plane_parameters), tf.tile(planesD, [1, 3]))

    normalXYZ = tf.matmul(ranges, planesNormal, transpose_b=True)
    normalXYZ = tf.multiply(tf.sign(normalXYZ), tf.clip_by_value(tf.abs(normalXYZ), 1e-4, 1000000))
    normalXYZ = tf.reciprocal(normalXYZ)
    plane_depths = tf.negative(normalXYZ) * tf.reshape(planesD, [-1])
    plane_depths = tf.reshape(plane_depths, [height, width, -1])

    plane_depths = tf.clip_by_value(plane_depths, 0, 10)
    
    return plane_depths 
Example #7
Source File: test_tf_wpe.py    From nara_wpe with MIT License
def test_recursive_wpe(self):
        with self.test_session() as sess:
            T = 5000
            D = 2
            K = 1
            delay = 3
            Y = np.random.normal(size=(D, T)) \
                + 1j * np.random.normal(size=(D, T))
            Y = tf.convert_to_tensor(Y[None])
            power = tf.reduce_mean(tf.real(Y) ** 2 + tf.imag(Y) ** 2, axis=1)
            inv_power = tf.reciprocal(power)
            step_enhanced = tf_wpe.wpe_step(
                Y, inv_power, taps=K, delay=D)
            recursive_enhanced = tf_wpe.recursive_wpe(
                tf.transpose(Y, (2, 0, 1)),
                tf.transpose(power),
                1.,
                taps=K,
                delay=D,
                only_use_final_filters=True
            )
            recursive_enhanced = tf.transpose(recursive_enhanced, (1, 2, 0))
            recursive_enhanced, step_enhanced = sess.run(
                [recursive_enhanced, step_enhanced]
            )
        np.testing.assert_allclose(
            recursive_enhanced[..., -200:],
            step_enhanced[..., -200:],
            atol=0.01, rtol=0.2
        ) 
Example #8
Source File: matrix_structures.py    From VFF with Apache License 2.0
def inv(self):
        # Woodbury identity: (D + W W^T)^-1 = D^-1 - (D^-1 W) M^-1 (D^-1 W)^T
        # with M = I + W^T D^-1 W, returned as a diagonal minus a low-rank part.
        di = tf.reciprocal(self.d)
        d_col = tf.expand_dims(self.d, 1)
        DiW = self.W / d_col
        M = tf.eye(tf.shape(self.W)[1], dtype=float_type) + tf.matmul(tf.transpose(DiW), self.W)
        L = tf.cholesky(M)
        v = tf.transpose(tf.matrix_triangular_solve(L, tf.transpose(DiW), lower=True))
        return LowRankMatNeg(di, v)
Example #9
Source File: matrix_structures.py    From VFF with Apache License 2.0
def inv(self):
        # Sherman-Morrison: (D + v v^T)^-1 = D^-1 - (D^-1 v)(D^-1 v)^T / (1 + v^T D^-1 v)
        di = tf.reciprocal(self.d)
        Div = self.v * di
        M = 1. + tf.reduce_sum(Div * self.v)
        v_new = Div / tf.sqrt(M)
        return Rank1MatNeg(di, v_new)
Example #10
Source File: matrix_structures.py    From VFF with Apache License 2.0
def inv(self):
        return DiagMat(tf.reciprocal(self.d)) 
Example #11
Source File: blocks.py    From fold with Apache License 2.0
def _tf_safe_reciprocal(x):
  # Where x == 0 the mask adds 1, so the result is 1/x elsewhere and 1.0 at
  # zero, avoiding an inf from division by zero.
  return tf.reciprocal(x + tf.cast(tf.equal(x, 0), x.dtype))
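A quick check of the zero guard (values assume float inputs):

x = tf.constant([2.0, 0.0, 0.5])
y = _tf_safe_reciprocal(x)  # evaluates to [0.5, 1.0, 2.0]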
Example #12
Source File: tf_wpe.py    From nara_wpe with MIT License
def get_power_inverse(signal):
    """Calculates the inverse power for `signal`.

    Args:
        signal (tf.Tensor): Single frequency signal with shape (D, T).
    Returns:
        tf.Tensor: Inverse power with shape (T,)

    """
    power = get_power(signal)
    eps = 1e-10 * tf.reduce_max(power)
    inverse_power = tf.reciprocal(tf.maximum(power, eps))
    return inverse_power 
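The epsilon here is relative to the largest power rather than a fixed constant, so the guard scales with the signal. A hypothetical call, with random data standing in for a single-frequency STFT and assuming get_power from the same module is in scope:

# D = 2 channels, T = 100 frames (shapes taken from the docstring).
signal = tf.complex(tf.random_normal([2, 100]), tf.random_normal([2, 100]))
inv_power = get_power_inverse(signal)  # shape (100,)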
Example #13
Source File: model.py    From incremental-sequence-learning with MIT License
def get_mixture_coef( self, args, output ):
      # returns the tf slices containing mdn dist params
      # ie, eq 18 -> 23 of http://arxiv.org/abs/1308.0850
      z = output    

      #get the remaining parameters
      last = args.nroutputvars_raw - args.nrClassOutputVars
      
      z_eos = z[ :, 0 ]
      z_eos = tf.sigmoid( z_eos ) #eos: sigmoid, eq 18

      z_eod = z[ :, 1 ]
      z_eod = tf.sigmoid( z_eod ) #eod: sigmoid

      z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr = tf.split( z[ :, 2:last ], 6, 1 ) #eq 20: mu1, mu2: no transformation required

      # process output z's into MDN parameters

      # softmax all the pi's:
      max_pi = tf.reduce_max( z_pi, 1, keep_dims = True )
      z_pi = tf.subtract( z_pi, max_pi ) #EdJ: subtract max pi for numerical stabilization

      z_pi = tf.exp( z_pi ) #eq 19
      normalize_pi = tf.reciprocal( tf.reduce_sum( z_pi, 1, keep_dims = True ) )
      z_pi = tf.multiply( normalize_pi, z_pi ) #19

      # exponentiate the sigmas and also make corr between -1 and 1.
      z_sigma1 = tf.exp( z_sigma1 ) #eq 21
      z_sigma2 = tf.exp( z_sigma2 )
      z_corr_tanh = tf.tanh( z_corr ) #eq 22
      z_corr_tanh = .95 * z_corr_tanh #avoid -1 and 1 

      z_corr_tanh_adj = z_corr_tanh 

      return [ z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr_tanh_adj, z_eos, z_eod ] 
Example #14
Source File: losses.py    From label-reg with Apache License 2.0
def cauchy_kernel1d(sigma):  # this is an approximation
    if sigma == 0:
        return 0
    else:
        tail = int(sigma*5)
        # The constant factor sigma*pi of the Cauchy density (see the commented
        # line below) cancels in the normalisation, so it can be dropped.
        # k = tf.reciprocal(([((x/sigma)**2+1)*sigma*3.141592653589793 for x in range(-tail, tail+1)]))
        k = tf.reciprocal([((x/sigma)**2+1) for x in range(-tail, tail + 1)])
        return k / tf.reduce_sum(k)
Example #15
Source File: losses.py    From Pixel2Mesh with Apache License 2.0
def laplace_coord(pred, placeholders, block_id):
	vertex = tf.concat([pred, tf.zeros([1,3])], 0)
	indices = placeholders['lape_idx'][block_id-1][:, :8]
	weights = tf.cast(placeholders['lape_idx'][block_id-1][:,-1], tf.float32)

	weights = tf.tile(tf.reshape(tf.reciprocal(weights), [-1,1]), [1,3])
	laplace = tf.reduce_sum(tf.gather(vertex, indices), 1)
	laplace = tf.subtract(pred, tf.multiply(laplace, weights))
	return laplace 
Example #16
Source File: quasi_newton.py    From neupy with MIT License
def safe_reciprocal(value, epsilon):
    """
    Same as the regular reciprocal in TensorFlow, except that the result is
    clipped to the range [-1/epsilon, 1/epsilon]. Inputs with magnitude
    smaller than ``epsilon`` therefore behave as if capped to ``epsilon``.
    """
    inv_epsilon = 1. / epsilon
    return tf.clip_by_value(
        tf.reciprocal(value),
        -inv_epsilon,
        inv_epsilon
    ) 
Example #17
Source File: messaging_cell_helpers.py    From mac-graph with The Unlicense
def calc_normalized_adjacency(context, node_state):
	# Aggregate via adjacency matrix with normalisation (that does not include self-edges)
	adj = tf.cast(context.features["kb_adjacency"], tf.float32)
	degree = tf.reduce_sum(adj, -1, keepdims=True)
	inv_degree = tf.reciprocal(degree)  # may be inf for zero-degree nodes; masked to zero below
	node_mask = tf.expand_dims(tf.sequence_mask(context.features["kb_nodes_len"], context.args["kb_node_max_len"]), -1)
	inv_degree = tf.where(node_mask, inv_degree, tf.zeros(tf.shape(inv_degree)))
	inv_degree = tf.where(tf.greater(degree, 0), inv_degree, tf.zeros(tf.shape(inv_degree)))
	inv_degree = tf.check_numerics(inv_degree, "inv_degree")
	adj_norm = inv_degree * adj
	adj_norm = tf.cast(adj_norm, node_state.dtype)
	adj_norm = tf.check_numerics(adj_norm, "adj_norm")
	node_incoming = tf.einsum('bnw,bnm->bmw', node_state, adj_norm)

	return node_incoming 
Example #18
Source File: fwgrad_tests.py    From tensorflow-forward-ad with MIT License
def test_basic(self):
    with tf.Graph().as_default(), self.test_session() as sess:
      rnd = np.random.RandomState(0)
      x = self.get_random_tensor([18, 12], rnd=rnd)
      y = tf.reciprocal(x)
      self.assert_bw_fw(sess, x, y, rnd=rnd) 
Example #19
Source File: losses.py    From Pixel2MeshPlusPlus with BSD 3-Clause "New" or "Revised" License
def laplace_coord(pred, placeholders, block_id):
    vertex = tf.concat([pred, tf.zeros([1, 3])], 0)
    indices = placeholders['lape_idx'][block_id - 1][:, :8]
    weights = tf.cast(placeholders['lape_idx'][block_id - 1][:, -1], tf.float32)

    weights = tf.tile(tf.reshape(tf.reciprocal(weights), [-1, 1]), [1, 3])
    laplace = tf.reduce_sum(tf.gather(vertex, indices), 1)
    laplace = tf.subtract(pred, tf.multiply(laplace, weights))
    return laplace 
Example #20
Source File: layers.py    From text-gan-tensorflow with MIT License
def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    # random_tensor lies in [keep_prob, 1 + keep_prob), so flooring yields 1
    # with probability keep_prob and 0 otherwise.
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        # Inverted dropout: scale kept units by 1/keep_prob at training time.
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask
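A hypothetical application of the mask to an activation tensor h (the name and shape are assumptions for illustration):

h = tf.random_normal([32, 128])
mask = _apply_dropout_mask(tf.shape(h), keep_prob=0.9)
h_dropped = h * mask  # dropped units are zeroed; kept units are scaled by 1/0.9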
Example #21
Source File: tf_lib.py    From phillip with GNU General Public License v3.0
def power(x, p):
  if p == 1:
    return x
  if p == -1:
    return tf.reciprocal(x)
  return tf.pow(x, p) 
Example #22
Source File: hc_deeplab.py    From SIGGRAPH18SSS with MIT License
def lossfunction(self, tweightmat, tindicator, tembeddings):

		with tf.variable_scope('loss_computation') as scope:
			# tembeddings: #pts x 64
			sqrvals = tf.reduce_sum(tf.square(tembeddings), 1, keep_dims=True)
			# sqrvals: #pts x 1
			sqrvalsmat = tf.tile(sqrvals, [1, tf.shape(sqrvals)[0]])
			sqrvalsmat2 = tf.add(sqrvalsmat,tf.transpose(sqrvalsmat))
			distmat =  tf.add(sqrvalsmat2, tf.scalar_mul(-2.0, tf.matmul(tembeddings,  tf.transpose(tembeddings))))/64.0

			sigmamat = tf.scalar_mul(2.0, tf.reciprocal(1.0+tf.exp(distmat)))
			posnegmapping = tf.log(tf.add(tf.scalar_mul(0.5, 1.0-tindicator), tf.multiply(tindicator, sigmamat)))
			wcrossentropy = tf.multiply(tf.negative(tindicator+2.0), posnegmapping)
			lossval = tf.reduce_mean(wcrossentropy)
		return lossval 
Example #23
Source File: estimator.py    From training_results_v0.5 with Apache License 2.0
def _compute_tower_grads(self, tower_loss, tower_params, use_fp16=False,
                           loss_scale=None, colocate_gradients_with_ops=True):
    """docstring."""
    if use_fp16:
      assert loss_scale
      scaled_loss = tf.multiply(
          tower_loss,
          tf.convert_to_tensor(loss_scale, dtype=tower_loss.dtype),
          name="scaling_loss")
    else:
      scaled_loss = tower_loss

    grads = tf.gradients(
        scaled_loss, tower_params,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    assert grads
    for g in grads:
      assert g.dtype == tf.float32, "grad.dtype isn't fp32: %s" % g.name
    # Downscale grads
    for var, grad in zip(tower_params, grads):
      if grad is None:
        misc_utils.print_out("%s gradient is None!" % var.name)

    if use_fp16:
      grads = [
          grad * tf.reciprocal(loss_scale) for grad in grads
      ]
    return tower_params, grads 
Example #24
Source File: messaging_cell.py    From shortest-path with The Unlicense
def calc_normalized_adjacency(context, node_state):
	# Aggregate via adjacency matrix with normalisation (that does not include self-edges)
	adj = tf.cast(context.features["kb_adjacency"], tf.float32)
	degree = tf.reduce_sum(adj, -1, keepdims=True)
	inv_degree = tf.reciprocal(degree)  # may be inf for zero-degree nodes; masked to zero below
	node_mask = tf.expand_dims(tf.sequence_mask(context.features["kb_nodes_len"], context.args["kb_node_max_len"]), -1)
	inv_degree = tf.where(node_mask, inv_degree, tf.zeros(tf.shape(inv_degree)))
	inv_degree = tf.where(tf.greater(degree, 0), inv_degree, tf.zeros(tf.shape(inv_degree)))
	inv_degree = tf.check_numerics(inv_degree, "inv_degree")
	adj_norm = inv_degree * adj
	adj_norm = tf.cast(adj_norm, node_state.dtype)
	adj_norm = tf.check_numerics(adj_norm, "adj_norm")
	node_incoming = tf.einsum('bnw,bnm->bmw', node_state, adj_norm)

	return node_incoming 
Example #25
Source File: attention.py    From glas with Apache License 2.0
def write(self, data):
        """ Do a filtered write given the data """
        if not self.write_grid:
            raise ValueError('Writing is not supported')

        filter_x, filter_y, gamma = self.get_filter(data, self.write_grid, scope='write/filter')

        filter_y_transpose = tf.transpose(filter_y, [0, 2, 1])
        window = layers.linear(data, reduce_prod(self.write_grid.size))
        window = tf.reshape(window, (-1, self.write_grid.size[1], self.write_grid.size[0]))
        patch = tf.matmul(filter_y_transpose, tf.matmul(window, filter_x))

        return tf.reciprocal(tf.maximum(gamma, self.epsilon)) * layers.flatten(patch) 
Example #26
Source File: deeplab_model.py    From SketchySceneColorization with MIT License
def _batch_norm(self, name, x):
        """Batch normalization."""
        with tf.variable_scope(name):
            params_shape = [x.get_shape()[-1]]

            beta = tf.get_variable(
                'beta', params_shape, tf.float32,
                initializer=tf.constant_initializer(0.0, tf.float32),
                trainable=False)
            gamma = tf.get_variable(
                'gamma', params_shape, tf.float32,
                initializer=tf.constant_initializer(1.0, tf.float32),
                trainable=False)
            factor = tf.get_variable(
                'factor', 1, tf.float32,
                initializer=tf.constant_initializer(1.0, tf.float32),
                trainable=False)

            if self.bn:
                mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')

                moving_mean = tf.get_variable(
                    'mean', params_shape, tf.float32,
                    initializer=tf.constant_initializer(0.0, tf.float32),
                    trainable=False)
                moving_variance = tf.get_variable(
                    'variance', params_shape, tf.float32,
                    initializer=tf.constant_initializer(1.0, tf.float32),
                    trainable=False)

                self._extra_train_ops.append(moving_averages.assign_moving_average(
                    moving_mean, mean, 0.9))
                self._extra_train_ops.append(moving_averages.assign_moving_average(
                    moving_variance, variance, 0.9))
            else:
                mean = tf.get_variable(
                    'mean', params_shape, tf.float32,
                    initializer=tf.constant_initializer(0.0, tf.float32),
                    trainable=False)
                variance = tf.get_variable(
                    'variance', params_shape, tf.float32,
                    initializer=tf.constant_initializer(1.0, tf.float32),
                    trainable=False)

                # inv_factor = tf.reciprocal(factor)
                inv_factor = tf.div(1., factor)
                mean = tf.multiply(inv_factor, mean)
                variance = tf.multiply(inv_factor, variance)

                # tf.summary.histogram(mean.op.name, mean)
                # tf.summary.histogram(variance.op.name, variance)
            # epsilon used to be 1e-5. Maybe 0.001 solves NaN problem in deeper net.
            y = tf.nn.batch_normalization(
                x, mean, variance, beta, gamma, 0.001)
            y.set_shape(x.get_shape())
            return y