Python tensorflow.linspace() Examples

The following are code examples of tensorflow.linspace(), drawn from open-source projects. The project, source file, and license are listed above each example. You may also want to check out the other available functions and classes of the tensorflow module.
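Before the project examples, here is a minimal sketch of the tf.linspace() call itself; the values in the comments assume TensorFlow 2.x eager execution.

import tensorflow as tf

# tf.linspace(start, stop, num) returns `num` evenly spaced values
# from `start` to `stop`, with both endpoints included.
values = tf.linspace(0.0, 1.0, 5)   # [0.0, 0.25, 0.5, 0.75, 1.0]
print(values.shape)                 # (5,)

# A pattern that recurs in the examples below: two 1-D linspaces
# combined into a normalized 2-D grid.
xs = tf.linspace(-1.0, 1.0, 4)      # width axis
ys = tf.linspace(-1.0, 1.0, 3)      # height axis
xv, yv = tf.meshgrid(xs, ys)        # each of shape (3, 4)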
Example #1
Source File: Dense_Transformer_Networks_3D.py    From Unet_3D with GNU General Public License v3.0
def __init__(self,input_shape,control_points_ratio):
        self.num_batch = input_shape[0]
        self.depth = input_shape[1]
        self.height = input_shape[2]
        self.width = input_shape[3]
        self.num_channels = input_shape[4]
        self.out_height = self.height
        self.out_width = self.width
        self.out_depth = self.depth
        self.X_controlP_number = int(input_shape[3] / \
                                (control_points_ratio))
        self.Y_controlP_number = int(input_shape[2] / \
                                (control_points_ratio))
        self.Z_controlP_number = int(input_shape[1] / \
                                (control_points_ratio))
        init_x = np.linspace(-5,5,self.X_controlP_number)
        init_y = np.linspace(-5,5,self.Y_controlP_number)
        init_z = np.linspace(-5,5,self.Z_controlP_number)
        x_s = np.tile(init_x, [self.Y_controlP_number*self.Z_controlP_number])
        y_s = np.tile(np.repeat(init_y,self.X_controlP_number),[self.Z_controlP_number])
        z_s = np.repeat(init_z,self.X_controlP_number*self.Y_controlP_number)        
        self.initial = np.array([x_s,y_s,z_s]) 
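A standalone NumPy sketch of the control-point layout built in this constructor, assuming a hypothetical 2x2x2 control grid; it shows how the tile/repeat calls flatten the 3-D grid with x varying fastest and z slowest.

import numpy as np

nx = ny = nz = 2                          # hypothetical control-point counts
init = np.linspace(-5, 5, nx)             # same 1-D spacing on each axis
x_s = np.tile(init, ny * nz)              # x varies fastest
y_s = np.tile(np.repeat(init, nx), nz)    # y varies next
z_s = np.repeat(init, nx * ny)            # z varies slowest
initial = np.array([x_s, y_s, z_s])       # shape (3, nx*ny*nz) = (3, 8)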
Example #2
Source File: ops.py    From mac-network with Apache License 2.0
def locationPE(h, w, dim, outDim = -1, addBias = True):    
    x = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, w)), axis = -1)
    y = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, h)), axis = -1)
    i = tf.expand_dims(tf.to_float(tf.range(dim)), axis = 0)

    peSinX = tf.sin(x / (tf.pow(10000.0, i / dim)))
    peCosX = tf.cos(x / (tf.pow(10000.0, i / dim)))
    peSinY = tf.sin(y / (tf.pow(10000.0, i / dim)))
    peCosY = tf.cos(y / (tf.pow(10000.0, i / dim)))

    peSinX = tf.tile(tf.expand_dims(peSinX, axis = 0), [h, 1, 1])
    peCosX = tf.tile(tf.expand_dims(peCosX, axis = 0), [h, 1, 1])
    peSinY = tf.tile(tf.expand_dims(peSinY, axis = 1), [1, w, 1])
    peCosY = tf.tile(tf.expand_dims(peCosY, axis = 1), [1, w, 1]) 

    grid = tf.concat([peSinX, peCosX, peSinY, peCosY], axis = -1)
    dim *= 4
    
    if outDim > 0:
        grid = linear(grid, dim, outDim, addBias = addBias, name = "locationPE")
        dim = outDim

    return grid, dim 
Example #3
Source File: triangle_rasterizer.py    From graphics with Apache License 2.0
def _perspective_correct_barycentrics(vertices_per_pixel, model_to_eye_matrix,
                                      perspective_matrix, image_size_float):
  """Creates the pixels grid and computes barycentrics."""
  # Construct the pixel grid with half-integer pixel centers.
  width = image_size_float[1]
  height = image_size_float[0]
  px = tf.linspace(0.5, width - 0.5, num=int(width))
  py = tf.linspace(0.5, height - 0.5, num=int(height))
  xv, yv = tf.meshgrid(px, py)
  pixel_position = tf.stack((xv, yv), axis=-1)

  return glm.perspective_correct_barycentrics(vertices_per_pixel,
                                              pixel_position,
                                              model_to_eye_matrix,
                                              perspective_matrix,
                                              (width, height)) 
Example #4
Source File: grid.py    From graphics with Apache License 2.0
def _grid(starts, stops, nums):
  """Generates a M-D uniform axis-aligned grid.

  Warning:
    This op is not differentiable. Indeed, the gradients of tf.linspace and
    tf.meshgrid are currently not defined.

  Args:
    starts: A tensor of shape `[M]` representing the start points for each
      dimension.
    stops: A tensor of shape `[M]` representing the end points for each
      dimension.
    nums: A tensor of shape `[M]` representing the number of subdivisions for
      each dimension.

  Returns:
    A tensor of shape `[nums[0], ..., nums[M-1], M]` containing an M-D uniform
      grid.
  """
  params = [tf.unstack(tensor) for tensor in [starts, stops, nums]]
  layout = [tf.linspace(*param) for param in zip(*params)]
  return tf.stack(tf.meshgrid(*layout, indexing="ij"), axis=-1) 
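A minimal usage sketch for the helper above, assuming TensorFlow 2.x eager execution; the resulting shape follows the [nums[0], ..., nums[M-1], M] convention from the docstring.

import tensorflow as tf

# Hypothetical 2-D grid spanning [0, 1] x [-1, 1] with 3 and 5 subdivisions.
starts = tf.constant([0.0, -1.0])
stops = tf.constant([1.0, 1.0])
nums = tf.constant([3, 5])
grid = _grid(starts, stops, nums)
print(grid.shape)  # (3, 5, 2)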
Example #5
Source File: model.py    From DeepWarp with Apache License 2.0
def meshgrid(self, height, width, ones_flag=None):
        # get the mesh-grid over the normalized range (-1, 1)
        # output:
        #   @shape --> 2, H*W  (3, H*W if ones_flag is set)
        #   @explanation --> (0,:) holds all x-coordinates of the mesh
        #                    (1,:) holds all y-coordinates of the mesh
        with tf.variable_scope('meshgrid'):
            y_linspace = tf.linspace(-1., 1., height)
            x_linspace = tf.linspace(-1., 1., width)
            x_coordinates, y_coordinates = tf.meshgrid(x_linspace, y_linspace)
            x_coordinates = tf.reshape(x_coordinates, shape=[-1])
            y_coordinates = tf.reshape(y_coordinates, shape=[-1])
            if ones_flag is None:
                indices_grid = tf.stack([x_coordinates, y_coordinates], axis=0)
            else:
                indices_grid = tf.stack([x_coordinates, y_coordinates, tf.ones_like(x_coordinates)], axis=0)
            return indices_grid 
Example #6
Source File: problems.py    From tfdiffeq with MIT License
def construct_problem(device, npts=10, ode='constant', reverse=False):
    with tf.device(device):
        f = PROBLEMS[ode]()

    t_points = tf.linspace(1., 8., npts)
    sol = f.y_exact(t_points)

    def _flip(x, dim):
        # indices = [slice(None)] * len(x.shape)
        # indices[dim] = tf.range(x.shape[dim] - 1, -1, -1, dtype=tf.int64)
        return x[::-1]  # x[list(indices)]

    if reverse:
        t_points = tf.identity(_flip(t_points, 0))
        sol = tf.identity(_flip(sol, 0))

    return f, sol[0], t_points, sol 
Example #7
Source File: geo_utils.py    From DeepMatchVO with MIT License
def meshgrid(batch, height, width, is_homogeneous=True):
    """Construct a 2D meshgrid.

    Args:
      batch: batch size
      height: height of the grid
      width: width of the grid
      is_homogeneous: whether to return in homogeneous coordinates
    Returns:
      x,y grid coordinates [batch, 2 (3 if homogeneous), height, width]
    """
    x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                    tf.transpose(tf.expand_dims(
                        tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
    y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                    tf.ones(shape=tf.stack([1, width])))
    x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
    y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
    if is_homogeneous:
        ones = tf.ones_like(x_t)
        coords = tf.stack([x_t, y_t, ones], axis=0)
    else:
        coords = tf.stack([x_t, y_t], axis=0)
    coords = tf.tile(tf.expand_dims(coords, 0), [batch, 1, 1, 1])
    return coords 
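A usage sketch for meshgrid(), assuming the function above is available and TensorFlow is running eagerly; the sizes are hypothetical.

import tensorflow as tf

# A batch of 4 pixel-coordinate grids for 128x416 images.
coords = meshgrid(batch=4, height=128, width=416, is_homogeneous=True)
print(coords.shape)  # (4, 3, 128, 416): x, y and the homogeneous row of ones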
Example #8
Source File: gradient_tests.py    From tfdiffeq with MIT License
def problem(self):
        tf.keras.backend.set_floatx('float64')

        class Odefunc(tf.keras.Model):

            def __init__(self):
                super(Odefunc, self).__init__()
                self.A = tf.Variable([[-0.1, -2.0], [2.0, -0.1]], dtype=tf.float64)
                self.unused_module = tf.keras.layers.Dense(5, dtype=tf.float64)
                self.unused_module.build((5,))

            def call(self, t, y):
                y = tfdiffeq.cast_double(y)
                return tf.linalg.matvec(self.A, y ** 3)

        y0 = tf.convert_to_tensor([2., 0.], dtype=tf.float64)
        t_points = tf.linspace(
            tf.constant(0., dtype=tf.float64),
            tf.constant(25., dtype=tf.float64),
            10
        )
        func = Odefunc()
        return func, y0, t_points 
Example #9
Source File: spatial_transformer.py    From hfnet with MIT License
def _meshgrid(height, width):
    with tf.name_scope('meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
        return grid 
Example #10
Source File: det_tools.py    From hfnet with MIT License
def soft_argmax_2d(patches_bhwc, patch_size, do_softmax=True, com_strength=10):
    # Returns the relative soft-argmax position, in the -1 to 1 coordinate
    # system of the patch

    width = patch_size
    height = patch_size

    x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                    tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
    y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                    tf.ones(shape=tf.stack([1, width])))
    xy_grid = tf.stack([x_t, y_t], axis=-1)[None] # BHW2

    maxes_bhwc = patches_bhwc
    if do_softmax:
        exps_bhwc = tf.exp(
                        com_strength*(patches_bhwc - tf.reduce_max(
                            patches_bhwc, axis=(1, 2), keep_dims=True)))
        maxes_bhwc = exps_bhwc / (
            tf.reduce_sum(exps_bhwc, axis=(1, 2), keep_dims=True) + 1e-8)

    dxdy = tf.reduce_sum(xy_grid * maxes_bhwc, axis=(1,2))

    return dxdy 
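A shape sketch for soft_argmax_2d(); since the reductions use the legacy keep_dims keyword, this assumes the TF 1.x API (reached here through tf.compat.v1, which still executes eagerly under TF 2). Sizes are hypothetical.

import tensorflow.compat.v1 as tf

# A batch of 8 single-channel 32x32 score patches.
patches = tf.random_uniform([8, 32, 32, 1])
dxdy = soft_argmax_2d(patches, patch_size=32)
# dxdy has shape (8, 2): the soft-argmax (x, y) of each patch,
# expressed in the patch's [-1, 1] coordinate frame.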
Example #11
Source File: spatial_transformer.py    From Recursive-Cascaded-Networks with MIT License
def _meshgrid(self, height, width, depth):
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(0.0,
                                                                tf.cast(width, tf.float32)-1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(0.0,
                                                   tf.cast(height, tf.float32)-1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

        x_t = tf.tile(tf.expand_dims(x_t, 2), [1, 1, depth])
        y_t = tf.tile(tf.expand_dims(y_t, 2), [1, 1, depth])

        z_t = tf.linspace(0.0, tf.cast(depth, tf.float32)-1.0, depth)
        z_t = tf.expand_dims(tf.expand_dims(z_t, 0), 0)
        z_t = tf.tile(z_t, [height, width, 1])

        return x_t, y_t, z_t 
Example #12
Source File: tools.py    From tf-monodepth2 with MIT License
def meshgrid(batch, height, width, is_homogeneous=True):
  """Construct a 2D meshgrid.
  Args:
    batch: batch size
    height: height of the grid
    width: width of the grid
    is_homogeneous: whether to return in homogeneous coordinates
  Returns:
    x,y grid coordinates [batch, 2 (3 if homogeneous), height, width]
  """
  x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                  tf.transpose(tf.expand_dims(
                      tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
  y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                  tf.ones(shape=tf.stack([1, width])))
  x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
  y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
  if is_homogeneous:
    ones = tf.ones_like(x_t)
    coords = tf.stack([x_t, y_t, ones], axis=0)
  else:
    coords = tf.stack([x_t, y_t], axis=0)
  coords = tf.tile(tf.expand_dims(coords, 0), [batch, 1, 1, 1])
  return coords 
Example #13
Source File: learner.py    From seed_rl with Apache License 2.0
def get_actors_epsilon(actor_ids, num_training_actors, num_eval_actors,
                       eval_epsilon):
  """Per-actor epsilon as in Apex and R2D2.

  Args:
    actor_ids: <int32>[inference_batch_size], the actor task IDs (in range
      [0, num_training_actors+num_eval_actors)).
    num_training_actors: Number of training actors. Training actors should have
      IDs in [0, num_training_actors).
    num_eval_actors: Number of evaluation actors. Eval actors should have IDs in
      [num_training_actors, num_training_actors + num_eval_actors).
    eval_epsilon: Epsilon used for eval actors.

  Returns:
    A 1D float32 tensor with one epsilon for each input actor ID.
  """
  # <float32>[num_training_actors + num_eval_actors]
  epsilons = tf.concat(
      [tf.math.pow(0.4, tf.linspace(1., 8., num=num_training_actors)),
       tf.constant([eval_epsilon] * num_eval_actors)],
      axis=0)
  return tf.gather(epsilons, actor_ids) 
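A sketch of the epsilon schedule this helper produces, assuming eager TensorFlow; the actor counts are hypothetical.

import tensorflow as tf

# 4 training actors (ids 0-3) and 2 eval actors (ids 4-5).
actor_ids = tf.constant([0, 3, 4, 5])
eps = get_actors_epsilon(actor_ids,
                         num_training_actors=4,
                         num_eval_actors=2,
                         eval_epsilon=0.01)
# Training actors draw from 0.4 ** linspace(1, 8, 4), i.e. roughly
# [0.4, 0.047, 0.0056, 0.00066]; both eval actors get the fixed 0.01.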
Example #14
Source File: utils.py    From GeoNet with MIT License
def meshgrid(batch, height, width, is_homogeneous=True):
  """Construct a 2D meshgrid.

  Args:
    batch: batch size
    height: height of the grid
    width: width of the grid
    is_homogeneous: whether to return in homogeneous coordinates
  Returns:
    x,y grid coordinates [batch, 2 (3 if homogeneous), height, width]
  """
  x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                  tf.transpose(tf.expand_dims(
                      tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
  y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                  tf.ones(shape=tf.stack([1, width])))
  x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
  y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
  if is_homogeneous:
    ones = tf.ones_like(x_t)
    coords = tf.stack([x_t, y_t, ones], axis=0)
  else:
    coords = tf.stack([x_t, y_t], axis=0)
  coords = tf.tile(tf.expand_dims(coords, 0), [batch, 1, 1, 1])
  return coords 
Example #15
Source File: pcn_emd.py    From pcn with MIT License
def create_decoder(self, features):
        with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
            coarse = mlp(features, [1024, 1024, self.num_coarse * 3])
            coarse = tf.reshape(coarse, [-1, self.num_coarse, 3])

        with tf.variable_scope('folding', reuse=tf.AUTO_REUSE):
            x = tf.linspace(-self.grid_scale, self.grid_scale, self.grid_size)
            y = tf.linspace(-self.grid_scale, self.grid_scale, self.grid_size)
            grid = tf.meshgrid(x, y)
            grid = tf.expand_dims(tf.reshape(tf.stack(grid, axis=2), [-1, 2]), 0)
            grid_feat = tf.tile(grid, [features.shape[0], self.num_coarse, 1])

            point_feat = tf.tile(tf.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])
            point_feat = tf.reshape(point_feat, [-1, self.num_fine, 3])

            global_feat = tf.tile(tf.expand_dims(features, 1), [1, self.num_fine, 1])

            feat = tf.concat([grid_feat, point_feat, global_feat], axis=2)

            center = tf.tile(tf.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])
            center = tf.reshape(center, [-1, self.num_fine, 3])

            fine = mlp_conv(feat, [512, 512, 3]) + center
        return coarse, fine 
Example #16
Source File: Dense_Transformer_Networks_3D.py    From Unet_3D with GNU General Public License v3.0
def _local_Networks(self,input_dim,x):
        with tf.variable_scope('_local_Networks'):
            x = tf.reshape(x,[-1,self.height*self.width*self.depth*self.num_channels])
            W_fc_loc1 = weight_variable([self.height*self.width*self.depth*self.num_channels, 20])
            b_fc_loc1 = bias_variable([20])
            W_fc_loc2 = weight_variable([20, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number*3])
            initial = self.initial.astype('float32')
            initial = initial.flatten()
            b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
            h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)
            h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1, W_fc_loc2) + b_fc_loc2)
            #temp use
            if Debug == True:
                x = np.linspace(-1.0,1.0,self.X_controlP_number)
                y = np.linspace(-1.0,1.0,self.Y_controlP_number)
                z = np.linspace(-1.0,1.0,self.Z_controlP_number)
                x_s = tf.tile(x,[self.Y_controlP_number*self.Z_controlP_number],'float64')
                y_s = tf.tile(self._repeat(y,self.X_controlP_number,'float64'),[self.Z_controlP_number])
                z_s = self._repeat(z,self.X_controlP_number*self.Y_controlP_number,'float64')
                h_fc_loc2 = tf.concat([x_s,y_s,z_s],0)
                h_fc_loc2 = tf.tile(h_fc_loc2,[self.num_batch])
                h_fc_loc2 = tf.reshape(h_fc_loc2,[self.num_batch,-1])
            return h_fc_loc2 
Example #17
Source File: pcn_cd.py    From pcn with MIT License
def create_decoder(self, features):
        with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
            coarse = mlp(features, [1024, 1024, self.num_coarse * 3])
            coarse = tf.reshape(coarse, [-1, self.num_coarse, 3])

        with tf.variable_scope('folding', reuse=tf.AUTO_REUSE):
            grid = tf.meshgrid(tf.linspace(-0.05, 0.05, self.grid_size), tf.linspace(-0.05, 0.05, self.grid_size))
            grid = tf.expand_dims(tf.reshape(tf.stack(grid, axis=2), [-1, 2]), 0)
            grid_feat = tf.tile(grid, [features.shape[0], self.num_coarse, 1])

            point_feat = tf.tile(tf.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])
            point_feat = tf.reshape(point_feat, [-1, self.num_fine, 3])

            global_feat = tf.tile(tf.expand_dims(features, 1), [1, self.num_fine, 1])

            feat = tf.concat([grid_feat, point_feat, global_feat], axis=2)

            center = tf.tile(tf.expand_dims(coarse, 2), [1, 1, self.grid_size ** 2, 1])
            center = tf.reshape(center, [-1, self.num_fine, 3])

            fine = mlp_conv(feat, [512, 512, 3]) + center
        return coarse, fine 
Example #18
Source File: Motion.py    From VideoSuperResolution with MIT License
def _grid_norm(width, height, bounds=(-1.0, 1.0)):
  """generate a normalized mesh grid

    Args:
        width: width of the pixels(mesh)
        height: height of the pixels
        bounds: normalized lower and upper bounds
    Return:
        This should be equivalent to:
        >>>  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        >>>                         np.linspace(-1, 1, height))
        >>>  ones = np.ones(np.prod(x_t.shape))
        >>>  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
  """
  x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                  tf.transpose(tf.expand_dims(
                      tf.linspace(*bounds, width), 1), [1, 0]))
  y_t = tf.matmul(tf.expand_dims(tf.linspace(*bounds, height), 1),
                  tf.ones(shape=tf.stack([1, width])))

  grid = tf.stack([x_t, y_t], axis=-1)
  return grid 
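A minimal usage sketch for _grid_norm(), assuming eager TensorFlow and the default (-1, 1) bounds.

import tensorflow as tf

grid = _grid_norm(width=6, height=4)
print(grid.shape)  # (4, 6, 2): normalized (x, y) per pixel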
Example #19
Source File: transform.py    From neural-flow-style with MIT License
def rot_mat_uniform(phi0, phi1, phi_unit, theta0, theta1, theta_unit):
    if phi_unit == 0:
        phi = [(phi1-phi0)/2]
    else:
        n_phi = int(np.abs(phi1-phi0) / float(phi_unit)) + 1  # np.linspace needs an integer count
        phi = np.linspace(phi0, phi1, n_phi, endpoint=True)

    if theta_unit == 0:
        theta = [(theta1-theta0)/2]
    else:
        n_theta = int(np.abs(theta1-theta0) / float(theta_unit)) + 1
        theta = np.linspace(theta0, theta1, n_theta, endpoint=True)

    views = []
    for phi_ in phi:
        for theta_ in theta:
            views.append({'phi':phi_, 'theta':theta_})

    return views 
Example #20
Source File: spatial_transformer.py    From pyslam with GNU General Public License v3.0
def _meshgrid(height, width):
    with tf.variable_scope('_meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
        return grid 
Example #21
Source File: projector.py    From stereo-magnification with Apache License 2.0
def meshgrid_abs(batch, height, width, is_homogeneous=True):
  """Construct a 2D meshgrid in the absolute coordinates.

  Args:
    batch: batch size
    height: height of the grid
    width: width of the grid
    is_homogeneous: whether to return in homogeneous coordinates
  Returns:
    x,y grid coordinates [batch, 2 (3 if homogeneous), height, width]
  """
  xs = tf.linspace(0.0, tf.cast(width-1, tf.float32), width)
  ys = tf.linspace(0.0, tf.cast(height-1, tf.float32), height)
  xs, ys = tf.meshgrid(xs, ys)

  if is_homogeneous:
    ones = tf.ones_like(xs)
    coords = tf.stack([xs, ys, ones], axis=0)
  else:
    coords = tf.stack([xs, ys], axis=0)
  coords = tf.tile(tf.expand_dims(coords, 0), [batch, 1, 1, 1])
  return coords 
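A usage sketch for meshgrid_abs(), assuming eager TensorFlow; unlike the normalized grids above, the coordinates here are absolute pixel positions.

import tensorflow as tf

coords = meshgrid_abs(batch=2, height=3, width=4)
print(coords.shape)  # (2, 3, 3, 4)
# coords[:, 0] holds x in [0, width-1], coords[:, 1] holds y in [0, height-1],
# coords[:, 2] is the homogeneous row of ones.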
Example #22
Source File: videosr_ops.py    From PFNL with MIT License
def meshgrid(height, width):
    with tf.variable_scope('_meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])

        # with tf.device('/cpu:0'):
        #     x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
        #                     tf.transpose(tf.expand_dims(tf.linspace(0.0, -1.0 + width, width), 1), [1, 0]))
        #     y_t = tf.matmul(tf.expand_dims(tf.linspace(0.0, -1.0 + height, height), 1),
        #                     tf.ones(shape=tf.pack([1, width])))
        # x_t = tf.expand_dims(x_t, 2)
        # y_t = tf.expand_dims(y_t, 2)
        # grid = tf.concat(2, [x_t, y_t])
        with tf.device('/cpu:0'):
            grid = tf.meshgrid(list(range(height)), list(range(width)), indexing='ij')
            grid = tf.cast(tf.stack(grid, axis=2)[:, :, ::-1], tf.float32)
    return grid 
Example #23
Source File: RigidTransformation3DImputation.py    From aitom with GNU General Public License v3.0
def _mgrid(self, *args, **kwargs):
        """
        create orthogonal grid
        similar to np.mgrid
        Parameters
        ----------
        args : int
            number of points on each axis
        low : float
            minimum coordinate value
        high : float
            maximum coordinate value
        Returns
        -------
        grid : tf.Tensor [len(args), args[0], ...]
            orthogonal grid
        """
        low = kwargs.pop("low", -1)
        high = kwargs.pop("high", 1)
        low = tf.to_float(low)
        high = tf.to_float(high)
        coords = (tf.linspace(low, high, arg) for arg in args)
        grid = tf.stack(tf.meshgrid(*coords, indexing='ij'))

        return grid 
Example #24
Source File: spatial_transformer.py    From lmdis-rep with Apache License 2.0
def _meshgrid(height, width):
    with tf.variable_scope('_meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
        return grid 
Example #25
Source File: project.py    From multilabel-image-classification-tensorflow with MIT License
def _meshgrid_abs(height, width):
  """Meshgrid in the absolute coordinates."""
  x_t = tf.matmul(
      tf.ones(shape=tf.stack([height, 1])),
      tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
  y_t = tf.matmul(
      tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
      tf.ones(shape=tf.stack([1, width])))
  x_t = (x_t + 1.0) * 0.5 * tf.cast(width - 1, tf.float32)
  y_t = (y_t + 1.0) * 0.5 * tf.cast(height - 1, tf.float32)
  x_t_flat = tf.reshape(x_t, (1, -1))
  y_t_flat = tf.reshape(y_t, (1, -1))
  ones = tf.ones_like(x_t_flat)
  grid = tf.concat([x_t_flat, y_t_flat, ones], axis=0)
  return grid 
Example #26
Source File: Dense_Transformer_Networks_3D.py    From Unet_3D with GNU General Public License v3.0
def _makeT(self,cp):
        with tf.variable_scope('_makeT'):
            cp = tf.reshape(cp,(-1,3,self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number))
            cp = tf.cast(cp,'float32')       
            N_f = tf.shape(cp)[0]         
            #c_s
            x,y,z = tf.linspace(-1.,1.,self.X_controlP_number),tf.linspace(-1.,1.,self.Y_controlP_number),tf.linspace(-1.,1.,self.Z_controlP_number)
            x   = tf.tile(x,[self.Y_controlP_number*self.Z_controlP_number])
            y   = tf.tile(self._repeat(y,self.X_controlP_number,'float32'),[self.Z_controlP_number])
            z   = self._repeat(z,self.X_controlP_number*self.Y_controlP_number,'float32')
            xs,ys,zs = tf.transpose(tf.reshape(x,(-1,1))),tf.transpose(tf.reshape(y,(-1,1))),tf.transpose(tf.reshape(z,(-1,1)))
            cp_s = tf.concat([xs,ys,zs],0)
            cp_s_trans = tf.transpose(cp_s)
            # (4*4*4)*3 -> 64 * 3
            ##===Compute distance R
            xs_trans,ys_trans,zs_trans = tf.transpose(tf.stack([xs],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([ys],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([zs],axis=2),perm=[1,0,2])        
            xs, xs_trans = tf.meshgrid(xs,xs_trans);ys, ys_trans = tf.meshgrid(ys,ys_trans);zs, zs_trans = tf.meshgrid(zs,zs_trans)
            Rx,Ry, Rz = tf.square(tf.subtract(xs,xs_trans)),tf.square(tf.subtract(ys,ys_trans)),tf.square(tf.subtract(zs,zs_trans))
            R = tf.add_n([Rx,Ry,Rz])
            R = tf.multiply(R,tf.log(tf.clip_by_value(R,1e-10,1e+10)))
            ones = tf.ones([self.Y_controlP_number*self.X_controlP_number*self.Z_controlP_number,1],tf.float32)
            ones_trans = tf.transpose(ones)
            zeros = tf.zeros([4,4],tf.float32)
            Deltas1 = tf.concat([ones, cp_s_trans, R],1)
            Deltas2 = tf.concat([ones_trans,cp_s],0)
            Deltas2 = tf.concat([zeros,Deltas2],1)          
            Deltas = tf.concat([Deltas1,Deltas2],0)
            ##get deltas_inv
            Deltas_inv = tf.matrix_inverse(Deltas)
            Deltas_inv = tf.expand_dims(Deltas_inv,0)
            Deltas_inv = tf.reshape(Deltas_inv,[-1])
            Deltas_inv_f = tf.tile(Deltas_inv,tf.stack([N_f]))
            Deltas_inv_f = tf.reshape(Deltas_inv_f,tf.stack([N_f,self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number+4, -1]))
            cp_trans =tf.transpose(cp,perm=[0,2,1])
            zeros_f_In = tf.zeros([N_f,4,3],tf.float32)
            cp = tf.concat([cp_trans,zeros_f_In],1)
            T = tf.transpose(tf.matmul(Deltas_inv_f,cp),[0,2,1])
            return T 
Example #27
Source File: nbeats_layer.py    From Time-series-prediction with MIT License
def linspace(self,backcast_length, forecast_length):
        lin_space = tf.linspace(-float(backcast_length),float(forecast_length), backcast_length+forecast_length)
        b_ls=lin_space[:backcast_length]
        f_ls=lin_space[backcast_length:]
        return b_ls, f_ls 
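A standalone sketch of the same backcast/forecast split, with hypothetical lengths, assuming eager TensorFlow:

import tensorflow as tf

backcast_length, forecast_length = 10, 5
lin_space = tf.linspace(-float(backcast_length), float(forecast_length),
                        backcast_length + forecast_length)
b_ls = lin_space[:backcast_length]   # 10 points starting at -10.0
f_ls = lin_space[backcast_length:]   # 5 points ending at 5.0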
Example #28
Source File: spatial_transform_ops.py    From Live-feed-object-device-identification-using-Tensorflow-and-OpenCV with Apache License 2.0
def _coordinate_vector_1d(start, end, size, align_endpoints):
  """Generates uniformly spaced coordinate vector.

  Args:
    start: A float tensor of shape [batch, num_boxes] indicating start values.
    end: A float tensor of shape [batch, num_boxes] indicating end values.
    size: Number of points in coordinate vector.
    align_endpoints: Whether to align first and last points exactly to
      endpoints.

  Returns:
    A 3D float tensor of shape [batch, num_boxes, size] containing grid
    coordinates.
  """
  start = tf.expand_dims(start, -1)
  end = tf.expand_dims(end, -1)
  length = end - start
  if align_endpoints:
    relative_grid_spacing = tf.linspace(0.0, 1.0, size)
    offset = 0 if size > 1 else length / 2
  else:
    relative_grid_spacing = tf.linspace(0.0, 1.0, size + 1)[:-1]
    offset = length / (2 * size)
  relative_grid_spacing = tf.reshape(relative_grid_spacing, [1, 1, size])
  relative_grid_spacing = tf.cast(relative_grid_spacing, dtype=start.dtype)
  absolute_grid = start + offset + relative_grid_spacing * length
  return absolute_grid 
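A usage sketch for _coordinate_vector_1d(), assuming eager TensorFlow; the box extents are hypothetical.

import tensorflow as tf

# One batch with two boxes spanning [0, 10] and [5, 9] along this axis.
start = tf.constant([[0.0, 5.0]])
end = tf.constant([[10.0, 9.0]])
grid = _coordinate_vector_1d(start, end, size=4, align_endpoints=True)
# grid has shape (1, 2, 4); the first box yields [0.0, 3.33, 6.67, 10.0].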
Example #29
Source File: Dense_Transformer_Networks_3D.py    From Unet_3D with GNU General Public License v3.0
def _meshgrid(self):
        with tf.variable_scope('_meshgrid'):
            x_use = tf.linspace(-1.0, 1.0, self.out_height)
            y_use = tf.linspace(-1.0, 1.0, self.out_width)
            z_use = tf.linspace(-1.0, 1.0, self.out_depth)
            x_t = tf.tile(x_use,[self.out_width*self.out_depth])
            y_t = tf.tile(self._repeat(y_use,self.out_height,'float32'),[self.out_depth])
            z_t = self._repeat(z_use,self.out_height*self.out_width,'float32')

            x_t_flat = tf.reshape(x_t, (1, -1))
            y_t_flat = tf.reshape(y_t, (1, -1))
            z_t_flat = tf.reshape(z_t, (1, -1))
            px,py,pz = tf.stack([x_t_flat],axis=2),tf.stack([y_t_flat],axis=2),tf.stack([z_t_flat],axis=2)
            #source control points
            x,y,z = tf.linspace(-1.,1.,self.X_controlP_number),tf.linspace(-1.,1.,self.Y_controlP_number),tf.linspace(-1.,1.,self.Z_controlP_number)
            x   = tf.tile(x,[self.Y_controlP_number*self.Z_controlP_number])
            y   = tf.tile(self._repeat(y,self.X_controlP_number,'float32'),[self.Z_controlP_number])
            z   = self._repeat(z,self.X_controlP_number*self.Y_controlP_number,'float32')
            xs,ys,zs = tf.transpose(tf.reshape(x,(-1,1))),tf.transpose(tf.reshape(y,(-1,1))),tf.transpose(tf.reshape(z,(-1,1)))
            cpx,cpy,cpz = tf.transpose(tf.stack([xs],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([ys],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([zs],axis=2),perm=[1,0,2])
            px, cpx = tf.meshgrid(px,cpx);py, cpy = tf.meshgrid(py,cpy); pz, cpz = tf.meshgrid(pz,cpz)        
            #Compute distance R
            Rx,Ry,Rz = tf.square(tf.subtract(px,cpx)),tf.square(tf.subtract(py,cpy)),tf.square(tf.subtract(pz,cpz))
            R = tf.add(tf.add(Rx,Ry),Rz)        
            R = tf.multiply(R,tf.log(tf.clip_by_value(R,1e-10,1e+10)))
            #Source coordinates
            ones = tf.ones_like(x_t_flat) 
            grid = tf.concat([ones, x_t_flat, y_t_flat,z_t_flat,R],0)
            return grid