Python numpy.tile() Examples

The following are 30 code examples showing how to use numpy.tile(). They are extracted from open source projects; the project, author, source file, and license are noted above each example.


You may also want to check out the other available functions and classes of the numpy module.
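
As a quick refresher before the examples: np.tile(A, reps) builds a new array by repeating A the number of times given by reps along each axis. A minimal standalone sketch (toy arrays, not taken from any of the projects below):

import numpy as np

a = np.array([1, 2, 3])
np.tile(a, 2)                      # array([1, 2, 3, 1, 2, 3])
np.tile(a, (2, 1))                 # shape (2, 3): two stacked copies of a
np.tile(np.eye(2), (2, 3)).shape   # (4, 6): the 2x2 identity repeated in a 2x3 grid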

Example 1
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def __init__(self, input_wave_file, output_wave_file, target_phrase):
        self.pop_size = 100
        self.elite_size = 10
        self.mutation_p = 0.005
        self.noise_stdev = 40
        self.noise_threshold = 1
        self.mu = 0.9
        self.alpha = 0.001
        self.max_iters = 3000
        self.num_points_estimate = 100
        self.delta_for_gradient = 100
        self.delta_for_perturbation = 1e3
        self.input_audio = load_wav(input_wave_file).astype(np.float32)
        self.pop = np.expand_dims(self.input_audio, axis=0)
        self.pop = np.tile(self.pop, (self.pop_size, 1))
        self.output_wave_file = output_wave_file
        self.target_phrase = target_phrase
        self.funcs = self.setup_graph(self.pop, np.array([toks.index(x) for x in target_phrase])) 
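
In Example 1, np.tile duplicates the single input waveform so every member of the genetic-algorithm population starts from the same audio. A minimal sketch of just the shapes, with a synthetic signal standing in for load_wav(...):

import numpy as np

pop_size = 100
input_audio = np.random.randn(16000).astype(np.float32)   # stand-in waveform
pop = np.tile(np.expand_dims(input_audio, axis=0), (pop_size, 1))
print(pop.shape)                        # (100, 16000)
print(np.array_equal(pop[0], pop[-1]))  # True: every row is the same waveform
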
Example 2
Project: DOTA_models   Author: ringringyi   File: resnet_v2_test.py    License: Apache License 2.0
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels])) 
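
The nested reshape/tile above builds a deterministic "mesh grid" tensor: pixel (y, x) holds y + x, repeated over the batch and channel axes. Checking the shapes and values in plain NumPy with assumed concrete sizes:

import numpy as np

batch_size, height, width, channels = 2, 3, 4, 5
grid = np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width])
x = np.tile(np.reshape(grid, [1, height, width, 1]), [batch_size, 1, 1, channels])
print(x.shape)        # (2, 3, 4, 5)
print(x[0, 2, 3, 0])  # 5, i.e. row index 2 + column index 3
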
Example 3
Project: DOTA_models   Author: ringringyi   File: resnet_v1_test.py    License: Apache License 2.0
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels])) 
Example 4
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    License: Apache License 2.0
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
        
        max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)
        
        if certain_id is None and certain_feature is None:
            id_ = np.random.randint(max_j, size=batch_size)
            i = np.random.randint(max_i, size=batch_size)
            batch_inputs = data_inputs[i,:,id_,:]
            batch_ouputs = ground_truth[i,:,id_,:]
            batch_ruitu = ruitu_inputs[i,:,id_,:]

            # id used for embedding
            expd_id = np.expand_dims(id_,axis=1)
            batch_ids = np.tile(expd_id,(1,37))
            #batch_time = 

        elif certain_id is not None:
            pass

        return batch_inputs, batch_ruitu, batch_ouputs, batch_ids 
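
Here np.tile spreads each sampled station id across all 37 forecast timesteps so the ids can be fed to an embedding layer. The pattern in isolation, with sizes assumed from the shape comment above:

import numpy as np

batch_size = 4
id_ = np.random.randint(10, size=batch_size)            # one station id per sample
batch_ids = np.tile(np.expand_dims(id_, axis=1), (1, 37))
print(batch_ids.shape)                     # (4, 37)
print((batch_ids == id_[:, None]).all())   # True: each row repeats its id 37 times
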
Example 5
Project: Modeling-Cloth   Author: the3dadvantage   File: ModelingCloth.py    License: MIT License
def zxy_grid(co_y, tymin, tymax, subs, c, t, c_peat, t_peat):
    # create a linspace grid between bottom and top of the tri z range
    #subs = 7
    t_min = np.min(tymin)
    t_max = np.max(tymax)
    divs = np.linspace(t_min, t_max, num=subs, dtype=np.float32)            
    
    # figure out which triangles and which co are in each section
    co_bools = (co_y > divs[:-1][:, nax]) & (co_y < divs[1:][:, nax])
    tri_bools = (tymin < divs[1:][:, nax]) & (tymax > divs[:-1][:, nax])

    for i, j in zip(co_bools, tri_bools):
        if (np.sum(i) > 0) & (np.sum(j) > 0):
            c3 = c[i]
            t3 = t[j]
        
            c_peat.append(np.repeat(c3, t3.shape[0]))
            t_peat.append(np.tile(t3, c3.shape[0])) 
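
The repeat/tile pair at the end is the usual NumPy idiom for enumerating every (vertex, triangle) combination between the two subsets: np.repeat stretches one index set while np.tile cycles the other. A tiny sketch of the pattern with made-up indices:

import numpy as np

c3 = np.array([10, 11])                # vertex indices in this z-slice
t3 = np.array([0, 1, 2])               # triangle indices in this z-slice
pairs_c = np.repeat(c3, t3.shape[0])   # [10 10 10 11 11 11]
pairs_t = np.tile(t3, c3.shape[0])     # [ 0  1  2  0  1  2]
# zip(pairs_c, pairs_t) walks through every vertex/triangle combination exactly once
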
Example 6
Project: RingNet   Author: soubhiksanyal   File: dynamic_contour_embedding.py    License: MIT License
def load_dynamic_contour(template_flame_path='None', contour_embeddings_path='None', static_embedding_path='None', angle=0):
    template_mesh = Mesh(filename=template_flame_path)
    contour_embeddings_path = contour_embeddings_path
    dynamic_lmks_embeddings = np.load(contour_embeddings_path, allow_pickle=True).item()
    lmk_face_idx_static, lmk_b_coords_static = load_static_embedding(static_embedding_path)
    lmk_face_idx_dynamic = dynamic_lmks_embeddings['lmk_face_idx'][angle]
    lmk_b_coords_dynamic = dynamic_lmks_embeddings['lmk_b_coords'][angle]
    dynamic_lmks = mesh_points_by_barycentric_coordinates(template_mesh.v, template_mesh.f, lmk_face_idx_dynamic, lmk_b_coords_dynamic)
    static_lmks = mesh_points_by_barycentric_coordinates(template_mesh.v, template_mesh.f, lmk_face_idx_static, lmk_b_coords_static)
    total_lmks = np.vstack([dynamic_lmks, static_lmks])

    # Visualization of the pose dependent contour on the template mesh
    vertex_colors = np.ones([template_mesh.v.shape[0], 4]) * [0.3, 0.3, 0.3, 0.8]
    tri_mesh = trimesh.Trimesh(template_mesh.v, template_mesh.f,
                               vertex_colors=vertex_colors)
    mesh = pyrender.Mesh.from_trimesh(tri_mesh)
    scene = pyrender.Scene()
    scene.add(mesh)
    sm = trimesh.creation.uv_sphere(radius=0.005)
    sm.visual.vertex_colors = [0.9, 0.1, 0.1, 1.0]
    tfs = np.tile(np.eye(4), (len(total_lmks), 1, 1))
    tfs[:, :3, 3] = total_lmks
    joints_pcl = pyrender.Mesh.from_trimesh(sm, poses=tfs)
    scene.add(joints_pcl)
    pyrender.Viewer(scene, use_raymond_lighting=True) 
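
np.tile(np.eye(4), (n, 1, 1)) is a compact way to allocate one 4x4 identity transform per landmark before the translations are written into the last column. A standalone sketch with random stand-in landmarks:

import numpy as np

total_lmks = np.random.rand(68, 3)                  # stand-in landmark positions
tfs = np.tile(np.eye(4), (len(total_lmks), 1, 1))   # (68, 4, 4), all identity
tfs[:, :3, 3] = total_lmks                          # set each transform's translation
print(tfs.shape, np.allclose(tfs[0, :3, :3], np.eye(3)))   # (68, 4, 4) True
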
Example 7
Project: DeepLab_v3   Author: leimao   File: resnet_v2_test.py    License: MIT License
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels])) 
Example 8
Project: DeepLab_v3   Author: leimao   File: resnet_v1_test.py    License: MIT License
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels])) 
Example 9
Project: pymoo   Author: msu-coinlab   File: sympart.py    License: Apache License 2.0
def _calc_pareto_set(self, n_pareto_points=500):
        # The SYM-PART test problem has 9 equivalent Pareto subsets.
        h = int(n_pareto_points / 9)
        PS = zeros((h * 9, self.n_var))
        cnt = 0
        for row in [-1, 0, 1]:
            for col in [1, 0, -1]:
                X1 = np.linspace(row * self.c - self.a, row * self.c + self.a, h)
                X2 = np.tile(col * self.b, h)
                PS[cnt * h:cnt * h + h, :] = np.vstack((X1, X2)).T
                cnt = cnt + 1
        if self.w != 0:
            # If rotated, we apply the rotation matrix to PS
            # Calculate the rotation matrix
            RM = np.array([
                [cos(self.w), -sin(self.w)],
                [sin(self.w), cos(self.w)]
            ])
            PS = np.array([np.matmul(RM, x) for x in PS])
        return PS 
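
In the SYM-PART construction, np.tile(col * self.b, h) just repeats a scalar h times, giving the constant second coordinate of one Pareto subset; paired with the linspace it yields h points on a horizontal segment. A reduced sketch with made-up constants:

import numpy as np

a, b, c, h = 0.5, 10.0, 20.0, 5
row, col = 1, -1
X1 = np.linspace(row * c - a, row * c + a, h)   # 5 x-coordinates along the segment
X2 = np.tile(col * b, h)                        # [-10. -10. -10. -10. -10.]
segment = np.vstack((X1, X2)).T                 # (5, 2) points of one Pareto subset
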
Example 10
Project: pyscf   Author: pyscf   File: test_common.py    License: Apache License 2.0
def tdhf_frozen_mask(eri, kind="ov"):
    if isinstance(eri.nocc, int):
        nocc = int(eri.model.mo_occ.sum() // 2)
        mask = eri.space
    else:
        nocc = numpy.array(tuple(int(i.sum() // 2) for i in eri.model.mo_occ))
        assert numpy.all(nocc == nocc[0])
        assert numpy.all(eri.space == eri.space[0, numpy.newaxis, :])
        nocc = nocc[0]
        mask = eri.space[0]
    mask_o = mask[:nocc]
    mask_v = mask[nocc:]
    if kind == "ov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        return numpy.tile(mask_ov, 2)
    elif kind == "1ov":
        return numpy.outer(mask_o, mask_v).reshape(-1)
    elif kind == "sov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        nk = len(eri.model.mo_occ)
        return numpy.tile(mask_ov, 2 * nk ** 2)
    elif kind == "o,v":
        return mask_o, mask_v 
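
The final np.tile(mask_ov, 2) duplicates the occupied-virtual mask so it covers both the excitation and de-excitation blocks of the TDHF matrix. The pattern in isolation, with toy masks instead of pyscf objects:

import numpy as np

mask_o = np.array([True, False])          # toy occupied-orbital mask
mask_v = np.array([True, True, False])    # toy virtual-orbital mask
mask_ov = np.outer(mask_o, mask_v).reshape(-1)   # length 6, one flag per o-v pair
full = np.tile(mask_ov, 2)                # length 12: same mask for both blocks
print(mask_ov.sum(), full.sum())          # 2 4
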
Example 11
Project: pyscf   Author: pyscf   File: test_common.py    License: Apache License 2.0
def tdhf_frozen_mask(eri, kind="ov"):
    if isinstance(eri.nocc, int):
        nocc = int(eri.model.mo_occ.sum() // 2)
        mask = eri.space
    else:
        nocc = numpy.array(tuple(int(i.sum() // 2) for i in eri.model.mo_occ))
        assert numpy.all(nocc == nocc[0])
        assert numpy.all(eri.space == eri.space[0, numpy.newaxis, :])
        nocc = nocc[0]
        mask = eri.space[0]
    mask_o = mask[:nocc]
    mask_v = mask[nocc:]
    if kind == "ov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        return numpy.tile(mask_ov, 2)
    elif kind == "1ov":
        return numpy.outer(mask_o, mask_v).reshape(-1)
    elif kind == "sov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        nk = len(eri.model.mo_occ)
        return numpy.tile(mask_ov, 2 * nk ** 2)
    elif kind == "o,v":
        return mask_o, mask_v 
Example 12
Project: pyscf   Author: pyscf   File: test_common.py    License: Apache License 2.0
def tdhf_frozen_mask(eri, kind="ov"):
    if isinstance(eri.nocc, int):
        nocc = int(eri.model.mo_occ.sum() // 2)
        mask = eri.space
    else:
        nocc = numpy.array(tuple(int(i.sum() // 2) for i in eri.model.mo_occ))
        assert numpy.all(nocc == nocc[0])
        assert numpy.all(eri.space == eri.space[0, numpy.newaxis, :])
        nocc = nocc[0]
        mask = eri.space[0]
    mask_o = mask[:nocc]
    mask_v = mask[nocc:]
    if kind == "ov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        return numpy.tile(mask_ov, 2)
    elif kind == "1ov":
        return numpy.outer(mask_o, mask_v).reshape(-1)
    elif kind == "sov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        nk = len(eri.model.mo_occ)
        return numpy.tile(mask_ov, 2 * nk ** 2)
    elif kind == "o,v":
        return mask_o, mask_v 
Example 13
Project: pyscf   Author: pyscf   File: test_common.py    License: Apache License 2.0
def tdhf_frozen_mask(eri, kind="ov"):
    if isinstance(eri.nocc, int):
        nocc = int(eri.model.mo_occ.sum() // 2)
        mask = eri.space
    else:
        nocc = numpy.array(tuple(int(i.sum() // 2) for i in eri.model.mo_occ))
        assert numpy.all(nocc == nocc[0])
        assert numpy.all(eri.space == eri.space[0, numpy.newaxis, :])
        nocc = nocc[0]
        mask = eri.space[0]
    mask_o = mask[:nocc]
    mask_v = mask[nocc:]
    if kind == "ov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        return numpy.tile(mask_ov, 2)
    elif kind == "1ov":
        return numpy.outer(mask_o, mask_v).reshape(-1)
    elif kind == "sov":
        mask_ov = numpy.outer(mask_o, mask_v).reshape(-1)
        nk = len(eri.model.mo_occ)
        return numpy.tile(mask_ov, 2 * nk ** 2)
    elif kind == "o,v":
        return mask_o, mask_v 
Example 14
Project: simpleflow   Author: PytLab   File: operations.py    License: MIT License
def compute_gradient(self, grad=None):
        ''' Compute the gradient for the reduce-sum operation wrt its input value.

        :param grad: The gradient of other operation wrt the reduce-sum output.
        :type grad: ndarray.
        '''
        input_value = self.input_nodes[0].output_value

        if grad is None:
            grad = np.ones_like(self.output_value)

        output_shape = np.array(np.shape(input_value))
        output_shape[self.axis] = 1.0
        tile_scaling = np.shape(input_value) // output_shape
        grad = np.reshape(grad, output_shape)
        return np.tile(grad, tile_scaling) 
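
This is the standard backward rule for a sum reduction: the upstream gradient has the reduced axis collapsed, so it is reshaped to size 1 on that axis and tiled back out to the input shape. The shape arithmetic on its own, without any simpleflow objects:

import numpy as np

input_value = np.random.rand(4, 3)    # input to a sum over axis=1
grad = np.ones(4)                     # upstream gradient of the (4,) output
axis = 1

output_shape = np.array(np.shape(input_value))
output_shape[axis] = 1                                  # [4, 1]
tile_scaling = np.shape(input_value) // output_shape    # [1, 3]
grad_input = np.tile(np.reshape(grad, output_shape), tile_scaling)
print(grad_input.shape)               # (4, 3): gradient repeated along the summed axis
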
Example 15
Project: NeuroKit   Author: neuropsychology   File: tests_complexity.py    License: MIT License
def pyeeg_ap_entropy(X, M, R):
    N = len(X)

    Em = pyeeg_embed_seq(X, 1, M)
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B)  # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R

    # Probability that random M-sequences are in range
    Cm = InRange.mean(axis=0)

    # M+1-sequences in range if M-sequences are in range & last values are close
    Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)

    Cmp = np.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)

    Phi_m, Phi_mp = np.sum(np.log(Cm)), np.sum(np.log(Cmp))

    Ap_En = (Phi_m - Phi_mp) / (N - M)

    return Ap_En 
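
np.tile(Em, (len(Em), 1, 1)) followed by a transpose materialises all pairwise differences between the embedded sequences; it is equivalent to broadcasting Em[:, None] against Em[None, :]. The same trick on a tiny array:

import numpy as np

Em = np.array([[0.0, 1.0],
               [2.0, 3.0],
               [4.0, 5.0]])               # 3 embedded sequences of length 2
A = np.tile(Em, (len(Em), 1, 1))          # (3, 3, 2): A[i, j] == Em[j]
B = np.transpose(A, [1, 0, 2])            # (3, 3, 2): B[i, j] == Em[i]
D = np.abs(A - B)                         # D[i, j, k] == |Em[i, k] - Em[j, k]|
print(np.allclose(D, np.abs(Em[:, None, :] - Em[None, :, :])))   # True
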
Example 16
Project: NeuroKit   Author: neuropsychology   File: tests_complexity.py    License: MIT License
def pyeeg_samp_entropy(X, M, R):
    N = len(X)

    Em = pyeeg_embed_seq(X, 1, M)[:-1]
    A = np.tile(Em, (len(Em), 1, 1))
    B = np.transpose(A, [1, 0, 2])
    D = np.abs(A - B)  # D[i,j,k] = |Em[i][k] - Em[j][k]|
    InRange = np.max(D, axis=2) <= R
    np.fill_diagonal(InRange, 0)  # Don't count self-matches

    Cm = InRange.sum(axis=0)  # Probability that random M-sequences are in range
    Dp = np.abs(np.tile(X[M:], (N - M, 1)) - np.tile(X[M:], (N - M, 1)).T)

    Cmp = np.logical_and(Dp <= R, InRange).sum(axis=0)

    # Avoid taking log(0)
    Samp_En = np.log(np.sum(Cm + 1e-100) / np.sum(Cmp + 1e-100))

    return Samp_En


# =============================================================================
# Entropy
# ============================================================================= 
Example 17
Project: me-ica   Author: ME-ICA   File: mghformat.py    License: GNU Lesser General Public License v2.1
def update_header(self):
        ''' Harmonize header with image data and affine
        '''
        hdr = self._header
        if self._data is not None:
            hdr.set_data_shape(self._data.shape)

        if self._affine is not None:
            # for more information, go through save_mgh.m in FreeSurfer dist
            MdcD = self._affine[:3, :3]
            delta = np.sqrt(np.sum(MdcD * MdcD, axis=0))
            Mdc = MdcD / np.tile(delta, (3, 1))
            Pcrs_c = np.array([0, 0, 0, 1], dtype=float)  # np.float was removed in NumPy 1.24+
            Pcrs_c[:3] = np.array([self._data.shape[0], self._data.shape[1],
                                   self._data.shape[2]], dtype=float) / 2.0
            Pxyz_c = np.dot(self._affine, Pcrs_c)

            hdr['delta'][:] = delta
            hdr['Mdc'][:, :] = Mdc.T
            hdr['Pxyz_c'][:] = Pxyz_c[:3] 
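
Dividing by np.tile(delta, (3, 1)) normalises each column of the direction-cosine matrix by that column's norm (plain broadcasting, MdcD / delta, would do the same). A standalone check with a made-up affine block:

import numpy as np

MdcD = np.array([[2.0, 0.0, 0.0],
                 [0.0, 3.0, 0.0],
                 [0.0, 0.0, 4.0]])
delta = np.sqrt(np.sum(MdcD * MdcD, axis=0))   # per-column voxel sizes: [2. 3. 4.]
Mdc = MdcD / np.tile(delta, (3, 1))            # unit-norm columns
print(np.allclose(np.linalg.norm(Mdc, axis=0), 1.0))   # True
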
Example 18
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def setup_graph(self, input_audio_batch, target_phrase): 
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)
        
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            
            inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
            len_batch = tf.placeholder(tf.float32, name='b')
            arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
            arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
            arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
            len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
            
            logits = get_logits(inputs, arg2_logits)
            target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
            ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
            decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
            
            sess = tf.Session()
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, "models/session_dump")
            
        func1 = lambda a, b, c, d, e, f: sess.run(ctcloss, 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], 
            feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f})
        return (func1, func2) 
Example 19
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    License: MIT License
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
        batch_size = input_audio_batch.shape[0]
        weird = (input_audio_batch.shape[1] - 1) // 320 
        logits_arg2 = np.tile(weird, batch_size)
        dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
        dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
        
        pass_in = np.clip(input_audio_batch, -2**15, 2**15-1)
        seq_len = np.tile(weird, batch_size).astype(np.int32)

        if decode:
            return self.funcs[1](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
        else:
            return self.funcs[0](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len) 
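
In both setup_graph and getctcloss, np.tile simply repeats per-utterance scalars (the frame count and the target length) once per batch element. A shape-only sketch with an assumed audio length:

import numpy as np

batch_size = 4
n_samples = 16000                               # assumed waveform length
weird = (n_samples - 1) // 320                  # number of acoustic frames
seq_len = np.tile(weird, batch_size).astype(np.int32)
print(seq_len)                                  # [49 49 49 49]
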
Example 20
Project: mmdetection   Author: open-mmlab   File: dataset_wrappers.py    License: Apache License 2.0
def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        if hasattr(self.dataset, 'flag'):
            self.flag = np.tile(self.dataset.flag, times)

        self._ori_len = len(self.dataset) 
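
For the repeated-dataset wrapper, np.tile(flag, times) keeps the per-sample group flags aligned with the virtually repeated sample indices. In isolation:

import numpy as np

flag = np.array([0, 1, 1], dtype=np.uint8)      # group flag per original sample
times = 3
repeated_flag = np.tile(flag, times)            # [0 1 1 0 1 1 0 1 1]
print(len(repeated_flag) == len(flag) * times)  # True
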
Example 21
Project: neural-combinatorial-optimization-rl-tensorflow   Author: MichelDeudon   File: dataset.py    License: MIT License
def test_batch(self, seed=0):
        # Generate random TSP-TW instance
        input_, or_sequence, tw_open, tw_close = self.gen_instance(test_mode=True, seed=seed)
        # Store batch
        input_batch = np.tile(input_,(self.batch_size,1,1))
        return input_batch, or_sequence, tw_open, tw_close


    # Plot a tour 
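
np.tile(input_, (self.batch_size, 1, 1)) duplicates the single generated instance along a new leading batch axis, so the whole batch decodes the same problem. Shape-wise, with assumed instance dimensions:

import numpy as np

batch_size, max_length, dimension = 8, 20, 2
input_ = np.random.rand(max_length, dimension)        # one instance
input_batch = np.tile(input_, (batch_size, 1, 1))     # (8, 20, 2), identical copies
print(input_batch.shape, np.array_equal(input_batch[0], input_batch[-1]))
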
Example 22
Project: neural-combinatorial-optimization-rl-tensorflow   Author: MichelDeudon   File: dataset.py    License: MIT License
def test_batch(self, batch_size, max_length, dimension, seed=0):
        # Generate random TSP instance
        input_, or_sequence = self.gen_instance(max_length, dimension, test_mode=True, seed=seed)

        # Store batch
        input_batch = np.tile(input_,(batch_size,1,1))

        return input_batch, or_sequence


    # Plot a tour 
Example 23
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: rl_data.py    License: Apache License 2.0
def reset(self):
        self.state_ = np.tile(
            np.asarray([env.reset() for env in self.env], dtype=np.uint8).transpose((0, 3, 1, 2)),
            (1, self.input_length, 1, 1)) 
Example 24
Project: DOTA_models   Author: ringringyi   File: ops_test.py    License: Apache License 2.0
def test_position_sensitive_with_equal_channels(self):
    num_spatial_bins = [2, 2]
    image_shape = [1, 3, 3, 4]
    crop_size = [2, 2]

    image = tf.constant(range(1, 3 * 3 + 1), dtype=tf.float32,
                        shape=[1, 3, 3, 1])
    tiled_image = tf.tile(image, [1, 1, 1, image_shape[3]])
    boxes = tf.random_uniform((3, 4))
    box_ind = tf.constant([0, 0, 0], dtype=tf.int32)

    # All channels are equal so position-sensitive crop and resize should
    # work as the usual crop and resize for just one channel.
    crop = tf.image.crop_and_resize(image, boxes, box_ind, crop_size)
    crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True)

    ps_crop_and_pool = ops.position_sensitive_crop_regions(
        tiled_image,
        boxes,
        box_ind,
        crop_size,
        num_spatial_bins,
        global_pool=True)

    with self.test_session() as sess:
      expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
      self.assertAllClose(output, expected_output) 
Example 25
Project: DOTA_models   Author: ringringyi   File: ops_test.py    License: Apache License 2.0
def test_position_sensitive_with_global_pool_false(self):
    num_spatial_bins = [3, 2]
    image_shape = [1, 3, 2, 6]
    num_boxes = 2

    # First channel is 1's, second channel is 2's, etc.
    image = tf.constant(list(range(1, 3 * 2 + 1)) * 6, dtype=tf.float32,  # list(...) so the repetition also works on Python 3
                        shape=image_shape)
    boxes = tf.random_uniform((num_boxes, 4))
    box_ind = tf.constant([0, 0], dtype=tf.int32)

    expected_output = []

    # Expected output, when crop_size = [3, 2].
    expected_output.append(np.expand_dims(
        np.tile(np.array([[1, 2],
                          [3, 4],
                          [5, 6]]), (num_boxes, 1, 1)),
        axis=-1))

    # Expected output, when crop_size = [6, 4].
    expected_output.append(np.expand_dims(
        np.tile(np.array([[1, 1, 2, 2],
                          [1, 1, 2, 2],
                          [3, 3, 4, 4],
                          [3, 3, 4, 4],
                          [5, 5, 6, 6],
                          [5, 5, 6, 6]]), (num_boxes, 1, 1)),
        axis=-1))

    for crop_size_mult in range(1, 3):
      crop_size = [3 * crop_size_mult, 2 * crop_size_mult]
      ps_crop = ops.position_sensitive_crop_regions(
          image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=False)
      with self.test_session() as sess:
        output = sess.run(ps_crop)

      self.assertAllEqual(output, expected_output[crop_size_mult - 1]) 
Example 26
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: seq2seq_class.py    License: Apache License 2.0
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
        max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)

        id_ = np.random.randint(max_j, size=batch_size)
        i = np.random.randint(max_i, size=batch_size)
        batch_inputs = data_inputs[i,:,id_,:]
        batch_ouputs = ground_truth[i,:,id_,:]
        batch_ruitu = ruitu_inputs[i,:,id_,:]
        # id used for embedding
        if self.id_embd and (not self.time_embd): 
            expd_id = np.expand_dims(id_,axis=1)
            batch_ids = np.tile(expd_id,(1,37))
            return batch_inputs, batch_ruitu, batch_ouputs, batch_ids
        elif (not self.id_embd) and (self.time_embd):
            time_range = np.array(range(37))
            batch_time = np.tile(time_range,(batch_size,1))
            #batch_time = np.expand_dims(batch_time, axis=-1)

            return batch_inputs, batch_ruitu, batch_ouputs, batch_time
        elif (self.id_embd) and (self.time_embd):
            expd_id = np.expand_dims(id_,axis=1)
            batch_ids = np.tile(expd_id,(1,37))

            time_range = np.array(range(37))
            batch_time = np.tile(time_range,(batch_size,1))
            #batch_time = np.expand_dims(batch_time, axis=-1)

            return batch_inputs, batch_ruitu, batch_ouputs, batch_ids, batch_time
        
        elif (not self.id_embd) and (not self.time_embd): 
            return batch_inputs, batch_ruitu, batch_ouputs 
Example 27
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    License: Apache License 2.0
def sample_batch(self, data_inputs, ground_truth, ruitu_inputs, batch_size, certain_id=None, certain_feature=None):
        max_i, _, max_j, _ = data_inputs.shape # Example: (1148, 37, 10, 9)-(sample_ind, timestep, sta_id, features)

        id_ = np.random.randint(max_j, size=batch_size)
        i = np.random.randint(max_i, size=batch_size)
        batch_inputs = data_inputs[i,:,id_,:]
        batch_ouputs = ground_truth[i,:,id_,:]
        batch_ruitu = ruitu_inputs[i,:,id_,:]
        # id used for embedding
        if self.id_embd and (not self.time_embd): 
            expd_id = np.expand_dims(id_,axis=1)
            batch_ids = np.tile(expd_id,(1,37))
            return batch_inputs, batch_ruitu, batch_ouputs, batch_ids
        elif (not self.id_embd) and (self.time_embd):
            time_range = np.array(range(37))
            batch_time = np.tile(time_range,(batch_size,1))
            #batch_time = np.expand_dims(batch_time, axis=-1)

            return batch_inputs, batch_ruitu, batch_ouputs, batch_time
        elif (self.id_embd) and (self.time_embd):
            expd_id = np.expand_dims(id_,axis=1)
            batch_ids = np.tile(expd_id,(1,37))

            time_range = np.array(range(37))
            batch_time = np.tile(time_range,(batch_size,1))
            #batch_time = np.expand_dims(batch_time, axis=-1)

            return batch_inputs, batch_ruitu, batch_ouputs, batch_ids, batch_time
        
        elif (not self.id_embd) and (not self.time_embd): 
            return batch_inputs, batch_ruitu, batch_ouputs 
Example 28
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: Train_from_scratch.py    License: Apache License 2.0
def train(processed_path, train_data, val_data, model_save_path, model_name):
    train_dict = load_pkl(processed_path, train_data)
    val_dict = load_pkl(processed_path, val_data)


    print(train_dict.keys())
    print('Original input_obs data shape:')
    print(train_dict['input_obs'].shape)
    print(val_dict['input_obs'].shape)

    print('After clipping the 9 days, input_obs data shape:')
    train_dict['input_obs'] = train_dict['input_obs'][:,:-9,:,:]
    val_dict['input_obs'] = val_dict['input_obs'][:,:-9,:,:]
    print(train_dict['input_obs'].shape)
    print(val_dict['input_obs'].shape)

    enc_dec = Seq2Seq_Class(model_save_path=model_save_path,
                     model_structure_name=model_name, 
                     model_weights_name=model_name, 
                     model_name=model_name)
    enc_dec.build_graph()

    val_size=val_dict['input_ruitu'].shape[0] # 87 val samples
    val_ids=[]
    val_times=[]
    for i in range(10):
        val_ids.append(np.ones(shape=(val_size,37))*i)
    val_ids = np.stack(val_ids, axis=-1)
    print('val_ids.shape is:', val_ids.shape)
    val_times = np.array(range(37))
    val_times = np.tile(val_times,(val_size,1))
    print('val_times.shape is:',val_times.shape)

    enc_dec.fit(train_dict['input_obs'], train_dict['input_ruitu'], train_dict['ground_truth'],
           val_dict['input_obs'], val_dict['input_ruitu'], val_dict['ground_truth'], val_ids = val_ids, val_times=val_times,
            iterations=10000, batch_size=512, validation=True)

    print('Training finished!') 
Example 29
Project: graph-neural-networks   Author: alelab-upenn   File: graphTools.py    License: GNU General Public License v3.0
def matrixPowers(S,K):
    """
    matrixPowers(A, K) Computes the matrix powers A^k for k = 0, ..., K-1

    Inputs:
        A: either a single N x N matrix or a collection E x N x N of E matrices.
        K: integer, maximum power to be computed (up to K-1)

    Outputs:
        AK: either a collection of K matrices K x N x N (if the input was a
            single matrix) or a collection E x K x N x N (if the input was a
            collection of E matrices).
    """
    # S can be either a single GSO (N x N) or a collection of GSOs (E x N x N)
    if len(S.shape) == 2:
        N = S.shape[0]
        assert S.shape[1] == N
        E = 1
        S = S.reshape(1, N, N)
        scalarWeights = True
    elif len(S.shape) == 3:
        E = S.shape[0]
        N = S.shape[1]
        assert S.shape[2] == N
        scalarWeights = False

    # Now, let's build the powers of S:
    thisSK = np.tile(np.eye(N, N).reshape(1,N,N), [E, 1, 1])
    SK = thisSK.reshape(E, 1, N, N)
    for k in range(1,K):
        thisSK = thisSK @ S
        SK = np.concatenate((SK, thisSK.reshape(E, 1, N, N)), axis = 1)
    # Take out the first dimension if it was a single GSO
    if scalarWeights:
        SK = SK.reshape(K, N, N)

    return SK 
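
The np.tile(np.eye(N, N).reshape(1, N, N), [E, 1, 1]) line seeds the recursion with S^0 = I for each of the E graph-shift operators. A small end-to-end check of the single-GSO case against direct matrix powers:

import numpy as np

N, K = 3, 4
S = np.arange(N * N, dtype=float).reshape(N, N)
S3 = S.reshape(1, N, N)                                      # E = 1 collection
thisSK = np.tile(np.eye(N, N).reshape(1, N, N), [1, 1, 1])   # S^0 = I
SK = thisSK.reshape(1, 1, N, N)
for k in range(1, K):
    thisSK = thisSK @ S3
    SK = np.concatenate((SK, thisSK.reshape(1, 1, N, N)), axis=1)
SK = SK.reshape(K, N, N)
print(np.allclose(SK[2], S @ S), np.allclose(SK[3], S @ S @ S))   # True True
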
Example 30
Project: python-toolbox-for-rapid   Author: Esri   File: CreateDischargeTable.py    License: Apache License 2.0
def createFlatTable(self, in_nc, out_table):
        """Create discharge table"""
        # obtain numpy array from the netCDF data
        data_nc = NET.Dataset(in_nc)
        comid = data_nc.variables[self.vars_oi[0]][:]
        qout = data_nc.variables[self.vars_oi[1]][:]


        time_size = len(data_nc.dimensions[self.dims_oi[0]])  # to adapt to the changes of Qout dimensions
        comid_size = len(data_nc.dimensions[self.dims_oi[1]]) # to adapt to the changes of Qout dimensions
        total_size = time_size * comid_size

        qout_arr = qout.reshape(total_size, 1)
        time_arr = NUM.repeat(NUM.arange(1,time_size+1), comid_size)
        time_arr = time_arr.reshape(total_size, 1)
        comid_arr = NUM.tile(comid, time_size)
        comid_arr = comid_arr.reshape(total_size, 1)
        data_table = NUM.hstack((time_arr, comid_arr, qout_arr))

        # convert to numpy structured array
        str_arr = NUM.core.records.fromarrays(data_table.transpose(),
                    NUM.dtype([(self.fields_oi[0], NUM.int32), (self.fields_oi[1], NUM.int32), (self.fields_oi[2], NUM.float32)]))

        data_nc.close()

        # numpy structured array to table
        arcpy.da.NumPyArrayToTable(str_arr, out_table)

        return
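
The repeat/tile pair flattens the (time, comid) grid into long-format columns: NUM.repeat walks through the time steps while NUM.tile cycles the comid list once per time step. A reduced sketch of the same layout in plain NumPy:

import numpy as np

time_size, comid_size = 2, 3
comid = np.array([101, 102, 103])
time_arr = np.repeat(np.arange(1, time_size + 1), comid_size)   # [1 1 1 2 2 2]
comid_arr = np.tile(comid, time_size)                           # [101 102 103 101 102 103]
table = np.column_stack((time_arr, comid_arr))                  # one row per (time, comid) pair
print(table.shape)                                              # (6, 2)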