Python numpy.transpose() Examples

The following code examples show how to use numpy.transpose(). They are taken from open source Python projects.
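As a quick refresher before the project examples: np.transpose reverses the dimensions when called without axes, and permutes them when an explicit axes tuple is given. A minimal, self-contained sketch (array names are illustrative):

import numpy as np

a = np.arange(6).reshape(2, 3)               # shape (2, 3)
print(np.transpose(a).shape)                 # (3, 2): no axes given, dimensions are reversed
print(a.T.shape)                             # (3, 2): .T is equivalent in the default case

b = np.zeros((10, 32, 32, 3))                # e.g. a batch of HWC images
print(np.transpose(b, (0, 3, 1, 2)).shape)   # (10, 3, 32, 32): explicit axis permutation

v = np.transpose(a)                          # transpose returns a view, not a copy
v[0, 0] = 99
print(a[0, 0])                               # 99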

Example 1
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
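The .transpose([1, 0]) calls above simply turn the stacked (2, N) arrays into (N, 2) feature matrices, one row per sample. A small sketch with made-up data (the array names are illustrative):

import numpy as np

densities = np.array([0.1, 0.2, 0.3])
uncerts = np.array([1.0, 2.0, 3.0])

stacked = np.concatenate((densities.reshape((1, -1)),
                          uncerts.reshape((1, -1))), axis=0)   # shape (2, 3)
features = stacked.transpose([1, 0])                           # shape (3, 2)

# equivalent, and often clearer:
assert np.array_equal(features, np.column_stack((densities, uncerts)))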
Example 2
Project: good-semi-bad-gan   Author: christiancosgrove   File: good-semi.py    MIT License
def plot(samples):
    width = min(12,int(np.sqrt(len(samples))))
    fig = plt.figure(figsize=(width, width))
    gs = gridspec.GridSpec(width, width)
    gs.update(wspace=0.05, hspace=0.05)

    for ind, sample in enumerate(samples):
        if ind >= width*width:
            break
        ax = plt.subplot(gs[ind])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        sample = sample * 0.5 + 0.5
        sample = np.transpose(sample, (1, 2, 0))
        plt.imshow(sample)

    return fig 
Example 3
Project: deep-learning-note   Author: wdxtub   File: nn.py    MIT License
def train(self, inputs_list, targets_list):
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T

        hidden_inputs = np.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)

        final_inputs = np.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)

        output_errors = targets - final_outputs
        hidden_errors = np.dot(self.who.T, output_errors)

        self.who += self.lr * np.dot((output_errors *
                                      final_outputs *
                                      (1.0 - final_outputs)), np.transpose(hidden_outputs))
        self.wih += self.lr * np.dot((hidden_errors *
                                      hidden_outputs *
                                      (1.0 - hidden_outputs)), np.transpose(inputs))
        
        pass

    # query 
Example 4
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: dot.py    Apache License 2.0
def measure_cost(repeat, scipy_trans_lhs, scipy_dns_lhs, func_name, *args, **kwargs):
    """Measure time cost of running a function
    """
    mx.nd.waitall()
    args_list = []
    for arg in args:
        args_list.append(arg)
    start = time.time()
    if scipy_trans_lhs:
        args_list[0] = np.transpose(args_list[0]) if scipy_dns_lhs else sp.spmatrix.transpose(args_list[0])
    for _ in range(repeat):
        func_name(*args_list, **kwargs)
    mx.nd.waitall()
    end = time.time()
    diff = end - start
    return diff / repeat 
Example 5
Project: DOTA_models   Author: ringringyi   File: seq2seq_attention_model.py    Apache License 2.0
def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_states):
    """Return the topK results and new decoder states."""
    feed = {
        self._enc_top_states: enc_top_states,
        self._dec_in_state:
            np.squeeze(np.array(dec_init_states)),
        self._abstracts:
            np.transpose(np.array([latest_tokens])),
        self._abstract_lens: np.ones([len(dec_init_states)], np.int32)}

    results = sess.run(
        [self._topk_ids, self._topk_log_probs, self._dec_out_state],
        feed_dict=feed)

    ids, probs, states = results[0], results[1], results[2]
    new_states = [s for s in states]
    return ids, probs, new_states 
Example 6
Project: DOTA_models   Author: ringringyi   File: script_preprocess_annoations_S3DIS.py    Apache License 2.0
def _write_map_files(b_in, b_out, transform):
  cats = get_categories()

  env = utils.Foo(padding=10, resolution=5, num_point_threshold=2,
                  valid_min=-10, valid_max=200, n_samples_per_face=200)
  robot = utils.Foo(radius=15, base=10, height=140, sensor_height=120,
                    camera_elevation_degree=-15)
  
  building_loader = factory.get_dataset('sbpd')
  for flip in [False, True]:
    b = nav_env.Building(b_out, robot, env, flip=flip,
                         building_loader=building_loader)
    logging.info("building_in: %s, building_out: %s, transform: %d", b_in,
                 b_out, transform)
    maps = _get_semantic_maps(b_in, transform, b.map, flip, cats)
    maps = np.transpose(np.array(maps), axes=[1,2,0])

    #  Load file from the cache.
    file_name = '{:s}_{:d}_{:d}_{:d}_{:d}_{:d}_{:d}.pkl'
    file_name = file_name.format(b.building_name, b.map.size[0], b.map.size[1],
                                 b.map.origin[0], b.map.origin[1],
                                 b.map.resolution, flip)
    out_file = os.path.join(DATA_DIR, 'processing', 'class-maps', file_name)
    logging.info('Writing semantic maps to %s.', out_file)
    save_variables(out_file, [maps, cats], ['maps', 'cats'], overwrite=True) 
Example 7
Project: DOTA_models   Author: ringringyi   File: np_box_ops.py    Apache License 2.0
def intersection(boxes1, boxes2):
  """Compute pairwise intersection areas between boxes.

  Args:
    boxes1: a numpy array with shape [N, 4] holding N boxes
    boxes2: a numpy array with shape [M, 4] holding M boxes

  Returns:
    a numpy array with shape [N*M] representing pairwise intersection area
  """
  [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
  [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

  all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
  all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
  intersect_heights = np.maximum(
      np.zeros(all_pairs_max_ymin.shape),
      all_pairs_min_ymax - all_pairs_max_ymin)
  all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
  all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
  intersect_widths = np.maximum(
      np.zeros(all_pairs_max_xmin.shape),
      all_pairs_min_xmax - all_pairs_max_xmin)
  return intersect_heights * intersect_widths 
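The transposes here set up a broadcasting trick: y_max1 has shape (N, 1) while np.transpose(y_max2) has shape (1, M), so np.minimum broadcasts them into an (N, M) matrix of pairwise values. A small sketch with two toy box sets (values are made up):

import numpy as np

y_max1 = np.array([[2.0], [5.0]])            # shape (N, 1) = (2, 1)
y_max2 = np.array([[3.0], [4.0], [6.0]])     # shape (M, 1) = (3, 1)

pairwise_min = np.minimum(y_max1, np.transpose(y_max2))  # broadcast to shape (2, 3)
print(pairwise_min)
# [[2. 2. 2.]
#  [3. 4. 5.]]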
Example 8
Project: dssmplay   Author: lightning-huang   File: dssm_v1.py    GNU General Public License v3.0
def pull_batch(query_data, doc_data, batch_idx):
    query_in = query_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    doc_in = doc_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    
    query_in = query_in.tocoo()
    doc_in = doc_in.tocoo()
    
    query_in = tf.SparseTensorValue(
        np.transpose([np.array(query_in.row, dtype=np.int64), np.array(query_in.col, dtype=np.int64)]),
        np.array(query_in.data, dtype=np.float),
        np.array(query_in.shape, dtype=np.int64))
    doc_in = tf.SparseTensorValue(
        np.transpose([np.array(doc_in.row, dtype=np.int64), np.array(doc_in.col, dtype=np.int64)]),
        np.array(doc_in.data, dtype=np.float),
        np.array(doc_in.shape, dtype=np.int64))

    return query_in, doc_in 
Example 9
Project: dssmplay   Author: lightning-huang   File: dssm_v3.py    GNU General Public License v3.0
def pull_batch(query_data, doc_data, batch_idx):
    # start = time.time()
    query_in = query_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    doc_in = doc_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    
    if batch_idx == 0:
      print(query_in.getrow(53))
    query_in = query_in.tocoo()
    doc_in = doc_in.tocoo()
    
    

    query_in = tf.SparseTensorValue(
        np.transpose([np.array(query_in.row, dtype=np.int64), np.array(query_in.col, dtype=np.int64)]),
        np.array(query_in.data, dtype=np.float),
        np.array(query_in.shape, dtype=np.int64))
    doc_in = tf.SparseTensorValue(
        np.transpose([np.array(doc_in.row, dtype=np.int64), np.array(doc_in.col, dtype=np.int64)]),
        np.array(doc_in.data, dtype=np.float),
        np.array(doc_in.shape, dtype=np.int64))

    # end = time.time()
    # print("Pull_batch time: %f" % (end - start))

    return query_in, doc_in 
Example 10
Project: dssmplay   Author: lightning-huang   File: dssm_v1_eva.py    GNU General Public License v3.0
def pull_batch(query_data, doc_data, batch_idx):
    query_in = query_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    doc_in = doc_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    
    query_in = query_in.toarray()
    doc_in = doc_in.toarray()

    idx_query_in = np.where(query_in > 0)
    idx_doc_in = np.where(doc_in > 0)

    """
    print(query_in.shape, doc_in.shape)
    print(np.vstack(idx_query_in).shape, query_in[idx_query_in].shape, query_in.shape)
    print(np.vstack(idx_doc_in).shape, doc_in[idx_doc_in].shape, doc_in.shape)
    """
    return (np.transpose(np.vstack(idx_query_in)), query_in[idx_query_in], query_in.shape), (np.transpose(np.vstack(idx_doc_in)), doc_in[idx_doc_in], doc_in.shape) 
Example 11
Project: dssmplay   Author: lightning-huang   File: evaluate.py    GNU General Public License v3.0
def pull_batch(query_data, doc_data, max_rows, batch_idx):
    # start = time.time()
    # clamp the slice end to max_rows (built-in min; np.min would treat the second argument as an axis)
    query_in = query_data[batch_idx * BS:min((batch_idx + 1) * BS, max_rows), :]
    doc_in = doc_data[batch_idx * BS:min((batch_idx + 1) * BS, max_rows), :]

    query_in = query_in.tocoo()
    doc_in = doc_in.tocoo()

    query_in = tf.SparseTensorValue(
        np.transpose([np.array(query_in.row, dtype=np.int64), np.array(query_in.col, dtype=np.int64)]),
        np.array(query_in.data, dtype=np.float),
        np.array([BS] + query_in.shape[1:], dtype=np.int64))
    doc_in = tf.SparseTensorValue(
        np.transpose([np.array(doc_in.row, dtype=np.int64), np.array(doc_in.col, dtype=np.int64)]),
        np.array(doc_in.data, dtype=np.float),
        np.array([BS] + query_in.shape[1:], dtype=np.int64))

    # end = time.time()
    # print("Pull_batch time: %f" % (end - start))

    return query_in, doc_in 
Example 12
Project: lung_nodule_classifier   Author: xairc   File: prepare.py    MIT License
def resample(imgs, spacing, new_spacing,order=2):
    if len(imgs.shape)==3:
        new_shape = np.round(imgs.shape * spacing / new_spacing)
        true_spacing = spacing * imgs.shape / new_shape
        resize_factor = new_shape / imgs.shape
        imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
        return imgs, true_spacing
    elif len(imgs.shape)==4:
        n = imgs.shape[-1]
        newimg = []
        for i in range(n):
            slice = imgs[:,:,:,i]
            newslice,true_spacing = resample(slice,spacing,new_spacing)
            newimg.append(newslice)
        newimg=np.transpose(np.array(newimg),[1,2,3,0])
        return newimg,true_spacing
    else:
        raise ValueError('wrong shape') 
Example 13
Project: denoisers   Author: IDKiro   File: loader.py    MIT License
def get_patch(origin_img, noise_img, patch_size):
	H = origin_img.shape[0]
	W = origin_img.shape[1]

	ps_temp = min(H, W, patch_size + 1) - 1

	xx = np.random.randint(0, W-ps_temp)
	yy = np.random.randint(0, H-ps_temp)
	
	patch_origin_img = origin_img[yy:yy+ps_temp, xx:xx+ps_temp, :]
	patch_noise_img = noise_img[yy:yy+ps_temp, xx:xx+ps_temp, :]

	if np.random.randint(2, size=1)[0] == 1:
		patch_origin_img = np.flip(patch_origin_img, axis=1).copy()
		patch_noise_img = np.flip(patch_noise_img, axis=1).copy()
	if np.random.randint(2, size=1)[0] == 1: 
		patch_origin_img = np.flip(patch_origin_img, axis=0).copy()
		patch_noise_img = np.flip(patch_noise_img, axis=0).copy()
	if np.random.randint(2, size=1)[0] == 1:
		patch_origin_img = np.transpose(patch_origin_img, (1, 0, 2)).copy()
		patch_noise_img = np.transpose(patch_noise_img, (1, 0, 2)).copy()
	
	return patch_origin_img, patch_noise_img 
Example 14
Project: SyNEThesia   Author: RunOrVeith   File: synethesia.py    MIT License
def random_start_img(img_size, batch_size, num_channels=3, num_ones_offset=None):
    zeros = np.zeros((*img_size, batch_size * num_channels))
    num_ones_offset = np.random.choice([1, 0], size=batch_size * num_channels)
    zeros += num_ones_offset * np.random.random(size=batch_size * num_channels)
    img = np.transpose(zeros.reshape((*img_size, batch_size, num_channels)), axes=[2, 0, 1, 3])
    return img 
Example 15
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_unit_simple(self):
        """Class SpharaTransform, mode='unit', simple triangular mesh

        Determine the SPHARA forward and inverse transform with unit
        edge weight for a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_unit_simple = st.SpharaTransform(testtrimesh, mode='unit')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_unit_simple.basis()[0])])

        # SPHARA analysis
        coef_unit_simple = st_unit_simple.analysis(data)

        # SPHARA synthesis
        recon_unit_simple = st_unit_simple.synthesis(coef_unit_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_unit_simple),
                [[0.0, 0.0, 0.0],
                 [1.73205081, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_unit_simple,
                data)) 
Example 16
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_ie_simple(self):
        """Class SpharaTransform, mode='inv_euclidean', simple triangular mesh

        Determine the SPHARA forward and inverse transform with
        inverse Euclidean edge weight for a simple triangular mesh, 3
        vertices, single triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_ie_simple = st.SpharaTransform(testtrimesh, mode='inv_euclidean')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_ie_simple.basis()[0])])

        # SPHARA analysis
        coef_ie_simple = st_ie_simple.analysis(data)

        # SPHARA synthesis
        recon_ie_simple = st_ie_simple.synthesis(coef_ie_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_ie_simple),
                [[0.0, 0.0, 0.0],
                 [1.73205081, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_ie_simple,
                data)) 
Example 17
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharatransform.py    BSD 3-Clause "New" or "Revised" License
def test_transform_fem_simple(self):
        """Class SpharaTransform, mode='fem', simple triangular mesh

        Determine the SPHARA forward and inverse transform with fem
        discretisation for a simple triangular mesh, 3 vertices,
        single triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA transform instance for the mesh
        st_fem_simple = st.SpharaTransform(testtrimesh, mode='fem')

        # the data to transform
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(st_fem_simple.basis()[0])])

        # SPHARA analysis
        coef_fem_simple = st_fem_simple.analysis(data)

        # SPHARA synthesis
        recon_fem_simple = st_fem_simple.synthesis(coef_fem_simple)

        self.assertTrue(
            np.allclose(
                np.absolute(coef_fem_simple),
                [[0.0, 0.0, 0.0],
                 [1.87082868, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0],
                 [0.0, 0.0, 1.0]])
            and
            np.allclose(
                recon_fem_simple,
                data)) 
Example 18
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_unit_allpass_simple(self):
        """Class SpharaFilter, mode='unit', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with unit edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_unit_simple = sf.SpharaFilter(testtrimesh, mode='unit',
                                         specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_unit_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_unit_simple = sf_unit_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_unit_simple,
                data)) 
Example 19
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_unit_dc_simple(self):
        """Class SpharaFilter, mode='unit', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with unit edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_unit_simple = sf.SpharaFilter(testtrimesh, mode='unit',
                                         specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_unit_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_unit_simple = sf_unit_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_unit_simple,
                data_filt_ref)) 
Example 20
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_allpass_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                       specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data)) 
Example 21
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_dc_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                       specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data_filt_ref)) 
Example 22
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_ie_low_simple(self):
        """Class SpharaFilter, mode='inv_euclidean', lowpass, simple mesh

        Apply a SPHARA spatial lowpass filter with inv_euclidean edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_ie_simple = sf.SpharaFilter(testtrimesh, mode='inv_euclidean',
                                         specification=[1., 1., 0.])

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_ie_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial lowpass filter
        data_filt_ie_simple = sf_ie_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_ie_simple,
                data_filt_ref)) 
Example 23
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_fem_allpass_simple(self):
        """Class SpharaFilter, mode='fem', allpass, simple mesh

        Apply a SPHARA spatial allpass filter with fem edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_fem_simple = sf.SpharaFilter(testtrimesh, mode='fem',
                                        specification=0)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_fem_simple.basis()[0])])

        # apply SPHARA based spatial allpass filter
        data_filt_fem_simple = sf_fem_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_fem_simple,
                data)) 
Example 24
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharafilter.py    BSD 3-Clause "New" or "Revised" License
def test_filter_fem_dc_simple(self):
        """Class SpharaFilter, mode='fem', dc-pass, simple mesh

        Apply a SPHARA spatial dc-pass filter with fem edge weight to
        data sampled at a simple triangular mesh, 3 vertices, single
        triangle.

        """
        # define the simple test mesh
        testtrimesh = tm.TriMesh([[0, 1, 2]],
                                 [[1, 0, 0], [0, 2, 0], [0, 0, 3]])

        # create a SPHARA filter instance for the mesh
        sf_fem_simple = sf.SpharaFilter(testtrimesh, mode='fem',
                                        specification=1)

        # the data to filter
        data = np.concatenate([[[0., 0., 0.], [1., 1., 1.]],
                               np.transpose(sf_fem_simple.basis()[0])])

        # reference for filtered data
        data_filt_ref = data.copy()
        data_filt_ref[3] = [0., 0., 0.]
        data_filt_ref[4] = [0., 0., 0.]

        # apply SPHARA based spatial dc-pass filter
        data_filt_fem_simple = sf_fem_simple.filter(data)

        self.assertTrue(
            np.allclose(
                data_filt_fem_simple,
                data_filt_ref)) 
Example 25
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharabasis.py    BSD 3-Clause "New" or "Revised" License
def test_basis_unit_result(self):
        """Class SpharaBasis, method basis, mode='unit', triangular test mesh

        Determine the SPHARA basis with unit edge weight for a
        triangular mesh with 118 vertices. The valid basis vectors of
        the SPHARA basis can point in opposite directions
        (multiplication by -1). To compare the calculated basis with
        the reference basis, the transposed matrix of the calculated
        basis is multiplied by the matrix of the reference basis. If
        the calculated basis is correct, then the result matrix of the
        matrix multiplication contains only the elements 1 and -1 at
        the main diagonal, all other elements of the matrix are 0 or
        very small.

        """
        testtrimesh = tm.TriMesh(self.testdatatriangles,
                                 self.testdatavertices)
        sb_unit = sb.SpharaBasis(testtrimesh, mode='unit')
        sb_unit_fun, sb_unit_freq = sb_unit.basis()
        self.assertTrue(
            np.allclose(np.absolute(np.matmul
                                    (np.transpose(sb_unit_fun),
                                     self.testdataspharabasisunitweight)),
                        np.identity(np.size(sb_unit_freq)))
            and
            np.allclose(sb_unit_freq, self.testdataspharanatfrequnitweight)
        ) 
Example 26
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharabasis.py    BSD 3-Clause "New" or "Revised" License
def test_basis_ie_result(self):
        """Class SpharaBasis, method basis, mode='inv_euclidean', triangular
        test mesh

        Determine the SPHARA basis with inverse euclidean edge weight
        for a triangular mesh with 118 vertices. The valid basis
        vectors of the SPHARA basis can point in opposite directions
        (multiplication by -1). To compare the calculated basis with
        the reference basis, the transposed matrix of the calculated
        basis is multiplied by the matrix of the reference basis. If
        the calculated basis is correct, then the result matrix of the
        matrix multiplication contains only the elements 1 and -1 at
        the main diagonal, all other elements of the matrix are 0 or
        very small.

        """
        testtrimesh = tm.TriMesh(self.testdatatriangles,
                                 self.testdatavertices)
        sb_ie = sb.SpharaBasis(testtrimesh, mode='inv_euclidean')
        sb_ie_fun, sb_ie_freq = sb_ie.basis()
        self.assertTrue(
            np.allclose(np.absolute(np.matmul
                                    (np.transpose(sb_ie_fun),
                                     self.testdataspharabasisieweight)),
                        np.identity(np.size(sb_ie_freq)))
            and
            np.allclose(sb_ie_freq, self.testdataspharanatfreqieweight)
        ) 
Example 27
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: test_spharabasis.py    BSD 3-Clause "New" or "Revised" License
def test_basis_fem_result(self):
        """Class SpharaBasis, method basis, mode='fem', triangular test mesh

        Determine the SPHARA basis with FEM discretization for a
        triangular mesh with 118 vertices. The valid basis vectors of
        the SPHARA basis can point in opposite directions
        (multiplication by -1). To compare the calculated basis with
        the reference basis, the transposed matrix of the calculated
        basis is multiplied by the matrix of the reference basis. If
        the calculated basis is correct, then the result matrix of the
        matrix multiplication contains only the elements 1 and -1 at
        the main diagonal, all other elements of the matrix are 0 or
        very small.

        """
        testtrimesh = tm.TriMesh(self.testdatatriangles,
                                 self.testdatavertices)
        sb_fem = sb.SpharaBasis(testtrimesh, mode='fem')
        sb_fem_fun, sb_fem_freq = sb_fem.basis()
        self.assertTrue(
            np.allclose(np.absolute(np.matmul(np.matmul
                                    (np.transpose(sb_fem_fun),
                                     self.testdatamassmatrix),
                                     self.testdataspharabasisfemweight)),
                        np.identity(np.size(sb_fem_freq)))
            and
            np.allclose(sb_fem_freq, self.testdataspharanatfreqfemweight)
        ) 
Example 28
Project: good-semi-bad-gan   Author: christiancosgrove   File: cifar10_data.py    MIT License
def unpickle(file):
    fo = open(file, 'rb')
    d = pickle.load(fo, encoding="bytes")
    fo.close()
    # nhwc
    # return {'x': np.cast[np.float32]((-127.5 + np.transpose(d[b'data'].reshape((10000,3,32,32)), (0,2,3,1)))/128.), 'y': np.array(d[b'labels']).astype(np.uint8)}
    # nchw
    return {'x': np.cast[np.float32]((-127.5 + d[b'data'].reshape((10000,3,32,32)))/128.), 'y': np.array(d[b'labels']).astype(np.uint8)}

#load cifar 
Example 29
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: icedataset.py    MIT License
def __call__(self, sample):
        image, labels = sample['image'], sample['labels']
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        # image = image.transpose((2, 0, 1))
        image = image.astype(float) / 255
        return {'image': torch.from_numpy(image.copy()).float(),
                'labels': torch.from_numpy(labels).float()
                } 
Example 30
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: icedataset.py    MIT License
def __call__(self, sample):
        image, labels = sample['image'], sample['labels']
        if random.random() < 0.7:
            image = np.transpose(image, 0)
        return {'image': image, 'labels': labels} 
Example 31
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def lid_term(logits, batch_size=100):
    """Calculate LID loss term for a minibatch of logits

    :param logits: 
    :return: 
    """
    # y_pred = tf.nn.softmax(logits)
    y_pred = logits

    # calculate pairwise distance
    r = tf.reduce_sum(y_pred * y_pred, 1)
    # turn r into column vector
    r1 = tf.reshape(r, [-1, 1])
    D = r1 - 2 * tf.matmul(y_pred, tf.transpose(y_pred)) + tf.transpose(r1) + \
        tf.ones([batch_size, batch_size])

    # find the k nearest neighbor
    D1 = -tf.sqrt(D)
    D2, _ = tf.nn.top_k(D1, k=21, sorted=True)
    D3 = -D2[:, 1:]

    m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
    v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1)  # to avoid nan
    lids = -20 / v_log

    ## batch normalize lids
    # lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)

    return lids 
Example 32
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License
def lid_adv_term(clean_logits, adv_logits, batch_size=100):
    """Calculate LID loss term for a minibatch of advs logits

    :param clean_logits: clean logits
    :param adv_logits: adversarial logits
    :return: 
    """
    # y_pred = tf.nn.softmax(logits)
    c_pred = tf.reshape(clean_logits, (batch_size, -1))
    a_pred = tf.reshape(adv_logits, (batch_size, -1))

    # calculate pairwise distance
    r = tf.reduce_sum(c_pred * a_pred, 1)
    # turn r into column vector
    r1 = tf.reshape(r, [-1, 1])
    D = r1 - 2 * tf.matmul(c_pred, tf.transpose(a_pred)) + tf.transpose(r1) + \
        tf.ones([batch_size, batch_size])

    # find the k nearest neighbor
    D1 = -tf.sqrt(D)
    D2, _ = tf.nn.top_k(D1, k=21, sorted=True)
    D3 = -D2[:, 1:]

    m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
    v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1)  # to avoid nan
    lids = -20 / v_log

    ## batch normalize lids
    lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)

    return lids 
Example 33
Project: programsynthesishunting   Author: flexgp   File: save_plots.py    GNU General Public License v3.0
def save_box_plot(data, names, title):
    """
    Given an array of some data, and a list of names of that data, generate
    and save a box plot of that data.

    :param data: An array of some data to be plotted.
    :param names: A list of names of that data.
    :param title: The title of the plot.
    :return: Nothing
    """

    from algorithm.parameters import params

    import matplotlib.pyplot as plt
    plt.rc('font', family='Times New Roman')

    # Set up the figure.
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)

    # Plot tight layout.
    plt.tight_layout()

    # Plot the data.
    ax1.boxplot(np.transpose(data), 1)

    # Plot title.
    plt.title(title)

    # Generate list of numbers for plotting names.
    nums = list(range(len(data))[1:]) + [len(data)]

    # Plot names for each data point.
    plt.xticks(nums, names, rotation='vertical', fontsize=8)

    # Save plot.
    plt.savefig(path.join(params['FILE_PATH'], (title + '.pdf')))

    # Close plot.
    plt.close() 
Example 34
Project: Random-Erasing   Author: zhunzhong07   File: visualize.py    Apache License 2.0
def make_image(img, mean=(0,0,0), std=(1,1,1)):
    for i in range(0, 3):
        img[i] = img[i] * std[i] + mean[i]    # unnormalize
    npimg = img.numpy()
    return np.transpose(npimg, (1, 2, 0)) 
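np.transpose(npimg, (1, 2, 0)) is the usual CHW-to-HWC conversion before handing an image to matplotlib. A minimal sketch with a random array standing in for the tensor:

import numpy as np

chw = np.random.rand(3, 32, 32)              # channels-first image
hwc = np.transpose(chw, (1, 2, 0))           # channels-last, shape (32, 32, 3)
assert hwc.shape == (32, 32, 3)
assert np.array_equal(hwc[..., 0], chw[0])   # channel 0 is preserved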
Example 35
Project: DJFeet   Author: libre-man   File: pickers.py    MIT License
def distance(self, song_q, song_p, weights=None):
        """Calculate the distance between two MFCCs.

        This is done based on this paper:
        http://cs229.stanford.edu/proj2009/RajaniEkkizogloy.pdf

        :param str song_q: The first song used.
        :param str song_p: The second song used.
        :param numpy.array weights: The weights vector to use.
        :returns: The distance between the two given songs. This distance is
                  symmetric.
        :rtype: float
        """
        if weights is None:
            weights = self.weights

        def kl(p, q):
            cov_p = self.covariance(p, weights)
            cov_q = self.covariance(q, weights)
            cov_q_inv = numpy.linalg.inv(cov_q)
            m_p = self.song_properties[p][1]
            m_q = self.song_properties[q][1]
            d = cov_p.shape[0]
            return (
                numpy.log(numpy.linalg.det(cov_q) / numpy.linalg.det(cov_p)) +
                numpy.trace(numpy.dot(cov_q_inv, cov_p)) + numpy.dot(
                    numpy.transpose(m_p - m_q),
                    numpy.dot(cov_q_inv, (m_p - m_q))) - d) / 2

        return (kl(song_q, song_p) + kl(song_p, song_q)) / 2 
Example 36
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: bucket_io.py    Apache License 2.0
def make_data_iter_plan(self):
        "make a random data iteration plan"
        # truncate each bucket into multiple of batch-size
        bucket_n_batches = []
        for i in range(len(self.data)):
            bucket_n_batches.append(np.floor(len(self.data[i]) / self.batch_size))
            self.data[i] = self.data[i][:int(bucket_n_batches[i]*self.batch_size)]

        bucket_plan = np.hstack([np.zeros(n, int)+i for i, n in enumerate(bucket_n_batches)])
        np.random.shuffle(bucket_plan)

        bucket_idx_all = [np.random.permutation(len(x)) for x in self.data]

        self.bucket_plan = bucket_plan
        self.bucket_idx_all = bucket_idx_all
        self.bucket_curr_idx = [0 for x in self.data]

        self.data_buffer = []
        self.label_buffer = []
        for i_bucket in range(len(self.data)):
            if not self.model_parallel:
                data = np.zeros((self.batch_size, self.buckets[i_bucket]))
                label = np.zeros((self.batch_size, self.buckets[i_bucket]))
                self.data_buffer.append(data)
                self.label_buffer.append(label)
            else:
                data = np.zeros((self.buckets[i_bucket], self.batch_size))
                self.data_buffer.append(data)

        if self.model_parallel:
            # Transpose data if model parallel
            for i in range(len(self.data)):
                bucket_data = self.data[i]
                self.data[i] = np.transpose(bucket_data) 
Example 37
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: stt_utils.py    Apache License 2.0
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
                          eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
    """ Calculate the log of linear spectrogram from FFT energy
    Params:
        filename (str): Path to the audio file
        step (int): Step size in milliseconds between windows
        window (int): FFT window size in milliseconds
        max_freq (int): Only FFT bins corresponding to frequencies between
            [0, max_freq] are returned
        eps (float): Small value to ensure numerical stability (for ln(x))
    """

    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype='float32')
            sample_rate = sound_file.samplerate
            if audio.ndim >= 2:
                audio = np.mean(audio, 1)
            if max_freq is None:
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)

            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)

            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename) 
Example 38
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: VAE.py    Apache License 2.0
def encoder(model, x):
        params = model.arg_params
        encoder_n = np.shape(params['encoder_h_bias'].asnumpy())[0]
        encoder_h = np.dot(params['encoder_h_weight'].asnumpy(), np.transpose(x)) \
                    + np.reshape(params['encoder_h_bias'].asnumpy(), (encoder_n,1))
        act_h = np.tanh(encoder_h)
        mu = np.transpose(np.dot(params['mu_weight'].asnumpy(),act_h)) + params['mu_bias'].asnumpy()
        logvar = np.transpose(np.dot(params['logvar_weight'].asnumpy(),act_h)) + params['logvar_bias'].asnumpy()
        return mu,logvar 
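The np.transpose calls around np.dot here use the identity (W·Aᵀ)ᵀ = A·Wᵀ: transposing the input and the result lets a weight matrix that acts on column vectors be applied to row-wise data. A small check with random shapes (names are illustrative, not the model's):

import numpy as np

W = np.random.rand(4, 8)     # weights: (hidden, input)
x = np.random.rand(10, 8)    # batch of 10 row-vector inputs

h1 = np.transpose(np.dot(W, np.transpose(x)))   # shape (10, 4)
h2 = np.dot(x, W.T)                             # same result without explicit transposes
assert np.allclose(h1, h2)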
Example 39
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: VAE.py    Apache License 2.0
def decoder(model, z):
        params = model.arg_params
        decoder_n = np.shape(params['decoder_z_bias'].asnumpy())[0]
        decoder_z = np.dot(params['decoder_z_weight'].asnumpy(),np.transpose(z)) \
                    + np.reshape(params['decoder_z_bias'].asnumpy(),(decoder_n,1))
        act_z = np.tanh(decoder_z)
        decoder_x = np.transpose(np.dot(params['decoder_x_weight'].asnumpy(),act_z)) + params['decoder_x_bias'].asnumpy()
        reconstructed_x = 1/(1+np.exp(-decoder_x))
        return reconstructed_x 
Example 40
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0
def test_transpose():
    for ndim in range(1, 7):
        for t in range(5):
            dims = list(np.random.randint(1, 10, size=ndim))
            axes = list(range(ndim))
            random.shuffle(axes)
            axes = tuple(axes)
            x = mx.nd.array(np.random.normal(size=dims))
            y = mx.nd.transpose(x, axes=axes)
            assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())

            y = mx.nd.transpose(x)
            assert_allclose(np.transpose(x.asnumpy()), y.asnumpy()) 
Example 41
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0
def _make_symm_symbol(a, ndims):
    assert ndims >= 2
    tr_shape = list(range(ndims))
    tr_shape[-1] = ndims-2
    tr_shape[-2] = ndims-1
    tr_shape = tuple(tr_shape)
    return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape)) 
Example 42
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0
def _gelqf_combined_symbol(a):
    q, l = mx.sym.linalg.gelqf(a)
    q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
    l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
    return mx.sym.Group([q_qt, l_q])

# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying! 
Example 43
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_gluon_data_vision.py    Apache License 2.0
def test_to_tensor():
    data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
    out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
    assert_almost_equal(out_nd.asnumpy(), np.transpose(
        data_in.astype(dtype=np.float32) / 255.0, (2, 0, 1))) 
Example 44
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: mxnet_export_test.py    Apache License 2.0
def test_spacetodepth():
    n, c, h, w = shape = (1, 1, 4, 6)
    input1 = np.random.rand(n, c, h, w).astype("float32")
    blocksize = 2
    inputs = [helper.make_tensor_value_info("input1", TensorProto.FLOAT, shape=shape)]

    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=(1, 4, 2, 3))]

    nodes = [helper.make_node("SpaceToDepth", ["input1"], ["output"], block_size=blocksize)]

    graph = helper.make_graph(nodes,
                              "spacetodepth_test",
                              inputs,
                              outputs)

    spacetodepth_model = helper.make_model(graph)

    bkd_rep = backend.prepare(spacetodepth_model)
    output = bkd_rep.run([input1])

    tmp = np.reshape(input1, [n, c,
                    h // blocksize, blocksize,
                    w // blocksize, blocksize])
    tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
    numpy_op = np.reshape(tmp, [n, c * (blocksize**2),
                    h // blocksize,
                    w // blocksize])

    npt.assert_almost_equal(output[0], numpy_op) 
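The reshape/transpose/reshape at the end is a pure-NumPy space-to-depth: each blocksize x blocksize spatial patch is moved into the channel dimension. A quick sanity check of the shapes, assuming the same sizes as the test:

import numpy as np

n, c, h, w, blocksize = 1, 1, 4, 6, 2
x = np.random.rand(n, c, h, w).astype("float32")

tmp = np.reshape(x, [n, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
out = np.reshape(tmp, [n, c * blocksize ** 2, h // blocksize, w // blocksize])

assert out.shape == (1, 4, 2, 3)
# the top-left 2x2 patch of x is spread across the 4 channels at spatial position (0, 0)
assert np.allclose(out[0, :, 0, 0], x[0, 0, :2, :2].ravel())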
Example 45
Project: Multi-modal-learning   Author: vanya2v   File: infer.py    Apache License 2.0
def infer(args):
    s = tf.Session()

    filenames = pd.read_csv(args.csv, dtype=str).as_matrix()

    inputs, outputs = ResNetFCN.load(args.model_path, s)

    r = reader.OrgansReader([tf.float32, tf.int32],[[None, None, None, 1], [None, None, None]]) #,name='val_queue')

    
    for f in filenames:

        x, y = r._read_sample([f], is_training=False)

        sw = SlidingWindow(x.shape[1:4], [64, 64, 64], striding=[64, 64, 64])

        # Allocate the prediction output and a counter for averaging probabilities
        y_prob = np.zeros(y.shape + (num_classes,))
        y_pred_count = np.zeros_like(y_prob)
        for slicer in sw:
            y_sw = s.run(outputs['y_prob'], feed_dict={inputs[0]: x[slicer]})
            y_prob[slicer] += y_sw
            y_pred_count[slicer] += 1

        y_prob /= y_pred_count
        
        y_ = np.argmax(y_prob, axis=-1)

        dscs = metrics.dice(y_, y, num_classes)
        
        print(f[0] + ';  mean DSC = {:.3f}\n\t'.format(np.mean(dscs[1:]))
              + ', '.join(['DSC {}: {:.3f}'.format(i, dsc) for i, dsc in enumerate(dscs)]))

        y_ = np.squeeze (y_, axis = 0)

        itk_prediction = sitk.GetImageFromArray(y_)
        ds = np.transpose(dscs)
        DSC_all.append(ds)

    np.save('DSC_MR.npy', DSC_all) 
Example 46
Project: DOTA_models   Author: ringringyi   File: synthetic_data_utils.py    Apache License 2.0
def nparray_and_transpose(data_a_b_c):
  """Convert the list of items in data to a numpy array, and transpose it
  Args:
    data: data_asbsc: a nested, nested list of length a, with sublist length
      b, with sublist length c.
  Returns:
    a numpy 3-tensor with dimensions a x c x b
"""
  data_axbxc = np.array([datum_b_c for datum_b_c in data_a_b_c])
  data_axcxb = np.transpose(data_axbxc, axes=[0,2,1])
  return data_axcxb 
Example 47
Project: DOTA_models   Author: ringringyi   File: gen_synthetic_single.py    Apache License 2.0
def GenerateSample(filename, code_shape, layer_depth):
  # {0, +1} binary codes.
  # No conversion since the output file is expected to store
  # codes using {0, +1} codes (and not {-1, +1}).
  code = synthetic_model.GenerateSingleCode(code_shape)
  code = np.round(code)

  # Reformat the code so as to be compatible with what is generated
  # by the image encoder.
  # The image encoder generates a tensor of size:
  # iteration_count x batch_size x height x width x iteration_depth.
  # Here: batch_size = 1
  if code_shape[-1] % layer_depth != 0:
    raise ValueError('Number of layers is not an integer')
  height = code_shape[0]
  width = code_shape[1]
  code = code.reshape([1, height, width, -1, layer_depth])
  code = np.transpose(code, [3, 0, 1, 2, 4])

  int_codes = code.astype(np.int8)
  exported_codes = np.packbits(int_codes.reshape(-1))

  output = io.BytesIO()
  np.savez_compressed(output, shape=int_codes.shape, codes=exported_codes)
  with tf.gfile.FastGFile(filename, 'wb') as code_file:
    code_file.write(output.getvalue()) 
Example 48
Project: dssmplay   Author: lightning-huang   File: dssm_v3.py    GNU General Public License v3.0
def pull_batch(query_data, doc_data, batch_idx):
    # start = time.time()
    query_in = query_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    doc_in = doc_data[batch_idx * BS:(batch_idx + 1) * BS, :]
    
    if batch_idx == 0:
      print(query_in.getrow(53))
    query_in = query_in.tocoo()
    doc_in = doc_in.tocoo()
    
    

    query_in = tf.SparseTensorValue(
        np.transpose([np.array(query_in.row, dtype=np.int64), np.array(query_in.col, dtype=np.int64)]),
        np.array(query_in.data, dtype=np.float),
        np.array(query_in.shape, dtype=np.int64))
    doc_in = tf.SparseTensorValue(
        np.transpose([np.array(doc_in.row, dtype=np.int64), np.array(doc_in.col, dtype=np.int64)]),
        np.array(doc_in.data, dtype=np.float),
        np.array(doc_in.shape, dtype=np.int64))
    print("pulled_query_in.shape:%s"%(query_in.dense_shape))
    print("pulled_doc_in.shape:%s"%(doc_in.dense_shape))

    # end = time.time()
    # print("Pull_batch time: %f" % (end - start))

    return query_in, doc_in 
Example 49
Project: gullikson-scripts   Author: kgullikson88   File: DataStructures.py    MIT License
def output(self, outfilename):
        np.savetxt(outfilename, np.transpose((self.x, self.y, self.cont, self.err))) 
Example 50
Project: gullikson-scripts   Author: kgullikson88   File: DataStructures.py    MIT License
def toarray(self, norm=False):
        """
        Turns the data structure into a multidimensional np array
        If norm == True, it will have shape (self.size(), 2) and the y
          axis will be divided by the continuum axis
        Otherwise, it will have shape (self.size(), 4)
        """
        if norm:
            return np.array((self.x, self.y / self.cont)).transpose()
        else:
            return np.array((self.x, self.y, self.cont, self.err)).transpose() 
Example 51
Project: ARPET   Author: juliagarriga   File: kalman_filter.py    MIT License
def _predict(self):
        
        self.x = np.matmul(self.F, self.x)
        self.P = np.matmul(np.matmul(self.F, self.P), np.transpose(self.F)) + self.Q 
Example 52
Project: ARPET   Author: juliagarriga   File: kalman_filter.py    MIT License
def _update(self, z):
        
        y = z - np.matmul(self.H, self.x)
        S = np.matmul(np.matmul(self.H, self.P), np.transpose(self.H)) + self.R
        K = np.matmul(np.matmul(self.P, np.transpose(self.H)), np.linalg.inv(S))
        
        self.x = self.x + np.matmul(K, y)
        
        tmp = np.eye(4) - np.matmul(K, self.H)
        self.P = np.matmul(np.matmul(tmp, self.P), np.transpose(tmp))
        self.P = self.P + np.matmul(np.matmul(K, self.R), np.transpose(K)) 
Example 53
Project: Manga-colorization---cycle-gan   Author: OValery16   File: util.py    Mozilla Public License 2.0
def tensor2im(input_image, imtype=np.uint8):
    if isinstance(input_image, torch.Tensor):
        image_tensor = input_image.data
    else:
        return input_image
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype) 
Example 54
Project: SSGAN-Tensorflow   Author: clvrai   File: download.py    MIT License
def download_svhn(download_path):
    data_dir = os.path.join(download_path, 'svhn')

    import scipy.io as sio
    # svhn file loader
    def svhn_loader(url, path):
        cmd = ['curl', url, '-o', path]
        subprocess.call(cmd)
        m = sio.loadmat(path)
        return m['X'], m['y']

    if check_file(data_dir):
        print('SVHN was downloaded.')
        return

    data_url = 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat'
    train_image, train_label = svhn_loader(data_url, os.path.join(data_dir, 'train_32x32.mat'))

    data_url = 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat'
    test_image, test_label = svhn_loader(data_url, os.path.join(data_dir, 'test_32x32.mat'))

    prepare_h5py(np.transpose(train_image, (3, 0, 1, 2)), train_label,
                 np.transpose(test_image, (3, 0, 1, 2)), test_label, data_dir)

    cmd = ['rm', '-f', os.path.join(data_dir, '*.mat')]
    subprocess.call(cmd) 
Example 55
Project: kitti-object-eval-python   Author: traveller59   File: kitti_common.py    MIT License
def intersection(boxes1, boxes2, add1=False):
    """Compute pairwise intersection areas between boxes.

    Args:
        boxes1: a numpy array with shape [N, 4] holding N boxes
        boxes2: a numpy array with shape [M, 4] holding M boxes

    Returns:
        a numpy array with shape [N*M] representing pairwise intersection area
    """
    [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
    [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)

    all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
    all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
    if add1:
        all_pairs_min_ymax += 1.0
    intersect_heights = np.maximum(
        np.zeros(all_pairs_max_ymin.shape),
        all_pairs_min_ymax - all_pairs_max_ymin)

    all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
    all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
    if add1:
        all_pairs_min_xmax += 1.0
    intersect_widths = np.maximum(
        np.zeros(all_pairs_max_xmin.shape),
        all_pairs_min_xmax - all_pairs_max_xmin)
    return intersect_heights * intersect_widths 
Example 56
Project: MotionGAN   Author: magnux   File: npangles.py    MIT License
def rotmat_to_quaternion(R):
    """
    Converts a rotation matrix to a quaternion
    Tensorization of code in:
    https://github.com/una-dinosauria/human-motion-prediction/blob/master/src/data_utils.py
    Args:
      R: (..., 3, 3) rotation matrix Tensor
    Returns:
      q: (..., 4) quaternion Tensor
    """
    trans_dims = list(range(len(R.shape)))  # list(): range objects are immutable in Python 3
    trans_dims[-1], trans_dims[-2] = trans_dims[-2], trans_dims[-1]
    rotdiff = R - np.transpose(R, trans_dims)

    r = np.stack([-rotdiff[..., 1, 2], rotdiff[..., 0, 2], -rotdiff[..., 0, 1]], axis=-1)
    rnorm = np.sqrt(np.sum(np.square(r), axis=-1, keepdims=True) + 1e-8)
    sintheta = rnorm / 2.0
    r0 = r / rnorm

    costheta = np.expand_dims((np.trace(R) - 1.0) / 2.0, axis=-1)

    theta = np.arctan2(sintheta, costheta)

    q = np.concatenate([np.cos(theta / 2),  r0 * np.sin(theta / 2)], axis=-1)

    return q 
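np.transpose(R, trans_dims) with the last two axes swapped is a batched matrix transpose, so rotdiff = R - np.transpose(R, trans_dims) is the skew-symmetric part of every matrix in the batch. A small sketch with a random batch standing in for the rotation matrices:

import numpy as np

R = np.random.rand(5, 3, 3)                  # batch of 5 matrices
trans_dims = list(range(R.ndim))
trans_dims[-1], trans_dims[-2] = trans_dims[-2], trans_dims[-1]

Rt = np.transpose(R, trans_dims)             # transposes each 3x3 matrix
assert np.allclose(Rt[2], R[2].T)

rotdiff = R - Rt                             # skew-symmetric per matrix
assert np.allclose(rotdiff, -np.transpose(rotdiff, trans_dims))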
Example 57
Project: MotionGAN   Author: magnux   File: npangles.py    MIT License
def expmap_to_rotmat(r):
    """
    Converts an exponential map angle to a rotation matrix
    Tensorization of code in:
    https://github.com/una-dinosauria/human-motion-prediction/blob/master/src/data_utils.py
    Args:
      r: (..., 3) exponential map Tensor
    Returns:
      R: (..., 3, 3) rotation matrix Tensor
    """
    base_shape = [int(d) for d in r.shape][:-1]
    zero_dim = np.zeros(base_shape)

    theta = np.sqrt(np.sum(np.square(r), axis=-1, keepdims=True) + 1e-8)
    r0 = r / theta

    r0x = np.reshape(
        np.stack([zero_dim, -1.0 * r0[..., 2], r0[..., 1],
                  zero_dim, zero_dim, -1.0 * r0[..., 0],
                  zero_dim, zero_dim, zero_dim], axis=-1),
        base_shape + [3, 3]
    )
    trans_dims = list(range(len(r0x.shape)))  # list(): range objects are immutable in Python 3
    trans_dims[-1], trans_dims[-2] = trans_dims[-2], trans_dims[-1]
    r0x = r0x - np.transpose(r0x, trans_dims)

    tile_eye = np.tile(np.reshape(np.eye(3), [1 for _ in base_shape] + [3, 3]), base_shape + [1, 1])
    theta = np.expand_dims(theta, axis=-1)

    R = tile_eye + np.sin(theta) * r0x + (1.0 - np.cos(theta)) * np.matmul(r0x, r0x)
    return R 
Example 58
Project: MotionGAN   Author: magnux   File: test.py    MIT License
def flat_edm(x):
            idxs = np.triu_indices(x.shape[1], k=1)
            x_edm = edm(x)
            x_edm = x_edm[:, idxs[0], idxs[1], :]
            x_edm = np.transpose(np.squeeze(x_edm, 0), (1, 0))
            return x_edm 
Example 59
Project: MotionGAN   Author: magnux   File: test.py    MIT License 5 votes vote down vote up
def prepare_expmap(expmap):
                expmap = subsample(expmap)
                expmap = expmap.reshape((expmap.shape[0], 33, 3))
                ex_std = expmap.std(0)
                dim_to_use = np.where((ex_std >= 1e-4).all(axis=-1))[0]
                expmap = expmap[:, dim_to_use, :]
                expmap = expmap.transpose((1, 0, 2))
                return expmap 
Example 60
Project: pidforest   Author: vatsalsharan   File: subcube.py    MIT License 5 votes vote down vote up
def __str__(self):
        str_val = "Id: " + str(self.id_string) + ", Axis: " + str(self.sp_axis) + "\n"
        str_val += "Boundary: "
        for i in range(self.dim):
            str_val += " [" + str(self.start[i]) + ", " + str(self.end[i]) + "]"
            if i < self.dim - 1:
                str_val += " x"
            else:
                str_val += "\n"
        str_val += "Points:\n " + str(np.transpose(self.points)) + "\n"
        str_val += "Indices: " + str(self.indices) + "\n"
        return str_val 
Example 61
Project: pidforest   Author: vatsalsharan   File: forest.py    MIT License 5 votes vote down vote up
def __str__(self):
        str_val = "Id: " + str(self.id_string) + "\n"
        str_val += "Boundary: "
        for i in range(self.cube.dim):
            str_val += " [" + str(self.cube.start[i]) + ", " + str(self.cube.end[i]) + "]"
            if i < self.cube.dim - 1:
                str_val += " x"
            else:
                str_val += "\n"
        str_val += "Points:\n " + str(np.transpose(self.point_set.points)) + "\n"
        str_val += "Indices: " + str(self.point_set.indices) + "\n"
        return str_val 
Example 62
Project: pidforest   Author: vatsalsharan   File: myforest.py    MIT License 5 votes vote down vote up
def split_points(self, arr, indices):
        if not self.child:
            return arr, indices
        n_child = len(self.child)
        if arr.size == 0:
            return [[] for _ in range(n_child)], [[] for _ in range(n_child)]
        _, n_arr = np.shape(arr)
        assert n_arr == len(indices)
        s_arr = arr[self.split_axis]
        s_start = self.start[self.split_axis]
        s_end = self.end[self.split_axis]
        pts_split = [[] for _ in range(n_child)]
        index_split = [[] for _ in range(n_child)]  # type: List[List[int]]
        for i in range(n_arr):
            if (s_arr[i] >= s_start) and (s_arr[i] < self.split_vals[0]):
                j = 0
            elif (s_arr[i] < s_end) and (s_arr[i] >= self.split_vals[-1]):
                j = n_child - 1
            else:
                for k in range(1, n_child - 1):
                    if (s_arr[i] >= self.split_vals[k - 1]) and (s_arr[i] < self.split_vals[k]):
                        j = k
                        break
            pts_split[j].append(arr[:, i])
            index_split[j].append(indices[i])
        # Transpose the arrays to get the shape of the form (dim, *)
        for j in range(n_child):
            pts_split[j] = np.transpose(pts_split[j])
        return pts_split, index_split 
Example 63
Project: pidforest   Author: vatsalsharan   File: myforest.py    MIT License 5 votes vote down vote up
def __str__(self):
        str_val = "Id: " + str(self.id_string) + "\n"
        str_val += "Boundary: "
        for i in range(self.cube.dim):
            str_val += " [" + str(self.cube.start[i]) + ", " + str(self.cube.end[i]) + "]"
            if i < self.cube.dim - 1:
                str_val += " x"
            else:
                str_val += "\n"
        str_val += "Points:\n " + str(np.transpose(self.point_set.points)) + "\n"
        str_val += "Indices: " + str(self.point_set.indices) + "\n"
        return str_val 
Example 64
Project: pidforest   Author: vatsalsharan   File: old_forest.py    MIT License 5 votes vote down vote up
def split_points(self, arr, indices):
        if not self.child:
            return arr, indices
        n_child = len(self.child)
        if arr.size == 0:
            return [[] for _ in range(n_child)], [[] for _ in range(n_child)]
        _, n_arr = np.shape(arr)
        assert n_arr == len(indices)
        s_arr = arr[self.split_axis]
        s_start = self.start[self.split_axis]
        s_end = self.end[self.split_axis]
        pts_split = [[] for _ in range(n_child)]
        index_split = [[] for _ in range(n_child)]  # type: List[List[int]]
        for i in range(n_arr):
            if (s_arr[i] >= s_start) and (s_arr[i] < self.split_vals[0]):
                j = 0
            elif (s_arr[i] < s_end) and (s_arr[i] >= self.split_vals[-1]):
                j = n_child - 1
            else:
                for k in range(1, n_child - 1):
                    if (s_arr[i] >= self.split_vals[k - 1]) and (s_arr[i] < self.split_vals[k]):
                        j = k
                        break
            pts_split[j].append(arr[:, i])
            index_split[j].append(indices[i])
        # Transpose the arrays to get the shape of the form (dim, *)
        for j in range(n_child):
            pts_split[j] = np.transpose(pts_split[j])
        return pts_split, index_split 
Example 65
Project: lung_nodule_classifier   Author: xairc   File: data.py    MIT License 5 votes vote down vote up
def augment(sample, target, ifflip = True, ifrotate=True, ifswap = True):
    #                     angle1 = np.random.rand()*180

    if ifrotate:
        validrot = False
        counter = 0
        while not validrot:
            newtarget = np.copy(target)
            angle1 = np.random.rand()*180
            size = np.array(sample.shape[2:4]).astype('float')
            rotmat = np.array([[np.cos(angle1/180*np.pi),-np.sin(angle1/180*np.pi)],[np.sin(angle1/180*np.pi),np.cos(angle1/180*np.pi)]])
            newtarget[1:3] = np.dot(rotmat,target[1:3]-size/2)+size/2
            if np.all(newtarget[:3]>target[3]) and np.all(newtarget[:3]< np.array(sample.shape[1:4])-newtarget[3]):
                validrot = True
                target = newtarget
                sample = rotate(sample,angle1,axes=(2,3),reshape=False)
            else:
                counter += 1
                if counter ==3:
                    break
    if ifswap:
        if sample.shape[1]==sample.shape[2] and sample.shape[1]==sample.shape[3]:
            axisorder = np.random.permutation(3)
            sample = np.transpose(sample,np.concatenate([[0],axisorder+1]))
            target[:3] = target[:3][axisorder]

    if ifflip:
#         flipid = np.array([np.random.randint(2),np.random.randint(2),np.random.randint(2)])*2-1
        flipid = np.array([1,np.random.randint(2),np.random.randint(2)])*2-1
        sample = np.ascontiguousarray(sample[:,::flipid[0],::flipid[1],::flipid[2]])
        for ax in range(3):
            if flipid[ax]==-1:
                target[ax] = np.array(sample.shape[ax+1])-target[ax]

    return sample, target 
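
The ifswap branch is a random spatial-axis permutation: np.random.permutation(3) shuffles the three spatial axes, and np.transpose applies that shuffle while keeping the channel axis in place. A minimal sketch on a hypothetical cubic volume (not the project's CT data):

import numpy as np

sample = np.random.rand(1, 32, 32, 32)                   # (C, D, H, W), cubic spatial dims
axisorder = np.random.permutation(3)                     # e.g. [2, 0, 1]
swapped = np.transpose(sample, np.concatenate([[0], axisorder + 1]))
print(axisorder, swapped.shape)                          # shape stays (1, 32, 32, 32)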
Example 66
Project: denoisers   Author: IDKiro   File: utils.py    MIT License 5 votes vote down vote up
def hwc_to_chw(img):
    return np.transpose(img, axes=[2, 0, 1]) 
Example 67
Project: denoisers   Author: IDKiro   File: utils.py    MIT License 5 votes vote down vote up
def chw_to_hwc(img):
    return np.transpose(img, axes=[1, 2, 0]) 
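
The two helpers above are inverses of each other: HWC is the usual image layout, CHW the usual tensor layout. A short round-trip check on a hypothetical RGB image:

import numpy as np

img_hwc = np.random.rand(480, 640, 3)     # height x width x channels
img_chw = hwc_to_chw(img_hwc)             # (3, 480, 640)
restored = chw_to_hwc(img_chw)            # (480, 640, 3)
assert np.array_equal(img_hwc, restored)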
Example 68
Project: kaldi-python-io   Author: funcwj   File: _io_kernel.py    Apache License 2.0 4 votes vote down vote up
def uncompress(cdata, cps_type, head):
    """ 
        In format CM(kOneByteWithColHeaders):
        PerColHeader, ...(x C), ... uint8 sequence ...
            first: get each PerColHeader pch for a single column
            then : using pch to uncompress each float in the column
        We load it seperately at a time 
        In format CM2(kTwoByte):
        ...uint16 sequence...
        In format CM3(kOneByte):
        ...uint8 sequence...
    """
    min_val, prange, num_rows, num_cols = head
    # mat = np.zeros([num_rows, num_cols])
    print_info('\tUncompress to matrix {} X {}'.format(num_rows, num_cols))
    if cps_type == 'CM':
        # checking compressed data size, 8 is the sizeof PerColHeader
        assert len(cdata) == num_cols * (8 + num_rows)
        chead, cmain = cdata[:8 * num_cols], cdata[8 * num_cols:]
        # type uint16
        pch = np.fromstring(chead, dtype=np.uint16).astype(np.float32)
        pch = np.transpose(pch.reshape(num_cols, 4))
        pch = pch * prange / 65535.0 + min_val
        # type uint8
        uint8 = np.fromstring(cmain, dtype=np.uint8).astype(np.float32)
        uint8 = np.transpose(uint8.reshape(num_cols, num_rows))
        # precompute index
        le64_index = uint8 <= 64
        gt192_index = uint8 >= 193
        # le192_index = np.logical_not(np.logical_xor(le64_index, gt192_index))
        return np.where(
            le64_index,
            uint8 * (pch[1] - pch[0]) / 64.0 + pch[0],
            np.where(gt192_index,
                     (uint8 - 192) * (pch[3] - pch[2]) / 63.0 + pch[2],
                     (uint8 - 64) * (pch[2] - pch[1]) / 128.0 + pch[1]))
    else:
        if cps_type == 'CM2':
            inc = float(prange / 65535.0)
            uint_seq = np.fromstring(cdata, dtype=np.uint16).astype(np.float32)
        else:
            inc = float(prange / 255.0)
            uint_seq = np.fromstring(cdata, dtype=np.uint8).astype(np.float32)
        mat = min_val + uint_seq.reshape(num_rows, num_cols) * inc

    return mat 
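
The reshape(num_cols, num_rows) followed by np.transpose reflects the column-major layout of the compressed data: the bytes arrive one column at a time, so reshaping by columns and transposing recovers the row-major matrix. A minimal sketch of that pattern with made-up values (not the actual Kaldi byte format):

import numpy as np

num_rows, num_cols = 2, 3
# Values stored column by column: col0 = [1, 2], col1 = [3, 4], col2 = [5, 6].
raw = np.array([1, 2, 3, 4, 5, 6], dtype=np.uint8)
mat = np.transpose(raw.reshape(num_cols, num_rows))
print(mat)
# [[1 3 5]
#  [2 4 6]]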
Example 69
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def get_data(dataset='mnist'):
    """
    images in [-0.5, 0.5] (instead of [0, 1]), which suits the C&W attack and generally gives better performance
    
    :param dataset:
    :return: 
    """
    assert dataset in ['mnist', 'cifar', 'svhn'], \
        "dataset parameter must be either 'mnist' 'cifar' or 'svhn'"
    if dataset == 'mnist':
        # the data, shuffled and split between train and test sets
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        # reshape to (n_samples, 28, 28, 1)
        X_train = X_train.reshape(-1, 28, 28, 1)
        X_test = X_test.reshape(-1, 28, 28, 1)

    elif dataset == 'cifar':
        # the data, shuffled and split between train and test sets
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    else:
        if not os.path.isfile(os.path.join(PATH_DATA, "svhn_train.mat")):
            print('Downloading SVHN train set...')
            call(
                "curl -o ../data/svhn_train.mat "
                "http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
                shell=True
            )
        if not os.path.isfile(os.path.join(PATH_DATA, "svhn_test.mat")):
            print('Downloading SVHN test set...')
            call(
                "curl -o ../data/svhn_test.mat "
                "http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
                shell=True
            )
        train = sio.loadmat(os.path.join(PATH_DATA,'svhn_train.mat'))
        test = sio.loadmat(os.path.join(PATH_DATA, 'svhn_test.mat'))
        X_train = np.transpose(train['X'], axes=[3, 0, 1, 2])
        X_test = np.transpose(test['X'], axes=[3, 0, 1, 2])
        # reshape (n_samples, 1) to (n_samples,) and change 1-index
        # to 0-index
        y_train = np.reshape(train['y'], (-1,)) - 1
        y_test = np.reshape(test['y'], (-1,)) - 1

    # cast pixels to floats, normalize to [0, 1] range
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train = ((X_train/255.0) - (1.0 - CLIP_MAX))
    X_test = (X_test/255.0) - (1.0 - CLIP_MAX)

    # one-hot-encode the labels
    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)

    print("X_train:", X_train.shape)
    print("Y_train:", Y_train.shape)
    print("X_test:", X_test.shape)
    print("Y_test", Y_test.shape)

    return X_train, Y_train, X_test, Y_test 
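
The SVHN .mat files store images as (H, W, C, N), so np.transpose(..., axes=[3, 0, 1, 2]) moves the sample axis to the front. A shape-only sketch with a dummy array, so nothing has to be downloaded:

import numpy as np

svhn_like = np.zeros((32, 32, 3, 100), dtype=np.uint8)    # (H, W, C, N) as stored on disk
batch_first = np.transpose(svhn_like, axes=[3, 0, 1, 2])
print(batch_first.shape)                                  # (100, 32, 32, 3)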
Example 70
Project: neural-fingerprinting   Author: StephanZheng   File: utils_svhn.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def read_SVHN(data_folder):
    """ Reads and parses examples from SVHN data files """

    train_img = []
    train_label = []
    test_img = []
    test_label = []

    train_file_list = [
        'train_32x32.mat', 'extra_32x32.mat'
    ]
    test_file_list = ["test_32x32.mat"]

    for i in range(len(train_file_list)):
        tmp_dict = sio.loadmat(os.path.join(data_folder, train_file_list[i]))
        train_img.append(tmp_dict["X"])
        train_label.append(tmp_dict["y"])

    tmp_dict = sio.loadmat(
        os.path.join(data_folder, test_file_list[0]))
    test_img.append(tmp_dict["X"])
    test_label.append(tmp_dict["y"])

    train_img = np.concatenate(train_img, axis=-1)
    train_label = np.concatenate(train_label).flatten()
    test_img = np.concatenate(test_img, axis=-1)
    test_label = np.concatenate(test_label).flatten()

    # change format from [H, W, C, B] to [B, H, W, C] for feeding to Tensorflow
    train_img = np.transpose(train_img, [3, 0, 1, 2])
    test_img = np.transpose(test_img, [3, 0, 1, 2])

    mean_img = np.mean(np.concatenate([train_img, test_img]), axis=0)

    train_img = train_img - mean_img
    test_img = test_img - mean_img
    train_y = train_label - 1  # 0-based label
    test_y = test_label - 1    # 0-based label

    train_label = np.eye(10)[train_y]
    test_label = np.eye(10)[test_y]

    return train_img, train_label, test_img, test_label 
Example 71
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 4 votes vote down vote up
def __init__(self, is_training=True):
        # These parameters were introduced in the Data module
        self.classes = cfg.CLASSES
        self.num_class = len(self.classes)
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.boxes_per_cell = cfg.BOXES_PER_CELL
        self.output_size = (self.cell_size * self.cell_size) * (self.num_class + self.boxes_per_cell * 5)
        self.scale = 1.0 * self.image_size / self.cell_size
        self.boundary1 = self.cell_size * self.cell_size * self.num_class
        self.boundary2 = self.boundary1 + self.cell_size * self.cell_size * self.boxes_per_cell

        # These scales weight the individual terms of the loss function so the overall loss stays balanced
        self.object_scale = cfg.OBJECT_SCALE
        self.noobject_scale = cfg.NOOBJECT_SCALE
        self.class_scale = cfg.CLASS_SCALE
        self.coord_scale = cfg.COORD_SCALE

        # base learning rate
        self.learning_rate = cfg.LEARNING_RATE
        # batch size
        self.batch_size = cfg.BATCH_SIZE
        # alpha parameter of the leaky relu
        self.alpha = cfg.ALPHA

        # coordinate offsets
        self.offset = np.transpose(np.reshape(np.array([np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell),
                                              (self.boxes_per_cell, self.cell_size, self.cell_size)), (1, 2, 0))

        # network input
        self.images = tf.placeholder(tf.float32, [None, self.image_size, self.image_size, 3], name='images')
        # network output, (batch_size, 1470)
        self.logits = self.build_network(self.images, num_outputs=self.output_size,
                                         alpha=self.alpha, is_training=is_training)

        # label
        self.labels = tf.placeholder(tf.float32, [None, self.cell_size, self.cell_size, 5 + self.num_class])
        # loss; all the loss terms are collected here
        self.loss_layer(self.logits, self.labels)
        # get the total loss
        self.total_loss = tf.losses.get_total_loss()
        # log the total loss so it can be viewed in tensorboard
        tf.summary.scalar('total_loss', self.total_loss) 
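
The offset tensor built above is a (cell_size, cell_size, boxes_per_cell) grid whose entry [i, j, b] equals j, i.e. the column index of each grid cell; transposing it with (1, 0, 2), as interpret_output below does, gives the row index instead. A small sketch with hypothetical sizes cell_size=3 and boxes_per_cell=2:

import numpy as np

cell_size, boxes_per_cell = 3, 2
offset = np.transpose(
    np.reshape(np.array([np.arange(cell_size)] * cell_size * boxes_per_cell),
               (boxes_per_cell, cell_size, cell_size)),
    (1, 2, 0))

print(offset[:, :, 0])                            # column indices per cell
# [[0 1 2]
#  [0 1 2]
#  [0 1 2]]
print(np.transpose(offset, (1, 0, 2))[:, :, 0])   # row indices after the axis swap
# [[0 0 0]
#  [1 1 1]
#  [2 2 2]]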
Example 72
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 4 votes vote down vote up
def build_network(self, images, num_outputs, alpha, keep_prob=0.5, is_training=True, scope='yolo'):
        """
        :param images: input data
        :param num_outputs: size of the output
        :param alpha: alpha parameter of the leaky relu
        :param keep_prob: dropout keep probability
        :param is_training: keep_prob is 0.5 during training and 1 during testing
        :param scope: variable scope used to organize the tensors
        :return: output of shape (batch_size, 1470)
        """
        with tf.variable_scope(scope):
            # slim.arg_scope() automatically passes the arguments below to every function in the list, which simplifies the code
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                activation_fn=leaky_relu(alpha),
                                weights_regularizer=slim.l2_regularizer(0.0005),
                                weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)):
                net = tf.pad(images, np.array([[0, 0], [3, 3], [3, 3], [0, 0]]), name='pad_1')
                net = slim.conv2d(net, 64, 7, 2, padding='VALID', scope='conv_2')
                net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_3')
                net = slim.conv2d(net, 192, 3, scope='conv_4')
                net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_5')
                net = slim.conv2d(net, 128, 1, scope='conv_6')
                net = slim.conv2d(net, 256, 3, scope='conv_7')
                net = slim.conv2d(net, 256, 1, scope='conv_8')
                net = slim.conv2d(net, 512, 3, scope='conv_9')
                net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_10')
                net = slim.conv2d(net, 256, 1, scope='conv_11')
                net = slim.conv2d(net, 512, 3, scope='conv_12')
                net = slim.conv2d(net, 256, 1, scope='conv_13')
                net = slim.conv2d(net, 512, 3, scope='conv_14')
                net = slim.conv2d(net, 256, 1, scope='conv_15')
                net = slim.conv2d(net, 512, 3, scope='conv_16')
                net = slim.conv2d(net, 256, 1, scope='conv_17')
                net = slim.conv2d(net, 512, 3, scope='conv_18')
                net = slim.conv2d(net, 512, 1, scope='conv_19')
                net = slim.conv2d(net, 1024, 3, scope='conv_20')
                net = slim.max_pool2d(net, 2, padding='SAME', scope='pool_21')
                net = slim.conv2d(net, 512, 1, scope='conv_22')
                net = slim.conv2d(net, 1024, 3, scope='conv_23')
                net = slim.conv2d(net, 512, 1, scope='conv_24')
                net = slim.conv2d(net, 1024, 3, scope='conv_25')
                net = slim.conv2d(net, 1024, 3, scope='conv_26')
                net = tf.pad(net, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]), name='pad_27')
                net = slim.conv2d(net, 1024, 3, 2, padding='VALID', scope='conv_28')
                net = slim.conv2d(net, 1024, 3, scope='conv_29')
                net = slim.conv2d(net, 1024, 3, scope='conv_30')
                net = tf.transpose(net, [0, 3, 1, 2], name='trans_31')
                net = slim.flatten(net, scope='flat_32')
                net = slim.fully_connected(net, 512, scope='fc_33')
                net = slim.fully_connected(net, 4096, scope='fc_34')
                net = slim.dropout(net, keep_prob=keep_prob, is_training=is_training, scope='dropout_35')
                net = slim.fully_connected(net, num_outputs, activation_fn=None, scope='fc_36')
        return net 
Example 73
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 4 votes vote down vote up
def interpret_output(self, output):
        probs = np.zeros((self.cell_size, self.cell_size, self.boxes_per_cell, self.num_class))
        class_probs = np.reshape(output[0:self.boundary1], (self.cell_size, self.cell_size, self.num_class))
        scales = np.reshape(output[self.boundary1:self.boundary2],
                            (self.cell_size, self.cell_size, self.boxes_per_cell))
        boxes = np.reshape(output[self.boundary2:], (self.cell_size, self.cell_size, self.boxes_per_cell, 4))
        offset = np.array([np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell)
        offset = np.transpose(np.reshape(offset, [self.boxes_per_cell, self.cell_size, self.cell_size]), (1, 2, 0))

        boxes[:, :, :, 0] += offset
        boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
        boxes[:, :, :, :2] = 1.0 * boxes[:, :, :, 0:2] / self.cell_size
        boxes[:, :, :, 2:] = np.square(boxes[:, :, :, 2:])

        boxes *= self.image_size

        for i in range(self.boxes_per_cell):
            for j in range(self.num_class):
                probs[:, :, i, j] = np.multiply(class_probs[:, :, j], scales[:, :, i])

        filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0],
                               filter_mat_boxes[1], filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        classes_num_filtered = np.argmax(filter_mat_probs,
                                         axis=3)[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]

        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]

        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0:
                continue
            for j in range(i + 1, len(boxes_filtered)):
                if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
                    probs_filtered[j] = 0.0

        filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]

        result = []
        for i in range(len(boxes_filtered)):
            result.append([self.classes[classes_num_filtered[i]],
                           boxes_filtered[i][0],
                           boxes_filtered[i][1],
                           boxes_filtered[i][2],
                           boxes_filtered[i][3],
                           probs_filtered[i]])
        return result 
Example 74
Project: fuku-ml   Author: fukuball   File: DecisionStump.py    MIT License 4 votes vote down vote up
def train(self):

        if (self.status != 'init'):
            print("Please load train data and init W first.")
            return self.W

        self.status = 'train'

        error_in = 1.0  # start from the worst possible in-sample error

        for i in range(0, self.train_X.shape[1]):

            dim_X = self.train_X[:, i]
            dim_XY = np.transpose(np.array([dim_X, self.train_Y]))
            sort_index = np.argsort(dim_XY[:, 0])
            sort_dim_XY = dim_XY[sort_index]
            sort_u = self.u[sort_index]

            sort_dim_X = sort_dim_XY[:, 0]
            sort_dim_Y = sort_dim_XY[:, 1]

            thetas = np.array([float("-inf")] + [(sort_dim_X[j] + sort_dim_X[j + 1]) / 2 for j in range(0, self.data_num - 1)] + [float("inf")])
            error_in_i = sum(sort_u)
            sign_i = 1
            theta_i = 0.0

            for theta in thetas:
                y_positive = np.where(sort_dim_X > theta, 1, -1)
                y_negative = np.where(sort_dim_X < theta, 1, -1)
                error_positive = sum((y_positive != sort_dim_Y) * sort_u)
                error_negative = sum((y_negative != sort_dim_Y) * sort_u)
                if error_positive > error_negative:
                    if error_in_i > error_negative:
                        error_in_i = error_negative
                        sign_i = -1
                        theta_i = theta
                else:
                    if error_in_i > error_positive:
                        error_in_i = error_positive
                        sign_i = 1
                        theta_i = theta

            if error_in > error_in_i:
                error_in = error_in_i
                self.sign = sign_i
                self.feature_index = i
                self.theta = theta_i

        return self.W 
Example 75
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: dot.py    Apache License 2.0 4 votes vote down vote up
def test_dot_real(data_dict):
    """Dot operator testing with real datasets"""
    data_dir = os.path.join(os.getcwd(), 'data')

    path = os.path.join(data_dir, data_dict['data_name'])
    if not os.path.exists(path):
        get_bz2_data(
            data_dir,
            data_dict['data_name'],
            data_dict['url'],
            data_dict['data_origin_name']
        )
        assert os.path.exists(path)

    k = data_dict['feature_dim']
    m = data_dict['m']
    batch_size_list = data_dict['batch_size']

    default_output_index = data_dict['default_index']['output_dim']
    default_batch_size_index = data_dict['default_index']['batch_size']
    density = estimate_density(path, data_dict['feature_dim'])
    num_batches = data_dict['num_batches']

    assert default_batch_size_index < len(batch_size_list)
    assert default_output_index < len(m)
    if ARGS.verbose:
        print("Running Benchmarking on %r data") % data_dict['data_mini']
    print('{:>15} {:>10} {:>10} {:>10} {:>20} {:>15} {:>15} {:>10} {:>10}'.format('density(%)',
                                                                                 'n',
                                                                                 'm',
                                                                                 'k',
                                                                                 't_dense/t_sparse',
                                                                                 't_dense(ms)',
                                                                                 't_sparse(ms)',
                                                                                 'is_transpose',
                                                                                 'rhs_rsp'))


    for output_dim in m:
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, output_dim, density,
                              batch_size_list[default_batch_size_index], num_batches)
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, output_dim, density,
                              batch_size_list[default_batch_size_index], num_batches,
                              transpose=True)
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, output_dim, density,
                              batch_size_list[default_batch_size_index], num_batches, rsp=True)

    for batch_size in batch_size_list:
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, m[default_output_index], density, batch_size, num_batches)
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, m[default_output_index], density, batch_size, num_batches,
                              transpose=True)
        _compare_sparse_dense(data_dir, data_dict['data_name'], data_dict['data_mini'],
                              k, m[default_output_index], density, batch_size,
                              num_batches, rsp=True)
Example 76
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_prelu():
    def fprelu(x, gamma):
        pos_indices = x > 0
        out = x.copy()
        if len(x.shape) == 4:
            out = out.transpose(2,3,0,1)
            out = np.multiply(out, gamma)
            out = out.transpose(2,3,0,1)
        else:
            out = np.multiply(out, gamma)
        out[pos_indices] = x[pos_indices]
        return out
    def fprelu_grad(x, y, gamma):
        pos_indices = x > 0
        if len(x.shape) == 4:
            grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
            grad_x = grad_x.transpose(2,3,0,1)
        else:
            grad_x = np.multiply(np.ones(x.shape), gamma)
        grad_gam = np.zeros(gamma.shape)
        copy_x = x.copy()
        copy_x[pos_indices] = 0.0
        grad_x[pos_indices] = 1.0
        if len(gamma.shape) > 1 and len(x.shape) != 4:
            grad_gam = copy_x
        elif len(gamma.shape) > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(2,3))
        elif gamma.shape[0] == 1:
            grad_gam = np.sum(np.sum(copy_x))
        elif gamma.shape[0] > 1 and len(x.shape) != 4:
            grad_gam = np.sum(copy_x, axis=0)
        elif gamma.shape[0] > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(0,2,3))
        return (grad_x, grad_gam)
    x = mx.symbol.Variable("x")
    gamma = mx.symbol.Variable("gamma")
    for shape in [(3,4), (3,4,4,5)]:
        for dtype in [np.float16, np.float32, np.float64]:
            for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
                gam_full = np.array([gam, gam, gam])
                xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
                rtol = 1e-2
                atol = 1e-3
                eps = 1e-4
                xa[abs(xa) < eps] = 1.0
                y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
                ya = fprelu(xa, gam)
                ya_full = fprelu(xa, gam_full)
                g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
                g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                    check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
                                        [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype) 
Example 77
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_depthtospace():
    def f(x, blocksize):
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
        tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
        y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
        return y

    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = block * block * rand_mul1
    h = random.randint(1, 5)
    w = random.randint(1, 5)
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.depth_to_space(data, block)
    assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)

    shape_out = (n, c // (block ** 2), h * block, w * block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.depth_to_space(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])

    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])

    def test_invalid_depth_dim():
        invalid_shape_inp = (n, block - 1, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_space_dim():
        invalid_shape_inp = (n, block ** 2, 0, block + 1)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    def test_invalid_block_size():
        block = 0
        invalid_shape_inp = (n , c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)

    test_invalid_depth_dim()
    test_invalid_space_dim()
    test_invalid_block_size() 
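
The reference f above is the standard reshape -> transpose -> reshape formulation of depth-to-space: the channel axis is split into (blocksize, blocksize, new_channels) and the two block axes are interleaved with height and width. A tiny numeric check in plain NumPy (no MXNet needed), using a hypothetical 1x4x1x1 input and blocksize 2:

import numpy as np

def depth_to_space_np(x, blocksize):
    b, c, h, w = x.shape
    tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize ** 2), h, w])
    tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
    return np.reshape(tmp, [b, c // (blocksize ** 2), h * blocksize, w * blocksize])

x = np.arange(4).reshape(1, 4, 1, 1)       # channels [0, 1, 2, 3]
print(depth_to_space_np(x, 2)[0, 0])
# [[0 1]
#  [2 3]]

The space_to_depth test below uses the inverse permutation of the same six axes.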
Example 78
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_spacetodepth():
    def f(x, blocksize):
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
        tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
        y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
        return y

    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    rand_mul2 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = random.randint(1, 5)
    h = block * rand_mul1
    w = block * rand_mul2
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.space_to_depth(data, block)
    assert_almost_equal(output.asnumpy(), expected, atol=1e-3, rtol=1e-3)

    shape_out = (n, c * (block ** 2), h // block, w // block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.space_to_depth(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])

    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])

    def test_invalid_space_dim():
        invalid_shape_inp = (n , c, block - 1, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_block_size():
        block = 0
        invalid_shape_inp = (n, c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    def test_invalid_depth_dim():
        invalid_shape_inp = (n, 0, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)

    test_invalid_space_dim()
    test_invalid_block_size()
    test_invalid_depth_dim() 
Example 79
Project: MotionGAN   Author: magnux   File: data_input.py    MIT License 4 votes vote down vote up
def process_pose(self, pose):
        # Remove nans
        pose[np.isnan(pose)] = 0

        # Trim zero frames
        pose_nz = pose[:, :3, :] != 0
        plen = np.int32(pose.shape[2])
        for f in range(plen):
            if np.any(pose_nz[:, :, f]):
                pose = pose[:, :, f:]
                pose_nz = pose_nz[:, :, f:]
                break

        plen = np.int32(pose.shape[2])
        for f in range(plen-1, 0, -1):
            if np.any(pose_nz[:, :, f]):
                pose = pose[:, :, :f+1]
                break

        plen = np.int32(pose.shape[2])

        # Format tracking state
        if pose.shape[1] > 3:
            pose[:, 3, :] = (pose[:, 3, :] > 0).astype('float32')
        else:
            pose = np.concatenate([pose, np.ones((pose.shape[0], 1, pose.shape[2]))], axis=1)

        # Dataset specific processing
        if self.data_set == 'NTURGBD':
            pose = pose[:25, :, :]  # Warning: only taking first skeleton
            pose[:, :3, :] -= pose[0, np.newaxis, :3, 0, np.newaxis]  # Recentering sequence by hip start position
            pose[:, :3, :] = pose[:, :3, :] * 1.0e3  # Rescale to mm
            pose_1 = pose[:, 1, :].copy()
            pose[:, 1, :] = pose[:, 2, :]  # Swapping Y-Z coords
            pose[:, 2, :] = pose_1
        elif self.data_set == 'MSRC12':
            pose[:, :3, :] -= pose[0, np.newaxis, :3, 0, np.newaxis]  # Recentering sequence by hip start position
            pose[:, :3, :] = pose[:, :3, :] * 1.0e3  # Rescale to mm
            pose_1 = pose[:, 1, :].copy()
            pose[:, 1, :] = pose[:, 2, :]  # Swapping Y-Z coords
            pose[:, 2, :] = pose_1
        elif self.data_set == 'Human36':
            pose = pose[self.used_joints, ...]
            # pose[:, :3, :] = pose[:, :3, :] / 1.0e3 # Rescale to meters
            # pose = pose[:, :, range(0, plen, 2)]  # Subsampling to 25hz
            # pose = pose[:, :, range(0, plen, 10)]  # Subsampling to 5hz
            plen = np.int32(pose.shape[2])
        elif self.data_set == 'Human36_expmaps':
            pose = pose[self.used_joints, ...]
            # pose = pose[:, :, range(0, plen, 2)]  # Subsampling to 25hz
            # pose = pose[:, :, range(0, plen, 10)]  # Subsampling to 5hz
            plen = np.int32(pose.shape[2])
            # pose[:, :3, :] = (pose[:, :3, :] + 90) / 180

        pose = np.transpose(pose, (0, 2, 1))

        return pose, plen 
Example 80
Project: ArtGAN   Author: cs-chan   File: ingest_stl10.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def collectdata(self,):
        print('Start Collect Data...')

        train_x_path = os.path.join(self.input_dir, 'train_X.bin')
        train_y_path = os.path.join(self.input_dir, 'train_y.bin')
        test_x_path = os.path.join(self.input_dir, 'test_X.bin')
        test_y_path = os.path.join(self.input_dir, 'test_y.bin')

        train_xf = open(train_x_path, 'rb')
        train_x = np.fromfile(train_xf, dtype=np.uint8)
        train_x = np.reshape(train_x, (-1, 3, 96, 96))
        train_x = np.transpose(train_x, (0, 3, 2, 1))
        train_yf = open(train_y_path, 'rb')
        train_y = np.fromfile(train_yf, dtype=np.uint8)

        test_xf = open(test_x_path, 'rb')
        test_x = np.fromfile(test_xf, dtype=np.uint8)
        test_x = np.reshape(test_x, (-1, 3, 96, 96))
        test_x = np.transpose(test_x, (0, 3, 2, 1))
        test_yf = open(test_y_path, 'rb')
        test_y = np.fromfile(test_yf, dtype=np.uint8)

        idx = np.zeros(10, dtype=int)
        for i in range(train_x.shape[0]):
            outdir = os.path.join(self.outimgdir, 'train', str(train_y[i]-1))
            if not os.path.exists(outdir):
                os.mkdir(outdir)

            if not self.skipimg:
                transform_and_save(img_arr=train_x[i], output_filename=os.path.join(outdir, str(idx[train_y[i]-1]) + '.jpg'))
            self.trainpairlist[os.path.join('images', 'train', str(train_y[i]-1), str(idx[train_y[i]-1]) + '.jpg')] = \
                os.path.join('labels', str(train_y[i] - 1) + '.txt')
            idx[train_y[i]-1] += 1

        idx = np.zeros(10, dtype=int)
        for i in range(test_x.shape[0]):
            outdir = os.path.join(self.outimgdir, 'val', str(test_y[i]-1))
            if not os.path.exists(outdir):
                os.mkdir(outdir)

            if not self.skipimg:
                transform_and_save(img_arr=test_x[i],
                                   output_filename=os.path.join(outdir, str(idx[test_y[i]-1]) + '.jpg'))
            self.valpairlist[os.path.join('images', 'val', str(test_y[i]-1), str(idx[test_y[i]-1]) + '.jpg')] = \
                os.path.join('labels', str(test_y[i] - 1) + '.txt')
            idx[test_y[i]-1] += 1

        print('Finished Collect Data...')
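
STL-10 binaries store each image as 3x96x96 in column-major order, so the reshape to (-1, 3, 96, 96) followed by np.transpose(..., (0, 3, 2, 1)) both moves the channel axis last and undoes the column-major storage. A shape-only sketch with a dummy buffer:

import numpy as np

raw = np.zeros(2 * 3 * 96 * 96, dtype=np.uint8)            # two dummy STL-10 sized images
imgs = np.transpose(raw.reshape(-1, 3, 96, 96), (0, 3, 2, 1))
print(imgs.shape)                                           # (2, 96, 96, 3)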