Python numpy.concatenate() Examples

The following are 50 code examples showing how to use numpy.concatenate(). They are extracted from open source Python projects.
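
As a quick refresher before the examples (a minimal sketch, not from any of the projects below): numpy.concatenate joins a sequence of arrays along an existing axis, axis 0 by default.

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])
np.concatenate((a, b), axis=0)    # stack rows -> shape (3, 2)
np.concatenate((a, b.T), axis=1)  # join columns -> shape (2, 3)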

Example 1
Project: rca-evaluation   Author: sieve-microservices   File: kshape.py
def roll_zeropad(a, shift, axis=None):
    a = np.asanyarray(a)
    if shift == 0: return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
        res = np.concatenate((a.take(np.arange(n-shift,n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n-shift,n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res 
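
A quick sketch of how roll_zeropad behaves (hypothetical inputs, not part of kshape.py): unlike np.roll, positions shifted in from the edge are zero-filled instead of wrapped.

import numpy as np

x = np.array([1, 2, 3, 4, 5])
roll_zeropad(x, 2)   # -> array([0, 0, 1, 2, 3])
roll_zeropad(x, -2)  # -> array([3, 4, 5, 0, 0])
np.roll(x, 2)        # -> array([4, 5, 1, 2, 3]), wraps around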
Example 2
Project: rca-evaluation   Author: sieve-microservices   File: kshape.py
def _ncc_c(x, y):
    """
    >>> _ncc_c([1,2,3,4], [1,2,3,4])
    array([ 0.13333333,  0.36666667,  0.66666667,  1.        ,  0.66666667,
            0.36666667,  0.13333333])
    >>> _ncc_c([1,1,1], [1,1,1])
    array([ 0.33333333,  0.66666667,  1.        ,  0.66666667,  0.33333333])
    >>> _ncc_c([1,2,3], [-1,-1,-1])
    array([-0.15430335, -0.46291005, -0.9258201 , -0.77151675, -0.46291005])
    """
    den = np.array(norm(x) * norm(y))
    den[den == 0] = np.inf

    x_len = len(x)
    fft_size = 1<<(2*x_len-1).bit_length()
    cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
    cc = np.concatenate((cc[-(x_len-1):], cc[:x_len]))
    return np.real(cc) / den 
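
norm, fft, and ifft are assumed to be in scope here; a plausible set of imports for this snippet (an assumption based on the names, not shown in the extract):

import numpy as np
from numpy.linalg import norm
from numpy.fft import fft, ifft

Note that fft_size is the next power of two at or above 2*x_len-1, the minimum length at which the FFT-based cross-correlation is free of circular wrap-around.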
Example 3
Project: gcForest   Author: pylablanche   File: GCForest.py
def _create_feat_arr(self, X, prf_crf_pred):
        """ Concatenate the original feature vector with the predicition probabilities
        of a cascade layer.

        :param X: np.array
            Array containing the input samples.
            Must be of shape [n_samples, data] where data is a 1D array.

        :param prf_crf_pred: list
            Prediction probabilities by a cascade layer for X.

        :return: np.array
            Concatenation of X and the predicted probabilities.
            To be used for the next layer in a cascade forest.
        """
        swap_pred = np.swapaxes(prf_crf_pred, 0, 1)
        add_feat = swap_pred.reshape([np.shape(X)[0], -1])
        feat_arr = np.concatenate([add_feat, X], axis=1)

        return feat_arr 
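
A shape sketch for the method above with made-up sizes (4 estimators, 100 samples, 3 classes; the numbers are illustrative only):

import numpy as np

X = np.random.rand(100, 20)               # 100 samples, 20 raw features
prf_crf_pred = np.random.rand(4, 100, 3)  # 4 estimators x 100 samples x 3 classes
swap = np.swapaxes(prf_crf_pred, 0, 1)    # (100, 4, 3)
feat = np.concatenate([swap.reshape(100, -1), X], axis=1)
feat.shape                                # (100, 32): 12 probabilities + 20 raw features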
Example 4
Project: lang-reps   Author: chaitanyamalaviya   File: sequence2sequence.py
def encode_batch_seq(self, src_seq, src_seq_rev):

        forward_states = self.enc_fwd_lstm.initial_state().add_inputs(src_seq)
        backward_states = self.enc_bwd_lstm.initial_state().add_inputs(src_seq_rev)[::-1]

        src_encodings = []
        forward_cells = []
        backward_cells = []
        for forward_state, backward_state in zip(forward_states, backward_states):
            fwd_cell, fwd_enc = forward_state.s()
            bak_cell, bak_enc = backward_state.s()

            src_encodings.append(dynet.concatenate([fwd_enc, bak_enc]))
            forward_cells.append(fwd_cell)
            backward_cells.append(bak_cell)

        decoder_init = dynet.concatenate([forward_cells[-1], backward_cells[0]])
        decoder_all = [dynet.concatenate([fwd, bwd]) for fwd, bwd in zip(forward_cells, list(reversed(backward_cells)))]
        return src_encodings, decoder_all 
Example 5
Project: treecat   Author: posterior   File: serving_test.py    (Apache License 2.0)
def test_server_logprob_normalized(N, V, C, M):
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)

    # The total probability of all categorical rows should be 1.
    ragged_index = model['suffstats']['ragged_index']
    factors = []
    for v in range(V):
        C = ragged_index[v + 1] - ragged_index[v]
        factors.append([one_hot(c, C) for c in range(C)])
    data = np.array(
        [np.concatenate(columns) for columns in itertools.product(*factors)],
        dtype=np.int8)
    logprobs = server.logprob(data)
    logtotal = np.logaddexp.reduce(logprobs)
    assert logtotal == pytest.approx(0.0, abs=1e-5) 
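
The final assertion checks normalization by summing probabilities in log space with np.logaddexp.reduce. In isolation (a minimal sketch):

import numpy as np

logprobs = np.log(np.array([0.2, 0.3, 0.5]))  # a normalized distribution
np.logaddexp.reduce(logprobs)                 # ~0.0, i.e. log(1)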
Example 6
Project: human-rl   Author: gsastry   File: train_catastrophe_model_human.py    (MIT License)
def load_features(self, episode_paths):
        features = {}
        labels = None
        for episode_path in episode_paths:
            episode_features, episode_labels = self.load_features_episode(episode_path)
            for key, value in episode_features.items():
                if key not in features:
                    features[key] = value
                else:
                    features[key] = np.concatenate([features[key], value], axis=0)
            if labels is None:
                labels = episode_labels
            else:
                labels = np.concatenate([labels, episode_labels], axis=0)
            print(episode_path)
        return features, labels 
Example 7
Project: zipline-chinese   Author: zhanghan1990   File: trading.py    (Apache License 2.0)
def minutes_for_days_in_range(self, start, end):
        """
        Get all market minutes for the days between start and end, inclusive.
        """
        start_date = self.normalize_date(start)
        end_date = self.normalize_date(end)

        all_minutes = []
        for day in self.days_in_range(start_date, end_date):
            day_minutes = self.market_minutes_for_day(day)
            all_minutes.append(day_minutes)

        # Concatenate all minutes and truncate minutes before start/after end.
        return pd.DatetimeIndex(
            np.concatenate(all_minutes), copy=False, tz='UTC',
        ) 
Example 8
Project: cellranger   Author: 10XGenomics   File: common.py
def plot_barcode_rank(chart, sample_properties, sample_data):
    """ Generate the RNA counter barcode rank plot """
    if sample_properties.get('genomes') is None or sample_data.barcode_summary is None:
        return None

    if len(sample_properties['genomes']) == 0:
        return None

    counts_per_bc = []
    for genome in sample_properties['genomes']:
        key = cr_utils.format_barcode_summary_h5_key(genome, cr_constants.TRANSCRIPTOME_REGION, cr_constants.CONF_MAPPED_DEDUPED_READ_TYPE)
        if key in sample_data.barcode_summary:
            counts_per_bc.append(sample_data.barcode_summary[key][:])
        else:
            # Not guaranteed to exist, depending on pipeline
            return
    counts_per_bc = np.concatenate(counts_per_bc)

    return _plot_barcode_rank(chart, counts_per_bc, sample_data.num_cells) 
Example 9
Project: cellranger   Author: 10XGenomics   File: utils.py
def numpy_groupby(values, keys):
    """ Group a collection of numpy arrays by key arrays.
        Yields (key_tuple, view_tuple) where key_tuple is the key grouped on and view_tuple is a tuple of views into the value arrays.
          values: tuple of arrays to group
          keys: tuple of sorted, numeric arrays to group by """

    if len(values) == 0:
        return
    if len(values[0]) == 0:
        return

    for key_array in keys:
        assert len(key_array) == len(keys[0])
    for value_array in values:
        assert len(value_array) == len(keys[0])

    # The indices where any of the keys differ from the previous key become group boundaries
    key_change_indices = np.logical_or.reduce(tuple(np.concatenate(([1], np.diff(key))) != 0 for key in keys))
    group_starts = np.flatnonzero(key_change_indices)
    group_ends = np.roll(group_starts, -1)
    group_ends[-1] = len(keys[0])

    for group_start, group_end in itertools.izip(group_starts, group_ends):
        yield tuple(key[group_start] for key in keys), tuple(value[group_start:group_end] for value in values) 
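
A sketch of what this generator yields (toy arrays; note the extract targets Python 2, hence itertools.izip -- under Python 3 the built-in zip does the same job):

import numpy as np

keys = (np.array([0, 0, 1, 1, 1]),)
values = (np.array([10, 20, 30, 40, 50]),)
for key, (group,) in numpy_groupby(values, keys):
    print(key, group)
# (0,) [10 20]
# (1,) [30 40 50]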
Example 10
Project: cellranger   Author: 10XGenomics   File: molecule_counter.py
def concatenate(out_filename, in_filenames, metrics=None):
        # Append each column from each input h5 to the output h5
        out_mc = MoleculeCounter.open(out_filename, mode='w')
        ref_set = False
        for in_filename in in_filenames:
            in_mc = MoleculeCounter.open(in_filename, mode='r')
            # if no metrics specified, copy them from the first file
            if metrics is None:
                metrics = in_mc.get_all_metrics()
            for name, array_tuple in in_mc.columns.iteritems():
                h5_array, _ = array_tuple
                out_mc.add_many(name, h5_array[:])
            if not ref_set: # only set once
                for name, h5_array in in_mc.ref_columns.iteritems():
                    out_mc.set_ref_column(name, h5_array[:])
                ref_set = True
            in_mc.close()
        out_mc.set_all_metrics(metrics)
        out_mc.save() 
Example 11
Project: cellranger   Author: 10XGenomics   File: molecule_counter.py
def concatenate_sort(out_filename, in_filenames, sort_cols, metrics=None):
        in_mcs = [MoleculeCounter.open(f, 'r') for f in in_filenames]
        out_mc = MoleculeCounter.open(out_filename, mode='w')
        if metrics is None:
            metrics = in_mcs[0].get_all_metrics()
        out_mc.set_all_metrics(metrics)
        for col, array in in_mcs[0].ref_columns.iteritems():
            out_mc.set_ref_column(col, array[:])
        sort_array = []
        # reverse sort columns so they get sorted in the right order
        for col in reversed(sort_cols):
            sort_array.append(np.concatenate([mc.get_column(col) for mc in in_mcs]))
        sort_index = np.lexsort(sort_array)
        for col in MOLECULE_INFO_COLUMNS:
            col_sorted = np.concatenate([mc.get_column(col) for mc in in_mcs])[sort_index]
            out_mc.add_many(col, col_sorted)
        for mc in in_mcs:
            mc.close()
        out_mc.save() 
Example 12
Project: cellranger   Author: 10XGenomics   File: __init__.py
def get_bc_counts(genomes, genes, molecule_counter):
    genome_ids = molecule_counter.get_column('genome')
    genome_index = cr_reference.get_genome_index(genomes)
    conf_mapped_reads = molecule_counter.get_column('reads')
    barcodes = molecule_counter.get_column('barcode')

    bc_counts = {}
    for genome in genomes:
        genome_id = cr_reference.get_genome_id(genome, genome_index)
        genome_indices = genome_ids == genome_id
        if genome_indices.sum() == 0:
            # edge case - there's no data for this genome (e.g. empty sample, false barnyard sample, or nothing confidently mapped)
            continue
        bcs_for_genome = barcodes[genome_indices]
        # only count UMIs with at least one conf mapped read
        umi_conf_mapped_to_genome = conf_mapped_reads[genome_indices] > 0
        bc_breaks = bcs_for_genome[1:] - bcs_for_genome[:-1]
        bc_breaks = np.concatenate(([1], bc_breaks)) # first row is always a break
        bc_break_indices = np.nonzero(bc_breaks)[0]
        unique_bcs = bcs_for_genome[bc_break_indices]
        umis_per_bc = np.add.reduceat(umi_conf_mapped_to_genome, bc_break_indices)
        cmb_reads_per_bc = np.add.reduceat(conf_mapped_reads[genome_indices], bc_break_indices)
        bc_counts[genome] = (unique_bcs, umis_per_bc, cmb_reads_per_bc)

    return bc_counts 
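
The grouping idiom above is worth seeing in isolation: prepend a break marker with np.concatenate so the first row always starts a group, then sum over group boundaries with np.add.reduceat. A toy version with made-up barcode ids:

import numpy as np

bcs = np.array([7, 7, 7, 9, 9])                  # sorted barcode ids
breaks = np.concatenate(([1], bcs[1:] - bcs[:-1]))
idx = np.nonzero(breaks)[0]                      # array([0, 3])
bcs[idx]                                         # unique barcodes: array([7, 9])
np.add.reduceat(np.array([1, 0, 1, 1, 1]), idx)  # per-barcode sums: array([2, 2])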
Example 13
Project: j3dview   Author: blank63   File: model.py    (MIT License)
def load(shape,vertex_array):
        destination = vertex_array[gx.VA_PTNMTXIDX.name]
        vertex_index = 0
        matrix_table = numpy.zeros(10,numpy.uint32)

        for batch in shape.batches:
            source = numpy.concatenate([primitive.vertices[gx.VA_PTNMTXIDX.name] for primitive in batch.primitives])
            source //= 3

            for i,index in enumerate(batch.matrix_table):
                if index == 0xFFFF: continue
                matrix_table[i] = index

            length = sum(len(primitive.vertices) for primitive in batch.primitives)
            numpy.take(matrix_table,source,0,destination[vertex_index:vertex_index + length])
            vertex_index += length

        glEnableVertexAttribArray(MATRIX_INDEX_ATTRIBUTE_LOCATION)
        vertex_type = vertex_array.dtype
        stride = vertex_type.itemsize
        offset = vertex_type.fields[gx.VA_PTNMTXIDX.name][1]
        glVertexAttribIPointer(MATRIX_INDEX_ATTRIBUTE_LOCATION,1,GL_UNSIGNED_INT,stride,GLvoidp(offset)) 
Example 14
Project: pointnet   Author: charlesq34   File: indoor3d_util.py
def sample_data(data, num_sample):
    """ data is in N x ...
        we want to keep a num_sample x C subset of them.
        if N > num_sample, we will randomly keep num_sample of them.
        if N < num_sample, we will randomly duplicate samples.
    """
    N = data.shape[0]
    if (N == num_sample):
        return data, range(N)
    elif (N > num_sample):
        sample = np.random.choice(N, num_sample)
        return data[sample, ...], sample
    else:
        sample = np.random.choice(N, num_sample-N)
        dup_data = data[sample, ...]
        # wrap range() in list() so the index concatenation also works on Python 3
        return np.concatenate([data, dup_data], 0), list(range(N)) + list(sample)
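
A quick sketch of the duplication branch (dummy data): requesting more rows than exist pads the result by re-sampling existing rows.

import numpy as np

data = np.random.rand(5, 3)
padded, idx = sample_data(data, 8)
padded.shape   # (8, 3): the 5 originals plus 3 random duplicates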
Example 15
Project: benchmarks   Author: tensorflow   File: datasets.py    (Apache License 2.0)
def read_data_files(self, subset='train'):
    """Reads from data file and returns images and labels in a numpy array."""
    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
                           'data')
    if subset == 'train':
      filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
                   for i in xrange(1, 6)]
    elif subset == 'validation':
      filenames = [os.path.join(self.data_dir, 'test_batch')]
    else:
      raise ValueError('Invalid data subset "%s"' % subset)

    inputs = []
    for filename in filenames:
      with gfile.Open(filename, 'r') as f:
        inputs.append(cPickle.load(f))
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    all_images = np.concatenate(
        [each_input['data'] for each_input in inputs]).astype(np.float32)
    all_labels = np.concatenate(
        [each_input['labels'] for each_input in inputs])
    return all_images, all_labels 
Example 16
Project: dl4mt-multi   Author: nyu-dl   File: layers.py    (BSD 3-Clause "New" or "Revised" License)
def param_init_gru(params, prefix='gru', nin=None, dim=None, hiero=False):
    if not hiero:
        W = numpy.concatenate([norm_weight(nin, dim),
                               norm_weight(nin, dim)], axis=1)
        params[_p(prefix, 'W')] = W
        params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim)], axis=1)
    params[_p(prefix, 'U')] = U

    Wx = norm_weight(nin, dim)
    params[_p(prefix, 'Wx')] = Wx
    Ux = ortho_weight(dim)
    params[_p(prefix, 'Ux')] = Ux
    params[_p(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32')

    return params 
Example 17
Project: cnn-text   Author: chenchongthu   File: data_helpers.py    (Apache License 2.0)
def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open(positive_data_file, "r").readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open(negative_data_file, "r").readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y] 
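
The label construction above concatenates two plain Python lists of one-hot rows; np.concatenate happily accepts nested lists. In miniature:

import numpy as np

pos = [[0, 1]] * 2
neg = [[1, 0]] * 3
np.concatenate([pos, neg], 0)   # shape (5, 2) one-hot label matrix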
Example 18
Project: vae-npvc   Author: JeremyCCHsu   File: analyzer.py
def extract_and_save_bin_to(dir_to_bin, dir_to_source):
    sets = [s for s in os.listdir(dir_to_source) if s in SETS]
    for d in sets:
        path = join(dir_to_source, d)
        speakers = [s for s in os.listdir(path) if s in SPEAKERS]
        for s in speakers:
            path = join(dir_to_source, d, s)
            output_dir = join(dir_to_bin, d, s)
            if not tf.gfile.Exists(output_dir):
                tf.gfile.MakeDirs(output_dir)
            for f in os.listdir(path):
                filename = join(path, f)
                print(filename)
                if not os.path.isdir(filename):
                    features = extract(filename)
                    labels = SPEAKERS.index(s) * np.ones(
                        [features.shape[0], 1],
                        np.float32,
                    )
                    b = os.path.splitext(f)[0]
                    features = np.concatenate([features, labels], 1)
                    with open(join(output_dir, '{}.bin'.format(b)), 'wb') as fp:
                        fp.write(features.tostring()) 
Example 19
Project: vae-npvc   Author: JeremyCCHsu   File: validate.py
def plot_spectra(results):
    plt.figure(figsize=(10, 4))
    plt.imshow(
        np.concatenate(
            [np.flipud(results['x'].T),
             np.flipud(results['xh'].T),
             np.flipud(results['x_conv'].T)],
            0),
        aspect='auto',
        cmap='jet',
    )
    plt.colorbar()
    plt.title('Upper: Real input; Mid: Reconstruction; Lower: Conversion to target.')
    plt.savefig(
        os.path.join(
            args.logdir,
            '{}.png'.format(
                os.path.split(str(results['f'], 'utf-8'))[-1]
            )
        )
    ) 
Example 20
Project: sea-lion-counter   Author: rdinse   File: contextual_inception_model.py
def preprocessExample(self, image, coords, angle, shear_x, shear_y, scale):
    size_in = image.shape[0]
    size_out = self.config['tile_size'] + 2 * self.config['contextual_pad']
    
    # h = base64.b64encode(struct.pack(">q", hash(image.tostring()))).decode()

    # data_preparation.imshow(image, coords=coords, save=True, title='%s_preprocessExampleA' %h)
    
    image = self.applyLinearTransformToImage(image, angle, shear_x, shear_y, scale, size_out)
    image = self.applyColorAugmentation(image, self.config['aug_color_std'], \
                                        self.config['aug_gamma_factor'])
    coords[:, 1:] = self.applyLinearTransformToCoords(coords[:, 1:], angle, shear_x,
                                                      shear_y, scale, size_in, size_out)
    target = self.generateCountMaps(coords)
    large_target = self.generateLargeCountMaps(coords)

    if self.config['draw_border'] and self.config['contextual_pad'] > 0:
      image = self.draw_border(image, self.config['contextual_pad'], self.config['tile_size'])
      
    # data_preparation.imshow(image, coords=coords, save=True, title='%s_preprocessExampleB' % h)
    # t = np.concatenate(np.moveaxis(target, -1, 0))
    # data_preparation.imshow(t, normalize=True, save=True, title='%s_preprocessExampleC' % h)
    
    return image.astype(np.float32), target, large_target 
Example 21
Project: ProtScan   Author: gianlucacorrado   File: model.py    (MIT License)
def score(self, profiles, bin_sites):
        """Compute AUC ROC from predictions."""
        app_profiles = list()
        app_true_vals = list()
        for k, profile in profiles.iteritems():
            app_profiles.append(profile)
            true_vals = np.zeros(len(profile))
            bins = bin_sites.get(k, False)
            if bins is not False:
                for s, e, _ in bins:
                    true_vals[s:e] = 1
            app_true_vals.append(true_vals)
        vec_profiles = np.concatenate(app_profiles)
        vec_true_vals = np.concatenate(app_true_vals)
        roc_auc = roc_auc_score(vec_true_vals, vec_profiles)
        return roc_auc 
Example 22
Project: lstm-context-embeddings   Author: chaitjo   File: data_helpers.py    (MIT License)
def load_data_and_labels():
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files
    positive_examples = list(open("./data/rt-polaritydata/rt-polarity.pos", "r").readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open("./data/rt-polaritydata/rt-polarity.neg", "r").readlines())
    negative_examples = [s.strip() for s in negative_examples]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y] 
Example 23
Project: lstm-context-embeddings   Author: chaitjo   File: data_helpers.py    (MIT License)
def load_data_and_labels():
    """
    Loads polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    
    # Load data from files
    positive_examples = list(open("./data/rt-polaritydata/rt-polarity.pos", "r").readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open("./data/rt-polaritydata/rt-polarity.neg", "r").readlines())
    negative_examples = [s.strip() for s in negative_examples]
    
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)

    # Generate sequence lengths
    seqlen = np.array([len(sent.split(" ")) for sent in x_text])
    
    return [x_text, y, seqlen] 
Example 24
Project: lstm-context-embeddings   Author: chaitjo   File: data_helpers.py    (MIT License)
def load_data_and_labels():
    """
    Loads polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    
    # Load data from files
    positive_examples = list(open("./data/rt-polaritydata/rt-polarity.pos", "r").readlines())
    positive_examples = [s.strip() for s in positive_examples]
    negative_examples = list(open("./data/rt-polaritydata/rt-polarity.neg", "r").readlines())
    negative_examples = [s.strip() for s in negative_examples]
    
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    
    # Generate labels
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    
    return [x_text, y] 
Example 25
Project: tensorboard   Author: dmlc   File: x2num.py    (Apache License 2.0)
def _prepare_image(I):
    assert isinstance(I, np.ndarray), 'plugin error, should pass numpy array here'
    assert I.ndim == 2 or I.ndim == 3 or I.ndim == 4
    if I.ndim == 4:  # NCHW
        if I.shape[1] == 1:  # N1HW
            I = np.concatenate((I, I, I), 1)  # N3HW
        assert I.shape[1] == 3
        I = make_grid(I)  # 3xHxW
    if I.ndim == 3 and I.shape[0] == 1:  # 1xHxW
        I = np.concatenate((I, I, I), 0)  # 3xHxW
    if I.ndim == 2:  # HxW
        I = np.expand_dims(I, 0)  # 1xHxW
        I = np.concatenate((I, I, I), 0)  # 3xHxW
    I = I.transpose(1, 2, 0)

    return I 
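
The grayscale-to-RGB step above simply replicates the single channel three times along the channel axis. In isolation:

import numpy as np

gray = np.random.rand(1, 64, 64)             # 1xHxW
rgb = np.concatenate((gray, gray, gray), 0)  # 3xHxW
rgb.shape                                    # (3, 64, 64)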
Example 26
Project: lung-cancer-detector   Author: YichenGong   File: util.py    (MIT License)
def combine_img_prediction(data, gt, pred):
    """
    Combines the data, ground truth and the prediction into one rgb image
    
    :param data: the data tensor
    :param gt: the ground truth tensor
    :param pred: the prediction tensor
    
    :returns img: the concatenated rgb image 
    """
    ny = pred.shape[2]
    ch = data.shape[3]
    img = np.concatenate((to_rgb(crop_to_shape(data, pred.shape).reshape(-1, ny, ch)), 
                          to_rgb(crop_to_shape(gt[..., 1], pred.shape).reshape(-1, ny, 1)), 
                          to_rgb(pred[..., 1].reshape(-1, ny, 1))), axis=1)
    return img 
Example 27
Project: sharedbuffers   Author: jampp   File: mapped_struct.py    (BSD 3-Clause "New" or "Revised" License)
def testMerge(self, dtype=dtype):
            testarray1 = range(1,101)
            testarray2 = range(5,106)
            a = numpy.empty((100,2), dtype=dtype)
            b = numpy.empty((100,2), dtype=dtype)
            merged = numpy.empty((200,2), dtype=dtype)
            incompatible1 = numpy.empty((200,3), dtype=dtype)
            incompatible2 = numpy.empty(200, dtype=dtype)
            a[:,0] = numpy.arange(1,101)
            a[:,1] = numpy.arange(2,102)
            b[:,0] = numpy.arange(5,105)
            b[:,1] = numpy.arange(6,106)
            ref = numpy.concatenate([a,b])
            ref = ref[numpy.argsort(ref[:,0])]
            self.assertEqual(mapped_struct.index_merge(a, b, merged), 200)
            self.assertTrue((merged == ref).all())
            self.assertRaises(ValueError, mapped_struct.index_merge, a, b, incompatible1)
            self.assertRaises(ValueError, mapped_struct.index_merge, a, incompatible1, merged)
            self.assertRaises(ValueError, mapped_struct.index_merge, a, b, incompatible2)
            self.assertRaises(ValueError, mapped_struct.index_merge, a, incompatible2, merged) 
Example 28
Project: NeoAnalysis   Author: neoanalysis   File: ROI.py
def getArrayRegion(self, arr, img=None, axes=(0,1), **kwds):
        rgns = []
        for l in self.lines:
            rgn = l.getArrayRegion(arr, img, axes=axes, **kwds)
            if rgn is None:
                continue
                #return None
            rgns.append(rgn)
            #print l.state['size']
            
        ## make sure orthogonal axis is the same size
        ## (sometimes fp errors cause differences)
        if img.axisOrder == 'row-major':
            axes = axes[::-1]
        ms = min([r.shape[axes[1]] for r in rgns])
        sl = [slice(None)] * rgns[0].ndim
        sl[axes[1]] = slice(0,ms)
        rgns = [r[sl] for r in rgns]
        #print [r.shape for r in rgns], axes
        
        return np.concatenate(rgns, axis=axes[0]) 
Example 29
Project: NeoAnalysis   Author: neoanalysis   File: ROI.py
def getArrayRegion(self, data, img, axes=(0,1), order=1, **kwds):
        """
        Use the position of this ROI relative to an imageItem to pull a slice 
        from an array.
        
        Since this pulls 1D data from a 2D coordinate system, the return value 
        will have ndim = data.ndim-1
        
        See ROI.getArrayRegion() for a description of the arguments.
        """
        
        imgPts = [self.mapToItem(img, h['item'].pos()) for h in self.handles]
        rgns = []
        for i in range(len(imgPts)-1):
            d = Point(imgPts[i+1] - imgPts[i])
            o = Point(imgPts[i])
            r = fn.affineSlice(data, shape=(int(d.length()),), vectors=[Point(d.norm())], origin=o, axes=axes, order=order, **kwds)
            rgns.append(r)
            
        return np.concatenate(rgns, axis=axes[0]) 
Example 30
Project: NeoAnalysis   Author: neoanalysis   File: elphyio.py
def load_bytes(self, data_blocks, dtype='<i1', start=None, end=None, expected_size=None):
        """
        Return list of bytes contained
        in the specified set of blocks.
        
        NB : loads all data at once, as files cannot exceed 4 GB;
             a more memory-sparing approach may be added later.
        """
        chunks = list()
        raw = ''
        # keep only data blocks having
        # a size greater than zero
        blocks = [k for k in data_blocks if k.size > 0]
        for data_block in blocks :
            self.file.seek(data_block.start)
            raw = self.file.read(data_block.size)[0:expected_size]
            databytes = np.frombuffer(raw, dtype=dtype)
            chunks.append(databytes)
        # concatenate all chunks and return
        # the specified slice
        if len(chunks)>0 :
            databytes = np.concatenate(chunks)
            return databytes[start:end]
        else :
            return np.array([]) 
Example 31
Project: NeoAnalysis   Author: neoanalysis   File: ROI.py
def getArrayRegion(self, arr, img=None, axes=(0,1), **kwds):
        rgns = []
        for l in self.lines:
            rgn = l.getArrayRegion(arr, img, axes=axes, **kwds)
            if rgn is None:
                continue
                #return None
            rgns.append(rgn)
            #print l.state['size']
            
        ## make sure orthogonal axis is the same size
        ## (sometimes fp errors cause differences)
        if img.axisOrder == 'row-major':
            axes = axes[::-1]
        ms = min([r.shape[axes[1]] for r in rgns])
        sl = [slice(None)] * rgns[0].ndim
        sl[axes[1]] = slice(0,ms)
        rgns = [r[sl] for r in rgns]
        #print [r.shape for r in rgns], axes
        
        return np.concatenate(rgns, axis=axes[0]) 
Example 32
Project: NeoAnalysis   Author: neoanalysis   File: ROI.py
def getArrayRegion(self, data, img, axes=(0,1), order=1, **kwds):
        """
        Use the position of this ROI relative to an imageItem to pull a slice 
        from an array.
        
        Since this pulls 1D data from a 2D coordinate system, the return value 
        will have ndim = data.ndim-1
        
        See ROI.getArrayRegion() for a description of the arguments.
        """
        
        imgPts = [self.mapToItem(img, h['item'].pos()) for h in self.handles]
        rgns = []
        for i in range(len(imgPts)-1):
            d = Point(imgPts[i+1] - imgPts[i])
            o = Point(imgPts[i])
            r = fn.affineSlice(data, shape=(int(d.length()),), vectors=[Point(d.norm())], origin=o, axes=axes, order=order, **kwds)
            rgns.append(r)
            
        return np.concatenate(rgns, axis=axes[0]) 
Example 33
Project: NeoAnalysis   Author: neoanalysis   File: elphyio.py
def load_bytes(self, data_blocks, dtype='<i1', start=None, end=None, expected_size=None):
        """
        Return list of bytes contained
        in the specified set of blocks.
        
        NB : loads all data at once, as files cannot exceed 4 GB;
             a more memory-sparing approach may be added later.
        """
        chunks = list()
        raw = ''
        # keep only data blocks having
        # a size greater than zero
        blocks = [k for k in data_blocks if k.size > 0]
        for data_block in blocks :
            self.file.seek(data_block.start)
            raw = self.file.read(data_block.size)[0:expected_size]
            databytes = np.frombuffer(raw, dtype=dtype)
            chunks.append(databytes)
        # concatenate all chunks and return
        # the specified slice
        if len(chunks)>0 :
            databytes = np.concatenate(chunks)
            return databytes[start:end]
        else :
            return np.array([]) 
Example 34
Project: scientific-paper-summarisation   Author: EdCo95   File: AbstractNetPreprocessor.py
def extra_processing(self):

        data_dir = BASE_DIR + "/Data/Generated_Data/Sentences_And_SummaryBool/Abstract_Neg/AbstractNet/data.pkl"
        write_dir = BASE_DIR + \
            "/Data/Generated_Data/Sentences_And_SummaryBool/Abstract_Neg/AbstractNet/abstractnet_data.pkl"

        print("----> Reading data...")
        t = time.time()
        data = useful_functions.load_pickled_object(data_dir)
        print("----> Done, took ", time.time() - t, " seconds")

        print("----> Beginning processing...")
        t = time.time()
        self.start_time = t
        new_data = self.pool2.map(self.process_item, data)
        # new_data = np.concatenate(new_data, axis=0)
        print("----> Done, took ", (time.time() - t) / 60, " minutes")

        useful_functions.pickle_list(new_data, write_dir) 
Example 35
Project: DeepLearning_PlantDiseases   Author: MarkoArsenovic   File: occlusion.py
def Occlusion_exp(image,occluding_size,occluding_stride,model,preprocess,classes,groundTruth):    
    img = np.copy(image)
    height, width,_= img.shape
    output_height = int(math.ceil((height-occluding_size)/occluding_stride+1))
    output_width = int(math.ceil((width-occluding_size)/occluding_stride+1))
    ocludedImages=[]
    for h in range(output_height):
        for w in range(output_width):
            #occluder region
            h_start = h*occluding_stride
            w_start = w*occluding_stride
            h_end = min(height, h_start + occluding_size)
            w_end = min(width, w_start + occluding_size)
            
            input_image = copy.copy(img)
            input_image[h_start:h_end,w_start:w_end,:] =  0
            ocludedImages.append(preprocess(Image.fromarray(input_image)))
            
    L = np.empty(output_height*output_width)
    L.fill(groundTruth)
    L = torch.from_numpy(L)
    tensor_images = torch.stack([img for img in ocludedImages])
    dataset = torch.utils.data.TensorDataset(tensor_images,L) 
    dataloader = torch.utils.data.DataLoader(dataset,batch_size=5,shuffle=False, num_workers=8) 

    heatmap=np.empty(0)
    model.eval()
    for data in dataloader:
        images, labels = data
        
        if use_gpu:
            images, labels = images.cuda(), labels.cuda(non_blocking=True)  # 'async' is a reserved word in Python 3.7+
        
        outputs = model(Variable(images))
        m = nn.Softmax()
        outputs=m(outputs)
        if use_gpu:   
            outs=outputs.cpu()
        heatmap = np.concatenate((heatmap,outs[0:outs.size()[0],groundTruth].data.numpy()))
        
    return heatmap.reshape((output_height, output_width)) 
Example 36
Project: deep-q-learning   Author: alvinwan   File: dqn_utils.py
def _encode_observation(self, idx):
        end_idx   = idx + 1 # make noninclusive
        start_idx = end_idx - self.frame_history_len
        # this checks if we are using low-dimensional observations, such as RAM
        # state, in which case we just directly return the latest RAM.
        if len(self.obs.shape) == 2:
            return self.obs[end_idx-1]
        # if there weren't enough frames ever in the buffer for context
        if start_idx < 0 and self.num_in_buffer != self.size:
            start_idx = 0
        for idx in range(start_idx, end_idx - 1):
            if self.done[idx % self.size]:
                start_idx = idx + 1
        missing_context = self.frame_history_len - (end_idx - start_idx)
        # if zero padding is needed for missing context
        # or we are on the boundary of the buffer
        if start_idx < 0 or missing_context > 0:
            frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
            for idx in range(start_idx, end_idx):
                frames.append(self.obs[idx % self.size])
            return np.concatenate(frames, 2)
        else:
            # this optimization has the potential to save about 30% compute time \o/
            img_h, img_w = self.obs.shape[1], self.obs.shape[2]
            return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1) 
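
A sketch of the zero-padding branch with hypothetical Atari-sized frames: missing history is filled with zero frames, then everything is joined along the channel axis.

import numpy as np

frames = [np.zeros((84, 84, 1))] * 2 + [np.ones((84, 84, 1))] * 2
np.concatenate(frames, 2).shape   # (84, 84, 4): 2 padded + 2 real frames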
Example 37
Project: tfutils   Author: neuroailab   File: data.py    (MIT License)
def get_data(self, dsource, sliceval):
        if self.subslice is None:
            return dsource[sliceval]
        else:
            subslice_inds = self.subsliceinds[sliceval]
            mbs = self.mini_batch_size
            bn0 = subslice_inds.min() // mbs
            bn1 = subslice_inds.max() // mbs
            stims = []
            for _bn in range(bn0, bn1 + 1):
                _s = np.asarray(dsource[_bn * mbs: (_bn + 1) * mbs])
                new_inds = isin(np.arange(_bn * mbs, (_bn + 1) * mbs), subslice_inds)
                new_array = _s[new_inds]
                stims.append(new_array)
            stims = np.concatenate(stims)
            return stims 
Example 38
Project: piperine   Author: DNA-and-Natural-Algorithms-Group   File: energetics.py    (MIT License)
def calculate_unrestricted_toehold_characteristics(self):
        import stickydesign as sd
        ends = sd.easyends('TD',
                           self.length,
                           alphabet='h',
                           adjs=['c', 'g'],
                           energetics=self)
        n_ends = len(ends)
        e_array = sd.energy_array_uniform(ends, self)
        e_array = e_array[n_ends:, :n_ends]
        for i in range(n_ends):
            e_array[i,i] = 0
        e_spr = e_array.max()/self.targetdG
        e_vec_ext = self.th_external_dG(ends)
        e_vec_int = self.th_internal_dG(ends)
        e_vec_all = np.concatenate( (e_vec_int, e_vec_ext))
        e_avg = e_vec_all.mean()
        e_dev = np.max(np.abs(e_vec_all - self.targetdG))
        return e_avg, e_spr, e_dev, n_ends 
Example 39
Project: piperine   Author: DNA-and-Natural-Algorithms-Group   File: energetics.py    (MIT License)
def calculate_unrestricted_toehold_characteristics(self):
        import stickydesign as sd
        ends = sd.easyends('TD',
                           self.length,
                           alphabet=self.alphabet,
                           adjs=self.adjs,
                           energetics=self)
        n_ends = len(ends)
        e_array = sd.energy_array_uniform(ends, self)
        e_array = e_array[n_ends:, :n_ends]
        for i in range(n_ends):
            e_array[i,i] = 0
        e_spr = e_array.max()/self.targetdG
        e_vec_ext = self.th_external_dG(ends)
        e_vec_int = self.th_internal_dG(ends)
        e_vec_all = np.concatenate( (e_vec_int, e_vec_ext))
        e_avg = e_vec_all.mean()
        e_dev = np.max(np.abs(e_vec_all - self.targetdG))
        return e_avg, e_dev, e_spr, n_ends 
Example 40
Project: lang-reps   Author: chaitanyamalaviya   File: lang2vec.py
def get_concatenated_sets(lang_codes, feature_set_str):
    feature_set_parts = feature_set_str.split("+")
    feature_names = []
    feature_values = np.ndarray((len(lang_codes),0))
    for feature_set_part in feature_set_parts:
        more_feature_names, more_feature_values = get_union_sets(lang_codes, feature_set_part)
        feature_names += more_feature_names
        feature_values = np.concatenate([feature_values, more_feature_values], axis=1)
    return feature_names, feature_values 
Example 41
Project: lang-reps   Author: chaitanyamalaviya   File: sequence2sequence.py
def encode_seq(self, src_seq):
        """
        Encode a single sentence
        :param src_seq: source sentence
        :return: encoded vector
        """

        src_seq_rev = list(reversed(src_seq))
        fwd_vectors = self.enc_fwd_lstm.initial_state().transduce(src_seq)
        bwd_vectors = self.enc_bwd_lstm.initial_state().transduce(src_seq_rev)
        bwd_vectors = list(reversed(bwd_vectors))
        vectors = [dynet.concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)]
        return vectors 
Example 42
Project: lang-reps   Author: chaitanyamalaviya   File: sequence2sequence.py
def encode_batch_seq(self, src_seq, src_seq_rev):
        """
        Encodes a batch of sentences
        :param src_seq: batch of sentences
        :param src_seq_rev: batch of sentences in reversed order
        :return: last hidden state of the encoder
        """
        fwd_vectors = self.enc_fwd_lstm.initial_state().transduce(src_seq)
        bwd_vectors = list(reversed(self.enc_bwd_lstm.initial_state().transduce(src_seq_rev)))
        return dynet.concatenate([fwd_vectors[-1], bwd_vectors[-1]]) 
Example 43
Project: lang-reps   Author: chaitanyamalaviya   File: sequence2sequence.py
def encode_seq(self, src_seq, src_seq_rev):
        fwd_vectors = self.enc_fwd_lstm.initial_state().transduce(src_seq)
        bwd_vectors = list(reversed(self.enc_fwd_lstm.initial_state().transduce(src_seq_rev)))
        return [dynet.concatenate(list(p)) for p in zip(fwd_vectors, bwd_vectors)] 
Example 44
Project: subtitle-synchronization   Author: AlbertoSabater   File: audio_converter.py    (GNU Lesser General Public License v3.0)
def generateDatasets(train_files, cut_data, len_mfcc, step_mfcc, hop_len, freq):
    
    X, Y = [], []
    
    for tf in train_files:

        train_data, labels = generateSingleDataset(tf, cut_data, len_mfcc, step_mfcc, hop_len, freq)
                
        X.append(train_data)
        Y.append(labels)
        
    X = np.concatenate(X)
    Y = np.concatenate(Y)
        
    if cut_data:
        filename = STORE_DIR + 'dataset_CUT_' + str(freq) + '_' + str(hop_len) + '_' + str(len_mfcc) + '_' + str(step_mfcc) + '_' + str(X.shape[0]) + '_' + str(X.shape[1]) + '_' + str(X.shape[2]) + '.pickle'
    else:
        filename = STORE_DIR + 'dataset_' + str(freq) + '_' + str(hop_len) + '_' + str(len_mfcc) + '_' + str(step_mfcc) + '_' + str(X.shape[0]) + '_' + str(X.shape[1]) + '_' + str(X.shape[2]) + '.pickle'
    print(filename)
    with open(filename, 'wb') as f:  # binary mode for pickle
        pickle.dump([X, Y], f)
        
    return X, Y


# Generate a dataset from all available files 
Example 45
Project: pyballd   Author: Yurlungur   File: orthopoly.py    (GNU Lesser General Public License v3.0)
def get_quadrature_points(order):
    """
    Returns the quadrature points for Gauss-Lobatto quadrature
    as a function of the order of the polynomial we want to
    represent.
    See: https://en.wikipedia.org/wiki/Gaussian_quadrature
    """
    return np.sort(np.concatenate((np.array([-1,1]),
                                   poly.basis(order).deriv().roots()))) 
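
A usage sketch, assuming the module-level poly referenced above is bound to numpy.polynomial.legendre.Legendre (an assumption; the extract does not show the binding):

import numpy as np
from numpy.polynomial import legendre

poly = legendre.Legendre  # assumed module-level binding
np.sort(np.concatenate((np.array([-1, 1]),
                        poly.basis(4).deriv().roots())))
# five Gauss-Lobatto nodes in [-1, 1] for order 4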
Example 46
Project: pyballd   Author: Yurlungur   File: orthopoly.py    (GNU Lesser General Public License v3.0)
def get_integration_weights(order,nodes=None):
    """
    Returns the integration weights for Gauss-Lobatto quadrature
    as a function of the order of the polynomial we want to
    represent.
    See: https://en.wikipedia.org/wiki/Gaussian_quadrature
    See: arXive:gr-qc/0609020v1
    """
    if nodes is None:
        nodes=get_quadrature_points(order)
    if poly == polynomial.chebyshev.Chebyshev:
        weights = np.empty((order+1))
        weights[1:-1] = np.pi/order
        weights[0] = np.pi/(2*order)
        weights[-1] = weights[0]
        return weights
    elif poly == polynomial.legendre.Legendre:
        interior_weights = 2/((order+1)*order*poly.basis(order)(nodes[1:-1])**2)
        boundary_weights = np.array([1-0.5*np.sum(interior_weights)])
        weights = np.concatenate((boundary_weights,
                                  interior_weights,
                                  boundary_weights))
        return weights
    else:
        raise ValueError("Not a known polynomial type.")
Example 47
Project: almond-nnparser   Author: Stanford-Mobisocial-IoT-Lab   File: thingtalk.py
def constrain_value_logits(self, logits, curr_state):
        first_value_token = self.num_functions + self.num_begin_tokens + self.num_control_tokens
        num_value_tokens = self.output_size - first_value_token
        value_allowed_token_matrix = np.concatenate((self.allowed_token_matrix[:,:self.num_control_tokens], self.allowed_token_matrix[:,first_value_token:]), axis=1)
        
        with tf.name_scope('constrain_logits'):
            allowed_tokens = tf.gather(tf.constant(value_allowed_token_matrix), curr_state)
            assert allowed_tokens.get_shape()[1:] == (self.num_control_tokens + num_value_tokens,)

            constrained_logits = logits - tf.to_float(tf.logical_not(allowed_tokens)) * 1e+10
        return constrained_logits 
Example 48
Project: crnn   Author: wulivicte   File: convert_t7.py
def trans_pos(param, part_indexes, dim=0):
    parts = np.split(param, len(part_indexes), dim)
    new_parts = []
    for i in part_indexes:
        new_parts.append(parts[i])
    return np.concatenate(new_parts, dim) 
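
What this helper does, in miniature: split a parameter into equal blocks along dim, reorder the blocks, and concatenate them back (toy input):

import numpy as np

w = np.arange(8)      # two 4-element blocks: [0..3] and [4..7]
trans_pos(w, [1, 0])  # -> array([4, 5, 6, 7, 0, 1, 2, 3])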
Example 49
Project: treecat   Author: posterior   File: format.py    (Apache License 2.0)
def import_data(data_csvs_in,
                types_csv_in,
                values_csv_in,
                groups_csv_in,
                dataset_out,
                encoding='utf-8'):
    """Import a comma-delimited list of csv files into internal treecat format.

    Common encodings include: utf-8, cp1252.
    """
    schema = load_schema(types_csv_in, values_csv_in, groups_csv_in, encoding)
    data = np.concatenate([
        load_data(schema, data_csv_in, encoding)
        for data_csv_in in data_csvs_in.split(',')
    ])
    data.flags.writeable = False
    print('Imported data shape: [{}, {}]'.format(data.shape[0], data.shape[1]))
    ragged_index = schema['ragged_index']
    for v, name in enumerate(schema['feature_names']):
        beg, end = ragged_index[v:v + 2]
        count = np.count_nonzero(data[:, beg:end].max(1))
        if count == 0:
            print('WARNING: No values found for feature {}'.format(name))
    feature_types = [TY_MULTINOMIAL] * len(schema['feature_names'])
    table = Table(feature_types, ragged_index, data)
    dataset = {
        'schema': schema,
        'table': table,
    }
    pickle_dump(dataset, dataset_out) 
Example 50
Project: treecat   Author: posterior   File: serving.py    (Apache License 2.0)
def sample(self, N, counts, data=None):
        size = len(self._ensemble)
        pvals = np.ones(size, dtype=np.float32) / size
        sub_Ns = np.random.multinomial(N, pvals)
        samples = np.concatenate([
            server.sample(sub_N, counts, data)
            for server, sub_N in zip(self._ensemble, sub_Ns)
        ])
        np.random.shuffle(samples)
        assert samples.shape[0] == N
        return samples