Python numpy.append() Examples

The following code examples show how to use numpy.append(). They are taken from open source Python projects. You can vote up the examples you like or vote down the ones you don't.
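
Before the project examples, a minimal sketch of what numpy.append() actually does: it always returns a new array (neither input is modified in place), and when no axis is given it flattens both inputs first.

import numpy as np

a = np.array([1, 2, 3])
b = np.append(a, [4, 5])   # returns a new array; `a` is unchanged
print(b)                   # [1 2 3 4 5]
print(a)                   # [1 2 3]

m = np.array([[1, 2], [3, 4]])
# with an axis, shapes must match along all other axes
print(np.append(m, [[5, 6]], axis=0))  # [[1 2] [3 4] [5 6]]
# without an axis, both inputs are flattened first
print(np.append(m, [[5, 6]]))          # [1 2 3 4 5 6]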

Example 1
Project: model-api-sequence   Author: evandowning   File: evaluation.py    GNU General Public License v3.0 6 votes
def sequence_generator(fn,n):
    xSet = np.array([])
    ySet = np.array([])

    x = np.array([])
    y = np.array([])

    num = 0

    # Read in sample's sequences
    with open(fn, 'rb') as fr:
        for _ in range(n):
            t = pkl.load(fr)
            x = t[0]
            y = t[1]

            if len(xSet) == 0:
                xSet = x
                ySet = y
            else:
                xSet = np.vstack([xSet,x])
                ySet = np.append(ySet,y)

    return xSet,ySet 
Example 2
Project: kuaa   Author: rafaelwerneck   File: OPF.py    GNU General Public License v3.0 6 votes
def __findPrototypes(self, MST, labels):
        """
        Parameters:
        MST = MST adjacency matrix
        labels = labels of the nodes
        Note:

        Return:
        seeds = List with OPF prototypes
        """
        n = MST.shape[0]
        if n != len(labels): return []

        seeds = []

        for i in range(n):
            w = MST[i, :]
            wix = np.where(w != float("inf"))[0]
            l = [labels[x] for x in wix]
            if len(set(l + [labels[i]])) > 1:
                seeds.append(i)

        return seeds 
Example 3
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 6 votes
def found_search(self, x, y):
        '''
        This function is applied when the lane lines have been detected in the previous frame.
        It uses a sliding window to search for lane pixels in close proximity (+/- 25 pixels in the x direction)
        around the previously detected polynomial.
        '''
        xvals = []
        yvals = []
        if self.found:
            i = 720
            j = 630
            while j >= 0:
                yval = np.mean([i,j])
                xval = (np.mean(self.fit0))*yval**2 + (np.mean(self.fit1))*yval + (np.mean(self.fit2))
                x_idx = np.where((((xval - 25) < x)&(x < (xval + 25))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    # np.append returns a new array, so assign the result back
                    xvals = np.append(xvals, x_window)
                    yvals = np.append(yvals, y_window)
                i -= 90
                j -= 90
        if np.sum(xvals) == 0:
            self.found = False # If no lane pixels were detected then perform blind search
        return xvals, yvals, self.found 
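
Note the assignment back to xvals and yvals above: numpy.append() never modifies its argument, so the bare np.append(xvals, x_window) calls in the original snippet silently discarded their results. A quick illustration:

import numpy as np

vals = np.array([1.0, 2.0])
np.append(vals, 3.0)         # result discarded; vals is still [1. 2.]
vals = np.append(vals, 3.0)  # vals is now [1. 2. 3.]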
Example 4
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 6 votes
def add_exon(self, chrom, strand, start, stop):
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1, 2)
        self.exons = np.append(self.exons, _exon, axis=0)
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1

        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0, 1] - self.exons[0, 0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i * 2 - 1] = self.exons[i, 0] - self.exons[i - 1, 1] - 1
            self.seglen[i * 2] = self.exons[i, 1] - self.exons[i, 0] + 1

        if ["-", "-1", "0", 0, -1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1] 
Example 5
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 6 votes
def add_exon(self, chrom, strand, start, stop):
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1, 2)
        self.exons = np.append(self.exons, _exon, axis=0)
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1

        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0, 1] - self.exons[0, 0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i * 2 - 1] = self.exons[i, 0] - self.exons[i - 1, 1] - 1
            self.seglen[i * 2] = self.exons[i, 1] - self.exons[i, 0] + 1

        if ["-", "-1", "0", 0, -1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1] 
Example 6
Project: models   Author: kipoi   File: dataloader.py    MIT License 6 votes
def add_exon(self, chrom, strand, start, stop):
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1,2)
        self.exons = np.append(self.exons, _exon, axis=0)
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1


        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0,1]-self.exons[0,0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i*2-1] = self.exons[i,0]-self.exons[i-1,1] - 1
            self.seglen[i*2] = self.exons[i,1]-self.exons[i,0] + 1

        if ["-","-1","0",0,-1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1] 
Example 7
Project: models   Author: kipoi   File: dataloader.py    MIT License 6 votes
def add_exon(self, chrom, strand, start, stop):
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1,2)
        self.exons = np.append(self.exons, _exon, axis=0)
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1


        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0,1]-self.exons[0,0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i*2-1] = self.exons[i,0]-self.exons[i-1,1] - 1
            self.seglen[i*2] = self.exons[i,1]-self.exons[i,0] + 1

        if ["-","-1","0",0,-1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1] 
Example 8
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 6 votes
def add_exon(self, chrom, strand, start, stop):
        if strand != self.strand or chrom != self.chrom:
            print("The exon has different chrom or strand to the transcript.")
            return
        _exon = np.array([start, stop], "int").reshape(1, 2)
        self.exons = np.append(self.exons, _exon, axis=0)
        self.exons = np.sort(self.exons, axis=0)
        self.tranL += abs(int(stop) - int(start) + 1)
        self.exonNum += 1

        self.seglen = np.zeros(self.exons.shape[0] * 2 - 1, "int")
        self.seglen[0] = self.exons[0, 1] - self.exons[0, 0] + 1
        for i in range(1, self.exons.shape[0]):
            self.seglen[i * 2 - 1] = self.exons[i, 0] - self.exons[i - 1, 1] - 1
            self.seglen[i * 2] = self.exons[i, 1] - self.exons[i, 0] + 1

        if ["-", "-1", "0", 0, -1].count(self.strand) > 0:
            self.seglen = self.seglen[::-1] 
Example 9
Project: skylab   Author: coenders   File: basellh.py    GNU General Public License v3.0 6 votes
def _select_events(self, src_ra, src_dec, scramble=False, inject=None):
        r"""Select events for log-likelihood evaluation.

        This method must set the private attributes number of total
        events `_nevents` and number of selected events `_nselected`.

        Parameters
        ----------
        src_ra : float
            Right ascension of source position
        src_dec : float
            Declination of source position
        scramble : bool, optional
            Scramble selected events in right ascension.
        inject : ndarray, optional
            Structured array containing additional events to append to
            selection

        """
        pass 
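
Since _select_events() is abstract here, each subclass supplies the actual selection. As a purely hypothetical sketch (attribute and field names below are illustrative, not taken from skylab), an override could use np.append to attach the injected structured array:

import numpy as np

def _select_events(self, src_ra, src_dec, scramble=False, inject=None):
    # hypothetical: copy the experimental events (structured array assumed)
    events = self.exp.copy()
    if scramble:
        # scramble in right ascension with fresh uniform draws
        events["ra"] = np.random.uniform(0., 2. * np.pi, len(events))
    if inject is not None:
        # np.append concatenates structured arrays that share a dtype
        events = np.append(events, inject)
    self._nevents = len(self.exp)
    self._nselected = len(events)
    self._events = events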
Example 10
Project: skylab   Author: coenders   File: grbllh.py    GNU General Public License v3.0 6 votes
def llh(self, nsources, **others):
        SoB = self._signal / self._events["B"]

        weights, wgrad = self.llh_model.weight(self._events, **others)
        x = SoB * weights

        # Multiply by two for chi-square distributed test-statistic.
        ts = 2. * (-nsources + np.log1p(nsources / self.nbackground * x).sum())
        nsgrad = -1. + (x / (self.nbackground + x * nsources)).sum()

        if wgrad is not None:
            pgrad = np.sum(
                nsources / (self.nbackground + x*nsources) * SoB * wgrad,
                axis=-1)
        else:
            pgrad = np.zeros((0, ))

        grad = 2. * np.append(nsgrad, pgrad)

        return ts, grad 
Example 11
Project: core   Author: lifemapper   File: matrix.py    GNU General Public License v3.0 6 votes
def slice(self, *args):
        """
        @summary: Subsets the matrix and returns a new instance
        @param *args: These are iterables for the indices to retrieve
        @note: The first parameter will be for axis 0, second for axis 1, etc
        """
        newData = np.copy(self.data)
        newHeaders = deepcopy(self.headers)
        # For each arg in the list
        for i in range(len(args)):
            # Subset the data matrix
            newData = newData.take(args[i], axis=i)
            # Subset the headers
            tmp = []
            for j in args[i]:
                tmp.append(newHeaders[str(i)][j])
            newHeaders[str(i)] = tmp
        return Matrix(newData, headers=newHeaders)
    
    # ........................... 
Example 12
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes
def test_ctc_loss_with_large_classes():
    ctx = default_context()
    num_classes = 6000
    seq_len = 8
    batch_size = 2
    data = np.empty((num_classes, 0))
    for i in range(seq_len * batch_size) :
        row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
        data = np.append(data, row/13, axis=1)
    data = data.reshape(seq_len, batch_size, num_classes)
    label = np.array([
        [100, 200, 300, 400, 500, 0, 0, 0],
        [1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
    nd_data = mx.nd.array(data)
    nd_label = mx.nd.array(label)
    loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
    expected_loss = np.array([688.02826, 145.34462])
    assert_almost_equal(loss.asnumpy(), expected_loss) 
Example 13
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator_gpu.py    Apache License 2.0 6 votes
def test_elementwisesum_with_type():
    dev_types = [[mx.gpu(0), [np.float64, np.float32, np.float16]],
                 [mx.cpu(0), [np.float64, np.float32]] ]
    for num_args in range(1, 6):
        ews_arg_shape = {}
        for i in range(num_args):
            ews_arg_shape['ews_arg'+str(i)] = (2, 10)
        sym = mx.sym.ElementWiseSum(name='ews', num_args=num_args)
        ctx_list = []
        for dev, types in dev_types:
            for dtype in types:
                ews_arg_dtype = {'type_dict':{}}
                for i in range(num_args):
                    ews_arg_dtype['type_dict']['ews_arg'+str(i)] = dtype
                ctx_elem = {'ctx': dev}
                ctx_elem.update(ews_arg_shape)
                ctx_elem.update(ews_arg_dtype)
                ctx_list.append(ctx_elem)
        check_consistency(sym, ctx_list)
Example 14
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator_gpu.py    Apache License 2.0 6 votes
def test_embedding_with_type():
    def test_embedding_helper(data_types, weight_types, low_pad, high_pad):
        NVD = [[20, 10, 20], [200, 10, 300]]
        for N, V, D in NVD:
            sym = mx.sym.Embedding(name='embedding', input_dim=V, output_dim=D)
            ctx_list = []
            for data_type in data_types:
                for weight_type in weight_types:
                    ctx_list.append({'ctx': mx.gpu(0), 'embedding_data': (N,),
                        'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
                    ctx_list.append({'ctx': mx.cpu(0), 'embedding_data': (N,),
                        'type_dict': {'embedding_data': data_type, 'embedding_weight': weight_type}})
            arg_params = {'embedding_data': np.random.randint(low=-low_pad, high=V+high_pad, size=(N,))}
            check_consistency(sym, ctx_list, grad_req={'embedding_data': 'null','embedding_weight': 'write'},
                              arg_params=arg_params)

    data_types = [np.float16, np.float32, np.float64, np.int32]
    weight_types = [np.float16, np.float32, np.float64]
    test_embedding_helper(data_types, weight_types, 5, 5)
    data_types = [np.uint8]
    weight_types = [np.float16, np.float32, np.float64]
    test_embedding_helper(data_types, weight_types, 0, 5) 
Example 15
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: bcfstore.py    BSD 2-Clause "Simplified" License 5 votes
def __init__(self, filename):
        self._filename = filename
        print('Loading BCF file to memory ... ' + filename)
        file = open(filename, 'rb')
        # numpy.fromstring is deprecated; frombuffer parses the raw bytes
        size = int(numpy.frombuffer(file.read(8), dtype=numpy.uint64)[0])
        file_sizes = numpy.frombuffer(file.read(8 * size), dtype=numpy.uint64)
        self._offsets = numpy.append(numpy.uint64(0),
                                     numpy.add.accumulate(file_sizes))
        self._memory = file.read()
        file.close() 
Example 16
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: bcfstore.py    BSD 2-Clause "Simplified" License 5 votes
def __init__(self, filename):
        self._filename = filename
        print('Opening BCF file ... ' + filename)
        self._file = open(filename, 'rb')
        size = int(numpy.frombuffer(self._file.read(8), dtype=numpy.uint64)[0])
        file_sizes = numpy.frombuffer(self._file.read(8 * size),
                                      dtype=numpy.uint64)
        self._offsets = numpy.append(numpy.uint64(0),
                                     numpy.add.accumulate(file_sizes)) 
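
Both constructors rely on the same idiom: numpy.append(0, numpy.add.accumulate(sizes)) turns a list of record sizes into start offsets, so record i spans offsets[i]:offsets[i+1]. A small demonstration with made-up sizes:

import numpy as np

file_sizes = np.array([3, 5, 2], dtype=np.uint64)
offsets = np.append(np.uint64(0), np.add.accumulate(file_sizes))
print(offsets)  # [ 0  3  8 10]

blob = b'aaabbbbbcc'
records = [blob[int(offsets[i]):int(offsets[i + 1])]
           for i in range(len(file_sizes))]
print(records)  # [b'aaa', b'bbbbb', b'cc']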
Example 17
Project: meta-transfer-learning   Author: erfaneshrati   File: reptile.py    MIT License 5 votes
def _test_predictions(self, train_set, test_set, input_ph, predictions):
        if self._transductive:
            inputs, _ = zip(*test_set)
            return self.session.run(predictions, feed_dict={input_ph: inputs})
        res = []
        for test_sample in test_set:
            inputs, _ = zip(*train_set)
            inputs += (test_sample[0],)
            res.append(self.session.run(predictions, feed_dict={input_ph: inputs})[-1])
        return res 
Example 18
Project: meta-transfer-learning   Author: erfaneshrati   File: reptile.py    MIT License 5 votes
def _mini_batches(samples, batch_size, num_batches, replacement):
    """
    Generate mini-batches from some data.

    Returns:
      An iterable of sequences of (input, label) pairs,
        where each sequence is a mini-batch.
    """
    samples = list(samples)
    if replacement:
        for _ in range(num_batches):
            yield random.sample(samples, batch_size)
        return
    cur_batch = []
    batch_count = 0
    while True:
        random.shuffle(samples)
        for sample in samples:
            cur_batch.append(sample)
            if len(cur_batch) < batch_size:
                continue
            yield cur_batch
            cur_batch = []
            batch_count += 1
            if batch_count == num_batches:
                return 
Example 19
Project: meta-transfer-learning   Author: erfaneshrati   File: reptile.py    MIT License 5 votes
def _split_train_test(samples, test_shots=1):
    """
    Split a few-shot task into a train and a test set.

    Args:
      samples: an iterable of (input, label) pairs.
      test_shots: the number of examples per class in the
        test set.

    Returns:
      A tuple (train, test), where train and test are
        sequences of (input, label) pairs.
    """
    train_set = list(samples)
    test_set = []
    labels = set(item[1] for item in train_set)
    for _ in range(test_shots):
        for label in labels:
            for i, item in enumerate(train_set):
                if item[1] == label:
                    del train_set[i]
                    test_set.append(item)
                    break
    if len(test_set) < len(labels) * test_shots:
        raise IndexError('not enough examples of each class for test set')
    return train_set, test_set 
Example 20
Project: meta-transfer-learning   Author: erfaneshrati   File: reptile.py    MIT License 5 votes
def train_step(self,
                   dataset,
                   input_ph,
                   label_ph,
                   minimize_op,
                   num_classes,
                   num_shots,
                   inner_batch_size,
                   inner_iters,
                   replacement,
                   meta_step_size,
                   meta_batch_size):
        old_vars = self._model_state.export_variables()
        updates = []
        for _ in range(meta_batch_size):
            mini_dataset = _sample_mini_dataset(dataset, num_classes, num_shots)
            mini_batches = self._mini_batches(mini_dataset, inner_batch_size, inner_iters,
                                              replacement)
            for batch in mini_batches:
                inputs, labels = zip(*batch)
                last_backup = self._model_state.export_variables()
                if self._pre_step_op:
                    self.session.run(self._pre_step_op)
                self.session.run(minimize_op, feed_dict={input_ph: inputs, label_ph: labels})
            updates.append(subtract_vars(self._model_state.export_variables(), last_backup))
            self._model_state.import_variables(old_vars)
        update = average_vars(updates)
        self._model_state.import_variables(add_vars(old_vars, scale_vars(update, meta_step_size)))

    # pylint: disable=R0913,R0914 
Example 21
Project: projection-methods   Author: akshayka   File: soc.py    GNU General Public License v3.0 5 votes
def project(self, x_0):
        if self.contains(x_0):
            return x_0
        z = x_0[:-1]
        t = x_0[-1]
        norm_z = np.linalg.norm(z, 2)

        # As given in [BV04, Chapter 8, Exercises]
        if norm_z <= -t:
            # this certainly will never happen when t > 0
            return np.zeros(np.shape(x_0))
        elif self._contains(norm_z, t):
            return x_0
        else:
            return 0.5 * (1 + t/norm_z) * np.append(z, norm_z) 
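
The three branches above implement the standard Euclidean projection onto the second-order cone {(z, t) : ||z||_2 <= t}. A quick standalone check of the same logic (with a point in R^3):

import numpy as np

def project_soc(x_0):
    z, t = x_0[:-1], x_0[-1]
    norm_z = np.linalg.norm(z, 2)
    if norm_z <= -t:   # polar-cone case: project to the origin
        return np.zeros_like(x_0)
    if norm_z <= t:    # already inside the cone
        return x_0
    return 0.5 * (1 + t / norm_z) * np.append(z, norm_z)

p = project_soc(np.array([3.0, 4.0, 1.0]))
print(p)                                        # [1.8 2.4 3. ]
print(np.linalg.norm(p[:-1]) <= p[-1] + 1e-12)  # True: result lies in the cone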
Example 22
Project: wikilinks   Author: trovdimi   File: click_distributions.py    MIT License 5 votes
def plot_counts_frequency():

    fig = plt.figure()
    ax = fig.add_subplot(111)


    category_distributions = read_pickle(HOME+'output/category_counts_distribution.obj')
    data = category_distributions['counts']
    data = [int(x[0]) for x in data]
    #to consider the edges that have zero transitions we subtract the number of transitions from the number of edges in Wikipedia
    #number_of_edges = 339463340
    #zeros = np.zeros((number_of_edges - len(data)))
    #data = np.append(zeros, data)
    #bins = [0,11]
    #bins.extend(np.linspace(100,10000))
    #data = data.extend(listofzeros)
    #print data
    hist, bin_edges = np.histogram(data, bins=10000)
    #print len(hist)
    #print len(bin_edges)
    print(hist, bin_edges)

    ax.set_yscale('log')
    ax.set_xscale('log')
    ax.plot(bin_edges[:-1], hist, marker='o', markersize=3, markeredgecolor='none', color='#D65F5F')

    #ax.set_ylim([10**0, 10**6])
    #ax.set_xlim([10**0, 10**6])
    ax.set_xlabel('Number of transitions')
    ax.set_ylabel('Frequency')

    fig.tight_layout()
    fig.savefig( 'output/agg_counts_distributions.pdf', bbox_inches='tight') 
Example 23
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License 5 votes
def compute_b(G_lst, GtG_lst, beta_lst, Rc0, num_bands, a_ri):
    """
    compute the uniform sinusoidal samples b from the updated annihilating
    filter coeffiients.
    :param GtG_lst: list of G^H G for different subbands
    :param beta_lst: list of beta-s for different subbands
    :param Rc0: right-dual matrix, here it is the convolution matrix associated with c
    :param num_bands: number of bands
    :param L: size of b: L by 1
    :param a_ri: a 2D numpy array. each column corresponds to the measurements within a subband
    :return:
    """
    b_lst = []
    a_Gb_lst = []
    for loop in range(num_bands):
        GtG_loop = GtG_lst[loop]
        beta_loop = beta_lst[loop]
        b_loop = beta_loop - \
                 linalg.solve(GtG_loop,
                              np.dot(Rc0.T,
                                     linalg.solve(np.dot(Rc0, linalg.solve(GtG_loop, Rc0.T)),
                                                  np.dot(Rc0, beta_loop)))
                              )

        b_lst.append(b_loop)
        a_Gb_lst.append(a_ri[:, loop] - np.dot(G_lst[loop], b_loop))

    return np.column_stack(b_lst), linalg.norm(np.concatenate(a_Gb_lst)) 
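
For readers untangling the nested linalg.solve calls: each iteration computes the (G^H G)-weighted projection of beta onto the constraint set {b : Rc0 b = 0},

\[ b \;=\; \beta \;-\; (G^{H}G)^{-1} R^{T} \bigl( R\,(G^{H}G)^{-1} R^{T} \bigr)^{-1} R\,\beta, \qquad R := R_{c0}, \]

which satisfies R b = 0 by construction; a_Gb_lst then collects the residuals a_ri - G b, whose stacked norm is returned as the fitting error.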
Example 24
Project: kuaa   Author: rafaelwerneck   File: OPF.py    GNU General Public License v3.0 5 votes
def simplefit(self):
        Classifier.simplefit(self)

        #Werneck
        #-------
        try:
            graph = distance.squareform(distance.pdist(self._vftr, self._distance))
        except:
            try:
                descriptor_path = os.path.join(os.path.dirname(__file__), '..',
                        '..', 'descriptors', self._distance)
                sys.path.append(descriptor_path)
                software = __import__('plugin_' + self._distance)
                print(software)
                graph = distance.squareform(distance.pdist(self._vftr, software.distance))
            except:
                graph = distance.squareform(distance.pdist(self._vftr))
        #-------

        mst = self.__prim(graph)  # mst = minimum spanning tree
        prototypes = self.__findPrototypes(mst, self._vltr)
        prototypes = np.array(prototypes)

        vltr = np.array(self._vltr)
        prototypes_labels = vltr[prototypes]

        C, P, L = self.__dijkstra(mst, prototypes, prototypes_labels)

        # Storing data on the self.
        self.C, self.P, self.L = C, P, L
        self.vftr, self.vltr = self._vftr, self._vltr

        assert len(self.vftr) == len(self.L)
        assert len(self.L) == len(self.C) 
Example 25
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 5 votes
def filter_boxes(min_score, boxes, scores, classes):
    """Return boxes with a confidence >= `min_score`"""
    n = len(classes)
    idxs = []
    for i in range(n):
        if scores[i] >= min_score:
            idxs.append(i)

    filtered_boxes = boxes[idxs, ...]
    filtered_scores = scores[idxs, ...]
    filtered_classes = classes[idxs, ...]
    return filtered_boxes, filtered_scores, filtered_classes 
Example 26
Project: animal-tracking   Author: colinlaney   File: track.py    Creative Commons Zero v1.0 Universal 5 votes
def drawFloorCrop(event, x, y, flags, params):
    global perspectiveMatrix, name, RENEW_TETRAGON
    imgCroppingPolygon = np.zeros_like(params['imgFloorCorners'])
    if event == cv2.EVENT_RBUTTONUP:
        cv2.destroyWindow(f'Floor Corners for {name}')
    if len(params['croppingPolygons'][name]) > 4 and event == cv2.EVENT_LBUTTONUP:
        RENEW_TETRAGON = True
        h = params['imgFloorCorners'].shape[0]
        # delete 5th extra vertex of the floor cropping tetragon
        params['croppingPolygons'][name] = np.delete(params['croppingPolygons'][name], -1, 0)
        params['croppingPolygons'][name] = params['croppingPolygons'][name] - [h,0]
        
        # Sort cropping tetragon vertices counter-clockwise starting with top left
        params['croppingPolygons'][name] = counterclockwiseSort(params['croppingPolygons'][name])
        # Get the matrix of perspective transformation
        params['croppingPolygons'][name] = np.reshape(params['croppingPolygons'][name], (4,2))
        tetragonVertices = np.float32(params['croppingPolygons'][name])
        tetragonVerticesUpd = np.float32([[0,0], [0,h], [h,h], [h,0]])
        perspectiveMatrix[name] = cv2.getPerspectiveTransform(tetragonVertices, tetragonVerticesUpd)
    if event == cv2.EVENT_LBUTTONDOWN:
        if len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON:
            params['croppingPolygons'][name] = np.array([[0,0]])
            RENEW_TETRAGON = False
        if len(params['croppingPolygons'][name]) == 1:
            params['croppingPolygons'][name][0] = [x,y]
        params['croppingPolygons'][name] = np.append(params['croppingPolygons'][name], [[x,y]], axis=0)
    if event == cv2.EVENT_MOUSEMOVE and not (len(params['croppingPolygons'][name]) == 4 and RENEW_TETRAGON):
        params['croppingPolygons'][name][-1] = [x,y]
        if len(params['croppingPolygons'][name]) > 1:
            cv2.fillPoly(
                imgCroppingPolygon,
                [np.reshape(
                    params['croppingPolygons'][name],
                    (len(params['croppingPolygons'][name]),2)
                )],
                BGR_COLOR['green'], cv2.LINE_AA)
            imgCroppingPolygon = cv2.addWeighted(params['imgFloorCorners'], 1.0, imgCroppingPolygon, 0.5, 0.)
            cv2.imshow(f'Floor Corners for {name}', imgCroppingPolygon) 
Example 27
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def add_transcipt(self, transcript):
        self.trans.append(transcript)
        self.tranNum += 1 
Example 28
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def get_gene_info(self):
        RV = [self.geneID, self.geneName, self.chrom, self.strand, self.start,
              self.stop, self.biotype]
        _trans = []
        for t in self.trans:
            _trans.append(t.tranID)
        RV.append(",".join(_trans))
        return RV 
Example 29
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def add_premRNA(self):
        _tran = Transcript(self.chrom, self.strand, self.start, self.stop,
                           self.geneID + ".p", self.geneName, self.biotype)
        _tran.add_exon(self.chrom, self.strand, self.start, self.stop)
        self.trans.append(_tran)
        self.tranNum += 1 
Example 30
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def add_transcipt(self, transcript):
        self.trans.append(transcript)
        self.tranNum += 1 
Example 31
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def get_gene_info(self):
        RV = [self.geneID, self.geneName, self.chrom, self.strand, self.start,
              self.stop, self.biotype]
        _trans = []
        for t in self.trans:
            _trans.append(t.tranID)
        RV.append(",".join(_trans))
        return RV 
Example 32
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def add_premRNA(self):
        _tran = Transcript(self.chrom, self.strand, self.start, self.stop,
                           self.geneID + ".p", self.geneName, self.biotype)
        _tran.add_exon(self.chrom, self.strand, self.start, self.stop)
        self.trans.append(_tran)
        self.tranNum += 1 
Example 33
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes
def get_gene_info(self):
        RV = [self.geneID, self.geneName, self.chrom, self.strand, self.start,
              self.stop, self.biotype]
        _trans = []
        for t in self.trans:
            _trans.append(t.tranID)
        RV.append(",".join(_trans))
        return RV 
Example 34
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes
def add_premRNA(self):
        _tran = Transcript(self.chrom, self.strand, self.start, self.stop, 
                           self.geneID+".p", self.geneName, self.biotype)
        _tran.add_exon(self.chrom, self.strand, self.start, self.stop)
        self.trans.append(_tran)
        self.tranNum += 1 
Example 35
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def add_transcipt(self, transcript):
        self.trans.append(transcript)
        self.tranNum += 1 
Example 36
Project: models   Author: kipoi   File: gtf_utils.py    MIT License 5 votes
def add_premRNA(self):
        _tran = Transcript(self.chrom, self.strand, self.start, self.stop,
                           self.geneID + ".p", self.geneName, self.biotype)
        _tran.add_exon(self.chrom, self.strand, self.start, self.stop)
        self.trans.append(_tran)
        self.tranNum += 1 
Example 37
Project: skylab   Author: coenders   File: ps_injector.py    GNU General Public License v3.0 5 votes
def __str__(self):
        lines = [repr(self)]
        lines.append(67 * "-")

        lines.append(
            "\tSpectral index     : {0:6.2f}\n"
            "\tSource declination : {1:5.1f} deg\n"
            "\tlog10 Energy range : {2:5.1f} to {3:5.1f}".format(
                self.gamma, np.degrees(self.src_dec), *self.e_range))

        lines.append(67 * "-")

        return "\n".join(lines) 
Example 38
Project: skylab   Author: coenders   File: psLLH.py    GNU General Public License v3.0 5 votes
def sinDec_range(self):
        r"""ndarray: Lower and upper allowed sine declination
        """
        srange = [s.llh_model.sinDec_range for s in self._samples.values()]
        srange.append(super(MultiPointSourceLLH, self).sinDec_range)
        srange = np.vstack(srange)
        return np.array([np.amin(srange[:, 0]), np.amax(srange[:, 1])]) 
Example 39
Project: skylab   Author: coenders   File: data.py    GNU General Public License v3.0 5 votes
def init(Nexp, NMC, energy=True, **kwargs):
    Nsrc = kwargs.pop("Nsrc", 0)

    arr_exp = exp(Nexp - Nsrc)
    arr_mc = MC(NMC)

    if Nsrc > 0:
        inj = PointSourceInjector(2, sinDec_bandwidth=1, seed=0)
        inj.fill(0., arr_mc, 333.)

        source = next(inj.sample(np.pi, Nsrc, poisson=False))[1]

        arr_exp = np.append(arr_exp, source)

    if energy:
        llh_model = PowerLawLLH(["logE"], min(50, Nexp // 50),
                                range=[[0.9 * arr_mc["logE"].min(),
                                        1.1 * arr_mc["logE"].max()]],
                                sinDec_bins=min(50, Nexp // 50),
                                sinDec_range=[-1., 1.],
                                bounds=(0, 5))
    else:
        llh_model = UniformLLH(sinDec_bins=max(3, Nexp // 200),
                               sinDec_range=[-1., 1.])

    llh = PointSourceLLH(arr_exp, arr_mc, 365., llh_model=llh_model,
                         mode="all", nsource=25, scramble=False,
                         nsource_bounds=(-Nexp / 2., Nexp / 2.)
                                        if not energy else (0., Nexp / 2.),
                         seed=np.random.randint(2**32),
                         **kwargs)

    return llh 
Example 40
Project: neural-pipeline   Author: toodef   File: train_config.py    MIT License 5 votes
def _calc(self, output: Tensor, target: Tensor):
        """
        Calculate the metric from the model output and the target. Method for internal use.

        :param output: output from model
        :param target: ground truth
        """
        self._values = np.append(self._values, self.calc(output, target)) 
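
A caveat for this accumulation pattern (also used in Example 44 below): np.append copies the whole array on every call, so growing a metric history this way is quadratic overall. When that matters, a common alternative is to collect values in a Python list and convert once:

import numpy as np

values = []                    # list append is amortized O(1)
for batch_metric in (0.91, 0.87, 0.93):
    values.append(batch_metric)
values = np.asarray(values)    # one conversion at the end
print(values)                  # [0.91 0.87 0.93]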
Example 41
Project: neural-pipeline   Author: toodef   File: train_config.py    MIT License 5 votes
def add(self, item: AbstractMetric or 'MetricsGroup') -> 'MetricsGroup':
        """
        Add :class:`AbstractMetric` or :class:`MetricsGroup`

        :param item: object to add
        :return: self object
        :rtype: :class:`MetricsGroup`
        """
        if isinstance(item, type(self)):
            item._set_level(self.__lvl + 1)
            self.__metrics_groups.append(item)
        else:
            self.__metrics.append(item)
        return self 
Example 42
Project: neural-pipeline   Author: toodef   File: train_config.py    MIT License 5 votes
def add_metric(self, metric: AbstractMetric) -> AbstractMetric:
        """
        Add :class:`AbstractMetric` object

        :param metric: metric to add
        :return: metric object
        :rtype: :class:`AbstractMetric`
        """
        self._metrics.append(metric)
        return metric 
Example 43
Project: neural-pipeline   Author: toodef   File: train_config.py    MIT License 5 votes
def add_metrics_group(self, group: MetricsGroup) -> MetricsGroup:
        """
        Add :class:`MetricsGroup` object

        :param group: metrics group to add
        :return: metrics group object
        :rtype: :class:`MetricsGroup`
        """
        self._metrics_groups.append(group)
        return group 
Example 44
Project: neural-pipeline   Author: toodef   File: train_config.py    MIT License 5 votes
def _process_batch(self, batch, data_processor: TrainDataProcessor):
        cur_loss = data_processor.process_batch(batch, metrics_processor=self.metrics_processor(), is_train=self._is_train)
        if self._losses is None:
            self._losses = cur_loss
        else:
            self._losses = np.append(self._losses, cur_loss) 
Example 45
Project: core   Author: lifemapper   File: matrix.py    GNU General Public License v3.0 5 votes
def load_new(cls, filename):
        """
        @summary: Attempt to load a Matrix object from a file
        @param fn: File location of a stored matrix
        """
        headerLines = []
        dataLines = []
        doHeaders = True
        with open(filename) as inF:
            for line in inF:
                if doHeaders:
                    if line.startswith(DATA_KEY):
                        doHeaders = False
                    else:
                        headerLines.append(line)
                else:
                    dataLines.append(line)
        s = StringIO()
        for line in dataLines:
            s.write(line)
        s.seek(0)
        
        myObj = json.loads(''.join(headerLines))
            
        headers = myObj[HEADERS_KEY]
        # Load returns a tuple if compressed
        tmp = np.load(s)
        if isinstance(tmp, np.ndarray):
            data = tmp
        else:
            data = list(tmp.items())[0][1]
        return cls(data, headers=headers)
    
    # ........................... 
Example 46
Project: core   Author: lifemapper   File: matrix.py    GNU General Public License v3.0 5 votes
def concatenate(cls, mtxList, axis=0):
        """
        @summary: Concatenates multiple Matrix objects together to form a new 
                         Matrix object
        @param mtxList: A List of Matrix objects to concatenate together
        @param axis: The axis to concatenate these Matrix objects on
        @note: Assumes that headers for other axes are the same
        """
        mtxObjs = []
        axisHeaders = []
        for mtx in mtxList:
            if not isinstance(mtx, Matrix):
                mtx = Matrix(mtx)
            if mtx.data is not None:
                # Reshape if necessary when adding a new axis (stacking)
                if mtx.data.ndim < axis + 1: # Add 1 since zero-based
                    newShape = list(mtx.data.shape) + [1]
                    mtx.data = mtx.data.reshape(newShape)
                    mtx.setHeaders([''], axis=str(axis))
                
                h = mtx.getHeaders(axis=str(axis))
                if h is None:
                    h = ['']
                axisHeaders.extend(h)
                #axisHeaders.extend(mtx.getHeaders(axis=str(axis)))
                mtxObjs.append(mtx.data)
            
        # Create a new data matrix
        newData = np.concatenate(mtxObjs, axis=axis)
        # Use the first Matrix's headers as the base
        newHeaders = mtxList[0].getHeaders()
        # Replace the axis of headers with the concatenated version
        newHeaders[str(axis)] = axisHeaders
        return cls(newData, headers=newHeaders)
    
    # ........................... 
Example 47
Project: core   Author: lifemapper   File: matrix.py    GNU General Public License v3.0 5 votes
def append(self, mtx, axis=0):
        """
        @summary: Appends the provided Matrix object to this one
        @param mtx: The Matrix object to append to this one
        @param axis: The axis to append this matrix on
        @note: Only keeps the headers for the append axis, assumes the other 
                     axes are the same
        """
        # append the underlying ndarray of the incoming Matrix
        self.data = np.append(self.data, mtx.data, axis=axis)
        self.headers[str(axis)].append(mtx.getHeaders(axis=axis))
    
    # ........................... 
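
When an axis argument is given, as in Matrix.append above, np.append behaves like np.concatenate: both arrays must have the same number of dimensions and matching shapes on every other axis. For example:

import numpy as np

a = np.arange(6).reshape(2, 3)
b = np.arange(6, 12).reshape(2, 3)
print(np.append(a, b, axis=0).shape)  # (4, 3): rows stacked
print(np.append(a, b, axis=1).shape)  # (2, 6): columns stacked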
Example 48
Project: DJFeet   Author: libre-man   File: test_transitioners.py    MIT License 5 votes
def test_merge_sample(inf_jukebox_transitioner, random_song_files, monkeypatch,
                      same):
    mocking_append = MockingFunction(func=numpy.append)
    monkeypatch.setattr(numpy, 'append', mocking_append)
    song1 = Song(random_song_files[0])
    if same:
        song2 = song1
    else:
        song2 = Song(random_song_files[1])
    res, time_delta = inf_jukebox_transitioner.merge(song1, song2)
    assert (time_delta == inf_jukebox_transitioner.segment_size) == same
    assert mocking_append.called != same
    assert abs(
        librosa.core.get_duration(res, song1.sampling_rate) -
        inf_jukebox_transitioner.segment_size) < 0.0001 
Example 49
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes
def test_crop():
    for ndim in range(1, 6):
        for t in range(5):
            dims = []
            begin = []
            end = []
            idx = []
            for i in range(ndim):
                d = random.randint(1, 5)
                b = random.randint(0, d-1)
                e = random.randint(b+1, d)
                if b == 0 and random.randint(0, 1):
                    b = None
                elif b != 0 and random.randint(0, 1):
                    b -= d
                if e == d and random.randint(0, 1):
                    e = None
                elif e != d and random.randint(0, 1):
                    e -= d
                dims.append(d)
                begin.append(b)
                end.append(e)
                idx.append(slice(b, e))
            x = mx.nd.array(np.random.normal(size=dims))
            y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
            assert_allclose(x.asnumpy()[idx], y.asnumpy())

            vx = mx.sym.Variable('x')
            vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
            check_numeric_gradient(vy, [x.asnumpy()]) 
Example 50
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes
def test_slice_axis():
    for ndim in range(1, 6):
        shape = np.random.randint(1, 11, size=(ndim,))
        for t in range(ndim):
            d = shape[t]
            b = random.randint(0, d-1)
            e = random.randint(b+1, d)
            if np.random.rand() > 0.6:
                e = None
            else:
                if e < d and np.random.rand() > 0.5:
                    e = e - d
            if np.random.rand() > 0.5:
                b = b - d
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
            idx[t] = slice(b, e)

            X = mx.symbol.Variable('X')
            x = mx.nd.array(np.random.normal(size=shape))
            Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)

            xgrad = mx.nd.empty(x.shape)
            exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            x_grad_npy = np.random.normal(size=x.shape)
            xgrad = mx.nd.array(x_grad_npy)
            exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
            exec2.forward(is_train=True)
            exec2.backward([exec2.outputs[0]])
            xx = np.zeros(shape=x.shape, dtype=np.float32)
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5) 
Example 51
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes
def test_slice_like():
    for ndim in range(1, 6):
        from_shape = np.random.randint(1, 11, size=(ndim,))
        shape = [s + np.random.randint(0, 3) for s in from_shape]
        for t in range(ndim):
            if t > 0:
                axes = np.random.randint(0, ndim, size=t).tolist()
            else:
                axes = []
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
                if i in axes or not axes:
                    idx[i] = slice(0, from_shape[i])

            if axes:
                pos = np.random.randint(0, t)
                if axes[pos] > 0:
                    axes[pos] -= ndim  # negative index

            X = mx.symbol.Variable('X')
            X_1 = mx.symbol.Variable('X1')
            x = mx.nd.array(np.random.normal(size=shape))
            x1 = mx.nd.array(np.random.normal(size=from_shape))
            Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)

            xgrad = mx.nd.empty(x.shape)
            xgrad1 = mx.nd.empty(x1.shape)
            exec1 = Y.bind(default_context(), args = [x, x1],
                           args_grad = {'X': xgrad, 'X1': xgrad1})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy()) 
Example 52
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes
def test_slice():
    def test_slice_forward_backward(a, index):
        a_np = a.asnumpy()
        begin = []
        end = []
        step = []
        for slice_i in index:
            begin.append(slice_i.start)
            end.append(slice_i.stop)
            step.append(slice_i.step)
        b = mx.nd.slice(a, begin=begin, end=end, step=step)
        b_np = a_np[index]
        assert same(b.asnumpy(), b_np)

        data = mx.sym.Variable('data')
        slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
        expected_in_grad = np.zeros_like(a_np)
        expected_in_grad[index] = b_np
        check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])

    shape = (16, 14, 17, 20)
    arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
    index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
                  (slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
                  (slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
                  (slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
                  (slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
    for index in index_list:
        test_slice_forward_backward(arr, index)

    # check numeric gradient
    in_data = np.arange(36).reshape(2, 2, 3, 3)
    data = mx.sym.Variable('data')
    slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
    check_numeric_gradient(slice_sym, [in_data]) 
Example 53
Project: smach_based_introspection_framework   Author: birlrobotics   File: generate_synthetic_data.py    BSD 3-Clause "New" or "Revised" License 5 votes
def run(df, csv_save_path):
    num_data = 5
    dis_threshold = 2.0
    traj = df.values
    interested_data_fields = df.columns.values
    N,D = traj.shape
    A   = np.eye(N)
    x   = np.eye(N, k=-1)*-2.0
    A   = A + x
    x   = np.eye(N, k=-2) 
    A   = A + x
    _row = np.append([np.zeros(N-2)],[1, -2])
    A = np.vstack([A, _row])
    _row = np.append([np.zeros(N-1)],[1])
    A = np.vstack([A, _row])
    R_1 = np.linalg.inv(np.dot(A.T, A))
    y = np.amax(R_1, axis=0)
    y = np.matlib.repmat(y, N, 1)
    M = np.divide(R_1, y) * (1.0/N)

    traj_results = []
    synthetic_data = []
    for ind_D in range(num_data):
        theta = traj
        theta_k = np.random.multivariate_normal(np.zeros(N), R_1, D).T
        test_traj = theta + theta_k
        # fastdtw returns (distance, path); compare the distance to the threshold
        while fastdtw(test_traj, theta, dist=lambda x, y: np.linalg.norm(x - y, ord=1))[0] > dis_threshold:
            print('.\n')
            theta_k = np.dot(M, theta_k)
            test_traj = theta + theta_k
        synthetic_traj = theta + theta_k 
Example 54
Project: smach_based_introspection_framework   Author: birlrobotics   File: log_likelihood_incremental_calculator.py    BSD 3-Clause "New" or "Revised" License 5 votes
def add_one_sample_and_get_loglik(self, sample):
        framelogprob = self.model._compute_log_likelihood(sample) 
        if self.fwdlattice is None:
            self.fwdlattice = np.zeros((1, self.n_components))
            for i in range(self.n_components):
                self.fwdlattice[0, i] = self.log_startprob[i] + framelogprob[0, i]
        else:
            self.fwdlattice = np.append(self.fwdlattice, np.zeros((1, self.n_components)), axis=0)
            for j in range(self.n_components):
                for i in range(self.n_components):
                    self.work_buffer[i] = self.fwdlattice[-2, i] + self.log_transmat[i, j]

                self.fwdlattice[-1, j] = logsumexp(self.work_buffer) + framelogprob[0, j]
        
        return logsumexp(self.fwdlattice[-1]) 
Example 55
Project: smach_based_introspection_framework   Author: birlrobotics   File: log_likelihood_incremental_calculator.py    BSD 3-Clause "New" or "Revised" License 5 votes
def add_one_sample_and_get_loglik(self, sample):
        self.samples.append(sample)
        return self.model.score(np.concatenate(self.samples, axis=0)) 
Example 56
Project: DiscEvolution   Author: rbooth200   File: run_model.py    GNU General Public License v3.0 5 votes
def setup_output(model):
    
    out = model['output']

    # Setup of the output controller
    output_times = np.arange(out['first'], out['last'], out['interval'])
    if not np.allclose(out['last'], output_times[-1], 1e-12):
        output_times = np.append(output_times, out['last'])

    output_times *= yr

    if out['plot']:
        plot = np.array(out["plot_times"]) * yr
    else:
        plot = []

    EC = Event_Controller(save=output_times, plot=plot)
    
    # Base string for output:
    mkdir_p(out['directory'])
    base_name = os.path.join(out['directory'], out['base'] + '_{:04d}')

    format = out['format']
    if format.lower() == 'hdf5':
        base_name += '.h5'
    elif format.lower() == 'ascii':
        base_name += '.dat'
    else:
        raise ValueError("Output format {} not recognized".format(format))

    return base_name, EC 
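
The np.arange/np.append pair in setup_output is a handy idiom for a half-open range that must still include its endpoint: np.arange excludes the stop value, so the final time is appended when missing. For instance:

import numpy as np

first, last, interval = 0.0, 10.0, 3.0
output_times = np.arange(first, last, interval)
print(output_times)            # [0. 3. 6. 9.] -- note 10.0 is absent
if not np.allclose(last, output_times[-1], 1e-12):
    output_times = np.append(output_times, last)
print(output_times)            # [ 0.  3.  6.  9. 10.]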
Example 57
Project: oceanmapper   Author: vtamsitt   File: oceanmapper.py    Apache License 2.0 4 votes
def topography3d(mode,topo_x=None,topo_y=None,topo_z=None,topo_limits=None,zscale=500.,topo_vmin=None,topo_vmax=None,topo_cmap='bone',topo_cmap_reverse=False,land_constant=False,land_color=(0.7,0.7,0.7),set_view=None):
    """
    mode: string; coordinate system of 3D projection. Options are 'rectangle' (default), 'sphere' or 'cylinder'
    topo: array_like, optional; input topography file, default is etopo 30 
##TODO: need to define sign of topography 
    topo_limits: array_like, optional; longitude and latitude limits for 3d topography plot [lon min, lon max, lat min, lat max], longitudes range -180 to 180, latitude -90 to 90, default is entire globe
    zscale: scalar, optional; change vertical scaling for plotting, such that the vertical axis is scaled as topo_z/zscale (assumes topo_z units are m); default zscale is 500
    topo_cmap: string, optional; set colormap for topography, default is bone 
    topo_cmap_reverse: string, optional; reverse topography colormap, default is false
    land_constant: bool, optional; if True, land is set to one colour, default is False
    land_color: color, optional; RGB triplet specifying land colour, default is grey
    set_view: array_like, optional; set the mayavi camera angle with input [azimuth, elevation, distance, focal point], default is None 
    """
        
    #load topo data
    if topo_x is not None and topo_y is not None and topo_z is not None:
        xraw = topo_x
        yraw = topo_y
        zraw = topo_z

    else:
        tfile = np.load('etopo1_30min.npz')
        xraw = tfile['x']
        yraw = tfile['y']
        zraw = np.swapaxes(tfile['z'][:,:],0,1)
    

    #create coordinate variables
    phi = (yraw[:]*np.pi*2)/360.+np.pi/2.
    theta = (xraw[:]*np.pi*2)/360.
    c = zraw
    theta=np.append(theta,theta[0])
    c = np.concatenate((c,np.expand_dims(c[0,:],axis=0)),axis=0)

    if topo_vmin is None:
        tvmin = 0
    else:
        tvmin = topo_vmin
    if topo_vmax is None:
        tvmax = 7000
    else:
        tvmax = topo_vmax
   
    if topo_limits is not None:
        phi_1 = topo_limits[2]
        phi_2 = topo_limits[3]
        theta_1 = topo_limits[0]
        theta_2 = topo_limits[1]

        phi_ind1 = np.argmin(np.abs(yraw-phi_1))
        phi_ind2 = np.argmin(np.abs(yraw-phi_2))
        theta_ind1 = np.argmin(np.abs(xraw-theta_1))
        theta_ind2 = np.argmin(np.abs(xraw-theta_2))

        #restrict topo extent
        phi=phi[phi_ind1:phi_ind2]
        theta=theta[theta_ind1:theta_ind2]
        c = c[theta_ind1:theta_ind2, phi_ind1:phi_ind2]
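
One detail worth noting above: np.append(theta, theta[0]) duplicates the first longitude at the end of the array, closing the periodic seam so the rendered surface has no gap at the dateline. The same trick in isolation:

import numpy as np

theta = np.linspace(0., 2. * np.pi, 8, endpoint=False)
theta_closed = np.append(theta, theta[0])
print(theta_closed[0] == theta_closed[-1])  # True: the ring is closed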
Example 58
Project: meta-transfer-learning   Author: erfaneshrati   File: reptile.py    MIT License 4 votes
def train_step(self,
                   dataset,
                   input_ph,
                   label_ph,
                   minimize_op,
                   num_classes,
                   num_shots,
                   inner_batch_size,
                   inner_iters,
                   replacement,
                   meta_step_size,
                   meta_batch_size):
        """
        Perform a Reptile training step.

        Args:
          dataset: a sequence of data classes, where each data
            class has a sample(n) method.
          input_ph: placeholder for a batch of samples.
          label_ph: placeholder for a batch of labels.
          minimize_op: TensorFlow Op to minimize a loss on the
            batch specified by input_ph and label_ph.
          num_classes: number of data classes to sample.
          num_shots: number of examples per data class.
          inner_batch_size: batch size for every inner-loop
            training iteration.
          inner_iters: number of inner-loop iterations.
          replacement: sample with replacement.
          meta_step_size: interpolation coefficient.
          meta_batch_size: how many inner-loops to run.
        """
        old_vars = self._model_state.export_variables()
        new_vars = []
        for _ in range(meta_batch_size):
            mini_dataset = _sample_mini_dataset(dataset, num_classes, num_shots)
            for batch in _mini_batches(mini_dataset, inner_batch_size, inner_iters, replacement):
                inputs, labels = zip(*batch)
                if self._pre_step_op:
                    self.session.run(self._pre_step_op)
                self.session.run(minimize_op, feed_dict={input_ph: inputs, label_ph: labels})
            new_vars.append(self._model_state.export_variables())
            self._model_state.import_variables(old_vars)
        new_vars = average_vars(new_vars)
        self._model_state.import_variables(interpolate_vars(old_vars, new_vars, meta_step_size))
    # pylint: disable=R0913,R0914 
Example 59
Project: PyVolRender   Author: chapering   File: imgVolRender.py    GNU General Public License v2.0 4 votes
def LoadVolumeData(self, data_set):
		if data_set.endswith('nii') or data_set.endswith('nii.gz'):
			try:
				from nifti import NiftiImage
			except ImportError:
				print "Apparently you don't have PyNIfTI installed, see http://www.siafoo.net/snippet/310 for instructions"
				exit(1)
			nim = NiftiImage(data_set)

			img_data = nim.data
			img_header = nim.header
			print(nim.data.shape)

			if len(nim.data.shape) == 3: # single volume
				pass
			elif len(nim.data.shape) == 4: # multiple volume
				alldata = numpy.array(nim.data[0])
				print(alldata.shape)
				for i in range(1, len(nim.data)):
					#self.addVol( nim.data[i] )
					#alldata = numpy.append( alldata, nim.data[i], axis=0)
					alldata = numpy.add( alldata, nim.data[i])
				print(alldata.shape)
				img_data = alldata
			elif len(nim.data.shape) == 5: # tensor field volume
				alldata = numpy.array(nim.data[0][0])
				print(alldata.shape)
				for i in range(1, len(nim.data)):
					#self.addVol( nim.data[i] )
					#alldata = numpy.append( alldata, nim.data[i][0], axis=0)
					alldata = numpy.add( alldata, nim.data[i][0])
				print(alldata.shape)
				img_data = alldata

		elif data_set.endswith('hdr'):
			# Use the header to figure out the shape of the data
			# then load the raw data and reshape the array 
			shape = [int(x) for x in open(data_set).readline().split()]
			img_data = numpy.frombuffer(open(data_set.replace('.hdr', '.dat'), 'rb').read(),
									numpy.uint8)\
					.reshape((shape[2], shape[1], shape[0]))
			img_header = None  # assumed: raw .hdr/.dat volumes carry no NIfTI header
                        
		self.addVol( img_data, img_header )
		return 0 
Example 60
Project: PyVolRender   Author: chapering   File: imgVolRender_GPU.py    GNU General Public License v2.0 4 votes
def LoadVolumeData(self, data_set):
		if data_set.endswith('nii') or data_set.endswith('nii.gz'):
			try:
				from nifti import NiftiImage
			except ImportError:
				print "Apparently you don't have PyNIfTI installed, see http://www.siafoo.net/snippet/310 for instructions"
				exit(1)
			nim = NiftiImage(data_set)

			img_data = nim.data
			img_header = nim.header
			print(nim.data.shape)

			if len(nim.data.shape) == 3: # single volume
				pass
			elif len(nim.data.shape) == 4: # multiple volume
				alldata = numpy.array(nim.data[0])
				print(alldata.shape)
				for i in range(1, len(nim.data)):
					#self.addVol( nim.data[i] )
					#alldata = numpy.append( alldata, nim.data[i], axis=0)
					alldata = numpy.add( alldata, nim.data[i])
				print(alldata.shape)
				img_data = alldata
			elif len(nim.data.shape) == 5: # tensor field volume
				alldata = numpy.array(nim.data[0][0])
				print(alldata.shape)
				for i in range(1, len(nim.data)):
					#self.addVol( nim.data[i] )
					#alldata = numpy.append( alldata, nim.data[i][0], axis=0)
					alldata = numpy.add( alldata, nim.data[i][0])
				print(alldata.shape)
				img_data = alldata

		elif data_set.endswith('hdr'):
			# Use the header to figure out the shape of the data,
			# then load the raw data and reshape the array
			shape = [int(x) for x in open(data_set).readline().split()]
			img_data = numpy.frombuffer(open(data_set.replace('.hdr', '.dat'), 'rb').read(),
									numpy.uint8)\
					.reshape((shape[2], shape[1], shape[0]))
			img_header = None  # raw .hdr/.dat volumes carry no NIfTI header

		self.addVol( img_data, img_header )
		return 0 
Example 61
Project: PyVolRender   Author: chapering   File: imgVolRender_fixedpoint.py    GNU General Public License v2.0 4 votes vote down vote up
def LoadVolumeData(self, data_set):
		if data_set.endswith('nii') or data_set.endswith('nii.gz'):
			try:
				from nifti import NiftiImage
			except ImportError:
				print "Apparently you don't have PyNIfTI installed, see http://www.siafoo.net/snippet/310 for instructions"
				exit(1)
			nim = NiftiImage(data_set)

			img_data = nim.data
			img_header = nim.header
			print(nim.data.shape)

			if len(nim.data.shape) == 3: # single volume
				pass
			elif len(nim.data.shape) == 4: # multiple volume
				alldata = numpy.array(nim.data[0])
				print(alldata.shape)
				for i in range(1, len(nim.data)):
					#self.addVol( nim.data[i] )
					#alldata = numpy.append( alldata, nim.data[i], axis=0)
					alldata = numpy.add( alldata, nim.data[i])
				print(alldata.shape)
				img_data = alldata
			elif len(nim.data.shape) == 5: # tensor field volume
				alldata = numpy.array(nim.data[0][0])
				print(alldata.shape)
				for i in range(1, len(nim.data)):
					#self.addVol( nim.data[i] )
					#alldata = numpy.append( alldata, nim.data[i][0], axis=0)
					alldata = numpy.add( alldata, nim.data[i][0])
				print(alldata.shape)
				img_data = alldata

		elif data_set.endswith('hdr'):
			# Use the header to figure out the shape of the data,
			# then load the raw data and reshape the array
			shape = [int(x) for x in open(data_set).readline().split()]
			img_data = numpy.frombuffer(open(data_set.replace('.hdr', '.dat'), 'rb').read(),
									numpy.uint8)\
					.reshape((shape[2], shape[1], shape[0]))
			img_header = None  # raw .hdr/.dat volumes carry no NIfTI header

		self.addVol( img_data, img_header )
		return 0 
Example 62
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: proposal_target_layer.py    MIT License 4 votes vote down vote up
def _sample_rois(all_rois, all_scores, gt_boxes, fg_rois_per_image, rois_per_image, num_classes):
    """Generate a random sample of RoIs comprising foreground and background
    examples.
    """
    # overlaps: (rois x gt_boxes)
    overlaps = bbox_overlaps(
        np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float64),
        np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float64))
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    labels = gt_boxes[gt_assignment, 4]

    # Select foreground RoIs as those with >= FG_THRESH overlap
    fg_inds = np.where(max_overlaps >= cfg.FLAGS.roi_fg_threshold)[0]
    # Guard against the case when an image has fewer than fg_rois_per_image
    # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
    bg_inds = np.where((max_overlaps < cfg.FLAGS.roi_bg_threshold_high) &
                       (max_overlaps >= cfg.FLAGS.roi_bg_threshold_low))[0]

    # Small modification to the original version where we ensure a fixed number of regions are sampled
    if fg_inds.size > 0 and bg_inds.size > 0:
        fg_rois_per_image = min(fg_rois_per_image, fg_inds.size)
        fg_inds = npr.choice(fg_inds, size=int(fg_rois_per_image), replace=False)
        bg_rois_per_image = rois_per_image - fg_rois_per_image
        to_replace = bg_inds.size < bg_rois_per_image
        bg_inds = npr.choice(bg_inds, size=int(bg_rois_per_image), replace=to_replace)
    elif fg_inds.size > 0:
        to_replace = fg_inds.size < rois_per_image
        fg_inds = npr.choice(fg_inds, size=int(rois_per_image), replace=to_replace)
        fg_rois_per_image = rois_per_image
    elif bg_inds.size > 0:
        to_replace = bg_inds.size < rois_per_image
        bg_inds = npr.choice(bg_inds, size=int(rois_per_image), replace=to_replace)
        fg_rois_per_image = 0
    else:
        import pdb
        pdb.set_trace()

    # The indices that we're selecting (both fg and bg)
    keep_inds = np.append(fg_inds, bg_inds)
    # Select sampled values from various arrays:
    labels = labels[keep_inds]
    # Clamp labels for the background RoIs to 0
    labels[int(fg_rois_per_image):] = 0
    rois = all_rois[keep_inds]
    roi_scores = all_scores[keep_inds]

    bbox_target_data = _compute_targets(
        rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)

    bbox_targets, bbox_inside_weights = \
        _get_bbox_regression_labels(bbox_target_data, num_classes)

    return labels, rois, roi_scores, bbox_targets, bbox_inside_weights 
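Here np.append simply concatenates the two 1-D index arrays, so the sampled foreground indices come first and the background indices follow; the combined array is then used for fancy indexing into labels and rois. A minimal sketch with made-up index values:

import numpy as np

fg_inds = np.array([3, 7, 11])    # hypothetical foreground RoI indices
bg_inds = np.array([0, 4, 5, 9])  # hypothetical background RoI indices

keep_inds = np.append(fg_inds, bg_inds)
print(keep_inds)  # [ 3  7 11  0  4  5  9] -- foreground first, then background

labels = np.arange(12)
print(labels[keep_inds])  # fancy indexing selects the sampled entries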
Example 63
Project: Tacotron   Author: ElwynWang   File: signal_process.py    GNU General Public License v3.0 4 votes vote down vote up
def get_spectrograms(fpath):
    '''Returns normalized log(melspectrogram) and log(magnitude) from the sound file at `fpath`.

    Args:
      fpath: A string. The full path of a sound file.

    Returns:
      mel: A 2d array of shape (T, n_mels) <- Transposed
      mag: A 2d array of shape (T, 1+n_fft/2) <- Transposed
    '''
    # Loading sound file
    y, sr = librosa.load(fpath, sr=Hp.sample_rate)

    # Trimming
    y, _ = librosa.effects.trim(y)

    # Preemphasis
    y = np.append(y[0], y[1:] - Hp.preemphasis * y[:-1])

    # stft
    linear = librosa.stft(y=y,
                          n_fft=Hp.num_fft,
                          hop_length=Hp.hop_length,
                          win_length=Hp.win_length)

    # magnitude spectrogram
    mag = np.abs(linear)  # (1+n_fft//2, T)

    # mel spectrogram
    mel_basis = librosa.filters.mel(Hp.sample_rate, Hp.num_fft, Hp.num_mels)  # (n_mels, 1+n_fft//2)
    mel = np.dot(mel_basis, mag)  # (n_mels, t)

    # to decibel
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))

    # normalize
    mel = np.clip((mel - Hp.ref_db + Hp.max_db) / Hp.max_db, 1e-8, 1)
    mag = np.clip((mag - Hp.ref_db + Hp.max_db) / Hp.max_db, 1e-8, 1)

    # Transpose
    mel = mel.T.astype(np.float32)  # (T, n_mels)
    mag = mag.T.astype(np.float32)  # (T, 1+n_fft//2)

    return mel, mag 
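The pre-emphasis step uses np.append to prepend the untouched first sample to the filtered remainder, so the output y'[t] = y[t] - a * y[t-1] keeps the same length as the input. A minimal sketch with a toy signal (the coefficient 0.97 is a typical choice, not taken from this project):

import numpy as np

y = np.array([1.0, 2.0, 3.0, 4.0])
preemphasis = 0.97  # assumed typical value, for illustration only

# y'[0] = y[0]; y'[t] = y[t] - preemphasis * y[t-1] for t >= 1
y_pre = np.append(y[0], y[1:] - preemphasis * y[:-1])
print(y_pre)                 # [1.   1.03 1.06 1.09]
print(len(y_pre) == len(y))  # True: the output keeps the original length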
Example 64
Project: dc_tts   Author: Kyubyong   File: utils.py    Apache License 2.0 4 votes vote down vote up
def get_spectrograms(fpath):
    '''Parse the wave file in `fpath` and
    Returns normalized melspectrogram and linear spectrogram.

    Args:
      fpath: A string. The full path of a sound file.

    Returns:
      mel: A 2d array of shape (T, n_mels) and dtype of float32.
      mag: A 2d array of shape (T, 1+n_fft/2) and dtype of float32.
    '''
    # Loading sound file
    y, sr = librosa.load(fpath, sr=hp.sr)

    # Trimming
    y, _ = librosa.effects.trim(y)

    # Preemphasis
    y = np.append(y[0], y[1:] - hp.preemphasis * y[:-1])

    # stft
    linear = librosa.stft(y=y,
                          n_fft=hp.n_fft,
                          hop_length=hp.hop_length,
                          win_length=hp.win_length)

    # magnitude spectrogram
    mag = np.abs(linear)  # (1+n_fft//2, T)

    # mel spectrogram
    mel_basis = librosa.filters.mel(hp.sr, hp.n_fft, hp.n_mels)  # (n_mels, 1+n_fft//2)
    mel = np.dot(mel_basis, mag)  # (n_mels, t)

    # to decibel
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))

    # normalize
    mel = np.clip((mel - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
    mag = np.clip((mag - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)

    # Transpose
    mel = mel.T.astype(np.float32)  # (T, n_mels)
    mag = mag.T.astype(np.float32)  # (T, 1+n_fft//2)

    return mel, mag 
Example 65
Project: models   Author: kipoi   File: dataloader.py    MIT License 4 votes vote down vote up
def __getitem__(self, idx):
        if self.fasta_extractor is None:
            # Fasta
            self.fasta_extractor = FastaExtractor(self.fasta_file)
            # DNase
            self.dnase_extractor = BigwigExtractor(self.dnase_file)
            self.mappability_extractor = BigwigExtractor(self.mappability_file)

        # Get the interval
        interval = self.bt[idx]
        if interval.stop - interval.start != self.SEQ_WIDTH:
            center = (interval.start + interval.stop) // 2
            interval.start = center - self.SEQ_WIDTH // 2
            interval.end = center + self.SEQ_WIDTH // 2 + self.SEQ_WIDTH % 2
        # Get the gencode features
        gencode_counts = np.array([v[idx].count for k, v in self.overlap_beds],
                                  dtype=bool)

        # Run the fasta extractor
        seq = np.squeeze(self.fasta_extractor([interval]), axis=0)
        seq_rc = seq[::-1, ::-1]

        # Dnase
        dnase = np.squeeze(self.dnase_extractor([interval]), axis=0)[:, np.newaxis]
        dnase[np.isnan(dnase)] = 0  # NA fill
        dnase_rc = dnase[::-1]

        bigwig_list = [seq]
        bigwig_rc_list = [seq_rc]
        mappability = np.squeeze(self.mappability_extractor([interval]), axis=0)[:, np.newaxis]
        mappability[np.isnan(mappability)] = 0  # NA fill
        mappability_rc = mappability[::-1]
        bigwig_list.append(mappability)
        bigwig_rc_list.append(mappability_rc)
        bigwig_list.append(dnase)
        bigwig_rc_list.append(dnase_rc)

        ranges = GenomicRanges.from_interval(interval)
        ranges_rc = GenomicRanges.from_interval(interval)
        ranges_rc.strand = "-"

        return {
            "inputs": [
                np.concatenate(bigwig_list, axis=-1),  # stack along the last axis
                np.concatenate(bigwig_rc_list, axis=-1),  # RC version
                np.append(self.meta_feat, gencode_counts)
            ],
            "targets": {},  # No Targets
            "metadata": {
                "ranges": ranges,
                "ranges_rc": ranges_rc
            }
        } 
Example 66
Project: skylab   Author: coenders   File: basellh.py    GNU General Public License v3.0 4 votes vote down vote up
def _active_region(self, src_ra, src_dec, ts, beta, inj, n_iter, trials,
                       logger, **kwargs):
        if len(trials) > 0:
            n_inj = int(np.mean(trials["n_inj"]))
        else:
            n_inj = 0

        logger.info("Quick estimate of active region...")
        logger.info("Start with {0:d} events.".format(n_inj + 1))

        stop = False
        while not stop:
            n_inj, inject = next(inj.sample(
                src_ra, n_inj + 1, poisson=False))

            fmin, pbest = self.fit_source(
                src_ra, src_dec, scramble=True, inject=inject)

            trial = np.empty((1, ), dtype=trials.dtype)
            trial["n_inj"] = n_inj
            trial["TS"] = fmin

            for key in self.params:
                trial[key] = pbest[key]

            trials = np.append(trials, trial)

            mts = np.bincount(trials["n_inj"], weights=trials["TS"])
            mw = np.bincount(trials["n_inj"])
            mts[mw > 0] /= mw[mw > 0]

            residuals = mts - ts

            stop = (
                np.count_nonzero(residuals > 0.)/len(residuals) > beta or
                np.all(residuals > 0.)
                )

        mu = len(mts) * beta
        logger.info("Active region: mu = {0:.1f}".format(mu))

        # Do trials around active region.
        trials = np.append(
            trials, self.do_trials(
                src_ra, src_dec, n_iter,
                mu=inj.sample(src_ra, mu), **kwargs))

        return trials 
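np.append also works on structured arrays: as long as the dtypes match, each single-record trial is concatenated onto the growing trials array. A minimal sketch with a simplified stand-in dtype (the real trial dtype has more fields):

import numpy as np

dtype = [("n_inj", int), ("TS", float)]  # simplified stand-in dtype
trials = np.empty(0, dtype=dtype)

for n_inj, ts in [(1, 0.5), (2, 1.7), (2, 2.1)]:
    trial = np.empty((1,), dtype=dtype)
    trial["n_inj"] = n_inj
    trial["TS"] = ts
    trials = np.append(trials, trial)  # concatenates records of matching dtype

print(trials["n_inj"])  # [1 2 2]
print(np.bincount(trials["n_inj"], weights=trials["TS"]))  # summed TS per n_inj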
Example 67
Project: skylab   Author: coenders   File: psLLH.py    GNU General Public License v3.0 4 votes vote down vote up
def __str__(self):
        lines = [repr(self)]
        lines.append(67 * "-")
        lines.append("Number of Data Events: {0:7d}".format(self.exp.size))

        if self.exp.size > 0:
            srange = (
                np.rad2deg(np.arcsin(np.amin(self.exp["sinDec"]))),
                np.rad2deg(np.arcsin(np.amax(self.exp["sinDec"])))
                )

            if "logE" in self.exp.dtype.fields:
                erange = (
                    np.amin(self.exp["logE"]),
                    np.amax(self.exp["logE"])
                    )
        else:
            srange = (np.nan, np.nan)
            erange = (np.nan, np.nan)

        lines.append("\tDeclination Range  : {0:6.1f} - {1:6.1f} deg".format(
            *srange))
        lines.append("\tlog10 Energy Range : {0:6.1f} - {1:6.1f}".format(
            *erange))
        lines.append("\tLivetime of sample : {0:7.2f} days".format(
            self.livetime))

        lines.append(67 * "-")

        if self.mode == "all":
            lines.append("Using all events")
        else:
            lines.append("Selected Events - {0:8s}: {1:7d}".format(
                self.mode, self._nselected))

        lines.append(67 * "-")
        lines.append("Likelihood model:")

        lines.append("\n\t".join(
            l if len(set(l)) > 2 else l[:-len("\t".expandtabs())]
            for l in str(self.llh_model).splitlines()))

        lines.append("Fit Parameter\tSeed\tBounds")

        lines.extend(
            "{0:15s}\t{1:.2f}\t{2:.2f} to {3:.2f}".format(p, s, *b)
            for p, s, b in zip(self.params, self.par_seeds, self.par_bounds)
            )

        lines.append(67 * "-")

        return "\n".join(lines) 
Example 68
Project: skylab   Author: coenders   File: psLLH.py    GNU General Public License v3.0 4 votes vote down vote up
def llh(self, nsources, **others):
        SoB = self._signal / self._events["B"]

        weights, wgrad = self.llh_model.weight(self._events, **others)
        x = (SoB*weights - 1.) / self._nevents

        alpha = nsources * x
        ts = np.empty_like(alpha, dtype=np.float64)

        # Taylor-expand likelihood function and gradients around threshold
        # alpha value in order to avoid divergences.
        aval = -1. + self._aval
        mask = alpha > aval

        ts[mask] = np.log1p(alpha[mask])

        arel = (alpha[~mask] - aval) / self._aval
        ts[~mask] = np.log1p(aval) + arel - arel**2 / 2.

        ts = ts.sum()

        nsgrad = np.empty_like(alpha, dtype=np.float64)
        nsgrad[mask] = x[mask] / (1. + alpha[mask])
        nsgrad[~mask] = x[~mask] * (1. - arel) / self._aval
        nsgrad = nsgrad.sum()

        if self._nevents > self._nselected:
            ndiff = self._nevents - self._nselected
            ts += ndiff * np.log1p(-nsources / self._nevents)
            nsgrad -= ndiff / (self._nevents - nsources)

        if wgrad is not None:
            pgrad = SoB * wgrad / self._nevents
            pgrad[:, mask] *= nsources / (1. + alpha[mask])
            pgrad[:, ~mask] *= nsources * (1. - arel) / self._aval
            pgrad = pgrad.sum(axis=-1)

        else:
            pgrad = np.zeros((0,))

        # Multiply by two for chi-square distributed test-statistic.
        ts *= 2.
        grad = 2. * np.append(nsgrad, pgrad)

        return ts, grad 
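In the last step, np.append joins the scalar nsources gradient with the array of parameter gradients; because np.append flattens its inputs, the scalar and the 1-D array combine into a single 1-D gradient vector. A minimal sketch:

import numpy as np

nsgrad = 0.42                  # scalar gradient w.r.t. the number of sources
pgrad = np.array([0.1, -0.3])  # gradients w.r.t. the remaining fit parameters

grad = 2. * np.append(nsgrad, pgrad)
print(grad)        # [ 0.84  0.2  -0.6]
print(grad.shape)  # (3,) -- the scalar is promoted and flattened in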
Example 69
Project: skylab   Author: coenders   File: grbllh.py    GNU General Public License v3.0 4 votes vote down vote up
def _select_events(self, src_ra, src_dec, scramble=False, inject=None):
        r"""Select events for log-likelihood evaluation.

        If `scramble` is `True`, `nbackground` (plus Poisson
        fluctuations) events are selected from the off-source time
        range. Otherwise, the on-source events ``data["on"]`` are
        selected.

        Note
        ----
        In the current implementation, the selection depends only on the
        on-source time range. Hence, `src_ra` and `src_dec` are ignored.

        """
        # We will choose new events, so it is time to clear the likelihood
        # model's cache.
        self.llh_model.reset()

        if scramble:
            N = self.random.poisson(self.nbackground)

            if N > 0:
                self._events = self.random.choice(self.data["off"], N)
                self._events["ra"] = self.random.uniform(0., 2.*np.pi, N)
            else:
                self._events = np.empty(0, dtype=self.data["off"].dtype)
        else:
            self._events = self.data["on"]

        if inject is not None:
            remove = np.logical_or(
                inject["sinDec"] < self.llh_model.sinDec_range[0],
                inject["sinDec"] > self.llh_model.sinDec_range[-1])

            if np.any(remove):
                inject = inject[np.logical_not(remove)]

            inject = numpy.lib.recfunctions.append_fields(
                inject, names="B", data=self.llh_model.background(inject),
                usemask=False)

            self._events = np.append(
                self._events, inject[list(self._events.dtype.names)])

        self._signal = self.llh_model.signal(src_ra, src_dec, self._events)

        # Method has to set number of events and number of selected
        # events. Here, both numbers are equal.
        self._nevents = self._events.size
        self._nselected = self._nevents 
Example 70
Project: core   Author: lifemapper   File: matrix.py    GNU General Public License v3.0 4 votes vote down vote up
def flatten_2D(self):
        """
        @summary: Flattens a higher dimension Matrix object into a 2D matrix
        """
        flatMtx = self
        while flatMtx.data.ndim > 2:
            # More than two dimensions so we must flatten
            oldShape = flatMtx.data.shape
            oldNumRows = oldShape[0]
            newShape = tuple([oldShape[0]*oldShape[2], oldShape[1]] + list(oldShape[3:]))
            newMtx = Matrix(np.zeros(newShape))
            
            oldRH = flatMtx.getRowHeaders()
            newRH = []
            
            # Get old headers
            try:
                oldHeaders = flatMtx.getHeaders(axis=2)
            except KeyError:
                oldHeaders = [''] * oldShape[2]
                
                
            # Set data and headers
            for i in range(oldShape[2]):
                oh = oldHeaders[i]
                # Set data
                startRow = i * oldNumRows
                endRow = (i+1) * oldNumRows
                newMtx.data[startRow:endRow,:] = flatMtx.data[:,:,i]
                
                # Set row headers
                for rh in oldRH:
                    if not isinstance(rh, list):
                        rh = [rh]
                    newRH.append(rh+[oh])
            
            # Set the headers on the new matrix
            newMtx.setRowHeaders(newRH)
            newMtx.setColumnHeaders(flatMtx.getColumnHeaders())
            
            # Higher order headers
            for axis in flatMtx.headers.keys():
                if int(axis) > 2:
                    # Reduce the key of the axis by one and set headers on new matrix
                    newMtx.setHeaders(flatMtx.getHeaders(axis=axis), axis=str(int(axis) - 1))
            
            flatMtx = newMtx
        
        return flatMtx
    
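The loop above turns each slice along axis 2 into a new block of rows. The core reshaping step can be sketched on a plain ndarray, without the Matrix headers:

import numpy as np

data = np.arange(24).reshape(3, 4, 2)  # (rows, cols, depth)

flat = np.zeros((3 * 2, 4))
for i in range(data.shape[2]):
    flat[i * 3:(i + 1) * 3, :] = data[:, :, i]  # depth slice i becomes a row block

# Equivalent one-liner: move the depth axis to the front and merge it with rows
assert np.array_equal(flat, np.moveaxis(data, 2, 0).reshape(6, 4))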
Example 71
Project: fuku-ml   Author: fukuball   File: SupportVectorRegression.py    MIT License 4 votes vote down vote up
def train(self):

        if (self.status != 'init'):
            print("Please load train data and init W first.")
            return self.W

        self.status = 'train'

        original_X = self.train_X[:, 1:]

        K = utility.Kernel.kernel_matrix(self, original_X)

        # P = Q, q = p, G = -A, h = -c

        P = cvxopt.matrix(np.bmat([[K, -K], [-K, K]]))
        q = cvxopt.matrix(np.bmat([self.epsilon - self.train_Y, self.epsilon + self.train_Y]).reshape((-1, 1)))
        G = cvxopt.matrix(np.bmat([[-np.eye(2 * self.data_num)], [np.eye(2 * self.data_num)]]))
        h = cvxopt.matrix(np.bmat([[np.zeros((2 * self.data_num, 1))], [self.C * np.ones((2 * self.data_num, 1))]]))
        # A = cvxopt.matrix(np.append(np.ones(self.data_num), -1 * np.ones(self.data_num)), (1, 2*self.data_num))
        # b = cvxopt.matrix(0.0)
        cvxopt.solvers.options['show_progress'] = False
        solution = cvxopt.solvers.qp(P, q, G, h)

        # Lagrange multipliers
        alpha = np.array(solution['x']).reshape((2, -1))
        self.alpha_upper = alpha[0]
        self.alpha_lower = alpha[1]
        self.beta = self.alpha_upper - self.alpha_lower

        sv = abs(self.beta) > 1e-5
        self.sv_index = np.arange(len(self.beta))[sv]
        self.sv_beta = self.beta[sv]
        self.sv_X = original_X[sv]
        self.sv_Y = self.train_Y[sv]

        free_sv_upper = np.logical_and(self.alpha_upper > 1e-5, self.alpha_upper < self.C)
        self.free_sv_index_upper = np.arange(len(self.alpha_upper))[free_sv_upper]
        self.free_sv_alpha_upper = self.alpha_upper[free_sv_upper]
        self.free_sv_X_upper = original_X[free_sv_upper]
        self.free_sv_Y_upper = self.train_Y[free_sv_upper]

        free_sv_lower = np.logical_and(self.alpha_lower > 1e-5, self.alpha_lower < self.C)
        self.free_sv_index_lower = np.arange(len(self.alpha_lower))[free_sv_lower]
        self.free_sv_alpha_lower = self.alpha_lower[free_sv_lower]
        self.free_sv_X_lower = original_X[free_sv_lower]
        self.free_sv_Y_lower = self.train_Y[free_sv_lower]

        short_b_upper = self.free_sv_Y_upper[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_upper[0], self.sv_X)) - self.epsilon
        short_b_lower = self.free_sv_Y_lower[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_lower[0], self.sv_X)) + self.epsilon

        self.sv_avg_b = (short_b_upper + short_b_lower) / 2

        return self.W 
Example 72
Project: DJFeet   Author: libre-man   File: transitioners.py    MIT License 4 votes vote down vote up
def fade_frames(self, prev_song, prev_mid_sample, next_song,
                    next_mid_sample):
        """Create a transition between two songs given a matching beat.

        Use coarse fading to create a (smooth) transition between two songs
        given a beat that matches in both songs.

        :param dj_feet.song.Song prev_song: The song that is currently playing.
        :param int prev_mid_sample: The sample index of the prev_song that
                                    should be in the middle of the merge.
        :param dj_feet.song.Song next_song: The song to play after prev_song.
        :param int next_mid_sample: The sample index of the next_song that
                                    should be in the middle of the merge.
        :returns: A tuple containing respectively the audio array that fades
                  from prev_song into next_song, the start sample index
                  (inclusive) used in the merge from prev_song, and the end
                  sample index (inclusive) used in the merge from next_song.
        :rtype: tuple(numpy.array, int, int)
        """
        sample_offset = librosa.core.time_to_samples(
            [self.fade_time / 2], prev_song.sampling_rate)[0]
        prev_seg = np.array(prev_song.time_series[
            prev_mid_sample - sample_offset:prev_mid_sample + sample_offset])
        next_seg = np.array(next_song.time_series[
            next_mid_sample - sample_offset:next_mid_sample + sample_offset])

        final_seg = np.array([])
        delta = 1 / len(prev_seg)

        if len(prev_seg) != len(next_seg):
            l.critical("Segments are not of the same length during fading." +
                       " (%d and %d)" + "Next starts at %d and ends at %d",
                       len(prev_seg),
                       len(next_seg), next_mid_sample - sample_offset,
                       next_mid_sample + sample_offset)

        for p in range(0, len(prev_seg), self.fade_steps):
            end = min(p + self.fade_steps, len(prev_seg))
            final_seg = np.append(final_seg, prev_seg[p:end] * (1 - delta * (
                (end + p) / 2)))

        for n in range(0, len(next_seg), self.fade_steps):
            end = min(n + self.fade_steps, len(next_seg))
            final_seg[n:end] += next_seg[n:end] * (delta * ((end + n) / 2))

        return (final_seg, prev_mid_sample - sample_offset,
                next_mid_sample + sample_offset) 
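Growing final_seg with np.append copies the whole array on every iteration, so the fade loop is quadratic in the segment length; since both segments are expected to have the same length here, the output can be preallocated instead. A sketch of the equivalent fade-out pass under that assumption (sizes are made up):

import numpy as np

prev_seg = np.random.rand(1000)  # stand-in for the previous song's segment
fade_steps = 128                 # hypothetical chunk size
delta = 1 / len(prev_seg)

final_seg = np.empty_like(prev_seg)  # allocate once instead of appending
for p in range(0, len(prev_seg), fade_steps):
    end = min(p + fade_steps, len(prev_seg))
    final_seg[p:end] = prev_seg[p:end] * (1 - delta * ((end + p) / 2))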
Example 73
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: proposal_target.py    Apache License 2.0 4 votes vote down vote up
def sample_rois(rois, gt_boxes, num_classes, rois_per_image, fg_rois_per_image, fg_overlap, box_stds):
    """
    generate random sample of ROIs comprising foreground and background examples
    :param rois: [n, 5] (batch_index, x1, y1, x2, y2)
    :param gt_boxes: [n, 5] (x1, y1, x2, y2, cls)
    :param num_classes: number of classes
    :param rois_per_image: total roi number
    :param fg_rois_per_image: foreground roi number
    :param fg_overlap: overlap threshold for fg rois
    :param box_stds: std var of bbox reg
    :return: (rois, labels, bbox_targets, bbox_weights)
    """
    overlaps = bbox_overlaps(rois[:, 1:], gt_boxes[:, :4])
    gt_assignment = overlaps.argmax(axis=1)
    labels = gt_boxes[gt_assignment, 4]
    max_overlaps = overlaps.max(axis=1)

    # select foreground RoI with FG_THRESH overlap
    fg_indexes = np.where(max_overlaps >= fg_overlap)[0]
    # guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
    fg_rois_this_image = min(fg_rois_per_image, len(fg_indexes))
    # sample foreground regions without replacement
    if len(fg_indexes) > fg_rois_this_image:
        fg_indexes = np.random.choice(fg_indexes, size=fg_rois_this_image, replace=False)

    # select background RoIs as those within [0, FG_THRESH)
    bg_indexes = np.where(max_overlaps < fg_overlap)[0]
    # compute number of background RoIs to take from this image (guarding against there being fewer than desired)
    bg_rois_this_image = rois_per_image - fg_rois_this_image
    bg_rois_this_image = min(bg_rois_this_image, len(bg_indexes))
    # sample bg rois without replacement
    if len(bg_indexes) > bg_rois_this_image:
        bg_indexes = np.random.choice(bg_indexes, size=bg_rois_this_image, replace=False)

    # indexes selected
    keep_indexes = np.append(fg_indexes, bg_indexes)
    # pad more bg rois to ensure a fixed minibatch size
    while len(keep_indexes) < rois_per_image:
        gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))
        gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)
        keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])

    # sample rois and labels
    rois = rois[keep_indexes]
    labels = labels[keep_indexes]
    # set labels of bg rois to be 0
    labels[fg_rois_this_image:] = 0

    # load or compute bbox_target
    targets = bbox_transform(rois[:, 1:], gt_boxes[gt_assignment[keep_indexes], :4], box_stds=box_stds)
    bbox_targets = np.zeros((rois_per_image, 4 * num_classes), dtype=np.float32)
    bbox_weights = np.zeros((rois_per_image, 4 * num_classes), dtype=np.float32)
    for i in range(fg_rois_this_image):
        cls_ind = int(labels[i])
        bbox_targets[i, cls_ind * 4:(cls_ind + 1) * 4] = targets[i]
        bbox_weights[i, cls_ind * 4:(cls_ind + 1) * 4] = 1

    return rois, labels, bbox_targets, bbox_weights 
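When there are too few background RoIs, the while loop pads keep_indexes by re-appending randomly chosen background indices until the fixed minibatch size is reached. A toy sketch of that padding behaviour (sizes are made up):

import numpy as np

rois_per_image = 8
fg_indexes = np.array([2, 5])
bg_indexes = np.array([0, 1, 3])

keep_indexes = np.append(fg_indexes, bg_indexes)  # only 5 of the 8 slots filled
while len(keep_indexes) < rois_per_image:
    gap = min(len(bg_indexes), rois_per_image - len(keep_indexes))
    gap_indexes = np.random.choice(range(len(bg_indexes)), size=gap, replace=False)
    keep_indexes = np.append(keep_indexes, bg_indexes[gap_indexes])

print(len(keep_indexes))  # 8 -- background indices repeat to fill the minibatch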
Example 74
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_concat():
    for dimension in range(4):
        n = 2
        merge = [2, 3, 4, 5, 6]
        a = 2
        b = 3
        c = 4
        # test  2D
        if dimension<2:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a))
                    elif dimension == 1:
                        shapes.append((a, merge[i]))
                    check_concat_with_shape(shapes,dimension,True)
                    check_concat_with_shape(shapes,dimension,False)
                    # Test negative dim
                    check_concat_with_shape(shapes, dimension - 2, True)
                    check_concat_with_shape(shapes, dimension - 2, False)

        #test 3D
        if dimension<3:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a,b))
                    elif dimension ==1:
                        shapes.append((a,merge[i],b))
                    elif dimension ==2:
                        shapes.append((a,b,merge[i]))
                check_concat_with_shape(shapes,dimension,True)
                check_concat_with_shape(shapes,dimension,False)
                # Test negative dim
                check_concat_with_shape(shapes, dimension - 3, True)
                check_concat_with_shape(shapes, dimension - 3, False)
        # test 4D
        for dim in range(2, 6):
            shapes = []
            for i in range(dim):
                if dimension == 0:
                    shapes.append((merge[i],a,b,c))
                elif dimension == 1:
                    shapes.append((a,merge[i],b,c))
                elif dimension ==2:
                    shapes.append((a,b,merge[i],c))
                elif dimension ==3:
                    shapes.append((a,b,c,merge[i]))
            check_concat_with_shape(shapes,dimension,True)
            check_concat_with_shape(shapes,dimension,False)
            # Test negative dim
            check_concat_with_shape(shapes, dimension - 4, True)
            check_concat_with_shape(shapes, dimension - 4, False) 
Example 75
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_op_output_names_monitor():
    def check_name(op_sym, expected_names):
        output_names = []

        def get_output_names_callback(name, arr):
            output_names.append(py_str(name))

        op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
        op_exe.set_monitor_callback(get_output_names_callback)
        op_exe.forward()
        for output_name, expected_name in zip(output_names, expected_names):
            assert output_name == expected_name

    data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
    conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
    check_name(conv_sym, ['conv_output'])

    deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
    check_name(deconv_sym, ['deconv_output'])

    fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
    check_name(fc_sym, ['fc_output'])

    lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
    check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])

    act_sym = mx.sym.Activation(data, act_type='relu', name='act')
    check_name(act_sym, ['act_output'])

    cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
    check_name(cc_sym, ['concat_output'])

    sm_sym = mx.sym.softmax(data, name='softmax')
    check_name(sm_sym, ['softmax_output'])

    sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
    check_name(sa_sym, ['softmax_output'])

    us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
                               name='upsampling')
    check_name(us_sym, ['upsampling_output'])

    us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
                            name='pooling')
    check_name(us_sym, ['pooling_output']) 
Example 76
Project: GST-Tacotron   Author: KinglittleQ   File: utils.py    MIT License 4 votes vote down vote up
def get_spectrograms(fpath):
    '''Returns normalized log(melspectrogram) and log(magnitude) from the sound file at `fpath`.

    Args:
      fpath: A string. The full path of a sound file.

    Returns:
      mel: A 2d array of shape (T, n_mels) <- Transposed
      mag: A 2d array of shape (T, 1+n_fft/2) <- Transposed
    '''

    # Loading sound file
    y, sr = librosa.load(fpath, sr=hp.sr)

    # Trimming
    y, _ = librosa.effects.trim(y)

    # Preemphasis
    y = np.append(y[0], y[1:] - hp.preemphasis * y[:-1])

    # stft
    linear = librosa.stft(y=y,
                          n_fft=hp.n_fft,
                          hop_length=hp.hop_length,
                          win_length=hp.win_length)

    # magnitude spectrogram
    mag = np.abs(linear)  # (1+n_fft//2, T)

    # mel spectrogram
    mel_basis = librosa.filters.mel(hp.sr, hp.n_fft, hp.n_mels)  # (n_mels, 1+n_fft//2)
    mel = np.dot(mel_basis, mag)  # (n_mels, t)

    # to decibel
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))

    # normalize
    mel = np.clip((mel - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)
    mag = np.clip((mag - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)

    # Transpose
    mel = mel.T.astype(np.float32)  # (T, n_mels)
    mag = mag.T.astype(np.float32)  # (T, 1+n_fft//2)

    return mel, mag 
Example 77
Project: numpynet   Author: uptake   File: examples.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def make_checkerboard_training_set(
    num_points=0, noise=0.0, randomize=True, x_min=0.0, x_max=1.0, y_min=0.0, y_max=1.0
):
    """
    Makes a binary array like a checkerboard (to work on an xor like problem)
    :param num_points: (int) The number of points you want in your training set
    :param noise: (float) percent to bit-flip in the training data, allows it to be imperfect
    :param randomize: (bool) True if you want the locations to be random, False if you want an ordered grid
    :param x_min: (float) minimum x of the 2D domain
    :param x_max: (float) maximum x of the 2D domain
    :param y_min: (float) minimum y of the 2D domain
    :param y_max: (float) maximum y of the 2D domain
    :return: (tuple) The training input coordinates (train_in) and boolean outputs (train_out)
    """
    log.out.info("Generating target data.")
    # Select coordinates to do an XOR like operation on
    coords = []
    bools = []
    if randomize:
        for i in range(num_points):
            # Add num_points randomly
            coord_point = np.random.random(2)
            coord_point[0] = coord_point[0] * (x_max - x_min) + x_min
            coord_point[1] = coord_point[1] * (y_max - y_min) + y_min
            coords.append(coord_point)
    else:
        x_points = np.linspace(x_min, x_max, int(np.sqrt(num_points)))
        y_points = np.linspace(y_min, y_max, int(np.sqrt(num_points)))
        for i in range(int(np.sqrt(num_points))):
            for j in range(int(np.sqrt(num_points))):
                # Add num_points randomly
                coord_point = [x_points[i], y_points[j]]
                coords.append(coord_point)
    # Assign an xor boolean value to the coordinates
    for coord_point in coords:
        bool_point = np.array(
            [np.round(coord_point[0]) % 2, np.round(coord_point[1]) % 2]
        ).astype(bool)
        bools.append(np.logical_xor(bool_point[0], bool_point[1]))
    # If noisy then bit flip
    if noise > 0.0:
        for i in range(len(bools)):
            if np.random.random() < noise:
                bools[i] = np.logical_not(bools[i])
    # Build training vectors
    train_in = None
    train_out = None
    for i, coord in enumerate(coords):
        # Need to initialize the arrays
        if i == 0:
            train_in = np.array([coord])
            train_out = np.array([[bools[i]]])
        else:
            train_in = np.append(train_in, np.array([coord]), axis=0)
            train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)

    train_out = train_out.T
    return train_in, train_out 
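Note the two different axis arguments: the input coordinates grow along axis=0 (new rows) while the outputs grow along axis=1 (new columns) and are transposed at the end. A minimal sketch of how the axis changes what np.append produces:

import numpy as np

a = np.array([[1.0, 2.0]])
b = np.array([[3.0, 4.0]])

print(np.append(a, b, axis=0))  # [[1. 2.] [3. 4.]] -- stacked as rows, shape (2, 2)
print(np.append(a, b, axis=1))  # [[1. 2. 3. 4.]]   -- joined as columns, shape (1, 4)
print(np.append(a, b))          # [1. 2. 3. 4.]     -- no axis: both inputs flattened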
Example 78
Project: numpynet   Author: uptake   File: examples.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def make_smiley_training_set(num_points=0, delta=0.05):
    """
    Makes a binary array that looks like a smiley face (for fun, challenging problem)
    :param num_points: (int) The number of points you want in your training set
    :param delta: (float) Half-width of the tolerance bands that trace the face features
    :return: (tuple) The training input coordinates (train_in) and boolean outputs (train_out)
    """
    log.out.info("Generating happy data.")
    # Select coordinates to do an XOR like operation on
    coords = []
    bools = []
    x_min = 0.0
    x_max = 1.0
    y_min = 0.0
    y_max = 1.0
    for i in range(num_points):
        # Add num_points randomly
        coord_point = np.random.random(2)
        coord_point[0] = coord_point[0] * (x_max - x_min) + x_min
        coord_point[1] = coord_point[1] * (y_max - y_min) + y_min
        coords.append(coord_point)

    # Assign an xor boolean value to the coordinates
    for coord_point in coords:
        x = coord_point[0]
        y = coord_point[1]
        if (abs(x - 0.65) < delta) & (abs(y - 0.65) < (0.05 + delta)):
            bools.append(True)
        elif (abs(x - 0.35) < delta) & (abs(y - 0.65) < (0.05 + delta)):
            bools.append(True)
        elif (x > 0.2) & (x < 0.8) & (abs(y - ((1.5 * (x - 0.5)) ** 2 + 0.25)) < delta):
            bools.append(True)
        else:
            bools.append(False)

    # Build training vectors
    train_in = None
    train_out = None
    for i, coord in enumerate(coords):
        # Need to initialize the arrays
        if i == 0:
            train_in = np.array([coord])
            train_out = np.array([[bools[i]]])
        else:
            train_in = np.append(train_in, np.array([coord]), axis=0)
            train_out = np.append(train_out, np.array([[bools[i]]]), axis=1)

    train_out = train_out.T
    return train_in, train_out 
Example 79
Project: DOTA_models   Author: ringringyi   File: msssim.py    Apache License 2.0 4 votes vote down vote up
def MultiScaleSSIM(img1, img2, max_val=255, filter_size=11, filter_sigma=1.5,
                   k1=0.01, k2=0.03, weights=None):
  """Return the MS-SSIM score between `img1` and `img2`.

  This function implements Multi-Scale Structural Similarity (MS-SSIM) Image
  Quality Assessment according to Zhou Wang's paper, "Multi-scale structural
  similarity for image quality assessment" (2003).
  Link: https://ece.uwaterloo.ca/~z70wang/publications/msssim.pdf

  Author's MATLAB implementation:
  http://www.cns.nyu.edu/~lcv/ssim/msssim.zip

  Arguments:
    img1: Numpy array holding the first RGB image batch.
    img2: Numpy array holding the second RGB image batch.
    max_val: the dynamic range of the images (i.e., the difference between the
      maximum the and minimum allowed values).
    filter_size: Size of blur kernel to use (will be reduced for small images).
    filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
      for small images).
    k1: Constant used to maintain stability in the SSIM calculation (0.01 in
      the original paper).
    k2: Constant used to maintain stability in the SSIM calculation (0.03 in
      the original paper).
    weights: List of weights for each level; if none, use five levels and the
      weights from the original paper.

  Returns:
    MS-SSIM score between `img1` and `img2`.

  Raises:
    RuntimeError: If input images don't have the same shape or don't have four
      dimensions: [batch_size, height, width, depth].
  """
  if img1.shape != img2.shape:
    raise RuntimeError('Input images must have the same shape (%s vs. %s).' %
                       (img1.shape, img2.shape))
  if img1.ndim != 4:
    raise RuntimeError('Input images must have four dimensions, not %d' %
                       img1.ndim)

  # Note: default weights don't sum to 1.0 but do match the paper / matlab code.
  weights = np.array(weights if weights else
                     [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
  levels = weights.size
  downsample_filter = np.ones((1, 2, 2, 1)) / 4.0
  im1, im2 = [x.astype(np.float64) for x in [img1, img2]]
  mssim = np.array([])
  mcs = np.array([])
  for _ in range(levels):
    ssim, cs = _SSIMForMultiScale(
        im1, im2, max_val=max_val, filter_size=filter_size,
        filter_sigma=filter_sigma, k1=k1, k2=k2)
    mssim = np.append(mssim, ssim)
    mcs = np.append(mcs, cs)
    filtered = [convolve(im, downsample_filter, mode='reflect')
                for im in [im1, im2]]
    im1, im2 = [x[:, ::2, ::2, :] for x in filtered]
  return (np.prod(mcs[0:levels-1] ** weights[0:levels-1]) *
          (mssim[levels-1] ** weights[levels-1])) 
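The per-level scores are collected with np.append into growing 1-D arrays and then combined with the level weights. A toy sketch of that final combination, with made-up per-level scores standing in for the _SSIMForMultiScale output:

import numpy as np

weights = np.array([0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
levels = weights.size

mcs = np.array([])
mssim = np.array([])
for cs, ssim in [(0.99, 0.98), (0.97, 0.96), (0.95, 0.94),
                 (0.93, 0.92), (0.91, 0.90)]:  # hypothetical level scores
    mcs = np.append(mcs, cs)
    mssim = np.append(mssim, ssim)

score = (np.prod(mcs[0:levels - 1] ** weights[0:levels - 1]) *
         (mssim[levels - 1] ** weights[levels - 1]))
print(score)  # weighted geometric combination of the per-level scores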
Example 80
Project: DiscEvolution   Author: rbooth200   File: krome_chem.py    GNU General Public License v3.0 4 votes vote down vote up
def explore_rates(self, T, rho, dust_frac, chem, cells, xvar, 
                      process_rates=None,
                      **kwargs):
        """Wrapper for the krome_explore_rates function"""

        # Load the reaction names
        reaction_names = []
        
        reactionfile = os.path.join(KROME_PATH, 'reactions_verbatim.dat')
        with open(reactionfile) as f:
            for l in range(_krome.krome_nrea):
                reaction_names.append(f.readline().strip())

        m_gas = chem.gas.masses * m_H
        m_ice = chem.ice.masses * m_H

        n = np.empty(_nmols + _ngrain, dtype='f8')

        gas_data = chem.gas.data.T
        ice_data = chem.ice.data.T
        
        rate_data = []
        for i in cells:
            T_i, rho_i, eps_i = T[i], rho[i], dust_frac[i]
            
            rho_i /= 1 - eps_i

            # Compute the number density
            n[_krome_gas] = (gas_data[i] / m_gas) * rho_i
            n[_krome_ice] = (ice_data[i] / m_ice) * rho_i

            if self._call_back is not None:
                opt = { kw : arg[i] for (kw, arg) in kwargs.items() }
                self._call_back(_krome, T_i, n, eps_i, **opt)

            # Create space for the result
            flux = np.zeros(_krome.krome_nrea, dtype='f8')

            # Do not send dummy grain species.
            _krome.lib.krome_get_flux(n[:-_ngrain], T_i, flux)
 
            if process_rates:
                flux = process_rates(flux, reaction_names)

            #Create the result
            maxflux = flux.max()
            sumflux = flux.sum()

            format = '{:8d} ' +'{:17E} '*5 + '{:3} {:50}\n'
            result = ''
            for j in range(_krome.krome_nrea):
                result += format.format(j+1, xvar, T_i, flux[j], 
                                        flux[j]/maxflux, flux[j]/sumflux,
                                        " ", reaction_names[j])
            rate_data.append(result)
        return rate_data