Python numpy.row_stack() Examples

The following are 30 code examples showing how to use numpy.row_stack(). They are extracted from open source projects; the project, author, file, and license are listed above each example.

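numpy.row_stack() is an alias for numpy.vstack(): it stacks a sequence of arrays along the first axis, treating 1-D arrays of length N as rows of shape (1, N). Note that newer NumPy releases deprecate the row_stack spelling in favour of np.vstack. A minimal sketch of the basic behaviour:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

# 1-D inputs become rows of the result
stacked = np.row_stack((a, b))
print(stacked.shape)                            # (2, 3)

# row_stack and vstack produce the same result
assert np.array_equal(stacked, np.vstack((a, b)))

# 2-D inputs are concatenated along the first axis
m = np.row_stack([stacked, np.zeros((1, 3))])
print(m.shape)                                  # (3, 3)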

Example 1
Project: face_landmark   Author: 610265158   File: augmentation.py   License: Apache License 2.0
def Perspective_aug(src,strength,label=None):
    image = src
    pts_base = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
    pts1=np.random.rand(4, 2)*random.uniform(-strength,strength)+pts_base
    pts1=pts1.astype(np.float32)
    #pts1 =np.float32([[56, 65], [368, 52], [28, 387], [389, 398]])
    M = cv2.getPerspectiveTransform(pts1, pts_base)
    trans_img = cv2.warpPerspective(image, M, (src.shape[1], src.shape[0]))

    label_rotated=None
    if label is not None:
        label=label.T
        full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))
        label_rotated = np.dot(M, full_label)
        label_rotated=label_rotated.astype(np.int32)
        label_rotated=label_rotated.T
    return trans_img,label_rotated 
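The label branch above uses a common trick: row_stack appends a row of ones to a 2×N array of points so that a 3×3 transform matrix can be applied in homogeneous coordinates. A minimal, self-contained sketch of just that step, with made-up landmark values and an identity matrix standing in for the cv2.getPerspectiveTransform result:

import numpy as np

label = np.array([[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]])   # hypothetical (N, 2) landmarks

pts = label.T                                                  # (2, N)
full = np.row_stack((pts, np.ones((1, pts.shape[1]))))         # (3, N) homogeneous coordinates

M = np.eye(3)                                                  # stand-in for a perspective matrix
transformed = np.dot(M, full)                                  # (3, N)
# note: a full perspective warp would also divide by the last (homogeneous) row
print(transformed.T.shape)                                     # (N, 3)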
Example 2
Project: pymoo   Author: msu-coinlab   File: util.py   License: Apache License 2.0
def normalize(data, bounds, reverse=False, return_bounds=False):
    from pymoo.util.normalization import normalize as _normalize

    _F = np.row_stack([e[0] for e in data])
    if bounds is None:
        bounds = (_F.min(axis=0), _F.max(axis=0))

    to_plot = []
    for k in range(len(data)):
        F = _normalize(data[k][0], bounds[0], bounds[1])

        if reverse:
            F = 1 - F

        to_plot.append([F, data[k][1]])

    if return_bounds:
        return to_plot, bounds
    else:
        return to_plot 
Example 3
Project: pymoo   Author: msu-coinlab   File: star_coordinate.py   License: Apache License 2.0
def _do(self):

        # initialize a figure with a single plot
        self.init_figure()

        # equal axis length and no ticks
        equal_axis(self.ax)
        no_ticks(self.ax)

        # determine the overall scale of points
        _F = np.row_stack([e[0] for e in self.to_plot])
        _min, _max = _F.min(axis=0), _F.max(axis=0)

        V = get_uniform_points_around_circle(self.n_dim)

        plot_axes_arrow(self.ax, V, extend_factor=self.axis_extension, **{**self.axis_style, **self.arrow_style})
        plot_axis_labels(self.ax, V, self.get_labels(), **self.axis_label_style)

        # normalize in range for this plot - here no implicit normalization as in radviz
        bounds = parse_bounds(self.bounds, self.n_dim)
        to_plot_norm = normalize(self.to_plot, bounds)

        for k, (F, kwargs) in enumerate(to_plot_norm):
            N = (F[..., None] * V).sum(axis=1)
            self.ax.scatter(N[:, 0], N[:, 1], **kwargs) 
Example 4
Project: pymoo   Author: msu-coinlab   File: point_crossover.py   License: Apache License 2.0
def _do(self, problem, X, **kwargs):

        # get the X of parents and count the matings
        _, n_matings, n_var = X.shape

        # start point of crossover
        r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :self.n_points]
        r.sort(axis=1)
        r = np.column_stack([r, np.full(n_matings, n_var)])

        # the mask to do the crossover
        M = np.full((n_matings, n_var), False)

        # create for each individual the crossover range
        for i in range(n_matings):

            j = 0
            while j < r.shape[1] - 1:
                a, b = r[i, j], r[i, j + 1]
                M[i, a:b] = True
                j += 2

        _X = crossover_mask(X, M)

        return _X 
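The r matrix above holds, for each mating, self.n_points sorted crossover positions plus a closing column equal to n_var; consecutive pairs of entries then delimit the variable ranges that get swapped. A small sketch of how that matrix is assembled, with sizes chosen only for illustration:

import numpy as np

n_matings, n_var, n_points = 4, 10, 2

# one random permutation of 1..n_var-1 per mating, truncated to n_points cut positions
r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :n_points]
r.sort(axis=1)

# append n_var so the last crossover segment has an explicit end
r = np.column_stack([r, np.full(n_matings, n_var)])
print(r.shape)   # (4, 3)
print(r)         # each row: two sorted cut points followed by 10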
Example 5
Project: pymoo   Author: msu-coinlab   File: reduction.py   License: Apache License 2.0
def _do(self):
        rnd = sample_on_unit_simplex(self.n_sample_points, self.n_dim, unit_simplex_mapping=self.sampling)

        def h(n):
            return get_partition_closest_to_points(n, self.n_dim)

        H = h(self.n_points)

        E = get_reference_directions("das-dennis", self.n_dim, n_partitions=H)
        E = E[np.any(E == 0, axis=1)]

        # add the edge coordinates
        X = np.row_stack([E, rnd])

        I = select_points_with_maximum_distance(X, self.n_points, selected=list(range((len(E)))))
        centroids = X[I].copy()

        if self.kmeans:
            #centroids = kmeans(X, centroids, self.kmeans_max_iter, self.kmeans_a_tol, 0)
            centroids = kmeans(X, centroids, self.kmeans_max_iter, self.kmeans_a_tol, len(E))

        return centroids 
Example 6
Project: pymoo   Author: msu-coinlab   File: performance.py   License: Apache License 2.0
def mean_mean(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])
    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    I = D.argsort(axis=1)[:, :k]

    first = np.column_stack([np.arange(n_points) for _ in range(k)])

    val = np.mean(D[first, I], axis=1)

    return val.mean() 
Example 7
Project: pymoo   Author: msu-coinlab   File: wfg_pareto_fronts.py   License: Apache License 2.0
def calc_pareto_front(problem, ref_dirs):
    n_pareto_points = 200
    np.random.seed(1)

    pf = problem.pareto_front(n_pareto_points=n_pareto_points, use_cache=False)
    # survival = ReferenceDirectionSurvival(ref_dirs)
    survival = RankAndCrowdingSurvival()

    for i in range(1000):
        _pf = problem.pareto_front(n_pareto_points=n_pareto_points, use_cache=False)
        F = np.row_stack([pf, _pf])

        pop = Population().new("F", F)
        pop = survival.do(problem, pop, n_pareto_points // 2)

        pf = pop.get("F")

    return pf 
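Each iteration stacks the current front with a fresh batch of sampled points and then lets the survival operator cut the merged set back down. Independently of pymoo, the stack-then-select pattern looks like this (random points and a trivial selection rule stand in for the real survival step):

import numpy as np

archive = np.random.rand(100, 2)        # current approximation of the front (made up)
fresh = np.random.rand(100, 2)          # newly sampled candidate points

merged = np.row_stack([archive, fresh])          # (200, 2)
# stand-in for survival: keep the 100 rows with the smallest first objective
archive = merged[np.argsort(merged[:, 0])[:100]]
print(archive.shape)                             # (100, 2)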
Example 8
Project: pyscf   Author: pyscf   File: test_0018_fermi_energy.py   License: Apache License 2.0
def test_fermi_energy_spin_resolved_even_kpoints(self):
    """ This is to test the determination of Fermi level in spin-resolved case"""
    ee = np.row_stack((np.linspace(-10.1, 100.0, 1003), 
                       np.linspace(-10.2, 100.0, 1003),
                       np.linspace(-10.3, 100.0, 1003),
                       np.linspace(-10.4, 100.0, 1003))).reshape((4,1,1003))
    nelec = 20.0
    telec = 0.02
    nkpts = ee.shape[0]
    nspin = ee.shape[-2]
    #print(ee)
    fermi_energy = get_fermi_energy(ee, nelec, telec)
    occ = (3.0-nspin)*fermi_dirac_occupations(telec, ee, fermi_energy)

    #print(occ)
    #print(occ.sum()/nkpts)
    #print(fermi_energy)

    self.assertAlmostEqual(occ.sum()/nkpts, 20.0)
    self.assertAlmostEqual(fermi_energy, -9.2045998319213016) 
Example 9
Project: pyscf   Author: pyscf   File: test_0018_fermi_energy.py   License: Apache License 2.0
def test_fermi_energy_spin_resolved_even_kpoints_spin2(self):
    """ This is to test the determination of Fermi level in spin-resolved case"""
    ee = np.row_stack((np.linspace(-10.1, 100.0, 1003), 
                       np.linspace(-10.2, 100.0, 1003),
                       np.linspace(-10.3, 100.0, 1003),
                       np.linspace(-10.4, 100.0, 1003))).reshape((2,2,1003))
    nelec = 20.0
    telec = 0.02
    nkpts = ee.shape[0]
    nspin = ee.shape[-2]
    #print(ee)
    fermi_energy = get_fermi_energy(ee, nelec, telec)
    occ = (3.0-nspin)*fermi_dirac_occupations(telec, ee, fermi_energy)

    #print(occ)
    #print(occ.sum()/nkpts)
    #print(fermi_energy)

    self.assertAlmostEqual(occ.sum()/nkpts, 20.0)
    self.assertAlmostEqual(fermi_energy, -9.2045998319213016) 
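Both pyscf tests above build the eigenvalue array the same way: four 1-D spectra are stacked into a (4, 1003) matrix with row_stack and then reshaped so that the leading axes are interpreted as k-points and spins. A sketch of just that shape manipulation:

import numpy as np

ee = np.row_stack([np.linspace(-10.1 - 0.1 * i, 100.0, 1003) for i in range(4)])
print(ee.shape)                        # (4, 1003)
print(ee.reshape((4, 1, 1003)).shape)  # 4 k-points, 1 spin channel (first test)
print(ee.reshape((2, 2, 1003)).shape)  # 2 k-points, 2 spin channels (second test)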
Example 10
Project: sand-glyphs   Author: inconvergent   File: glyphs.py   License: MIT License
def _get_glyph(gnum, height, width, shift_prob, shift_size):
  if isinstance(gnum, list):
    n = randint(*gnum)
  else:
    n = gnum

  glyph = random_points_in_circle(
      n, 0, 0, 0.5
      )*array((width, height), 'float')
  _spatial_sort(glyph)

  if random()<shift_prob:
    shift = ((-1)**randint(0,2))*shift_size*height
    glyph[:,1] += shift
  if random()<0.5:
    ii = randint(0,n-1,size=(1))
    xy = glyph[ii,:]
    glyph = row_stack((glyph, xy))


  return glyph 
Example 11
Project: vnpy_crypto   Author: birforce   File: constraint.py   License: MIT License
def combine(cls, constraints):
        """Create a new LinearConstraint by ANDing together several existing
        LinearConstraints.

        :arg constraints: An iterable of LinearConstraint objects. Their
          :attr:`variable_names` attributes must all match.
        :returns: A new LinearConstraint object.
        """
        if not constraints:
            raise ValueError("no constraints specified")
        variable_names = constraints[0].variable_names
        for constraint in constraints:
            if constraint.variable_names != variable_names:
                raise ValueError("variable names don't match")
        coefs = np.row_stack([c.coefs for c in constraints])
        constants = np.row_stack([c.constants for c in constraints])
        return cls(variable_names, coefs, constants) 
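Here row_stack concatenates the coefficient matrices (and the constant vectors) of several linear constraints into one combined system, which works because all constraints share the same variable_names and therefore the same number of columns. The same idea in isolation, with made-up matrices:

import numpy as np

# two hypothetical constraint systems over the same three variables
coefs_a = np.array([[1.0, 0.0, 0.0]])             # x1 = 1
coefs_b = np.array([[0.0, 1.0, -1.0],
                    [0.0, 0.0, 1.0]])             # x2 - x3 = 0, x3 = 2
constants_a = np.array([[1.0]])
constants_b = np.array([[0.0], [2.0]])

coefs = np.row_stack([coefs_a, coefs_b])                # (3, 3)
constants = np.row_stack([constants_a, constants_b])    # (3, 1)
print(coefs.shape, constants.shape)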
Example 12
def DataStru(self):
        self.datai['train'] = np.row_stack((np.array(self.yanzhneg_pr), np.array(self.yanzhneg_real)))  # stack as rows here
        self.datai['predict'] = np.row_stack((np.array(self.predi), np.array(self.preal)))
        # transpose the training data
        datapst = self.datai['train'].T
        # define the DataFrame column names for the training data
        mingcheng = ['model %s column' % str(dd) for dd in list(range(len(self.datai['train']) - 1))] + [self.zi]
        self.datai['train'] = pd.DataFrame(datapst, columns=mingcheng)

        # transpose the prediction data
        dapst = self.datai['predict'].T
        # define the DataFrame column names for the prediction data
        mingche = ['model %s column' % str(dd) for dd in list(range(len(self.datai['predict']) - 1))] + [self.zi]
        self.datai['predict'] = pd.DataFrame(dapst, columns=mingche)
        return print('Second-layer data is ready')

    # define the mean squared error function
Example 13
def DataStru(self):
        self.datai['train'] = np.row_stack((np.array(self.yanzhneg_pr), np.array(self.yanzhneg_real)))  # stack as rows here
        self.datai['predict'] = np.row_stack((np.array(self.predi), np.array(self.preal)))
        # transpose the training data
        datapst = self.datai['train'].T
        # define the DataFrame column names for the training data
        mingcheng = ['model %s column' % str(dd) for dd in list(range(len(self.datai['train']) - 1))] + [self.zi]
        self.datai['train'] = pd.DataFrame(datapst, columns=mingcheng)

        # transpose the prediction data
        dapst = self.datai['predict'].T
        # define the DataFrame column names for the prediction data
        mingche = ['model %s column' % str(dd) for dd in list(range(len(self.datai['predict']) - 1))] + [self.zi]
        self.datai['predict'] = pd.DataFrame(dapst, columns=mingche)
        return print('Second-layer data is ready')

    # create a function that converts the predicted multi-dimensional numeric classes back to the one-dimensional original name classes
Example 14
def DataStru(self):
        self.datai['train'] = np.row_stack((np.array(self.yanzhneg_pr), np.array(self.yanzhneg_real)))  # stack as rows here
        self.datai['predict'] = np.row_stack((np.array(self.predi), np.array(self.preal)))
        # transpose the training data
        datapst = self.datai['train'].T
        # define the DataFrame column names for the training data
        mingcheng = ['model %s column' % str(dd) for dd in list(range(len(self.datai['train']) - 1))] + [self.zi]
        self.datai['train'] = pd.DataFrame(datapst, columns=mingcheng)

        # transpose the prediction data
        dapst = self.datai['predict'].T
        # define the DataFrame column names for the prediction data
        mingche = ['model %s column' % str(dd) for dd in list(range(len(self.datai['predict']) - 1))] + [self.zi]
        self.datai['predict'] = pd.DataFrame(dapst, columns=mingche)
        return print('Second-layer data is ready')

    # define the mean squared error function
Example 15
def DataStru(self):
        self.datai['train'] = np.row_stack((np.array(self.yanzhneg_pr), np.array(self.yanzhneg_real)))  # stack as rows here
        self.datai['predict'] = np.row_stack((np.array(self.predi), np.array(self.preal)))
        # transpose the training data
        datapst = self.datai['train'].T
        # define the DataFrame column names for the training data
        mingcheng = ['model %s column' % str(dd) for dd in list(range(len(self.datai['train']) - 1))] + [self.zi]
        self.datai['train'] = pd.DataFrame(datapst, columns=mingcheng)

        # transpose the prediction data
        dapst = self.datai['predict'].T
        # define the DataFrame column names for the prediction data
        mingche = ['model %s column' % str(dd) for dd in list(range(len(self.datai['predict']) - 1))] + [self.zi]
        self.datai['predict'] = pd.DataFrame(dapst, columns=mingche)
        return print('Second-layer data is ready')

    # create a function that converts the predicted multi-dimensional numeric classes back to the one-dimensional original name classes
Example 16
Project: btgym   Author: Kismuz   File: base.py   License: GNU Lesser General Public License v3.0
def get_raw_state(self):
        """
        Default state observation composer.

        Returns:
             and updates time-embedded environment state observation as [n,4] numpy matrix, where:
                4 - number of signal features  == state_shape[1],
                n - time-embedding length  == state_shape[0] == <set by user>.

        Note:
            `self.raw_state` is used to render environment `human` mode and should not be modified.

        """
        self.raw_state = np.row_stack(
            (
                np.frombuffer(self.data.open.get(size=self.time_dim)),
                np.frombuffer(self.data.high.get(size=self.time_dim)),
                np.frombuffer(self.data.low.get(size=self.time_dim)),
                np.frombuffer(self.data.close.get(size=self.time_dim)),
            )
        ).T

        return self.raw_state 
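The composer stacks four 1-D price series into a (4, n) matrix and transposes it to the documented [n, 4] layout: one row per time step, one column per OHLC feature. Stripped of the backtrader-specific buffer access, the pattern reduces to this (random data stands in for the real feed, and time_dim is an assumed embedding length):

import numpy as np

time_dim = 8
open_, high, low, close = (np.random.rand(time_dim) for _ in range(4))

state = np.row_stack((open_, high, low, close)).T
print(state.shape)   # (8, 4)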
Example 17
Project: btgym   Author: Kismuz   File: base.py   License: GNU Lesser General Public License v3.0
def get_raw_state(self):
        """
        Default state observation composer.

        Returns:
             and updates time-embedded environment state observation as [n, 4] numpy matrix, where:
                4 - number of signal features  == state_shape[1],
                n - time-embedding length  == state_shape[0] == <set by user>.

        Note:
            `self.raw_state` is used to render environment `human` mode and should not be modified.

        """
        self.raw_state = np.row_stack(
            (
                np.frombuffer(self.data.open.get(size=self.time_dim)),
                np.frombuffer(self.data.high.get(size=self.time_dim)),
                np.frombuffer(self.data.low.get(size=self.time_dim)),
                np.frombuffer(self.data.close.get(size=self.time_dim)),
            )
        ).T

        return self.raw_state 
Example 18
Project: btgym   Author: Kismuz   File: base.py   License: GNU Lesser General Public License v3.0
def get_raw_state(self):
        """
        Default state observation composer.

        Returns:
             and updates time-embedded environment state observation as [n, 4] numpy matrix, where:
                4 - number of signal features  == state_shape[1],
                n - time-embedding length  == state_shape[0] == <set by user>.

        Note:
            `self.raw_state` is used to render environment `human` mode and should not be modified.

        """
        self.raw_state = np.row_stack(
            (
                np.frombuffer(self.data.open.get(size=self.time_dim)),
                np.frombuffer(self.data.high.get(size=self.time_dim)),
                np.frombuffer(self.data.low.get(size=self.time_dim)),
                np.frombuffer(self.data.close.get(size=self.time_dim)),
            )
        ).T

        return self.raw_state 
Example 19
Project: btgym   Author: Kismuz   File: base.py   License: GNU Lesser General Public License v3.0
def get_raw_state(self):
        """
        Default state observation composer.

        Returns:
             and updates time-embedded environment state observation as [n,4] numpy matrix, where:
                4 - number of signal features  == state_shape[1],
                n - time-embedding length  == state_shape[0] == <set by user>.

        Note:
            `self.raw_state` is used to render environment `human` mode and should not be modified.

        """
        self.raw_state = np.row_stack(
            (
                np.frombuffer(self.data.open.get(size=self.time_dim)),
                np.frombuffer(self.data.high.get(size=self.time_dim)),
                np.frombuffer(self.data.low.get(size=self.time_dim)),
                np.frombuffer(self.data.close.get(size=self.time_dim)),
            )
        ).T

        return self.raw_state 
Example 20
Project: Broad-Learning-System   Author: LiangjunFeng   File: bls_enhence.py   License: MIT License
def addingenhence_nodes(self, data, label, step = 1, batchsize = 'auto'):
        if batchsize == 'auto':
            batchsize = data.shape[1]
            
        mappingdata = self.mapping_generator.transform(data)
        inputdata = self.transform(data)         
        localenhence_generator = node_generator()
        extraenhence_nodes = localenhence_generator.generator_nodes(mappingdata,step,batchsize,self._enhence_function)
            
        D = self.pesuedoinverse.dot(extraenhence_nodes)
        C = extraenhence_nodes - inputdata.dot(D)
        BT = self.pinv(C) if (C == 0).any() else  np.mat((D.T.dot(D)+np.eye(D.shape[1]))).I.dot(D.T).dot(self.pesuedoinverse)
        
        self.W = np.row_stack((self.W-D.dot(BT).dot(label),BT.dot(label))) 
        self.enhence_generator.update(localenhence_generator.Wlist,localenhence_generator.blist)
        self.pesuedoinverse =  np.row_stack((self.pesuedoinverse - D.dot(BT),BT)) 
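The two updates at the end both grow an existing matrix by appending rows: row_stack((W - D.dot(BT).dot(label), BT.dot(label))) extends the output weights by one block per new enhancement node, and row_stack((self.pesuedoinverse - D.dot(BT), BT)) extends the stored pseudoinverse to match. A shape-only sketch of that incremental update with random stand-ins (the (C == 0) branch and the node generators of the original are omitted):

import numpy as np

n_samples, n_nodes, n_extra, n_out = 6, 5, 3, 2   # arbitrary sizes

A = np.random.rand(n_samples, n_nodes)            # existing node outputs
A_pinv = np.linalg.pinv(A)                        # (n_nodes, n_samples)
W = np.random.rand(n_nodes, n_out)                # current output weights
extra = np.random.rand(n_samples, n_extra)        # newly generated enhancement nodes
label = np.random.rand(n_samples, n_out)

D = A_pinv.dot(extra)                             # (n_nodes, n_extra)
C = extra - A.dot(D)
BT = np.linalg.pinv(C)                            # (n_extra, n_samples)

W_new = np.row_stack((W - D.dot(BT).dot(label), BT.dot(label)))
A_pinv_new = np.row_stack((A_pinv - D.dot(BT), BT))
print(W_new.shape, A_pinv_new.shape)              # (8, 2) (8, 6)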
Example 21
Project: Broad-Learning-System   Author: LiangjunFeng   File: bls_enhmap.py   License: MIT License
def adding_nodes(self, data, label, mapstep = 1, enhencestep = 1, batchsize = 'auto'):
        if batchsize == 'auto':
            batchsize = data.shape[1]
        
        mappingdata = self.mapping_generator.transform(data)        
        inputdata = self.transform(data) 
        
        localmap_generator = node_generator()
        extramap_nodes = localmap_generator.generator_nodes(data,mapstep,batchsize,self._map_function)
        localenhence_generator = node_generator()
        extraenh_nodes = localenhence_generator.generator_nodes(mappingdata,enhencestep,batchsize,self._map_function)
        extra_nodes = np.column_stack((extramap_nodes,extraenh_nodes))
        
        D = self.pesuedoinverse.dot(extra_nodes)
        C = extra_nodes - inputdata.dot(D)
        BT = self.pinv(C) if (C == 0).any() else  np.mat((D.T.dot(D)+np.eye(D.shape[1]))).I.dot(D.T).dot(self.pesuedoinverse)
        
        self.W = np.row_stack((self.W-D.dot(BT).dot(label),BT.dot(label))) 
        self.pesuedoinverse =  np.row_stack((self.pesuedoinverse - D.dot(BT),BT)) 
        self.local_mapgeneratorlist.append(localmap_generator)
        self.local_enhgeneratorlist.append(localenhence_generator) 
Example 22
Project: Broad-Learning-System   Author: LiangjunFeng   File: bls_addinput.py   License: MIT License
def adding_nodes(self, data, label, mapstep = 1, enhencestep = 1, batchsize = 'auto'):
        if batchsize == 'auto':
            batchsize = data.shape[1]
        
        mappingdata = self.mapping_generator.transform(data)        
        inputdata = self.transform(data) 
        
        localmap_generator = node_generator()
        extramap_nodes = localmap_generator.generator_nodes(data,mapstep,batchsize,self._map_function)
        localenhence_generator = node_generator()
        extraenh_nodes = localenhence_generator.generator_nodes(mappingdata,enhencestep,batchsize,self._map_function)
        extra_nodes = np.column_stack((extramap_nodes,extraenh_nodes))
        
        D = self.pesuedoinverse.dot(extra_nodes)
        C = extra_nodes - inputdata.dot(D)
        BT = self.pinv(C) if (C == 0).any() else  np.mat((D.T.dot(D)+np.eye(D.shape[1]))).I.dot(D.T).dot(self.pesuedoinverse)
        
        self.W = np.row_stack((self.W-D.dot(BT).dot(label),BT.dot(label))) 
        self.pesuedoinverse =  np.row_stack((self.pesuedoinverse - D.dot(BT),BT)) 
        self.local_mapgeneratorlist.append(localmap_generator)
        self.local_enhgeneratorlist.append(localenhence_generator) 
Example 23
Project: Computable   Author: ktraunmueller   File: test_basic.py   License: MIT License
def test_hyp0f1(self):
        # scalar input
        assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
        assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)

        # float input, expected values match mpmath
        x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
        expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
                             1.37789689539747, 1.60373685288480])
        assert_allclose(x, expected, rtol=1e-12)

        # complex input
        x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
        assert_allclose(x, expected.astype(complex), rtol=1e-12)

        # test broadcasting
        x1 = [0.5, 1.5, 2.5]
        x2 = [0, 1, 0.5]
        x = special.hyp0f1(x1, x2)
        expected = [1.0, 1.8134302039235093, 1.21482702689997]
        assert_allclose(x, expected, rtol=1e-12)
        x = special.hyp0f1(np.row_stack([x1] * 2), x2)
        assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
        assert_raises(ValueError, special.hyp0f1,
                      np.row_stack([x1] * 3), [0, 1]) 
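In the broadcasting test, np.row_stack([x1] * 2) simply tiles the length-3 list into a (2, 3) array with identical rows, which broadcasts cleanly against the length-3 x2; stacking three copies and pairing them with a length-2 second argument is what triggers the expected ValueError. The shapes involved:

import numpy as np

x1 = [0.5, 1.5, 2.5]

a = np.row_stack([x1] * 2)
print(a.shape)   # (2, 3) -- compatible with a length-3 second argument

b = np.row_stack([x1] * 3)
print(b.shape)   # (3, 3) -- not broadcastable against a length-2 second argument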
Example 24
Project: Computable   Author: ktraunmueller   File: test_basic.py   License: MIT License
def test_polygamma(self):
        poly2 = special.polygamma(2,1)
        poly3 = special.polygamma(3,1)
        assert_almost_equal(poly2,-2.4041138063,10)
        assert_almost_equal(poly3,6.4939394023,10)

        # Test polygamma(0, x) == psi(x)
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))

        # Test broadcasting
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        expected = np.row_stack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
                            expected) 
Example 25
Project: face_landmark   Author: 610265158   File: augmentation.py   License: Apache License 2.0
def Affine_aug(src,strength,label=None):
    image = src
    pts_base = np.float32([[10,100],[200,50],[100,250]])
    pts1 = np.random.rand(3, 2) * random.uniform(-strength, strength) + pts_base
    pts1 = pts1.astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts_base)
    trans_img = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]) ,
                                            borderMode=cv2.BORDER_CONSTANT,
                                          borderValue=cfg.DATA.PIXEL_MEAN)
    label_rotated=None
    if label is not None:
        label=label.T
        full_label = np.row_stack((label, np.ones(shape=(1, label.shape[1]))))
        label_rotated = np.dot(M, full_label)
        #label_rotated = label_rotated.astype(np.int32)
        label_rotated=label_rotated.T
    return trans_img,label_rotated 
Example 26
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_basic.py   License: MIT License
def test_hyp0f1(self):
        # scalar input
        assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
        assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)

        # float input, expected values match mpmath
        x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
        expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
                             1.37789689539747, 1.60373685288480])
        assert_allclose(x, expected, rtol=1e-12)

        # complex input
        x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
        assert_allclose(x, expected.astype(complex), rtol=1e-12)

        # test broadcasting
        x1 = [0.5, 1.5, 2.5]
        x2 = [0, 1, 0.5]
        x = special.hyp0f1(x1, x2)
        expected = [1.0, 1.8134302039235093, 1.21482702689997]
        assert_allclose(x, expected, rtol=1e-12)
        x = special.hyp0f1(np.row_stack([x1] * 2), x2)
        assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
        assert_raises(ValueError, special.hyp0f1,
                      np.row_stack([x1] * 3), [0, 1]) 
Example 27
Project: GraphicDesignPatternByPython   Author: Relph1119   File: test_basic.py   License: MIT License
def test_polygamma(self):
        poly2 = special.polygamma(2,1)
        poly3 = special.polygamma(3,1)
        assert_almost_equal(poly2,-2.4041138063,10)
        assert_almost_equal(poly3,6.4939394023,10)

        # Test polygamma(0, x) == psi(x)
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))

        # Test broadcasting
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        expected = np.row_stack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.row_stack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.row_stack([n]*2), x),
                            expected) 
Example 28
Project: fracture   Author: inconvergent   File: fracture.py   License: MIT License
def _append_tmp_sources(self):

    from scipy.spatial import cKDTree as kdt
    from scipy.spatial import Delaunay as triag

    sources = row_stack([self.sources]+self.tmp_sources)
    tree = kdt(sources)
    self.sources = sources
    self.tree = tree
    self.tmp_sources = []
    self.tri = triag(
      self.sources,
      incremental=False,
      qhull_options='QJ Qc'
    )
    self.num_sources = len(self.sources)

    return len(sources) 
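row_stack here merges the existing (n, 2) source array with a list of freshly queued point batches before the KD-tree and the Delaunay triangulation are rebuilt. The merging step on its own, with random points in place of the fracture sources:

import numpy as np
from scipy.spatial import cKDTree

sources = np.random.rand(10, 2)                               # existing points
tmp_sources = [np.random.rand(3, 2), np.random.rand(1, 2)]    # queued batches

merged = np.row_stack([sources] + tmp_sources)                # (14, 2)
tree = cKDTree(merged)
print(merged.shape, tree.n)                                   # (14, 2) 14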
Example 29
Project: YOLOV3   Author: Peterisfar   File: evaluator.py   License: MIT License
def get_bbox(self, img, multi_test=False, flip_test=False):
        if multi_test:
            test_input_sizes = range(320, 640, 96)
            bboxes_list = []
            for test_input_size in test_input_sizes:
                valid_scale =(0, np.inf)
                bboxes_list.append(self.__predict(img, test_input_size, valid_scale))
                if flip_test:
                    bboxes_flip = self.__predict(img[:, ::-1], test_input_size, valid_scale)
                    bboxes_flip[:, [0, 2]] = img.shape[1] - bboxes_flip[:, [2, 0]]
                    bboxes_list.append(bboxes_flip)
            bboxes = np.row_stack(bboxes_list)
        else:
            bboxes = self.__predict(img, self.val_shape, (0, np.inf))

        bboxes = nms(bboxes, self.conf_thresh, self.nms_thresh)

        return bboxes 
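Multi-scale (and flip) testing produces one detection array per input size, each with a different number of rows; row_stack concatenates them into a single (total, m) array before non-maximum suppression. A toy version with fabricated box arrays:

import numpy as np

# hypothetical per-scale detections: columns like (x1, y1, x2, y2, score, class)
dets_a = np.random.rand(3, 6)
dets_b = np.random.rand(5, 6)
dets_c = np.empty((0, 6))        # a scale may contribute no boxes

bboxes = np.row_stack([dets_a, dets_b, dets_c])
print(bboxes.shape)              # (8, 6)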
Example 30
Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def _calc_pareto_set(self, n_pareto_points=500, *args, **kwargs):
        ps = np.ones((2 ** self.k, self.k))
        for i, s in enumerate(powerset(np.arange(self.k))):
            ps[i, s] = 0

        rnd = self._rand_optimal_position(n_pareto_points - len(ps))
        ps = np.row_stack([ps, rnd])
        ps = self._positional_to_optimal(ps)
        return ps