Python numpy.ones() Examples

The following code examples show how to use numpy.ones(). All of them are taken from open source Python projects.
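
Before the project examples, here is a minimal, self-contained sketch of the function itself (standard NumPy API; the array names are illustrative):

import numpy as np

a = np.ones(3)                       # 1-D float64 array: array([1., 1., 1.])
b = np.ones((2, 4), dtype=np.int64)  # 2-D array with an explicit dtype
c = np.ones_like(b)                  # ones matching the shape and dtype of b

A pattern that recurs throughout the examples below is to allocate an array of ones and then overwrite parts of it, so that the untouched entries serve as defaults: an intercept column, a boolean mask, or a constant term.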

Example 1
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def _calculate_scatter_matrix_py(x, y):
    """Calculates the complete scatter matrix for the input coordinates.

    :param x: The x coordinates.
    :type x: :py:class:`numpy.ndarray`
    :param y: The y coordinates.
    :type y: :py:class:`numpy.ndarray`
    :return: The complete scatter matrix.
    :rtype: :py:class:`numpy.ndarray`

    """
    D = np.ones((len(x), 6), dtype=x.dtype)
    D[:, 0] = x * x
    D[:, 1] = x * y
    D[:, 2] = y * y
    D[:, 3] = x
    D[:, 4] = y
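    # Column 5 is never overwritten; its ones act as the constant term of the design matrix.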

    return D.T.dot(D) 
Example 2
Project: b2ac   Author: hbldh   File: double.py    MIT License
def _calculate_scatter_matrix_double(x, y):
    """Calculates the complete scatter matrix for the input coordinates.

    :param x: The x coordinates.
    :type x: :py:class:`numpy.ndarray`
    :param y: The y coordinates.
    :type y: :py:class:`numpy.ndarray`
    :return: The complete scatter matrix.
    :rtype: :py:class:`numpy.ndarray`

    """
    D = np.ones((len(x), 6), 'int64')
    D[:, 0] = x * x
    D[:, 1] = x * y
    D[:, 2] = y * y
    D[:, 3] = x
    D[:, 4] = y
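    # Column 5 is never overwritten; its ones act as the constant term of the design matrix.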

    return D.T.dot(D) 
Example 3
Project: projection-methods   Author: akshayka   File: dykstra.py    GNU General Public License v3.0
def solve(self, problem):
        left_set = problem.sets[0]
        right_set = problem.sets[1]

        iterate = (self._initial_iterate if
            self._initial_iterate is not None else np.ones(problem.dimension))

        # (p_n), (q_n) are the auxiliary sequences, (a_n), (b_n) the main
        # sequences, defined in Bauschke's 98 paper
        # (Dykstra's Alternating Projection Algorithm for Two Sets).
        self.p = [None] * (self.max_iters + 1)
        self.q = [None] * (self.max_iters + 1)
        self.a = [None] * (self.max_iters + 1)
        self.b = [None] * (self.max_iters + 1)
        zero_vector = np.zeros(problem.dimension)
        self.p[0] = self.q[0] = zero_vector
        self.b[0] = self.a[0] = iterate
        residuals = []

        status = Optimizer.Status.INACCURATE
        for n in range(1, self.max_iters + 1):
            if self.verbose:
                print('iteration %d' % n)
            # TODO(akshayka): Robust stopping criterion
            residuals.append(self._compute_residual(
                self.b[n-1], left_set, right_set))
            if self.verbose:
                print('\tresidual: %e' % sum(residuals[-1]))
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break

            self.a[n] = left_set.project(self.b[n-1] + self.p[n-1])
            self.b[n] = right_set.project(self.a[n] + self.q[n-1])
            self.p[n] = self.b[n-1] + self.p[n-1] - self.a[n]
            self.q[n] = self.a[n] + self.q[n-1] - self.b[n]

        # TODO(akshayka): does it matter if I return self.b vs self.a?
        # the first implementation returned self.a ...
        return self.b, residuals, status 
Example 4
Project: FRIDA   Author: LCAV   File: point_cloud.py    MIT License
def classical_mds(self, D):
        ''' 
        Classical multidimensional scaling

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing the squared distances between points)
        '''

        # Apply MDS algorithm for denoising
        n = D.shape[0]
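        # J = I - (1/n) * ones((n, n)) is the double-centering matrix used in classical MDS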
        J = np.eye(n) - np.ones((n,n))/float(n)
        G = -0.5*np.dot(J, np.dot(D, J))

        s, U = np.linalg.eig(G)

        # we need to sort the eigenvalues in decreasing order
        s = np.real(s)
        o = np.argsort(s)
        s = s[o[::-1]]
        U = U[:,o[::-1]]

        S = np.diag(s)[0:self.dim,:]
        self.X = np.dot(np.sqrt(S),U.T) 
Example 5
Project: fenics-topopt   Author: zfergus   File: solver.py    MIT License
def __init__(self, nelx, nely, volfrac, penal, rmin, ft, gui, bc):
        self.n = nelx * nely
        self.opt = nlopt.opt(nlopt.LD_MMA, self.n)
        self.passive = bc.get_passive_elements()
        self.xPhys = np.ones(self.n)
        if self.passive is not None:
            self.xPhys[self.passive] = 0

        # set bounds
        ub = np.ones(self.n, dtype=float)
        self.opt.set_upper_bounds(ub)
        lb = np.zeros(self.n, dtype=float)
        self.opt.set_lower_bounds(lb)

        # set stopping criteria
        self.opt.set_maxeval(2000)
        self.opt.set_ftol_rel(0.001)

        # set objective and constraint functions
        self.opt.set_min_objective(self.compliance_function)
        self.opt.add_inequality_constraint(self.volume_function, 0)

        # setup filter
        self.ft = ft
        self.filtering = Filter(nelx, nely, rmin)

        # setup problem def
        self.init_problem(nelx, nely, penal, bc)
        self.volfrac = volfrac

        # set GUI callback
        self.init_gui(gui) 
Example 6
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_cross_phase_2d(self, dask):
        Ny, Nx = (32, 16)
        x = np.linspace(0, 1, num=Nx, endpoint=False)
        y = np.ones(Ny)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # frequency = 1/(2*pi)
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1*y[:,np.newaxis], name='a',
                          dims=['y','x'], coords={'y':y, 'x':x})
        da2 = xr.DataArray(data=signal2*y[:,np.newaxis], name='b',
                          dims=['y','x'], coords={'y':y, 'x':x})
        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2, dim=['y','x'])

        if dask:
            da1 = da1.chunk({'x': 16})
            da2 = da2.chunk({'x': 16})
        cp = xrft.cross_phase(da1, da2, dim=['x'])
        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset) 
Example 7
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__init_manager.py    MIT License
def init_topics_KV__rand_active_words(
        n_states=10,
        frac_words_active=0.5,
        blend_frac_active=0.5,
        n_vocabs=144,
        seed=0):
    prng = np.random.RandomState(int(seed))
    unif_topics_KV = np.ones((n_states, n_vocabs)) / float(n_vocabs)
    active_topics_KV = np.zeros((n_states, n_vocabs))
    for k in range(n_states):
        active_words_U = prng.choice(
            np.arange(n_vocabs, dtype=np.int32),
            int(frac_words_active * n_vocabs),
            replace=False)
        active_topics_KV[k, active_words_U] = 1.0 / active_words_U.size
    topics_KV = (1 - blend_frac_active) * unif_topics_KV \
        + blend_frac_active * active_topics_KV
    topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    return topics_KV 
Example 8
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__init_manager.py    MIT License
def init_topics_KV__rand_docs(
        dataset=None,
        n_states=10,
        n_vocabs=144,
        blend_frac_doc=0.5,
        seed=0):
    prng = np.random.RandomState(int(seed))
    unif_topics_KV = np.ones((n_states, n_vocabs)) / float(n_vocabs)
    doc_KV = np.zeros((n_states, n_vocabs))
    chosen_doc_ids = prng.choice(
        np.arange(dataset['n_docs'], dtype=np.int32),
        n_states,
        replace=False)
    for k in range(n_states):
        start_d = dataset['doc_indptr_Dp1'][chosen_doc_ids[k]]
        stop_d = dataset['doc_indptr_Dp1'][chosen_doc_ids[k] + 1]
        active_words_U = dataset['word_id_U'][start_d:stop_d]
        doc_KV[k, active_words_U] = dataset['word_ct_U'][start_d:stop_d]
    doc_KV /= doc_KV.sum(axis=1)[:,np.newaxis]
    topics_KV = (1 - blend_frac_doc) * unif_topics_KV \
        + blend_frac_doc * doc_KV
    topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    return topics_KV 
Example 9
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License
def make_initial_P_d_K(
        init_name,
        prng=np.random,
        alpha_K=None,
        init_P_d_K_list=None):
    K = alpha_K.size

    if init_name.count('warm'):
        return init_P_d_K_list.pop()
    elif init_name.count('uniform_sample'):
        return prng.dirichlet(np.ones(K))
    elif init_name.count('prior_sample'):
        return prng.dirichlet(alpha_K)
    elif init_name.count("prior_mean"):
        return alpha_K / np.sum(alpha_K) #np.zeros(K, dtype=alpha_K.dtype)
    else:
        raise ValueError("Unrecognized vb lstep_init_name: " + init_name) 
Example 10
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: baseop.py    MIT License
def wrap_variable(self, var):
        """wrap layer.w into variables"""
        val = self.lay.w.get(var, None)
        if val is None:
            shape = self.lay.wshape[var]
            args = [0., 1e-2, shape]
            if 'moving_mean' in var:
                val = np.zeros(shape)
            elif 'moving_variance' in var:
                val = np.ones(shape)
            else:
                val = np.random.normal(*args)
            self.lay.w[var] = val.astype(np.float32)
            self.act = 'Init '
        if not self.var: return

        val = self.lay.w[var]
        self.lay.w[var] = tf.constant_initializer(val)
        if var in self._SLIM: return
        with tf.variable_scope(self.scope):
            self.lay.w[var] = tf.get_variable(var,
                shape = self.lay.wshape[var],
                dtype = tf.float32,
                initializer = self.lay.w[var]) 
Example 11
Project: DataHack2018   Author: InnovizTech   File: vis.py    BSD 3-Clause "New" or "Revised" License
def draw_cuboids(self, boxes):
        for box_idx, box in enumerate(boxes):
            color = box['color'] if 'color' in box else np.ones(4)
            size = box['size']
            translation = box['translation']
            rotation = box['rotation'] / np.pi * 180.
            try:
                text = box['text']
            except KeyError:
                text = ''

            if box_idx < len(self._cuboids):
                self._cuboids[box_idx].show()
                self._cuboids[box_idx].update_values(size, translation, rotation, color, text)
            else:
                self._cuboids.append(Cuboid(size, translation, rotation, color, text))
        for c in self._cuboids[len(boxes):]:
            c.hide() 
Example 12
Project: ML_from_scratch   Author: jarfa   File: RegressionTree.py    Apache License 2.0
def predict(self, data):
        if self.is_terminal():
            return self.mean_predict

        predictions = np.ones(len(data)) * self.mean_predict
        split_indices = data[:, self.split_feature] >= self.split_value

        if self.children[0] is not None:
            predictions[~split_indices] = self.children[0].predict(
                data[~split_indices, :])

        if self.children[1] is not None:
            predictions[split_indices] = self.children[1].predict(
                data[split_indices, :])

        return predictions 
Example 13
Project: fip-walkgen   Author: stephane-caron   File: swing_foot_control.py    GNU General Public License v3.0
def create_topp_instance(self):
        assert self.path is not None, "interpolate a path first"
        amax = self.max_foot_accel * ones(self.path.dimension)
        id_traj = "1.0\n1\n0.0 1.0"
        discrtimestep = self.discrtimestep
        ndiscrsteps = int((self.path.duration + 1e-10) / discrtimestep) + 1
        constraints = str(discrtimestep)
        constraints += "\n" + str(0.)  # no velocity limit
        for i in range(ndiscrsteps):
            s = i * discrtimestep
            ps = self.path.Evald(s)
            pss = self.path.Evaldd(s)
            constraints += "\n" + vect2str(+ps) + " " + vect2str(-ps)
            constraints += "\n" + vect2str(+pss) + " " + vect2str(-pss)
            constraints += "\n" + vect2str(-amax) + " " + vect2str(-amax)
        self.topp = TOPP.TOPPbindings.TOPPInstance(
            None, "QuadraticConstraints", constraints, id_traj)
        self.topp.integrationtimestep = 1e-3 
Example 14
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_util.py    MIT License
def __init__(self, wave_len=254, wave_dif=64, buffer_size=5, loop_num=5, window=np.hanning(254)):
        self.wave_len = wave_len
        self.wave_dif = wave_dif
        self.buffer_size = buffer_size
        self.loop_num = loop_num
        self.window = window

        self.wave_buf = np.zeros(wave_len+wave_dif, dtype=float)
        self.overwrap_buf = np.zeros(wave_dif*buffer_size+(wave_len-wave_dif), dtype=float)
        self.spectrum_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        self.absolute_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        
        self.phase = np.zeros(self.wave_len, dtype=complex)
        self.phase += np.random.random(self.wave_len)-0.5 + np.random.random(self.wave_len)*1j - 0.5j
        self.phase[self.phase == 0] = 1
        self.phase /= np.abs(self.phase) 
Example 15
Project: LSTM-diagnosis   Author: jfzhang95   File: layers.py    MIT License
def __init__(self, input_size, time_steps, momentum=0.1, epsilon=1e-6):
        self.gamma = theano.shared(np.ones(input_size, dtype=np.float32))
        self.beta = theano.shared(np.zeros(input_size, dtype=np.float32))
        self.params = [self.gamma, self.beta]

        self.epsilon = epsilon
        self.momentum = momentum
        self.shared_state = False
        self.train = True
        if not hasattr(BN, 'running_mean'):
            self.running_mean = theano.shared(np.zeros((time_steps, input_size), theano.config.floatX))

        if not hasattr(BN, 'running_std'):
            self.running_std = theano.shared(np.zeros((time_steps, input_size), theano.config.floatX))
Example 16
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    MIT License
def one_vs_all(X, y, num_labels, learning_rate):
    rows = X.shape[0]
    params = X.shape[1]
    
    # k X (n + 1) array for the parameters of each of the k classifiers
    all_theta = np.zeros((num_labels, params + 1))
    
    # insert a column of ones at the beginning for the intercept term
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # labels are 1-indexed instead of 0-indexed
    for i in range(1, num_labels + 1):
        theta = np.zeros(params + 1)
        y_i = np.array([1 if label == i else 0 for label in y])
        y_i = np.reshape(y_i, (rows, 1))
        
        # minimize the objective function
        fmin = minimize(fun=cost, x0=theta, args=(X, y_i, learning_rate), method='TNC', jac=gradient)
        all_theta[i-1,:] = fmin.x
    
    return all_theta 
Example 17
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    MIT License
def predict_all(X, all_theta):
    rows = X.shape[0]
    params = X.shape[1]
    num_labels = all_theta.shape[0]
    
    # same as before, insert ones to match the shape
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # convert to matrices
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)
    
    # compute the class probability for each class on each training instance
    h = sigmoid(X * all_theta.T)
    
    # create array of the index with the maximum probability
    h_argmax = np.argmax(h, axis=1)
    
    # because our array was zero-indexed we need to add one for the true label prediction
    h_argmax = h_argmax + 1
    
    return h_argmax 
Example 18
Project: deep-learning-note   Author: wdxtub   File: 6_bias_variance.py    MIT License
def prepare_poly_data(*args, power):
    """
    args: keep feeding in X, Xval, or Xtest
        will return in the same order
    """
    def prepare(x):
        # expand feature
        df = poly_features(x, power=power)

        # normalization
        ndarr = normalize_feature(df).to_numpy()

        # add intercept term
        return np.insert(ndarr, 0, np.ones(ndarr.shape[0]), axis=1)

    return [prepare(x) for x in args] 
Example 19
Project: deep-learning-note   Author: wdxtub   File: mnist_projector_generate.py    MIT License
def create_sprite_image(images):
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # the sprite image is a big square grid assembled from all of the small images
    m = int(np.ceil(np.sqrt(images.shape[0])))

    # initialize the final big image with all ones
    sprite_image = np.ones((img_h*m, img_w*m))

    for i in range(m):
        for j in range(m):
            # compute the index of the current image
            cur = i * m + j
            if cur < images.shape[0]:
                # copy the small image into the final sprite image
                sprite_image[i*img_h:(i+1)*img_h,
                             j*img_w:(j+1)*img_w] = images[cur]
    return sprite_image

# Load the MNIST data with one_hot=False, so each label is a single number rather than a one-hot vector
Example 20
Project: b2ac   Author: hbldh   File: inverse_iteration.py    MIT License
def inverse_iteration_for_eigenvector_double(A, eigenvalue, n_iterations=1):
    """Performs a series of inverse iteration steps with a known
    eigenvalue to produce its eigenvector.

    :param A: The 3x3 matrix to which the eigenvalue belongs.
    :type A: :py:class:`numpy.ndarray`
    :param eigenvalue: One eigenvalue of the matrix A.
    :type eigenvalue: float
    :param n_iterations: Number of iterations to perform the multiplication
     with the inverse. For an accurate eigenvalue, one iteration is enough
     for a ~1e-6 correct eigenvector. More than five is usually unnecessary.
    :type n_iterations: int
    :return: The eigenvector of this matrix and eigenvalue combination.
    :rtype: :py:class:`numpy.ndarray`

    """
    A = np.array(A, 'float')
    # Subtract the eigenvalue from the diagonal entries of the matrix.
    # N.B. Also slightly perturb the eigenvalue so the matrix will
    # not be so close to singular!
    for k in range(A.shape[0]):
        A[k, k] -= eigenvalue + 0.001
    # Obtain the inverse of the matrix.
    A_inv = mo.inverse_3by3_double(A).reshape((3, 3))
    # Instantiate the eigenvector to iterate with.
    eigenvector = np.ones((A.shape[0], ), 'float')
    eigenvector /= np.linalg.norm(eigenvector)
    # Perform the desired number of iterations.
    for k in range(n_iterations):
        eigenvector = np.dot(A_inv, eigenvector)
        eigenvector /= np.linalg.norm(eigenvector)

    if np.any(np.isnan(eigenvector)) or np.any(np.isinf(eigenvector)):
        print("Nan and/or Infs in eigenvector!")

    if (eigenvector[0] < 0) and (eigenvector[2] < 0):
        eigenvector = -eigenvector
    return eigenvector 
Example 21
Project: projection-methods   Author: akshayka   File: test_stability.py    GNU General Public License v3.0
def test_projection(self):
        """Test whether CVXPY works as expected wrt NumPy shapes."""
        x = cvxpy.Variable(1000)
        x_0 = np.ones(1000)
        soc_constr = [cvxpy.norm(x[:-1], 2) <= x[-1]]

        obj = cvxpy.Minimize(cvxpy.norm(x_0 - x, 2))
        prob = cvxpy.Problem(obj, soc_constr)
        prob.solve(solver=cvxpy.ECOS)
        self.assertTrue(x.value.shape == (1000, 1))
        x_star = np.array(x.value).flatten()
        self.assertTrue(x_star.shape == (1000,), x_star.shape)
        self.assertEqual(np.linalg.norm(x_0 - x_star, 2), obj.value)
        self.assertTrue(np.isclose(prob.value, obj.value, atol=1e-7)) 
Example 22
Project: projection-methods   Author: akshayka   File: polyak.py    GNU General Public License v3.0
def solve(self, problem):
        left_set = problem.sets[0]
        right_set = problem.sets[1]

        iterate = (self._initial_iterate if
            self._initial_iterate is not None else np.ones(problem.dimension))
        iterates = [iterate]
        residuals = []

        status = Optimizer.Status.INACCURATE
        for i in range(self.max_iters):
            if self.verbose:
                print('iteration %d' % i)
            x_k = iterates[-1]
            x_k_1 = left_set.project(x_k)
            tmp = right_set.project(x_k)

            residuals.append(self._compute_residual(x_k, x_k_1, tmp))
            if self.verbose:
                print('\tresidual: %e' % sum(residuals[-1]))
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break

            x_k_2 = right_set.project(x_k_1)
            x_k_3 = left_set.project(x_k_2)

            lambda_k = (np.linalg.norm(x_k_1 - x_k_2, ord=2)**2) / (
                np.dot(x_k_1 - x_k_3, x_k_1 - x_k_2))
            x_k_4 = x_k_1 + lambda_k * (x_k_3 - x_k_1)
            
            if self.momentum is not None:
                iterate = heavy_ball_update(
                    iterates=iterates, velocity=x_k_4-x_k,
                    alpha=self.momentum['alpha'],
                    beta=self.momentum['beta'])
            iterates.append(x_k_4)

        return iterates, residuals, status 
Example 23
Project: projection-methods   Author: akshayka   File: avgp.py    GNU General Public License v3.0
def solve(self, problem):
        left_set = problem.sets[0]
        right_set = problem.sets[1]

        iterate = (self._initial_iterate if
            self._initial_iterate is not None else np.ones(problem.dimension))
        iterates = [iterate]
        residuals = []

        status = Optimizer.Status.INACCURATE
        for i in range(self.max_iters):
            if self.verbose:
                print('iteration %d' % i)
            x_k = iterates[-1]
            y_k = left_set.project(x_k)
            z_k = right_set.project(x_k)

            residuals.append(self._compute_residual(x_k, y_k, z_k))
            if self.verbose:
                print('\tresidual: %e' % sum(residuals[-1]))
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break
            x_k_plus = 0.5 * (y_k + z_k)

            if self.momentum is not None:
                iterate = heavy_ball_update(
                    iterates=iterates, velocity=x_k_plus-x_k,
                    alpha=self.momentum['alpha'],
                    beta=self.momentum['beta'])
            iterates.append(x_k_plus)

        return iterates, residuals, status 
Example 24
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: spharafilter.py    BSD 3-Clause "New" or "Revised" License
def specification(self, specification):
        if isinstance(specification, int):
            if np.abs(specification) > self._triangsamples.vertlist.shape[0]:
                raise ValueError("""The number of selected basis functions is
                too large.""")
            else:
                if specification == 0:
                    self._specification = \
                        np.ones(self._triangsamples.vertlist.shape[0])
                else:
                    self._specification = \
                        np.zeros(self._triangsamples.vertlist.shape[0])
                    if specification > 0:
                        self._specification[:specification] = 1
                    else:
                        self._specification[specification:] = 1
        elif isinstance(specification, (list, tuple, np.ndarray)):
            specification = np.asarray(specification)
            if specification.shape[0] != self._triangsamples.vertlist.shape[1]:
                raise IndexError("""The length of the specification vector
                does not match the number of spatial sample points. """)
            else:
                self._specification = specification
        else:
            raise TypeError("""The parameter specification has to be an
            int or a vector.""")
Example 25
Project: DnD4py   Author: bacook17   File: roll4me.py    MIT License
def parse_roll(str_in):
    str_in = str_in.lower()
    if 'd' in str_in:
        n, d = [int(s) for s in str_in.split('d')]
        results = np.random.randint(low=1, high=d+1, size=(n_trials, n))
        mean = n * 0.5 * (d+1)
        return results[0].sum(), str(results[0]), mean, results.sum(axis=1)
    else:
        return int(str_in), '{:s}'.format(str_in), int(str_in), np.ones(n_trials)*int(str_in) 
Example 26
Project: building-boundary   Author: Geodan   File: segmentation.py    MIT License
def boundary_segmentation(points, distance):
    """
    Extract linear segments using RANSAC.

    Parameters
    ----------
    points : (Mx2) array
        The coordinates of the points.
    distance : float
        The maximum distance between a point and a line for a point to be
        considered belonging to that line.

    Returns
    -------
    segments : list of array
        The linear segments.
    """
    points_shifted = points.copy()
    shift = np.min(points_shifted, axis=0)
    points_shifted -= shift

    mask = np.ones(len(points_shifted), dtype=bool)
    indices = np.arange(len(points_shifted))

    segments = []
    extract_segments(segments, points_shifted, indices, mask, distance)

    segments = [points_shifted[i]+shift for i in segments]

    return segments 
Example 27
Project: fenics-topopt   Author: zfergus   File: problem.py    MIT License
def build_indices(self, nelx, nely):
        """ FE: Build the index vectors for the for coo matrix format. """
        self.KE = self.lk()
        self.edofMat = np.zeros((nelx * nely, 8), dtype=int)
        for elx in range(nelx):
            for ely in range(nely):
                el = ely + elx * nely
                n1 = (nely + 1) * elx + ely
                n2 = (nely + 1) * (elx + 1) + ely
                self.edofMat[el, :] = np.array([2 * n1 + 2, 2 * n1 + 3,
                    2 * n2 + 2, 2 * n2 + 3, 2 * n2, 2 * n2 + 1, 2 * n1,
                    2 * n1 + 1])
        # Construct the index pointers for the coo format
        self.iK = np.kron(self.edofMat, np.ones((8, 1))).flatten()
        self.jK = np.kron(self.edofMat, np.ones((1, 8))).flatten() 
Example 28
Project: fenics-topopt   Author: zfergus   File: tower.py    MIT License
def main(nelx, nely, volfrac, penal, rmin, ft):
    print("Minimum compliance problem with MMA")
    print("ndes: {:d} x {:d}".format(nelx, nely))
    print("volfrac: {:g}, rmin: {:g}, penal: {:g}".format(volfrac, rmin, penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])

    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)

    title = "ndes: {:d} x {:d}\nvolfrac: {:g}, rmin: {:g}, penal: {:g}".format(
        nelx, nely, volfrac, rmin, penal)
    gui    = GUI(nelx, nely, title)
    solver = LessTolerantSolver(nelx, nely, volfrac, penal, rmin, ft, gui,
        BoundaryConditions(nelx, nely))
    x_opt  = solver.optimize(x)
    x_opt  = solver.filter_variables(x_opt)
    gui.update(x_opt)

    from PIL import Image
    x_opt = x_opt.clip(0.0, 1.0)
    x_opt = ((1 - x_opt.reshape(nelx, nely)) * 255).round().astype("uint8").T
    x_opt_sym = x_opt[:, ::-1]
    result = Image.fromarray(np.hstack([x_opt_sym, x_opt]))
    result.save("tmp.png")

    # Make sure the plot stays and that the shell remains
    input("Press any key...") 
Example 29
Project: fenics-topopt   Author: zfergus   File: bridge.py    MIT License
def main(nelx, nely, volfrac, penal, rmin, ft):
    print("Minimum compliance problem with MMA")
    print("ndes: {:d} x {:d}".format(nelx, nely))
    print("volfrac: {:g}, rmin: {:g}, penal: {:g}".format(volfrac, rmin, penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])

    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)

    title = "ndes: {:d} x {:d}\nvolfrac: {:g}, rmin: {:g}, penal: {:g}".format(
        nelx, nely, volfrac, rmin, penal)
    gui    = GUI(nelx, nely, title)
    solver = LessTolerantSolver(nelx, nely, volfrac, penal, rmin, ft, gui,
        BoundaryConditions(nelx, nely))
    x_opt  = solver.optimize(x)
    x_opt  = solver.filter_variables(x_opt)
    gui.update(x_opt)

    from PIL import Image
    x_opt = x_opt.clip(0.0, 1.0)
    x_opt = ((1 - x_opt.reshape(nelx, nely)) * 255).round().astype("uint8").T
    x_opt_sym = x_opt[:, ::-1]
    result = Image.fromarray(np.hstack([x_opt_sym, x_opt]))
    result.save("tmp.png")

    # Make sure the plot stays and that the shell remains
    input("Press any key...") 
Example 30
Project: fenics-topopt   Author: zfergus   File: L_bracket.py    MIT License
def main(nelx, nely, volfrac, penal, rmin, ft):
    print("Minimum compliance problem with MMA")
    print("ndes: {:d} x {:d}".format(nelx, nely))
    print("volfrac: {:g}, rmin: {:g}, penal: {:g}".format(volfrac, rmin, penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])

    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)

    title = "ndes: {:d} x {:d}\nvolfrac: {:g}, rmin: {:g}, penal: {:g}".format(
        nelx, nely, volfrac, rmin, penal)
    gui    = GUI(nelx, nely, title)
    solver = LessTolerantSolver(nelx, nely, volfrac, penal, rmin, ft, gui,
        LBracketBoundaryConditions(nelx, nely, nelx // 3, 2 * nely // 3))
    x_opt  = solver.optimize(x)
    x_opt  = solver.filter_variables(x_opt)
    gui.update(x_opt)

    from PIL import Image
    x_opt = x_opt.clip(0.0, 1.0)
    x_opt = ((1 - x_opt.reshape(nelx, nely)) * 255).round().astype("uint8").T
    result = Image.fromarray(x_opt)
    result.save("tmp.png")

    # Make sure the plot stays and that the shell remains
    input("Press any key...") 
Example 31
Project: aospy   Author: spencerahill   File: test_data_loader.py    Apache License 2.0
def test_maybe_cast_to_float64(input_dtype, expected_dtype):
    da = xr.DataArray(np.ones(3, dtype=input_dtype))
    result = _maybe_cast_to_float64(da).dtype
    assert result == expected_dtype 
Example 32
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    Apache License 2.0
def ones(shape, dtype=None, name=None):
    '''Instantiates an all-ones variable.
    '''
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name) 
Example 33
Project: xrft   Author: xgcm   File: xrft.py    MIT License
def detrend_wrap(detrend_func):
    """
    Wrapper function for `xrft.detrendn`.
    """
    def func(a, axes=None):
        if axes is None:
            axes = tuple(range(a.ndim))
        elif len(set(axes)) < len(axes):
            raise ValueError("Duplicate axes are not allowed.")
        if len(axes) > 3:
            raise ValueError("Detrending is only supported up to "
                             "3 dimensions.")

        for each_axis in axes:
            if len(a.chunks[each_axis]) != 1:
                raise ValueError('The axis along which the detrending '
                                 'is applied cannot be chunked.')

        if len(axes) == 1:
            return dsar.map_blocks(sps.detrend, a, axis=axes[0],
                                   chunks=a.chunks, dtype=a.dtype
                                  )
        else:
            for each_axis in range(a.ndim):
                if each_axis not in axes:
                    if len(a.chunks[each_axis]) != a.shape[each_axis]:
                        raise ValueError("The axes other than ones to detrend "
                                        "over should have a chunk length of 1.")
            return dsar.map_blocks(detrend_func, a, axes,
                                   chunks=a.chunks, dtype=a.dtype
                                  )

    return func 
Example 34
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def numpy_detrend(da):
    """
    Detrend a 2D field by subtracting out the least-square plane fit.

    Parameters
    ----------
    da : `numpy.array`
        The data to be detrended

    Returns
    -------
    da : `numpy.array`
        The detrended input data
    """
    N = da.shape

    G = np.ones((N[0]*N[1],3))
    for i in range(N[0]):
        G[N[1]*i:N[1]*i+N[1], 1] = i+1
        G[N[1]*i:N[1]*i+N[1], 2] = np.arange(1, N[1]+1)

    d_obs = np.reshape(da.copy(), (N[0]*N[1],1))
    m_est = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), d_obs)
    d_est = np.dot(G, m_est)

    lin_trend = np.reshape(d_est, N)

    return da - lin_trend 
Example 35
Project: prediction-constrained-topic-models   Author: dtak   File: calc_roc_auc_via_bootstrap.py    MIT License
def verify_min_examples_per_label(y_NC, min_examples_per_label):
    '''
    
    Examples
    --------
    >>> y_all_0 = np.zeros(10)
    >>> y_all_1 = np.ones(30)
    >>> verify_min_examples_per_label(y_all_0, 3)
    False
    >>> verify_min_examples_per_label(y_all_1, 2)
    False
    >>> verify_min_examples_per_label(np.hstack([y_all_0, y_all_1]), 10)
    True
    >>> verify_min_examples_per_label(np.eye(3), 2)
    False
    '''
    if y_NC.ndim < 2:
        y_NC = np.atleast_2d(y_NC).T
    n_C = np.sum(np.isfinite(y_NC), axis=0)
    n_pos_C = n_C * np.nanmean(y_NC, axis=0)
    min_neg = np.max(n_C - n_pos_C)
    min_pos = np.min(n_pos_C)
    if min_pos < min_examples_per_label:
        return False
    elif min_neg < min_examples_per_label:
        return False
    return True 
Example 36
Project: FRIDA   Author: LCAV   File: point_cloud.py    MIT License
def EDM(self):
        ''' Computes the EDM corresponding to the marker set '''
        if self.X is None:
            raise ValueError('No marker set')

        G = np.dot(self.X.T, self.X)
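        # For an EDM, D_ij = G_ii + G_jj - 2*G_ij; the outer products with a
        # ones vector broadcast diag(G) across the rows and columns.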
        return np.outer(np.ones(self.m), np.diag(G)) \
            - 2*G + np.outer(np.diag(G), np.ones(self.m)) 
Example 37
Project: FRIDA   Author: LCAV   File: doa.py    MIT License
def build_lookup(self, r=None, theta=None, phi=None):
        """
        Construct lookup table for given candidate locations (in spherical 
        coordinates). Each column is a location in cartesian coordinates.

        :param r: Candidate distances from the origin.
        :type r: numpy array
        :param theta: Candidate azimuth angles with respect to x-axis.
        :type theta: numpy array
        :param phi: Candidate elevation angles with respect to z-axis.
        :type phi: numpy array
        """
        if theta is not None:
            self.theta = theta
        if phi is not None:
            self.phi = phi
        if r is not None:
            self.r = r
            if self.r == np.ones(1):
                self.mode = 'far'
            else:
                self.mode = 'near'
        self.loc = np.zeros([self.D, len(self.r) * len(self.theta) * 
            len(self.phi)])
        self.num_loc = self.loc.shape[1]
        # convert to cartesian
        for i in range(len(self.r)):
            r_s = self.r[i]
            for j in range(len(self.theta)):
                theta_s = self.theta[j]
                for k in range(len(self.phi)):
                    # spher = np.array([r_s,theta_s,self.phi[k]])
                    self.loc[:, i * len(self.theta) + j * len(self.phi) + k] = \
                        spher2cart(r_s, theta_s, self.phi[k])[0:self.D] 
Example 38
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License
def possibilities_generator(
        prior, min_pos, max_start_pos, constraint_len, total_filled):
    """
    Given a row prior, a min_pos, max_start_pos, and constraint length,
    yield each potential row

    prior is an array of:
        -1 (unknown),
        0 (definitely empty),
        1 (definitely filled)
    """
    prior_filled = np.zeros(len(prior)).astype(bool)
    prior_filled[prior == 1] = True
    prior_empty = np.zeros(len(prior)).astype(bool)
    prior_empty[prior == 0] = True
    for start_pos in range(min_pos, max_start_pos + 1):
        possible = -1 * np.ones(len(prior))
        possible[start_pos:start_pos + constraint_len] = 1
        if start_pos + constraint_len < len(possible):
            possible[start_pos + constraint_len] = 0
        if start_pos > 0:
            possible[start_pos - 1] = 0

        # add in the prior
        possible[np.logical_and(possible == -1, prior == 0)] = 0
        possible[np.logical_and(possible == -1, prior == 1)] = 1

        # if contradiction with prior, continue
        # 1. possible changes prior = 1 to something else
        # 2. possible changes prior = 0 to something else
        # 3. everything is assigned in possible but there are not
        #    enough filled in
        # 4. possible changes nothing about the prior
        if np.any(possible[np.where(prior == 1)[0]] != 1) or \
                np.any(possible[np.where(prior == 0)[0]] != 0) or \
                np.sum(possible == 1) > total_filled or \
                (np.all(possible >= 0) and np.sum(possible == 1) <
                    total_filled) or \
                np.all(prior == possible):
            continue
        yield possible 
Example 39
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License
def __init__(self, nonogram):
        self.nonogram = nonogram
        self.puzzle_state = -1 * np.ones((nonogram.n_rows, nonogram.n_cols))
        self.filled_positions_hint_eligible = nonogram.solution_list
        self.prefilled_positions = [] 
Example 40
Project: nonogram-solver   Author: mprat   File: nonogram.py    MIT License
def _init_puzzle(self):
        self.puzzle_state = -1 * np.ones((self.n_rows, self.n_cols)) 
Example 41
Project: ml_news_popularity   Author: khaledJabr   File: svr.py    MIT License
def calc_K_Mat(self, input_mat, degree, c ) : 
		k_mat = np.ones([input_mat.shape[0]+1 ,input_mat.shape[0]+1 ])
		k_mat[0][0] = 0
		i_matrix = np.identity(input_mat.shape[0])
		for i in range(1, k_mat.shape[0]) : 
		    for j in range (1, k_mat.shape[0]) : 
		    	if i % 500 == 0 : 
		    		print("Fitting : K Matrix ( {} , {} )".format(i , j))
		    	k = (np.sum((input_mat[i-1:i , : ]).T * input_mat[j-1:j , :]) ** degree)  + (1/c) * i_matrix[i-1][j-1]
		    	k_mat[i][j] = k
		return k_mat 
Example 42
Project: ml_news_popularity   Author: khaledJabr   File: svr.py    MIT License
def calc_K_matrix_input(self, input_data ): 
		input_k_matrix = np.ones([input_data.shape[0] +1 , self.training_data.shape[0] + 1] )
		input_k_matrix[0][0] = 0 
		for i in range(1, input_k_matrix.shape[0]):
			for j in range(1, input_k_matrix.shape[1]):
				if i % 500 == 0 : 
					print("Predicting : K Matrix ( {} , {} )".format(i , j))
				input_k_matrix[i][j] = (np.sum((input_data[i-1:i , : ]).T * self.training_data[j-1:j , :] ) ** self.degree)

		return input_k_matrix
Example 43
Project: mmdetection   Author: open-mmlab   File: eval_hooks.py    Apache License 2.0
def evaluate(self, runner, results):
        gt_bboxes = []
        gt_labels = []
        gt_ignore = []
        for i in range(len(self.dataset)):
            ann = self.dataset.get_ann_info(i)
            bboxes = ann['bboxes']
            labels = ann['labels']
            if 'bboxes_ignore' in ann:
                ignore = np.concatenate([
                    np.zeros(bboxes.shape[0], dtype=bool),
                    np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
                ])
                gt_ignore.append(ignore)
                bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
                labels = np.concatenate([labels, ann['labels_ignore']])
            gt_bboxes.append(bboxes)
            gt_labels.append(labels)
        if not gt_ignore:
            gt_ignore = None
        # If the dataset is VOC2007, then use 11 points mAP evaluation.
        if hasattr(self.dataset, 'year') and self.dataset.year == 2007:
            ds_name = 'voc07'
        else:
            ds_name = self.dataset.CLASSES
        mean_ap, eval_results = eval_map(
            results,
            gt_bboxes,
            gt_labels,
            gt_ignore=gt_ignore,
            scale_ranges=None,
            iou_thr=0.5,
            dataset=ds_name,
            print_summary=True)
        runner.log_buffer.output['mAP'] = mean_ap
        runner.log_buffer.ready = True 
Example 44
Project: mmdetection   Author: open-mmlab   File: voc_eval.py    Apache License 2.0
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True) 
Example 45
Project: mmdetection   Author: open-mmlab   File: coco_error_analysis.py    Apache License 2.0
def makeplot(rs, ps, outDir, class_name, iou_type):
    cs = np.vstack([
        np.ones((2, 3)),
        np.array([.31, .51, .74]),
        np.array([.75, .31, .30]),
        np.array([.36, .90, .38]),
        np.array([.50, .39, .64]),
        np.array([1, .6, 0])
    ])
    areaNames = ['allarea', 'small', 'medium', 'large']
    types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
    for i in range(len(areaNames)):
        area_ps = ps[..., i, 0]
        figure_title = iou_type + '-' + class_name + '-' + areaNames[i]
        aps = [ps_.mean() for ps_ in area_ps]
        ps_curve = [
            ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps
        ]
        ps_curve.insert(0, np.zeros(ps_curve[0].shape))
        fig = plt.figure()
        ax = plt.subplot(111)
        for k in range(len(types)):
            ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
            ax.fill_between(
                rs,
                ps_curve[k],
                ps_curve[k + 1],
                color=cs[k],
                label=str('[{:.3f}'.format(aps[k]) + ']' + types[k]))
        plt.xlabel('recall')
        plt.ylabel('precision')
        plt.xlim(0, 1.)
        plt.ylim(0, 1.)
        plt.title(figure_title)
        plt.legend()
        # plt.show()
        fig.savefig(outDir + '/{}.png'.format(figure_title))
        plt.close(fig) 
Example 46
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    BSD 3-Clause "New" or "Revised" License
def test_get_logits_over_interval(self):
        import tensorflow as tf
        model = cnn_model()
        wrap = KerasModelWrapper(model)
        fgsm_params = {'eps': .5}
        img = np.ones(shape=(28, 28, 1))
        num_points = 21
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            logits = utils.get_logits_over_interval(sess, wrap,
                                                    img, fgsm_params,
                                                    min_epsilon=-10,
                                                    max_epsilon=10,
                                                    num_points=num_points)
            self.assertEqual(logits.shape[0], num_points) 
Example 47
Project: neural-fingerprinting   Author: StephanZheng   File: picklable_model.py    BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, shape):
        self.input_shape = shape
        self.output_shape = shape
        channels = shape[-1]
        self.channels = channels
        self.actual_num_groups = min(self.channels, self.num_groups)
        extra_dims = (self.channels // self.actual_num_groups,
                      self.actual_num_groups)
        self.expanded_shape = tuple(shape[1:3]) + tuple(extra_dims)
        init_value = np.ones((channels,), dtype='float32') * self.init_gamma
        self.gamma = PV(init_value, name=self.name + "_gamma")
        self.beta = PV(np.zeros((self.channels,), dtype='float32'),
                       name=self.name + "_beta") 
Example 48
Project: neural-fingerprinting   Author: StephanZheng   File: test_serial.py    BSD 3-Clause "New" or "Revised" License
def test_save_and_load_var():
    """
    Tests that we can save and load a PicklableVariable with joblib
    """
    sess = tf.Session()
    with sess.as_default():
        x = np.ones(1)
        xv = PicklableVariable(x)
        xv.var.initializer.run()
        save("/tmp/var.joblib", xv)
        sess.run(tf.assign(xv.var, np.ones(1) * 2))
        new_xv = load("/tmp/var.joblib")
        assert np.allclose(sess.run(xv.var), np.ones(1) * 2)
        assert np.allclose(sess.run(new_xv.var), np.ones(1)) 
Example 49
Project: voice-recognition   Author: golabies   File: filters.py    MIT License
def smooth(signal, wsz):
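        # np.ones(wsz)/wsz is a length-wsz moving-average kernel; 'valid' avoids edge
        # artifacts, and start/stop rebuild the shrinking-window averages at the edges.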
        out0 = np.convolve(signal, np.ones(wsz, dtype=float), 'valid') / wsz
        r = np.arange(1, wsz - 1, 2)
        start = np.cumsum(signal[:wsz - 1])[::2] / r
        stop = (np.cumsum(signal[:-wsz:-1])[::2] / r)[::-1]
        return np.concatenate((start, out0, stop)) 
Example 50
Project: ML_from_scratch   Author: jarfa   File: test_util.py    Apache License 2.0
    def test_roc_auc_binary_data(self):
        with self.assertRaises(ValueError):
            roc_auc(
                np.array([0, 1, 3, 1]),
                np.ones(4) * 0.1
        ) 
Example 51
Project: ML_from_scratch   Author: jarfa   File: test_util.py    Apache License 2.0
def test_roc_auc_mult_observed(self):
        with self.assertRaises(ValueError):
            roc_auc(
                np.zeros(4),
                np.ones(4) * 0.1
        ) 
Example 52
Project: programsynthesishunting   Author: flexgp   File: regression_random_polynomial.py    GNU General Public License v3.0
def eval(self, x):
        """Evaluate the polynomial at a set of points x."""
        assert x.shape[0] == self.n_vars
        result = np.zeros(x.shape[1]) # same length as a column of x
        for coef, pows in zip(self.coefs, self.terms(self.degree, self.n_vars)):
            tmp = np.ones(x.shape[1])
            for (xi, pow) in zip(x, pows):
                tmp *= (xi ** pow)
            tmp *= coef
            result += tmp
        return result 
Example 53
Project: programsynthesishunting   Author: flexgp   File: baselines.py    GNU General Public License v3.0
def fit_maj_class(train_X, train_y, test_X):
    """
    Use the majority class, for a binary problem...
    
    :param train_X: An array of input (X) training data.
    :param train_y: An array of expected output (Y) training data.
    :param test_X: An array of input (X) testing data.
    :return:
    """
    
    # Set training Y data to int type.
    train_y = train_y.astype(int)
    
    # Get all classes from training Y data, often just {0, 1} or {-1, 1}.
    classes = set(train_y)
    
    # Get majority class.
    maj = Counter(train_y).most_common(1)[0][0]
    
    # Generate model.
    model = "Majority class %d" % maj
    
    # Generate training and testing output values.
    yhat_train = maj * np.ones(len(train_y))
    yhat_test = maj * np.ones(len(test_X))
    
    return model, yhat_train, yhat_test 
Example 54
Project: programsynthesishunting   Author: flexgp   File: baselines.py    GNU General Public License v3.0
def fit_const(train_X, train_y, test_X):
    """
    Use the mean of the y training values as a predictor.
    
    :param train_X:
    :param train_y:
    :param test_X:
    :return:
    """
    mn = np.mean(train_y)
    yhat_train = np.ones(len(train_y)) * mn
    yhat_test = np.ones(len(test_X)) * mn
    model = "Const %.2f" % mn
    
    return model, yhat_train, yhat_test 
Example 55
Project: cplot   Author: sunchaoatmo   File: context.py    GNU General Public License v3.0
def buildregmap(self):
    import numpy as np
    nx,ny=self.regmap.shape
    nc=np.max(self.regmap)-1  # region ids start at 1 (so the count is the max), and the whole-land and Taiwan entries are excluded, hence the net -1
    regmask_new=np.ones((nc,nx,ny))
    for i in range(nc):
      regmask_new[i,self.regmap==i+1]=0
    #regmask_new[-1,:,:]=self.eastmask
    self.regmask_new=regmask_new
    self.nregs=nc 
Example 56
Project: skylab   Author: coenders   File: ps_injector.py    GNU General Public License v3.0
def rotate_struct(ev, ra, dec):
    r"""Wrapper around `utils.rotated` for structured arrays

    Parameters
    ----------
    ev : ndarray
        Structured array describing events that will be rotated
    ra : float
        Right ascension of direction events will be rotated on
    dec : float
        Declination of direction events will be rotated on

    Returns
    --------
    ndarray:
        Structured array describing rotated events; true information are
        deleted.

    """
    rot = np.copy(ev)

    rot["ra"], rot_dec = utils.rotate(
        ev["trueRa"], ev["trueDec"], ra * np.ones(len(ev)),
        dec * np.ones(len(ev)), ev["ra"], np.arcsin(ev["sinDec"]))

    if "dec" in ev.dtype.names:
        rot["dec"] = rot_dec

    rot["sinDec"] = np.sin(rot_dec)

    # Delete Monte Carlo information from sampled events.
    mc = ["trueRa", "trueDec", "trueE", "ow"]

    return numpy.lib.recfunctions.drop_fields(rot, mc) 
Example 57
Project: skylab   Author: coenders   File: ps_model.py    GNU General Public License v3.0
def weight(self, ev, **params):
        r"""For classicLLH, no weighting of events

        """
        return np.ones(len(ev)), None 
Example 58
Project: deep-learning-note   Author: wdxtub   File: multi_layer_net_extend.py    MIT License
def __init__(self, input_size, hidden_size_list, output_size,
                 activation='relu', weight_init_std='relu', weight_decay_lambda=0,
                 use_dropout=False, dropout_ration=0.5, use_batchnorm=False):
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        self.use_dropout = use_dropout
        self.weight_decay_lambda = weight_decay_lambda
        self.use_batchnorm = use_batchnorm
        self.params = {}

        # initialize the weights
        self.__init_weight(weight_init_std)

        # build the layers
        activation_layer = {'sigmoid': Sigmoid, 'relu': Relu}
        self.layers = OrderedDict()
        for idx in range(1, self.hidden_layer_num + 1):
            self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)],
                                                      self.params['b' + str(idx)])
            if self.use_batchnorm:
                self.params['gamma' + str(idx)] = np.ones(hidden_size_list[idx - 1])
                self.params['beta' + str(idx)] = np.zeros(hidden_size_list[idx - 1])
                self.layers['BatchNorm' + str(idx)] = BatchNormalization(self.params['gamma' + str(idx)],
                                                                         self.params['beta' + str(idx)])

            self.layers['Activation_function' + str(idx)] = activation_layer[activation]()

            if self.use_dropout:
                self.layers['Dropout' + str(idx)] = Dropout(dropout_ration)

        idx = self.hidden_layer_num + 1
        self.layers['Affine' + str(idx)] = Affine(self.params['W' + str(idx)], self.params['b' + str(idx)])

        self.last_layer = SoftmaxWithLoss() 
Example 59
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    MIT License
def forward_propagate(X, theta1, theta2):
    m = X.shape[0]
    
    a1 = np.insert(X, 0, values=np.ones(m), axis=1)
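    # the prepended column of ones is the bias (intercept) unit fed into theta1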
    z2 = a1 * theta1.T
    a2 = np.insert(sigmoid(z2), 0, values=np.ones(m), axis=1)
    z3 = a2 * theta2.T
    h = sigmoid(z3)
    
    return a1, z2, a2, z3, h 
Example 60
Project: core   Author: lifemapper   File: create_mask.py    GNU General Public License v3.0
def create_blank_mask(bbox, cell_size, epsg, nodata=DEFAULT_NODATA, 
                             ascii_filename=None, tiff_filename=None):
    """
    @summary: Create a blank mask raster for the specified region
    @param bbox: (minx, miny, maxx, maxy) tuple of raster coordinates
    @param cell_size: The cell size, in map units, of each cell in the raster
    @param epsg: The epsg code of the map projection to use for this raster
    @param nodata: A value to use for NODATA
    @param ascii_filename: If provided, write the mask raster as ASCII to this 
                                      location
    @param tiff_filename: If provided, write the mask raster as GeoTiff to this
                                     location
    """
    minx, miny, maxx, maxy = bbox
    num_cols = int(float(maxx - minx) / cell_size)
    num_rows = int(float(maxy - miny) / cell_size)
    
    data = np.ones((num_rows, num_cols), dtype=np.int8)
    
    if ascii_filename is not None:
        write_ascii(ascii_filename, bbox, cell_size, data, epsg, nodata=nodata)
    
    if tiff_filename is not None:
        write_tiff(tiff_filename, bbox, cell_size, data, epsg, nodata=nodata)
    
# ............................................................................. 
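
The raster itself is just an int8 array of ones; a standalone sketch with invented bounds (the project-specific write_ascii/write_tiff helpers are omitted):

import numpy as np

bbox = (0.0, 0.0, 10.0, 5.0)   # (minx, miny, maxx, maxy), map units
cell_size = 0.5
minx, miny, maxx, maxy = bbox
num_cols = int(float(maxx - minx) / cell_size)   # 20
num_rows = int(float(maxy - miny) / cell_size)   # 10
data = np.ones((num_rows, num_cols), dtype=np.int8)
print(data.shape)  # (10, 20)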
Example 63
Project: b2ac   Author: hbldh   File: reference.py    MIT License 4 votes vote down vote up
def fit_improved_B2AC_numpy(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.

    This version of the fitting simply applies NumPy's methods for calculating
    the conic section, modelled after the Matlab code in the paper:

    .. code-block:: matlab

        function a = fit_ellipse(x, y)

        D1 = [x .^ 2, x .* y, y .^ 2]; % quadratic part of the design matrix
        D2 = [x, y, ones(size(x))]; % linear part of the design matrix
        S1 = D1' * D1; % quadratic part of the scatter matrix
        S2 = D1' * D2; % combined part of the scatter matrix
        S3 = D2' * D2; % linear part of the scatter matrix
        T = - inv(S3) * S2'; % for getting a2 from a1
        M = S1 + S2 * T; % reduced scatter matrix
        M = [M(3, :) ./ 2; - M(2, :); M(1, :) ./ 2]; % premultiply by inv(C1)
        [evec, eval] = eig(M); % solve eigensystem
        cond = 4 * evec(1, :) .* evec(3, :) - evec(2, :) .^ 2; % evaluate a'Ca
        a1 = evec(:, find(cond > 0)); % eigenvector for min. pos. eigenvalue
        a = [a1; T * a1]; % ellipse coefficients

    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`

    """
    x = points[:, 0]
    y = points[:, 1]

    D1 = np.vstack([x ** 2, x * y, y ** 2]).T
    D2 = np.vstack([x, y, np.ones((len(x), ), dtype=x.dtype)]).T
    S1 = D1.T.dot(D1)
    S2 = D1.T.dot(D2)
    S3 = D2.T.dot(D2)
    T = -np.linalg.inv(S3).dot(S2.T)
    M = S1 + S2.dot(T)
    M = np.array([M[2, :] / 2, -M[1, :], M[0, :] / 2])
    eval, evec = np.linalg.eig(M)
    cond = (4 * evec[:, 0] * evec[:, 2]) - (evec[:, 1] ** 2)
    I = np.where(cond > 0)[0]
    a1 = evec[:, I[np.argmin(cond[I])]]
    return np.concatenate([a1, T.dot(a1)]) 
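
A hedged smoke test: noise-free points on the axis-aligned ellipse x^2/16 + y^2/9 = 1 should be recovered up to scale (the expected ratios below follow from that equation, not from the source):

import numpy as np

t = np.linspace(0, 2 * np.pi, 100, endpoint=False)
points = np.column_stack([4.0 * np.cos(t), 3.0 * np.sin(t)])
conic = fit_improved_B2AC_numpy(points)
# conic is [a, b, c, d, e, f] for ax^2 + bxy + cy^2 + dx + ey + f = 0;
# dividing by a should give approximately [1, 0, 16/9, 0, 0, -16].
print(conic / conic[0])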
Example 64
Project: projection-methods   Author: akshayka   File: test_soc.py    GNU General Public License v3.0 4 votes vote down vote up
def test_projection(self):
        """Test projection, query methods of the SOC oracle."""
        x = cvxpy.Variable(1000)
        x_0 = np.ones(1000)
        soc = SOC(x)
        constr = soc._constr

        # Test idempotency
        z = x_0[:-1]
        norm_z = np.linalg.norm(x_0[:-1], 2)
        x_0[-1] = norm_z + 10
        x_star = soc.project(x_0)
        self.assertTrue(soc.contains(x_star))
        self.assertTrue(np.array_equal(x_0, x_star), "projection not "
            "idempotent")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, soc, idempotent=True)
        

        # Test the case in which norm_z <= -t
        x_0[-1] = -1 * norm_z
        x_star = soc.project(x_0)
        self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
            "projection not zero")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, soc, idempotent=False)

        # Test the case in which norm_z > abs(t)
        x_0[-1] = norm_z - 10
        x_star = soc.project(x_0)
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, soc, idempotent=False)

        # Test random projection
        x_0 = np.random.randn(1000)
        x_star = soc.project(x_0)
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all()) 
Example 65
Project: projection-methods   Author: akshayka   File: scs_admm.py    GNU General Public License v3.0 4 votes vote down vote up
def solve(self, problem):
        if not isinstance(problem, SCSProblem):
            raise ValueError('SCSADMM can only solve SCSProblem instances, '
                'but received an instance of %s' % type(problem))
        product_set = problem.sets[0]
        affine_set = problem.sets[1]
            
        iterate = np.ones(problem.dimension)
        iterates = [iterate]
        residuals = []
        info = []

        status = Optimizer.Status.INACCURATE
        for i in xrange(self.max_iters):
            if self.verbose:
                print 'iteration %d' % i
            uv_k = iterates[-1]
            residuals.append(self._compute_residual(
                uv_k, product_set.project(uv_k), affine_set.project(uv_k)))
            if self.verbose:
                r = residuals[-1]
                print '\tresidual: %e' % sum(r)
                print '\t\tproduct: %e' % r[0]
                print '\t\taffine: %e' % r[1]
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break

            # Parse uv into its components
            u_k = self._u(problem, uv_k)
            v_k = self._v(problem, uv_k)
            u_k_plus_v_k = u_k + v_k
            # Project onto the affine set and parse
            uv_k_tilde, h_a = affine_set.query(np.hstack(
                (u_k_plus_v_k, u_k_plus_v_k)))
            u_k_tilde = self._u(problem, uv_k_tilde)    
            v_k_tilde = self._v(problem, uv_k_tilde)
            # Note that we could have used the Moreau decomposition here to
            # save on computation, but computing both cone projections
            # explicitly is easier with my code
            u_k_prime = u_k_tilde - v_k
            v_k_prime = v_k_tilde - u_k
            uv_k_plus, h_p = product_set.query(np.hstack(
                (u_k_prime, v_k_prime)))
            iterates.append(uv_k_plus)
            info.extend(h_a + h_p)
        # TODO(akshayka): Consider polishing the result at this step, or even
        # running apop using the final iterate
        if self.polish:
            polisher = APOP(max_iters=100, atol=self.atol, do_all_iters=True,
                initial_iterate=iterates[-1],
                average=False, info=info, verbose=True)
            it, res, status = polisher.solve(problem)
            iterates.extend(it)
            residuals.extend(res)
        return iterates, residuals, status 
Example 66
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: minibatch.py    MIT License 4 votes vote down vote up
def get_minibatch(roidb, num_classes):
  """Given a roidb, construct a minibatch sampled from it."""
  num_images = len(roidb)
  # Sample random scales to use for each image in this batch
  random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
                  size=num_images)
  assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
    'num_images ({}) must divide BATCH_SIZE ({})'. \
    format(num_images, cfg.TRAIN.BATCH_SIZE)

  # Get the input image blob, formatted for caffe
  im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)

  blobs = {'data': im_blob}

  assert len(im_scales) == 1, "Single batch only"
  assert len(roidb) == 1, "Single batch only"
  
  # gt boxes: (x1, y1, x2, y2, cls)
  #if cfg.TRAIN.USE_ALL_GT:
    # Include all ground truth boxes
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
  #else:
    # For the COCO ground truth boxes, exclude the ones that are ''iscrowd'' 
  #  gt_inds = np.where(roidb[0]['gt_classes'] != 0 & np.all(roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
  #gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
  #gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
  #gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
  boxes = roidb[0]['boxes'] * im_scales[0]
  batch_ind = 0 * np.ones((boxes.shape[0], 1))
  boxes = np.hstack((batch_ind, boxes))
  DEDUP_BOXES=1./16.
  if DEDUP_BOXES > 0:
    v = np.array([1,1e3, 1e6, 1e9, 1e12])
    hashes = np.round(boxes * DEDUP_BOXES).dot(v)
    _, index, inv_index = np.unique(hashes, return_index=True,
                                    return_inverse=True)
    boxes = boxes[index, :]
  
  blobs['boxes'] = boxes
  blobs['im_info'] = np.array(
    [im_blob.shape[1], im_blob.shape[2], im_scales[0]],
    dtype=np.float32)
  blobs['labels'] = roidb[0]['labels']

  return blobs 
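
The DEDUP_BOXES trick above is easy to check in isolation: coordinates are snapped to a 1/16 grid and each 5-number row (batch index plus 4 coordinates) is hashed to one scalar, so near-duplicate boxes collapse. Box values here are invented:

import numpy as np

boxes = np.array([[0., 0.0, 0.0, 16.0, 16.0],
                  [0., 0.1, 0.2, 16.1, 15.9],    # same box after quantization
                  [0., 32.0, 32.0, 64.0, 64.0]])
DEDUP_BOXES = 1. / 16.
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(boxes * DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)
print(boxes[index, :])   # only two distinct boxes survive
print(inv_index)         # maps each original row back to its unique box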
Example 67
Project: xrft   Author: xgcm   File: xrft.py    MIT License 4 votes vote down vote up
def detrendn(da, axes=None):
    """
    Detrend by subtracting out the least-squares plane or least-squares cubic
    fit, depending on the number of axes.

    Parameters
    ----------
    da : `xarray.DataArray` or `dask.array`
        The data to be detrended
    axes : list of int
        The axes along which to detrend

    Returns
    -------
    da : `numpy.array`
        The detrended input data
    """
    N = [da.shape[n] for n in axes]
    M = []
    for n in range(da.ndim):
        if n not in axes:
            M.append(da.shape[n])

    if len(N) == 2:
        G = np.ones((N[0]*N[1],3))
        for i in range(N[0]):
            G[N[1]*i:N[1]*i+N[1], 1] = i+1
            G[N[1]*i:N[1]*i+N[1], 2] = np.arange(1, N[1]+1)
        if type(da) == xr.DataArray:
            d_obs = np.reshape(da.copy().values, (N[0]*N[1],1))
        else:
            d_obs = np.reshape(da.copy(), (N[0]*N[1],1))
    elif len(N) == 3:
        if type(da) == xr.DataArray:
            if da.ndim > 3:
                raise NotImplementedError("Cubic detrend is not implemented "
                                         "for 4-dimensional `xarray.DataArray`."
                                         " We suggest converting it to "
                                         "`dask.array`.")
            else:
                d_obs = np.reshape(da.copy().values, (N[0]*N[1]*N[2],1))
        else:
            d_obs = np.reshape(da.copy(), (N[0]*N[1]*N[2],1))

        G = np.ones((N[0]*N[1]*N[2],4))
        G[:,3] = np.tile(np.arange(1,N[2]+1), N[0]*N[1])
        ys = np.zeros(N[1]*N[2])
        for i in range(N[1]):
            ys[N[2]*i:N[2]*i+N[2]] = i+1
        G[:,2] = np.tile(ys, N[0])
        for i in range(N[0]):
            G[len(ys)*i:len(ys)*i+len(ys),1] = i+1
    else:
        raise NotImplementedError("Detrending over more than 4 axes is "
                                 "not implemented.")

    m_est = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), d_obs)
    d_est = np.dot(G, m_est)

    lin_trend = np.reshape(d_est, da.shape)

    return da - lin_trend 
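
A hedged smoke test, assuming the source module's imports (numpy as np, xarray as xr, scipy.linalg as spl): a pure plane lies in the span of the design matrix G, so detrending it should leave zeros.

import numpy as np
import scipy.linalg as spl
import xarray as xr

j, i = np.meshgrid(np.arange(4), np.arange(5), indexing='ij')
da = xr.DataArray(2.0 + 0.5 * j - 0.25 * i, dims=('y', 'x'))
residual = detrendn(da, axes=[0, 1])
print(np.allclose(residual, 0))  # True up to round-off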
Example 68
Project: FRIDA   Author: LCAV   File: doa.py    MIT License 4 votes vote down vote up
def __init__(self, L, fs, nfft, c=343.0, num_src=1, mode='far', r=None, 
        theta=None, phi=None):

        self.L = L              # locations of mics
        self.fs = fs            # sampling frequency
        self.c = c              # speed of sound
        self.M = L.shape[1]     # number of microphones
        self.D = L.shape[0]     # number of dimensions (x,y,z)
        self.num_snap = None    # number of snapshots

        self.nfft = nfft
        self.max_bin = int(self.nfft/2) + 1
        self.freq_bins = None
        self.freq_hz = None
        self.num_freq = None

        self.num_src = self._check_num_src(num_src)
        self.sources = np.zeros([self.D, self.num_src])
        self.src_idx = np.zeros(self.num_src, dtype=np.int)
        self.phi_recon = None

        self.mode = mode
        if self.mode == 'far':
            self.r = np.ones(1)
        elif r is None:
            self.r = np.ones(1)
            self.mode = 'far'
        else:
            self.r = r
            if np.array_equal(r, np.ones(1)):
                mode = 'far'
        if theta is None:
            self.theta = np.linspace(-180., 180., 30) * np.pi / 180
        else:
            self.theta = theta
        if phi is None:
            self.phi = np.pi / 2 * np.ones(1)
        else:
            self.phi = phi

        # spatial spectrum / dirty image (FRI)
        self.P = None

        # build lookup table to candidate locations from r, theta, phi 
        from fri import FRI
        if not isinstance(self, FRI):
            self.loc = None
            self.num_loc = None
            self.build_lookup()
            self.mode_vec = None
            self.compute_mode()
        else:   # no grid search for FRI
            self.num_loc = len(self.theta) 
Example 69
Project: ieml   Author: IEMLdev   File: tree_graph.py    GNU General Public License v3.0 4 votes vote down vote up
def __init__(self, list_transitions):
        """
        Transitions must be (start, end, data) tuples; the data is stored as
        the transition tag.
        :param list_transitions:
        """
        # transitions: dict mapping each start node to the list of
        # (end, transition) tuples leaving it
        self.transitions = defaultdict(list)
        for t in list_transitions:
            self.transitions[t[0]].append((t[1], t))

        self.nodes = sorted(set(self.transitions) | {e[0] for l in self.transitions.values() for e in l})

        # sort the transitions
        for s in self.transitions:
            self.transitions[s].sort(key=lambda t: self.nodes.index(t[0]))

        self.nodes_index = {n: i for i, n in enumerate(self.nodes)}
        _count = len(self.nodes)
        self.array = numpy.zeros((len(self.nodes), len(self.nodes)), dtype=bool)

        for t in self.transitions:
            for end in self.transitions[t]:
                self.array[self.nodes_index[t]][self.nodes_index[end[0]]] = True

        # checking
        # root checking: no_parents holds True at each index whose node has no parent
        parents_count = numpy.dot(self.array.transpose().astype(dtype=int), numpy.ones((_count,), dtype=int))
        no_parents = parents_count == 0
        roots_count = numpy.count_nonzero(no_parents)

        if roots_count == 0:
            raise InvalidTreeStructure('No root node found, the graph has at least a cycle.')
        elif roots_count > 1:
            raise InvalidTreeStructure('Several root nodes found.')

        self.root = self.nodes[no_parents.nonzero()[0][0]]

        if (parents_count > 1).any():
            raise InvalidTreeStructure('A node has several parents.')

        def __stage():
            current = [self.root]
            while current:
                yield current
                current = [child[0] for parent in current for child in self.transitions[parent]]

        self.stages = list(__stage()) 
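
A hedged usage sketch, assuming the enclosing class is named TreeGraph (the file name suggests it) and that InvalidTreeStructure is the project's exception type:

transitions = [('a', 'b', 'tag-ab'), ('a', 'c', 'tag-ac'), ('b', 'd', 'tag-bd')]
tree = TreeGraph(transitions)
print(tree.root)    # 'a'
print(tree.stages)  # [['a'], ['b', 'c'], ['d']]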
Example 70
Project: DataHack2018   Author: InnovizTech   File: vis.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def color_pc(self, pc, coloring='reflectivity_and_label', colormap='pc_cmap'):
        """
        Generate coloring for point cloud based on multiple options
        :param pc: point cloud
        :param coloring: Coloring option. Supported: 'reflectivity', 'reflectivity_and_label', an int label array of point cloud size, a callable mapping points to scalars, or an np.array of point cloud size x 4 with point colors
        :return:
        """
        if colormap == 'pc_cmap':
            colormap = pc_cmap

        points = pc[:, :3]
        color = np.zeros((len(pc), 4))
        color[:, -1] = 1.

        if isinstance(coloring, np.ndarray) and coloring.dtype == np.int and coloring.shape == (points.shape[0],):
            cmap = ListedColormap(
                ['w', 'magenta', 'orange', 'mediumspringgreen', 'deepskyblue', 'pink', 'y', 'g', 'r', 'purple', ])
            coloring = np.mod(coloring, len(cmap.colors))
            c = cm.ScalarMappable(cmap=cmap, norm=mcolors.Normalize(vmin=0, vmax=len(cmap.colors)-1))
            color = c.to_rgba(coloring)
        elif isinstance(coloring, np.ndarray):
            if coloring.shape == (points.shape[0], 4):
                color = coloring
            if coloring.shape == (points.shape[0], ):
                c = cm.ScalarMappable(cmap=colormap)
                color = c.to_rgba(coloring, norm=False)
        elif isinstance(coloring, collections.Callable):
            colors = coloring(points)
            c = cm.ScalarMappable(cmap=colormap)
            color = c.to_rgba(colors)
        elif coloring == 'reflectivity':
            reflectivity = pc[:, 3]
            reflectivity[reflectivity > 1] = 1
            c = cm.ScalarMappable(cmap=colormap)
            color = c.to_rgba(reflectivity, norm=False)
            color[reflectivity < 0] = np.array([1.0, 1.0, 1.0, 1.0])
        elif coloring == 'reflectivity_and_label':
            # pc_colors
            reflectivity = pc[:, 3]
            reflectivity[reflectivity > 1] = 1
            c = cm.ScalarMappable(cmap=colormap)
            color = c.to_rgba(reflectivity, norm=False)
            if pc.shape[-1] == 5:
                labels = pc[:, 4]
                labels_valid = labels[labels > 0]
                c = cm.ScalarMappable(cmap=label_cmap)
                color_labels = c.to_rgba(labels_valid, norm=True)
                color[labels > 0] = color_labels
        else:
            color = np.ones((points.shape[0], 4))
            color[:, -1] = 1.
        return color 
Example 71
Project: kuaa   Author: rafaelwerneck   File: OPF.py    GNU General Public License v3.0 4 votes vote down vote up
def __dijkstra(self, A, seeds, seeds_labels):
        """
        Parameters:
            A = the adjacency matrix of a graph
            seeds = the indices of the nodes to be used as seeds of the SPF-Max
            seeds_labels = labels of the seeds.
        Note:
            This function corresponds to the multi-source-max Dijkstra algorithm.
        Return:
            c1 = costs to the corresponding parent root
            par = parents nodes
            lab = labels of the nodes
        """
        h = 0
        n = A.shape[0]
        done = np.zeros(n).astype('bool')
        c1  = np.ones(n) * float("inf")
        par = np.ones(n) * -1
        lab = np.ones(n) * float("inf")
        heap = []
        for i, v0 in enumerate(seeds):
            c1[v0] = h
            par[v0] = v0
            lab[v0] = seeds_labels[i]
            heappush(heap, (h, v0))

        while len(heap) > 0:
            (_, p) = heappop(heap)
            done[p] = True
            for q in xrange(n):
                if p == q or done[q]:
                    continue
                c = max(c1[p], A[p, q])  # fcost
                if c < c1[q]:
                    if c1[q] < float("inf"):
                        try:
                            heap.remove((c1[q], q))
                            heapify(heap)
                        except ValueError:
                            pass
                    c1[q]  = c
                    par[q] = p
                    lab[q] = lab[p]
                    heappush(heap, (c1[q], q))

        return c1, par, lab

#   @staticmethod 
Example 72
Project: kuaa   Author: rafaelwerneck   File: OPF.py    GNU General Public License v3.0 4 votes vote down vote up
def __prim(self, A):
        """
        Parameters:
        A = Weighted adjacency matrix. Non-connected nodes have weight 'Inf'.
        Note:

        Return:
        B =  weighted adjacency matrix corresponding to the MST of A.
        """
        root = 0
        # prevents zero-weighted MSTs
        A = A.astype('double')

        n = A.shape[0]
        if n == 0: return []

        d = np.zeros(n)         # type(d) = numpy.ndarray
        pred = np.zeros(n, int)  # type(pred) = numpy.ndarray
        B = np.ones(A.shape) * float("inf")  # type(B) = numpy.ndarray

        b0 = root         # first vertex as root. type(b0) = int
        Q = range(0, n)   # all vertices. type(Q) = list
        Q.remove(root)    # except the root.
        d[Q] = A[Q, b0]   # distances from root.
        pred[Q] = b0      # predecessor as root.

        while Q != []:
            s = np.argmin(d[Q])  # function `argmin' in module `numpy.core.fromnumeric'.
            closest_i = Q[s]  # find vertex in Q with smallest weight from b0
            Q.remove(closest_i)
            b0 = closest_i
            b1 = pred[closest_i]
            B[b0, b1] = B[b1, b0] = A[b0, b1]  # insert this edge in MST weight matrix
            if Q == []: break
            QA = np.array(Q)    # remaining vertexes.
            iii = np.where(A[QA, closest_i] < d[QA])
            d[QA[iii]] = A[QA[iii], closest_i]     # update weights to pred
            pred[QA[iii]] = closest_i             # update pred

        return B

#   @staticmethod 
Example 73
Project: fbpconv_tf   Author: panakino   File: image_gen.py    GNU General Public License v3.0 4 votes vote down vote up
def create_image_and_label(nx,ny, cnt = 10, r_min = 5, r_max = 50, border = 92, sigma = 20, rectangles=False):
    
    
    image = np.ones((nx, ny, 1))
    label = np.zeros((nx, ny, 3), dtype=np.bool)
    mask = np.zeros((nx, ny), dtype=np.bool)
    for _ in range(cnt):
        a = np.random.randint(border, nx-border)
        b = np.random.randint(border, ny-border)
        r = np.random.randint(r_min, r_max)
        h = np.random.randint(1,255)

        y,x = np.ogrid[-a:nx-a, -b:ny-b]
        m = x*x + y*y <= r*r
        mask = np.logical_or(mask, m)

        image[m] = h

    label[mask, 1] = 1
    
    if rectangles:
        mask = np.zeros((nx, ny), dtype=np.bool)
        for _ in range(cnt//2):
            a = np.random.randint(nx)
            b = np.random.randint(ny)
            r =  np.random.randint(r_min, r_max)
            h = np.random.randint(1,255)
    
            m = np.zeros((nx, ny), dtype=np.bool)
            m[a:a+r, b:b+r] = True
            mask = np.logical_or(mask, m)
            image[m] = h
            
        label[mask, 2] = 1
        
        label[..., 0] = ~(np.logical_or(label[...,1], label[...,2]))
    
    image += np.random.normal(scale=sigma, size=image.shape)
    image -= np.amin(image)
    image /= np.amax(image)
    
    if rectangles:
        return image, label
    else:
        return image, label[..., 1] 
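
A hedged call sketch; note that np.bool in the excerpt requires a NumPy from the project's era (the alias was removed in NumPy 1.24):

image, label = create_image_and_label(282, 282, cnt=5)
print(image.shape, label.shape)   # (282, 282, 1) (282, 282)
print(image.min(), image.max())   # rescaled into [0, 1]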
Example 74
Project: mmdetection   Author: open-mmlab   File: mean_ap.py    Apache License 2.0 4 votes vote down vote up
def average_precision(recalls, precisions, mode='area'):
    """Calculate average precision (for single or multiple scales).

    Args:
        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
        mode (str): 'area' or '11points', 'area' means calculating the area
            under precision-recall curve, '11points' means calculating
            the average precision of recalls at [0, 0.1, ..., 1]

    Returns:
        float or ndarray: calculated average precision
    """
    no_scale = False
    if recalls.ndim == 1:
        no_scale = True
        recalls = recalls[np.newaxis, :]
        precisions = precisions[np.newaxis, :]
    assert recalls.shape == precisions.shape and recalls.ndim == 2
    num_scales = recalls.shape[0]
    ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'area':
        zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
        ones = np.ones((num_scales, 1), dtype=recalls.dtype)
        mrec = np.hstack((zeros, recalls, ones))
        mpre = np.hstack((zeros, precisions, zeros))
        for i in range(mpre.shape[1] - 1, 0, -1):
            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
        for i in range(num_scales):
            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
            ap[i] = np.sum(
                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
    elif mode == '11points':
        for i in range(num_scales):
            for thr in np.arange(0, 1 + 1e-3, 0.1):
                precs = precisions[i, recalls[i, :] >= thr]
                prec = precs.max() if precs.size > 0 else 0
                ap[i] += prec
            ap[i] /= 11
    else:
        raise ValueError(
            'Unrecognized mode, only "area" and "11points" are supported')
    if no_scale:
        ap = ap[0]
    return ap 
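
A toy, single-scale sanity check (the PR values are invented; 0.66 follows from summing the rectangles under the max-smoothed curve):

import numpy as np

recalls = np.array([0.2, 0.4, 0.4, 0.8, 1.0])
precisions = np.array([1.0, 0.9, 0.6, 0.5, 0.4])
print(average_precision(recalls, precisions, mode='area'))  # approximately 0.66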
Example 75
Project: mmdetection   Author: open-mmlab   File: test_robustness.py    Apache License 2.0 4 votes vote down vote up
def voc_eval_with_return(result_file,
                         dataset,
                         iou_thr=0.5,
                         print_summary=True,
                         only_ap=True):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=print_summary)

    if only_ap:
        eval_results = [{
            'ap': eval_results[i]['ap']
        } for i in range(len(eval_results))]

    return mean_ap, eval_results 
Example 76
Project: subword-qac   Author: clovaai   File: generate.py    MIT License 4 votes vote down vote up
def beam_search(model, hidden, input, best_score, off, beam_size, branching_factor, max_suffix_len):
    bsz = best_score.size(0)
    batch_idx = torch.arange(bsz).to(device)

    prev_beam_idxs = []
    new_token_idxs = []
    end_scores = []
    end_prev_beam_idxs = []

    for i in range(max_suffix_len):
        output, hidden = model(input, hidden=hidden)            # output: (1, batch * beam, ntoken)
        logp = F.log_softmax(output.squeeze(0), 1)              # logp: (batch * beam, t)
        if i == 0 and off is not None:
            logp.masked_fill_(off.unsqueeze(1).repeat(1, beam_size, 1).view(bsz * beam_size, -1), -float('inf'))
        score = logp + best_score.view(-1).unsqueeze(1)     # score: (batch * beam, t)

        end_score = score[:, 2].view(-1, beam_size)
        prev_end_score = end_scores[-1] if i > 0 else \
            torch.zeros((bsz, beam_size), dtype=torch.float).fill_(-float('inf')).to(device)
        end_score, end_prev_beam_idx = torch.cat((end_score, prev_end_score), 1).sort(-1, descending=True)
        end_score = end_score[:,:beam_size]                     # end_score: (batch, beam)
        end_prev_beam_idx = end_prev_beam_idx[:, :beam_size]    # end_prev_beam_idx: (batch, beam)
        end_scores.append(end_score)
        end_prev_beam_idxs.append(end_prev_beam_idx)
        score[:, 2].fill_(-float('inf'))

        val, idx0 = score.topk(branching_factor, 1)             # (batch * beam, f)
        val = val.view(-1, beam_size * branching_factor)        # (batch, beam * f)
        idx0 = idx0.view(-1, beam_size * branching_factor)      # (batch, beam * f)
        best_score, idx1 = val.topk(beam_size, 1)               # (batch, beam * f) -> (batch, beam)

        prev_beam_idx = idx1 // branching_factor                # (batch, beam)
        new_token_idx = idx0.gather(1, idx1)                    # (batch, beam)
        prev_beam_idxs.append(prev_beam_idx)
        new_token_idxs.append(new_token_idx)
        input = new_token_idx.view(1, -1)
        hidden_idx = (prev_beam_idx + batch_idx.unsqueeze(1).mul(beam_size)).view(-1)
        hidden = [(h.index_select(0, hidden_idx), c.index_select(0, hidden_idx)) for h, c in hidden]

        if (best_score[:, 0] < end_score[:, -1]).all():
            break

    max_suffix_len = i + 1
    tokens = torch.ones(bsz, beam_size, max_suffix_len, dtype=torch.long).to(device).mul(2) # tokens: (batch, beam, L)
    pos = (beam_size + torch.arange(beam_size)).unsqueeze(0).repeat(bsz, 1).to(device)      # pos: (batch, beam)
    for i in reversed(range(max_suffix_len)):
        end = pos >= beam_size
        for j in range(bsz):
            tokens[j, 1 - end[j], i] = new_token_idxs[i][j, pos[j, 1 - end[j]]]
            pos[j][1 - end[j]] = prev_beam_idxs[i][j, pos[j, 1 - end[j]]]
            pos[j][end[j]] = end_prev_beam_idxs[i][j, pos[j, end[j]] - beam_size]
    decode_len = (tokens != 2).sum(2).max(1)[0]
    return tokens, end_scores[-1], decode_len 
Example 77
Project: neural-fingerprinting   Author: StephanZheng   File: adaptive_attacks.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def adaptive_fast_gradient_sign_method(sess, model, X, Y, eps, clip_min=None,
                              clip_max=None, batch_size=256, log_dir = None,
                                       model_logits = None, binary_steps = 2,
                                        dataset="cifar"):
    """
    Adaptive FGSM: repeats the attack over `binary_steps` rounds of binary
    search on the per-sample weight alpha, keeping the best adversarial set.
    :param sess:
    :param model: predictions or after-softmax
    :param X:
    :param Y:
    :param eps:
    :param clip_min:
    :param clip_max:
    :param batch_size:
    :return:
    """
    # Define TF placeholders for the input and output
    x = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,) + Y.shape[1:])
    alpha = tf.placeholder(tf.float32, shape=(None,) + (1,))
    num_samples = np.shape(X)[0]
    ALPHA = 0.1*np.ones((num_samples,1))
    ub = 10.0*np.ones(num_samples)
    lb = 0.0*np.ones(num_samples)
    Best_X_adv = None
    for i in range(binary_steps):
        adv_x = adaptive_fgsm(
            x, model(x), eps=eps,
            clip_min=clip_min,
            clip_max=clip_max, y=y,
            log_dir= log_dir,
            model_logits = model_logits,
            alpha = alpha
        )

        X_adv = batch_eval(
            sess, [x, y, alpha], [adv_x],
            [X, Y, ALPHA], feed={},
            args={'batch_size': batch_size}
        )
        X_adv = np.array(X_adv[0])
        if(i==0):
            Best_X_adv = X_adv

        ALPHA, Best_X_adv = binary_refinement(sess,Best_X_adv,
                      X_adv, Y, ALPHA, ub, lb, model, dataset)

    return Best_X_adv 
Example 78
Project: deep-learning-note   Author: wdxtub   File: 5_nueral_network.py    MIT License 4 votes vote down vote up
def backprop(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # initializations
    J = 0
    delta1 = np.zeros(theta1.shape)  # (25, 401)
    delta2 = np.zeros(theta2.shape)  # (10, 26)
    
    # compute the cost
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))
    
    # perform backpropagation
    for t in range(m):
        a1t = a1[t,:]  # (1, 401)
        z2t = z2[t,:]  # (1, 25)
        a2t = a2[t,:]  # (1, 26)
        ht = h[t,:]  # (1, 10)
        yt = y[t,:]  # (1, 10)
        
        d3t = ht - yt  # (1, 10)
        
        z2t = np.insert(z2t, 0, values=np.ones(1))  # (1, 26)
        d2t = np.multiply((theta2.T * d3t.T).T, sigmoid_gradient(z2t))  # (1, 26)
        
        delta1 = delta1 + (d2t[:,1:]).T * a1t
        delta2 = delta2 + d3t.T * a2t
        
    delta1 = delta1 / m
    delta2 = delta2 / m
    
    # add the gradient regularization term
    delta1[:,1:] = delta1[:,1:] + (theta1[:,1:] * learning_rate) / m
    delta2[:,1:] = delta2[:,1:] + (theta2[:,1:] * learning_rate) / m
    
    # unravel the gradient matrices into a single array
    grad = np.concatenate((np.ravel(delta1), np.ravel(delta2)))
    
    return J, grad 
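
A hedged smoke test on a tiny 4-3-2 network; the sigmoid and sigmoid_gradient helpers are the standard ones the excerpt assumes, and forward_propagate is the function from Example 61:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_gradient(z):
    s = sigmoid(z)
    return np.multiply(s, 1 - s)

input_size, hidden_size, num_labels = 4, 3, 2
n_params = hidden_size * (input_size + 1) + num_labels * (hidden_size + 1)
params = np.random.randn(n_params) * 0.1
X = np.random.rand(5, input_size)
y = np.eye(num_labels)[np.random.randint(num_labels, size=5)]  # one-hot rows

J, grad = backprop(params, input_size, hidden_size, num_labels, X, y,
                   learning_rate=1.0)
print(J, grad.shape)  # scalar cost, (23,) gradient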
Example 79
Project: mlearn   Author: materialsvirtuallab   File: describers.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def describe_all(self, structures, include_stress=False):
        """
        Returns data for all input structures in a single DataFrame.

        Args:
            structures (Structure): Input structures as a list.
            include_stress (bool): Whether to include stress descriptors.

        Returns:
            DataFrame with indices of input list preserved. To retrieve
            the data for structures[i], use
            df.xs(i, level='input_index').

        """
        columns = list(map(lambda s: '-'.join(['%d' % i for i in s]),
                           self.subscripts))
        if self.quadratic:
            columns += list(map(lambda s: '-'.join(['%d%d%d' % (i, j, k)
                                                    for i, j, k in s]),
                                itertools.combinations_with_replacement(self.subscripts, 2)))

        raw_data = self.calculator.calculate(structures)

        def process(output, combine, idx, include_stress):
            b, db, vb, e = output
            df = pd.DataFrame(b, columns=columns)
            if combine:
                df_add = pd.DataFrame({'element': e, 'n': np.ones(len(e))})
                df_b = df_add.join(df)
                n_atoms = df_b.shape[0]
                b_by_el = [df_b[df_b['element'] == e] for e in self.elements]
                sum_b = [df[df.columns[1:]].sum(axis=0) for df in b_by_el]
                hstack_b = pd.concat(sum_b, keys=self.elements)
                hstack_b = hstack_b.to_frame().T / n_atoms
                hstack_b.fillna(0, inplace=True)
                dbs = np.split(db, len(self.elements), axis=1)
                dbs = np.hstack([np.insert(d.reshape(-1, len(columns)),
                                           0, 0, axis=1) for d in dbs])
                db_index = ['%d_%s' % (i, d)
                            for i in df_b.index for d in 'xyz']
                df_db = pd.DataFrame(dbs, index=db_index,
                                     columns=hstack_b.columns)
                if include_stress:
                    vbs = np.split(vb.sum(axis=0), len(self.elements))
                    vbs = np.hstack([np.insert(v.reshape(-1, len(columns)),
                                               0, 0, axis=1) for v in vbs])
                    volume = structures[idx].volume
                    vbs = vbs / volume * 160.21766208  # from eV to GPa
                    vb_index = ['xx', 'yy', 'zz', 'yz', 'xz', 'xy']
                    df_vb = pd.DataFrame(vbs, index=vb_index,
                                         columns=hstack_b.columns)
                    df = pd.concat([hstack_b, df_db, df_vb])
                else:
                    df = pd.concat([hstack_b, df_db])
            return df

        df = pd.concat([process(d, self.pot_fit, i, include_stress)
                        for i, d in enumerate(raw_data)],
                       keys=range(len(raw_data)), names=["input_index", None])
        return df 
Example 80
Project: core   Author: lifemapper   File: mcpa.py    GNU General Public License v3.0 4 votes vote down vote up
def _standardize_matrix(mtx, weights):
    """Standardizes a phylogenetic or predictor matrix

    Args:
        mtx (numpy array): The matrix to standardize
        weights (numpy array): A one-dimensional array of sums to use for
            standardization.

    Note:
        * Formula for standardization ::
            Mstd = (M - 1c.(1r.W.M)(1/trace(W)))
                   ./ 1c.(((1r.W.(M*M)) - (1r.W.M)*(1r.W.M)(1/trace(W)))(1/trace(W)))^0.5
        * M - Matrix to be standardized
        * W - A k by k diagonal matrix of weights, where each non-zero value is
            the column or row sum (depending on the M) for an incidence matrix.
        * 1r - A row of k ones
        * 1c - A column of k ones
        * trace - Returns the sum of the input matrix
        * "./" indicates Hadamard division
        * "*" indicates Hadamard multiplication
        * Code adopted from supplemental material MATLAB code

    See:
        Literature supplemental materials
    """
    # Create a row of ones, we'll transpose for a column
    ones = np.ones((1, weights.shape[0]), dtype=float)
    # This maps to trace(W)
    total_sum = np.sum(weights)
    
    # s1 = 1r.W.M
    s1 = (ones * weights).dot(mtx)
    # s2 = 1r.W.(M*M)
    s2 = (ones * weights).dot(mtx*mtx)
    
    mean_weighted = s1 / total_sum
    std_dev_weighted = ((s2 - (s1**2.0 / total_sum)) / (total_sum))**0.5
    
    # Fixes any invalid values created previously
    tmp = np.nan_to_num(ones.T.dot(std_dev_weighted)**-1.0)
    std_mtx = tmp * (mtx - ones.T.dot(mean_weighted))
    
    return std_mtx

# .............................................................................
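
A hedged check of the helper: with uniform weights the operation reduces to ordinary column-wise standardization, so the standardized columns should have mean zero.

import numpy as np

mtx = np.random.rand(6, 3)
weights = np.ones(6)
std_mtx = _standardize_matrix(mtx, weights)
print(np.allclose(std_mtx.mean(axis=0), 0))  # True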