Python numpy.ones() Examples

The following code examples show how to use numpy.ones(). They are taken from open source Python projects.
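
Before the project examples, here is a minimal, self-contained sketch of the basic call signature (a shape, plus an optional dtype). This snippet is illustrative only and is not taken from any of the projects listed below.

import numpy as np

# 1-D array of five float64 ones
a = np.ones(5)

# 2-D array with an explicit integer dtype
b = np.ones((2, 3), dtype=np.int64)

# ones_like() matches the shape and dtype of an existing array
c = np.ones_like(b)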

Example 1
Project: b2ac   Author: hbldh   File: reference.py    MIT License
def _calculate_scatter_matrix_py(x, y):
    """Calculates the complete scatter matrix for the input coordinates.

    :param x: The x coordinates.
    :type x: :py:class:`numpy.ndarray`
    :param y: The y coordinates.
    :type y: :py:class:`numpy.ndarray`
    :return: The complete scatter matrix.
    :rtype: :py:class:`numpy.ndarray`

    """
    D = np.ones((len(x), 6), dtype=x.dtype)
    D[:, 0] = x * x
    D[:, 1] = x * y
    D[:, 2] = y * y
    D[:, 3] = x
    D[:, 4] = y

    return D.T.dot(D) 
Example 2
Project: b2ac   Author: hbldh   File: double.py    MIT License
def _calculate_scatter_matrix_double(x, y):
    """Calculates the complete scatter matrix for the input coordinates.

    :param x: The x coordinates.
    :type x: :py:class:`numpy.ndarray`
    :param y: The y coordinates.
    :type y: :py:class:`numpy.ndarray`
    :return: The complete scatter matrix.
    :rtype: :py:class:`numpy.ndarray`

    """
    D = np.ones((len(x), 6), 'int64')
    D[:, 0] = x * x
    D[:, 1] = x * y
    D[:, 2] = y * y
    D[:, 3] = x
    D[:, 4] = y

    return D.T.dot(D) 
Example 3
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def test_cross_phase_2d(self, dask):
        Ny, Nx = (32, 16)
        x = np.linspace(0, 1, num=Nx, endpoint=False)
        y = np.ones(Ny)
        f = 6
        phase_offset = np.pi/2
        signal1 = np.cos(2*np.pi*f*x)  # frequency = 1/(2*pi)
        signal2 = np.cos(2*np.pi*f*x - phase_offset)
        da1 = xr.DataArray(data=signal1*y[:,np.newaxis], name='a',
                          dims=['y','x'], coords={'y':y, 'x':x})
        da2 = xr.DataArray(data=signal2*y[:,np.newaxis], name='b',
                          dims=['y','x'], coords={'y':y, 'x':x})
        with pytest.raises(ValueError):
            xrft.cross_phase(da1, da2, dim=['y','x'])

        if dask:
            da1 = da1.chunk({'x': 16})
            da2 = da2.chunk({'x': 16})
        cp = xrft.cross_phase(da1, da2, dim=['x'])
        actual_phase_offset = cp.sel(freq_x=f).values
        npt.assert_almost_equal(actual_phase_offset, phase_offset) 
Example 4
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__init_manager.py    MIT License
def init_topics_KV__rand_active_words(
        n_states=10,
        frac_words_active=0.5,
        blend_frac_active=0.5,
        n_vocabs=144,
        seed=0):
    prng = np.random.RandomState(int(seed))
    unif_topics_KV = np.ones((n_states, n_vocabs)) / float(n_vocabs)
    active_topics_KV = np.zeros((n_states, n_vocabs))
    for k in xrange(n_states):
        active_words_U = prng.choice(
            np.arange(n_vocabs, dtype=np.int32),
            int(frac_words_active * n_vocabs),
            replace=False)
        active_topics_KV[k, active_words_U] = 1.0 / active_words_U.size
    topics_KV = (1 - blend_frac_active) * unif_topics_KV \
        + blend_frac_active * active_topics_KV
    topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    return topics_KV 
Example 5
Project: prediction-constrained-topic-models   Author: dtak   File: slda_utils__init_manager.py    MIT License
def init_topics_KV__rand_docs(
        dataset=None,
        n_states=10,
        n_vocabs=144,
        blend_frac_doc=0.5,
        seed=0):
    prng = np.random.RandomState(int(seed))
    unif_topics_KV = np.ones((n_states, n_vocabs)) / float(n_vocabs)
    doc_KV = np.zeros((n_states, n_vocabs))
    chosen_doc_ids = prng.choice(
        np.arange(dataset['n_docs'], dtype=np.int32),
        n_states,
        replace=False)
    for k in xrange(n_states):
        start_d = dataset['doc_indptr_Dp1'][chosen_doc_ids[k]]
        stop_d = dataset['doc_indptr_Dp1'][chosen_doc_ids[k] + 1]
        active_words_U = dataset['word_id_U'][start_d:stop_d]
        doc_KV[k, active_words_U] = dataset['word_ct_U'][start_d:stop_d]
    doc_KV /= doc_KV.sum(axis=1)[:,np.newaxis]
    topics_KV = (1 - blend_frac_doc) * unif_topics_KV \
        + blend_frac_doc * doc_KV
    topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
    return topics_KV 
Example 6
Project: prediction-constrained-topic-models   Author: dtak   File: calc_N_d_K__vb_qpiDir_qzCat.py    MIT License
def make_initial_P_d_K(
        init_name,
        prng=np.random,
        alpha_K=None,
        init_P_d_K_list=None):
    K = alpha_K.size

    if init_name.count('warm'):
        return init_P_d_K_list.pop()
    elif init_name.count('uniform_sample'):
        return prng.dirichlet(np.ones(K))
    elif init_name.count('prior_sample'):
        return prng.dirichlet(alpha_K)
    elif init_name.count("prior_mean"):
        return alpha_K / np.sum(alpha_K) #np.zeros(K, dtype=alpha_K.dtype)
    else:
        raise ValueError("Unrecognized vb lstep_init_name: " + init_name) 
Example 7
Project: FRIDA   Author: LCAV   File: point_cloud.py    MIT License
def classical_mds(self, D):
        ''' 
        Classical multidimensional scaling

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing squared distances between points)
        '''

        # Apply MDS algorithm for denoising
        n = D.shape[0]
        J = np.eye(n) - np.ones((n,n))/float(n)
        G = -0.5*np.dot(J, np.dot(D, J))

        s, U = np.linalg.eig(G)

        # we need to sort the eigenvalues in decreasing order
        s = np.real(s)
        o = np.argsort(s)
        s = s[o[::-1]]
        U = U[:,o[::-1]]

        S = np.diag(s)[0:self.dim,:]
        self.X = np.dot(np.sqrt(S),U.T) 
Example 8
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: baseop.py    MIT License
def wrap_variable(self, var):
        """wrap layer.w into variables"""
        val = self.lay.w.get(var, None)
        if val is None:
            shape = self.lay.wshape[var]
            args = [0., 1e-2, shape]
            if 'moving_mean' in var:
                val = np.zeros(shape)
            elif 'moving_variance' in var:
                val = np.ones(shape)
            else:
                val = np.random.normal(*args)
            self.lay.w[var] = val.astype(np.float32)
            self.act = 'Init '
        if not self.var: return

        val = self.lay.w[var]
        self.lay.w[var] = tf.constant_initializer(val)
        if var in self._SLIM: return
        with tf.variable_scope(self.scope):
            self.lay.w[var] = tf.get_variable(var,
                shape = self.lay.wshape[var],
                dtype = tf.float32,
                initializer = self.lay.w[var]) 
Example 9
Project: DataHack2018   Author: InnovizTech   File: vis.py    BSD 3-Clause "New" or "Revised" License
def draw_cuboids(self, boxes):
        for box_idx, box in enumerate(boxes):
            color = box['color'] if hasattr(box, 'color') else np.ones(4)
            size = box['size']
            translation = box['translation']
            rotation = box['rotation'] / np.pi * 180.
            try:
                text = box['text']
            except KeyError:
                text = ''

            if box_idx < len(self._cuboids):
                self._cuboids[box_idx].show()
                self._cuboids[box_idx].update_values(size, translation, rotation, color, text)
            else:
                self._cuboids.append(Cuboid(size, translation, rotation, color, text))
        for c in self._cuboids[len(boxes):]:
            c.hide() 
Example 10
Project: ML_from_scratch   Author: jarfa   File: RegressionTree.py    Apache License 2.0
def predict(self, data):
        if self.is_terminal():
            return self.mean_predict

        predictions = np.ones(len(data)) * self.mean_predict
        split_indices = data[:, self.split_feature] >= self.split_value

        if self.children[0] is not None:
            # ~ inverts the boolean mask (unary minus on boolean arrays is an error in modern NumPy)
            predictions[~split_indices] = self.children[0].predict(
                data[~split_indices, :])

        if self.children[1] is not None:
            predictions[split_indices] = self.children[1].predict(
                data[split_indices, :])

        return predictions 
Example 11
Project: fip-walkgen   Author: stephane-caron   File: swing_foot_control.py    GNU General Public License v3.0
def create_topp_instance(self):
        assert self.path is not None, "interpolate a path first"
        amax = self.max_foot_accel * ones(self.path.dimension)
        id_traj = "1.0\n1\n0.0 1.0"
        discrtimestep = self.discrtimestep
        ndiscrsteps = int((self.path.duration + 1e-10) / discrtimestep) + 1
        constraints = str(discrtimestep)
        constraints += "\n" + str(0.)  # no velocity limit
        for i in range(ndiscrsteps):
            s = i * discrtimestep
            ps = self.path.Evald(s)
            pss = self.path.Evaldd(s)
            constraints += "\n" + vect2str(+ps) + " " + vect2str(-ps)
            constraints += "\n" + vect2str(+pss) + " " + vect2str(-pss)
            constraints += "\n" + vect2str(-amax) + " " + vect2str(-amax)
        self.topp = TOPP.TOPPbindings.TOPPInstance(
            None, "QuadraticConstraints", constraints, id_traj)
        self.topp.integrationtimestep = 1e-3 
Example 12
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_util.py    MIT License
def __init__(self, wave_len=254, wave_dif=64, buffer_size=5, loop_num=5, window=np.hanning(254)):
        self.wave_len = wave_len
        self.wave_dif = wave_dif
        self.buffer_size = buffer_size
        self.loop_num = loop_num
        self.window = window

        self.wave_buf = np.zeros(wave_len+wave_dif, dtype=float)
        self.overwrap_buf = np.zeros(wave_dif*buffer_size+(wave_len-wave_dif), dtype=float)
        self.spectrum_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        self.absolute_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        
        self.phase = np.zeros(self.wave_len, dtype=complex)
        self.phase += np.random.random(self.wave_len)-0.5 + np.random.random(self.wave_len)*1j - 0.5j
        self.phase[self.phase == 0] = 1
        self.phase /= np.abs(self.phase) 
Example 13
Project: LSTM-diagnosis   Author: jfzhang95   File: layers.py    MIT License
def __init__(self, input_size, time_steps, momentum=0.1, epsilon=1e-6):
        self.gamma = theano.shared(np.ones(input_size, dtype=np.float32))
        self.beta = theano.shared(np.zeros(input_size, dtype=np.float32))
        self.params = [self.gamma, self.beta]

        self.epsilon = epsilon
        self.momentum = momentum
        self.shared_state = False
        self.train = True
        if not hasattr(BN, 'self.running_mean'):
            self.running_mean = theano.shared(np.zeros((time_steps, input_size), theano.config.floatX))

        if hasattr(BN, 'self.params'):
           print 'you'

        if not hasattr(BN, 'self.running_std'):
            self.running_std = theano.shared(np.zeros((time_steps, input_size), theano.config.floatX)) 
Example 14
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    MIT License
def one_vs_all(X, y, num_labels, learning_rate):
    rows = X.shape[0]
    params = X.shape[1]
    
    # k X (n + 1) array for the parameters of each of the k classifiers
    all_theta = np.zeros((num_labels, params + 1))
    
    # insert a column of ones at the beginning for the intercept term
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # labels are 1-indexed instead of 0-indexed
    for i in range(1, num_labels + 1):
        theta = np.zeros(params + 1)
        y_i = np.array([1 if label == i else 0 for label in y])
        y_i = np.reshape(y_i, (rows, 1))
        
        # minimize the objective function
        fmin = minimize(fun=cost, x0=theta, args=(X, y_i, learning_rate), method='TNC', jac=gradient)
        all_theta[i-1,:] = fmin.x
    
    return all_theta 
Example 15
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    MIT License
def predict_all(X, all_theta):
    rows = X.shape[0]
    params = X.shape[1]
    num_labels = all_theta.shape[0]
    
    # same as before, insert ones to match the shape
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # convert to matrices
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)
    
    # compute the class probability for each class on each training instance
    h = sigmoid(X * all_theta.T)
    
    # create array of the index with the maximum probability
    h_argmax = np.argmax(h, axis=1)
    
    # because our array was zero-indexed we need to add one for the true label prediction
    h_argmax = h_argmax + 1
    
    return h_argmax 
Example 16
Project: deep-learning-note   Author: wdxtub   File: 6_bias_variance.py    MIT License
def prepare_poly_data(*args, power):
    """
    args: keep feeding in X, Xval, or Xtest
        will return in the same order
    """
    def prepare(x):
        # expand feature
        df = poly_features(x, power=power)

        # normalization
        ndarr = normalize_feature(df).as_matrix()

        # add intercept term
        return np.insert(ndarr, 0, np.ones(ndarr.shape[0]), axis=1)

    return [prepare(x) for x in args] 
Example 17
Project: deep-learning-note   Author: wdxtub   File: mnist_projector_generate.py    MIT License
def create_sprite_image(images):
    if isinstance(images, list):
        images = np.array(images)
    img_h = images.shape[1]
    img_w = images.shape[2]
    # the sprite image can be thought of as one large square grid tiled with all the small images
    m = int(np.ceil(np.sqrt(images.shape[0])))

    # initialize the final large image with all ones
    sprite_image = np.ones((img_h*m, img_w*m))

    for i in range(m):
        for j in range(m):
            # compute the index of the current small image
            cur = i * m + j
            if cur < images.shape[0]:
                # copy the contents of the small image into the final sprite image
                sprite_image[i*img_h:(i+1)*img_h,
                             j*img_w:(j+1)*img_w] = images[cur]
    return sprite_image

# Load the MNIST data with one_hot=False so that each label is a single number rather than a one-hot vector
Example 18
Project: b2ac   Author: hbldh   File: inverse_iteration.py    MIT License
def inverse_iteration_for_eigenvector_double(A, eigenvalue, n_iterations=1):
    """Performs a series of inverse iteration steps with a known
    eigenvalue to produce its eigenvector.

    :param A: The 3x3 matrix to which the eigenvalue belongs.
    :type A: :py:class:`numpy.ndarray`
    :param eigenvalue: One eigenvalue of the matrix A.
    :type eigenvalue: float
    :param n_iterations: Number of iterations to perform the multiplication
     with the inverse. For an accurate eigenvalue, one iteration is enough
     to obtain an eigenvector accurate to about 1e-6. More than five is usually unnecessary.
    :type n_iterations: int
    :return: The eigenvector of this matrix and eigenvalue combination.
    :rtype: :py:class:`numpy.ndarray`

    """
    A = np.array(A, 'float')
    # Subtract the eigenvalue from the diagonal entries of the matrix.
    # N.B. Also slightly perturb the eigenvalue so the matrix will
    # not be so close to singular!
    for k in xrange(A.shape[0]):
        A[k, k] -= eigenvalue + 0.001
    # Obtain the inverse of the matrix.
    A_inv = mo.inverse_3by3_double(A).reshape((3, 3))
    # Instantiate the eigenvector to iterate with.
    eigenvector = np.ones((A.shape[0], ), 'float')
    eigenvector /= np.linalg.norm(eigenvector)
    # Perform the desired number of iterations.
    for k in xrange(n_iterations):
        eigenvector = np.dot(A_inv, eigenvector)
        eigenvector /= np.linalg.norm(eigenvector)

    if np.any(np.isnan(eigenvector)) or np.any(np.isinf(eigenvector)):
        print("Nan and/or Infs in eigenvector!")

    if (eigenvector[0] < 0) and (eigenvector[2] < 0):
        eigenvector = -eigenvector
    return eigenvector 
Example 19
Project: projection-methods   Author: akshayka   File: test_stability.py    GNU General Public License v3.0
def test_projection(self):
        """Test whether CVXPY works as expected wrt NumPy shapes."""
        x = cvxpy.Variable(1000)
        x_0 = np.ones(1000)
        soc_constr = [cvxpy.norm(x[:-1], 2) <= x[-1]]

        obj = cvxpy.Minimize(cvxpy.norm(x_0 - x, 2))
        prob = cvxpy.Problem(obj, soc_constr)
        prob.solve(solver=cvxpy.ECOS)
        self.assertTrue(x.value.shape == (1000, 1))
        x_star = np.array(x.value).flatten()
        self.assertTrue(x_star.shape == (1000,), x_star.shape)
        self.assertEqual(np.linalg.norm(x_0 - x_star, 2), obj.value)
        self.assertTrue(np.isclose(prob.value, obj.value, atol=1e-7)) 
Example 20
Project: projection-methods   Author: akshayka   File: polyak.py    GNU General Public License v3.0
def solve(self, problem):
        left_set = problem.sets[0]
        right_set = problem.sets[1]

        iterate = (self._initial_iterate if
            self._initial_iterate is not None else np.ones(problem.dimension))
        iterates = [iterate]
        residuals = []

        status = Optimizer.Status.INACCURATE
        for i in xrange(self.max_iters):
            if self.verbose:
                print 'iteration %d' % i
            x_k = iterates[-1]
            x_k_1 = left_set.project(x_k)
            tmp = right_set.project(x_k)

            residuals.append(self._compute_residual(x_k, x_k_1, tmp))
            if self.verbose:
                print '\tresidual: %e' % sum(residuals[-1])
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break

            x_k_2 = right_set.project(x_k_1)
            x_k_3 = left_set.project(x_k_2)

            lambda_k = (np.linalg.norm(x_k_1 - x_k_2, ord=2)**2) / (
                np.dot(x_k_1 - x_k_3, x_k_1 - x_k_2))
            x_k_4 = x_k_1 + lambda_k * (x_k_3 - x_k_1)
            
            if self.momentum is not None:
                iterate = heavy_ball_update(
                    iterates=iterates, velocity=x_k_4-x_k,
                    alpha=self.momentum['alpha'],
                    beta=self.momentum['beta'])
            iterates.append(x_k_4)

        return iterates, residuals, status 
Example 21
Project: projection-methods   Author: akshayka   File: avgp.py    GNU General Public License v3.0
def solve(self, problem):
        left_set = problem.sets[0]
        right_set = problem.sets[1]

        iterate = (self._initial_iterate if
            self._initial_iterate is not None else np.ones(problem.dimension))
        iterates = [iterate]
        residuals = []

        status = Optimizer.Status.INACCURATE
        for i in xrange(self.max_iters):
            if self.verbose:
                print 'iteration %d' % i
            x_k = iterates[-1]
            y_k = left_set.project(x_k)
            z_k = right_set.project(x_k)

            residuals.append(self._compute_residual(x_k, y_k, z_k))
            if self.verbose:
                print '\tresidual: %e' % sum(residuals[-1])
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break
            x_k_plus = 0.5 * (y_k + z_k)

            if self.momentum is not None:
                iterate = heavy_ball_update(
                    iterates=iterates, velocity=x_k_plus-x_k,
                    alpha=self.momentum['alpha'],
                    beta=self.momentum['beta'])
            iterates.append(x_k_plus)

        return iterates, residuals, status 
Example 22
Project: projection-methods   Author: akshayka   File: dykstra.py    GNU General Public License v3.0
def solve(self, problem):
        left_set = problem.sets[0]
        right_set = problem.sets[1]

        iterate = (self._initial_iterate if
            self._initial_iterate is not None else np.ones(problem.dimension))

        # (p_n), (q_n) are the auxiliary sequences, (a_n), (b_n) the main
        # sequences, defined in Bauschke's 98 paper
        # (Dykstra's Alternating Projection Algorithm for Two Sets).
        self.p = [None] * (self.max_iters + 1)
        self.q = [None] * (self.max_iters + 1)
        self.a = [None] * (self.max_iters + 1)
        self.b = [None] * (self.max_iters + 1)
        zero_vector = np.zeros(problem.dimension)
        self.p[0] = self.q[0] = zero_vector
        self.b[0] = self.a[0] = iterate
        residuals = []

        status = Optimizer.Status.INACCURATE
        for n in xrange(1, self.max_iters + 1):
            if self.verbose:
                print 'iteration %d' % n
            # TODO(akshayka): Robust stopping criterion
            residuals.append(self._compute_residual(
                self.b[n-1], left_set, right_set))
            if self.verbose:
                print '\tresidual: %e' % sum(residuals[-1])
            if self._is_optimal(residuals[-1]):
                status = Optimizer.Status.OPTIMAL
                if not self.do_all_iters:
                    break

            self.a[n] = left_set.project(self.b[n-1] + self.p[n-1])
            self.b[n] = right_set.project(self.a[n] + self.q[n-1])
            self.p[n] = self.b[n-1] + self.p[n-1] - self.a[n]
            self.q[n] = self.a[n] + self.q[n-1] - self.b[n]

        # TODO(akshayka): does it matter if I return self.b vs self.a?
        # the first implementation returned self.a ...
        return self.b, residuals, status 
Example 23
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: spharafilter.py    BSD 3-Clause "New" or "Revised" License
def specification(self, specification):
        if isinstance(specification, (int)):
            if np.abs(specification) > self._triangsamples.vertlist.shape[0]:
                raise ValueError("""The number of selected basis functions is
                too large.""")
            else:
                if specification == 0:
                    self._specification = \
                        np.ones(self._triangsamples.vertlist.shape[0])
                else:
                    self._specification = \
                        np.zeros(self._triangsamples.vertlist.shape[0])
                    if specification > 0:
                        self._specification[:specification] = 1
                    else:
                        self._specification[specification:] = 1
        elif isinstance(specification, (list, tuple, np.ndarray)):
            specification = np.asarray(specification)
            if specification.shape[0] != self._triangsamples.vertlist.shape[1]:
                raise IndexError("""The length of the specification vector
                does not match the number of spatial sample points. """)
            else:
                self._specification = specification
        else:
            raise TypeError("""The parameter specification has to be
            an int or a vector""")
Example 24
Project: DnD4py   Author: bacook17   File: roll4me.py    MIT License
def parse_roll(str_in):
    str_in = str_in.lower()
    if 'd' in str_in:
        n, d = [int(s) for s in str_in.split('d')]
        results = np.random.randint(low=1, high=d+1, size=(n_trials, n))
        mean = n * 0.5 * (d+1)
        return results[0].sum(), str(results[0]), mean, results.sum(axis=1)
    else:
        return int(str_in), '{:s}'.format(str_in), int(str_in), np.ones(n_trials)*int(str_in) 
Example 25
Project: building-boundary   Author: Geodan   File: segmentation.py    MIT License
def boundary_segmentation(points, distance):
    """
    Extract linear segments using RANSAC.

    Parameters
    ----------
    points : (Mx2) array
        The coordinates of the points.
    distance : float
        The maximum distance between a point and a line for a point to be
        considered belonging to that line.

    Returns
    -------
    segments : list of array
        The linear segments.
    """
    points_shifted = points.copy()
    shift = np.min(points_shifted, axis=0)
    points_shifted -= shift

    mask = np.ones(len(points_shifted), dtype=np.bool)
    indices = np.arange(len(points_shifted))

    segments = []
    extract_segments(segments, points_shifted, indices, mask, distance)

    segments = [points_shifted[i]+shift for i in segments]

    return segments 
Example 26
Project: fenics-topopt   Author: zfergus   File: solver.py    MIT License
def __init__(self, nelx, nely, volfrac, penal, rmin, ft, gui, bc):
        self.n = nelx * nely
        self.opt = nlopt.opt(nlopt.LD_MMA, self.n)
        self.passive = bc.get_passive_elements()
        self.xPhys = np.ones(self.n)
        if self.passive is not None:
            self.xPhys[self.passive] = 0

        # set bounds
        ub = np.ones(self.n, dtype=float)
        self.opt.set_upper_bounds(ub)
        lb = np.zeros(self.n, dtype=float)
        self.opt.set_lower_bounds(lb)

        # set stopping criteria
        self.opt.set_maxeval(2000)
        self.opt.set_ftol_rel(0.001)

        # set objective and constraint functions
        self.opt.set_min_objective(self.compliance_function)
        self.opt.add_inequality_constraint(self.volume_function, 0)

        # setup filter
        self.ft = ft
        self.filtering = Filter(nelx, nely, rmin)

        # setup problem def
        self.init_problem(nelx, nely, penal, bc)
        self.volfrac = volfrac

        # set GUI callback
        self.init_gui(gui) 
Example 27
Project: fenics-topopt   Author: zfergus   File: problem.py    MIT License
def build_indices(self, nelx, nely):
        """ FE: Build the index vectors for the COO matrix format. """
        self.KE = self.lk()
        self.edofMat = np.zeros((nelx * nely, 8), dtype=int)
        for elx in range(nelx):
            for ely in range(nely):
                el = ely + elx * nely
                n1 = (nely + 1) * elx + ely
                n2 = (nely + 1) * (elx + 1) + ely
                self.edofMat[el, :] = np.array([2 * n1 + 2, 2 * n1 + 3,
                    2 * n2 + 2, 2 * n2 + 3, 2 * n2, 2 * n2 + 1, 2 * n1,
                    2 * n1 + 1])
        # Construct the index pointers for the coo format
        self.iK = np.kron(self.edofMat, np.ones((8, 1))).flatten()
        self.jK = np.kron(self.edofMat, np.ones((1, 8))).flatten() 
Example 28
Project: fenics-topopt   Author: zfergus   File: tower.py    MIT License
def main(nelx, nely, volfrac, penal, rmin, ft):
    print("Minimum compliance problem with MMA")
    print("ndes: {:d} x {:d}".format(nelx, nely))
    print("volfrac: {:g}, rmin: {:g}, penal: {:g}".format(volfrac, rmin, penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])

    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)

    title = "ndes: {:d} x {:d}\nvolfrac: {:g}, rmin: {:g}, penal: {:g}".format(
        nelx, nely, volfrac, rmin, penal)
    gui    = GUI(nelx, nely, title)
    solver = LessTolerantSolver(nelx, nely, volfrac, penal, rmin, ft, gui,
        BoundaryConditions(nelx, nely))
    x_opt  = solver.optimize(x)
    x_opt  = solver.filter_variables(x_opt)
    gui.update(x_opt)

    from PIL import Image
    # clip() returns a new array instead of modifying in place, so reassign
    x_opt = x_opt.clip(0.0, 1.0)
    x_opt = ((1 - x_opt.reshape(nelx, nely)) * 255).round().astype("uint8").T
    x_opt_sym = x_opt[:, ::-1]
    result = Image.fromarray(np.hstack([x_opt_sym, x_opt]))
    result.save("tmp.png")

    # Make sure the plot stays and that the shell remains
    input("Press any key...") 
Example 29
Project: fenics-topopt   Author: zfergus   File: solver.py    MIT License
def __init__(self, nelx, nely, volfrac, penal, rmin, ft, gui, bc):
        self.n = nelx * nely
        self.opt = nlopt.opt(nlopt.LD_MMA, self.n)
        self.passive = bc.get_passive_elements()
        self.xPhys = np.ones(self.n)
        if self.passive is not None:
            self.xPhys[self.passive] = 0

        # set bounds
        ub = np.ones(self.n, dtype=float)
        self.opt.set_upper_bounds(ub)
        lb = np.zeros(self.n, dtype=float)
        self.opt.set_lower_bounds(lb)

        # set stopping criteria
        self.opt.set_maxeval(2000)
        self.opt.set_ftol_rel(0.001)

        # set objective and constraint functions
        self.opt.set_min_objective(self.compliance_function)
        self.opt.add_inequality_constraint(self.volume_function, 0)

        # setup filter
        self.ft = ft
        self.filtering = Filter(nelx, nely, rmin)

        # setup problem def
        self.init_problem(nelx, nely, penal, bc)
        self.volfrac = volfrac

        # set GUI callback
        self.init_gui(gui) 
Example 30
Project: fenics-topopt   Author: zfergus   File: problem.py    MIT License
def build_indices(self, nelx, nely):
        """ FE: Build the index vectors for the COO matrix format. """
        self.KE = self.lk()
        self.edofMat = np.zeros((nelx * nely, 8), dtype=int)
        for elx in range(nelx):
            for ely in range(nely):
                el = ely + elx * nely
                n1 = (nely + 1) * elx + ely
                n2 = (nely + 1) * (elx + 1) + ely
                self.edofMat[el, :] = np.array([2 * n1 + 2, 2 * n1 + 3,
                    2 * n2 + 2, 2 * n2 + 3, 2 * n2, 2 * n2 + 1, 2 * n1,
                    2 * n1 + 1])
        # Construct the index pointers for the coo format
        self.iK = np.kron(self.edofMat, np.ones((8, 1))).flatten()
        self.jK = np.kron(self.edofMat, np.ones((1, 8))).flatten() 
Example 31
Project: fenics-topopt   Author: zfergus   File: bridge.py    MIT License
def main(nelx, nely, volfrac, penal, rmin, ft):
    print("Minimum compliance problem with MMA")
    print("ndes: {:d} x {:d}".format(nelx, nely))
    print("volfrac: {:g}, rmin: {:g}, penal: {:g}".format(volfrac, rmin, penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])

    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)

    title = "ndes: {:d} x {:d}\nvolfrac: {:g}, rmin: {:g}, penal: {:g}".format(
        nelx, nely, volfrac, rmin, penal)
    gui    = GUI(nelx, nely, title)
    solver = LessTolerantSolver(nelx, nely, volfrac, penal, rmin, ft, gui,
        BoundaryConditions(nelx, nely))
    x_opt  = solver.optimize(x)
    x_opt  = solver.filter_variables(x_opt)
    gui.update(x_opt)

    from PIL import Image
    # clip() returns a new array instead of modifying in place, so reassign
    x_opt = x_opt.clip(0.0, 1.0)
    x_opt = ((1 - x_opt.reshape(nelx, nely)) * 255).round().astype("uint8").T
    x_opt_sym = x_opt[:, ::-1]
    result = Image.fromarray(np.hstack([x_opt_sym, x_opt]))
    result.save("tmp.png")

    # Make sure the plot stays and that the shell remains
    input("Press any key...") 
Example 32
Project: fenics-topopt   Author: zfergus   File: L_bracket.py    MIT License
def main(nelx, nely, volfrac, penal, rmin, ft):
    print("Minimum compliance problem with MMA")
    print("ndes: {:d} x {:d}".format(nelx, nely))
    print("volfrac: {:g}, rmin: {:g}, penal: {:g}".format(volfrac, rmin, penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])

    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)

    title = "ndes: {:d} x {:d}\nvolfrac: {:g}, rmin: {:g}, penal: {:g}".format(
        nelx, nely, volfrac, rmin, penal)
    gui    = GUI(nelx, nely, title)
    solver = LessTolerantSolver(nelx, nely, volfrac, penal, rmin, ft, gui,
        LBracketBoundaryConditions(nelx, nely, nelx // 3, 2 * nely // 3))
    x_opt  = solver.optimize(x)
    x_opt  = solver.filter_variables(x_opt)
    gui.update(x_opt)

    from PIL import Image
    # clip() returns a new array instead of modifying in place, so reassign
    x_opt = x_opt.clip(0.0, 1.0)
    x_opt = ((1 - x_opt.reshape(nelx, nely)) * 255).round().astype("uint8").T
    result = Image.fromarray(x_opt)
    result.save("tmp.png")

    # Make sure the plot stays and that the shell remains
    input("Press any key...") 
Example 33
Project: aospy   Author: spencerahill   File: test_data_loader.py    Apache License 2.0
def test_maybe_cast_to_float64(input_dtype, expected_dtype):
    da = xr.DataArray(np.ones(3, dtype=input_dtype))
    result = _maybe_cast_to_float64(da).dtype
    assert result == expected_dtype 
Example 34
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    Apache License 2.0
def ones(shape, dtype=None, name=None):
    '''Instantiates an all-ones variable.
    '''
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name) 
Example 35
Project: xrft   Author: xgcm   File: xrft.py    MIT License
def detrend_wrap(detrend_func):
    """
    Wrapper function for `xrft.detrendn`.
    """
    def func(a, axes=None):
        # Resolve the default before validating, so len() is never called on None.
        if axes is None:
            axes = tuple(range(a.ndim))
        if len(axes) > 3:
            raise ValueError("Detrending is only supported up to "
                            "3 dimensions.")
        if len(set(axes)) < len(axes):
            raise ValueError("Duplicate axes are not allowed.")

        for each_axis in axes:
            if len(a.chunks[each_axis]) != 1:
                raise ValueError('The axis along which the detrending '
                                'is applied cannot be chunked.')

        if len(axes) == 1:
            return dsar.map_blocks(sps.detrend, a, axis=axes[0],
                                   chunks=a.chunks, dtype=a.dtype
                                  )
        else:
            for each_axis in range(a.ndim):
                if each_axis not in axes:
                    if len(a.chunks[each_axis]) != a.shape[each_axis]:
                        raise ValueError("The axes other than the ones being "
                                        "detrended should have a chunk length of 1.")
            return dsar.map_blocks(detrend_func, a, axes,
                                   chunks=a.chunks, dtype=a.dtype
                                  )

    return func 
Example 36
Project: xrft   Author: xgcm   File: test_xrft.py    MIT License
def numpy_detrend(da):
    """
    Detrend a 2D field by subtracting out the least-square plane fit.

    Parameters
    ----------
    da : `numpy.array`
        The data to be detrended

    Returns
    -------
    da : `numpy.array`
        The detrended input data
    """
    N = da.shape

    G = np.ones((N[0]*N[1],3))
    for i in range(N[0]):
        G[N[1]*i:N[1]*i+N[1], 1] = i+1
        G[N[1]*i:N[1]*i+N[1], 2] = np.arange(1, N[1]+1)

    d_obs = np.reshape(da.copy(), (N[0]*N[1],1))
    m_est = np.dot(np.dot(spl.inv(np.dot(G.T, G)), G.T), d_obs)
    d_est = np.dot(G, m_est)

    lin_trend = np.reshape(d_est, N)

    return da - lin_trend 
Example 37
Project: prediction-constrained-topic-models   Author: dtak   File: calc_roc_auc_via_bootstrap.py    MIT License
def verify_min_examples_per_label(y_NC, min_examples_per_label):
    '''
    
    Examples
    --------
    >>> y_all_0 = np.zeros(10)
    >>> y_all_1 = np.ones(30)
    >>> verify_min_examples_per_label(y_all_0, 3)
    False
    >>> verify_min_examples_per_label(y_all_1, 2)
    False
    >>> verify_min_examples_per_label(np.hstack([y_all_0, y_all_1]), 10)
    True
    >>> verify_min_examples_per_label(np.eye(3), 2)
    False
    '''
    if y_NC.ndim < 2:
        y_NC = np.atleast_2d(y_NC).T
    n_C = np.sum(np.isfinite(y_NC), axis=0)
    n_pos_C = n_C * np.nanmean(y_NC, axis=0)
    min_neg = np.max(n_C - n_pos_C)
    min_pos = np.min(n_pos_C)
    if min_pos < min_examples_per_label:
        return False
    elif min_neg < min_examples_per_label:
        return False
    return True 
Example 38
Project: FRIDA   Author: LCAV   File: point_cloud.py    MIT License
def EDM(self):
        ''' Computes the EDM corresponding to the marker set '''
        if self.X is None:
            raise ValueError('No marker set')

        G = np.dot(self.X.T, self.X)
        return np.outer(np.ones(self.m), np.diag(G)) \
            - 2*G + np.outer(np.diag(G), np.ones(self.m)) 
Example 39
Project: FRIDA   Author: LCAV   File: doa.py    MIT License
def build_lookup(self, r=None, theta=None, phi=None):
        """
        Construct lookup table for given candidate locations (in spherical 
        coordinates). Each column is a location in cartesian coordinates.

        :param r: Candidate distances from the origin.
        :type r: numpy array
        :param theta: Candidate azimuth angles with respect to x-axis.
        :type theta: numpy array
        :param phi: Candidate elevation angles with respect to z-axis.
        :type phi: numpy array
        """
        if theta is not None:
            self.theta = theta
        if phi is not None:
            self.phi = phi
        if r is not None:
            self.r = r
            if self.r == np.ones(1):
                self.mode = 'far'
            else:
                self.mode = 'near'
        self.loc = np.zeros([self.D, len(self.r) * len(self.theta) * 
            len(self.phi)])
        self.num_loc = self.loc.shape[1]
        # convert to cartesian
        for i in range(len(self.r)):
            r_s = self.r[i]
            for j in range(len(self.theta)):
                theta_s = self.theta[j]
                for k in range(len(self.phi)):
                    # spher = np.array([r_s,theta_s,self.phi[k]])
                    self.loc[:, i * len(self.theta) + j * len(self.phi) + k] = \
                        spher2cart(r_s, theta_s, self.phi[k])[0:self.D] 
Example 40
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License
def possibilities_generator(
        prior, min_pos, max_start_pos, constraint_len, total_filled):
    """
    Given a row prior, a min_pos, max_start_pos, and constraint length,
    yield each potential row

    prior is an array of:
        -1 (unknown),
        0 (definitely empty),
        1 (definitely filled)
    """
    prior_filled = np.zeros(len(prior)).astype(bool)
    prior_filled[prior == 1] = True
    prior_empty = np.zeros(len(prior)).astype(bool)
    prior_empty[prior == 0] = True
    for start_pos in range(min_pos, max_start_pos + 1):
        possible = -1 * np.ones(len(prior))
        possible[start_pos:start_pos + constraint_len] = 1
        if start_pos + constraint_len < len(possible):
            possible[start_pos + constraint_len] = 0
        if start_pos > 0:
            possible[start_pos - 1] = 0

        # add in the prior
        possible[np.logical_and(possible == -1, prior == 0)] = 0
        possible[np.logical_and(possible == -1, prior == 1)] = 1

        # if contradiction with prior, continue
        # 1. possible changes prior = 1 to something else
        # 2. possible changes prior = 0 to something else
        # 3. everything is assigned in possible but there are not
        #    enough filled in
        # 4. possible changes nothing about the prior
        if np.any(possible[np.where(prior == 1)[0]] != 1) or \
                np.any(possible[np.where(prior == 0)[0]] != 0) or \
                np.sum(possible == 1) > total_filled or \
                (np.all(possible >= 0) and np.sum(possible == 1) <
                    total_filled) or \
                np.all(prior == possible):
            continue
        yield possible 
Example 41
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License
def __init__(self, nonogram):
        self.nonogram = nonogram
        self.puzzle_state = -1 * np.ones((nonogram.n_rows, nonogram.n_cols))
        self.filled_positions_hint_eligible = nonogram.solution_list
        self.prefilled_positions = [] 
Example 42
Project: nonogram-solver   Author: mprat   File: nonogram.py    MIT License
def _init_puzzle(self):
        self.puzzle_state = -1 * np.ones((self.n_rows, self.n_cols)) 
Example 43
Project: ml_news_popularity   Author: khaledJabr   File: svr.py    MIT License
def calc_K_Mat(self, input_mat, degree, c ) : 
		k_mat = np.ones([input_mat.shape[0]+1 ,input_mat.shape[0]+1 ])
		k_mat[0][0] = 0
		i_matrix = np.identity(input_mat.shape[0])
		for i in range(1, k_mat.shape[0]) : 
		    for j in range (1, k_mat.shape[0]) : 
		    	if i % 500 == 0 : 
		    		print("Fitting : K Matrix ( {} , {} )".format(i , j))
		    	k = (np.sum((input_mat[i-1:i , : ]).T * input_mat[j-1:j , :]) ** degree)  + (1/c) * i_matrix[i-1][j-1]
		    	k_mat[i][j] = k
		return k_mat 
Example 44
Project: ml_news_popularity   Author: khaledJabr   File: svr.py    MIT License
def calc_K_matrix_input(self, input_data ): 
		input_k_matrix = np.ones([input_data.shape[0] +1 , self.training_data.shape[0] + 1] )
		input_k_matrix[0][0] = 0 
		for i in range(1, input_k_matrix.shape[0]):
			for j in range(1, input_k_matrix.shape[1]):
				if i % 500 == 0 : 
					print("Predicting : K Matrix ( {} , {} )".format(i , j))
				input_k_matrix[i][j] = (np.sum((input_data[i-1:i , : ]).T * self.training_data[j-1:j , :] ) ** self.degree)

		return input_k_matrix
Example 45
Project: mmdetection   Author: open-mmlab   File: eval_hooks.py    Apache License 2.0
def evaluate(self, runner, results):
        gt_bboxes = []
        gt_labels = []
        gt_ignore = []
        for i in range(len(self.dataset)):
            ann = self.dataset.get_ann_info(i)
            bboxes = ann['bboxes']
            labels = ann['labels']
            if 'bboxes_ignore' in ann:
                ignore = np.concatenate([
                    np.zeros(bboxes.shape[0], dtype=np.bool),
                    np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
                ])
                gt_ignore.append(ignore)
                bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
                labels = np.concatenate([labels, ann['labels_ignore']])
            gt_bboxes.append(bboxes)
            gt_labels.append(labels)
        if not gt_ignore:
            gt_ignore = None
        # If the dataset is VOC2007, then use 11 points mAP evaluation.
        if hasattr(self.dataset, 'year') and self.dataset.year == 2007:
            ds_name = 'voc07'
        else:
            ds_name = self.dataset.CLASSES
        mean_ap, eval_results = eval_map(
            results,
            gt_bboxes,
            gt_labels,
            gt_ignore=gt_ignore,
            scale_ranges=None,
            iou_thr=0.5,
            dataset=ds_name,
            print_summary=True)
        runner.log_buffer.output['mAP'] = mean_ap
        runner.log_buffer.ready = True 
Example 46
Project: mmdetection   Author: open-mmlab   File: voc_eval.py    Apache License 2.0
def voc_eval(result_file, dataset, iou_thr=0.5):
    det_results = mmcv.load(result_file)
    gt_bboxes = []
    gt_labels = []
    gt_ignore = []
    for i in range(len(dataset)):
        ann = dataset.get_ann_info(i)
        bboxes = ann['bboxes']
        labels = ann['labels']
        if 'bboxes_ignore' in ann:
            ignore = np.concatenate([
                np.zeros(bboxes.shape[0], dtype=np.bool),
                np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool)
            ])
            gt_ignore.append(ignore)
            bboxes = np.vstack([bboxes, ann['bboxes_ignore']])
            labels = np.concatenate([labels, ann['labels_ignore']])
        gt_bboxes.append(bboxes)
        gt_labels.append(labels)
    if not gt_ignore:
        gt_ignore = None
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'
    else:
        dataset_name = dataset.CLASSES
    eval_map(
        det_results,
        gt_bboxes,
        gt_labels,
        gt_ignore=gt_ignore,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        print_summary=True) 
Example 47
Project: mmdetection   Author: open-mmlab   File: coco_error_analysis.py    Apache License 2.0
def makeplot(rs, ps, outDir, class_name, iou_type):
    cs = np.vstack([
        np.ones((2, 3)),
        np.array([.31, .51, .74]),
        np.array([.75, .31, .30]),
        np.array([.36, .90, .38]),
        np.array([.50, .39, .64]),
        np.array([1, .6, 0])
    ])
    areaNames = ['allarea', 'small', 'medium', 'large']
    types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
    for i in range(len(areaNames)):
        area_ps = ps[..., i, 0]
        figure_tile = iou_type + '-' + class_name + '-' + areaNames[i]
        aps = [ps_.mean() for ps_ in area_ps]
        ps_curve = [
            ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps
        ]
        ps_curve.insert(0, np.zeros(ps_curve[0].shape))
        fig = plt.figure()
        ax = plt.subplot(111)
        for k in range(len(types)):
            ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
            ax.fill_between(
                rs,
                ps_curve[k],
                ps_curve[k + 1],
                color=cs[k],
                label=str('[{:.3f}'.format(aps[k]) + ']' + types[k]))
        plt.xlabel('recall')
        plt.ylabel('precision')
        plt.xlim(0, 1.)
        plt.ylim(0, 1.)
        plt.title(figure_tile)
        plt.legend()
        # plt.show()
        fig.savefig(outDir + '/{}.png'.format(figure_tile))
        plt.close(fig) 
Example 48
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    BSD 3-Clause "New" or "Revised" License
def test_get_logits_over_interval(self):
        import tensorflow as tf
        model = cnn_model()
        wrap = KerasModelWrapper(model)
        fgsm_params = {'eps': .5}
        img = np.ones(shape=(28, 28, 1))
        num_points = 21
        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            logits = utils.get_logits_over_interval(sess, wrap,
                                                    img, fgsm_params,
                                                    min_epsilon=-10,
                                                    max_epsilon=10,
                                                    num_points=num_points)
            self.assertEqual(logits.shape[0], num_points) 
Example 49
Project: neural-fingerprinting   Author: StephanZheng   File: picklable_model.py    BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, shape):
        self.input_shape = shape
        self.output_shape = shape
        channels = shape[-1]
        self.channels = channels
        self.actual_num_groups = min(self.channels, self.num_groups)
        extra_dims = (self.channels // self.actual_num_groups,
                      self.actual_num_groups)
        self.expanded_shape = tuple(shape[1:3]) + tuple(extra_dims)
        init_value = np.ones((channels,), dtype='float32') * self.init_gamma
        self.gamma = PV(init_value, name=self.name + "_gamma")
        self.beta = PV(np.zeros((self.channels,), dtype='float32'),
                       name=self.name + "_beta") 
Example 50
Project: neural-fingerprinting   Author: StephanZheng   File: test_serial.py    BSD 3-Clause "New" or "Revised" License
def test_save_and_load_var():
    """
    Tests that we can save and load a PicklableVariable with joblib
    """
    sess = tf.Session()
    with sess.as_default():
        x = np.ones(1)
        xv = PicklableVariable(x)
        xv.var.initializer.run()
        save("/tmp/var.joblib", xv)
        sess.run(tf.assign(xv.var, np.ones(1) * 2))
        new_xv = load("/tmp/var.joblib")
        assert np.allclose(sess.run(xv.var), np.ones(1) * 2)
        assert np.allclose(sess.run(new_xv.var), np.ones(1))