Python autograd.numpy.concatenate() Examples

The following are code examples showing how to use autograd.numpy.concatenate(). They are drawn from open source Python projects.
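As a minimal, self-contained sketch (not taken from any of the projects below), autograd.numpy.concatenate behaves like numpy.concatenate but is differentiable, so gradients flow through the concatenated result:

import autograd.numpy as np
from autograd import grad

def loss(x, y):
    # Concatenate two 1-D arrays and sum the squares of the result.
    z = np.concatenate((x, y))
    return np.sum(z ** 2)

# Gradient with respect to the first argument; the concatenation is
# transparent to autograd, so this is simply 2 * x.
g = grad(loss, argnum=0)
print(g(np.array([1.0, 2.0]), np.array([3.0])))  # [2. 4.]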

Example 1
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def W_b(self):
        """
        Gets the weights concatenated with the bias (bias trick)
        """
        weights = np.array(self.coef_)
        if self.multi_class == "multinomial" and self.n_classes_ > 2:
            assert weights.shape == (self.n_classes_, self.X_.shape[1])
            weights = weights[:, :]  # Unpack the weights
            if not self.fit_intercept:
                return weights
            intercept = np.array(self.intercept_).reshape(-1, 1)
            assert intercept.shape == (self.n_classes_, 1)
            W_b = np.concatenate((weights, intercept), axis=1)
            return W_b
        else:
            assert weights.shape == (1, self.X_.shape[1])
            weights = weights[0, :]  # Unpack the weights
            if not self.fit_intercept:
                return weights
            intercept = np.array(self.intercept_)
            assert intercept.shape == (1,)
            W_b = np.concatenate((weights, intercept), axis=0)
            return W_b 
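For context, here is a minimal sketch of the "bias trick" mentioned in the docstring above (the arrays are invented for illustration, not taken from the project): padding the data with a column of ones lets the intercept be folded into a single weight vector, so the two concatenations above reproduce X.dot(w) + b.

import autograd.numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])
w = np.array([0.5, -0.5])
b = np.array([0.1])

# Bias trick: append a column of ones to X and the intercept to w.
X_b = np.concatenate((X, np.ones((len(X), 1))), axis=1)
W_b = np.concatenate((w, b), axis=0)

print(np.allclose(np.dot(X_b, W_b), np.dot(X, w) + b))  # True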
Example 2
Project: ReducedVarianceReparamGradients   Author: andymiller   File: bbvi_mvn_diag.py    MIT License
def __init__(self, lnpdf, D, glnpdf=None, lnpdf_is_vectorized=False):
        """
        Implements MCVI --- exposes elbo gradient and sampling methods.
        This class breaks the gradient down into parts

        dg/dlam = dlnpdf(z)/dz * dz/dlam - dlnq(z)/dz * dz/dlam - dlnq(z)/dlam

        Parameterizes with mean and log-std! (not variance!)
            lam = [mean, log-std]
        """
        # base class sets up the gradient function organization
        super(DiagMvnBBVI, self).__init__(lnpdf, D, glnpdf, lnpdf_is_vectorized)

        # we note that the second two terms, with probability one, 
        # create the vector [0, 0, 0, ..., 0, 1., 1., ..., 1.]
        self.mask = np.concatenate([np.zeros(D), np.ones(D)])
        self.num_variational_params = 2*D
        self.D = D

    #####################################################################
    # Methods for various types of gradients of the ELBO                #
    #    -- that can be plugged into FilteredOptimization routines      #
    ##################################################################### 
Example 3
Project: momi2   Author: popgenmethods   File: size_history.py    GNU General Public License v3.0
def sfs(self, n):
        if n == 0:
            return np.array([0.])
        Et_jj = self.etjj(n)
        #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

        ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

        before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
        # ignore branch length above untruncated TMRCA
        if self.tau == float('inf'):
            before_tmrca = 0.0

        ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
        return ret

    # def transition_prob(self, v, axis=0):
    #     return moran_model.moran_action(self.scaled_time, v, axis=axis) 
Example 4
Project: eye_hand_calibration   Author: MobileManipulation   File: full_calib.py    BSD 3-Clause "New" or "Revised" License
def __init__(self, grid_size, extrinsics, intrinsics):
        self.extrinsics = extrinsics
        self.intrinsics = intrinsics

        # Build up testing data in a grid
        z = np.linspace(-0.2, 0.2, num=grid_size)
        y = np.linspace(-0.2+0.025, 0.2+0.025, num=grid_size)
        x = np.linspace(0.5, 1.2, num=grid_size)
        g = np.meshgrid(x, y, z)
        cam = np.stack(list(map(np.ravel, g)))
        # Note: the result of this concatenation is never assigned, so as
        # written it has no effect on `cam`.
        np.concatenate([cam, np.ones([1, cam.shape[1]])])

        # Compute all truth data in various frames
        self.camera_truth = cam
        self.optical_truth = opt = toCameraFrame(cam, extrinsics)
        self.pixel_truth = cameraProjection(opt, intrinsics) 
Example 5
Project: baconian-project   Author: Lukeeeeee   File: ilqr_policy.py    MIT License
def finite_difference(self, x, u):

        "calling finite difference for delta perturbation"
        xu = np.concatenate((x, u))

        F = np.zeros((x.shape[0], xu.shape[0]))

        for i in range(x.shape[0]):
            F[i, :] = approx_fprime(xu, self.simulate_next_state, self.delta, i)

        c = approx_fprime(xu, self.simulate_cost, self.delta)

        C = np.zeros((len(xu), len(xu)))

        for i in range(xu.shape[0]):
            C[i, :] = approx_fprime(xu, self.approx_fdoubleprime, self.delta, i)

        f = np.zeros((len(x)))

        return C, F, c, f 
Example 6
Project: tree-regularization-public   Author: dtak   File: train.py    MIT License
def get_ith_minibatch_ixs_fences(b_i, batch_size, fences):
    """Split timeseries data of uneven sequence lengths into batches.
    This is how we handle different sized sequences.
    
    @param b_i: integer
                iteration index
    @param batch_size: integer
                       size of batch
    @param fences: list of integers
                   sequence of cutoff array
    @return idx: integer
    @return batch_slice: slice object
    """
    num_data = len(fences) - 1
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)
    b_i = b_i % num_minibatches
    idx = slice(b_i * batch_size, (b_i+1) * batch_size)
    batch_i = np.arange(num_data)[idx]
    batch_slice = np.concatenate([range(i, j) for i, j in 
                                  zip(fences[batch_i], fences[batch_i+1])])
    return idx, batch_slice 
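A small, invented illustration of how the fences are used (values are hypothetical, assuming fences is a NumPy array): with fences = [0, 3, 5, 9] there are three sequences of lengths 3, 2, and 4, and batch_slice collects the flattened timestep indices of the sequences in the requested minibatch.

import autograd.numpy as np

fences = np.array([0, 3, 5, 9])  # cumulative sequence boundaries (hypothetical)
idx, batch_slice = get_ith_minibatch_ixs_fences(b_i=0, batch_size=2, fences=fences)
print(idx)          # slice(0, 2, None) -> the first two sequences
print(batch_slice)  # [0 1 2 3 4]      -> their flattened timestep indices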
Example 7
Project: ParametricGP   Author: maziarraissi   File: parametric_GP.py    MIT License
def __init__(self, X, y, M=10, max_iter = 2000, N_batch = 1, 
                 monitor_likelihood = 10, lrate = 1e-3):
        (N,D) = X.shape
        N_subset = min(N, 10000)
        idx = np.random.choice(N, N_subset, replace=False)
        kmeans = KMeans(n_clusters=M, random_state=0).fit(X[idx,:])
        Z = kmeans.cluster_centers_
    
        hyp = np.log(np.ones(D+1))
        logsigma_n = np.array([-4.0])
        hyp = np.concatenate([hyp, logsigma_n])
    
        m = np.zeros((M,1))
        S = kernel(Z,Z,hyp[:-1])

        self.X = X
        self.y = y
        
        self.M = M
        self.Z = Z
        self.m = m
        self.S = S
        
        self.hyp= hyp
        
        self.max_iter = max_iter
        self.N_batch = N_batch
        self.monitor_likelihood = monitor_likelihood
        self.jitter = 1e-8
        self.jitter_cov = 1e-8
        
        # Adam optimizer parameters
        self.mt_hyp = np.zeros(hyp.shape)
        self.vt_hyp = np.zeros(hyp.shape)
        self.lrate = lrate 
Example 8
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License
def _X_b(X):
        """bias trick pad X"""
        X_b = np.concatenate((X,
                              np.ones((len(X), 1))),
                             axis=1)
        return X_b 
Example 9
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License
def W_b(self):
        """
        Gets the weights concatenated with the bias (bias trick)
        """
        weights = np.array(self.coef_)
        assert weights.shape == (1, self.X_.shape[1])
        weights = weights[0, :]  # Unpack the weights
        if not self.fit_intercept:
            return weights
        else:
            intercept = np.array(self.intercept_)
            assert intercept.shape == (1,)
            W_b = np.concatenate((weights, intercept), axis=0)
            return W_b 
Example 10
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def _X_b(X):
        """bias trick pad X"""
        X_b = np.concatenate((X,
                              np.ones((len(X), 1))),
                             axis=1)
        return X_b 
Example 11
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_concatenate_1ist():  combo_check(np.concatenate, [0], [(R(1), R(3))],             axis=[0]) 
Example 12
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_concatenate_tuple(): combo_check(np.concatenate, [0], [[R(1), R(3)]],             axis=[0]) 
Example 13
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_concatenate_2d():    combo_check(np.concatenate, [0], [(R(2, 2), R(2, 2))],       axis=[0, 1]) 
Example 14
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_concatenate_3d():    combo_check(np.concatenate, [0], [(R(2, 2, 2), R(2, 2, 2))], axis=[0, 1, 2]) 
Example 15
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def mvn_fisher_info(params):
    """ returns the fisher information matrix (diagonal) for a multivariate
    normal distribution with params = [mu, ln sigma] """
    D = len(params) // 2
    mean, log_std = params[:D], params[D:]
    return np.concatenate([np.exp(-2.*log_std),
                           2*np.ones(D)]) 
Example 16
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def unconstrained_to_simplex(rhos):
    rhosf = np.concatenate([rhos, [0.]])
    pis   = np.exp(rhosf) / np.sum(np.exp(rhosf))
    return pis 
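A quick, illustrative check (the input values are invented): unconstrained_to_simplex appends a fixed zero logit and applies a softmax, so K - 1 unconstrained values parameterize a point on the K-simplex, and autograd can differentiate through the np.concatenate.

import autograd.numpy as np
from autograd import jacobian

rhos = np.array([0.3, -1.2])           # 2 free parameters -> 3 simplex entries
pis = unconstrained_to_simplex(rhos)
print(pis, pis.sum())                  # entries are positive and sum to 1

# The concatenation with the constant [0.] is differentiable:
print(jacobian(unconstrained_to_simplex)(rhos).shape)  # (3, 2)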
Example 17
Project: momi2   Author: popgenmethods   File: math_functions.py    GNU General Public License v3.0
def transformed_expi(x):
    abs_x = np.abs(x)
    ser = abs_x < 1. / 45.
    nser = np.logical_not(ser)

#     ret = np.zeros(x.shape)
#     ret[ser], ret[nser] = transformed_expi_series(x[ser]), transformed_expi_naive(x[nser])))
#     return ret

    # We use np.concatenate to combine.
    # would be better to use ret[ser] and ret[nser] as commented out above
    # but array assignment not yet supported by autograd
    assert np.all(abs_x[:-1] >= abs_x[1:])
    return np.concatenate((transformed_expi_naive(x[nser]), transformed_expi_series(x[ser]))) 
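As an aside (not part of momi2), the workaround described in the comment above can be shown directly: in-place index assignment fails on arrays that autograd is tracing, whereas assembling the result with np.concatenate keeps the computation differentiable.

import autograd.numpy as np
from autograd import grad

def f(x):
    # Writing `out = 0. * x; out[0] = x[0] ** 2` would fail under autograd
    # because traced arrays do not support item assignment, so the result
    # is assembled with np.concatenate instead.
    return np.sum(np.concatenate([x[:1] ** 2, np.zeros(len(x) - 1)]))

x = np.array([3.0, 1.0, 2.0])
print(grad(f)(x))  # [6. 0. 0.]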
Example 18
Project: momi2   Author: popgenmethods   File: math_functions.py    GNU General Public License v3.0
def expm1d(x, eps=1e-6):
    x = np.array(x)
    abs_x = np.abs(x)
    if x.shape:
        # FIXME: don't require abs_x to be increasing
        assert np.all(abs_x[1:] >= abs_x[:-1])
        small = abs_x < eps
        big = ~small
        return np.concatenate([expm1d_taylor(x[small]),
                               expm1d_naive(x[big])])
    elif abs_x < eps:
        return expm1d_taylor(x)
    else:
        return expm1d_naive(x) 
Example 19
Project: eye_hand_calibration   Author: MobileManipulation   File: full_calib.py    BSD 3-Clause "New" or "Revised" License
def __init__(self, camera_points, pixels, extrinsics, intrinsics):
        """
        camera_points: XYZ coordinates of points, expressed in camera_link frame
        pixels: pixel coordinates in camera/rgb/image_color (unrectified!)
        extrinsics: initial guess of extrinsics
        intrinsics: initial guess of intrinsics
        """
        self.x0 = np.concatenate([extrinsics, intrinsics])
        self.camera_truth = camera_points
        self.pixel_truth = pixels

        print "kinect:", self.camera_truth
        print "pixels:", self.pixel_truth 
Example 20
Project: private-pgm   Author: ryan112358   File: dual_query.py    Apache License 2.0
def answer_workload(workload, data):
    ans = [W.dot(data.project(cl).datavector()) for cl, W in workload]
    return np.concatenate(ans) 
Example 21
Project: private-pgm   Author: ryan112358   File: dual_query.py    Apache License 2.0
def marginal_loss(marginals, workload, cache):
    answers = []
    for proj, W in workload:
        for cl in marginals:
            if set(proj) <= set(cl):
                mu = marginals[cl].project(proj)
                x = mu.values.flatten()
                answers.append(W.dot(x))
                break
    total = x.sum()
    answers = np.concatenate(answers) / total

    gradient = grad(log_likelihood, argnum=0)
    loss = log_likelihood(answers, cache)
    danswers = gradient(answers, cache)

    i = 0
    gradients = { cl : Factor.zeros(marginals[cl].domain) for cl in marginals }
    for proj, W in workload:
        for cl in marginals:
            if set(proj) <= set(cl):
                m = W.shape[0]
                dmu = W.T.dot(danswers[i:i+m]) / total
                dom = gradients[cl].domain.project(proj)
                gradients[cl] += Factor(dom, dmu)
                i += m
                break

    print(loss)
    return loss, graphical_model.CliqueVector(gradients) 
Example 22
Project: pylqr   Author: navigator8972   File: pylqr_trajctrl.py    GNU General Public License v3.0
def synthesize_trajectory(self, x0, u_array=None, n_itrs=50, tol=1e-6, verbose=True):
        if self.ilqr_ is None:
            print('No iLQR solver has been prepared.')
            return None

        #initialization doesn't matter as global optimality can be guaranteed?
        if u_array is None:
            u_init = [np.zeros(self.n_dims_) for i in range(self.T_-1)]
        else:
            u_init = u_array
        x_init = np.concatenate([x0, np.zeros(self.n_dims_)])
        res = self.ilqr_.ilqr_iterate(x_init, u_init, n_itrs=n_itrs, tol=tol, verbose=verbose)
        return res['x_array_opt'][:, 0:self.n_dims_] 
Example 23
Project: pylqr   Author: navigator8972   File: pylqr_trajctrl.py    GNU General Public License v3.0
def PyLQR_TrajCtrl_TrackingTest():
    n_pnts = 200
    x_coord = np.linspace(0.0, 2*np.pi, n_pnts)
    y_coord = np.sin(x_coord)
    #concatenate to have trajectory
    ref_traj = np.array([x_coord, y_coord]).T
    weight_mats = [ np.eye(ref_traj.shape[1])*100 ]

    #draw reference trajectory
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hold(True)
    ax.plot(ref_traj[:, 0], ref_traj[:, 1], '.-k', linewidth=3.5)
    ax.plot([ref_traj[0, 0]], [ref_traj[0, 1]], '*k', markersize=16)

    lqr_traj_ctrl = PyLQR_TrajCtrl(use_autograd=True)
    lqr_traj_ctrl.build_ilqr_tracking_solver(ref_traj, weight_mats)

    n_queries = 5

    for i in range(n_queries):
        #start from a perturbed point
        x0 = ref_traj[0, :] + np.random.rand(2) * 2 - 1
        syn_traj = lqr_traj_ctrl.synthesize_trajectory(x0)
        #plot it
        ax.plot(syn_traj[:, 0], syn_traj[:, 1], linewidth=3.5)

    plt.show()
    return 
Example 24
Project: autoptim   Author: pierreablin   File: autoptim.py    MIT License
def _vectorize(optim_vars):
    shapes = [var.shape for var in optim_vars]
    x = np.concatenate([var.ravel() for var in optim_vars])
    return x, shapes 
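For illustration only (not part of autoptim's public API; the helper name is invented), the shapes returned above are enough to invert the concatenation with np.split and reshape:

import autograd.numpy as np

def _unvectorize(x, shapes):
    # Hypothetical inverse of _vectorize: split the flat vector at the
    # cumulative sizes and restore each variable's original shape.
    sizes = [int(np.prod(shape)) for shape in shapes]
    split_points = np.cumsum(sizes)[:-1]
    return [piece.reshape(shape)
            for piece, shape in zip(np.split(x, split_points), shapes)]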
Example 25
Project: kernel-gof   Author: wittawatj   File: goftest.py    MIT License
def feature_tensor(self, X):
        """
        Compute the feature tensor which is n x d x J.
        The feature tensor can be used to compute the statistic, and the
        covariance matrix for simulating from the null distribution.

        X: n x d data numpy array

        return an n x d x J numpy array
        """
        k = self.k
        J = self.V.shape[0]
        n, d = X.shape
        # n x d matrix of gradients
        grad_logp = self.p.grad_log(X)
        #assert np.all(util.is_real_num(grad_logp))
        # n x J matrix
        #print 'V'
        #print self.V
        K = k.eval(X, self.V)
        #assert np.all(util.is_real_num(K))

        list_grads = np.array([np.reshape(k.gradX_y(X, v), (1, n, d)) for v in self.V])
        stack0 = np.concatenate(list_grads, axis=0)
        #a numpy array G of size n x d x J such that G[:, :, J]
        #    is the derivative of k(X, V_j) with respect to X.
        dKdV = np.transpose(stack0, (1, 2, 0))

        # n x d x J tensor
        grad_logp_K = util.outer_rows(grad_logp, K)
        #print 'grad_logp'
        #print grad_logp.dtype
        #print grad_logp
        #print 'K'
        #print K
        Xi = old_div((grad_logp_K + dKdV),np.sqrt(d*J))
        #Xi = (grad_logp_K + dKdV)
        return Xi 
Example 26
Project: paragami   Author: rgiordan   File: simplex_patterns.py    Apache License 2.0
def _constrain_simplex_matrix(free_mat):
    # The first column is the reference value.  Append a column of zeros
    # to each simplex representing this reference value.
    reference_col = np.expand_dims(np.full(free_mat.shape[0:-1], 0), axis=-1)
    free_mat_aug = np.concatenate([reference_col, free_mat], axis=-1)

    # Note that autograd needs to update their logsumexp to be in special
    # not misc before this can be changed.  Furthermore, logsumexp is
    # not even available in the pypi version of autograd.
    log_norm = logsumexp(free_mat_aug, axis=-1)
    return np.exp(free_mat_aug - log_norm) 
Example 27
Project: paragami   Author: rgiordan   File: test_autograd_supplement_lib.py    Apache License 2.0
def test_slogdet_3d(self):
        fun = lambda x: np.sum(np.linalg.slogdet(x)[1])
        mat = np.concatenate(
            [(rand_psd(5) + 5 * np.eye(5))[None,...] for _ in range(3)])
        # At this time, this is not supported.
        #check_grads(fun)(mat)

        # Check that it raises an error.
        fwd_grad = autograd.make_jvp(fun, argnum=0)
        def error_fun():
            return fwd_grad(mat)(mat)
        self.assertRaises(ValueError, error_fun) 
Example 28
Project: momi2   Author: popgenmethods   File: sfs.py    GNU General Public License v3.0
def p_missing(self):
        """Estimate of probability that a random allele from each population is missing.

        Missingness is estimated as follows: \
        from each SNP remove a random allele; \
        if the resulting config is monomorphic, then ignore. \
        If polymorphic, then count whether the removed allele \
        is missing or not.

        This avoids bias from fact that we don't observe \
        some polymorphic configs that appear monomorphic \
        after removing the missing alleles.

        :returns: 1-d array of missingness per population
        :rtype: :class:`numpy.ndarray`
        """
        counts = self._total_freqs
        sampled_n = self.sampled_n
        n_pops = len(self.sampled_pops)

        config_arr = self.configs.value
        # augment config_arr to contain the missing counts
        n_miss = sampled_n - np.sum(config_arr, axis=2)
        config_arr = np.concatenate((config_arr, np.reshape(
            n_miss, list(n_miss.shape)+[1])), axis=2)

        ret = []
        for i in range(n_pops):
            n_valid = []
            for allele in (0, 1, -1):
                # configs with removed allele
                removed = np.array(config_arr)
                removed[:, i, allele] -= 1
                # is the resulting config polymorphic?
                valid_removed = (removed[:, i, allele] >= 0) & np.all(
                    np.sum(removed[:, :, :2], axis=1) > 0, axis=1)

                # sum up the valid configs
                n_valid.append(np.sum(
                    (counts * config_arr[:, i, allele])[valid_removed]))
            # fraction of valid configs with missing additional allele
            ret.append(n_valid[-1] / float(sum(n_valid)))
        return np.array(ret) 
Example 29
Project: ParetoMTL   Author: Xi-L   File: run_synthetic_example.py    MIT License
def get_d_paretomtl(grads,value,weights,i):
    # calculate the gradient direction for Pareto MTL
    nobj, dim = grads.shape
    
    # check active constraints
    normalized_current_weight = weights[i]/np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, (i), axis=0) / np.linalg.norm(np.delete(weights, (i), axis=0), axis = 1,keepdims = True)
    w = normalized_rest_weights - normalized_current_weight
    
    
    # solve QP 
    gx =  np.dot(w,value/np.linalg.norm(value))
    idx = gx >  0
   
    
    vec =  np.concatenate((grads, np.dot(w[idx],grads)), axis = 0)
    
#    # use cvxopt to solve QP
#    
#    P = np.dot(vec , vec.T)
#    
#    q = np.zeros(nobj + np.sum(idx))
#    
#    G =  - np.eye(nobj + np.sum(idx) )
#    h = np.zeros(nobj + np.sum(idx))
#    
#
#    
#    A = np.ones(nobj + np.sum(idx)).reshape(1,nobj + np.sum(idx))
#    b = np.ones(1)
 
#    cvxopt.solvers.options['show_progress'] = False
#    sol = cvxopt_solve_qp(P, q, G, h, A, b)
  
    # use MinNormSolver to solve QP
    sol, nd = MinNormSolver.find_min_norm_element(vec)
   
    
    # reformulate ParetoMTL as linear scalarization method, return the weights
    weight0 =  sol[0] + np.sum(np.array([sol[j] * w[idx][j - 2,0] for j in np.arange(2,2 + np.sum(idx))]))
    weight1 = sol[1] + np.sum(np.array([sol[j] * w[idx][j - 2,1] for j in np.arange(2,2 + np.sum(idx))]))
    weight = np.stack([weight0,weight1])
   

    return weight 
Example 30
Project: pyflatten   Author: ericmjl   File: __init__.py    MIT License
def flatten(value):
    """value can be any nesting of tuples, arrays, dicts.
       returns 1D numpy array and an unflatten function."""
    if isinstance(value, np.ndarray):
        def unflatten(vector):
            return np.reshape(vector, value.shape)
        return np.ravel(value), unflatten

    elif isinstance(value, float):
        return np.array([value]), lambda x: x[0]

    elif isinstance(value, tuple):
        if not value:
            return np.array([]), lambda x: ()

        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])

        def unflatten(vector):
            N = len(flattened_first)
            return (unflatten_first(vector[:N]),) + unflatten_rest(vector[N:])

        return np.concatenate((flattened_first, flattened_rest)), unflatten

    elif isinstance(value, list):
        if not value:
            return np.array([]), lambda x: []
        flattened_first, unflatten_first = flatten(value[0])
        flattened_rest, unflatten_rest = flatten(value[1:])

        def unflatten(vector):
            N = len(flattened_first)
            return [unflatten_first(vector[:N])] + unflatten_rest(vector[N:])

        return np.concatenate((flattened_first, flattened_rest)), unflatten

    elif isinstance(value, dict):
        flattened = []
        unflatteners = []
        lengths = []
        keys = []
        for k, v in sorted(value.items(), key=itemgetter(0)):
            cur_flattened, cur_unflatten = flatten(v)
            flattened.append(cur_flattened)
            unflatteners.append(cur_unflatten)
            lengths.append(len(cur_flattened))
            keys.append(k)

        def unflatten(vector):
            split_ixs = np.cumsum(lengths)
            pieces = np.split(vector, split_ixs)
            return {key: unflattener(piece)
                    for piece, unflattener, key in zip(pieces,
                                                       unflatteners,
                                                       keys)}

        return np.concatenate(flattened), unflatten

    else:
        raise Exception("Don't know how to flatten type {}".format(type(value))
                        )
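A short usage sketch (inputs are invented; it assumes the flatten function above is in scope together with its itemgetter import from the operator module): flatten returns the concatenated 1-D vector along with a closure that restores the original nesting.

import autograd.numpy as np

value = {"a": np.ones((2, 2)), "b": (3.0, [np.arange(3.0)])}
vec, unflatten = flatten(value)
print(vec.shape)         # (8,)
restored = unflatten(vec)
print(restored["b"][0])  # 3.0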