Python autograd.numpy.sum() Examples

The following code examples show how to use autograd.numpy.sum(). They are drawn from open source Python projects.
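For orientation, here is a minimal, self-contained sketch of the pattern most of the examples below share: autograd traces array operations such as np.sum so that grad can differentiate a scalar-valued function. This snippet is illustrative only and is not taken from any of the projects listed.

import autograd.numpy as np
from autograd import grad

def sum_of_squares(x):
    # np.sum reduces the array to the scalar output that grad requires
    return np.sum(x ** 2)

g = grad(sum_of_squares)              # gradient of sum(x_i^2) is 2*x
print(g(np.array([1.0, 2.0, 3.0])))   # [2. 4. 6.]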

Example 1
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def _log_posterior(W, X, y, L2_alpha, intercept=False, n_classes=2,
                       W_shape=None):
        assert len(X.shape) == 2, "X must be 2d"
        if n_classes <= 2:
            pred = LogisticRegression._logit(X, W)
            assert len(pred.shape) <= 2
            log_lik = np.sum(np.log(pred * y + (1 - pred) * (1 - y)))
        else:
            raise RuntimeError("Multiclass is not supported")
        if intercept:
            log_prior = -L2_alpha * LogisticRegression._l2_norm(W[:-1])
        else:
            log_prior = -L2_alpha * LogisticRegression._l2_norm(W)
        log_posterior = log_prior + log_lik
        #print("log_posterior", log_posterior)
        return log_posterior 
Example 2
Project: einsum2   Author: jackkamm   File: einsum2.py    MIT License
def _reshape(in_arr, in_sublist, *out_sublists):
    assert len(out_sublists) == 3

    old_sublist = in_sublist
    in_sublist = sum(out_sublists, [])
    in_arr = _transpose(in_arr, old_sublist, in_sublist)

    # in_arr.shape breaks in autograd if it has no dimension
    if in_sublist:
        shapes = {s:i for i,s in zip(in_arr.shape, in_sublist)}
    else: shapes = {}
    return np.reshape(in_arr, [np.prod([shapes[s] for s in out_subs], dtype=int)
                               for out_subs in out_sublists]) 
Example 3
Project: einsum2   Author: jackkamm   File: einsum2.py    MIT License
def _sum_unique_axes(in_arr, in_sublist, *keep_subs):
    # assume no repeated subscripts
    assert len(in_sublist) == len(set(in_sublist))

    out_sublist = []
    sum_axes = []
    keep_subs = set([s for ks in keep_subs for s in ks])
    for idx, sub in enumerate(in_sublist):
        if sub in keep_subs:
            out_sublist.append(sub)
        else:
            sum_axes.append(idx)
    if sum_axes:
        return np.sum(in_arr, axis=tuple(sum_axes)), out_sublist
    else:
        return in_arr, out_sublist 
Example 4
Project: STSC   Author: wOOL   File: stsc_autograd.py    The Unlicense
def get_rotation_matrix(X, C):
    ij_list = [(i, j) for i in range(C) for j in range(C) if i < j]

    def cost(theta_list):
        U_list = generate_U_list(ij_list, theta_list, C)
        R = reduce(npy.dot, U_list, npy.eye(C))
        Z = X.dot(R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)

    theta_list_init = npy.array([0.0] * int(C * (C - 1) / 2))
    opt = minimize(cost,
                   x0=theta_list_init,
                   method='CG',
                   jac=grad(cost),
                   options={'disp': False})
    return opt.fun, reduce(npy.dot, generate_U_list(ij_list, opt.x, C), npy.eye(C)) 
Example 5
Project: autograd-forward   Author: BB-UCL   File: numpy_grads.py    MIT License
def forward_grad_np_std(g, ans, gvs, vs, x, axis=None, ddof=0, keepdims=False):
    if axis is None:
        if gvs.iscomplex:
            num_reps = gvs.size / 2
        else:
            num_reps = gvs.size
    elif isinstance(axis, int):
        num_reps = gvs.shape[axis]
    elif isinstance(axis, tuple):
        num_reps = anp.prod(anp.array(gvs.shape)[list(axis)])

    if num_reps <= 1:
        return vs.zeros()
    x_minus_mean = anp.conj(x - anp.mean(x, axis=axis, keepdims=True))
    return (anp.sum(anp.real(g * x_minus_mean), axis=axis, keepdims=keepdims) /
            ((num_reps - ddof) * ans)) 
Example 6
Project: qoc   Author: SchusterLab   File: targetstateinfidelity.py    MIT License
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved state and its target state.
        inner_products = anp.matmul(self.target_states_dagger, states)[:, 0, 0]
        fidelities = anp.real(inner_products * anp.conjugate(inner_products))
        fidelity_normalized = anp.sum(fidelities) / self.state_count
        infidelity = 1 - fidelity_normalized
        
        return infidelity * self.cost_multiplier 
Example 7
Project: qoc   Author: SchusterLab   File: targetstateinfidelitytime.py    MIT License
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # The cost is the infidelity of each evolved state and its target state.
        inner_products = anp.matmul(self.target_states_dagger, states)[:, 0, 0]
        fidelities = anp.real(inner_products * anp.conjugate(inner_products))
        fidelity_normalized = anp.sum(fidelities) / self.state_count
        infidelity = 1 - fidelity_normalized
        # Normalize the cost for the number of times the cost is evaluated.
        cost_normalized = infidelity / self.cost_eval_count

        return cost_normalized * self.cost_multiplier 
Example 8
Project: momi2   Author: popgenmethods   File: demography.py    GNU General Public License v3.0
def get_treeseq_configs(treeseq, sampled_n):
    mat = np.zeros((len(sampled_n), sum(sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)

    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            sampled_n - derived_counts,
            derived_counts
        ]).T

    for v in treeseq.variants():
        yield get_config(v.genotypes) 
Example 9
Project: momi2   Author: popgenmethods   File: sfs.py    GNU General Public License v3.0
def resample(self):
        """Create a new SFS by resampling blocks with replacement.

        Note the resampled SFS is assumed to have the same length in base pairs \
        as the original SFS, which may be a poor assumption if the blocks are not of equal length.

        :returns: Resampled SFS
        :rtype: :class:`Sfs`
        """
        loci = np.random.choice(
            np.arange(self.n_loci), size=self.n_loci, replace=True)
        mat = self.freqs_matrix[:, loci]
        to_keep = np.asarray(mat.sum(axis=1) > 0).squeeze()
        to_keep = np.arange(len(self.configs))[to_keep]
        mat = mat[to_keep, :]
        configs = _ConfigList_Subset(self.configs, to_keep)

        return self.from_matrix(mat, configs, self.folded, self.length) 
Example 10
Project: momi2   Author: popgenmethods   File: sfs.py    GNU General Public License v3.0
def _entropy(self):
        counts = self._total_freqs
        n_snps = float(self.n_snps())
        p = counts / n_snps
        # return np.sum(p * np.log(p))
        ret = np.sum(p * np.log(p))

        # correct for missing data
        sampled_n = np.sum(self.configs.value, axis=2)
        sampled_n_counts = co.Counter()
        assert len(counts) == len(sampled_n)
        for c, n in zip(counts, sampled_n):
            n = tuple(n)
            sampled_n_counts[n] += c
        sampled_n_counts = np.array(
            list(sampled_n_counts.values()), dtype=float)

        ret = ret + np.sum(sampled_n_counts / n_snps *
                           np.log(n_snps / sampled_n_counts))
        assert not np.isnan(ret)
        return ret 
Example 11
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def _build_old_new_idxs(self, folded):
        idxs = self.full_configs._augmented_idxs(folded)

        denom_idx_key = 'denom_idx'
        denom_idx = idxs[denom_idx_key]
        idxs = {k: v[self.sub_idxs]
                for k, v in list(idxs.items()) if k != denom_idx_key}

        old_idxs = np.array(
            list(set(sum(map(list, idxs.values()), [denom_idx]))))
        old_2_new_idxs = {old_id: new_id for new_id,
                          old_id in enumerate(old_idxs)}

        idxs = {k: np.array([old_2_new_idxs[old_id]
                             for old_id in v], dtype=int)
                for k, v in list(idxs.items())}
        idxs[denom_idx_key] = old_2_new_idxs[denom_idx]
        return old_idxs, idxs 
Example 12
Project: momi2   Author: popgenmethods   File: confidence_region.py    GNU General Public License v3.0
def _many_score_cov(params, data, demo_func, **kwargs):
    params = np.array(params)

    def f_vec(x):
        ret = _composite_log_likelihood(
            data, demo_func(*x), vector=True, **kwargs)
        # centralize
        return ret - np.mean(ret)

    # g_out = einsum('ij,ik', jacobian(f_vec)(params), jacobian(f_vec)(params))
    # but computed in a roundabout way because jacobian implementation is slow
    def _g_out_antihess(x):
        l = f_vec(x)
        lc = make_constant(l)
        return np.sum(0.5 * (l**2 - l * lc - lc * l))
    return autograd.hessian(_g_out_antihess)(params) 
Example 13
Project: momi2   Author: popgenmethods   File: size_history.py    GNU General Public License v3.0
def sfs(self, n):
        if n == 0:
            return np.array([0.])
        Et_jj = self.etjj(n)
        #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

        ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

        before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
        # ignore branch length above untruncated TMRCA
        if self.tau == float('inf'):
            before_tmrca = 0.0

        ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
        return ret

    # def transition_prob(self, v, axis=0):
    #     return moran_model.moran_action(self.scaled_time, v, axis=axis) 
Example 14
Project: momi2   Author: popgenmethods   File: likelihood.py    GNU General Public License v3.0
def _mut_factor_het(sfs, demo, mut_rate, vector, p_missing):
    mut_rate = mut_rate * np.ones(sfs.n_loci)
    E_het = expected_heterozygosity(
        demo,
        restrict_to_pops=np.array(
            sfs.sampled_pops)[sfs.ascertainment_pop])

    p_missing = p_missing * np.ones(len(sfs.ascertainment_pop))
    p_missing = p_missing[sfs.ascertainment_pop]
    lambd = np.einsum("i,j->ij", mut_rate, E_het * (1.0 - p_missing))

    counts = sfs.avg_pairwise_hets[:, sfs.ascertainment_pop]
    ret = -lambd + counts * np.log(lambd) - scipy.special.gammaln(counts + 1)
    ret = ret * sfs.sampled_n[sfs.ascertainment_pop] / float(
        np.sum(sfs.sampled_n[sfs.ascertainment_pop]))
    if not vector:
        ret = np.sum(ret)
    else:
        ret = np.sum(ret, axis=1)
    return ret 
Example 15
Project: momi2   Author: popgenmethods   File: einsum2.py    GNU General Public License v3.0
def _sum_unique_axes(in_arr, in_sublist, *keep_subs):
    # assume no repeated subscripts
    assert len(in_sublist) == len(set(in_sublist))

    out_sublist = []
    sum_axes = []
    keep_subs = set([s for ks in keep_subs for s in ks])
    for idx, sub in enumerate(in_sublist):
        if sub in keep_subs:
            out_sublist.append(sub)
        else:
            sum_axes.append(idx)
    if sum_axes:
        return np.sum(in_arr, axis=tuple(sum_axes)), out_sublist
    else:
        return in_arr, out_sublist 
Example 16
Project: tangent   Author: google   File: test_hessian_vector_products.py    Apache License 2.0
def f_straightline(x):
  a = x * x * x
  b = a * x**2.0
  return np.sum(b) 
Example 17
Project: tangent   Author: google   File: test_hessian_vector_products.py    Apache License 2.0
def f_calltree(x):
  a = cube(x)
  b = a * x**2.0
  return np.sum(b) 
Example 18
Project: ParametricGP   Author: maziarraissi   File: Utilities.py    MIT License
def kernel(X, Xp, hyp):
    output_scale = np.exp(hyp[0])
    lengthscales = np.sqrt(np.exp(hyp[1:]))
    X = X/lengthscales
    Xp = Xp/lengthscales
    X_SumSquare = np.sum(np.square(X), axis=1)
    Xp_SumSquare = np.sum(np.square(Xp), axis=1)
    mul = np.dot(X, Xp.T)
    dists = X_SumSquare[:, np.newaxis] + Xp_SumSquare - 2.0 * mul
    return output_scale * np.exp(-0.5 * dists) 
Example 19
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License
def _loss(W, X, y, C, intercept=False, temp=1e-2):
        pred_score = LinearSVM._pred_score(X, W)
        if temp == 0:
            loss_pred = C * np.sum(hinge(y * pred_score))
        else:
            loss_pred = C * np.sum(smooth_hinge(y * pred_score, temp))
        if intercept:
            loss_reg = LinearSVM._l2_norm(W[:-1])
        else:
            loss_reg = LinearSVM._l2_norm(W)
        loss = loss_pred + loss_reg
        return loss 
Example 20
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def sum_log_loss(self, X=None, y=None, L2_alpha=None):
        return np.sum(self.log_losses(X=X,
                                      y=y,
                                      L2_alpha=L2_alpha)) 
Example 21
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def sum_pred_losses(self, X=None, y=None):
        return np.sum(self.pred_losses(X=X,
                                       y=y)) 
Example 22
Project: STSC   Author: wOOL   File: stsc_manopt.py    The Unlicense
def get_rotation_matrix(X, C):
    def cost(R):
        Z = npy.dot(X, R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)

    manifold = Stiefel(C, C)
    problem = Problem(manifold=manifold, cost=cost, verbosity=0)
    solver = SteepestDescent(logverbosity=0)
    opt = solver.solve(problem=problem, x=npy.eye(C))
    return cost(opt), opt 
Example 23
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_loss(W):
            y_pred = logistic_predictions(W, XMat)
            label_probabilities = y_pred * yMat + (1 - y_pred) * (1 - yMat)
            if self.reg is None:
                return -np.sum(np.log(label_probabilities))
            elif self.reg == 'l1':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*(np.abs(W[0:-1])))
            elif self.reg == 'l2':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*W[0:-1]*W[0:-1])
            else:
                raise ValueError("reg can only be None, 'l1' or 'l2'")

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim) * 0.1
        for it in range(max_iters + 1):
            loss = calc_loss(self.W)
            grad_params = grad_fun(self.W)
            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 24
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
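            # note: the elementwise sqrt of a square is an absolute value,
            # so this loss is the mean absolute error plus a ridge penalty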
            return np.sqrt((np.power(yMat - y_pred, 2))).mean() \
                        + np.sum(self.alpha * W[0:-1] * W[0:-1])

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 25
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
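            # note: the elementwise sqrt of a square is an absolute value,
            # so this loss is the mean absolute error plus a lasso penalty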
            return np.sqrt((np.power(yMat - y_pred, 2))).mean() \
                   + np.sum(self.alpha * np.abs(W[0:-1]))

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features + 1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 26
Project: autograd-forward   Author: BB-UCL   File: numpy_grads.py    MIT License
def forward_grad_np_var(g, ans, gvs, vs, x, axis=None, ddof=0, keepdims=False):
    if axis is None:
        if gvs.iscomplex:
            num_reps = gvs.size / 2
        else:
            num_reps = gvs.size
    elif isinstance(axis, int):
        num_reps = gvs.shape[axis]
    elif isinstance(axis, tuple):
        num_reps = anp.prod(anp.array(gvs.shape)[list(axis)])

    x_minus_mean = anp.conj(x - anp.mean(x, axis=axis, keepdims=True))
    return (2.0 * anp.sum(anp.real(g * x_minus_mean), axis=axis, keepdims=keepdims) /
            (num_reps - ddof)) 
Example 27
Project: autograd-forward   Author: BB-UCL   File: numpy_grads.py    MIT License
def fwd_grad_chooser(g, ans, gvs, vs, x, axis=None, keepdims=False):
    if anp.isscalar(x):
        return g
    if not keepdims:
        if isinstance(axis, int):
            ans = anp.expand_dims(ans, axis)
        elif isinstance(axis, tuple):
            for ax in sorted(axis):
                ans = anp.expand_dims(ans, ax)
    chosen_locations = x == ans
    return anp.sum(g * chosen_locations, axis=axis, keepdims=keepdims) 
Example 28
Project: autograd-forward   Author: BB-UCL   File: misc.py    MIT License
def make_fwd_grad_logsumexp(g, ans, gvs, vs, x, axis=None, b=1.0, keepdims=False):
    if not keepdims:
        if isinstance(axis, int):
            ans = anp.expand_dims(ans, axis)
        elif isinstance(axis, tuple):
            for ax in sorted(axis):
                ans = anp.expand_dims(ans, ax)
    return anp.sum(g * b * anp.exp(x - ans), axis=axis, keepdims=keepdims) 
Example 29
Project: autograd-forward   Author: BB-UCL   File: test_wrappers.py    MIT License
def test_hessian_vector_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5)
    v = npr.randn(5)
    H = hessian(fun)(a)
    check_equivalent(np.dot(H, v), hessian_vector_product(fun)(a, v)) 
Example 30
Project: autograd-forward   Author: BB-UCL   File: test_wrappers.py    MIT License
def test_hessian_tensor_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4, 3)
    H = hessian(fun)(a)
    check_equivalent(np.tensordot(H, V, axes=np.ndim(V)), hessian_vector_product(fun)(a, V)) 
Example 31
Project: autograd-forward   Author: BB-UCL   File: test_wrappers.py    MIT License
def test_fwd_rev_hessian_vector_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5)
    v = npr.randn(5)
    H = hessian(fun)(a)
    check_equivalent(np.dot(H, v), hessian_vector_product(fun, method='fwd-rev')(a, v)) 
Example 32
Project: autograd-forward   Author: BB-UCL   File: test_wrappers.py    MIT License
def test_fwd_rev_hessian_matrix_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5, 4)
    V = npr.randn(5, 4)
    H = hessian(fun)(a)
    check_equivalent(np.tensordot(H, V), hessian_vector_product(fun, method='fwd-rev')(a, V)) 
Example 33
Project: autograd-forward   Author: BB-UCL   File: test_wrappers.py    MIT License
def test_fwd_rev_hessian_tensor_product():
    fun = lambda a: np.sum(np.sin(a))
    a = npr.randn(5, 4, 3)
    V = npr.randn(5, 4, 3)
    H = hessian(fun)(a)
    check_equivalent(np.tensordot(H, V, axes=np.ndim(V)), hessian_vector_product(fun, method='fwd-rev')(a, V)) 
Example 34
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_sum():  stat_check(np.sum) 
Example 35
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_nan_to_num():
    y = np.array([0., np.nan, np.inf, -np.inf])
    fun = lambda x: np.sum(np.sin(np.nan_to_num(x + y)))
    x = np.random.randn(4)
    combo_check(fun, [0], [x]) 
Example 36
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def link_log_lik(w, q, ln_q, ln_1_q, ln_s):

    w = w.reshape(-1, 3)

    a = -numpy.exp(w[:, 0]).reshape(-1, 1)

    b = numpy.exp(w[:, 1]).reshape(-1, 1)

    c = w[:, 2].reshape(-1, 1)

    tmp_sum = a * ln_q + c + b * ln_1_q

    tmp_exp = numpy.exp(tmp_sum)

    tmp_de = numpy.where(tmp_exp.ravel() <= 1e-16,
                         2 * numpy.log(1 + tmp_exp.ravel()),
                         2 * (tmp_sum.ravel() + numpy.log(1 + 1 / tmp_exp.ravel()))).reshape(-1, 1)

    ln_s_hat = ln_s + tmp_sum + numpy.log((a + b) * q - a) - ln_q - ln_1_q - tmp_de

    # L = numpy.sum(ln_s_hat)

    # if numpy.isnan(L):
    #     import pdb; pdb.set_trace()
    #
    # if numpy.isinf(L):
    #     import pdb; pdb.set_trace()

    # print([numpy.mean(ln_s), numpy.mean(ln_s_hat)])

    return ln_s_hat 
Example 37
Project: DistCal   Author: Srceh   File: GP_Beta_cal.py    Apache License 2.0
def mc_link_lik(w, mu_shift, q, ln_q, ln_1_q, ln_s):

    n = numpy.shape(q)[0]

    w_a = w[:, numpy.arange(0, n)*3]

    w_b = w[:, numpy.arange(0, n)*3+1]

    a = -numpy.exp(w_a / mu_shift[0] + mu_shift[1])

    b = numpy.exp(w_b / mu_shift[2] + mu_shift[3])

    c = w[:, numpy.arange(0, n)*3+2] / mu_shift[4] + mu_shift[5]

    tmp_sum = a * ln_q.ravel() + b * ln_1_q.ravel() + c

    tmp_de = numpy.where(tmp_sum <= 0,
                         2 * numpy.log(1 + numpy.exp(tmp_sum)),
                         2 * (tmp_sum + numpy.log(1 + 1 / (numpy.exp(tmp_sum)))))

    ln_s_hat = (tmp_sum + numpy.log((a + b) * q.ravel() - a) - ln_q.ravel() - ln_1_q.ravel() - tmp_de) + ln_s.ravel()

    mean_exp = numpy.mean(numpy.exp(ln_s_hat), axis=0)

    ln_mean_s_hat = numpy.where(mean_exp > 0, numpy.log(mean_exp), numpy.log(1e-16))

    link_ll = numpy.sum(ln_mean_s_hat)

    return link_ll 
Example 38
Project: qoc   Author: SchusterLab   File: controlvariation.py    MIT License
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # Normalize the controls by their maximum norms, if provided;
        # dividing by None would raise a TypeError.
        if self.max_control_norms is None:
            normalized_controls = controls
        else:
            normalized_controls = controls / self.max_control_norms

        # Penalize the square of the absolute value of the difference
        # in value of the control parameters from one step to the next.
        diffs = anp.diff(normalized_controls, axis=0, n=self.order)
        cost = anp.sum(anp.real(diffs * anp.conjugate(diffs)))
        # You can prove that the square of the complex modulus of the difference
        # between two complex values is l.t.e. 2 if the complex modulus
        # of the two complex values is l.t.e. 1 respectively using the
        # triangle inequality. This fact generalizes for higher order differences.
        # Therefore, a factor of 2 should be used to normalize the diffs.
        cost_normalized = cost / self.cost_normalization_constant

        return cost_normalized * self.cost_multiplier 
Example 39
Project: qoc   Author: SchusterLab   File: controlbandwidthmax.py    MIT License
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        cost = 0
        # Iterate over the controls, penalize each control that has
        # frequencies greater than its maximum frequency.
        for i, max_bandwidth in enumerate(self.max_bandwidths):
            control_fft = anp.fft.fft(controls[:, i])
            control_fft_sq  = anp.abs(control_fft)
            penalty_freq_indices = anp.nonzero(self.freqs >= max_bandwidth)[0]
            penalized_ffts = control_fft_sq[penalty_freq_indices]
            penalty = anp.sum(penalized_ffts)
            penalty_normalized = penalty / (penalty_freq_indices.shape[0] * anp.max(penalized_ffts))
            cost = cost + penalty_normalized
        cost_normalized = cost / self.control_count

        return cost_normalized * self.cost_multiplier 
Example 40
Project: qoc   Author: SchusterLab   File: controlnorm.py    MIT License
def cost(self, controls, states, system_eval_step):
        """
        Compute the penalty.

        Arguments:
        controls
        states
        system_eval_step

        Returns:
        cost
        """
        # Normalize the controls.
        if self.max_control_norms is not None:
            controls = controls / self.max_control_norms
            
        # Weight the controls.
        if self.control_weights is not None:
            controls = controls[:,] * self.control_weights

        # The cost is the sum of the square of the modulus of the normalized,
        # weighted, controls.
        cost = anp.sum(anp.real(controls * anp.conjugate(controls)))
        cost_normalized = cost / self.controls_size
        
        return cost_normalized * self.cost_multiplier 
Example 41
Project: kernel-activation-functions   Author: ispamm   File: kafnets.py    MIT License
def predict_kaf_nn(w, X, info):
    """
    Compute the outputs of a KAF feedforward network.
    """
    
    D, gamma = info
    for W, b, alpha in w:
        outputs = np.dot(X, W) + b
        K = gauss_kernel(outputs, D, gamma)
        X = np.sum(K*alpha, axis=2)
    return X 
Example 42
Project: ReducedVarianceReparamGradients   Author: andymiller   File: frisk.py    MIT License
def process_dataset():
    data_dir = os.path.dirname(__file__)
    df = pd.read_csv(os.path.join(data_dir, 'data/frisk/frisk_with_noise.dat'), skiprows=6, delim_whitespace=True)

    # compute proportion black in precinct, black = 1
    # first aggregate by precinct/ethnicity, and sum over populations
    popdf = df[['pop', 'precinct', 'eth']]. \
                groupby(['precinct', 'eth'])['pop'].apply(sum)
    percent_black = np.array([popdf[i][1] / float(popdf[i].sum())
                              for i in range(1, 76)])
    precinct_type = pd.cut(percent_black, [0, .1, .4, 1.])  #
    df['precinct_type'] = precinct_type.codes[df.precinct.values-1]
    return df 
Example 43
Project: ReducedVarianceReparamGradients   Author: andymiller   File: frisk.py    MIT License
def normal_lnpdf(x, mean, ln_std):
    x = np.atleast_2d(x)
    D = x.shape[1]
    dcoef = 1.
    if ln_std.shape[1] != D:
        dcoef = D
    qterm = -.5 * np.sum((x - mean)**2 / np.exp(2.*ln_std), axis=1)
    coef  = -.5*D * np.log(2.*np.pi) - dcoef * np.sum(ln_std, axis=1)
    return qterm + coef 
Example 44
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def mvn_diag_logpdf(x, mean, log_std):
    D = len(mean)
    qterm = -.5 * np.sum((x - mean)**2 / np.exp(2.*log_std), axis=1)
    coef  = -.5*D * np.log(2.*np.pi) - np.sum(log_std)
    return qterm + coef 
Example 45
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def mvn_diag_entropy(log_std):
    D = len(log_std)
    return .5 * (D*np.log(2*np.pi*np.e) + np.sum(2*log_std)) 
Example 46
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def mvn_logpdf(x, mean, icholSigma):
    D     = len(mean)
    coef  = -.5*D*np.log(2.*np.pi)
    dterm = np.sum(np.log(np.diag(icholSigma)))
    white = np.dot(np.atleast_2d(x) - mean, icholSigma.T)
    qterm = -.5*np.sum(white**2, axis=1)
    ll = coef + dterm + qterm
    if len(ll) == 1:
        return ll[0]
    return ll 
Example 47
Project: ReducedVarianceReparamGradients   Author: andymiller   File: misc.py    MIT License
def unconstrained_to_simplex(rhos):
    rhosf = np.concatenate([rhos, [0.]])
    pis   = np.exp(rhosf) / np.sum(np.exp(rhosf))
    return pis 
Example 48
Project: document_classification   Author: scotthlee   File: tools.py    Apache License 2.0
def sens(guesses, targets):
	tp = np.sum(np.logical_and(guesses==1, targets==1))
	fn = np.sum(np.logical_and(guesses==0, targets==1))
	return np.true_divide(tp, tp + fn) 
Example 49
Project: document_classification   Author: scotthlee   File: tools.py    Apache License 2.0
def spec(guesses, targets):
	tn = np.sum(np.logical_and(guesses==0, targets==0))
	fp = np.sum(np.logical_and(guesses==1, targets==0))
	return np.true_divide(tn, tn + fp) 
Example 50
Project: document_classification   Author: scotthlee   File: tools.py    Apache License 2.0
def ppv(guesses, targets):
	tp = np.sum(np.logical_and(guesses==1, targets==1))
	fp = np.sum(np.logical_and(guesses==1, targets==0))
	return np.true_divide(tp, tp + fp)