Python scipy.special.logsumexp() Examples

The following are 30 code examples of scipy.special.logsumexp(), drawn from open-source projects; the originating project and source file are noted above each example. You may also want to check out all other available functions and classes of the scipy.special module.
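Before the examples, here is a minimal illustration (not taken from any of the projects below) of why logsumexp is preferred over computing log(sum(exp(a))) naively:

import numpy as np
from scipy.special import logsumexp

a = np.array([1000.0, 1000.0])
print(logsumexp(a))                 # 1000.693..., i.e. 1000 + log(2)
print(np.log(np.sum(np.exp(a))))    # inf: the naive computation overflows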
Example #1
Source File: baseline_utils.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]

        stimulus_id = get_image_hash(stimulus)
        stimulus_index = self.stimuli.stimulus_ids.index(stimulus_id)

        #fixations = self.fixations[self.fixations.n == stimulus_index]
        inds = self.fixations.n != stimulus_index

        ZZ = np.zeros(shape)

        _fixations = np.array([self.ys[inds]*shape[0], self.xs[inds]*shape[1]]).T
        fill_fixation_map(ZZ, _fixations)
        ZZ = gaussian_filter(ZZ, [self.bandwidth*shape[0], self.bandwidth*shape[1]])
        ZZ *= (1-self.eps)
        ZZ += self.eps * 1.0/(shape[0]*shape[1])
        ZZ = np.log(ZZ)

        ZZ -= logsumexp(ZZ)
        #ZZ -= np.log(np.exp(ZZ).sum())

        return ZZ 
Example #2
Source File: sams.py    From openmmtools with MIT License
def _global_jump(self, replicas_log_P_k):
        """
        Global jump scheme.
        This method is described after Eq. 3 in [2]
        """
        n_replica, n_states = self.n_replicas, self.n_states
        for replica_index, current_state_index in enumerate(self._replica_thermodynamic_states):
            neighborhood = self._neighborhood(current_state_index)

            # Compute unnormalized log probabilities for all thermodynamic states.
            log_P_k = np.zeros([n_states], np.float64)
            for state_index in neighborhood:
                u_k = self._energy_thermodynamic_states[replica_index, :]
                log_P_k[state_index] = -u_k[state_index] + self.log_weights[state_index]
            log_P_k -= logsumexp(log_P_k)

            # Update sampler Context to current thermodynamic state.
            P_k = np.exp(log_P_k[neighborhood])
            new_state_index = np.random.choice(neighborhood, p=P_k)
            self._replica_thermodynamic_states[replica_index] = new_state_index

            # Accumulate statistics.
            replicas_log_P_k[replica_index,:] = log_P_k[:]
            self._n_proposed_matrix[current_state_index, neighborhood] += 1
            self._n_accepted_matrix[current_state_index, new_state_index] += 1 
Example #3
Source File: test_loss.py    From pygbm with MIT License
def test_logsumexp():
    # check consistency with scipy's version
    rng = np.random.RandomState(0)
    for _ in range(100):
        a = rng.uniform(0, 1000, size=1000)
        assert_almost_equal(logsumexp(a), _logsumexp(a))

    # Test whether logsumexp() function correctly handles large inputs.
    # (from scipy tests)

    b = np.array([1000, 1000])
    desired = 1000.0 + np.log(2.0)
    assert_almost_equal(_logsumexp(b), desired)

    n = 1000
    b = np.full(n, 10000, dtype='float64')
    desired = 10000.0 + np.log(n)
    assert_almost_equal(_logsumexp(b), desired) 
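The _logsumexp being tested here is pygbm's own helper. As a rough sketch (my illustration, not pygbm's actual code), a stable implementation shifts by the maximum before exponentiating:

import numpy as np

def _logsumexp(a):
    # Subtract the maximum so np.exp never overflows, then add it back.
    a = np.asarray(a, dtype=np.float64)
    a_max = np.max(a)
    return a_max + np.log(np.sum(np.exp(a - a_max)))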
Example #4
Source File: test_stats_utils.py    From arviz with Apache License 2.0
def test_logsumexp_b(ary_dtype, axis, b, keepdims):
    """Test ArviZ implementation of logsumexp.

    Test also compares against the SciPy implementation.
    When b=None the two are equal (N=len(ary)); when b=x, with x equal to
    1/(number of elements), they are almost equal. The test exercises the
    b parameter.
    """
    ary = np.random.randn(100, 101).astype(ary_dtype)  # pylint: disable=no-member
    assert _logsumexp(ary=ary, axis=axis, b=b, keepdims=keepdims, copy=True) is not None
    ary = ary.copy()
    assert _logsumexp(ary=ary, axis=axis, b=b, keepdims=keepdims, copy=False) is not None
    out = np.empty(5)
    assert _logsumexp(ary=np.random.randn(10, 5), axis=0, out=out) is not None

    # Scipy implementation
    scipy_results = logsumexp(ary, b=b, axis=axis, keepdims=keepdims)
    arviz_results = _logsumexp(ary, b=b, axis=axis, keepdims=keepdims)

    assert_array_almost_equal(scipy_results, arviz_results) 
Example #5
Source File: nb_sklearn.py    From recordlinkage with BSD 3-Clause "New" or "Revised" License
def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        jll = self._joint_log_likelihood(X)

        # normalize by P(x) = P(f_1, ..., f_n)
        log_prob_x = logsumexp(jll, axis=1)  # shape = (n_samples,)

        return jll - np.atleast_2d(log_prob_x).T 
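For intuition (toy numbers, not from recordlinkage): subtracting the row-wise logsumexp from a joint log-likelihood matrix yields log-posteriors whose exponentials sum to one per sample:

import numpy as np
from scipy.special import logsumexp

jll = np.log(np.array([[0.2, 0.6], [0.9, 0.3]]))  # unnormalized joint log-likelihoods
log_prob_x = logsumexp(jll, axis=1)                # shape (n_samples,)
log_posterior = jll - np.atleast_2d(log_prob_x).T
print(np.exp(log_posterior).sum(axis=1))           # ~[1. 1.]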
Example #6
Source File: corextopic.py    From corex_topic with Apache License 2.0
def __init__(self, n_hidden=2, max_iter=200, eps=1e-5, seed=None, verbose=False, count='binarize',
                 tree=True, **kwargs):
        self.n_hidden = n_hidden  # Number of hidden factors to use (Y_1,...Y_m) in paper
        self.max_iter = max_iter  # Maximum number of updates to run, regardless of convergence
        self.eps = eps  # Change to signal convergence
        self.tree = tree
        np.random.seed(seed)  # Set seed for deterministic results
        self.verbose = verbose
        self.t = 20  # Initial softness of the soft-max function for alpha (see NIPS paper [1])
        self.count = count  # Which strategy, if necessary, for binarizing count data
        if verbose > 0:
            np.set_printoptions(precision=3, suppress=True, linewidth=200)
            print('corex, rep size:', n_hidden)
        if verbose:
            np.seterr(all='warn')
            # Can change to 'raise' if you are worried to see where the errors are
            # Locally, I "ignore" underflow errors in logsumexp that appear innocuous (probabilities near 0)
        else:
            np.seterr(all='ignore') 
Example #7
Source File: utils.py    From choix with MIT License
def log_likelihood_network(
        digraph, traffic_in, traffic_out, params, weight=None):
    """
    Compute the log-likelihood of model parameters.

    If ``weight`` is not ``None``, the log-likelihood is correct only up to a
    constant (independent of the parameters).
    """
    loglik = 0
    for i in range(len(traffic_in)):
        loglik += traffic_in[i] * params[i]
        if digraph.out_degree(i) > 0:
            neighbors = list(digraph.successors(i))
            if weight is None:
                loglik -= traffic_out[i] * logsumexp(params.take(neighbors))
            else:
                weights = [digraph[i][j][weight] for j in neighbors]
                loglik -= traffic_out[i] * logsumexp(
                        params.take(neighbors), b=weights)
    return loglik 
Example #8
Source File: test_logsumexp.py    From GraphicDesignPatternByPython with MIT License
def test_logsumexp_b():
    a = np.arange(200)
    b = np.arange(200, 0, -1)
    desired = np.log(np.sum(b*np.exp(a)))
    assert_almost_equal(logsumexp(a, b=b), desired)

    a = [1000, 1000]
    b = [1.2, 1.2]
    desired = 1000 + np.log(2 * 1.2)
    assert_almost_equal(logsumexp(a, b=b), desired)

    x = np.array([1e-40] * 100000)
    b = np.linspace(1, 1000, 100000)
    logx = np.log(x)

    X = np.vstack((x, x))
    logX = np.vstack((logx, logx))
    B = np.vstack((b, b))
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
                                (B * X).sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
                                (B * X).sum(axis=1)) 
Example #9
Source File: baseline_utils.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]
        if shape not in self.shape_cache:
            ZZ = np.zeros(shape)
            height, width = shape
            if self.keep_aspect:
                max_size = max(height, width)
                y_factor = max_size
                x_factor = max_size
            else:
                y_factor = height
                x_factor = width
            _fixations = np.array([self.ys*y_factor, self.xs*x_factor]).T
            fill_fixation_map(ZZ, _fixations)
            ZZ = gaussian_filter(ZZ, [self.bandwidth*y_factor, self.bandwidth*x_factor])
            ZZ *= (1-self.eps)
            ZZ += self.eps * 1.0/(shape[0]*shape[1])
            ZZ = np.log(ZZ)

            ZZ -= logsumexp(ZZ)
            self.shape_cache[shape] = ZZ

        return self.shape_cache[shape] 
Example #10
Source File: relative_setup.py    From perses with MIT License
def ESS(works_prev, works_incremental):
        """
        compute the effective sample size (ESS) as given in Eq 3.15 in https://arxiv.org/abs/1303.3123.

        Parameters
        ----------
        works_prev: np.array
            np.array of floats representing the accumulated works at t-1 (unnormalized)
        works_incremental: np.array
            np.array of floats representing the incremental works at t (unnormalized)

        Returns
        -------
        ESS: float
            effective sample size
        """
        prev_weights_normalized = np.exp(-works_prev - logsumexp(-works_prev))
        #_logger.debug(f"\t\tnormalized weights: {prev_weights_normalized}")
        incremental_weights_unnormalized = np.exp(-works_incremental)
        #_logger.debug(f"\t\tincremental weights (unnormalized): {incremental_weights_unnormalized}")
        ESS = np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(np.power(prev_weights_normalized, 2), np.power(incremental_weights_unnormalized, 2))
        #_logger.debug(f"\t\tESS: {ESS}")
        return ESS 
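A quick sanity check of this estimator with made-up numbers (not from perses): when all previous works are equal the normalized weights are uniform, and equal incremental works then give the maximum ESS, namely the number of particles:

import numpy as np
from scipy.special import logsumexp

works_prev = np.array([1.0, 1.0, 1.0, 1.0])
works_incremental = np.array([0.5, 0.5, 0.5, 0.5])

w = np.exp(-works_prev - logsumexp(-works_prev))   # uniform weights, 0.25 each
u = np.exp(-works_incremental)
ess = np.dot(w, u)**2 / np.dot(w**2, u**2)
print(ess)                                         # ~4.0, i.e. the number of particles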
Example #11
Source File: relative_setup.py    From perses with MIT License
def CESS(works_prev, works_incremental):
        """
        compute the conditional effective sample size (CESS) as given in Eq 3.16 in https://arxiv.org/abs/1303.3123.

        Parameters
        ----------
        works_prev: np.array
            np.array of floats representing the accumulated works at t-1 (unnormalized)
        works_incremental: np.array
            np.array of floats representing the incremental works at t (unnormalized)

        Returns
        -------
        CESS: float
            conditional effective sample size
        """
        prev_weights_normalization = np.exp(logsumexp(-works_prev))
        prev_weights_normalized = np.exp(-works_prev) / prev_weights_normalization
        #_logger.debug(f"\t\tnormalized weights: {prev_weights_normalized}")
        incremental_weights_unnormalized = np.exp(-works_incremental)
        #_logger.debug(f"\t\tincremental weights (unnormalized): {incremental_weights_unnormalized}")
        N = len(prev_weights_normalized)
        CESS = N * np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(prev_weights_normalized, np.power(incremental_weights_unnormalized, 2))
        #_logger.debug(f"\t\tCESS: {CESS}")
        return CESS 
Example #12
Source File: models.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        smap = self.parent_model.log_density(stimulus)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)

            assert smap.shape == target_shape

        return smap 
Example #13
Source File: utils.py    From perses with MIT License
def ESS(works_prev, works_incremental):
    """
    compute the effective sample size (ESS) as given in Eq 3.15 in https://arxiv.org/abs/1303.3123.
    Parameters
    ----------
    works_prev: np.array
        np.array of floats representing the accumulated works at t-1 (unnormalized)
    works_incremental: np.array
        np.array of floats representing the incremental works at t (unnormalized)

    Returns
    -------
    normalized_ESS: float
        effective sample size
    """
    prev_weights_normalized = np.exp(-works_prev - logsumexp(-works_prev))
    incremental_weights_unnormalized = np.exp(-works_incremental)
    ESS = np.dot(prev_weights_normalized, incremental_weights_unnormalized)**2 / np.dot(np.power(prev_weights_normalized, 2), np.power(incremental_weights_unnormalized, 2))
    normalized_ESS = ESS / len(prev_weights_normalized)
    assert 0.0 - DISTRIBUTED_ERROR_TOLERANCE <= normalized_ESS <= 1.0 + DISTRIBUTED_ERROR_TOLERANCE, \
        f"the normalized ESS ({normalized_ESS}) is not between 0 and 1"
    return normalized_ESS 
Example #14
Source File: utils.py    From perses with MIT License
def multinomial_resample(total_works, num_resamples):
    """
    from a numpy array of total works and particle_labels, resample the particle indices N times with replacement
    from a multinomial distribution conditioned on the weights w_i \propto e^{-cumulative_works_i}
    Parameters
    ----------
    total_works : np.array of floats
        generalized accumulated works at time t for all particles
    num_resamples : int, default len(sampler_states)
        number of resamples to conduct; default doesn't change the number of particles

    Returns
    -------
    resampled_works : np.array([1.0/num_resamples]*num_resamples)
        resampled works (uniform)
    resampled_indices : np.array of ints
        resampled indices
    """
    normalized_weights = np.exp(-total_works - logsumexp(-total_works))
    resampled_indices = np.random.choice(len(normalized_weights), num_resamples, p=normalized_weights, replace = True)
    resampled_works = np.array([np.average(total_works)] * num_resamples)

    return resampled_works, resampled_indices 
Example #15
Source File: models.py    From pysaliency with MIT License
def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None):
        smap = self.parent_model.conditional_log_density(stimulus, x_hist, y_hist, t_hist, attributes=attributes, out=out)

        target_shape = (stimulus.shape[0],
                        stimulus.shape[1])

        if smap.shape != target_shape:
            if self.verbose:
                print("Resizing saliency map", smap.shape, target_shape)
            x_factor = target_shape[1] / smap.shape[1]
            y_factor = target_shape[0] / smap.shape[0]

            smap = zoom(smap, [y_factor, x_factor], order=1, mode='nearest')

            smap -= logsumexp(smap)

            assert smap.shape == target_shape

        return smap 
Example #16
Source File: marginalized_gaussian_noise.py    From gwin with GNU General Public License v3.0
def _margdistphase_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over distance and
        phase.
        """
        logl = numpy.log(special.i0(mf_snr))
        logl_marg = logl/self._dist_array
        opt_snr_marg = opt_snr/self._dist_array**2
        return special.logsumexp(logl_marg - 0.5*opt_snr_marg,
                                 b=self._deltad*self.dist_prior) 
Example #17
Source File: nca_scipy.py    From curriculum with GNU General Public License v3.0
def nca_cost(self, A, X, Y):
        '''
        return the loss and gradients of NCA given A X Y
        @params A : 1-d numpy.array (high_dims * low_dims, )
        @params X : 2-d numpy.array (n_samples, high_dims)
        @params Y : 1-d numpy.array (n_samples, )
        '''
        # reshape A
        A = A.reshape((self.high_dims, self.low_dims))

        # to low dimension
        low_X = np.dot(X, A)

        # distance matrix-->proba_matrix
        pij_mat = pairwise_distances(low_X, squared = True)
        np.fill_diagonal(pij_mat, np.inf)
        pij_mat = np.exp(0.0 - pij_mat - logsumexp(0.0 - pij_mat, axis = 1)[:, None])

        # mask where mask_{ij} = True if Y[i] == Y[j], shape = (n_samples, n_samples)
        mask = (Y == Y[:, None])                                                        # (n_samples, n_samples)
        # mask array
        pij_mat_mask = pij_mat * mask                                                   # (n_samples, n_samples)
        # pi = \sum_{j \in C_i} p_{ij}
        pi_arr = np.array(np.sum(pij_mat_mask, axis = 1))                               # (n_samples, )
        # target
        self.target = np.sum(pi_arr)

        # gradients
        weighted_pij = pij_mat_mask - pij_mat * pi_arr[:, None]                             # (n_samples, n_samples)
        weighted_pij_sum = weighted_pij + weighted_pij.transpose()                          # (n_samples, n_samples)
        np.fill_diagonal(weighted_pij_sum, -weighted_pij.sum(axis = 0))

        gradients = 2 * (low_X.transpose().dot(weighted_pij_sum)).dot(X).transpose()        # (high_dims, low_dims)
        gradients = 0.0 - gradients

        self.n_steps += 1
        if self.verbose:
            print("Training, step = {}, target = {}...".format(self.n_steps, self.target))

        return [-self.target, gradients.ravel()] 
Example #18
Source File: supervised.py    From indosum with Apache License 2.0
def _compute_gamma(self, obs: Sequence[np.ndarray]) -> np.ndarray:
        alpha = self._forward(obs)
        beta = self._backward(obs)
        omega = logsumexp(alpha[-1])
        return alpha + beta - omega 
Example #19
Source File: marginalized_gaussian_noise.py    From gwin with GNU General Public License v3.0
def _margtimephasedist_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over time, phase and
        distance.
        """
        logl = special.logsumexp(numpy.log(special.i0(mf_snr)),
                                 b=self._deltat)
        logl_marg = logl/self._dist_array
        opt_snr_marg = opt_snr/self._dist_array**2
        return special.logsumexp(logl_marg - 0.5*opt_snr_marg,
                                 b=self._deltad*self.dist_prior) 
Example #20
Source File: marginalized_gaussian_noise.py    From gwin with GNU General Public License v3.0
def _margtimedist_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over time and
        distance.
        """
        logl = special.logsumexp(mf_snr, b=self._deltat)
        logl_marg = logl/self._dist_array
        opt_snr_marg = opt_snr/self._dist_array**2
        return special.logsumexp(logl_marg - 0.5*opt_snr_marg,
                                 b=self._deltad*self.dist_prior) 
Example #21
Source File: marginalized_gaussian_noise.py    From gwin with GNU General Public License v3.0
def _margtimephase_loglr(self, mf_snr, opt_snr):
        """Returns the log likelihood ratio marginalized over time and phase.
        """
        return special.logsumexp(numpy.log(special.i0(mf_snr)),
                                 b=self._deltat) - 0.5*opt_snr 
Example #22
Source File: estimators.py    From nelpy with MIT License
def score_samples(self, X, y, lengths=None):
        raise NotImplementedError


# def decode_bayes_from_ratemap_1d(X, ratemap, dt, xmin, xmax, bin_centers):
#     """
#     X has been standardized to (n_samples, n_units), where each sample is a singleton window
#     """
#     n_samples, n_features = X.shape
#     n_units, n_xbins = ratemap.shape

#     assert n_features == n_units, "X has {} units, whereas ratemap has {}".format(n_features, n_units)

#     lfx = np.log(ratemap)
#     eterm = -ratemap.sum(axis=0)*dt

#     posterior = np.empty((n_xbins, n_samples))
#     posterior[:] = np.nan

#     # decode each sample / bin separately
#     for tt in range(n_samples):
#         obs = X[tt]
#         if obs.sum() > 0:
#             posterior[:,tt] = (np.tile(np.array(obs, ndmin=2).T, n_xbins) * lfx).sum(axis=0) + eterm

#     # normalize posterior:
#     posterior = np.exp(posterior - logsumexp(posterior, axis=0))

#     mode_pth = np.argmax(posterior, axis=0)*xmax/n_xbins
#     mode_pth = np.where(np.isnan(posterior.sum(axis=0)), np.nan, mode_pth)
#     mean_pth = (bin_centers * posterior.T).sum(axis=1)

#     return posterior, mode_pth, mean_pth 
Example #23
Source File: baseline_utils.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        shape = stimulus.shape[0], stimulus.shape[1]

        stimulus_id = get_image_hash(stimulus)
        stimulus_index = self.stimuli.stimulus_ids.index(stimulus_id)

        #fixations = self.fixations[self.fixations.n == stimulus_index]
        inds = self.fixations.n == stimulus_index

        if not inds.sum():
            return UniformModel().log_density(stimulus)

        ZZ = np.zeros(shape)
        if self.keep_aspect:
            height, width = shape
            max_size = max(width, height)
            x_factor = max_size
            y_factor = max_size
        else:
            x_factor = shape[1]
            y_factor = shape[0]
        _fixations = np.array([self.ys[inds]*y_factor, self.xs[inds]*x_factor]).T
        fill_fixation_map(ZZ, _fixations)
        ZZ = gaussian_filter(ZZ, [self.bandwidth*y_factor, self.bandwidth*x_factor])
        ZZ *= (1-self.eps)
        ZZ += self.eps * 1.0/(shape[0]*shape[1])
        ZZ = np.log(ZZ)

        ZZ -= logsumexp(ZZ)
        #ZZ -= np.log(np.exp(ZZ).sum())

        return ZZ 
Example #24
Source File: models.py    From pysaliency with MIT License
def _resize_prediction(self, prediction, target_shape):
        if prediction.shape != target_shape:
            x_factor = target_shape[1] / prediction.shape[1]
            y_factor = target_shape[0] / prediction.shape[0]

            prediction = zoom(prediction, [y_factor, x_factor], order=1, mode='nearest')

            prediction -= logsumexp(prediction)

            assert prediction.shape == target_shape

        return prediction 
Example #25
Source File: models.py    From pysaliency with MIT License
def conditional_log_density(self, stimulus, x_hist, y_hist, t_hist, attributes=None, out=None):
        log_densities = []
        for i, model in enumerate(self.models):
            log_density = model.conditional_log_density(stimulus, x_hist, y_hist, t_hist, attributes=attributes).copy()
            log_density += np.log(self.weights[i])
            log_densities.append(log_density)

        log_density = logsumexp(log_densities, axis=0)
        if self.check_norm:
            np.testing.assert_allclose(np.exp(log_density).sum(), 1.0, rtol=1e-7)
        if not log_density.shape == (stimulus.shape[0], stimulus.shape[1]):
            raise ValueError('wrong density shape in mixture model! stimulus shape: ({}, {}), density shape: {}'.format(stimulus.shape[0], stimulus.shape[1], log_density.shape))
        return log_density 
Example #26
Source File: models.py    From pysaliency with MIT License
def _log_density(self, stimulus):
        log_densities = []
        for i, model in enumerate(self.models):
            log_density = model.log_density(stimulus).copy()
            log_density += np.log(self.weights[i])
            log_densities.append(log_density)

        log_density = logsumexp(log_densities, axis=0)
        np.testing.assert_allclose(np.exp(log_density).sum(), 1.0, rtol=1e-7)
        if not log_density.shape == (stimulus.shape[0], stimulus.shape[1]):
            raise ValueError('wrong density shape in mixture model! stimulus shape: ({}, {}), density shape: {}'.format(stimulus.shape[0], stimulus.shape[1], log_density.shape))
        return log_density 
Example #27
Source File: opt.py    From choix with MIT License
def objective(self, params):
        """Compute the negative penalized log-likelihood."""
        val = self._penalty * np.sum(params**2)
        for winner, losers in self._data:
            idx = np.append(winner, losers)
            val += logsumexp(params.take(idx) - params[winner])
        return val 
Example #28
Source File: utils.py    From choix with MIT License
def log_likelihood_top1(data, params):
    """Compute the log-likelihood of model parameters."""
    loglik = 0
    params = np.asarray(params)
    for winner, losers in data:
        idx = np.append(winner, losers)
        loglik -= logsumexp(params.take(idx) - params[winner])
    return loglik 
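As a toy usage example (data made up here, not from choix): with two items of equal strength, a single observed win has probability 1/2, so the log-likelihood is log(0.5):

import numpy as np
from scipy.special import logsumexp

params = np.array([0.0, 0.0])   # two items of equal strength
data = [(0, [1])]               # item 0 beats item 1
loglik = 0.0
for winner, losers in data:
    idx = np.append(winner, losers)
    loglik -= logsumexp(params.take(idx) - params[winner])
print(loglik)                   # -0.693..., i.e. log(0.5)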
Example #29
Source File: utils.py    From choix with MIT License
def log_likelihood_rankings(data, params):
    """Compute the log-likelihood of model parameters."""
    loglik = 0
    params = np.asarray(params)
    for ranking in data:
        for i, winner in enumerate(ranking[:-1]):
            loglik -= logsumexp(params.take(ranking[i:]) - params[winner])
    return loglik 
Example #30
Source File: ep.py    From choix with MIT License
def _match_moments_logit(mean_cav, cov_cav):
    # Adapted from the GPML function `likLogistic.m`.
    # First use a scale mixture.
    lambdas = sqrt(2) * np.array([0.44, 0.41, 0.40, 0.39, 0.36])
    cs = np.array([
      1.146480988574439e+02,
      -1.508871030070582e+03,
      2.676085036831241e+03,
      -1.356294962039222e+03,
      7.543285642111850e+01
    ])
    arr1, arr2, arr3 = np.zeros(5), np.zeros(5), np.zeros(5)
    for i, x in enumerate(lambdas):
        arr1[i], arr2[i], arr3[i] = _match_moments_probit(
                x * mean_cav, x * x * cov_cav)
    logpart1 = logsumexp(arr1, b=cs)
    dlogpart1 = (np.dot(np.exp(arr1) * arr2, cs * lambdas)
                 / np.dot(np.exp(arr1), cs))
    d2logpart1 = (np.dot(np.exp(arr1) * (arr2 * arr2 + arr3),
                         cs * lambdas * lambdas)
                  / np.dot(np.exp(arr1), cs)) - (dlogpart1 * dlogpart1)
    # Tail decays linearly in the log domain (and not quadratically).
    exponent = -10.0 * (abs(mean_cav) - (196.0 / 200.0) * cov_cav - 4.0) 
    if exponent < 500:
        lambd = 1.0 / (1.0 + exp(exponent))
        logpart2 = min(cov_cav / 2.0 - abs(mean_cav), -0.1)
        dlogpart2 = 1.0
        if mean_cav > 0:
            logpart2 = log(1 - exp(logpart2))
            dlogpart2 = 0.0
        d2logpart2 = 0.0
    else:
        lambd, logpart2, dlogpart2, d2logpart2 = 0.0, 0.0, 0.0, 0.0
    logpart = (1 - lambd) * logpart1 + lambd * logpart2
    dlogpart = (1 - lambd) * dlogpart1 + lambd * dlogpart2
    d2logpart = (1 - lambd) * d2logpart1 + lambd * d2logpart2
    return logpart, dlogpart, d2logpart