Python pymc3.summary() Examples

The following are 8 code examples of pymc3.summary(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module pymc3, or try the search function.
Example #1
Source File: utils.py    From nispat with GNU General Public License v3.0 6 votes vote down vote up
def divergence_plot(nm, ylim=None):
    """Visualize MCMC divergences for a fitted HBR normative model.

    When more than one chain was sampled (and the model is not a neural
    net), first shows a histogram of the r_hat (Gelman-Rubin) diagnostic
    from the trace summary.  Then plots every parameter trace twice:
    transitions without divergences and transitions with divergences.

    NOTE(review): relies on module-level `pm` (pymc3) and `plt`
    (matplotlib.pyplot) imports -- confirm against the full file.

    :param nm: fitted normative model exposing ``nm.hbr`` with
        ``configs``, ``model_type`` and ``trace`` attributes
    :param ylim: optional y-axis limits passed to ``plt.ylim``
    """
    multiple_chains = nm.hbr.configs['n_chains'] > 1
    if multiple_chains and nm.hbr.model_type != 'nn':
        stats = pm.summary(nm.hbr.trace).round(2)
        plt.figure()
        plt.hist(stats['r_hat'], 10)
        plt.title('Gelman-Rubin diagnostic for divergence')

    divergent = nm.hbr.trace['diverging']
    tracedf = pm.trace_to_dataframe(nm.hbr.trace)

    _, axes = plt.subplots(2, 1, figsize=(15, 4), sharex=True, sharey=True)
    axes[0].plot(tracedf.values[divergent == 0].T, color='k', alpha=.05)
    axes[0].set_title('No Divergences', fontsize=10)
    axes[1].plot(tracedf.values[divergent == 1].T, color='C2', lw=.5, alpha=.5)
    axes[1].set_title('Divergences', fontsize=10)
    plt.ylim(ylim)
    plt.xticks(range(tracedf.shape[1]), list(tracedf.columns))
    plt.xticks(rotation=90, fontsize=7)
    plt.tight_layout()
    plt.show()
Example #2
Source File: generalized_nested_logit.py    From cs-ranking with Apache License 2.0 6 votes vote down vote up
def _predict_scores_fixed(self, X, **kwargs):
        """Score objects using posterior-mean parameters from the trace.

        Reads the posterior means of ``weights``, ``lambda_k`` and the
        feature-by-nest matrix ``weights_ik`` out of the pymc3 summary,
        then combines them into generalized-nested-logit probabilities.

        :param X: feature array; dotted with the weight vectors, so its
            last axis must have ``self.n_object_features_fit_`` entries
        :return: probabilities from ``self._get_probabilities_np``
        """
        posterior_means = dict(pm.summary(self.trace)["mean"])
        n_feat = self.n_object_features_fit_
        weights = np.array(
            [posterior_means["weights[{}]".format(i)] for i in range(n_feat)]
        )
        lambda_k = np.array(
            [posterior_means["lambda_k[{}]".format(j)] for j in range(self.n_nests)]
        )
        weights_ik = np.zeros((n_feat, self.n_nests))
        for i in range(n_feat):
            for k in range(self.n_nests):
                weights_ik[i, k] = posterior_means["weights_ik[{},{}]".format(i, k)]
        # Per-object nest membership weights, normalized over axis 2.
        alpha_ik = npu.softmax(np.dot(X, weights_ik), axis=2)
        utility = np.dot(X, weights)
        return self._get_probabilities_np(utility, lambda_k, alpha_ik)
Example #3
Source File: nested_logit_model.py    From cs-ranking with Apache License 2.0 6 votes vote down vote up
def _predict_scores_fixed(self, X, **kwargs):
        """Score objects with posterior-mean nested-logit parameters.

        Extracts the posterior means of ``weights``, ``weights_k`` and
        ``lambda_k`` from the pymc3 trace summary and evaluates the
        nested-logit probabilities for the nests created from ``X``.

        :param X: feature array handed to ``self.create_nests`` and the
            utility evaluation
        :return: scores from ``self._get_probabilities_np``
        """
        y_nests = self.create_nests(X)
        posterior_means = dict(pm.summary(self.trace)["mean"])
        n_feat = self.n_object_features_fit_
        weights = np.array(
            [posterior_means["weights[{}]".format(i)] for i in range(n_feat)]
        )
        weights_k = np.array(
            [posterior_means["weights_k[{}]".format(i)] for i in range(n_feat)]
        )
        lambda_k = np.array(
            [posterior_means["lambda_k[{}]".format(i)] for i in range(self.n_nests)]
        )
        # NOTE(review): broadcasting against lambda_k[:, None] expands the
        # 1-D weight vector into an (n_nests, n_features) matrix -- verify
        # this shape is what _eval_utility_np expects.
        weights = weights / lambda_k[:, None]
        utility_k = np.dot(self.features_nests, weights_k)
        utility = self._eval_utility_np(X, y_nests, weights)
        return self._get_probabilities_np(y_nests, utility, lambda_k, utility_k)
Example #4
Source File: estimate.py    From geoist with MIT License 5 votes vote down vote up
def get_L2_estimates(summary):
    """
    Returns digestible estimates from the L2 estimates.

    :type summary: :class:`~pandas.core.frame.DataFrame`
    :param summary: Summary statistics from Posterior distributions

    :return:
        (tuple): tuple containing:
            * mean_te (float) : Mean value of elastic thickness from posterior (km)
            * std_te (float)  : Standard deviation of elastic thickness from posterior (km)
            * mean_F (float)  : Mean value of load ratio from posterior
            * std_F (float)   : Standard deviation of load ratio from posterior
            * mean_a (float, optional)  : Mean value of phase difference between initial loads from posterior
            * std_a (float, optional)   : Standard deviation of phase difference between initial loads from posterior
            * rchi2 (float)   : Reduced chi-squared value
    """

    # 'alpha' is optional in the summary; track whether it appeared.
    mean_a = None

    # Pick up the statistics row by row; if an index repeats, the last
    # occurrence wins, matching plain per-row assignment.
    for name, stats in summary.iterrows():
        if name == 'Te':
            mean_te, std_te = stats['mean'], stats['std']
            rchi2 = stats['chi2']
        elif name == 'F':
            mean_F, std_F = stats['mean'], stats['std']
        elif name == 'alpha':
            mean_a, std_a = stats['mean'], stats['std']

    base = (mean_te, std_te, mean_F, std_F)
    if mean_a is None:
        return base + (rchi2,)
    return base + (mean_a, std_a, rchi2)
Example #5
Source File: mixed_logit_model.py    From cs-ranking with Apache License 2.0 5 votes vote down vote up
def _predict_scores_fixed(self, X, **kwargs):
        """Score objects with posterior-mean mixed-logit weights.

        Fills an (n_features, n_mixtures) weight matrix from the pymc3
        trace summary, computes per-mixture utilities, softmaxes over
        axis 1 and averages the mixture axis (axis 2).

        :param X: feature array; last axis must match
            ``self.n_object_features_fit_``
        :return: mean choice probabilities across mixtures
        """
        posterior_means = dict(pm.summary(self.trace)["mean"])
        n_feat, n_mix = self.n_object_features_fit_, self.n_mixtures
        weights = np.zeros((n_feat, n_mix))
        for i in range(n_feat):
            for k in range(n_mix):
                weights[i, k] = posterior_means["weights[{},{}]".format(i, k)]
        utility = np.dot(X, weights)
        return np.mean(npu.softmax(utility, axis=1), axis=2)
Example #6
Source File: paired_combinatorial_logit.py    From cs-ranking with Apache License 2.0 5 votes vote down vote up
def _predict_scores_fixed(self, X, **kwargs):
        """Score objects with posterior-mean paired-combinatorial-logit
        parameters.

        Reads the posterior means of ``weights`` and ``lambda_k`` from
        the pymc3 trace summary and converts the resulting utilities to
        probabilities.

        :param X: feature array; last axis must match
            ``self.n_object_features_fit_``
        :return: probabilities from ``self._get_probabilities_np``
        """
        posterior_means = dict(pm.summary(self.trace)["mean"])
        weights = np.array([
            posterior_means["weights[{}]".format(i)]
            for i in range(self.n_object_features_fit_)
        ])
        lambda_k = np.array([
            posterior_means["lambda_k[{}]".format(k)]
            for k in range(self.n_nests)
        ])
        utility = np.dot(X, weights)
        return self._get_probabilities_np(utility, lambda_k)
Example #7
Source File: multinomial_logit_model.py    From cs-ranking with Apache License 2.0 5 votes vote down vote up
def _predict_scores_fixed(self, X, **kwargs):
        """Score objects with posterior-mean multinomial-logit weights.

        Builds the weight vector from the pymc3 trace summary and adds
        the posterior-mean intercept when one was sampled (an intercept
        of 0.0 is used otherwise).

        :param X: feature array; last axis must match
            ``self.n_object_features_fit_``
        :return: linear utilities ``X @ weights + intercept``
        """
        posterior_means = dict(pm.summary(self.trace)["mean"])
        weights = np.array([
            posterior_means["weights[{}]".format(i)]
            for i in range(self.n_object_features_fit_)
        ])
        intercept = posterior_means.get("intercept", 0.0)
        return np.dot(X, weights) + intercept
Example #8
Source File: estimate.py    From geoist with MIT License 4 votes vote down vote up
def get_bayes_estimates(summary, map_estimate):
    """
    Returns digestible estimates from the Posterior distributions.

    :type summary: :class:`~pandas.core.frame.DataFrame`
    :param summary: Summary statistics from Posterior distributions
    :type map_estimate: dict
    :param map_estimate: Container for Maximum a Posteriori (MAP) estimates

    :return: 
        (tuple): tuple containing:
            * mean_te (float) : Mean value of elastic thickness ``Te`` from posterior (km)
            * std_te (float)  : Standard deviation of elastic thickness ``Te`` from posterior (km)
            * C2_5_te (float) : Lower limit of 95% confidence interval on ``Te`` (km)
            * C97_5_te (float) : Upper limit of 95% confidence interval on ``Te`` (km)
            * MAP_te (float) : Maximum a Posteriori ``Te`` (km)
            * mean_F (float)  : Mean value of load ratio ``F`` from posterior
            * std_F (float)   : Standard deviation of load ratio ``F`` from posterior
            * C2_5_F (float) : Lower limit of 95% confidence interval on ``F``
            * C97_5_F (float) : Upper limit of 95% confidence interval on ``F``
            * MAP_F (float)  : Maximum a Posteriori load ratio ``F``
            * mean_a (float, optional)  : Mean value of initial phase difference ``alpha`` from posterior
            * std_a (float, optional)   : Standard deviation of initial phase difference ``alpha`` from posterior
            * C2_5_a (float, optional) : Lower limit of 95% confidence interval on ``alpha``
            * C97_5_a (float, optional) : Upper limit of 95% confidence interval on ``alpha``
            * MAP_a (float, optional)  : Maximum a Posteriori initial phase difference ``alpha``

    """

    # 'alpha' is optional in the summary; track whether it appeared.
    mean_a = None

    # Go through all estimates
    for index, row in summary.iterrows():
        if index=='Te':
            mean_te = row['mean']
            std_te = row['sd']
            C2_5_te = row['hpd_2.5']
            C97_5_te = row['hpd_97.5']
            # np.float was deprecated in NumPy 1.20 and removed in 1.24;
            # it was an alias for the builtin float, used here directly.
            MAP_te = float(map_estimate['Te'])
        elif index=='F':
            mean_F = row['mean']
            std_F = row['sd']
            C2_5_F = row['hpd_2.5']
            C97_5_F = row['hpd_97.5']
            MAP_F = float(map_estimate['F'])
        elif index=='alpha':
            mean_a = row['mean']
            std_a = row['sd']
            C2_5_a = row['hpd_2.5']
            C97_5_a = row['hpd_97.5']
            MAP_a = float(map_estimate['alpha'])

    if mean_a is not None:
        return mean_te, std_te, C2_5_te, C97_5_te, MAP_te, \
            mean_F, std_F, C2_5_F, C97_5_F, MAP_F, \
            mean_a, std_a, C2_5_a, C97_5_a, MAP_a
    else:
        return mean_te, std_te, C2_5_te, C97_5_te, MAP_te, \
            mean_F, std_F, C2_5_F, C97_5_F, MAP_F