Python pymc3.Normal() Examples

The following are 30 code examples showing how to use pymc3.Normal(). They are extracted from open source projects; the project, author, source file, and license are listed above each example.

You may also want to check out the other available functions and classes of the pymc3 module.
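
Before the project excerpts, here is a minimal, self-contained sketch of the pattern most of them share: pymc3.Normal() used both as a prior on a parameter and as the observed likelihood inside a model context. The variable names and toy data below are illustrative only and are not taken from any of the projects.

import numpy as np
import pymc3 as pm

# toy data (illustrative only)
y = 1.0 + 2.0 * np.random.randn(100)

with pm.Model() as model:
    # Normal prior: pm.Normal(name, mu=..., sd=..., shape=..., testval=...)
    mu = pm.Normal("mu", mu=0.0, sd=10.0)
    sigma = pm.HalfNormal("sigma", sd=5.0)
    # Normal likelihood: the same distribution with `observed=` tied to the data
    obs = pm.Normal("obs", mu=mu, sd=sigma, observed=y)
    trace = pm.sample(1000, tune=1000)

Newer PyMC3 releases also accept sigma= in place of the older sd= spelling; the examples below use both.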

Example 1
Project: lifestyles   Author: CamDavidsonPilon   File: cbc_hb.py    License: MIT License
def model(profiles, comparisons, selections, sample=2500, alpha_prior_std=10):
    all_attributes = pd.get_dummies(profiles).columns
    profiles_dummies = pd.get_dummies(profiles, drop_first=True)
    choices = pd.concat({profile: profiles_dummies.loc[comparisons[profile]].reset_index(drop=True) for profile in comparisons.columns}, axis=1)

    respondants = selections.columns
    n_attributes_in_model = profiles_dummies.shape[1]
    n_participants = selections.shape[1]

    with pm.Model():

        # https://www.sawtoothsoftware.com/download/ssiweb/CBCHB_Manual.pdf
        # need to include the covariance matrix as a parent of `partsworth`
        alpha = pm.Normal('alpha', 0, sd=alpha_prior_std, shape=n_attributes_in_model, testval=np.random.randn(n_attributes_in_model))
        partsworth = pm.MvNormal("partsworth", alpha, tau=np.eye(n_attributes_in_model), shape=(n_participants, n_attributes_in_model))

        cs = [_create_observation_variable(selection, choices, partsworth[i, :]) for i, (_, selection) in enumerate(selections.iteritems())]

        trace = pm.sample(sample)
    return transform_trace_to_individual_summary_statistics(trace, respondants, profiles_dummies.columns, all_attributes) 
Example 2
Project: cryptotrader   Author: naripok   File: bayesian.py    License: MIT License
def fit(self, X, Y, n_samples=10000, tune_steps=1000, n_jobs=4):
        with pm.Model() as self.model:
            # Priors
            std = pm.Uniform("std", 0, self.sps, testval=X.std())
            beta = pm.StudentT("beta", mu=0, lam=self.sps, nu=self.nu)
            alpha = pm.StudentT("alpha", mu=0, lam=self.sps, nu=self.nu, testval=Y.mean())
            # Deterministic model
            mean = pm.Deterministic("mean", alpha + beta * X)
            # Posterior distribution
            obs = pm.Normal("obs", mu=mean, sd=std, observed=Y)
            ## Run MCMC
            # Find sampling start value with maximum a posteriori (MAP) estimation
            start = pm.find_MAP()
            # sample posterior distribution for latent variables
            trace = pm.sample(n_samples, njobs=n_jobs, tune=tune_steps, start=start)
            # Recover posterior samples
            self.burned_trace = trace[int(n_samples / 2):] 
Example 3
Project: arviz   Author: arviz-devs   File: helpers.py    License: Apache License 2.0
def tfp_schools_model(num_schools, treatment_stddevs):
    """Non-centered eight schools model for tfp."""
    import tensorflow_probability.python.edward2 as ed
    import tensorflow as tf

    if int(tf.__version__[0]) > 1:
        import tensorflow.compat.v1 as tf  # pylint: disable=import-error

        tf.disable_v2_behavior()

    avg_effect = ed.Normal(loc=0.0, scale=10.0, name="avg_effect")  # `mu`
    avg_stddev = ed.Normal(loc=5.0, scale=1.0, name="avg_stddev")  # `log(tau)`
    school_effects_standard = ed.Normal(
        loc=tf.zeros(num_schools), scale=tf.ones(num_schools), name="school_effects_standard"
    )  # `eta`
    school_effects = avg_effect + tf.exp(avg_stddev) * school_effects_standard  # `theta`
    treatment_effects = ed.Normal(
        loc=school_effects, scale=treatment_stddevs, name="treatment_effects"
    )  # `y`
    return treatment_effects 
Example 4
Project: dowhy   Author: microsoft   File: mcmc_sampler.py    License: MIT License
def apply_parameters(self, g, df, initialization_trace=None):
        for node in nx.topological_sort(g):
            parent_names = g.nodes()[node]["parent_names"]
            if parent_names:
                if not initialization_trace:
                    sd = np.array([df[node].std()] + (df[node].std() / df[parent_names].std()).tolist())
                    mu = np.array([df[node].std()] + (df[node].std() / df[parent_names].std()).tolist())
                    node_sd = df[node].std()
                else:
                    node_sd = initialization_trace["{}_sd".format(node)].mean()
                    mu = initialization_trace["beta_{}".format(node)].mean(axis=0)
                    sd = initialization_trace["beta_{}".format(node)].std(axis=0)
                g.nodes()[node]["parameters"] = pm.Normal("beta_{}".format(node), mu=mu, sd=sd,
                                                          shape=len(parent_names) + 1)
                g.nodes()[node]["sd"] = pm.Exponential("{}_sd".format(node), lam=node_sd)
        return g 
Example 5
Project: dowhy   Author: microsoft   File: mcmc_sampler.py    License: MIT License
def build_bayesian_network(self, g, df):
        for node in nx.topological_sort(g):
            if g.nodes()[node]["parent_names"]:
                mu = g.nodes()[node]["parameters"][0]  # intercept
                mu += pm.math.dot(df[g.nodes()[node]["parent_names"]],
                                  g.nodes()[node]["parameters"][1:])
                if g.nodes()[node]["variable_type"] == 'c':
                    sd = g.nodes()[node]["sd"]
                    g.nodes()[node]["variable"] = pm.Normal("{}".format(node),
                                                            mu=mu, sd=sd,
                                                            observed=df[node])
                elif g.nodes()[node]["variable_type"] == 'b':
                    g.nodes()[node]["variable"] = pm.Bernoulli("{}".format(node),
                                                               logit_p=mu,
                                                               observed=df[node])
                else:
                    raise Exception("Unrecognized variable type: {}".format(g.nodes()[node]["variable_type"]))
        return g 
Example 6
Project: phoenics   Author: aspuru-guzik-group   File: pymc3_interface.py    License: Apache License 2.0
def __get_weights(self, index, shape, scale = None):
		return pm.Normal('w%d' % index, self.weight_loc, self.weight_scale, shape = shape) 
Example 7
Project: phoenics   Author: aspuru-guzik-group   File: pymc3_interface.py    License: Apache License 2.0
def __get_biases(self, index, shape, scale = None):
		return pm.Normal('b%d' % index, self.weight_loc, self.weight_scale, shape = shape) 
Example 8
Project: phoenics   Author: aspuru-guzik-group   File: pymc3_interface_backup.py    License: Apache License 2.0
def __get_weights(self, index, shape, scale = None):
		return pm.Normal('w%d' % index, self.weight_loc, self.weight_scale, shape = shape) 
Example 9
Project: phoenics   Author: aspuru-guzik-group   File: pymc3_interface_backup.py    License: Apache License 2.0
def __get_biases(self, index, shape, scale = None):
		return pm.Normal('b%d' % index, self.weight_loc, self.weight_scale, shape = shape) 
Example 10
Project: phoenics   Author: aspuru-guzik-group   File: pymc3_interface_backup.py    License: Apache License 2.0
def __get_biases(self, index, shape, scale = None):
		return pm.Normal('b%d' % index, self.weight_loc, self.weight_scale, shape = shape) 
Example 11
Project: gelato   Author: ferrine   File: test_spec.py    License: MIT License
def test_shape(self):
        spec = DistSpec(Normal, mu=0, sd=1)
        spec2 = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))

        with Model('layer'):
            var = spec((100, 100), 'var')
            var2 = spec2((100, 100), 'var2')
            assert (var.init_value.shape == (100, 100))
            assert (var.name.endswith('var'))
            assert (var2.init_value.shape == (100, 100))
            assert (var2.name.endswith('var2')) 
Example 12
Project: gelato   Author: ferrine   File: test_spec.py    License: MIT License
def test_expressions(expr):
    with Model() as model:
        var = expr((10, 10))
        Normal('obs', observed=var)
        assert var.tag.test_value.shape == (10, 10)
        assert len(model.free_RVs) == 3
        fit(1) 
Example 13
Project: gelato   Author: ferrine   File: test_magic.py    License: MIT License
def test_workflow(self):
        inp = InputLayer(self.x.shape)
        out = DenseLayer(inp, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
        out = DenseLayer(out, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
        assert out.root is inp
        with out:
            pm.Normal('y', mu=get_output(out),
                      sd=self.sd,
                      observed=self.y) 
Example 14
Project: nispat   Author: amarquand   File: hbr.py    License: GNU General Public License v3.0
def from_posterior(param, samples, distribution = None, half = False, freedom=10):
    
    if len(samples.shape)>1:
        shape = samples.shape[1:]
    else:
        shape = None
            
    if (distribution is None):
        smin, smax = np.min(samples), np.max(samples)
        width = smax - smin
        x = np.linspace(smin, smax, 1000)
        y = stats.gaussian_kde(samples)(x)
        if half:
            x = np.concatenate([x, [x[-1] + 0.1 * width]])
            y = np.concatenate([y, [0]])
        else:
            x = np.concatenate([[x[0] - 0.1 * width], x, [x[-1] + 0.1 * width]])
            y = np.concatenate([[0], y, [0]])
        return pm.distributions.Interpolated(param, x, y)
    elif (distribution=='normal'):
        temp = stats.norm.fit(samples)
        if shape is None:
            return pm.Normal(param, mu=temp[0], sigma=freedom*temp[1])
        else:
            return pm.Normal(param, mu=temp[0], sigma=freedom*temp[1], shape=shape)
    elif (distribution=='hnormal'):
        temp = stats.halfnorm.fit(samples)
        if shape is None:
            return pm.HalfNormal(param, sigma=freedom*temp[1])
        else:
            return pm.HalfNormal(param, sigma=freedom*temp[1], shape=shape)
    elif (distribution=='hcauchy'):
        temp = stats.halfcauchy.fit(samples)
        if shape is None:
            return pm.HalfCauchy(param, freedom*temp[1])
        else:
            return pm.HalfCauchy(param, freedom*temp[1], shape=shape) 
Example 15
Project: autoimpute   Author: kearnz   File: bayesian_regression.py    License: MIT License
def fit(self, X, y):
        """Fit the Imputer to the dataset by fitting bayesian model.

        Args:
            X (pd.Dataframe): dataset to fit the imputer.
            y (pd.Series): response, which is eventually imputed.

        Returns:
            self. Instance of the class.
        """
        _not_num_series(self.strategy, y)
        nc = len(X.columns)

        # initialize model for bayesian linear reg. Default vals for priors
        # assume data is scaled and centered. Convergence can struggle or fail
        # if not the case and proper values for the priors are not specified
        # separately, also assumes each beta is normal and "independent"
        # while betas likely not independent, this is technically a rule of OLS
        with pm.Model() as fit_model:
            alpha = pm.Normal("alpha", self.am, sd=self.asd)
            beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
            sigma = pm.HalfCauchy("σ", self.sig)
            mu = alpha+beta.dot(X.T)
            score = pm.Normal("score", mu, sd=sigma, observed=y)
        self.statistics_ = {"param": fit_model, "strategy": self.strategy}
        return self 
Example 16
Project: autoimpute   Author: kearnz   File: bayesian_regression.py    License: MIT License
def fit(self, X, y):
        """Fit the Imputer to the dataset by fitting bayesian model.

        Args:
            X (pd.Dataframe): dataset to fit the imputer.
            y (pd.Series): response, which is eventually imputed.

        Returns:
            self. Instance of the class.
        """
        y = y.astype("category").cat
        y_cat_l = len(y.codes.unique())

        # bayesian logistic regression. Multiple categories not supported yet
        if y_cat_l != 2:
            err = "Only two categories supported. Multinomial coming soon."
            raise ValueError(err)
        nc = len(X.columns)

        # initialize model for bayesian logistic reg. Default vals for priors
        # assume data is scaled and centered. Convergence can struggle or fail
        # if not the case and proper values for the priors are not specified
        # separately, also assumes each beta is normal and "independent"
        # while betas likely not independent, this is technically a rule of OLS
        with pm.Model() as fit_model:
            alpha = pm.Normal("alpha", self.am, sd=self.asd)
            beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
            p = pm.invlogit(alpha + beta.dot(X.T))
            score = pm.Bernoulli("score", p, observed=y.codes)

        params = {"model": fit_model, "labels": y.categories}
        self.statistics_ = {"param": params, "strategy": self.strategy}
        return self 
Example 17
Project: autoimpute   Author: kearnz   File: pmm.py    License: MIT License
def fit(self, X, y):
        """Fit the Imputer to the dataset by fitting bayesian and LS model.

        Args:
            X (pd.Dataframe): dataset to fit the imputer.
            y (pd.Series): response, which is eventually imputed.

        Returns:
            self. Instance of the class.
        """
        _not_num_series(self.strategy, y)
        nc = len(X.columns)

        # get predictions for the data, which will be used for "closest" vals
        y_pred = self.lm.fit(X, y).predict(X)
        y_df = DataFrame({"y": y, "y_pred": y_pred})

        # calculate bayes and use appropriate means for alpha and beta priors
        # here we specify the point estimates from the linear regression as the
        # means for the priors. This will greatly speed up posterior sampling
        # and help ensure that convergence occurs
        if self.am is None:
            self.am = self.lm.intercept_
        if self.bm is None:
            self.bm = self.lm.coef_

        # initialize model for bayesian linear reg. Default vals for priors
        # assume data is scaled and centered. Convergence can struggle or fail
        # if not the case and proper values for the priors are not specified
        # separately, also assumes each beta is normal and "independent"
        # while betas likely not independent, this is technically a rule of OLS
        with pm.Model() as fit_model:
            alpha = pm.Normal("alpha", self.am, sd=self.asd)
            beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
            sigma = pm.HalfCauchy("σ", self.sig)
            mu = alpha+beta.dot(X.T)
            score = pm.Normal("score", mu, sd=sigma, observed=y)
        params = {"model": fit_model, "y_obs": y_df}
        self.statistics_ = {"param": params, "strategy": self.strategy}
        return self 
Example 18
Project: autoimpute   Author: kearnz   File: lrd.py    License: MIT License
def fit(self, X, y):
        """Fit the Imputer to the dataset by fitting bayesian and LS model.

        Args:
            X (pd.Dataframe): dataset to fit the imputer.
            y (pd.Series): response, which is eventually imputed.

        Returns:
            self. Instance of the class.
        """
        _not_num_series(self.strategy, y)
        nc = len(X.columns)

        # get predictions for the data, which will be used for "closest" vals
        y_pred = self.lm.fit(X, y).predict(X)
        y_df = DataFrame({"y": y, "y_pred": y_pred})

        # calculate bayes and use appropriate means for alpha and beta priors
        # here we specify the point estimates from the linear regression as the
        # means for the priors. This will greatly speed up posterior sampling
        # and help ensure that convergence occurs
        if self.am is None:
            self.am = self.lm.intercept_
        if self.bm is None:
            self.bm = self.lm.coef_

        # initialize model for bayesian linear reg. Default vals for priors
        # assume data is scaled and centered. Convergence can struggle or fail
        # if not the case and proper values for the priors are not specified
        # separately, also assumes each beta is normal and "independent"
        # while betas likely not independent, this is technically a rule of OLS
        with pm.Model() as fit_model:
            alpha = pm.Normal("alpha", self.am, sd=self.asd)
            beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
            sigma = pm.HalfCauchy("σ", self.sig)
            mu = alpha+beta.dot(X.T)
            score = pm.Normal("score", mu, sd=sigma, observed=y)
        params = {"model": fit_model, "y_obs": y_df}
        self.statistics_ = {"param": params, "strategy": self.strategy}
        return self 
Example 19
Project: arviz   Author: arviz-devs   File: helpers.py    License: Apache License 2.0
def _pyro_noncentered_model(J, sigma, y=None):
    import pyro
    import pyro.distributions as dist

    mu = pyro.sample("mu", dist.Normal(0, 5))
    tau = pyro.sample("tau", dist.HalfCauchy(5))
    with pyro.plate("J", J):
        eta = pyro.sample("eta", dist.Normal(0, 1))
        theta = mu + tau * eta
        return pyro.sample("obs", dist.Normal(theta, sigma), obs=y) 
Example 20
Project: arviz   Author: arviz-devs   File: helpers.py    License: Apache License 2.0
def _numpyro_noncentered_model(J, sigma, y=None):
    import numpyro
    import numpyro.distributions as dist

    mu = numpyro.sample("mu", dist.Normal(0, 5))
    tau = numpyro.sample("tau", dist.HalfCauchy(5))
    with numpyro.plate("J", J):
        eta = numpyro.sample("eta", dist.Normal(0, 1))
        theta = mu + tau * eta
        return numpyro.sample("obs", dist.Normal(theta, sigma), obs=y) 
Example 21
Project: arviz   Author: arviz-devs   File: helpers.py    License: Apache License 2.0
def pymc3_noncentered_schools(data, draws, chains):
    """Non-centered eight schools implementation for pymc3."""
    import pymc3 as pm

    with pm.Model() as model:
        mu = pm.Normal("mu", mu=0, sd=5)
        tau = pm.HalfCauchy("tau", beta=5)
        eta = pm.Normal("eta", mu=0, sd=1, shape=data["J"])
        theta = pm.Deterministic("theta", mu + tau * eta)
        pm.Normal("obs", mu=theta, sd=data["sigma"], observed=data["y"])
        trace = pm.sample(draws, chains=chains)
    return model, trace 
Example 22
Project: bambi   Author: bambinos   File: pymc.py    License: MIT License
def _build_dist(self, spec, label, dist, **kwargs):
        """Build and return a PyMC3 Distribution."""
        if isinstance(dist, str):
            if hasattr(pm, dist):
                dist = getattr(pm, dist)
            elif dist in self.dists:
                dist = self.dists[dist]
            else:
                raise ValueError(
                    f"The Distribution {dist} was not found in PyMC3 or the PyMC3BackEnd."
                )

        # Inspect all args in case we have hyperparameters
        def _expand_args(key, value, label):
            if isinstance(value, Prior):
                label = f"{label}_{key}"
                return self._build_dist(spec, label, value.name, **value.args)
            return value

        kwargs = {k: _expand_args(k, v, label) for (k, v) in kwargs.items()}

        # Non-centered parameterization for hyperpriors
        if (
            spec.noncentered
            and "sigma" in kwargs
            and "observed" not in kwargs
            and isinstance(kwargs["sigma"], pm.model.TransformedRV)
        ):
            old_sigma = kwargs["sigma"]
            _offset = pm.Normal(label + "_offset", mu=0, sigma=1, shape=kwargs["shape"])
            return pm.Deterministic(label, _offset * old_sigma)

        return dist(label, **kwargs) 
Example 23
Project: bilby   Author: lscsoft   File: linear_regression_pymc3_custom_likelihood.py    License: MIT License
def log_likelihood(self, sampler=None):
        """
        Parameters
        ----------
        sampler: :class:`bilby.core.sampler.Pymc3`
            A Sampler object must be passed containing the prior distributions
            and PyMC3 :class:`~pymc3.Model` to use as a context manager.
        """

        from bilby.core.sampler import Pymc3

        if not isinstance(sampler, Pymc3):
            raise ValueError("Sampler is not a bilby Pymc3 sampler object")

        if not hasattr(sampler, 'pymc3_model'):
            raise AttributeError("Sampler has not PyMC3 model attribute")

        with sampler.pymc3_model:
            mdist = sampler.pymc3_priors['m']
            cdist = sampler.pymc3_priors['c']

            mu = model(time, mdist, cdist)

            # set the likelihood distribution
            pm.Normal('likelihood', mu=mu, sd=self.sigma, observed=self.y)


# Now let's instantiate a version of our GaussianLikelihood, giving it
# the time, data and signal model 
Example 24
Project: cs-ranking   Author: kiudee   File: model_selector.py    License: Apache License 2.0
def __init__(
        self,
        learner_cls,
        parameter_keys,
        model_params,
        fit_params,
        model_path,
        **kwargs,
    ):
        self.priors = [
            [pm.Normal, {"mu": 0, "sd": 10}],
            [pm.Laplace, {"mu": 0, "b": 10}],
        ]
        self.uniform_prior = [pm.Uniform, {"lower": -20, "upper": 20}]
        self.prior_indices = np.arange(len(self.priors))
        self.parameter_f = [
            (pm.Normal, {"mu": 0, "sd": 5}),
            (pm.Cauchy, {"alpha": 0, "beta": 1}),
            0,
            -5,
            5,
        ]
        self.parameter_s = [
            (pm.HalfCauchy, {"beta": 1}),
            (pm.HalfNormal, {"sd": 0.5}),
            (pm.Exponential, {"lam": 0.5}),
            (pm.Uniform, {"lower": 1, "upper": 10}),
            10,
        ]
        # ,(pm.HalfCauchy, {'beta': 2}), (pm.HalfNormal, {'sd': 1}),(pm.Exponential, {'lam': 1.0})]
        self.learner_cls = learner_cls
        self.model_params = model_params
        self.fit_params = fit_params
        self.parameter_keys = parameter_keys
        self.parameters = list(product(self.parameter_f, self.parameter_s))
        pf_arange = np.arange(len(self.parameter_f))
        ps_arange = np.arange(len(self.parameter_s))
        self.parameter_ind = list(product(pf_arange, ps_arange))
        self.model_path = model_path
        self.models = dict()
        self.logger = logging.getLogger(ModelSelector.__name__) 
Example 25
Project: cs-ranking   Author: kiudee   File: generalized_linear_model.py    License: Apache License 2.0
def model_configuration(self):
        """
            Constructs the dictionary containing the priors for the weight vectors for the model according to the
            regularization function. The parameters are:
                * **weights** : Weights to evaluate the utility of the objects

            For ``l1`` regularization the priors are:

            .. math::

                \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
                \\text{b}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
                \\text{weights} \\sim \\text{Laplace}(\\text{mu}=\\text{mu}_w, \\text{b}=\\text{b}_w)

            For ``l2`` regularization the priors are:

            .. math::

                \\text{mu}_w \\sim \\text{Normal}(\\text{mu}=0, \\text{sd}=5.0) \\\\
                \\text{sd}_w \\sim \\text{HalfCauchy}(\\beta=1.0) \\\\
                \\text{weights} \\sim \\text{Normal}(\\text{mu}=\\text{mu}_w, \\text{sd}=\\text{sd}_w)
        """
        if self.regularization == "l2":
            weight = pm.Normal
            prior = "sd"
        elif self.regularization == "l1":
            weight = pm.Laplace
            prior = "b"
        configuration = {
            "weights": [
                weight,
                {
                    "mu": (pm.Normal, {"mu": 0, "sd": 10}),
                    prior: (pm.HalfCauchy, {"beta": 1}),
                },
            ]
        }
        self.logger.info(
            "Creating default config {}".format(print_dictionary(configuration))
        )
        return configuration 
Example 26
Project: cs-ranking   Author: kiudee   File: generalized_linear_model.py    License: Apache License 2.0
def construct_model(self, X, Y):
        """
            Constructs the linear logit model which evaluates the utility score as :math:`U(x) = w \\cdot x`, where
            :math:`w` is the weight vector. The probability of choosing the object :math:`x_i` from the query set
            :math:`Q = \\{x_1, \\ldots ,x_n\\}` is:

            .. math::

                P_i =  P(x_i \\lvert Q) = \\frac{1}{1+exp(-U(x_i))}

            Parameters
            ----------
            X : numpy array
                (n_instances, n_objects, n_features)
                Feature vectors of the objects
            Y : numpy array
                (n_instances, n_objects)
                Preferences in form of Choices for given objects

            Returns
            -------
             model : pymc3 Model :class:`pm.Model`
        """
        self.logger.info(
            "Creating model_args config {}".format(
                print_dictionary(self.model_configuration)
            )
        )
        with pm.Model() as self.model:
            self.Xt = theano.shared(X)
            self.Yt = theano.shared(Y)
            shapes = {"weights": self.n_object_features_fit_}
            # shapes = {'weights': (self.n_object_features_fit_, 3)}
            weights_dict = create_weight_dictionary(self.model_configuration, shapes)
            intercept = pm.Normal("intercept", mu=0, sd=10)
            utility = tt.dot(self.Xt, weights_dict["weights"]) + intercept
            self.p = ttu.sigmoid(utility)
            BinaryCrossEntropyLikelihood("yl", p=self.p, observed=self.Yt)
        self.logger.info("Model construction completed") 
Example 27
Project: phoenics   Author: aspuru-guzik-group   File: pymc3_interface.py    License: Apache License 2.0
def _create_model(self):

		with pm.Model() as self.model:

			# getting the location primers
			for layer_index in range(self.num_layers):
				setattr(self, 'w%d' % layer_index, self.__get_weights(layer_index, self.weight_shapes[layer_index]))
				setattr(self, 'b%d' % layer_index, self.__get_biases(layer_index, self.bias_shapes[layer_index]))

				if layer_index == 0:
					fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(self.network_input, self.weight(layer_index)) + self.bias(layer_index)))
					setattr(self, 'fc%d' % layer_index, fc)
				elif 0 < layer_index < self.num_layers - 1:
					fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)))
					setattr(self, 'fc%d' % layer_index, fc)
				else:
					self._loc = pm.Deterministic('bnn_out', pm.math.sigmoid(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)) )	


			# getting the precision / standard deviation / variance
			self.tau_rescaling = np.zeros((self.num_obs, self.network_input.shape[1]))
			for obs_index in range(self.num_obs):
				self.tau_rescaling[obs_index] += self.var_e_ranges
			self.tau_rescaling = self.tau_rescaling**2

			tau        = pm.Gamma('tau', self.num_obs**2, 1., shape = (self.num_obs, self.network_input.shape[1]))
			self.tau   = tau / self.tau_rescaling
			self.scale = pm.Deterministic('scale', 1. / pm.math.sqrt(self.tau))


			# learn the floats
			self.loc        = pm.Deterministic('loc', (self.upper_rescalings - self.lower_rescalings) * self._loc + self.lower_rescalings)
			self.out_floats = pm.Normal('out_floats', self.loc[:, self.floats], tau = self.tau[:, self.floats], observed = self.network_output[:, self._floats])


			# learn the integers
			self.int_scale = pm.Deterministic('int_scale', 1. * self.scale)
			self.out_ints  = DiscreteLaplace('out_ints', loc = self.loc[:, self.ints], scale = self.int_scale[:, self.ints], observed = self.network_output[:, self._ints])


			# learn the categories
			dist_counter, cat_var_index = 0, 0
			
			self.alpha = pm.Deterministic('alpha', (self.loc + 1.) * self.scale)
			self.num_cats = 0
			for var_e_index, var_e_type in enumerate(self.var_e_types):
				if var_e_type == 'categorical' and self.var_e_begin[var_e_index] == var_e_index:
					begin, end  = self.var_e_begin[var_e_index], self.var_e_end[var_e_index]
					var_e_name  = self.var_e_names[var_e_index]
					param_index = np.argwhere(self.var_p_names == var_e_name)[0, 0]
					self.param_index = param_index

					out_dirichlet = pm.Dirichlet('dirich_%d' % dist_counter, a = self.alpha[:, begin : end], shape = (self.num_obs, int(end - begin)) )
					out_cats      = pm.Categorical('out_cats_%d' % dist_counter, p = out_dirichlet, observed = self.network_output[:, param_index])
					self.num_cats += 1
					dist_counter += 1 
Example 28
Project: phoenics   Author: aspuru-guzik-group   File: pymc3_interface_backup.py    License: Apache License 2.0
def _create_model_old(self):
		self._get_rescalings()

		with pm.Model() as self.model:

			# getting the location
			for layer_index in range(self.num_layers):
				setattr(self, 'w%d' % layer_index, self.__get_weights(layer_index, self.weight_shapes[layer_index]))
				setattr(self, 'b%d' % layer_index, self.__get_biases(layer_index, self.bias_shapes[layer_index]))

				if layer_index == 0:
					fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(self.network_input, self.weight(layer_index)) + self.bias(layer_index)))
					setattr(self, 'fc%d' % layer_index, fc)
				elif 0 < layer_index < self.num_layers - 1:
					fc = pm.Deterministic('fc%d' % layer_index, pm.math.tanh(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)))
					setattr(self, 'fc%d' % layer_index, fc)
				else:
					self.loc = pm.Deterministic('loc', (self.upper_rescalings - self.lower_rescalings) * pm.math.sigmoid(pm.math.dot(getattr(self, 'fc%d' % (layer_index - 1)), self.weight(layer_index)) + self.bias(layer_index)) + self.lower_rescalings)



			# getting the standard deviation (or rather precision)
			self.tau_rescaling = np.zeros((self.num_obs, self.observed_params.shape[1]))
			for obs_index in range(self.num_obs):
				self.tau_rescaling[obs_index] += self.domain_ranges
			self.tau_rescaling = self.tau_rescaling**2
			self.tau = pm.Gamma('tau', self.num_obs**2, 1., shape = (self.num_obs, self.observed_params.shape[1]))
			self.tau = self.tau / self.tau_rescaling
#			self.sd  = pm.Deterministic('sd', 0.05 + 1. / pm.math.sqrt(self.tau))
			self.scale  = pm.Deterministic('scale', 1. / pm.math.sqrt(self.tau))



			print(self.observed_params.shape)
			print(self._floats)
			print(self._integers)
			quit()

			# now that we got all locations and scales we can start getting the distributions


			# floats are easy, as we can take loc and scale as they are
			self.out = pm.Normal('out', self.loc, tau = self.tau, observed = self.observed_params)

			# integers are a bit more tricky and require the following transformation for the beta binomial
			alpha = ((n - mu) / sigma**2 - 1) / (n / mu - (n - mu) / sigma**2)
			beta  = (n / mu - 1) * alpha
			self.alpha = pm.Deterministic('alpha', alpha)
			self.beta  = pm.Deterministic('beta', beta) 
Example 29
Project: gempy   Author: cgre-aachen   File: coKriging.py    License: GNU Lesser General Public License v3.0
def fit_cross_cov(self, n_exp=2, n_gauss=2, range_mu=None):
        """
        Fit an analytical covariance to the experimental data.
        Args:
            n_exp (int): number of exponential basic functions
            n_gauss (int): number of gaussian basic functions
            range_mu: prior mean of the range. Default mean of the lags

        Returns:
            pymc.Model: PyMC3 model to be sampled using MCMC
        """
        self.n_exp = n_exp
        self.n_gauss = n_gauss
        n_var = self.n_properties
        df = self.exp_var
        lags = self.lags

        # Prior standard deviation for the error of the regression
        prior_std_reg = df.std(0).max() * 10

        # Prior value for the mean of the ranges
        if not range_mu:
            range_mu = lags.mean()

        # pymc3 Model
        with pm.Model() as model:  # model specifications in PyMC3 are wrapped in a with-statement
            # Define priors
            sigma = pm.HalfCauchy('sigma', beta=prior_std_reg, testval=1., shape=n_var)

            psill = pm.Normal('sill', prior_std_reg, sd=.5 * prior_std_reg, shape=(n_exp + n_gauss))
            range_ = pm.Normal('range', range_mu, sd=range_mu * .3, shape=(n_exp + n_gauss))

            lambda_ = pm.Uniform('weights', 0, 1, shape=(n_var * (n_exp + n_gauss)))

            # Exponential covariance
            exp = pm.Deterministic('exp',
                                   # (lambda_[:n_exp*n_var]*
                                   psill[:n_exp] *
                                   (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)),
                                                     (range_[:n_exp].reshape((1, n_exp)) / 3.) ** -1))))

            gauss = pm.Deterministic('gaus',
                                     psill[n_exp:] *
                                     (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)) ** 2,
                                                       (range_[n_exp:].reshape((1, n_gauss)) * 4 / 7.) ** -2))))

            # We stack the basic functions in the same matrix and tile it to match the number of properties we have
            func = pm.Deterministic('func', T.tile(T.horizontal_stack(exp, gauss), (n_var, 1, 1)))

            # We weight each basic function and sum them
            func_w = pm.Deterministic("func_w", T.sum(func * lambda_.reshape((n_var, 1, (n_exp + n_gauss))), axis=2))

            for e, cross in enumerate(df.columns):
                # Likelihoods
                pm.Normal(cross + "_like", mu=func_w[e], sd=sigma[e], observed=df[cross].values)
        return model 
Example 30
Project: gempy   Author: cgre-aachen   File: coKriging.py    License: GNU Lesser General Public License v3.0
def fit_cross_cov(df, lags, n_exp=2, n_gaus=2, range_mu=None):
    n_var = df.columns.shape[0]
    n_basis_f = n_var * (n_exp + n_gaus)
    prior_std_reg = df.std(0).max() * 10
    #
    if not range_mu:
        range_mu = lags.mean()

    # Because this is an experimental variogram I am not going to have outliers
    nugget_max = df.values.max()
    # print(n_basis_f, n_var*n_exp, nugget_max, range_mu, prior_std_reg)
    # pymc3 Model
    with pm.Model() as model:  # model specifications in PyMC3 are wrapped in a with-statement
        # Define priors
        sigma = pm.HalfCauchy('sigma', beta=prior_std_reg, testval=1., shape=n_var)

        psill = pm.Normal('sill', prior_std_reg, sd=.5 * prior_std_reg, shape=(n_exp + n_gaus))
        range_ = pm.Normal('range', range_mu, sd=range_mu * .3, shape=(n_exp + n_gaus))
        #  nugget = pm.Uniform('nugget', 0, nugget_max, shape=n_var)

        lambda_ = pm.Uniform('weights', 0, 1, shape=(n_var * (n_exp + n_gaus)))

        # Exponential covariance
        exp = pm.Deterministic('exp',
                               # (lambda_[:n_exp*n_var]*
                               psill[:n_exp] *
                               (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)),
                                                 (range_[:n_exp].reshape((1, n_exp)) / 3.) ** -1))))

        gaus = pm.Deterministic('gaus',
                                psill[n_exp:] *
                                (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)) ** 2,
                                                  (range_[n_exp:].reshape((1, n_gaus)) * 4 / 7.) ** -2))))

        func = pm.Deterministic('func', T.tile(T.horizontal_stack(exp, gaus), (n_var, 1, 1)))

        func_w = pm.Deterministic("func_w", T.sum(func * lambda_.reshape((n_var, 1, (n_exp + n_gaus))), axis=2))
        #           nugget.reshape((n_var,1)))

        for e, cross in enumerate(df.columns):
            # Likelihoods
            pm.Normal(cross + "_like", mu=func_w[e], sd=sigma[e], observed=df[cross].values)
    return model