Python pymc3.Uniform() Examples

The following are 14 code examples of pymc3.Uniform(). You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module pymc3, or try the search function.
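Before the project examples, here is a minimal, self-contained sketch of how pm.Uniform is typically used to place a bounded prior on a parameter inside a model context. It is not taken from any of the projects below; the data, bounds, and variable names are purely illustrative.

import numpy as np
import pymc3 as pm

# Illustrative data: draws from a normal distribution with unknown mean and scale
data = np.random.normal(loc=1.0, scale=2.0, size=100)

with pm.Model():
    # Uniform priors restrict each parameter to a fixed interval
    mu = pm.Uniform("mu", lower=-10, upper=10)
    sigma = pm.Uniform("sigma", lower=0, upper=10)
    pm.Normal("obs", mu=mu, sd=sigma, observed=data)
    trace = pm.sample(1000, tune=1000)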
Example #1
Source File: bayesian.py    From cryptotrader with MIT License
def fit(self, X, Y, n_samples=10000, tune_steps=1000, n_jobs=4):
        with pm.Model() as self.model:
            # Priors
            std = pm.Uniform("std", 0, self.sps, testval=X.std())
            beta = pm.StudentT("beta", mu=0, lam=self.sps, nu=self.nu)
            alpha = pm.StudentT("alpha", mu=0, lam=self.sps, nu=self.nu, testval=Y.mean())
            # Deterministic model
            mean = pm.Deterministic("mean", alpha + beta * X)
            # Likelihood of the observed data
            obs = pm.Normal("obs", mu=mean, sd=std, observed=Y)
            ## Run MCMC
            # Find a sampling start value with maximum a posteriori (MAP) estimation
            start = pm.find_MAP()
            # sample posterior distribution for latent variables
            trace = pm.sample(n_samples, njobs=n_jobs, tune=tune_steps, start=start)
            # Recover posterior samples
            self.burned_trace = trace[int(n_samples / 2):] 
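Note: the `njobs` keyword used above belongs to older PyMC3 releases; in later versions (around 3.6) it was deprecated in favour of `cores`. A hedged equivalent of the sampling call on a newer PyMC3, assuming the same model context, would be:

# `njobs` became `cores` in later PyMC3 releases
trace = pm.sample(n_samples, cores=n_jobs, tune=tune_steps, start=start)
self.burned_trace = trace[int(n_samples / 2):]  # keep the second half as the burned-in trace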
Example #2
Source File: linear_regression_pymc3_custom_likelihood.py    From bilby with MIT License
def ln_prob(self, sampler=None):
        """
        Change ln_prob method to take in a Sampler and return a PyMC3
        distribution.
        """

        from bilby.core.sampler import Pymc3

        if not isinstance(sampler, Pymc3):
            raise ValueError("Sampler is not a bilby Pymc3 sampler object")

        return pm.Uniform(self.name, lower=self.minimum,
                          upper=self.maximum)


# From here on, the syntax is exactly equivalent to other bilby examples
# We make a prior 
Example #3
Source File: dist.py    From gelato with MIT License
def __init__(self):
        super(FlatSpec, self).__init__(testval=init.Uniform(1)) 
Example #4
Source File: GaussianProcessMCMC.py    From pyGPGO with MIT License
def fit(self, X, y):
        """
        Fits a Gaussian Process regressor using MCMC.

        Parameters
        ----------
        X: np.ndarray, shape=(nsamples, nfeatures)
            Training instances to fit the GP.
        y: np.ndarray, shape=(nsamples,)
            Corresponding continuous target values to `X`.

        """
        self.X = X
        self.n = self.X.shape[0]
        self.y = y
        self.model = pm.Model()

        with self.model as model:
            l = pm.Uniform('l', 0, 10)

            log_s2_f = pm.Uniform('log_s2_f', lower=-7, upper=5)
            s2_f = pm.Deterministic('sigmaf', tt.exp(log_s2_f))

            log_s2_n = pm.Uniform('log_s2_n', lower=-7, upper=5)
            s2_n = pm.Deterministic('sigman', tt.exp(log_s2_n))

            f_cov = s2_f * covariance_equivalence[type(self.covfunc).__name__](1, l)
            Sigma = f_cov(self.X) + tt.eye(self.n) * s2_n ** 2
            y_obs = pm.MvNormal('y_obs', mu=np.zeros(self.n), cov=Sigma, observed=self.y)
        with self.model as model:
            if self.step is not None:
                self.trace = pm.sample(self.niter, step=self.step())[self.burnin:]
            else:
                self.trace = pm.sample(self.niter, init=self.init)[self.burnin:] 
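The log-scale Uniform priors above (bounded on the log of a variance, then exponentiated through pm.Deterministic) are a common way to give a strictly positive scale parameter a broad prior. A standalone sketch of that pattern, with illustrative data and names not taken from pyGPGO:

import numpy as np
import pymc3 as pm
import theano.tensor as tt

y = np.random.normal(scale=0.5, size=50)  # illustrative observations

with pm.Model():
    # Uniform prior on the log of the noise variance keeps it between exp(-7) and exp(5)
    log_s2_n = pm.Uniform("log_s2_n", lower=-7, upper=5)
    s2_n = pm.Deterministic("s2_n", tt.exp(log_s2_n))
    pm.Normal("y_obs", mu=0.0, sd=tt.sqrt(s2_n), observed=y)
    trace = pm.sample(500, tune=500)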
Example #5
Source File: tStudentProcessMCMC.py    From pyGPGO with MIT License
def fit(self, X, y):
        """
        Fits a Student-t process regressor using MCMC.

        Parameters
        ----------
        X: np.ndarray, shape=(nsamples, nfeatures)
            Training instances to fit the GP.
        y: np.ndarray, shape=(nsamples,)
            Corresponding continuous target values to `X`.

        """
        self.X = X
        self.n = self.X.shape[0]
        self.y = y
        self.model = pm.Model()

        with self.model as model:
            l = pm.Uniform('l', 0, 10)

            log_s2_f = pm.Uniform('log_s2_f', lower=-7, upper=5)
            s2_f = pm.Deterministic('sigmaf', tt.exp(log_s2_f))

            log_s2_n = pm.Uniform('log_s2_n', lower=-7, upper=5)
            s2_n = pm.Deterministic('sigman', tt.exp(log_s2_n))

            f_cov = s2_f * covariance_equivalence[type(self.covfunc).__name__](1, l)
            Sigma = f_cov(self.X) + tt.eye(self.n) * s2_n ** 2
            y_obs = pm.MvStudentT('y_obs', nu=self.nu, mu=np.zeros(self.n), Sigma=Sigma, observed=self.y)
        with self.model as model:
            if self.step is not None:
                self.trace = pm.sample(self.niter, step=self.step())[self.burnin:]
            else:
                self.trace = pm.sample(self.niter, init=self.init)[self.burnin:] 
Example #6
Source File: pymc3.py    From pyPESTO with BSD 3-Clause "New" or "Revised" License
def sample(
            self, n_samples: int, beta: float = 1.):
        problem = self.problem
        log_post_fun = TheanoLogProbability(problem, beta)
        trace = self.trace

        x0 = None
        if self.x0 is not None:
            x0 = {x_name: val
                  for x_name, val in zip(self.problem.x_names, self.x0)}

        # create model context
        with pm.Model() as model:
            # uniform bounds
            k = [pm.Uniform(x_name, lower=lb, upper=ub)
                 for x_name, lb, ub in
                 zip(problem.get_reduced_vector(problem.x_names),
                     problem.lb, problem.ub)]

            # convert to tensor vector
            theta = tt.as_tensor_variable(k)

            # use a DensityDist for the log-posterior
            pm.DensityDist('log_post', logp=lambda v: log_post_fun(v),
                           observed={'v': theta})

            # step, by default automatically determined by pymc3
            step = None
            if self.step_function:
                step = self.step_function()

            # perform the actual sampling
            trace = pm.sample(
                draws=int(n_samples), trace=trace, start=x0, step=step,
                **self.options)

            # convert trace to inference data object
            data = az.from_pymc3(trace=trace, model=model)

        self.trace = trace
        self.data = data 
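The pattern above (one pm.Uniform per free parameter, stacked into a tensor and fed to a pm.DensityDist) can be reproduced in isolation roughly as follows. The quadratic log-density is a toy stand-in for pyPESTO's TheanoLogProbability, and the parameter names and bounds are illustrative:

import pymc3 as pm
import theano.tensor as tt

names = ["p0", "p1"]                 # illustrative parameter names
lbs, ubs = [0.0, -1.0], [1.0, 1.0]   # illustrative bounds

with pm.Model() as model:
    # one bounded Uniform per free parameter
    k = [pm.Uniform(name, lower=lb, upper=ub)
         for name, lb, ub in zip(names, lbs, ubs)]
    theta = tt.as_tensor_variable(k)

    # toy log-density standing in for an external log-posterior
    pm.DensityDist("log_post", logp=lambda v: -tt.sum(v ** 2),
                   observed={"v": theta})

    trace = pm.sample(500, tune=500)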
Example #7
Source File: linear_regression_pymc3_custom_likelihood.py    From bilby with MIT License
def __init__(self, minimum, maximum, name=None, latex_label=None):
        """
        Uniform prior with bounds (should be equivalent to bilby.prior.Uniform)
        """

        bilby.core.prior.Prior.__init__(self, name, latex_label,
                                        minimum=minimum,
                                        maximum=maximum) 
Example #8
Source File: model_selector.py    From cs-ranking with Apache License 2.0
def __init__(
        self,
        learner_cls,
        parameter_keys,
        model_params,
        fit_params,
        model_path,
        **kwargs,
    ):
        self.priors = [
            [pm.Normal, {"mu": 0, "sd": 10}],
            [pm.Laplace, {"mu": 0, "b": 10}],
        ]
        self.uniform_prior = [pm.Uniform, {"lower": -20, "upper": 20}]
        self.prior_indices = np.arange(len(self.priors))
        self.parameter_f = [
            (pm.Normal, {"mu": 0, "sd": 5}),
            (pm.Cauchy, {"alpha": 0, "beta": 1}),
            0,
            -5,
            5,
        ]
        self.parameter_s = [
            (pm.HalfCauchy, {"beta": 1}),
            (pm.HalfNormal, {"sd": 0.5}),
            (pm.Exponential, {"lam": 0.5}),
            (pm.Uniform, {"lower": 1, "upper": 10}),
            10,
        ]
        # ,(pm.HalfCauchy, {'beta': 2}), (pm.HalfNormal, {'sd': 1}),(pm.Exponential, {'lam': 1.0})]
        self.learner_cls = learner_cls
        self.model_params = model_params
        self.fit_params = fit_params
        self.parameter_keys = parameter_keys
        self.parameters = list(product(self.parameter_f, self.parameter_s))
        pf_arange = np.arange(len(self.parameter_f))
        ps_arange = np.arange(len(self.parameter_s))
        self.parameter_ind = list(product(pf_arange, ps_arange))
        self.model_path = model_path
        self.models = dict()
        self.logger = logging.getLogger(ModelSelector.__name__) 
Example #9
Source File: paired_combinatorial_logit.py    From cs-ranking with Apache License 2.0
def construct_model(self, X, Y):
        """
            Constructs the paired combinatorial logit model by applying priors on the weight vector **weights** as per :meth:`model_configuration`.
            Then we apply a uniform prior to the :math:`\\lambda s`, i.e. :math:`\\lambda s \\sim Uniform(\\text{alpha}, 1.0)`.
            The probability of choosing the object :math:`x_i` from the query set :math:`Q = \\{x_1, \\ldots ,x_n\\}` is
            evaluated in :meth:`get_probabilities`.

            Parameters
            ----------
            X : numpy array
                (n_instances, n_objects, n_features)
                Feature vectors of the objects
            Y : numpy array
                (n_instances, n_objects)
                Preferences in the form of discrete choices for given objects

            Returns
            -------
             model : pymc3 Model :class:`pm.Model`
        """
        self.loss_function_ = likelihood_dict.get(self.loss_function, None)
        with pm.Model() as self.model:
            self.Xt = theano.shared(X)
            self.Yt = theano.shared(Y)
            shapes = {"weights": self.n_object_features_fit_}
            weights_dict = create_weight_dictionary(self.model_configuration, shapes)
            lambda_k = pm.Uniform("lambda_k", self.alpha, 1.0, shape=self.n_nests)
            utility = tt.dot(self.Xt, weights_dict["weights"])
            self.p = self.get_probabilities(utility, lambda_k)
            LogLikelihood(
                "yl", loss_func=self.loss_function_, p=self.p, observed=self.Yt
            )
        self.logger.info("Model construction completed") 
Example #10
Source File: coKriging.py    From gempy with GNU Lesser General Public License v3.0
def fit_cross_cov(self, n_exp=2, n_gauss=2, range_mu=None):
        """
        Fit an analytical covariance to the experimental data.
        Args:
            n_exp (int): number of exponential basis functions
            n_gauss (int): number of Gaussian basis functions
            range_mu: prior mean of the range. Default mean of the lags

        Returns:
            pymc.Model: PyMC3 model to be sampled using MCMC
        """
        self.n_exp = n_exp
        self.n_gauss = n_gauss
        n_var = self.n_properties
        df = self.exp_var
        lags = self.lags

        # Prior standard deviation for the error of the regression
        prior_std_reg = df.std(0).max() * 10

        # Prior value for the mean of the ranges
        if not range_mu:
            range_mu = lags.mean()

        # pymc3 Model
        with pm.Model() as model:  # model specifications in PyMC3 are wrapped in a with-statement
            # Define priors
            sigma = pm.HalfCauchy('sigma', beta=prior_std_reg, testval=1., shape=n_var)

            psill = pm.Normal('sill', prior_std_reg, sd=.5 * prior_std_reg, shape=(n_exp + n_gauss))
            range_ = pm.Normal('range', range_mu, sd=range_mu * .3, shape=(n_exp + n_gauss))

            lambda_ = pm.Uniform('weights', 0, 1, shape=(n_var * (n_exp + n_gauss)))

            # Exponential covariance
            exp = pm.Deterministic('exp',
                                   # (lambda_[:n_exp*n_var]*
                                   psill[:n_exp] *
                                   (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)),
                                                     (range_[:n_exp].reshape((1, n_exp)) / 3.) ** -1))))

            gauss = pm.Deterministic('gaus',
                                     psill[n_exp:] *
                                     (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)) ** 2,
                                                       (range_[n_exp:].reshape((1, n_gauss)) * 4 / 7.) ** -2))))

            # We stack the basis functions in the same matrix and tile it to match the number of properties we have
            func = pm.Deterministic('func', T.tile(T.horizontal_stack(exp, gauss), (n_var, 1, 1)))

            # We weight each basis function and sum them
            func_w = pm.Deterministic("func_w", T.sum(func * lambda_.reshape((n_var, 1, (n_exp + n_gauss))), axis=2))

            for e, cross in enumerate(df.columns):
                # Likelihoods
                pm.Normal(cross + "_like", mu=func_w[e], sd=sigma[e], observed=df[cross].values)
        return model 
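The `weights` variable above shows another common role for pm.Uniform: bounded mixing coefficients between 0 and 1. A stripped-down sketch of that idea, combining two fixed basis curves with Uniform(0, 1) weights; the curves and data here are synthetic and only illustrative:

import numpy as np
import pymc3 as pm
import theano.tensor as tt

lags = np.linspace(0.1, 10.0, 20)
basis = np.stack([1.0 - np.exp(-lags), 1.0 - np.exp(-lags ** 2)], axis=1)
obs = basis @ np.array([0.3, 0.6]) + np.random.normal(0, 0.05, size=20)

with pm.Model():
    # Uniform(0, 1) weight for each basis curve
    w = pm.Uniform("weights", 0, 1, shape=2)
    mu = pm.Deterministic("mu", tt.dot(basis, w))
    pm.Normal("obs", mu=mu, sd=0.05, observed=obs)
    trace = pm.sample(500, tune=500)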
Example #11
Source File: coKriging.py    From gempy with GNU Lesser General Public License v3.0
def fit_cross_cov(df, lags, n_exp=2, n_gaus=2, range_mu=None):
    n_var = df.columns.shape[0]
    n_basis_f = n_var * (n_exp + n_gaus)
    prior_std_reg = df.std(0).max() * 10
    #
    if not range_mu:
        range_mu = lags.mean()

    # Because this is an experimental variogram, we do not expect outliers
    nugget_max = df.values.max()
    # print(n_basis_f, n_var*n_exp, nugget_max, range_mu, prior_std_reg)
    # pymc3 Model
    with pm.Model() as model:  # model specifications in PyMC3 are wrapped in a with-statement
        # Define priors
        sigma = pm.HalfCauchy('sigma', beta=prior_std_reg, testval=1., shape=n_var)

        psill = pm.Normal('sill', prior_std_reg, sd=.5 * prior_std_reg, shape=(n_exp + n_gaus))
        range_ = pm.Normal('range', range_mu, sd=range_mu * .3, shape=(n_exp + n_gaus))
        #  nugget = pm.Uniform('nugget', 0, nugget_max, shape=n_var)

        lambda_ = pm.Uniform('weights', 0, 1, shape=(n_var * (n_exp + n_gaus)))

        # Exponential covariance
        exp = pm.Deterministic('exp',
                               # (lambda_[:n_exp*n_var]*
                               psill[:n_exp] *
                               (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)),
                                                 (range_[:n_exp].reshape((1, n_exp)) / 3.) ** -1))))

        gaus = pm.Deterministic('gaus',
                                psill[n_exp:] *
                                (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)) ** 2,
                                                  (range_[n_exp:].reshape((1, n_gaus)) * 4 / 7.) ** -2))))

        func = pm.Deterministic('func', T.tile(T.horizontal_stack(exp, gaus), (n_var, 1, 1)))

        func_w = pm.Deterministic("func_w", T.sum(func * lambda_.reshape((n_var, 1, (n_exp + n_gaus))), axis=2))
        #           nugget.reshape((n_var,1)))

        for e, cross in enumerate(df.columns):
            # Likelihoods
            pm.Normal(cross + "_like", mu=func_w[e], sd=sigma[e], observed=df[cross].values)
    return model 
Example #12
Source File: generalized_nested_logit.py    From cs-ranking with Apache License 2.0
def construct_model(self, X, Y):
        """
            Constructs the generalized nested logit model by applying priors on weight vectors **weights** and **weights_ik** as per
            :meth:`model_configuration`. Then we apply a uniform prior to the :math:`\\lambda s`, i.e.
            :math:`\\lambda s \\sim Uniform(\\text{alpha}, 1.0)`. The probability of choosing the object :math:`x_i` from the
            query set :math:`Q = \\{x_1, \\ldots ,x_n\\}` is evaluated in :meth:`get_probabilities`.

            Parameters
            ----------
            X : numpy array
                (n_instances, n_objects, n_features)
                Feature vectors of the objects
            Y : numpy array
                (n_instances, n_objects)
                Preferences in the form of discrete choices for given objects

            Returns
            -------
             model : pymc3 Model :class:`pm.Model`
        """
        self.random_state_ = check_random_state(self.random_state)
        self.loss_function_ = likelihood_dict.get(self.loss_function, None)
        if np.prod(X.shape) > self.threshold:
            upper_bound = int(self.threshold / np.prod(X.shape[1:]))
            indices = self.random_state_.choice(X.shape[0], upper_bound, replace=False)
            X = X[indices, :, :]
            Y = Y[indices, :]
        self.logger.info(
            "Train Set instances {} objects {} features {}".format(*X.shape)
        )
        with pm.Model() as self.model:
            self.Xt = theano.shared(X)
            self.Yt = theano.shared(Y)
            shapes = {
                "weights": self.n_object_features_fit_,
                "weights_ik": (self.n_object_features_fit_, self.n_nests),
            }
            weights_dict = create_weight_dictionary(self.model_configuration, shapes)

            alpha_ik = tt.dot(self.Xt, weights_dict["weights_ik"])
            alpha_ik = ttu.softmax(alpha_ik, axis=2)
            utility = tt.dot(self.Xt, weights_dict["weights"])
            lambda_k = pm.Uniform("lambda_k", self.alpha, 1.0, shape=self.n_nests)
            self.p = self.get_probabilities(utility, lambda_k, alpha_ik)
            LogLikelihood(
                "yl", loss_func=self.loss_function_, p=self.p, observed=self.Yt
            )
        self.logger.info("Model construction completed") 
Example #13
Source File: model_selector.py    From cs-ranking with Apache License 2.0
def fit(self, X, Y):
        model_args = dict()
        for param_key in self.parameter_keys:
            model_args[param_key] = self.uniform_prior
        self.logger.info("Uniform Prior")
        self.model_params["model_args"] = model_args
        key = "{}_uniform_prior".format(self.parameter_keys)
        self.fit_learner(X, Y, key)
        for j, param in enumerate(self.parameters):
            self.logger.info("mu: {}, sd/b: {}".format(*self.parameter_ind[j]))
            if len(self.parameter_keys) == 2:
                for i1, i2 in product(self.prior_indices, self.prior_indices):
                    prior1 = self.priors[i1]
                    prior2 = self.priors[i2]
                    self.logger.info("Priors {}, {}".format(i1, i2))
                    model_args = dict()
                    k1 = list(prior1[1].keys())
                    k2 = list(prior2[1].keys())
                    prior1[1] = dict(zip(k1, param))
                    prior2[1] = dict(zip(k2, param))
                    model_args[self.parameter_keys[0]] = prior1
                    model_args[self.parameter_keys[1]] = prior2
                    key = "{}_{}_{}_{}_mu_{}_sd_{}".format(
                        self.parameter_keys[0],
                        i1,
                        self.parameter_keys[1],
                        i2,
                        self.parameter_ind[j][0],
                        self.parameter_ind[j][1],
                    )
                    self.model_params["model_args"] = model_args
                    self.fit_learner(X, Y, key)
            else:
                for i, prior in enumerate(self.priors):
                    self.logger.info("Prior {}".format(i))
                    model_args = dict()
                    k1 = list(prior[1].keys())
                    prior[1] = dict(zip(k1, param))
                    model_args[self.parameter_keys[0]] = prior
                    self.model_params["model_args"] = model_args
                    key = "{}_{}_mu_{}_sd_{}".format(
                        self.parameter_keys[0],
                        i,
                        self.parameter_ind[j][0],
                        self.parameter_ind[j][1],
                    )
                    self.fit_learner(X, Y, key) 
Example #14
Source File: nested_logit_model.py    From cs-ranking with Apache License 2.0
def construct_model(self, X, Y):
        """
            Constructs the nested logit model by applying priors on weight vectors **weights** and **weights_k** as per
            :meth:`model_configuration`. Then we apply a uniform prior to the :math:`\\lambda s`, i.e.
            :math:`\\lambda s \\sim Uniform(\\text{alpha}, 1.0)`. The probability of choosing the object :math:`x_i` from
            the query set :math:`Q = \\{x_1, \\ldots ,x_n\\}` is evaluated in :meth:`get_probabilities`.

            Parameters
            ----------
            X : numpy array
                (n_instances, n_objects, n_features)
                Feature vectors of the objects
            Y : numpy array
                (n_instances, n_objects)
                Preferences in the form of discrete choices for given objects

            Returns
            -------
             model : pymc3 Model :class:`pm.Model`
        """
        self.loss_function_ = likelihood_dict.get(self.loss_function, None)
        if np.prod(X.shape) > self.threshold:
            upper_bound = int(self.threshold / np.prod(X.shape[1:]))
            indices = self.random_state_.choice(X.shape[0], upper_bound, replace=False)
            X = X[indices, :, :]
            Y = Y[indices, :]
        self.logger.info(
            "Train Set instances {} objects {} features {}".format(*X.shape)
        )
        y_nests = self.create_nests(X)
        with pm.Model() as self.model:
            self.Xt = theano.shared(X)
            self.Yt = theano.shared(Y)
            self.y_nests = theano.shared(y_nests)
            shapes = {
                "weights": self.n_object_features_fit_,
                "weights_k": self.n_object_features_fit_,
            }

            weights_dict = create_weight_dictionary(self.model_configuration, shapes)
            lambda_k = pm.Uniform("lambda_k", self.alpha, 1.0, shape=self.n_nests)
            weights = weights_dict["weights"] / lambda_k[:, None]
            utility = self._eval_utility(weights)
            utility_k = tt.dot(self.features_nests, weights_dict["weights_k"])
            self.p = self.get_probabilities(utility, lambda_k, utility_k)

            LogLikelihood(
                "yl", loss_func=self.loss_function_, p=self.p, observed=self.Yt
            )
        self.logger.info("Model construction completed")