Python scipy.special.expit() Examples

The following are 30 code examples of scipy.special.expit(), the numerically stable logistic sigmoid function, drawn from open-source projects. Each example lists the project and source file it was taken from. You may also want to check out all available functions and classes of the scipy.special module.
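As a quick refresher before diving in: expit(x) computes the logistic sigmoid 1 / (1 + exp(-x)) elementwise in a numerically stable way, and scipy.special.logit is its inverse. A minimal sketch:

import numpy as np
from scipy.special import expit, logit

x = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
p = expit(x)                     # logistic sigmoid, applied elementwise
print(p)                         # values in (0, 1); expit(0) == 0.5
print(np.allclose(logit(p), x))  # logit inverts expit (up to rounding)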
Example #1
Source File: discriminant_analysis.py    From Mastering-Elasticsearch-7.0 with MIT License
def predict_proba(self, X):
        """Estimate probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        check_is_fitted(self, 'classes_')

        decision = self.decision_function(X)
        if self.classes_.size == 2:
            proba = expit(decision)
            return np.vstack([1-proba, proba]).T
        else:
            return softmax(decision) 
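The binary branch above is a common pattern: a one-dimensional decision score is squashed through expit to get P(y=1), and the class-0 column is its complement. A standalone sketch of the same idea (the decision scores here are made up):

import numpy as np
from scipy.special import expit

decision = np.array([-2.0, 0.0, 3.0])   # hypothetical decision_function output
proba = expit(decision)                 # P(y=1) for each sample
print(np.vstack([1 - proba, proba]).T)  # each row sums to 1 by construction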
Example #2
Source File: run_supervised_tm.py    From causal-text-embeddings with MIT License
def predict(model, docs, dtype='real'):
	normalized = docs/docs.sum(axis=-1)[:,np.newaxis]
	normalized_bow = torch.tensor(normalized, dtype=torch.float)
	num_documents = docs.shape[0]

	treatment_ones = torch.ones(num_documents) 
	treatment_zeros = torch.zeros(num_documents) 

	model.eval()
	with torch.no_grad():
		doc_representation,_ = model.get_theta(normalized_bow)
		propensity_score = model.predict_treatment(doc_representation).squeeze().detach().numpy()
		propensity_score = expit(propensity_score)
		expected_outcome_treat = model.predict_outcome_st_treat(doc_representation, treatment_ones).squeeze().detach().numpy()
		expected_outcome_no_treat = model.predict_outcome_st_no_treat(doc_representation, treatment_zeros).squeeze().detach().numpy()

		if dtype == 'binary':
			expected_outcome_treat = expit(expected_outcome_treat)
			expected_outcome_no_treat = expit(expected_outcome_no_treat)
		
		return propensity_score, expected_outcome_treat, expected_outcome_no_treat 
Example #3
Source File: pyglmnet.py    From pyglmnet with MIT License
def _mu(distr, z, eta, fit_intercept):
    """The non-linearity (inverse link)."""
    if distr in ['softplus', 'gamma']:
        mu = np.log1p(np.exp(z))
    elif distr == 'poisson':
        mu = z.copy()
        beta0 = (1 - eta) * np.exp(eta) if fit_intercept else 0.
        mu[z > eta] = z[z > eta] * np.exp(eta) + beta0
        mu[z <= eta] = np.exp(z[z <= eta])
    elif distr == 'gaussian':
        mu = z
    elif distr == 'binomial':
        mu = expit(z)
    elif distr == 'probit':
        mu = norm.cdf(z)
    return mu 
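The 'poisson' branch deserves a note: above the threshold eta it replaces exp(z) with its first-order Taylor expansion around eta, exp(eta) * (1 + z - eta), so mu grows linearly instead of overflowing for large z while staying continuous at z = eta. A small check of that continuity (assuming fit_intercept=True, so beta0 = (1 - eta) * exp(eta)):

import numpy as np

eta = 2.0
below = np.exp(eta)                                   # exp branch at z = eta
above = eta * np.exp(eta) + (1 - eta) * np.exp(eta)   # linearized branch at z = eta
print(np.isclose(below, above))                       # True: the pieces meet at eta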
Example #4
Source File: array_utils.py    From baal with Apache License 2.0
def to_prob(probabilities: np.ndarray):
    """
    If the probabilities array is not a distrubution will softmax it.

    Args:
        probabilities (array): [batch_size, num_classes, ...]

    Returns:
        Same as probabilities.
    """
    not_bounded = np.min(probabilities) < 0 or np.max(probabilities) > 1.0
    multiclass = probabilities.shape[1] > 1
    sum_to_one = np.allclose(probabilities.sum(1), 1)
    if not_bounded or (multiclass and not sum_to_one):
        if multiclass:
            probabilities = softmax(probabilities, 1)
        else:
            probabilities = expit(probabilities)
    return probabilities 
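A hedged usage sketch of the helper above (the logits are invented, and to_prob with its scipy imports is assumed in scope as in the source file): raw multiclass scores fail the bounds/sum checks and get softmaxed along axis 1, while arrays that are already valid distributions pass through unchanged:

import numpy as np

logits = np.array([[2.0, -1.0, 0.5],
                   [0.1, 0.1, 3.0]])       # unbounded scores, not probabilities
probs = to_prob(logits)                    # softmaxed along the class axis
print(probs.sum(1))                        # [1. 1.]
print(np.allclose(to_prob(probs), probs))  # already a distribution: unchanged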
Example #5
Source File: shared_cnn.py    From eval-nas with MIT License
def update_dag_logits(self, gradient_dicts, weight_decay, max_grad=0.1):
        """
        Updates the probabilities of each path being selected using the given gradients.
        """
        dag_probs = tuple(expit(logit) for logit in self.dags_logits)
        current_average_dag_probs = tuple(np.mean(prob) for prob in dag_probs)

        for i, key in enumerate(self.all_connections):
            for grad_dict, current_average_dag_prob, dag_logits in zip(gradient_dicts, current_average_dag_probs,
                                                                       self.dags_logits):
                if key in grad_dict:
                    grad = grad_dict[key] - weight_decay * (
                            current_average_dag_prob - self.target_ave_prob)  # *expit(dag_logits[i])
                    deriv = sigmoid_derivitive(dag_logits[i])
                    logit_grad = grad * deriv
                    dag_logits[i] += np.clip(logit_grad, -max_grad, max_grad) 
Example #6
Source File: test_logistic_regression.py    From dl4nlp with MIT License
def assertLogisticRegression(self, sampler):
        data_size = 3
        input_size = 5
        inputs = np.random.uniform(-10.0, 10.0, size=(data_size, input_size))
        outputs = np.random.randint(0, 2, size=data_size)
        initial_parameters = np.random.normal(scale=1e-5, size=input_size)

        # Create cost and gradient function for gradient descent and check its gradient
        cost_gradient = bind_cost_gradient(logistic_regression_cost_gradient,
                                           inputs, outputs, sampler=sampler)
        result = gradient_check(cost_gradient, initial_parameters)
        self.assertEqual([], result)

        # Train logistic regression and see if it predicts correct labels
        final_parameters, cost_history = gradient_descent(cost_gradient, initial_parameters, 100)
        predictions = expit(np.dot(inputs, final_parameters)) > 0.5

        # Binary classification of 3 data points in 5 dimensions is always linearly separable
        for output, prediction in zip(outputs, predictions):
            self.assertEqual(output, prediction) 
Example #7
Source File: att.py    From causal-text-embeddings with MIT License
def _perturbed_model(q_t0, q_t1, g, t, q, eps):
    # helper function for psi_tmle

    h1 = t / q - ((1 - t) * g) / (q * (1 - g))
    full_q = (1.0 - t) * q_t0 + t * q_t1
    perturbed_q = full_q - eps * h1

    def q1(t_cf, epsilon):
        h_cf = t_cf * (1.0 / g) - (1.0 - t_cf) / (1.0 - g)
        full_q = (1.0 - t_cf) * q_t0 + t_cf * q_t1  # predictions from unperturbed model
        return full_q - epsilon * h_cf

    psi_init = np.mean(t * (q1(np.ones_like(t), eps) - q1(np.zeros_like(t), eps))) / q
    h2 = (q_t1 - q_t0 - psi_init) / q
    perturbed_g = expit(logit(g) - eps * h2)

    return perturbed_q, perturbed_g 
Example #8
Source File: logistic_regression.py    From dl4nlp with MIT License
def logistic_regression_cost_gradient(parameters, input, output):
    """
    Cost and gradient for logistic regression
    :param parameters: weight vector
    :param input: feature vector
    :param output: binary label (0 or 1)
    :return: cost and gradient for the input and output
    """
    prediction = expit(np.dot(input, parameters))
    if output:
        inside_log = prediction
    else:
        inside_log = 1.0 - prediction

    if inside_log != 0.0:
        cost = -np.log(inside_log)
    else:
        # -log(0) diverges to +inf; saturate at the largest finite float
        cost = np.finfo(float).max

    gradient = (prediction - output) * input
    return cost, gradient 
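The analytic gradient (prediction - output) * input can be sanity-checked against a finite difference. A minimal sketch using the function above, with arbitrary made-up data (expit and np are assumed in scope as in the source file):

import numpy as np

rng = np.random.RandomState(0)
parameters = rng.normal(size=4)
x, y = rng.normal(size=4), 1

cost, gradient = logistic_regression_cost_gradient(parameters, x, y)
eps = 1e-6
e0 = np.eye(4)[0]                          # perturb the first parameter only
numeric = (logistic_regression_cost_gradient(parameters + eps * e0, x, y)[0]
           - cost) / eps
print(np.isclose(numeric, gradient[0], atol=1e-4))  # True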
Example #9
Source File: basis_functions.py    From revrand with Apache License 2.0
def transform(self, X, lenscale=None):
        r"""
        Apply the sigmoid basis function to X.

        Parameters
        ----------
        X: ndarray
            (N, d) array of observations where N is the number of samples, and
            d is the dimensionality of X.
        lenscale: float
            the length scale (scalar) of the sigmoid basis functions to apply
            to X. If not input, this uses the value of the initial length
            scale.

        Returns
        -------
        ndarray:
            of shape (N, D) where D is number of centres.
        """
        N, d = X.shape
        lenscale = self._check_dim(d, lenscale)

        return expit(cdist(X / lenscale, self.C / lenscale, 'euclidean')) 
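Stripped of the class machinery, the transform is a sigmoid of scaled Euclidean distances to a set of centres. A self-contained sketch of the same computation (centres, data, and length scale are all made up):

import numpy as np
from scipy.special import expit
from scipy.spatial.distance import cdist

X = np.random.randn(100, 2)     # N = 100 observations, d = 2
C = np.random.randn(5, 2)       # D = 5 basis centres
lenscale = 0.5
Phi = expit(cdist(X / lenscale, C / lenscale, 'euclidean'))
print(Phi.shape)                # (100, 5): one basis response per centre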
Example #10
Source File: likelihoods.py    From revrand with Apache License 2.0
def df(self, y, f):
        r"""
        Derivative of Poisson log likelihood w.r.t.\  f.

        Parameters
        ----------
        y: ndarray
            array of non-negative integer targets (counts)
        f: ndarray
            latent function from the GLM prior (:math:`\mathbf{f} =
            \boldsymbol\Phi \mathbf{w}`)

        Returns
        -------
        df: ndarray
            the derivative :math:`\partial \log p(y|f) / \partial f`
        """
        y, f = np.broadcast_arrays(y, f)
        if self.tranfcn == 'exp':
            return y - np.exp(f)
        else:
            return expit(f) * (y / safesoftplus(f) - 1) 
Example #11
Source File: likelihoods.py    From revrand with Apache License 2.0
def cdf(self, y, f, n):
        r"""
        Cumulative distribution function of the likelihood.

        Parameters
        ----------
        y: ndarray
            query quantiles, i.e.\  :math:`P(Y \leq y)`.
        f: ndarray
            latent function from the GLM prior (:math:`\mathbf{f} =
            \boldsymbol\Phi \mathbf{w}`)
        n: ndarray
            the total number of observations

        Returns
        -------
        cdf: ndarray
            Cumulative distribution function evaluated at y.
        """
        return binom.cdf(y, n=n, p=expit(f)) 
Example #12
Source File: likelihoods.py    From revrand with Apache License 2.0
def Ey(self, f, n):
        r"""
        Expected value of the Binomial likelihood.

        Parameters
        ----------
        f: ndarray
            latent function from the GLM prior (:math:`\mathbf{f} =
            \boldsymbol\Phi \mathbf{w}`)
        n: ndarray
            the total number of observations

        Returns
        -------
        Ey: ndarray
            expected value of y, :math:`\mathbb{E}[\mathbf{y}|\mathbf{f}]`.
        """
        return expit(f) * n 
Example #13
Source File: likelihoods.py    From revrand with Apache License 2.0
def loglike(self, y, f, n):
        r"""
        Binomial log likelihood.

        Parameters
        ----------
        y: ndarray
            array of integer targets in [0, n] (number of successes)
        f: ndarray
            latent function from the GLM prior (:math:`\mathbf{f} =
            \boldsymbol\Phi \mathbf{w}`)
        n: ndarray
            the total number of observations

        Returns
        -------
        logp: ndarray
            the log likelihood of each y given each f under this
            likelihood.
        """
        ll = binom.logpmf(y, n=n, p=expit(f))
        return ll 
Example #14
Source File: likelihoods.py    From revrand with Apache License 2.0
def df(self, y, f):
        r"""
        Derivative of Bernoulli log likelihood w.r.t.\  f.

        Parameters
        ----------
        y: ndarray
            array of 0, 1 valued integers of targets
        f: ndarray
            latent function from the GLM prior (:math:`\mathbf{f} =
            \boldsymbol\Phi \mathbf{w}`)

        Returns
        -------
        df: ndarray
            the derivative :math:`\partial \log p(y|f) / \partial f`
        """
        y, f = np.broadcast_arrays(y, f)
        return y - expit(f) 
Example #15
Source File: likelihoods.py    From revrand with Apache License 2.0
def Ey(self, f):
        r"""
        Expected value of the Bernoulli likelihood.

        Parameters
        ----------
        f: ndarray
            latent function from the GLM prior (:math:`\mathbf{f} =
            \boldsymbol\Phi \mathbf{w}`)

        Returns
        -------
        Ey: ndarray
            expected value of y, :math:`\mathbb{E}[\mathbf{y}|\mathbf{f}]`.
        """
        return expit(f) 
Example #16
Source File: likelihoods.py    From revrand with Apache License 2.0
def cdf(self, y, f):
        r"""
        Cumulative distribution function of the likelihood.

        Parameters
        ----------
        y: ndarray
            query quantiles, i.e.\  :math:`P(Y \leq y)`.
        f: ndarray
            latent function from the GLM prior (:math:`\mathbf{f} =
            \boldsymbol\Phi \mathbf{w}`)

        Returns
        -------
        cdf: ndarray
            Cumulative distribution function evaluated at y.
        """
        return bernoulli.cdf(y, expit(f)) 
Example #17
Source File: test_gradient_boosting.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_probability_exponential():
    # Predict probabilities.
    clf = GradientBoostingClassifier(loss='exponential',
                                     n_estimators=100, random_state=1)

    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)
    score = clf.decision_function(T).ravel()
    assert_array_almost_equal(y_proba[:, 1], expit(2 * score))

    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result) 
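The expit(2 * score) assertion reflects a known property of the exponential loss: its population minimizer is half the log-odds, F(x) = 0.5 * log(p / (1 - p)), so recovering the probability requires doubling the score before the sigmoid. A one-line check of the algebra:

import numpy as np
from scipy.special import expit, logit

p = 0.8
F = 0.5 * logit(p)                   # exponential-loss minimizer: half the log-odds
print(np.isclose(expit(2 * F), p))   # True: expit(2F) recovers p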
Example #18
Source File: utils.py    From keras-yolo3 with MIT License
def _sigmoid(x):
    return expit(x) 
Example #19
Source File: shared_cnn.py    From eval-nas with MIT License
def get_dags_probs(self):
        """Returns the current probability of each path being selected.
        Each index corresponds to the connection in self.all_connections
        """
        return tuple(expit(logits) for logits in self.dags_logits) 
Example #20
Source File: utils.py    From ImageAI with MIT License
def _sigmoid(x):
    return expit(x) 
Example #21
Source File: sparse.py    From pyxclib with MIT License
def sigmoid(X, copy=False):
    """Sparse sigmoid i.e. zeros are kept intact
    Parameters
    ----------
    X: csr_matrix
        sparse matrix in csr format
    copy: boolean, optional, default=False
        make a copy or not
    """
    if copy:
        X = X.copy()
    X.data = expit(X.data)
    return X 
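The reason for touching only X.data is that expit(0) == 0.5, so a naive dense sigmoid would turn every implicit zero into 0.5 and destroy the sparsity; the helper above squashes only the stored entries. A quick sketch:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.special import expit

X = csr_matrix(np.array([[0.0, 2.0], [-1.0, 0.0]]))
S = sigmoid(X, copy=True)      # helper defined above; original X left intact
print(S.toarray())             # zeros stay zero; stored entries pass through expit
print(expit(0.0))              # 0.5 -- what a dense sigmoid would have produced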
Example #22
Source File: strategy.py    From ml-five with MIT License
def get_output(self, hiddens):
        v = self.output_weights.dot(hiddens)
#         print(self.hidden_weights.shape)
#         print(hiddens.shape)
#         print(v.shape)
        return expit(v)
#         return v 
Example #23
Source File: annealing.py    From macarico with MIT License
def __call__(self, T):
        # timv: This doesn't look correct: why so many kappas?
        #return self.lower_bound + self.width * self.kappa / (self.kappa + expit(T / self.kappa))
        return self.lower_bound + self.width * sigmoid(-T / self.kappa)


# timv: UserAnnealing: is only useful to wrap `f` in something which is a
# subtype `Annealing`. it's better to just subclass Annealing.
#class UserAnnealing(Annealing):
#    def __init__(self, f):
#        self.f = f
#    def __call__(self, T):
#        return self.f(T) 
Example #24
Source File: mle_neuro.py    From particles with MIT License
def PY(self, t, xp, x):
        return dists.Binomial(n=self.M, p=expit(x)) 
Example #25
Source File: strategy.py    From ml-five with MIT License
def get_hidden_values(self, inputs):
        v = self.hidden_weights.dot(inputs)
#         print(self.hidden_weights.shape)
#         print(inputs.shape)
#         print(v.shape)
        v = expit(v)
        v[0] = 1.
        return v 
Example #26
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _cdf(self, x):
        return sc.expit(x) 
Example #27
Source File: gaia.py    From AiGEM_TeamHeidelberg2017 with MIT License
def score(self, seq, classes, classes_variance):
        """
        Calculates a score for a sequence using the logists retrieved from the classifier as well as their variance and
            other information on the sequence, like the blosum score. Weights for goal and avoid classes are taken from
            the gaia file, the blosum weight is normalized with the length of the sequence
        Args:
            seq(ndarray): Sequence to be scored in one-hot encoding
            classes(ndarrray): Mean logits for the sequence from the classifier.
            classes_variance(ndarray): Variance between the logits for the sequence from the classifier

        Returns(float): A score for the sequence

        """
        blosumweight = 2/(11*len(self.startseq_chars))

        scores = np.ndarray((self.seqs.shape[0]))
        blosum_scores = np.ndarray((self.seqs.shape[0]))

        for seq in range(scores.shape[0]):
            scores[seq] = 0
            for goal_id in range(len(self.goal_weights)):
                scores[seq] += self.goal_weights[goal_id] * classes[seq, self.goals[goal_id]] \
                               - (expit(classes_variance[seq, self.goals[goal_id]]))
            for avoid_id in range(len(self.avoids_weights)):
                scores[seq] -= self.avoids_weights[avoid_id] * classes[seq, self.avoids[avoid_id]] \
                               + (expit(classes_variance[seq, self.avoids[avoid_id]]))
            norm = np.sum(self.goal_weights)
            if norm == 0:
                norm = 1
            scores[seq] = scores[seq]/norm
            blosum_scores[seq] = blosumweight * self.blosumscore(self.seqs[seq])
        scores -= blosum_scores
        return scores, blosum_scores 
Example #28
Source File: test_logit.py    From GraphicDesignPatternByPython with MIT License
def test_large(self):
        for dtype in (np.float32, np.float64, np.longdouble):
            for n in (88, 89, 709, 710, 11356, 11357):
                n = np.array(n, dtype=dtype)
                assert_allclose(expit(n), 1.0, atol=1e-20)
                assert_allclose(expit(-n), 0.0, atol=1e-20)
                assert_equal(expit(n).dtype, dtype)
                assert_equal(expit(-n).dtype, dtype) 
Example #29
Source File: _continuous_distns.py    From GraphicDesignPatternByPython with MIT License
def _sf(self, x):
        return sc.expit(-x) 
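Examples #26 and #29 together encode the symmetry of the logistic distribution: expit is its CDF, and since expit(x) + expit(-x) == 1, the survival function is simply expit(-x). A quick check against scipy.stats:

import numpy as np
from scipy.special import expit
from scipy.stats import logistic

x = np.linspace(-5, 5, 11)
print(np.allclose(expit(x) + expit(-x), 1.0))   # sigmoid symmetry
print(np.allclose(logistic.cdf(x), expit(x)))   # expit is the logistic CDF
print(np.allclose(logistic.sf(x), expit(-x)))   # hence sf(x) = expit(-x)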
Example #30
Source File: test_logit.py    From GraphicDesignPatternByPython with MIT License
def check_expit_out(self, dtype, expected):
        a = np.linspace(-4, 4, 10)
        a = np.array(a, dtype=dtype)
        actual = expit(a)
        assert_almost_equal(actual, expected)
        assert_equal(actual.dtype, np.dtype(dtype))