Python autograd.numpy.maximum() Examples

The following are code examples showing how to use autograd.numpy.maximum(), drawn from open-source Python projects.
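Because autograd.numpy.maximum() is autograd's wrapped version of numpy.maximum(), it can sit inside functions differentiated with autograd.grad. A minimal sketch (not from any of the projects below) showing how the gradient routes through whichever argument attains the maximum:

import autograd.numpy as np
from autograd import grad

relu = lambda x: np.maximum(x, 0.0)  # ReLU built from np.maximum

drelu = grad(relu)
print(drelu(3.0))   # 1.0 -- for x > 0, d/dx max(x, 0) = 1
print(drelu(-2.0))  # 0.0 -- the constant branch is active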

Example 1
Project: momi2   Author: popgenmethods   File: util.py   License: GNU General Public License v3.0
def truncate0(x, axis=None, strict=False, tol=1e-13):
    '''make sure everything in x is non-negative'''
    # the maximum along axis
    maxes = np.maximum(np.amax(x, axis=axis), 1e-300)
    # the negative part of minimum along axis
    mins = np.maximum(-np.amin(x, axis=axis), 0.0)

    # assert the negative numbers are small (relative to maxes)
    assert np.all(mins <= tol * maxes)

    if axis is not None:
        idx = [slice(None)] * x.ndim
        idx[axis] = np.newaxis
        mins = mins[idx]
        maxes = maxes[idx]

    if strict:
        # set everything below the tolerance to 0
        return set0(x, x < tol * maxes)
    else:
        # set everything of the same magnitude as the most negative number to 0
        return set0(x, x < 2 * mins) 
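A hypothetical smoke test for truncate0() above. The helper set0() is not shown in this listing; the stand-in below simply zeroes out masked entries, which matches how truncate0() uses it:

import autograd.numpy as np

def set0(x, mask):
    # stand-in for momi2's set0: zero the entries where mask is True
    return np.where(mask, 0.0, x)

x = np.array([1.0, -1e-14, 2.0])
print(truncate0(x))  # [1. 0. 2.] -- the tiny negative entry is zeroed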
Example 2
Project: momi2   Author: popgenmethods   File: likelihood.py   License: GNU General Public License v3.0
def _composite_log_likelihood(data, demo, mut_rate=None, truncate_probs=0.0, vector=False, p_missing=None, use_pairwise_diffs=False, **kwargs):
    try:
        sfs = data.sfs
    except AttributeError:
        sfs = data

    sfs_probs = np.maximum(expected_sfs(demo, sfs.configs, normalized=True, **kwargs),
                           truncate_probs)
    log_lik = sfs._integrate_sfs(np.log(sfs_probs), vector=vector)

    # add on log likelihood of poisson distribution for total number of SNPs
    if mut_rate is not None:
        log_lik = log_lik + \
            _mut_factor(sfs, demo, mut_rate, vector,
                        p_missing, use_pairwise_diffs)

    if not vector:
        log_lik = np.squeeze(log_lik)
    return log_lik 
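The np.maximum(..., truncate_probs) call floors each expected SFS probability before taking the log, so entries that underflow to zero cannot produce -inf log-likelihoods. The pattern in isolation (values made up):

import autograd.numpy as np

probs = np.array([0.3, 0.0, 0.7])    # an exact zero would give log(0) = -inf
floored = np.maximum(probs, 1e-100)  # truncate_probs plays this role above
print(np.log(floored))               # finite everywhere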
Example 3
Project: SyntheticStatistics   Author: BlissChapman   File: util.py   License: MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct 
        a new covariance matrix before drawing samples. Useful to shrink the spread 
        of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
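A hypothetical call to fit_gaussian_draw() above; NumpySeedContext, a context manager from the same project that seeds and then restores numpy's RNG state, is assumed to be importable:

import autograd.numpy as np

X = np.random.randn(200, 3)            # 200 points in 3 dimensions
V = fit_gaussian_draw(X, J=5, seed=1)  # 5 draws from the fitted Gaussian
print(V.shape)                         # (5, 3)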
Example 4
Project: disk-embedding   Author: lapras-inc   File: eucl_cones_model.py   License: Apache License 2.0
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
        norm_parents = np.linalg.norm(parent_vectors, axis=1)
        norms_other = np.linalg.norm(other_vectors, axis=1)
        euclidean_dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6) # floor avoids a zero distance when parent equals child (possible in the reconstruction experiment)

        if not rel_reversed:
            cos_angles_child = (norms_other**2 - norm_parents**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_parents) # 1 + neg_size
            angles_psi_parent = np.arcsin(K / norm_parents) # scalar
        else:
            cos_angles_child = (norm_parents**2 - norms_other**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_other) # 1 + neg_size
            angles_psi_parent = np.arcsin(K / norms_other) # 1 + neg_size

        assert not np.isnan(cos_angles_child).any()
        clipped_cos_angle_child = np.maximum(cos_angles_child, -1 + EPS)
        clipped_cos_angle_child = np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = np.arccos(clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        return np.maximum(0, angles_child - angles_psi_parent) 
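Clipping the cosine into [-1 + EPS, 1 - EPS] before np.arccos guards against values that round-off pushes just outside [-1, 1], where arccos returns nan. A minimal illustration (EPS stands in for the module-level constant; its value here is assumed):

import autograd.numpy as np

EPS = 1e-7
c = np.array([1.0000001, -1.0000002])  # round-off pushed these out of range
print(np.arccos(c))                    # [nan nan]
c = np.minimum(np.maximum(c, -1 + EPS), 1 - EPS)
print(np.arccos(c))                    # finite angles near 0 and pi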
Example 5
Project: hyperbolic_cones   Author: dalab   File: eucl_cones_model.py   License: Apache License 2.0
def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
        norm_parents = np.linalg.norm(parent_vectors, axis=1)
        norms_other = np.linalg.norm(other_vectors, axis=1)
        euclidean_dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6) # floor avoids a zero distance when parent equals child (possible in the reconstruction experiment)

        if not rel_reversed:
            cos_angles_child = (norms_other**2 - norm_parents**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_parents) # 1 + neg_size
            angles_psi_parent = np.arcsin(K / norm_parents) # scalar
        else:
            cos_angles_child = (norm_parents**2 - norms_other**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_other) # 1 + neg_size
            angles_psi_parent = np.arcsin(K / norms_other) # 1 + neg_size

        assert not np.isnan(cos_angles_child).any()
        clipped_cos_angle_child = np.maximum(cos_angles_child, -1 + EPS)
        clipped_cos_angle_child = np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = np.arccos(clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        return np.maximum(0, angles_child - angles_psi_parent) 
Example 6
Project: kernel-gof   Author: wittawatj   File: util.py   License: MIT License
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points 
    from the fit. 
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct 
        a new covariance matrix before drawing samples. Useful to shrink the spread 
        of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V 
Example 7
Project: kernel-gof   Author: wittawatj   File: util.py   License: MIT License
def bound_by_data(Z, Data):
    """
    Determine lower and upper bound for each dimension from the Data, and project 
    Z so that all points in Z live in the bounds.

    Z: m x d 
    Data: n x d

    Return a projected Z of size m x d.
    """
    n, d = Z.shape
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    LowMat = np.repeat(Low[np.newaxis, :], n, axis=0)
    UpMat = np.repeat(Up[np.newaxis, :], n, axis=0)

    Z = np.maximum(LowMat, Z)
    Z = np.minimum(UpMat, Z)
    return Z 
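The np.maximum / np.minimum pair is an elementwise box projection. Since numpy broadcasting handles the row dimension, the explicit np.repeat is not strictly needed and the same result comes from np.clip; a quick check of that equivalence:

import autograd.numpy as np

Z = np.array([[2.0, -3.0],
              [0.5, 10.0]])
Data = np.array([[0.0, 0.0],
                 [1.0, 1.0]])
assert np.allclose(bound_by_data(Z, Data),
                   np.clip(Z, np.min(Data, 0), np.max(Data, 0)))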
Example 8
Project: prediction-constrained-topic-models   Author: dtak   File: util_differentiable_transform__2D_rows_sum_to_one.py   License: MIT License
def to_safe_common_arr(topics_KV, min_eps=MIN_EPS):
    ''' Force provided topics_KV array to be numerically safe.

    Returns
    -------
    topics_KV : 2D array, size K x V
        minimum value of each row is min_eps
        each row will sum to 1.0 (+/- min_eps)
    '''
    K, V = topics_KV.shape
    topics_KV = topics_KV.copy()
    for rep in range(2):
        topics_KV /= topics_KV.sum(axis=1)[:,np.newaxis]
        np.maximum(topics_KV, min_eps, out=topics_KV)
    return topics_KV 
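Note the out=topics_KV argument: np.maximum writes its result in place. That is fine on plain numpy arrays, but as a general autograd caveat, in-place mutation is not supported on arrays being traced for gradients, so a sanitizer like this belongs outside the differentiated path. The in-place effect in isolation:

import numpy as np  # plain numpy; in-place ops are not autograd-traceable

a = np.array([0.0, 0.5, 1.0])
np.maximum(a, 0.1, out=a)  # floors a in place, no new array allocated
print(a)                   # [0.1 0.5 1. ]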
Example 9
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py   License: MIT License
def test_maximum():
    combo_check(np.maximum, [0, 1],
                [R(1), R(1, 4), R(3, 4)],
                [R(1), R(1, 4), R(3, 4)])
Example 10
Project: ReducedVarianceReparamGradients   Author: andymiller   File: nn.py   License: MIT License
def neural_net_predict(params, inputs, parser, num_layers):
    for l in range(num_layers):
        W, b = parser.get(params, 'W_%d'%l), parser.get(params, 'b_%d'%l)
        outputs = np.dot(inputs, W) + b
        inputs  = np.maximum(outputs, 0.)
    return outputs 
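Here np.maximum(outputs, 0.) is the ReLU nonlinearity, and because it is autograd's maximum, the whole network stays differentiable. A hypothetical end-to-end sketch with the project's parser/params machinery replaced by a plain list of (W, b) pairs:

import autograd.numpy as np
from autograd import grad

def mlp(layers, x):
    for W, b in layers:
        out = np.dot(x, W) + b
        x = np.maximum(out, 0.0)  # ReLU, as in neural_net_predict above
    return out                    # last layer left pre-activation

def loss(layers, x, y):
    return np.sum((mlp(layers, x) - y) ** 2)

layers = [(np.ones((2, 3)), np.zeros(3)), (np.ones((3, 1)), np.zeros(1))]
g = grad(loss)(layers, np.ones((4, 2)), np.zeros((4, 1)))  # same structure as layers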
Example 11
Project: momi2   Author: popgenmethods   File: sfs.py   License: GNU General Public License v3.0
def avg_pairwise_hets(self):
        # avg number of hets per ind per pop (assuming Hardy-Weinberg)
        n_nonmissing = np.sum(self.configs.value, axis=2)
        # for the denominator, assume 1 allele is drawn from the whole sample,
        # and 1 allele is drawn only from the non-missing alleles
        denoms = np.maximum(n_nonmissing * (self.sampled_n - 1), 1.0)
        p_het = 2 * self.configs.value[:, :, 0] * \
            self.configs.value[:, :, 1] / denoms

        return self.freqs_matrix.T.dot(p_het) 
Example 12
Project: momi2   Author: popgenmethods   File: optimizers.py   License: GNU General Public License v3.0
def sgd(fun, x0, fun_and_jac, pieces, stepsize, num_iters, bounds=None, callback=None, iter_per_output=10, rgen=np.random):
    x0 = np.array(x0)

    if callback is None:
        callback = lambda *a, **kw: None

    if bounds is None:
        bounds = [(None, None) for _ in x0]
    lower, upper = zip(*bounds)
    lower = [-float('inf') if l is None else l
             for l in lower]
    upper = [float('inf') if u is None else u
             for u in upper]

    def truncate(x):
        return np.maximum(np.minimum(x, upper), lower)

    x = x0
    for nit in range(num_iters):
        i = rgen.randint(pieces)
        f_x, g_x = fun_and_jac(x, i)
        x = truncate(x - stepsize * g_x)
        if nit % iter_per_output == 0:
            callback(x, f_x, nit)

    return scipy.optimize.OptimizeResult({'x': x, 'fun': f_x, 'jac': g_x}) 
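A hypothetical smoke test for sgd() above (it also needs scipy importable, since it returns a scipy.optimize.OptimizeResult). Each piece i contributes 0.5 * ||x - centers[i]||^2, so the full objective is minimized at the mean of the centers:

import autograd.numpy as np
from autograd import value_and_grad

centers = np.array([[0.0, 0.0], [2.0, 2.0], [4.0, 4.0]])

def piece(x, i):
    return 0.5 * np.sum((x - centers[i]) ** 2)

res = sgd(piece, x0=[1.0, 1.0], fun_and_jac=value_and_grad(piece),
          pieces=3, stepsize=0.05, num_iters=2000)
print(res.x)  # hovers near the mean [2., 2.]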
Example 13
Project: disk-embedding   Author: lapras-inc   File: eucl_cones_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        euclidean_dists_sq = self.euclidean_dists ** 2

        if not self.rels_reversed:
            # (1 + neg_size, batch_size)
            child_numerator = self.norms_v_sq - self.norms_u_sq - euclidean_dists_sq
            self.child_numitor = 2 * self.euclidean_dists * self.norms_u
            self.angles_psi_parent = np.arcsin(self.K / self.norms_u) # (1, batch_size)

        else:
            # (1 + neg_size, batch_size)
            child_numerator = self.norms_u_sq - self.norms_v_sq - euclidean_dists_sq
            self.child_numitor = 2 * self.euclidean_dists * self.norms_v
            self.angles_psi_parent = np.arcsin(self.K / self.norms_v) # (1 + neg_size, batch_size)

        self.cos_angles_child = child_numerator / self.child_numitor
        # To avoid numerical errors
        self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
        self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
        self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        self.angle_diff = self.angles_child - self.angles_psi_parent
        self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example 14
Project: disk-embedding   Author: lapras-inc   File: hyp_cones_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        self.dot_prods = (self.vectors_u * self.vectors_v).sum(axis=1) # (1 + neg, batch_size)

        self.g = 1 + self.norms_v_sq * self.norms_u_sq - 2 * self.dot_prods
        self.g_sqrt = np.sqrt(self.g)

        self.euclidean_times_sqrt_g = self.euclidean_dists * self.g_sqrt

        if not self.rels_reversed:
            # u is x , v is y
            # (1 + neg_size, batch_size)
            child_numerator = self.dot_prods * (1 + self.norms_u_sq) - self.norms_u_sq * (1 + self.norms_v_sq)
            self.child_numitor = self.euclidean_times_sqrt_g * self.norms_u
            self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_u / self.norms_u) # (1, batch_size)

        else:
            # v is x , u is y
            # (1 + neg_size, batch_size)
            child_numerator = self.dot_prods * (1 + self.norms_v_sq) - self.norms_v_sq * (1 + self.norms_u_sq)
            self.child_numitor = self.euclidean_times_sqrt_g * self.norms_v
            self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_v / self.norms_v) # (1, batch_size)

        self.cos_angles_child = child_numerator / self.child_numitor
        # To avoid numerical errors
        self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
        self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
        self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        self.angle_diff = self.angles_child - self.angles_psi_parent
        self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example 15
Project: disk-embedding   Author: lapras-inc   File: spherical_disk_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        # self.vectors_u : (1, dim, batch_size)
        # self.vectors_v : (1 + neg, dim, batch_size)

        # D: distance between disk centers (the radius sits at index 0 along
        # dim, the center coordinates at indices 1:)
        self.distance_between_centers = np.linalg.norm(self.vectors_u[:, 1:, :] - self.vectors_v[:, 1:, :], axis=1) # (1 + neg_size, batch_size)

        if not self.rels_reversed:
            # u is x , v is y
            self.radius_child = self.vectors_u[:,0,:] # (1 + neg_size, batch_size)
            self.radius_parent = self.vectors_v[:,0,:] # (1, batch_size)

        else:
            # v is x , u is y
            self.radius_parent = self.vectors_u[:,0,:] # (1 + neg_size, batch_size)
            self.radius_child = self.vectors_v[:,0,:] # (1, batch_size)

        self.kuikomi = self.radius_child - self.radius_parent + self.distance_between_centers
        self.energy_vec = np.maximum(0, self.kuikomi) # (1 + neg_size, batch_size)

        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example 16
Project: disk-embedding   Author: lapras-inc   File: spherical_disk_model.py   License: Apache License 2.0
def __init__(self):
        super(SphericalDisk, self).__init__()

    # def is_a_scores_vector_batch(self, K, parent_vectors, other_vectors, rel_reversed):
        # norm_parent = np.linalg.norm(parent_vectors, axis=1)
        # norm_parent_sq = norm_parent ** 2
        # norms_other = np.linalg.norm(other_vectors, axis=1)
        # norms_other_sq = norms_other ** 2
        # euclidean_dists = np.maximum(np.linalg.norm(parent_vectors - other_vectors, axis=1), 1e-6) # To avoid the fact that parent can be equal to child for the reconstruction experiment
        # dot_prods = (parent_vectors * other_vectors).sum(axis=1)
        # g = 1 + norm_parent_sq * norms_other_sq - 2 * dot_prods
        # g_sqrt = np.sqrt(g)

        # if not rel_reversed:
            # # parent = x , other = y
            # child_numerator = dot_prods * (1 + norm_parent_sq) - norm_parent_sq * (1 + norms_other_sq)
            # child_numitor = euclidean_dists * norm_parent * g_sqrt
            # angles_psi_parent = np.arcsin(K * (1 - norm_parent_sq) / norm_parent)
        # else:
            # # parent = y , other = x
            # child_numerator = dot_prods * (1 + norms_other_sq) - norms_other_sq * (1 + norm_parent_sq)
            # child_numitor = euclidean_dists * norms_other * g_sqrt
            # angles_psi_parent = np.arcsin(K * (1 - norms_other_sq) / norms_other)

        # cos_angles_child = child_numerator / child_numitor
        # assert not np.isnan(cos_angles_child).any()
        # clipped_cos_angle_child = np.maximum(cos_angles_child, -1 + EPS)
        # clipped_cos_angle_child = np.minimum(clipped_cos_angle_child, 1 - EPS)
        # angles_child = np.arccos(clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        # # return angles_child # np.maximum(1, angles_child / angles_psi_parent)
        # return np.maximum(0, angles_child - angles_psi_parent) 
Example 17
Project: disk-embedding   Author: lapras-inc   File: order_emb_model.py   License: Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]
        if not rels_reversed:
            entailment_penalty = grad_np.maximum(0, vector_u - vectors_v) # (1 + negative_size, dim).
        else:
            entailment_penalty = grad_np.maximum(0, - vector_u + vectors_v) # (1 + negative_size, dim).

        energy_vec = grad_np.linalg.norm(entailment_penalty, axis=1) ** 2
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum() 
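The grad_np.maximum(0, vector_u - vectors_v) term is the order-embedding violation: it is nonzero exactly in the coordinates where u fails to lie below v. A standalone sketch of the penalty E(u, v) = ||max(0, u - v)||^2 and its gradient:

import autograd.numpy as np
from autograd import grad

def penalty(u, v):
    return np.sum(np.maximum(0, u - v) ** 2)  # squared hinge per coordinate

u = np.array([1.0, 3.0])
v = np.array([2.0, 1.0])
print(penalty(u, v))        # 4.0 -- only the second coordinate violates u <= v
print(grad(penalty)(u, v))  # [0. 4.] -- gradient flows only through violations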
Example 18
Project: disk-embedding   Author: lapras-inc   File: order_emb_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        if not self.rels_reversed:
            self.entailment_penalty = np.maximum(0, self.vectors_u - self.vectors_v) # (1 + negative_size, dim, batch_size).
        else:
            self.entailment_penalty = np.maximum(0, - self.vectors_u + self.vectors_v) # (1 + negative_size, dim, batch_size).

        self.energy_vec = np.linalg.norm(self.entailment_penalty, axis=1)**2 # (1 + negative_size, batch_size).
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example 19
Project: disk-embedding   Author: lapras-inc   File: order_emb_model.py   License: Apache License 2.0
def is_a_scores_vector_batch(self, alpha, parent_vectors, other_vectors, rel_reversed):
        if not rel_reversed:
            return np.linalg.norm(np.maximum(0, parent_vectors - other_vectors), axis=1)
        else:
            return np.linalg.norm(np.maximum(0, - parent_vectors + other_vectors), axis=1) 
Example 20
Project: disk-embedding   Author: lapras-inc   File: poincare_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._compute_distances()

        if self.loss_type == 'nll':
            # NLL loss from the NIPS paper.
            exp_negative_distances = np.exp(-self.poincare_dists)  # (1 + neg_size, batch_size)
            # Remove the value for the true edge (u,v) from the partition function
            Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size)
            self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
            self.Z = Z # (batch_size)

            self.pos_loss = self.poincare_dists[0].sum()
            self.neg_loss = np.log(self.Z).sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'neg':
            # NEG loss function:
            # - log sigma((r - d(u,v)) / t) - \sum_{v' \in N(u)} log sigma((d(u,v') - r) / t)
            positive_term = np.log(1.0 + np.exp((- self.neg_r + self.poincare_dists[0]) / self.neg_t))  # (batch_size)
            negative_terms = self.neg_mu * \
                             np.log(1.0 + np.exp((self.neg_r - self.poincare_dists[1:]) / self.neg_t)) # (1 + neg_size, batch_size)

            self.pos_loss = positive_term.sum()
            self.neg_loss = negative_terms.sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'maxmargin':
            # max-margin loss function: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v'))
            self.loss = np.maximum(0, self.maxmargin_margin + self.poincare_dists[0] - self.poincare_dists[1:]).sum() # scalar
            self.pos_loss = self.loss
            self.neg_loss = self.loss

        else:
            raise ValueError('Unknown loss type : ' + self.loss_type)

        self._loss_computed = True 
Example 21
Project: hyperbolic_cones   Author: dalab   File: eucl_cones_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        euclidean_dists_sq = self.euclidean_dists ** 2

        if not self.rels_reversed:
            # (1 + neg_size, batch_size)
            child_numerator = self.norms_v_sq - self.norms_u_sq - euclidean_dists_sq
            self.child_numitor = 2 * self.euclidean_dists * self.norms_u
            self.angles_psi_parent = np.arcsin(self.K / self.norms_u) # (1, batch_size)

        else:
            # (1 + neg_size, batch_size)
            child_numerator = self.norms_u_sq - self.norms_v_sq - euclidean_dists_sq
            self.child_numitor = 2 * self.euclidean_dists * self.norms_v
            self.angles_psi_parent = np.arcsin(self.K / self.norms_v) # (1 + neg_size, batch_size)

        self.cos_angles_child = child_numerator / self.child_numitor
        # To avoid numerical errors
        self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
        self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
        self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        self.angle_diff = self.angles_child - self.angles_psi_parent
        self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example 22
Project: hyperbolic_cones   Author: dalab   File: hyp_cones_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        self.euclidean_dists = np.linalg.norm(self.vectors_u - self.vectors_v, axis=1)  # (1 + neg_size, batch_size)
        self.dot_prods = (self.vectors_u * self.vectors_v).sum(axis=1) # (1 + neg, batch_size)

        self.g = 1 + self.norms_v_sq * self.norms_u_sq - 2 * self.dot_prods
        self.g_sqrt = np.sqrt(self.g)

        self.euclidean_times_sqrt_g = self.euclidean_dists * self.g_sqrt

        if not self.rels_reversed:
            # u is x , v is y
            # (1 + neg_size, batch_size)
            child_numerator = self.dot_prods * (1 + self.norms_u_sq) - self.norms_u_sq * (1 + self.norms_v_sq)
            self.child_numitor = self.euclidean_times_sqrt_g * self.norms_u
            self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_u / self.norms_u) # (1, batch_size)

        else:
            # v is x , u is y
            # (1 + neg_size, batch_size)
            child_numerator = self.dot_prods * (1 + self.norms_v_sq) - self.norms_v_sq * (1 + self.norms_u_sq)
            self.child_numitor = self.euclidean_times_sqrt_g * self.norms_v
            self.angles_psi_parent = np.arcsin(self.K * self.one_minus_norms_sq_v / self.norms_v) # (1, batch_size)

        self.cos_angles_child = child_numerator / self.child_numitor
        # To avoid numerical errors
        self.clipped_cos_angle_child = np.maximum(self.cos_angles_child, -1 + EPS)
        self.clipped_cos_angle_child = np.minimum(self.clipped_cos_angle_child, 1 - EPS)
        self.angles_child = np.arccos(self.clipped_cos_angle_child)  # (1 + neg_size, batch_size)

        self.angle_diff = self.angles_child - self.angles_psi_parent
        self.energy_vec = np.maximum(0, self.angle_diff) # (1 + neg_size, batch_size)
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example 23
Project: hyperbolic_cones   Author: dalab   File: order_emb_model.py   License: Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]
        if not rels_reversed:
            entailment_penalty = grad_np.maximum(0, vector_u - vectors_v) # (1 + negative_size, dim).
        else:
            entailment_penalty = grad_np.maximum(0, - vector_u + vectors_v) # (1 + negative_size, dim).

        energy_vec = grad_np.linalg.norm(entailment_penalty, axis=1) ** 2
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum() 
Example 24
Project: hyperbolic_cones   Author: dalab   File: order_emb_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._loss_computed = True

        if not self.rels_reversed:
            self.entailment_penalty = np.maximum(0, self.vectors_u - self.vectors_v) # (1 + negative_size, dim, batch_size).
        else:
            self.entailment_penalty = np.maximum(0, - self.vectors_u + self.vectors_v) # (1 + negative_size, dim, batch_size).

        self.energy_vec = np.linalg.norm(self.entailment_penalty, axis=1)**2 # (1 + negative_size, batch_size).
        self.pos_loss = self.energy_vec[0].sum()
        self.neg_loss = np.maximum(0, self.margin - self.energy_vec[1:]).sum()
        self.loss = self.pos_loss + self.neg_loss 
Example 25
Project: hyperbolic_cones   Author: dalab   File: order_emb_model.py   License: Apache License 2.0
def is_a_scores_vector_batch(self, alpha, parent_vectors, other_vectors, rel_reversed):
        if not rel_reversed:
            return np.linalg.norm(np.maximum(0, parent_vectors - other_vectors), axis=1)
        else:
            return np.linalg.norm(np.maximum(0, - parent_vectors + other_vectors), axis=1) 
Example 26
Project: hyperbolic_cones   Author: dalab   File: poincare_model.py   License: Apache License 2.0
def _maxmargin_loss_fn(poincare_dists, maxmargin_margin):
        """
        Parameters
        ----------
        poincare_dists : numpy.array
            All distances d(u,v) and d(u,v'), where v' is negative. Shape (1 + negative_size).

        Returns
        -------
        float
            Max-margin loss: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v')).
        """
        positive_term = poincare_dists[0]
        negative_terms = poincare_dists[1:]
        return grad_np.maximum(0, maxmargin_margin + positive_term - negative_terms).sum() 
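A worked instance of the hinge above, with made-up numbers: for gamma = 1 and distances d(u,v) = 0.5, d(u,v'_1) = 2.0, d(u,v'_2) = 0.1, the loss is max(0, 1 + 0.5 - 2.0) + max(0, 1 + 0.5 - 0.1) = 0 + 1.4, so only negatives that are not at least gamma farther away than the positive contribute. Assuming the function is accessible as a plain function:

import autograd.numpy as grad_np

poincare_dists = grad_np.array([0.5, 2.0, 0.1])  # [d(u,v), d(u,v'_1), d(u,v'_2)]
print(_maxmargin_loss_fn(poincare_dists, 1.0))   # 1.4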
Example 27
Project: hyperbolic_cones   Author: dalab   File: poincare_model.py   License: Apache License 2.0
def _compute_loss(self):
        """Compute and store loss value for the given batch of examples."""
        if self._loss_computed:
            return
        self._compute_distances()

        if self.loss_type == 'nll':
            # NLL loss from the NIPS paper.
            exp_negative_distances = np.exp(-self.poincare_dists)  # (1 + neg_size, batch_size)
            # Remove the value for the true edge (u,v) from the partition function
            Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size)
            self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
            self.Z = Z # (batch_size)

            self.pos_loss = self.poincare_dists[0].sum()
            self.neg_loss = np.log(self.Z).sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'neg':
            # NEG loss function:
            # - log sigma((r - d(u,v)) / t) - \sum_{v' \in N(u)} log sigma((d(u,v') - r) / t)
            positive_term = np.log(1.0 + np.exp((- self.neg_r + self.poincare_dists[0]) / self.neg_t))  # (batch_size)
            negative_terms = self.neg_mu * \
                             np.log(1.0 + np.exp((self.neg_r - self.poincare_dists[1:]) / self.neg_t)) # (1 + neg_size, batch_size)

            self.pos_loss = positive_term.sum()
            self.neg_loss = negative_terms.sum()
            self.loss = self.pos_loss + self.neg_loss  # scalar

        elif self.loss_type == 'maxmargin':
            # max-margin loss function: \sum_{v' \in N(u)} max(0, \gamma + d(u,v) - d(u,v'))
            self.loss = np.maximum(0, self.maxmargin_margin + self.poincare_dists[0] - self.poincare_dists[1:]).sum() # scalar
            self.pos_loss = self.loss
            self.neg_loss = self.loss

        else:
            raise ValueError('Unknown loss type : ' + self.loss_type)

        self._loss_computed = True 
Example 28
Project: baconian-project   Author: Lukeeeeee   File: ilqr_policy.py   License: MIT License
def increase(self, mu):
        self.factor = np.maximum(self.factor, self.factor * self.min_factor)
        self.mu = np.maximum(self.min_mu, self.mu * self.factor) 
Example 29
Project: autograd-gamma   Author: CamDavidsonPilon   File: __init__.py   License: MIT License
def central_difference_of_(f, argnum=0):
    new_f = lambda x, *args: f(*args[:argnum], x, *args[argnum:])

    def _central_difference(_, *args):
        x = args[argnum]
        args = args[:argnum] + args[argnum + 1 :]

        # Why do we calculate x * MACHINE_EPSILON_POWER?
        # Consider a massive x, like 2**100. Then even for a simple
        # function like the identity, (2**100 + h) - 2**100 = 0 due to
        # floating point, so the computed derivative is 0 when the
        # correct answer is 1.0.

        # Another thing to consider (and later to add): x may be machine
        # representable, but x + h rarely is, and will be rounded to the
        # nearest machine-representable value. Thus (x + h) - x != h.
        delta = np.maximum(x * MACHINE_EPISLON_POWER, 1e-7)
        return unbroadcast_f(
            x,
            lambda g: g
            * (
                -new_f(x + 2 * delta, *args)
                + 8 * new_f(x + delta, *args)
                - 8 * new_f(x - delta, *args)
                + new_f(x - 2 * delta, *args)
            )
            / (12 * delta),
        )

    return _central_difference 
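A hypothetical check of the step-size logic above: a fourth-order central difference with delta = max(x * eps_power, 1e-7) recovers the derivative of np.sin to high accuracy (eps_power stands in for the module's MACHINE_EPISLON_POWER constant; its value here is assumed):

import numpy as np

eps_power = np.finfo(float).eps ** 0.25  # an assumed choice of relative step
x = 2.0
delta = np.maximum(x * eps_power, 1e-7)
approx = (-np.sin(x + 2 * delta) + 8 * np.sin(x + delta)
          - 8 * np.sin(x - delta) + np.sin(x - 2 * delta)) / (12 * delta)
print(approx, np.cos(x))  # agree to roughly 10 significant digits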
Example 30
Project: SyntheticStatistics   Author: BlissChapman   File: tst.py   License: MIT License
def generic_nc_parameter(Z, reg='auto'):
    """
    Compute the non-centrality parameter of the non-central Chi-squared
    which is approximately the distribution of the test statistic under the H_1
    (and H_0). The empirical nc parameter is also the test statistic.

    - reg can be 'auto'. This will automatically determine the lowest value of
    the regularization parameter so that the statistic can be computed.
    """

    n = Z.shape[0]
    Sig = np.cov(Z.T)
    W = np.mean(Z, 0)
    n_features = len(W)
    if n_features == 1:
        reg = 0 if reg=='auto' else reg
        s = float(n)*(W[0]**2)/(reg+Sig)
    else:
        if reg=='auto':
            # First compute with reg=0. If no problem, do nothing.
            # If the covariance is singular, make 0 eigenvalues positive.
            try:
                s = n*np.dot(np.linalg.solve(Sig, W), W)
            except np.linalg.LinAlgError:
                try:
                    # singular matrix
                    # eigen decompose
                    evals, eV = np.linalg.eig(Sig)
                    evals = np.real(evals)
                    eV = np.real(eV)
                    evals = np.maximum(0, evals)
                    # find the non-zero second smallest eigenvalue
                    snd_small = np.sort(evals[evals > 0])[0]
                    evals[evals <= 0] = snd_small

                    # reconstruct Sig
                    Sig = eV.dot(np.diag(evals)).dot(eV.T)
                    # try again
                    s = n*np.linalg.solve(Sig, W).dot(W)
                except:
                    s = -1
        else:
            # assume reg is a number
            # test statistic
            try:
                s = n*np.linalg.solve(Sig + reg*np.eye(Sig.shape[0]), W).dot(W)
            except np.linalg.LinAlgError:
                print('LinAlgError. Return -1 as the nc_parameter.')
                s = -1
    return s 
Example 31
Project: disk-embedding   Author: lapras-inc   File: eucl_cones_model.py   License: Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]

        norm_u = grad_np.linalg.norm(vector_u)
        norms_v = grad_np.linalg.norm(vectors_v, axis=1)
        euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)

        if not rels_reversed:
            # u is x , v is y
            cos_angle_child = (norms_v**2 - norm_u**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_u) # 1 + neg_size
            angles_psi_parent = grad_np.arcsin(self.K / norm_u) # scalar
        else:
            # v is x , u is y
            cos_angle_child = (norm_u**2 - norms_v**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_v) # 1 + neg_size
            angles_psi_parent = grad_np.arcsin(self.K / norms_v) # 1 + neg_size

        # To avoid numerical errors
        clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
        clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

        energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum() 
Example 32
Project: disk-embedding   Author: lapras-inc   File: hyp_cones_model.py   License: Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]

        norm_u = grad_np.linalg.norm(vector_u)
        norms_v = grad_np.linalg.norm(vectors_v, axis=1)
        euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
        dot_prod = (vector_u * vectors_v).sum(axis=1)

        if not rels_reversed:
            # u is x , v is y
            cos_angle_child = (dot_prod * (1 + norm_u ** 2) - norm_u ** 2 * (1 + norms_v ** 2)) /\
                              (norm_u * euclidean_dists * grad_np.sqrt(1 + norms_v ** 2 * norm_u ** 2 - 2 * dot_prod))
            angles_psi_parent = grad_np.arcsin(self.K * (1 - norm_u**2) / norm_u) # scalar
        else:
            # v is x , u is y
            cos_angle_child = (dot_prod * (1 + norms_v ** 2) - norms_v **2 * (1 + norm_u ** 2) ) /\
                              (norms_v * euclidean_dists * grad_np.sqrt(1 + norms_v**2 * norm_u**2 - 2 * dot_prod))
            angles_psi_parent = grad_np.arcsin(self.K * (1 - norms_v**2) / norms_v) # 1 + neg_size

        # To avoid numerical errors
        clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
        clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

        energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum() 
Example 33
Project: hyperbolic_cones   Author: dalab   File: eucl_cones_model.py   License: Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]

        norm_u = grad_np.linalg.norm(vector_u)
        norms_v = grad_np.linalg.norm(vectors_v, axis=1)
        euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)

        if not rels_reversed:
            # u is x , v is y
            cos_angle_child = (norms_v**2 - norm_u**2 - euclidean_dists**2) / (2 * euclidean_dists * norm_u) # 1 + neg_size
            angles_psi_parent = grad_np.arcsin(self.K / norm_u) # scalar
        else:
            # v is x , u is y
            cos_angle_child = (norm_u**2 - norms_v**2 - euclidean_dists**2) / (2 * euclidean_dists * norms_v) # 1 + neg_size
            angles_psi_parent = grad_np.arcsin(self.K / norms_v) # 1 + neg_size

        # To avoid numerical errors
        clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
        clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

        energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum() 
Example 34
Project: hyperbolic_cones   Author: dalab   File: hyp_cones_model.py   License: Apache License 2.0
def _loss_fn(self, matrix, rels_reversed):
        """Given a numpy array with vectors for u, v and negative samples, computes loss value.

        Parameters
        ----------
        matrix : numpy.array
            Array containing vectors for u, v and negative samples, of shape (2 + negative_size, dim).
        rels_reversed : bool

        Returns
        -------
        float
            Computed loss value.

        Warnings
        --------
        Only used for autograd gradients, since autograd requires a specific function signature.
        """
        vector_u = matrix[0]
        vectors_v = matrix[1:]

        norm_u = grad_np.linalg.norm(vector_u)
        norms_v = grad_np.linalg.norm(vectors_v, axis=1)
        euclidean_dists = grad_np.linalg.norm(vector_u - vectors_v, axis=1)
        dot_prod = (vector_u * vectors_v).sum(axis=1)

        if not rels_reversed:
            # u is x , v is y
            cos_angle_child = (dot_prod * (1 + norm_u ** 2) - norm_u ** 2 * (1 + norms_v ** 2)) /\
                              (norm_u * euclidean_dists * grad_np.sqrt(1 + norms_v ** 2 * norm_u ** 2 - 2 * dot_prod))
            angles_psi_parent = grad_np.arcsin(self.K * (1 - norm_u**2) / norm_u) # scalar
        else:
            # v is x , u is y
            cos_angle_child = (dot_prod * (1 + norms_v ** 2) - norms_v **2 * (1 + norm_u ** 2) ) /\
                              (norms_v * euclidean_dists * grad_np.sqrt(1 + norms_v**2 * norm_u**2 - 2 * dot_prod))
            angles_psi_parent = grad_np.arcsin(self.K * (1 - norms_v**2) / norms_v) # 1 + neg_size

        # To avoid numerical errors
        clipped_cos_angle_child = grad_np.maximum(cos_angle_child, -1 + EPS)
        clipped_cos_angle_child = grad_np.minimum(clipped_cos_angle_child, 1 - EPS)
        angles_child = grad_np.arccos(clipped_cos_angle_child)  # 1 + neg_size

        energy_vec = grad_np.maximum(0, angles_child - angles_psi_parent)
        positive_term = energy_vec[0]
        negative_terms = energy_vec[1:]
        return positive_term + grad_np.maximum(0, self.margin - negative_terms).sum()