Python numpy.Infinity Examples

The following are 9 code examples of numpy.Infinity, a deprecated alias of numpy.inf, taken from open-source projects. The source file, project, and license for each example are noted above it.
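Before diving into the examples, here is a minimal sketch (assuming a NumPy release older than 2.0, which still ships the alias) showing that numpy.Infinity is simply another name for the float numpy.inf:

import numpy as np

# np.Infinity is just another spelling of np.inf (a plain Python float).
# The alias was deprecated in NumPy 1.25 and removed in 2.0, so new code should prefer np.inf.
assert np.Infinity == np.inf == float('inf')
print(np.Infinity)        # inf
print(1.0 / np.Infinity)  # 0.0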
Example #1
Source File: test_constants.py    From chainer with MIT License
def test_constants():
    assert chainerx.Inf is numpy.Inf
    assert chainerx.Infinity is numpy.Infinity
    assert chainerx.NAN is numpy.NAN
    assert chainerx.NINF is numpy.NINF
    assert chainerx.NZERO is numpy.NZERO
    assert chainerx.NaN is numpy.NaN
    assert chainerx.PINF is numpy.PINF
    assert chainerx.PZERO is numpy.PZERO
    assert chainerx.e is numpy.e
    assert chainerx.euler_gamma is numpy.euler_gamma
    assert chainerx.inf is numpy.inf
    assert chainerx.infty is numpy.infty
    assert chainerx.nan is numpy.nan
    assert chainerx.newaxis is numpy.newaxis
    assert chainerx.pi is numpy.pi 
Example #2
Source File: graph.py    From PyGraphArt with MIT License
def prim(self):
        '''
        Returns Prim's minimum spanning tree
        '''
        big_f = set([])
        costs = np.empty((self.n), dtype=object)
        costs[:] = np.max(self.costs) + 1
        big_e = np.empty((self.n), dtype=object)
        big_q = set(range(self.n))
        tree_edges = np.array([], dtype=object)
        while len(big_q) > 0:
            v = np.argmin(costs)
            big_q.remove(v)
            costs[v] = np.Infinity  # mark v as scanned so argmin never picks it again
            big_f.add(v)
            if big_e[v] is not None:
                tree_edges = np.append(tree_edges, None)
                tree_edges[-1] = (big_e[v], v)

            for i, w in zip(range(len(self.FSs[v])), self.FSs[v]):
                if w in big_q and self.FS_costs[v][i] < costs[w]:
                    costs[w] = self.FS_costs[v][i]
                    big_e[w] = v
        return tree_edges 
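The Graph class above is specific to PyGraphArt (self.FSs holds each node's forward star, self.FS_costs the matching edge costs), but the np.Infinity-as-sentinel pattern it relies on is general. Below is a self-contained sketch of the same idea on a dense cost matrix; prim_dense and its layout are hypothetical and not part of the project's API:

import numpy as np

def prim_dense(cost):
    '''Prim's MST on a dense cost matrix; np.Infinity marks missing edges and scanned nodes.'''
    n = cost.shape[0]
    in_tree = np.zeros(n, dtype=bool)
    best = np.full(n, np.Infinity)      # cheapest known edge into each node
    parent = np.full(n, -1)
    best[0] = 0.0                       # start the tree at node 0
    edges = []
    for _ in range(n):
        # pick the cheapest node not yet in the tree (tree nodes are masked with Infinity)
        v = np.argmin(np.where(in_tree, np.Infinity, best))
        in_tree[v] = True
        if parent[v] >= 0:
            edges.append((int(parent[v]), int(v)))
        # relax the frontier: any outside node now reachable more cheaply through v
        closer = (~in_tree) & (cost[v] < best)
        best[closer] = cost[v][closer]
        parent[closer] = v
    return edges

INF = np.Infinity  # "no edge"
c = np.array([[INF, 1.0, 4.0, INF],
              [1.0, INF, 2.0, 6.0],
              [4.0, 2.0, INF, 3.0],
              [INF, 6.0, 3.0, INF]])
print(prim_dense(c))  # [(0, 1), (1, 2), (2, 3)]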
Example #3
Source File: anonymity.py    From anonymisation with GNU General Public License v3.0
def get_k(df, groupby, unknown=None):
    """
        Return the k-anonymity level of a df, grouped by the specified columns.

        :param df: The dataframe to get k from
        :param groupby: The columns to group by
        :type df: pandas.DataFrame
        :type groupby: Array
        :return: k-anonymity
        :rtype: int
    """
    df = _remove_unknown(df, groupby, unknown)
    size_group = df.groupby(groupby).size()
    if len(size_group) == 0:
        return np.Infinity
    return min(size_group) 
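The helper _remove_unknown is internal to the anonymisation project, so the hedged usage sketch below skips the unknown-value filtering and simply reproduces the grouping logic on a toy DataFrame; np.Infinity is only returned when nothing is left to group:

import numpy as np
import pandas as pd

# k-anonymity w.r.t. the quasi-identifiers ['zip', 'age'] is the size of the
# smallest group; here the ('75002', 34) row is unique, so k == 1.
df = pd.DataFrame({
    'zip': ['75001', '75001', '75002', '75001'],
    'age': [34, 34, 34, 34],
    'name': ['a', 'b', 'c', 'd'],
})
size_group = df.groupby(['zip', 'age']).size()
k = np.Infinity if len(size_group) == 0 else min(size_group)
print(k)  # 1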
Example #4
Source File: graph.py    From PyGraphArt with MIT License
def bellman_ford(self, source):
        '''
        Returns the Bellman-Ford (label-correcting) shortest paths from source
        to all other nodes, if the (directed) graph doesn't contain
        negative-weight cycles
        '''
        if self.oriented is False:
            print('cannot apply bellman_ford, graph is not oriented')
            return
        dist = np.array([np.Infinity for x in range(self.n)], dtype=np.float32)
        pred = np.empty((self.n), dtype=int)
        pred[source] = source
        dist[source] = 0

        for i in np.arange(1, self.n):
            for e in range(len(self.edges)):
                if dist[self.edges[e][0]] + self.costs[e] < dist[self.edges[e][1]]:
                    dist[self.edges[e][1]] = dist[
                        self.edges[e][0]] + self.costs[e]
                    pred[self.edges[e][1]] = self.edges[e][0]

        for e in range(len(self.edges)):
            if dist[self.edges[e][1]] > dist[self.edges[e][0]] + self.costs[e]:
                print('Error, Graph contains a negative-weight cycle')
                break

        edges = np.array([], dtype=object)
        for v in range(len(pred)):
            edges = np.append(edges, None)
            edges[-1] = [pred[v], v]

        return edges  # , prev, dist 
Example #5
Source File: graph.py    From PyGraphArt with MIT License
def floyd_warshall(self, source):
        '''
        Returns Floyd-Warshall's shortest paths from source to all other
        nodes, if the (directed) graph doesn't contain negative cycles
        '''
        print('warning! apply this algorithm only if strictly necessary, '
              'it takes O(n^3)!')
        print('O(n^3) = O(', self.n**3, ')')
        dist = np.empty((self.n, self.n), dtype=np.float32)
        pred = np.zeros((self.n, self.n), dtype=int)
        dist.fill(np.Infinity)
        for v in range(self.n):
            dist[v][v] = .0
        for e in range(len(self.edges)):
            u = self.edges[e][0]
            v = self.edges[e][1]
            dist[u][v] = self.costs[e]
            pred[u][v] = v
        for h in range(1, self.n):
            for i in range(1, self.n):
                for j in range(self.n):
                    if dist[i][h] + dist[h][j] < dist[i][j]:
                        dist[i][j] = dist[i][h] + dist[h][j]
                        pred[i][j] = pred[h][j]
            for i in range(1, self.n):
                if dist[i][i] < 0:
                    print('Error! found a negative cycle, so the problem is unbounded below')
                    return
        edges = np.array([], dtype=object)
        for v in range(len(pred)):
            edges = np.append(edges, None)
            edges[-1] = [pred[source][v], v]

        return edges  # , prev, dist 
Example #6
Source File: text_models.py    From mindmeld with Apache License 2.0
def predict_log_proba(self, examples, dynamic_resource=None):
        X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
        predictions = self._predict_proba(X, self._clf.predict_log_proba)

        # JSON can't reliably encode infinity, so replace it with a large negative constant
        for row in predictions:
            _, probas = row
            for label, proba in probas.items():
                if proba == -np.Infinity:
                    probas[label] = _NEG_INF
        return predictions 
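The replacement is needed because the JSON spec has no representation for infinity. A minimal sketch of the failure mode, using a stand-in value for mindmeld's module-level _NEG_INF constant:

import json
import numpy as np

_NEG_INF = -1e10  # stand-in for the real module-level constant

log_probas = {'greet': -0.1, 'exit': -np.Infinity}

# By default json.dumps emits the token -Infinity, which is not valid JSON,
# and it raises ValueError when strict compliance is requested.
print(json.dumps(log_probas))                      # {"greet": -0.1, "exit": -Infinity}
try:
    json.dumps(log_probas, allow_nan=False)
except ValueError as err:
    print(err)                                     # Out of range float values are not JSON compliant

safe = {k: (_NEG_INF if v == -np.Infinity else v) for k, v in log_probas.items()}
print(json.dumps(safe))                            # {"greet": -0.1, "exit": -10000000000.0}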
Example #7
Source File: regression.py    From open_model_zoo with Apache License 2.0
def _psnr_differ(self, annotation_image, prediction_image):
        prediction = np.asarray(prediction_image).astype(np.float64)
        ground_truth = np.asarray(annotation_image).astype(np.float64)

        height, width = prediction.shape[:2]
        prediction = prediction[
            self.scale_border:height - self.scale_border,
            self.scale_border:width - self.scale_border
        ]
        ground_truth = ground_truth[
            self.scale_border:height - self.scale_border,
            self.scale_border:width - self.scale_border
        ]
        image_difference = (prediction - ground_truth) / 255.  # rgb color space

        r_channel_diff = image_difference[:, :, self.channel_order[0]]
        g_channel_diff = image_difference[:, :, self.channel_order[1]]
        b_channel_diff = image_difference[:, :, self.channel_order[2]]

        # weighted RGB -> Y (luma) difference, ITU-R BT.601 coefficients
        channels_diff = (r_channel_diff * 65.738 + g_channel_diff * 129.057 + b_channel_diff * 25.064) / 256

        mse = np.mean(channels_diff ** 2)
        if mse == 0:
            return np.Infinity

        return -10 * math.log10(mse) 
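The metric returned here is PSNR: the mean squared error is computed on differences rescaled to [0, 1], so PSNR = -10 * log10(MSE), and np.Infinity is returned for identical images (MSE == 0). A stripped-down sketch of the same computation on toy arrays, without the project's border cropping and channel weighting:

import math

import numpy as np

def psnr(a, b):
    '''Minimal PSNR for 8-bit images; np.Infinity for identical inputs.'''
    mse = np.mean(((a.astype(np.float64) - b.astype(np.float64)) / 255.0) ** 2)
    return np.Infinity if mse == 0 else -10 * math.log10(mse)

a = np.zeros((4, 4), dtype=np.uint8)
print(psnr(a, a))       # inf
print(psnr(a, a + 16))  # ~24.05 dB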
Example #8
Source File: utils.py    From molml with MIT License
def get_coulomb_matrix(numbers, coords, alpha=1, use_decay=False):
    r"""
    Return the coulomb matrix for the given coords and numbers.

    .. math::

        C_{ij} = \begin{cases}
            \frac{Z_i Z_j}{\| r_i - r_j \|^\alpha} & i \neq j\\
            \frac{1}{2} Z_i^{2.4} & i = j
        \end{cases}

    Parameters
    ----------
    numbers : array-like, shape=(n_atoms, )
        The atomic numbers of all the atoms

    coords : array-like, shape=(n_atoms, 3)
        The xyz coordinates of all the atoms (in angstroms)

    alpha : number, default=1
        The exponent applied to the distance in the coulomb matrix.

    use_decay : bool, default=False
        This setting defines an extra decay for the values as they get further
        away from the "central atom". This is to alleviate issues that arise as
        atoms enter or leave the cutoff radius.

    Returns
    -------
    top : array, shape=(n_atoms, n_atoms)
        The coulomb matrix
    """
    top = numpy.outer(numbers, numbers).astype(numpy.float64)
    r = cdist(coords, coords)
    if use_decay:
        other = cdist([coords[0]], coords).reshape(-1)
        r += numpy.add.outer(other, other)

    r **= alpha

    with numpy.errstate(divide='ignore', invalid='ignore'):
        numpy.divide(top, r, top)
    numpy.fill_diagonal(top, 0.5 * numpy.array(numbers) ** 2.4)
    top[top == numpy.Infinity] = 0
    top[numpy.isnan(top)] = 0
    return top 
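A hedged usage sketch for the helper above, on a toy water-like geometry; as in molml's utils module, numpy and scipy's cdist are assumed to be imported at the top of the file:

import numpy
from scipy.spatial.distance import cdist

numbers = [8, 1, 1]  # O, H, H
coords = numpy.array([[ 0.000, 0.000, 0.000],
                      [ 0.757, 0.586, 0.000],
                      [-0.757, 0.586, 0.000]])
C = get_coulomb_matrix(numbers, coords)
print(C.shape)  # (3, 3)
print(C[0, 0])  # 0.5 * 8 ** 2.4, roughly 73.52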
Example #9
Source File: epmgp.py    From emukit with Apache License 2.0
def lt_factor(s, l, M, V, mp, p, gamma):
    cVc = (V[l, l] - 2 * V[s, l] + V[s, s]) / 2.0
    Vc = (V[:, l] - V[:, s]) / sq2
    cM = (M[l] - M[s]) / sq2
    cVnic = np.max([cVc / (1 - p * cVc), 0])
    cmni = cM + cVnic * (p * cM - mp)
    z = cmni / np.sqrt(cVnic + 1e-25)
    if np.isnan(z):
        z = -np.inf
    e, lP, exit_flag = log_relative_gauss(z)
    if exit_flag == 0:
        alpha = e / np.sqrt(cVnic)
        # beta  = alpha * (alpha + cmni / cVnic);
        # r     = beta * cVnic / (1 - cVnic * beta);
        beta = alpha * (alpha * cVnic + cmni)
        r = beta / (1 - beta)
        # new message
        pnew = r / cVnic
        mpnew = r * (alpha + cmni / cVnic) + alpha

        # update terms
        dp = np.max([-p + eps, gamma * (pnew - p)])  # at worst, remove message
        dmp = np.max([-mp + eps, gamma * (mpnew - mp)])
        d = np.max([dmp, dp])  # for convergence measures

        pnew = p + dp
        mpnew = mp + dmp
        # project out to marginal
        Vnew = V - dp / (1 + dp * cVc) * np.outer(Vc, Vc)

        Mnew = M + (dmp - cM * dp) / (1 + dp * cVc) * Vc
        if np.any(np.isnan(Vnew)):
            raise Exception("an error occurred while running expectation "
                            "propagation in entropy search: "
                            "the resulting variance contains NaN")
        # there is a problem here when z is very large
        logS = lP - 0.5 * (np.log(beta) - np.log(pnew) - np.log(cVnic)) \
               + (alpha * alpha) / (2 * beta) * cVnic

    elif exit_flag == -1:
        d = np.NAN
        Mnew = 0
        Vnew = 0
        pnew = 0
        mpnew = 0
        logS = -np.Infinity
    elif exit_flag == 1:
        d = 0
        # remove message from marginal:
        # new message
        pnew = 0
        mpnew = 0
        # update terms
        dp = -p  # at worst, remove message
        dmp = -mp
        d = max([dmp, dp])  # for convergence measures
        # project out to marginal
        Vnew = V - dp / (1 + dp * cVc) * (np.outer(Vc, Vc))
        Mnew = M + (dmp - cM * dp) / (1 + dp * cVc) * Vc
        logS = 0
    return Mnew, Vnew, pnew, mpnew, logS, d