Python numpy.isinf() Examples

The following are 30 code examples of numpy.isinf(), collected from open-source projects. numpy.isinf() tests element-wise whether values are positive or negative infinity and returns a boolean array (or a boolean scalar for scalar input); NaN and finite values test False. Each example notes its source file, originating project, and license. You may also want to check out the other available functions and classes of the numpy module.
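As a quick orientation before the project examples, here is a minimal sketch of that behavior (illustrative values only):

import numpy as np

a = np.array([1.0, np.inf, -np.inf, np.nan])
print(np.isinf(a))         # [False  True  True False] -- NaN is not infinite
print(np.isinf(np.inf))    # True; a scalar input yields a boolean scalar
print(np.isfinite(a))      # [ True False False False] -- the complementary check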
Example #1
Source File: data_helper.py    From LanczosNetwork with MIT License
def normalize_adj(A, is_sym=True, exponent=0.5):
  """
    Normalize adjacency matrix

    is_sym=True: D^{-1/2} A D^{-1/2}
    is_sym=False: D^{-1} A
  """
  rowsum = np.array(A.sum(1))

  if is_sym:
    r_inv = np.power(rowsum, -exponent).flatten()
  else:
    r_inv = np.power(rowsum, -1.0).flatten()

  r_inv[np.isinf(r_inv)] = 0.

  if sp.isspmatrix(A):
    r_mat_inv = sp.diags(r_inv.squeeze())
  else:
    r_mat_inv = np.diag(r_inv)

  if is_sym:
    return r_mat_inv.dot(A).dot(r_mat_inv)
  else:
    return r_mat_inv.dot(A) 
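A hypothetical call (assuming the source file's usual imports) shows why the np.isinf() guard matters: an isolated node has a zero row sum, so np.power() produces inf, which is reset to 0 instead of propagating through the matrix product.

import numpy as np
import scipy.sparse as sp  # assumed imports, as in data_helper.py

A = np.array([[0., 1., 0.],
              [1., 0., 0.],
              [0., 0., 0.]])     # third node is isolated, so its row sum is 0
A_norm = normalize_adj(A, is_sym=True)
# np.power(0., -0.5) evaluates to inf (with a RuntimeWarning); the isinf mask
# zeroes it, so the isolated node's row and column stay 0 in A_norm.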
Example #2
Source File: test_numeric.py    From recruit with Apache License 2.0
def test_double(self):
        # offset for alignment test
        for i in range(2):
            assert_array_equal(self.d[i:] > 0, self.ed[i:])
            assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
            assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
            assert_array_equal(-self.d[i:] < 0, self.ed[i:])
            assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
            r = self.d[i:] != 0
            assert_array_equal(r, self.ed[i:])
            r2 = self.d[i:] != np.zeros_like(self.d[i:])
            r3 = 0 != self.d[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))

            # isnan on amd64 takes the same code path
            assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
            assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
            assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
            assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
            assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) 
Example #3
Source File: gaussian_moments.py    From DOTA_models with Apache License 2.0
def _compute_eps(log_moments, delta):
  """Compute epsilon for given log_moments and delta.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    delta: the target delta.
  Returns:
    epsilon
  """
  min_eps = float("inf")
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
  return min_eps 
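For a sense of the arithmetic, a hypothetical input (values chosen purely for illustration):

log_moments = [(2, 0.5), (4, 3.0)]   # hypothetical (moment_order, log_moment) pairs
delta = 1e-5
# order 2: (0.5 - ln(1e-5)) / 2 ≈ 6.01;  order 4: (3.0 - ln(1e-5)) / 4 ≈ 3.63
eps = _compute_eps(log_moments, delta)   # ≈ 3.63, the tighter of the two bounds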
Example #4
Source File: gaussian_moments.py    From DOTA_models with Apache License 2.0
def _compute_delta(log_moments, eps):
  """Compute delta for given log_moments and eps.

  Args:
    log_moments: the log moments of privacy loss, in the form of pairs
      of (moment_order, log_moment)
    eps: the target epsilon.
  Returns:
    delta
  """
  min_delta = 1.0
  for moment_order, log_moment in log_moments:
    if moment_order == 0:
      continue
    if math.isinf(log_moment) or math.isnan(log_moment):
      sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
      continue
    if log_moment < moment_order * eps:
      min_delta = min(min_delta,
                      math.exp(log_moment - moment_order * eps))
  return min_delta 
Example #5
Source File: data_collection.py    From dataiku-contrib with Apache License 2.0
def _get_numeric_feature_analysis_data(self, series, output):

    logger.info("Checking series of type: %s (isM8=%s)" % (series.dtype, series.dtype == np.dtype('M8[ns]')))

    if np.isinf(series).any():
        raise ValueError("Numeric feature '%s' contains Infinity values" % series.name)

    output['stats'] = {
        'min': series.min(),
        'average': series.mean(),
        'median': series.median(),
        'max': series.max(),
        'p99': series.quantile(0.99),
        'std': series.std()
    }
    output['nulls_count'] = series.isnull().sum()

    return output 
Example #6
Source File: test_numeric.py    From recruit with Apache License 2.0
def test_float(self):
        # offset for alignment test
        for i in range(4):
            assert_array_equal(self.f[i:] > 0, self.ef[i:])
            assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
            assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
            assert_array_equal(-self.f[i:] < 0, self.ef[i:])
            assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
            r = self.f[i:] != 0
            assert_array_equal(r, self.ef[i:])
            r2 = self.f[i:] != np.zeros_like(self.f[i:])
            r3 = 0 != self.f[i:]
            assert_array_equal(r, r2)
            assert_array_equal(r, r3)
            # check bool == 0x1
            assert_array_equal(r.view(np.int8), r.astype(np.int8))
            assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
            assert_array_equal(r3.view(np.int8), r3.astype(np.int8))

            # isnan on amd64 takes the same code path
            assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
            assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
            assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
            assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
            assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) 
Example #7
Source File: test_scalarmath.py    From recruit with Apache License 2.0
def test_zero_division(self):
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                a = t(0.0)
                b = t(1.0)
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.nan))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.nan))
                assert_(np.isnan(b/a))
                b = t(0.)
                assert_(np.isnan(b/a)) 
Example #8
Source File: test_analytics.py    From recruit with Apache License 2.0
def test_numpy_type_funcs(func):
    # for func in [np.isfinite, np.isinf, np.isnan, np.signbit]:
    # copy and paste from idx fixture as pytest doesn't support
    # parameters and fixtures at the same time.
    major_axis = Index(['foo', 'bar', 'baz', 'qux'])
    minor_axis = Index(['one', 'two'])
    major_codes = np.array([0, 0, 1, 2, 3, 3])
    minor_codes = np.array([0, 1, 0, 1, 0, 1])
    index_names = ['first', 'second']

    idx = MultiIndex(
        levels=[major_axis, minor_axis],
        codes=[major_codes, minor_codes],
        names=index_names,
        verify_integrity=False
    )

    with pytest.raises(Exception):
        func(idx) 
Example #9
Source File: test_reductions.py    From recruit with Apache License 2.0
def test_sum_inf(self):
        s = Series(np.random.randn(10))
        s2 = s.copy()

        s[5:8] = np.inf
        s2[5:8] = np.nan

        assert np.isinf(s.sum())

        arr = np.random.randn(100, 100).astype('f4')
        arr[:, 2] = np.inf

        with pd.option_context("mode.use_inf_as_na", True):
            tm.assert_almost_equal(s.sum(), s2.sum())

        res = nanops.nansum(arr, axis=1)
        assert np.isinf(res).all() 
Example #10
Source File: minibatch2.py    From TFFRCNN with MIT License
def _get_viewpoint_estimation_labels(viewpoint_data, clss, num_classes):
    """Bounding-box regression targets are stored in a compact form in the
    roidb.

    This function expands those targets into the 3-of-3*K representation used
    by the network (i.e. only one class has non-zero targets). The loss weights
    are similarly expanded.

    Returns:
        view_target_data (ndarray): N x 3K blob of regression targets
        view_loss_weights (ndarray): N x 3K blob of loss weights
    """
    view_targets = np.zeros((clss.size, 3 * num_classes), dtype=np.float32)
    view_loss_weights = np.zeros(view_targets.shape, dtype=np.float32)
    inds = np.where( (clss > 0) & np.isfinite(viewpoint_data[:,0]) & np.isfinite(viewpoint_data[:,1]) & np.isfinite(viewpoint_data[:,2]) )[0]
    for ind in inds:
        cls = clss[ind]
        start = 3 * cls
        end = start + 3
        view_targets[ind, start:end] = viewpoint_data[ind, :]
        view_loss_weights[ind, start:end] = [1., 1., 1.]

    assert not np.isinf(view_targets).any(), 'viewpoint undefined'
    return view_targets, view_loss_weights 
Example #11
Source File: testing.py    From dexplo with BSD 3-Clause "New" or "Revised" License
def _check_1d_arrays(a: ndarray, b: ndarray, kind: str, tol: float = 10 ** -4) -> bool:
    if kind == 'O':
        if not va.is_equal_1d_object(a, b):
            raise AssertionError(f'The values of the columns are not equal')
        return True
    elif kind == 'f':
        with np.errstate(invalid='ignore'):
            criteria1 = np.abs(a - b) < tol
            criteria2 = np.isnan(a) & np.isnan(b)
            criteria3 = np.isinf(a) & np.isinf(b)
        return (criteria1 | criteria2 | criteria3).all()
    else:
        try:
            np.testing.assert_array_equal(a, b)
        except AssertionError:
            return False
        return True 
Example #12
Source File: ColorMapWidget.py    From tf-pose with Apache License 2.0
def map(self, data):
        data = data[self.fieldName]
        colors = np.empty((len(data), 4))
        default = np.array(fn.colorTuple(self['Default'])) / 255.
        colors[:] = default
        
        for v in self.param('Values'):
            mask = data == v.maskValue
            c = np.array(fn.colorTuple(v.value())) / 255.
            colors[mask] = c
        #scaled = np.clip((data-self['Min']) / (self['Max']-self['Min']), 0, 1)
        #cmap = self.value()
        #colors = cmap.map(scaled, mode='float')
        
        #mask = np.isnan(data) | np.isinf(data)
        #nanColor = self['NaN']
        #nanColor = (nanColor.red()/255., nanColor.green()/255., nanColor.blue()/255., nanColor.alpha()/255.)
        #colors[mask] = nanColor
        
        return colors 
Example #13
Source File: test_arithmetic_execution.py    From mars with Apache License 2.0
def testDtypeExecution(self):
        a = ones((10, 20), dtype='f4', chunk_size=5)

        c = truediv(a, 2, dtype='f8')

        res = self.executor.execute_tensor(c, concat=True)[0]
        self.assertEqual(res.dtype, np.float64)

        c = truediv(a, 0, dtype='f8')
        res = self.executor.execute_tensor(c, concat=True)[0]
        self.assertTrue(np.isinf(res[0, 0]))

        with self.assertRaises(FloatingPointError):
            with np.errstate(divide='raise'):
                c = truediv(a, 0, dtype='f8')
                _ = self.executor.execute_tensor(c, concat=True)[0]  # noqa: F841 
Example #14
Source File: Scalers.py    From scattertext with Apache License 2.0
def scale_neg_1_to_1_with_zero_mean_log_abs_max(v):
	'''
	!!! not working
	'''
	df = pd.DataFrame({'v':v,
	                   'sign': (v > 0) * 2 - 1})
	df['lg'] = np.log(np.abs(v)) / np.log(1.96)
	df['exclude'] = (np.isinf(df.lg) | np.isneginf(df.lg))
	for mask in [(df['sign'] == -1) & (df['exclude'] == False),
	             (df['sign'] == 1) & (df['exclude'] == False)]:
		df[mask]['lg'] = df[mask]['lg'].max() - df[mask]['lg']
	df['lg'] *= df['sign']
	df['lg'] = df['lg'].fillna(0)
	print(df[df['exclude']]['lg'].values)
	#to_rescale = convention_df['lg'].reindex(v.index)
	df['to_out'] =  scale_neg_1_to_1_with_zero_mean_abs_max(df['lg'])
	print('right')
	print(df.sort_values(by='lg').iloc[:5])
	print(df.sort_values(by='lg').iloc[-5:])
	print('to_out')
	print(df.sort_values(by='to_out').iloc[:5])
	print(df.sort_values(by='to_out').iloc[-5:])
	print(len(df), len(df.dropna()))
	return df['to_out'] 
Example #15
Source File: core.py    From ffn with MIT License
def calc_inv_vol_weights(returns):
    """
    Calculates weights proportional to inverse volatility of each column.

    Returns weights that are inversely proportional to the column's
    volatility resulting in a set of portfolio weights where each position
    has the same level of volatility.

    Note that assets with returns all equal to NaN or 0 are excluded from
    the portfolio (their weight is set to NaN).

    Returns:
        Series {col_name: weight}
    """
    # calc vols
    vol = np.divide(1., np.std(returns, ddof=1))
    vol[np.isinf(vol)] = np.NaN
    volsum = vol.sum()
    return np.divide(vol, volsum) 
Example #16
Source File: _distn_infrastructure.py    From lambda-packs with MIT License
def _entropy(self, *args):
        def integ(x):
            val = self._pdf(x, *args)
            return entr(val)

        # upper limit is often inf, so suppress warnings when integrating
        olderr = np.seterr(over='ignore')
        h = integrate.quad(integ, self.a, self.b)[0]
        np.seterr(**olderr)

        if not np.isnan(h):
            return h
        else:
            # try with different limits if integration problems
            low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
            if np.isinf(self.b):
                upper = upp
            else:
                upper = self.b
            if np.isinf(self.a):
                lower = low
            else:
                lower = self.a
            return integrate.quad(integ, lower, upper)[0] 
Example #17
Source File: utilities.py    From qcqp with MIT License
def eval(self, x):
        if np.isinf(x):
            if self.P != 0: return self.P*x*x
            if self.q != 0: return self.q*x
            return self.r
        return x*(self.P*x + self.q) + self.r 
Example #18
Source File: math.py    From formulas with European Union Public License 1.1
def xmround(*args):
    raise_errors(args)
    num, sig = list(flatten(map(replace_empty, args), None))
    if isinstance(num, bool) or isinstance(sig, bool):
        return Error.errors['#VALUE!']
    with np.errstate(divide='ignore', invalid='ignore'):
        x = num < 0 < sig and np.nan or xceiling(num, sig, ceil=np.round)
    return (np.isnan(x) or np.isinf(x)) and Error.errors['#NUM!'] or x 
Example #19
Source File: nested_choice_calcs.py    From pylogit with BSD 3-Clause "New" or "Revised" License
def naturalize_nest_coefs(nest_coef_estimates):
    """
    Parameters
    ----------
    nest_coef_estimates : 1D ndarray.
        Should contain the estimated logit's
        (`ln[nest_coefs / (1 - nest_coefs)]`) of the true nest coefficients.
        All values should be ints, floats, or longs.

    Returns
    -------
    nest_coefs : 1D ndarray.
        Will contain the 'natural' nest coefficients:
        `1.0 / (1.0 + exp(-nest_coef_estimates))`.
    """
    # Calculate the exponential term of the
    # logistic transformation
    exp_term = np.exp(-1 * nest_coef_estimates)

    # Guard against overflow
    inf_idx = np.isinf(exp_term)
    exp_term[inf_idx] = max_comp_value

    # Calculate the 'natural' nest coefficients
    nest_coefs = 1.0 / (1.0 + exp_term)

    # Guard against underflow
    zero_idx = (nest_coefs == 0)
    nest_coefs[zero_idx] = min_comp_value

    return nest_coefs


# Create the actual function used to calculate the gradient 
Example #20
Source File: test_piecewise.py    From pywr with GNU General Public License v3.0
def test_piecewise_with_parameters_json():
    """Test using parameters with piecewise link."""
    model = load_model("piecewise1_with_parameters.json")
    sublinks = model.nodes["link1"].sublinks

    assert isinstance(sublinks[0].max_flow, ConstantParameter)
    assert np.isinf(sublinks[1].max_flow)
    assert isinstance(sublinks[0].cost, ConstantParameter)
    assert isinstance(sublinks[1].cost, ConstantParameter)

    model.run()

    assert_allclose(model.nodes["demand1"].flow, 20) 
Example #21
Source File: utils.py    From graph-cnn.pytorch with MIT License
def normalize_adj(mx):
    """Row-normalize sparse matrix"""
    rowsum = np.array(mx.sum(1))
    r_inv_sqrt = np.power(rowsum, -0.5).flatten()
    r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
    r_mat_inv_sqrt = sp.diags(r_inv_sqrt)

    return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt).tocoo() 
Example #22
Source File: base_multinomial_cm_v2.py    From pylogit with BSD 3-Clause "New" or "Revised" License
def ensure_valid_nums_in_specification_cols(specification, dataframe):
    """
    Checks whether each column in `specification` contains numeric data,
    excluding positive or negative infinity and excluding NaN. Raises
    ValueError if any of the columns do not meet these requirements.

    Parameters
    ----------
    specification : iterable of column headers in `dataframe`.
    dataframe : pandas DataFrame.
        Dataframe containing the data for the choice model to be estimated.

    Returns
    -------
    None.
    """
    problem_cols = []
    for col in specification:
        # The condition below checks for values that are not floats or integers
        # This will catch values that are strings.
        if dataframe[col].dtype.kind not in ['f', 'i', 'u']:
            problem_cols.append(col)
        # The condition below checks for positive or negative infinity values.
        elif np.isinf(dataframe[col]).any():
            problem_cols.append(col)
        # This condition will check for NaN values.
        elif np.isnan(dataframe[col]).any():
            problem_cols.append(col)

    if problem_cols != []:
        msg = "The following columns contain either +/- inifinity values, "
        msg_2 = "NaN values, or values that are not real numbers "
        msg_3 = "(e.g. strings):\n{}"
        total_msg = msg + msg_2 + msg_3
        raise ValueError(total_msg.format(problem_cols))

    return None 
Example #23
Source File: math.py    From formulas with European Union Public License 1.1
def xsrqtpi(number):
    raise_errors(number)
    x = list(flatten(replace_empty(number), None))[0]
    if isinstance(x, bool):
        return Error.errors['#VALUE!']
    with np.errstate(divide='ignore', invalid='ignore'):
        x = np.sqrt(float(x) * np.pi)
    return (np.isnan(x) or np.isinf(x)) and Error.errors['#NUM!'] or x 
Example #24
Source File: diffussion.py    From manifold-diffusion with MIT License
def normalize_connection_graph(G):
    W = csr_matrix(G)
    W = W - diags(W.diagonal())
    D = np.array(1./ np.sqrt(W.sum(axis = 1)))
    D[np.isnan(D)] = 0
    D[np.isinf(D)] = 0
    D_mh = diags(D.reshape(-1))
    Wn = D_mh * W * D_mh
    return Wn 
Example #25
Source File: callbacks.py    From astroNN with MIT License
def on_batch_end(self, batch, logs=None):
        logs = logs or {}
        loss = logs.get('loss')
        if loss is not None:
            if np.isnan(loss) or np.isinf(loss):
                self.model.stop_training = True
                raise ValueError(f'Batch {int(batch)}: Invalid loss, terminating training') 
Example #26
Source File: symbols.py    From trees with Apache License 2.0
def estimate_norm(datas):
    if datas.shape[0] < 2:
        return None, None, 0.0

    mp = np.mean(datas, axis=0)
    sp = np.cov(datas.transpose())

    sign, logdet = np.linalg.slogdet(sp)
    if np.isnan(logdet) or np.isinf(logdet):
        return mp, sp, 0.0

    ent = sign * logdet
    return mp, sp, ent 
Example #27
Source File: entropy.py    From Emotion-Recogniton-from-EEG-Signals with MIT License
def corr(data,type_corr):
    C = np.array(data.corr(type_corr))
    C[np.isnan(C)] = 0
    C[np.isinf(C)] = 0
    w,v = np.linalg.eig(C)
    #print(w)
    x = np.sort(w)
    x = np.real(x)
    return x 
Example #28
Source File: functions.py    From tf-pose with Apache License 2.0
def siScale(x, minVal=1e-25, allowUnicode=True):
    """
    Return the recommended scale factor and SI prefix string for x.
    
    Example::
    
        siScale(0.0001)   # returns (1e6, 'μ')
        # This indicates that the number 0.0001 is best represented as 0.0001 * 1e6 = 100 μUnits
    """
    
    if isinstance(x, decimal.Decimal):
        x = float(x)
        
    try:
        if np.isnan(x) or np.isinf(x):
            return(1, '')
    except:
        print(x, type(x))
        raise
    if abs(x) < minVal:
        m = 0
        x = 0
    else:
        m = int(np.clip(np.floor(np.log(abs(x))/np.log(1000)), -9.0, 9.0))
    
    if m == 0:
        pref = ''
    elif m < -8 or m > 8:
        pref = 'e%d' % (m*3)
    else:
        if allowUnicode:
            pref = SI_PREFIXES[m+8]
        else:
            pref = SI_PREFIXES_ASCII[m+8]
    p = .001**m
    
    return (p, pref) 
Example #29
Source File: AxisItem.py    From tf-pose with Apache License 2.0
def setRange(self, mn, mx):
        """Set the range of values displayed by the axis.
        Usually this is handled automatically by linking the axis to a ViewBox with :func:`linkToView <pyqtgraph.AxisItem.linkToView>`"""
        if any(np.isinf((mn, mx))) or any(np.isnan((mn, mx))):
            raise Exception("Not setting range to [%s, %s]" % (str(mn), str(mx)))
        self.range = [mn, mx]
        if self.autoSIPrefix:
            self.updateAutoSIPrefix()
        self.picture = None
        self.update() 
Example #30
Source File: ColorMapWidget.py    From tf-pose with Apache License 2.0
def map(self, data):
        data = data[self.fieldName]
        
        scaled = np.clip((data-self['Min']) / (self['Max']-self['Min']), 0, 1)
        cmap = self.value()
        colors = cmap.map(scaled, mode='float')
        
        mask = np.isnan(data) | np.isinf(data)
        nanColor = self['NaN']
        nanColor = (nanColor.red()/255., nanColor.green()/255., nanColor.blue()/255., nanColor.alpha()/255.)
        colors[mask] = nanColor
        
        return colors