Python numpy.float128() Examples

The following are 30 code examples of numpy.float128(), drawn from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions/classes of the numpy module.
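As background for the examples below: np.float128 is an alias for NumPy's extended-precision np.longdouble, and what you actually get is platform dependent. On most x86 Linux and macOS builds it is 80-bit extended precision stored in 128 bits, and on some platforms (notably Windows) the name does not exist at all, which is why several of the examples guard for it with hasattr. A minimal sketch of the common pattern:

import numpy as np

# np.float128 maps to the platform's long double; it is generally not IEEE
# quad precision, and it may be missing entirely on some platforms.
if hasattr(np, 'float128'):
    x = np.float128('0.1')
    print(np.finfo(np.float128).precision)   # decimal digits, e.g. 18 on x86
else:
    x = np.longdouble('0.1')                 # portable fallback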
Example #1
Source File: estimators_pid.py    From IDTxl with GNU General Public License v3.0
def _mi_prob(self, s1_prob, s2_prob, joint_s1_s2_prob):
        """MI estimator in the prob domain."""
        total = np.zeros(1).astype('float128')
        [alph_s1, alph_s2] = np.shape(joint_s1_s2_prob)

        for sym_s1 in range(0, alph_s1):
            for sym_s2 in range(0, alph_s2):

                if (s1_prob[sym_s1] * s2_prob[sym_s2] *
                        joint_s1_s2_prob[sym_s1, sym_s2] > 0):

                    local_contrib = (
                        np.log(joint_s1_s2_prob[sym_s1, sym_s2]) -
                        np.log(s1_prob[sym_s1]) -
                        np.log(s2_prob[sym_s2])
                                    ) / np.log(2)

                    weighted_contrib = (joint_s1_s2_prob[sym_s1, sym_s2] *
                                        local_contrib)
                else:
                    weighted_contrib = 0
                total += weighted_contrib

        return total 
Example #2
Source File: dtype.py    From bifrost with BSD 3-Clause "New" or "Revised" License
def name_nbit2numpy(name, nbit):
    if   name == 'i':
        if   nbit == 8:   return np.int8
        elif nbit == 16:  return np.int16
        elif nbit == 32:  return np.int32
        elif nbit == 64:  return np.int64
        else: raise TypeError("Invalid signed integer type size: %i" % nbit)
    elif name == 'u':
        if   nbit == 8:   return np.uint8
        elif nbit == 16:  return np.uint16
        elif nbit == 32:  return np.uint32
        elif nbit == 64:  return np.uint64
        else: raise TypeError("Invalid unsigned integer type size: %i" % nbit)
    elif name == 'f':
        if   nbit == 16:  return np.float16
        elif nbit == 32:  return np.float32
        elif nbit == 64:  return np.float64
        elif nbit == 128: return np.float128
        else: raise TypeError("Invalid floating-point type size: %i" % nbit)
    elif name == 'ci':
        if   nbit == 8:   return ci8
        elif nbit == 16:  return ci16
        elif nbit == 32:  return ci32
    # elif name in set(['ci', 'cu']):
        # Note: This gives integer types in place of proper complex types
        # return name_nbit2numpy(name[1:], nbit*2)
    elif name == 'cf':
        if   nbit == 16:  return cf16
        elif nbit == 32:  return np.complex64
        elif nbit == 64:  return np.complex128
        elif nbit == 128: return np.complex256
        else: raise TypeError("Invalid complex floating-point type size: %i" %
                              nbit)
    else:
        raise TypeError("Invalid type name: " + name) 
Example #3
Source File: utils.py    From hgail with MIT License
def softmax(logits, axis=-1):
    shape = logits.shape
    logits = logits.astype(np.float128).reshape(-1, shape[-1])
    x = np.exp(logits - np.max(logits, axis=-1, keepdims=True))
    probs = x / np.sum(x, axis=-1, keepdims=True)
    invalid_idxs = np.where(np.sum(probs, axis=-1, keepdims=True) > 1.)[0]
    probs[invalid_idxs] -= (np.sum(probs[invalid_idxs], axis=-1, keepdims=True) - 1 + 1e-8) / probs.shape[-1]
    probs = probs.astype(np.float64)
    return probs.reshape(shape) 
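For context, a usage sketch with toy logits (assuming a platform where np.float128 exists): the float128 intermediate keeps extra precision before the final float64 cast, and every row of the result sums to one along the last axis.

import numpy as np

logits = np.array([[1.0, 2.0, 3.0],
                   [-5.0, 0.0, 5.0]])
probs = softmax(logits)
print(probs.shape)           # (2, 3), same shape as the input
print(probs.sum(axis=-1))    # each row sums to (approximately) 1.0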
Example #4
Source File: dtype.py    From bifrost with BSD 3-Clause "New" or "Revised" License
def numpy2bifrost(dtype):
    if   dtype == np.int8:       return _bf.BF_DTYPE_I8
    elif dtype == np.int16:      return _bf.BF_DTYPE_I16
    elif dtype == np.int32:      return _bf.BF_DTYPE_I32
    elif dtype == np.uint8:      return _bf.BF_DTYPE_U8
    elif dtype == np.uint16:     return _bf.BF_DTYPE_U16
    elif dtype == np.uint32:     return _bf.BF_DTYPE_U32
    elif dtype == np.float16:    return _bf.BF_DTYPE_F16
    elif dtype == np.float32:    return _bf.BF_DTYPE_F32
    elif dtype == np.float64:    return _bf.BF_DTYPE_F64
    elif dtype == np.float128:   return _bf.BF_DTYPE_F128
    elif dtype == ci8:           return _bf.BF_DTYPE_CI8
    elif dtype == ci16:          return _bf.BF_DTYPE_CI16
    elif dtype == ci32:          return _bf.BF_DTYPE_CI32
    elif dtype == cf16:          return _bf.BF_DTYPE_CF16
    elif dtype == np.complex64:  return _bf.BF_DTYPE_CF32
    elif dtype == np.complex128: return _bf.BF_DTYPE_CF64
    elif dtype == np.complex256: return _bf.BF_DTYPE_CF128
    else: raise ValueError("Unsupported dtype: " + str(dtype)) 
Example #5
Source File: test_conversion_numpy.py    From rpy2 with GNU General Public License v2.0
def test_scalar_f128(self):
        f128 = numpy.float128(100.000000003)
        f128_r = conversion.py2rpy(f128)
        f128_test = numpy.array(f128_r)[0]
        assert f128 == f128_test 
Example #6
Source File: test_nanops.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def test_returned_dtype(self):

        dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
        if hasattr(np, 'float128'):
            dtypes.append(np.float128)

        for dtype in dtypes:
            s = Series(range(10), dtype=dtype)
            group_a = ['mean', 'std', 'var', 'skew', 'kurt']
            group_b = ['min', 'max']
            for method in group_a + group_b:
                result = getattr(s, method)()
                if is_integer_dtype(dtype) and method in group_a:
                    assert result.dtype == np.float64
                else:
                    assert result.dtype == dtype 
Example #7
Source File: lda_sentenceLayer.py    From topicModelling with GNU General Public License v3.0
def get_full_conditional(self, sentence, m, z, n_z, n_m_z):
        prod_nom, prod_den = [], []
        words = Counter(sentence)
        # Numerator factors: per-topic counts of each word plus beta,
        # offset by one for every repeated occurrence of that word.
        for key, val in words.items():
            for x in range(val):
                quantity = self.n_z_t[:, key] + self.beta + x
                prod_nom.append(quantity)
        prod_nom = np.array(prod_nom, dtype=np.float128)
        # Denominator factors: per-topic totals plus beta*V, offset by one
        # per word position in the sentence.
        left_denominator = n_z + self.beta * self.V
        for x in range(len(sentence)):
            quantity = left_denominator + x
            prod_den.append(quantity)
        prod_den = np.array(prod_den, dtype=np.float128)
        # Product of the per-word ratios for each topic; float128 limits underflow.
        prodall = np.prod(np.divide(prod_nom, prod_den), axis=0)
        right = n_m_z[m, :] + self.alpha
        p_z = prodall * right
        p_z /= np.sum(p_z)
        return p_z.astype(np.float64) 
Example #8
Source File: DataIOFactory.py    From Predicting-Health-Insurance-Cost with BSD 3-Clause "New" or "Revised" License
def convertDatatoFloat(data,isMatrix):
    if(isMatrix):
        dataMatrixAsfloat = [ [ np.float128(eachVal) for eachVal in row ] for row in data]
        return np.array(dataMatrixAsfloat) 
    else:
        dataListAsfloat = [ np.float128(eachVal) for eachVal in data]
        return dataListAsfloat 
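A usage sketch with hypothetical input values (assuming np.float128 is available on the platform):

import numpy as np

rows = [["1.5", "2.25"], ["3.0", "4.125"]]
mat = convertDatatoFloat(rows, isMatrix=True)
print(mat.dtype)        # float128
vec = convertDatatoFloat(["0.1", "0.2"], isMatrix=False)
print(vec[0])           # a np.float128 scalar, 0.1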
Example #9
Source File: preprocessing.py    From skl-groups with BSD 3-Clause "New" or "Revised" License
def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        X = check_array(X, copy=self.copy,
                        dtype=[np.float64, np.float32, np.float16, np.float128])

        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))
        if self.fit_feature_range is not None:
            fit_feature_range = self.fit_feature_range
            if fit_feature_range[0] >= fit_feature_range[1]:
                raise ValueError("Minimum of desired (fit) feature range must "
                                 "be smaller than maximum. Got %s."
                                 % str(feature_range))
            if (fit_feature_range[0] < feature_range[0] or
                    fit_feature_range[1] > feature_range[1]):
                raise ValueError("fit_feature_range must be a subset of "
                                 "feature_range. Got %s, fit %s."
                                 % (str(feature_range),
                                    str(fit_feature_range)))
            feature_range = fit_feature_range

        data_min = np.min(X, axis=0)
        data_range = np.max(X, axis=0) - data_min
        # Do not scale constant features
        data_range[data_range == 0.0] = 1.0
        self.scale_ = (feature_range[1] - feature_range[0]) / data_range
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_range = data_range
        self.data_min = data_min
        return self 
Example #10
Source File: approximate_equality_protocol_test.py    From Cirq with Apache License 2.0
def test_numpy_dtype_compatibility():
    i_a, i_b, i_c = 0, 1, 2
    i_types = [np.intc, np.intp, np.int0, np.int8, np.int16, np.int32, np.int64]
    for i_type in i_types:
        assert cirq.approx_eq(i_type(i_a), i_type(i_b), atol=1)
        assert not cirq.approx_eq(i_type(i_a), i_type(i_c), atol=1)
    u_types = [np.uint, np.uint0, np.uint8, np.uint16, np.uint32, np.uint64]
    for u_type in u_types:
        assert cirq.approx_eq(u_type(i_a), u_type(i_b), atol=1)
        assert not cirq.approx_eq(u_type(i_a), u_type(i_c), atol=1)

    f_a, f_b, f_c = 0, 1e-8, 1
    f_types = [np.float16, np.float32, np.float64]
    if hasattr(np, 'float128'):
        f_types.append(np.float128)
    for f_type in f_types:
        assert cirq.approx_eq(f_type(f_a), f_type(f_b), atol=1e-8)
        assert not cirq.approx_eq(f_type(f_a), f_type(f_c), atol=1e-8)

    c_a, c_b, c_c = 0, 1e-8j, 1j
    c_types = [np.complex64, np.complex128]
    if hasattr(np, 'complex256'):
        c_types.append(np.complex256)
    for c_type in c_types:
        assert cirq.approx_eq(c_type(c_a), c_type(c_b), atol=1e-8)
        assert not cirq.approx_eq(c_type(c_a), c_type(c_c), atol=1e-8) 
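The same check carries over to float128 directly where the platform provides it; a minimal sketch in the same style:

if hasattr(np, 'float128'):
    assert cirq.approx_eq(np.float128(0), np.float128(1e-8), atol=1e-8)
    assert not cirq.approx_eq(np.float128(0), np.float128(1), atol=1e-8)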
Example #11
Source File: estimators_fast_pid_ext_rep.py    From IDTxl with GNU General Public License v3.0
def _mi_prob(s1_prob, s2_prob, joint_s1_s2_prob):
    """
    MI estimator in the prob domain
    """
    total = np.zeros(1).astype('float128')

    [alph_s1, alph_s2] = np.shape(joint_s1_s2_prob)

    for sym_s1 in range(0, alph_s1):
        for sym_s2 in range(0, alph_s2):

            if ( s1_prob[sym_s1] * s2_prob[sym_s2]
                 * joint_s1_s2_prob[sym_s1, sym_s2] > 0 ):

                local_contrib = (
                    np.log(joint_s1_s2_prob[sym_s1, sym_s2])
                    - np.log(s1_prob[sym_s1])
                    - np.log(s2_prob[sym_s2])
                    ) / np.log(2)

                weighted_contrib = (
                    joint_s1_s2_prob[sym_s1, sym_s2]
                    * local_contrib)
            else:
                weighted_contrib = 0
            total += weighted_contrib

    return total 
Example #12
Source File: estimators_fast_pid_ext_rep.py    From IDTxl with GNU General Public License v3.0
def _joint_mi(s1, s2, t, alph_s1, alph_s2, alph_t):
    """
    Joint MI estimator in the samples domain
    """

    [s12, alph_s12] = _join_variables(s1, s2, alph_s1, alph_s2)

    t_count = np.zeros(alph_t, dtype=int)   # np.int alias was removed in NumPy 1.24
    s12_count = np.zeros(alph_s12, dtype=int)
    joint_t_s12_count = np.zeros((alph_t, alph_s12), dtype=int)

    num_samples = len(t)

    for obs in range(0, num_samples):
        t_count[t[obs]] += 1
        s12_count[s12[obs]] += 1
        joint_t_s12_count[t[obs], s12[obs]] += 1

    t_prob = np.divide(t_count, num_samples).astype('float128')
    s12_prob = np.divide(s12_count, num_samples).astype('float128')
    joint_t_s12_prob = np.divide(joint_t_s12_count, num_samples).astype('float128')

    jmi = _mi_prob(t_prob, s12_prob, joint_t_s12_prob)

    return jmi 
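A usage sketch with toy samples (the arrays are illustrative and not from IDTxl; _join_variables and _mi_prob are the module-level helpers from this same file):

import numpy as np

s1 = np.array([0, 1, 0, 1, 0, 1, 0, 1])
s2 = np.array([0, 0, 1, 1, 0, 0, 1, 1])
t = s1 ^ s2                         # target is the XOR of the two sources
jmi = _joint_mi(s1, s2, t, alph_s1=2, alph_s2=2, alph_t=2)
print(jmi)                          # ~1 bit, since (s1, s2) jointly determine t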
Example #13
Source File: estimators_fast_pid.py    From IDTxl with GNU General Public License v3.0
def _cmi_prob(s2cond_prob, joint_t_s2cond_prob, joint_s1_s2cond_prob,
              joint_t_s1_s2cond_prob):
    """Calculate probabilities for CMI estimation."""
    total = np.zeros(1).astype('float128')

    [alph_t, alph_s1, alph_s2cond] = np.shape(joint_t_s1_s2cond_prob)

    for sym_s1 in range(0, alph_s1):
        for sym_s2cond in range(0, alph_s2cond):
            for sym_t in range(0, alph_t):

                if (s2cond_prob[sym_s2cond] *
                        joint_t_s2cond_prob[sym_t, sym_s2cond] *
                        joint_s1_s2cond_prob[sym_s1, sym_s2cond] *
                        joint_t_s1_s2cond_prob[sym_t, sym_s1, sym_s2cond] > 0):

                    local_contrib = (
                        np.log(joint_t_s1_s2cond_prob[sym_t, sym_s1,
                                                      sym_s2cond]) +
                        np.log(s2cond_prob[sym_s2cond]) -
                        np.log(joint_t_s2cond_prob[sym_t, sym_s2cond]) -
                        np.log(joint_s1_s2cond_prob[sym_s1, sym_s2cond])
                        ) / np.log(2)

                    weighted_contrib = (
                        joint_t_s1_s2cond_prob[sym_t, sym_s1, sym_s2cond] *
                        local_contrib)
                else:
                    weighted_contrib = 0
                total += weighted_contrib
    return total 
Example #14
Source File: estimators_fast_pid.py    From IDTxl with GNU General Public License v3.0
def _mi_prob(s1_prob, s2_prob, joint_s1_s2_prob):
    """ MI estimator in the prob domain."""
    total = np.zeros(1).astype('float128')

    [alph_s1, alph_s2] = np.shape(joint_s1_s2_prob)

    for sym_s1 in range(0, alph_s1):
        for sym_s2 in range(0, alph_s2):

            if (s1_prob[sym_s1] * s2_prob[sym_s2] *
                    joint_s1_s2_prob[sym_s1, sym_s2] > 0):

                local_contrib = (
                    np.log(joint_s1_s2_prob[sym_s1, sym_s2]) -
                    np.log(s1_prob[sym_s1]) -
                    np.log(s2_prob[sym_s2])) / np.log(2)

                weighted_contrib = (
                    joint_s1_s2_prob[sym_s1, sym_s2] *
                    local_contrib)
            else:
                weighted_contrib = 0
            total += weighted_contrib

    return total 
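For context, the estimator expects marginal and joint probability tables that each sum to one and returns the mutual information in bits as a length-1 float128 array. A usage sketch with a toy joint distribution (the numbers are illustrative, not from IDTxl):

import numpy as np

# Joint distribution of two binary variables (rows: s1, columns: s2).
joint = np.array([[0.4, 0.1],
                  [0.1, 0.4]])
p_s1 = joint.sum(axis=1)            # marginal distribution of s1
p_s2 = joint.sum(axis=0)            # marginal distribution of s2
mi = _mi_prob(p_s1, p_s2, joint)
print(mi)                           # approximately 0.278 bits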
Example #15
Source File: estimators_pid.py    From IDTxl with GNU General Public License v3.0
def _cmi_prob(self, s2cond_prob, joint_t_s2cond_prob,
                  joint_s1_s2cond_prob, joint_t_s1_s2cond_prob):
        total = np.zeros(1).astype('float128')

        [alph_t, alph_s1, alph_s2cond] = np.shape(joint_t_s1_s2cond_prob)

        for sym_s1 in range(0, alph_s1):
            for sym_s2cond in range(0, alph_s2cond):
                for sym_t in range(0, alph_t):

                    if (s2cond_prob[sym_s2cond] *
                            joint_t_s2cond_prob[sym_t, sym_s2cond] *
                            joint_s1_s2cond_prob[sym_s1, sym_s2cond] *
                            joint_t_s1_s2cond_prob[sym_t, sym_s1, sym_s2cond] >
                            0):

                        local_contrib = (
                               np.log(joint_t_s1_s2cond_prob[sym_t, sym_s1,
                                                             sym_s2cond]) +
                               np.log(s2cond_prob[sym_s2cond]) -
                               np.log(joint_t_s2cond_prob[sym_t, sym_s2cond]) -
                               np.log(joint_s1_s2cond_prob[sym_s1, sym_s2cond])
                                       ) / np.log(2)

                        weighted_contrib = (
                            joint_t_s1_s2cond_prob[sym_t, sym_s1, sym_s2cond] *
                            local_contrib)
                    else:
                        weighted_contrib = 0
                    total += weighted_contrib

        return total 
Example #16
Source File: tools.py    From PINTO_model_zoo with MIT License
def sigmoid(arr):
    """
    对数组arr中的每个元素执行sigmoid计算
    :param arr: 任意shape的数组
    :return: sigmoid后的数组
    """
    arr = np.array(arr, dtype=np.float128)
    return 1.0 / (1.0 + np.exp(-1.0 * arr)) 
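A usage sketch (assuming a platform where np.float128 exists): the float128 intermediate lets np.exp handle arguments that would overflow a float64.

import numpy as np

x = np.array([-1000.0, 0.0, 1000.0])
print(sigmoid(x))    # roughly [0.0, 0.5, 1.0], with no overflow warning from np.exp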
Example #17
Source File: estimators_pid.py    From IDTxl with GNU General Public License v3.0
def _joint_mi(self, s1, s2, t, alph_s1, alph_s2, alph_t):
        """Joint MI estimator in the samples domain."""

        [s12, alph_s12] = _join_variables(s1, s2, alph_s1, alph_s2)

        t_count = np.zeros(alph_t, dtype=int)   # np.int alias was removed in NumPy 1.24
        s12_count = np.zeros(alph_s12, dtype=int)
        joint_t_s12_count = np.zeros((alph_t, alph_s12), dtype=int)

        num_samples = len(t)

        for obs in range(0, num_samples):
            t_count[t[obs]] += 1
            s12_count[s12[obs]] += 1
            joint_t_s12_count[t[obs], s12[obs]] += 1

        t_prob = np.divide(t_count, num_samples).astype('float128')
        s12_prob = np.divide(s12_count, num_samples).astype('float128')
        joint_t_s12_prob = np.divide(joint_t_s12_count,
                                     num_samples).astype('float128')

        return self._mi_prob(t_prob, s12_prob, joint_t_s12_prob) 
Example #18
Source File: test_hash.py    From ubelt with Apache License 2.0
def test_numpy_float():
    if np is None:
        pytest.skip('requires numpy')
    assert hash_sequence(np.float16(3.0)) == hash_sequence(3.0)
    assert hash_sequence(np.float32(3.0)) == hash_sequence(3.0)
    assert hash_sequence(np.float64(3.0)) == hash_sequence(3.0)
    try:
        assert hash_sequence(np.float128(3.0)) == hash_sequence(3.0)
    except AttributeError:
        pass 
Example #19
Source File: util.py    From qtpandas with MIT License
def getCsvData():
    dtypes = {
        "int8_value": numpy.int8,
        "int16_value": numpy.int16,
        "int32_value": numpy.int32,
        # "int64_value": numpy.int64, # OverFlowError
        "uint8_value": numpy.uint8,
        "uint16_value": numpy.uint16,
        "uint32_value": numpy.uint32,
        # "uint64_value": numpy.uint64, # OverFlowError
        "float16_value": numpy.float16,
        "float32_value": numpy.float32,
        "float64_value": numpy.float64,
        # "float128_value": numpy.float128,
        "bool_value": numpy.bool_
    }
    delimiter = ","
    encoding = "utf-8"
    parse_dates = ["timestamp_value"]

    path = os.path.join(os.getcwdu(), "examples/testData/test1.csv")
    if not os.path.exists(path):
        path = os.path.join(os.getcwdu(), "testData/test1.csv")

    df = pandas.read_csv(
        path,
        dtype=dtypes,
        delimiter=delimiter,
        encoding=encoding,
        parse_dates=parse_dates
    )

    try:
        df["int64_value"] = df["int64_value"].astype(numpy.int64)
        df["uint64_value"] = df["uint64_value"].astype(numpy.uint64)
    except:
        raise

    return df 
Example #20
Source File: NestedSampling.py    From cpnest with MIT License
def get_worst_n_live_points(self, n):
        """
        selects the lowest likelihood N live points
        for evolution
        """
        self.params.sort(key=attrgetter('logL'))
        self.worst = np.arange(n)
        self.logLmin.value = np.float128(self.params[n-1].logL)
        return np.float128(self.logLmin.value) 
Example #21
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def pooled_distance(M,mode='j'):
    D,NN = {},{}
    for t in M:
        D[t],NN[t] = {},{}
        for b in M[t]:
            D[t][b],NN[t][b] = {},{}
            #get the combination distances
            for g in M[t][b]:
                D[t][b][g] = np.float128(1.0)  #default is the  max distance
                if mode=='j' and np.float128(M[t][b][g][1]) > 0.0:
                    #1-(|I|/|U|)
                    D[t][b][g] = np.float128(1.0)-np.float128(M[t][b][g][0])/np.float128(M[t][b][g][1])
                if mode=='u' and np.float128(M[t][b][g][1]) > 0.0 and np.float128(M[t][b][g][2]) > 0.0:
                    #1-2*((|I|/|U|)*(|D1|/(|D1|+|D2|))/((|I|/|U|)+(|D1|/(|D1|+|D2|))
                    j = np.float128(M[t][b][g][0])/np.float128(M[t][b][g][1])
                    u = np.float128(M[t][b][g][2])/(np.float128(M[t][b][g][2])+np.float128(M[t][b][g][3]))
                    D[t][b][g] = np.float128(1.0)-np.float128(2.0)*(j*u)/(j+u)
            K = sorted(list(set([v for w in M[t][b] for v in w])))        
            for k in K:
                N = []
                for i,j in sorted(M[t][b].keys()): #distance then key
                    if k==i: N += [[D[t][b][(i,j)],j]]
                    if k==j: N += [[D[t][b][(i,j)],i]]
                NN[t][b][k] = sorted(N,key=lambda x: x[0])
    return D,NN

#given prior and new data smooth based on the magnitude
#of observations for the features for each pair 
Example #22
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def impute_true(A,k):
    I = {}
    for t in A:
        I[t] = {}
        for b in A[t]:
            I[t][b] = {}
            for g in A[t][b]:
                I[t][b][g] = [np.uint64(0),np.uint64(0),np.uint64(0),np.uint64(0)]
                for i in range(4):
                    I[t][b][g][i] = np.float128(np.mean(A[t][b][g][np.where(A[t][b][g][:,0]!=k),i+1])+1)
    return I 
Example #23
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def get_pwdm(D,G):
    P = {}
    for t in D:
        P[t] = {}
        for b in D[t]:
            P[t][b] = np.zeros((len(G),len(G)),dtype=np.float128)
            for i,j in it.combinations(range(len(G)),2):
                if D[t][b].has_key((G[i],G[j])):
                    P[t][b][i][j] = P[t][b][j][i] = D[t][b][(G[i],G[j])]
                elif D[t][b].has_key((G[j],G[i])):
                    P[t][b][i][j] = P[t][b][j][i] = D[t][b][(G[j],G[i])]
                else: #neither (i,j) nor (j,i) key exists
                    P[t][b][i][j] = P[t][b][j][i] = np.float128(1.0)
    return P 
Example #24
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def sum_pairs(P):
    S = {}
    for t in P:
        S[t] = {}
        for b in P[t]:
            S[t][b] = []
            for row in P[t][b]:
                S[t][b] += [np.sum(row)/np.float128(len(row)-1)]
    return S 
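A usage sketch with a hypothetical 3x3 pairwise-distance matrix (the 'DEL' and bin keys are made up for illustration, and np.float128 is assumed to be available): each entry of the result is a caller's mean distance to the other callers, since the zero diagonal contributes nothing and the row sum is divided by len(row) - 1.

import numpy as np

P = {'DEL': {0: np.array([[0.0, 0.2, 0.4],
                          [0.2, 0.0, 0.6],
                          [0.4, 0.6, 0.0]], dtype=np.float128)}}
S = sum_pairs(P)
print(S['DEL'][0])   # [0.3, 0.4, 0.5]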
Example #25
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def exp_stats(e,e_post,a):
    if a < np.float128(1.0):
        H,H_post = exp_hist(e),exp_hist(e_post)
        upper,lower,value = 0,0,np.float128(1.0)
        for row in H:
            if row[0] >= a: upper += int(row[1])
        for row in H_post:
            lower += int(row[1])
            if lower >= upper:
                value = row[0]
                break
        h = np.array([e[g] for g in e])
        x = [a,value,len(e)-upper,upper,np.median(h),np.mean(h),np.std(h)]
    else:
        x = [a,a,len(e),0,np.float128(0.0),np.float128(0.0),np.float128(0.0)]
    return x
      
#given the old group expectation E and cutoff t,b value alpha
#estimates an alpha_post value that uses the histogram of the
#posterior estimate E_post to adjust the alpha value 
Example #26
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def select_groups(W,gamma=0.0):
    G = {}
    for t in W:
        G[t] = {}
        for b in W[t]:
            C,G[t][b] = [],{}
            for g in W[t][b]:
                if len(g)<=1 and W[t][b][g]>=gamma: C += [g[0]]
            if len(C)<=0:
                G[t][b] = {(None,):np.float128(0.0)}
            else:
                C = sorted(C)
                for i in range(1,len(C)+1):
                    for j in it.combinations(C,i):
                        G[t][b][j] = W[t][b][j]
    return G
                
#given a svul, join idxs from list j
Example #27
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def get_weight(A,E,average=0):
    if E.has_key((None,)): #default average weighting when no target is available
        if average>0:        #to do is to connect the independent expectations here
            c = np.float128(1.0)
            n = c/np.float128(average)
            w  = c-np.prod([c-n*np.float128((A[g][A[g].keys()[0]][1])) for g in A])
        else:
            w = np.float128(0.0) #0.0
    else:
        w = E[tuple(sorted([a[0] for a in A]))]
    return w
    
#active edges A, next vertex edges B
#clear off terminal edges of A and add B 
Example #28
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def target_filter_cuttoff_static(B,static_filter):
    alpha = {}
    for t in B:
        alpha[t] = {}
        for b in B[t]:
            alpha[t][b] = np.float128(static_filter)
    return alpha

#:::TO DO::: do the search but not inside each bin 
Example #29
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def target_filter_cuttoff_static_exhaustive(A,E,T):
    alpha = {}
    for t in A:
        alpha[t] = {}
        for b in A[t]:
            alpha[t][b] = np.float128(1.0)
    return alpha

#when gamma = 0.0 will search all combinations 
Example #30
Source File: fusor_utils.py    From SVE with GNU General Public License v3.0
def target_filter_cutoff_exhaustive(A,E,T):
    alpha = {}
    for t in A:
        alpha[t] = {}
        for b in A[t]:
            alpha[t][b] = np.float128(1.0)
            J = {k:[np.uint64(0),np.uint64(0)] for k in sorted(list(set([E[t][b][z] for z in E[t][b]])))}
            if len(J)>1:
                for s in A[t][b]:
                    for k in J:
                        if T[t][b].has_key(s):
                            N = fu.feature_magnitudes(T[t][b][s],filter_single_bin(A[t][b][s],k))
                            J[k][0],J[k][1] = J[k][0]+np.uint64(N[0]),J[k][1]+np.uint64(N[1])
                for k in J:
                    if J[k][1]>0.0: J[k] = float(np.float128(J[k][0])/np.float128(J[k][1]))
                    else:           J[k] = 0.0 
                alpha[t][b] = sorted([[k,J[k]] for k in J],key=lambda x: x[1], reverse=True)[0][0]
            elif len(J)==1 and J.keys()[0] > 0.0:
                for s in A[t][b]:
                    for k in J:
                        if T[t][b].has_key(s):
                            N = fu.feature_magnitudes(T[t][b][s],filter_single_bin(A[t][b][s],k))
                            J[k][0],J[k][1] = J[k][0]+np.uint64(N[0]),J[k][1]+np.uint64(N[1])
                for k in J:
                    if J[k][1]>0.0: J[k] = float(np.float128(J[k][0])/np.float128(J[k][1]))
                    else:           J[k] = 0.0 
                alpha[t][b] = sorted([[k,J[k]] for k in J],key=lambda x: x[1], reverse=True)[0][0]
    return alpha

#when gamma = 0.0 will search all combinations