Python math.nan Examples

The following code examples show how to use math.nan (a module attribute, not a callable function). They are drawn from open-source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: RTX   Author: RTXteam   File: QueryPubMedNGD.py    MIT License 6 votes vote down vote up
def normalized_google_distance(mesh1_str, mesh2_str):
        '''Compute the normalized Google distance (NGD) between two MeSH terms.

        :returns: NGD, as a float (or math.nan if any counts are zero, or None if HTTP error)
        '''
        # Hit count for the two terms together.
        joint_query = '("{mesh1}"[MeSH Terms]) AND "{mesh2}"[MeSH Terms]'.format(mesh1=mesh1_str,
                                                                                 mesh2=mesh2_str)
        nij = QueryPubMedNGD.get_pubmed_hits_count(joint_query)
        # from PubMed home page there are 27 million articles; avg 20 MeSH terms per article
        N = 2.7e+7 * 20
        ni = QueryPubMedNGD.get_pubmed_hits_count('"{mesh1}"[MeSH Terms]'.format(mesh1=mesh1_str))
        nj = QueryPubMedNGD.get_pubmed_hits_count('"{mesh2}"[MeSH Terms]'.format(mesh2=mesh2_str))
        if 0 in (ni, nj, nij):
            # NGD is undefined when any count is zero.
            return math.nan
        log_ni, log_nj, log_nij = math.log(ni), math.log(nj), math.log(nij)
        return (max(log_ni, log_nj) - log_nij) / (math.log(N) - min(log_ni, log_nj))
Example 2
Project: OctoBot-Trading   Author: Drakkar-Software   File: ticker_manager.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def reset_ticker(self):
        """Re-initialize the ticker dict: every price/volume field becomes nan,
        the timestamp becomes 0 (meaning: not yet updated)."""
        nan_fields = (
            ExchangeConstantsTickersColumns.ASK,
            ExchangeConstantsTickersColumns.ASK_VOLUME,
            ExchangeConstantsTickersColumns.BID,
            ExchangeConstantsTickersColumns.BID_VOLUME,
            ExchangeConstantsTickersColumns.OPEN,
            ExchangeConstantsTickersColumns.LOW,
            ExchangeConstantsTickersColumns.HIGH,
            ExchangeConstantsTickersColumns.CLOSE,
            ExchangeConstantsTickersColumns.LAST,
            ExchangeConstantsTickersColumns.AVERAGE,
            ExchangeConstantsTickersColumns.SYMBOL,
            ExchangeConstantsTickersColumns.QUOTE_VOLUME,
        )
        self.ticker = {field: nan for field in nan_fields}
        self.ticker[ExchangeConstantsTickersColumns.TIMESTAMP] = 0
        self.ticker[ExchangeConstantsTickersColumns.VWAP] = nan
Example 3
Project: arbc-solidity   Author: OffchainLabs   File: floatlib.py    Apache License 2.0 6 votes vote down vote up
def toPythonFloat(vm):  # call from Python emulator only; will weird out the compiler
    """Pop an unpacked float off the emulator stack and convert it to a Python float.

    Field layout (presumably established by unpack() — confirm there):
    f[0]=sign flag, f[1]=mantissa, f[2]=raw exponent, f[3]=infinity flag,
    f[4]=NaN flag.
    """
    unpack(vm)
    f = vm.stack[0]
    vm.pop()
    if f[4] != 0:  # NaN flag set
        return math.nan
    if f[3] != 0:  # infinity flag set; f[0] carries the sign
        if f[0] == 0:
            return math.inf
        else:
            return -math.inf

    # Implicit leading 1 plus the fractional mantissa.
    floatval = 1 + f[1]/(2**_mantbits)
    expToUse = f[2]+_bias
    if expToUse < 0:
        floatval /= (2**(-expToUse))
    else:
        # BUG FIX: was `floatval *= (2*expToUse)`; exponent scaling must be a
        # power of two, mirroring the negative-exponent branch above.
        floatval *= (2**expToUse)
    if f[0] != 0:
        floatval = -floatval
    return floatval


# unpacks the low-order 64 bits of the (int) input into an _unpackedFloat 
Example 4
Project: OctoBot-Tentacles   Author: Drakkar-Software   File: pattern_analysis.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def get_pattern(data):
        """Classify the shape of *data* as "W", "M", "V" or "N".

        Returns PatternAnalyser.UNKNOWN_PATTERN for empty input.
        """
        mean_value = np.mean(data) * 0.7 if len(data) > 0 else math.nan
        if math.isnan(mean_value):
            return PatternAnalyser.UNKNOWN_PATTERN

        # Indexes on the "inner" side of the scaled mean (direction depends
        # on the sign of the mean).
        if mean_value < 0:
            inner_indexes = np.where(data > mean_value)[0]
        else:
            inner_indexes = np.where(data < mean_value)[0]

        # Count gaps of more than 3 samples between consecutive inner indexes.
        nb_gaps = sum(
            1
            for previous, current in zip(inner_indexes, inner_indexes[1:])
            if current - previous > 3
        )

        if nb_gaps > 1:
            return "W" if mean_value < 0 else "M"
        return "V" if mean_value < 0 else "N"

    # returns a value 0 < value < 1: the higher the stronger is the pattern 
Example 5
Project: TheVentureCity   Author: dksmith01   File: growth_accounting.py    GNU General Public License v3.0 6 votes vote down vote up
def calc_user_qr(row, new_col = 'new', res_col = 'resurrected', churned_col = 'churned'):
    """Compute the user quick ratio for one row of growth-accounting figures.

    Missing or NaN columns count as zero. Churn is expected as a negative
    number; when no churn occurred (churned >= 0) the ratio is undefined and
    math.nan is returned.
    """
    def _count(col):
        # A column that is absent or NaN contributes nothing.
        return row[col] if hasattr(row, col) and pd.notnull(row[col]) else 0

    new_users = _count(new_col)
    res_users = _count(res_col)
    churned_users = _count(churned_col)

    if churned_users < 0:
        return -1 * (new_users + res_users) / churned_users
    return math.nan




### This takes one or more rows of growth accounting figures for a specific
### date and calculates the user quick ratio
### It is used when calculating rolling quick ratio 
Example 6
Project: TheVentureCity   Author: dksmith01   File: growth_accounting.py    GNU General Public License v3.0 6 votes vote down vote up
def calc_rev_qr(row, new_col = 'new', res_col = 'resurrected', 
                churned_col = 'churned', exp_col = 'expansion', 
                con_col = 'contraction'):
    """Compute the revenue quick ratio for one row of growth-accounting figures.

    Missing or NaN columns count as zero. Churned and contraction revenue are
    expected as negative numbers; when their sum is not negative the ratio is
    undefined and math.nan is returned.
    """
    def _amount(col):
        # A column that is absent or NaN contributes nothing.
        return row[col] if hasattr(row, col) and pd.notnull(row[col]) else 0

    gained = _amount(new_col) + _amount(res_col) + _amount(exp_col)
    lost = _amount(churned_col) + _amount(con_col)

    if lost < 0:
        return -1 * gained / lost
    return math.nan




### This takes a dataframe of transactions grouped by a particular date period
### and returns active, retained, new, resurrected, and churned users for that
### time period 
Example 7
Project: graphql-core-next   Author: graphql-python   File: test_is_invalid.py    MIT License 6 votes vote down vote up
def describe_is_invalid():
    """pytest-describe suite: is_invalid() is True only for INVALID and NaN."""
    def null_is_not_invalid():
        assert is_invalid(None) is False

    def falsy_objects_are_not_invalid():
        # Falsy is not the same as invalid.
        assert is_invalid("") is False
        assert is_invalid(0) is False
        assert is_invalid([]) is False
        assert is_invalid({}) is False

    def truthy_objects_are_not_invalid():
        assert is_invalid("str") is False
        assert is_invalid(1) is False
        assert is_invalid([0]) is False
        assert is_invalid({None: None}) is False

    def inf_is_not_invalid():
        assert is_invalid(inf) is False
        assert is_invalid(-inf) is False

    def undefined_is_invalid():
        # INVALID is the library's "undefined" sentinel.
        assert is_invalid(INVALID) is True

    def nan_is_invalid():
        assert is_invalid(nan) is True 
Example 8
Project: quilt   Author: Richienb   File: __init__.py    Apache License 2.0 6 votes vote down vote up
def constant(constanttype):
    """
    Get A Constant

    The name is case-insensitive; unknown names yield None, exactly like
    the original if/elif chain.
    """
    golden_ratio = (1 + 5**0.5) / 2
    named_constants = {
        'pi': math.pi,
        'e': math.e,
        'tau': math.tau,
        'inf': math.inf,
        'nan': math.nan,
        'phi': golden_ratio,
        'golden': golden_ratio,
    }
    return named_constants.get(constanttype.lower())
Example 9
Project: combine-FEVER-NSMN   Author: easonnie   File: doc_utils.py    MIT License 6 votes vote down vote up
def doc_f1(cls, d_list):
        """Mean document-retrieval F1 over d_list.

        Each item's F1 compares its 'predicted_docids' with the string doc ids
        found anywhere in its nested 'evidence' structure. Items with no gold
        evidence are skipped (their F1 is undefined). Returns math.nan when no
        item has gold evidence (previously this raised ZeroDivisionError).
        """
        def single_f1(item):
            docid_predicted = set(item['predicted_docids'])
            # Gold ids: every string found in the 3-level evidence nesting.
            docid_gt = {iii for i in item['evidence']
                            for ii in i
                            for iii in ii
                            if type(iii) == str}
            if len(docid_gt) == 0:
                return math.nan
            docid_intersect = docid_predicted & docid_gt
            return 2 * len(docid_intersect) / (len(docid_gt) + len(docid_predicted))

        score_list = [s for s in map(single_f1, d_list) if not math.isnan(s)]
        if not score_list:
            # BUG FIX: averaging an empty list raised ZeroDivisionError.
            return math.nan
        return sum(score_list) / len(score_list)
Example 10
Project: recommandation-film   Author: sambiak   File: utilisateur.py    GNU General Public License v3.0 6 votes vote down vote up
def reccomandation(self, x):
        """Return the name of the best-scoring film the user has not rated.

        Builds a 1 x n_films row y of the user's known ratings (nan = unrated),
        scores every film as t = x . theta^T, and keeps the unrated film with
        the highest score.
        NOTE(review): the debug print below reports n_max *before* it is
        updated, so the printed "note" is the previous best — confirm intent.
        """
        # Known ratings; nan marks films this user has not rated yet.
        y = np.array([[math.nan] * nombre_films()])
        for key in self.films:
            y[0][self.conv.renvoyer_index(key)] = self.films[key]
        max_i = 0
        n_max = 0
        # Predicted score for every film.
        t = np.dot(x, self._theta.T)
        print(t)
        for i, el in enumerate(y[0]):
            # Only unrated films (el is nan) compete for the recommendation.
            if np.isnan(el) and t[i, 0] > n_max:
                print("film : ", self.conv.renvoyer_nom_index(i), "note :", n_max)
                n_max = t[i, 0]
                max_i = i
        print(t)
        print(self._theta)
        return self.conv.renvoyer_nom_index(max_i) 
Example 11
Project: recommandation-film   Author: sambiak   File: utilisateur.py    GNU General Public License v3.0 6 votes vote down vote up
def reccomandation(self, x):
        """Return the name of the best-scoring film the user has not rated.

        Builds a 1 x n_films row y of the user's known ratings (nan = unrated),
        scores every film as t = x . theta^T, and keeps the unrated film with
        the highest score.
        NOTE(review): the debug print below reports n_max *before* it is
        updated, so the printed "note" is the previous best — confirm intent.
        """
        # Known ratings; nan marks films this user has not rated yet.
        y = np.array([[math.nan] * nombre_films()])
        for key in self.films:
            y[0][self.conv.renvoyer_index(key)] = self.films[key]
        max_i = 0
        n_max = 0
        # Predicted score for every film.
        t = np.dot(x, self._theta.T)
        print(t)
        for i, el in enumerate(y[0]):
            # Only unrated films (el is nan) compete for the recommendation.
            if np.isnan(el) and t[i, 0] > n_max:
                print("film : ", self.conv.renvoyer_nom_index(i), "note :", n_max)
                n_max = t[i, 0]
                max_i = i
        print(t)
        print(self._theta)
        return self.conv.renvoyer_nom_index(max_i) 
Example 12
Project: aetherling   Author: David-Durst   File: pnr.py    MIT License 6 votes vote down vote up
def percent_vs_base(results_pd, column_name, index_of_p_1_row):
    """Append a '<column_name>ratio' column: each value of column_name divided
    by the value at row position index_of_p_1_row, rendered as "(x.yz)".

    Cells are left blank when either side is missing ("nan"), zero ("0"),
    or the LaTeX placeholder "\\red{X}".
    Mutates results_pd in place and returns it.
    """
    p_1_value = results_pd[column_name].iloc[index_of_p_1_row]

    def get_ratio(num):
        # Skip placeholder / missing / zero entries on either side.
        if num == "\\red{X}" or str(num) == "nan" or p_1_value == "\\red{X}" or str(p_1_value) == "nan" or \
                num == "0" or p_1_value == "0":
            return ""
        else:
            return "(" + str(round((float(num) / float(p_1_value)), 2)) + ")"

    results_pd[column_name + "ratio"] = results_pd[column_name].apply(get_ratio)
    return results_pd
Example 13
Project: yTermPlayer   Author: TimeTraveller-San   File: music_api.py    GNU General Public License v3.0 6 votes vote down vote up
def get_next_index(self):
        """Compute, store and return the index of the next track to play.

        Depends on self._random, self.repeat_mode (3 = repeat playlist,
        2 = repeat single song, otherwise no repeat) and self.queue_len.
        In no-repeat mode the end of the queue yields math.nan.
        """
        if not isinstance(self.index, int):
            # Recover from a corrupted index.
            self.index = 0
        if self._random:
            # NOTE(review): randint(1, n - 1) can never pick track 0 — confirm.
            self.next_index = randint(1, int(self.queue_len) - 1)
            return int(self.next_index)
        self.index = int(self.index)
        if self.repeat_mode == 3:
            # Repeat whole playlist: wrap around at the end.
            self.next_index = 0 if self.index == self.queue_len - 1 else self.index + 1
        elif self.repeat_mode == 2:
            # Repeat current song.
            self.next_index = self.index
        else:
            # No repeat: nan signals "nothing left to play".
            self.next_index = math.nan if self.index == self.queue_len - 1 else self.index + 1
        return self.next_index
Example 14
Project: hail   Author: hail-is   File: test_expr.py    MIT License 6 votes vote down vote up
def test_comparisons(self):
        """Float comparisons in Hail expressions: any comparison against
        missing (null) is missing (None); NaN is unequal even to itself."""
        f0 = hl.float(0.0)
        fnull = hl.null(tfloat)
        finf = hl.float(float('inf'))
        fnan = hl.float(float('nan'))

        # Comparisons with missing propagate missingness.
        self.check_expr(f0 == fnull, None, tbool)
        self.check_expr(f0 < fnull, None, tbool)
        self.check_expr(f0 != fnull, None, tbool)

        # NaN never compares equal; ordinary values do.
        self.check_expr(fnan == fnan, False, tbool)
        self.check_expr(f0 == f0, True, tbool)
        self.check_expr(finf == finf, True, tbool)

        self.check_expr(f0 < finf, True, tbool)
        self.check_expr(f0 > finf, False, tbool)

        # Ordered comparisons involving NaN are always False.
        self.check_expr(fnan <= finf, False, tbool)
        self.check_expr(fnan >= finf, False, tbool) 
Example 15
Project: hail   Author: hail-is   File: test_expr.py    MIT License 6 votes vote down vote up
def test_nan_inf_checks(self):
        """hl.is_finite / is_infinite / is_nan over finite, infinite, NaN and
        missing inputs; a missing input yields a missing (None) result."""
        finite = 0
        infinite = float('inf')
        nan = math.nan
        na = hl.null('float64')

        assert hl.eval(hl.is_finite(finite)) == True
        assert hl.eval(hl.is_finite(infinite)) == False
        assert hl.eval(hl.is_finite(nan)) == False
        assert hl.eval(hl.is_finite(na)) == None

        assert hl.eval(hl.is_infinite(finite)) == False
        assert hl.eval(hl.is_infinite(infinite)) == True
        # NaN is neither finite nor infinite.
        assert hl.eval(hl.is_infinite(nan)) == False
        assert hl.eval(hl.is_infinite(na)) == None

        assert hl.eval(hl.is_nan(finite)) == False
        assert hl.eval(hl.is_nan(infinite)) == False
        assert hl.eval(hl.is_nan(nan)) == True
        assert hl.eval(hl.is_nan(na)) == None 
Example 16
Project: otone_frontend   Author: Opentrons   File: pipette.py    Apache License 2.0 6 votes vote down vote up
def calibrate(self, property_, value):
        """Set a given pipette property to a value

        'top'/'bottom'/'blowout'/'droptip' are stored as plain attributes;
        'tip_racks' is extended with the given items; 'trash_container' is
        replaced by them. A None value is ignored.
        """
        # ToDo: probably need to utilize None instead of math.nan here
        if value is not None and property_ in ('top', 'bottom', 'blowout', 'droptip'):
            # if it's a top or blowout value, save it
            self.__dict__[property_] = value
        elif value is not None and property_ == 'tip_racks':
            self.tip_racks.extend(value)
            logger.debug('new tip-racks: {}'.format(self.tip_racks))
        elif value is not None and property_ == 'trash_container':
            self.trash_container = []
            self.trash_container.extend(value)
            logger.debug('new trash_container: {}'.format(self.trash_container))
Example 17
Project: elektronn3   Author: ELEKTRONN   File: trainer.py    MIT License 6 votes vote down vote up
def _log_to_history_tracker(self, stats: Dict, misc: Dict) -> None:
        """Update history tracker and plot stats (kind of made obsolete by tensorboard)"""
        # TODO: Decide what to do with this, now that most things are already in tensorboard.
        # Gain = previously recorded loss minus the current mean training loss
        # (history column 2 presumably holds tr_loss — confirm against update_history below).
        if self._tracker.history.length > 0:
            tr_loss_gain = self._tracker.history[-1][2] - np.mean(stats['tr_loss'])
        else:
            tr_loss_gain = 0
        # Accuracy may be absent or empty (e.g. regression runs): fall back to NaN.
        if not stats.get('tr_accuracy'):
            tr_accuracy = nan
        else:
            tr_accuracy = np.nanmean(stats['tr_accuracy'])
        val_accuracy = stats.get('val_accuracy', nan)
        self._tracker.update_history([
            self.step, self._timer.t_passed, np.mean(stats['tr_loss']), np.mean(stats['val_loss']),
            tr_loss_gain, tr_accuracy, val_accuracy, misc['learning_rate'], 0, 0
        ])
        # Plot tracker stats to pngs in save_path
        self._tracker.plot(self.save_path) 
Example 18
Project: PEtab   Author: ICB-DCM   File: test_measurements.py    MIT License 6 votes vote down vote up
def test_concat_measurements():
    """petab.concat_measurements must take the union of columns, padding
    missing cells with NaN, and accept DataFrames and file names alike."""
    a = pd.DataFrame({'measurement': [1.0]})
    b = pd.DataFrame({'time': [1.0]})

    # Persist `a` so the second call can mix a file path with a DataFrame.
    # NOTE(review): delete=False leaves the temp file behind after the test.
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
        filename_a = fh.name
        a.to_csv(fh, sep='\t', index=False)

    expected = pd.DataFrame({
        'measurement': [1.0, nan],
        'time': [nan, 1.0]
    })

    assert expected.equals(petab.concat_measurements([a, b]))

    assert expected.equals(petab.concat_measurements([filename_a, b])) 
Example 19
Project: outfancy   Author: carlosplanchon   File: chart.py    Creative Commons Zero v1.0 Universal 6 votes vote down vote up
def get_char_slope(self, slope, color, color_number):
        """We generate a point character to plot based on slope.

        NaN slopes (vertical segments) map to '|'; otherwise the character
        follows the steepness, optionally wrapped in ANSI color codes.
        """
        # BUG FIX: `slope is nan` only matched the one shared nan object, so
        # NaN values computed elsewhere slipped through. NaN is the only value
        # unequal to itself, so this self-inequality catches every NaN.
        if slope != slope:
            return '|'
        if slope > 2 or slope < -2:
            point_character = '|'
        elif slope > .7:
            point_character = '/'
        elif slope < -.7:
            point_character = '\\'
        else:
            point_character = '—'

        if color:
            point_character = f'\x1b[1;{color_number}m'\
                f'{point_character}\x1b[0;99m'
        return point_character
Example 20
Project: inflation_calc   Author: EricSchles   File: predict.py    GNU General Public License v3.0 6 votes vote down vote up
def predict(df,steps):
    """Forecast `steps` periods ahead from a yearly time series.

    Years missing between the first and last index year are inserted as NaN
    and interpolated before an ARIMA model (order chosen by brute_search —
    presumably a grid search, confirm) is fit.
    Returns (forecast_values, last_year).
    NOTE(review): Series.set_value was removed in pandas 1.0 — confirm the
    pinned pandas version or migrate to .at/.loc.
    """
    start = df.index[0].year
    end = df.index[-1].year
    years_captured = [idx.year for idx in df.index]
    years_inclusive = [elem for elem in range(start, end+1)]
    s = df.T.squeeze()
    # Insert NaN placeholders for any missing years, then interpolate them.
    for year in years_inclusive:
        if not year in years_captured:
            s = s.set_value(datetime.datetime(year=year,month=1,day=1),math.nan)
    s.sort_index(inplace=True)
    s = s.interpolate()
    data = s.to_frame()
    model_order = brute_search(data)
    model_order = tuple([int(elem) for elem in model_order])
    model = sm.tsa.ARIMA(data, model_order).fit(disp=0)
    return model.forecast(steps=steps)[0], end 
Example 21
Project: inflation_calc   Author: EricSchles   File: predict.py    GNU General Public License v3.0 6 votes vote down vote up
def predict(df,steps):
    """Forecast `steps` periods ahead from a yearly time series (verbose copy).

    Years missing between the first and last index year are inserted as NaN
    and interpolated before an ARIMA model (order chosen by brute_search —
    presumably a grid search, confirm) is fit.
    Returns (forecast_values, last_year).
    NOTE(review): Series.set_value was removed in pandas 1.0 — confirm the
    pinned pandas version or migrate to .at/.loc.
    """
    print("started function")
    start = df.index[0].year
    end = df.index[-1].year
    years_captured = [idx.year for idx in df.index]
    years_inclusive = [elem for elem in range(start, end+1)]
    s = df.T.squeeze()
    # Insert NaN placeholders for any missing years, then interpolate them.
    for year in years_inclusive:
        if not year in years_captured:
            s = s.set_value(datetime.datetime(year=year,month=1,day=1),math.nan)
    s.sort_index(inplace=True)
    s = s.interpolate()
    data = s.to_frame()
    print("loaded data")
    model_order = brute_search(data)
    model_order = tuple([int(elem) for elem in model_order])
    print("found model order")
    model = sm.tsa.ARIMA(data, model_order).fit(disp=0)
    print("fit model")
    return model.forecast(steps=steps)[0], end 
Example 22
Project: otone_backend   Author: Opentrons   File: pipette.py    Apache License 2.0 6 votes vote down vote up
def calibrate(self, property_, value):
        """Set a given pipette property to a value

        'top'/'bottom'/'blowout'/'droptip' are stored as plain attributes;
        'tip_racks' is extended with the given items; 'trash_container' is
        replaced by them. A None value is ignored.
        """
        logger.debug('pipette.calibrate called')
        logger.debug('\tproperty_: {0} , value: {1}'.format(property_,value))
        # ToDo: probably need to utilize None instead of math.nan here
        if value is not None and property_ in ('top', 'bottom', 'blowout', 'droptip'):
            # if it's a top or blowout value, save it
            self.__dict__[property_] = value
        elif value is not None and property_ == 'tip_racks':
            self.tip_racks.extend(value)
            logger.debug('new tip-racks: {}'.format(self.tip_racks))
        elif value is not None and property_ == 'trash_container':
            self.trash_container = []
            self.trash_container.extend(value)
            logger.debug('new trash_container: {}'.format(self.trash_container))
Example 23
Project: ROS-Code   Author: Richienb   File: maths.py    Apache License 2.0 6 votes vote down vote up
def constant(constanttype):
    """Return the named math constant (case-insensitive); None when unknown."""
    golden_ratio = (1 + 5**0.5) / 2
    named_constants = {
        'pi': math.pi,
        'e': math.e,
        'tau': math.tau,
        'inf': math.inf,
        'nan': math.nan,
        'phi': golden_ratio,
        'golden': golden_ratio,
    }
    return named_constants.get(constanttype.lower())


# Find The Power Of A Number 
Example 24
Project: RTX   Author: RTXteam   File: ReasoningUtilities.py    MIT License 5 votes vote down vote up
def refine_omims_well_studied(omims, doid, omim_to_mesh, q1_doid_to_mesh, verbose=False):
	"""
	Subset given omims to those that are well studied (have low google distance between the source
	omim mesh name and the target doid mesh name)
	:param omims:
	:param doid:
	:param omim_to_mesh:
	:param q1_doid_to_mesh:
	:param verbose:
	:return: list of well-studied omims, sorted by ascending google distance
	"""
	# Getting well-studied omims
	omims_GD = list()
	for omim in omims:  # only the on the prioritized ones
		omim_descr = get_node_property(omim, "name", node_label="disease")
		doid_descr = get_node_property(doid, "name", node_label="disease")
		res = NormGoogleDistance.get_ngd_for_all([omim, doid], [omim_descr, doid_descr])
		omims_GD.append((omim, res))
	well_studied_omims = list()
	for tup in omims_GD:
		# BUG FIX: `tup[1] != math.nan` is always True (NaN never compares
		# equal to anything); use math.isnan to actually express the intent.
		if not math.isnan(tup[1]) and tup[1] > 0:
			well_studied_omims.append(tup)
	well_studied_omims = [item[0] for item in sorted(well_studied_omims, key=lambda tup: tup[1])]
	if verbose:
		print("Found %d well-studied omims" % len(well_studied_omims))
	return well_studied_omims
Example 25
Project: TGC-Designer-Tools   Author: chadrockey   File: infill_image.py    Apache License 2.0 5 votes vote down vote up
def apply_mask(np_array, mask, invalid_value=math.nan):
    """Overwrite np_array in place with invalid_value wherever mask < 1,
    then return the same (mutated) array."""
    invalid_positions = mask < 1
    np_array[invalid_positions] = invalid_value
    return np_array
Example 26
Project: mabwiser   Author: fmr-llc   File: simulator.py    Apache License 2.0 5 votes vote down vote up
def _predict_contexts(self, contexts: np.ndarray, is_predict: bool,
                          seeds: Optional[np.ndarray] = None, start_index: Optional[int] = None) -> List:
        """Simulate radius-neighborhood predictions for each context row.

        A row's neighbors are the historical contexts whose precomputed
        distance (self.distances) lies within self.radius; rows with no
        neighbors fall back to _get_no_nhood_predictions.
        Returns one [prediction, expectations, n_neighbors, stats] entry per
        context row.
        """
        # Copy learning policy object
        lp = deepcopy(self.lp)

        # Create an empty list of predictions
        predictions = [None] * len(contexts)

        # For each row in the given contexts
        for index, row in enumerate(contexts):

            # Get random generator
            lp.rng = np.random.RandomState(seeds[index])

            # Calculate the distances from the historical contexts
            # Row is 1D so convert it to 2D array for cdist using newaxis
            # Finally, reshape to flatten the output distances list
            row_2d = row[np.newaxis, :]
            distances_to_row = self.distances[start_index + index]

            # Find the neighbor indices within the radius
            # np.where with a condition returns a tuple where the first element is an array of indices
            indices = np.where(distances_to_row <= self.radius)

            # If neighbors exist
            if indices[0].size > 0:

                prediction, exp, stats = self._get_nhood_predictions(lp, row_2d, indices, is_predict)
                predictions[index] = [prediction, exp, len(indices[0]), stats]

            else:  # When there are no neighbors

                # Random arm (or nan expectations)
                prediction = self._get_no_nhood_predictions(lp, is_predict)
                predictions[index] = [prediction, {}, 0, {}]

        # Return the list of predictions
        return predictions 
Example 27
Project: mabwiser   Author: fmr-llc   File: simulator.py    Apache License 2.0 5 votes vote down vote up
def _get_no_nhood_predictions(self, lp, is_predict):
        if is_predict:
            # if no_nhood_prob_of_arm is None, select a random int
            # else, select a non-uniform random arm
            # choice returns an array, hence get zero index
            rand_int = lp.rng.choice(len(self.arms), 1, p=self.no_nhood_prob_of_arm)[0]
            return self.arms[rand_int]
        else:
            # Expectations will be nan when there are no neighbors
            return self.arm_to_expectation.copy() 
Example 28
Project: OctoBot-Trading   Author: Drakkar-Software   File: kline_manager.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def _reset_kline(self):
        """Blank the current candle: one shared-nan slot per PriceIndexes entry."""
        self.kline = [nan for _ in range(len(PriceIndexes))]
Example 29
Project: OctoBot-Trading   Author: Drakkar-Software   File: kline_manager.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def kline_update(self, kline):
        """Merge an incoming kline (candle) update into self.kline.

        Fields still holding the shared `nan` object from _reset_kline are
        treated as "unset" — the `is nan` identity checks rely on that exact
        object, not on NaN equality. Volume accumulates, close always follows
        the latest update, high/low keep their extremes.
        """
        try:
            # test for new candle
            if self.kline[PriceIndexes.IND_PRICE_TIME.value] != kline[PriceIndexes.IND_PRICE_TIME.value]:
                self._reset_kline()

            if self.kline[PriceIndexes.IND_PRICE_TIME.value] is nan:
                self.kline[PriceIndexes.IND_PRICE_TIME.value] = kline[PriceIndexes.IND_PRICE_TIME.value]

            # NOTE(review): open is seeded from the update's *close* price —
            # confirm this is intended for the partial-kline feed.
            if self.kline[PriceIndexes.IND_PRICE_OPEN.value] is nan:
                self.kline[PriceIndexes.IND_PRICE_OPEN.value] = kline[PriceIndexes.IND_PRICE_CLOSE.value]

            # Volume: initialize on first update, accumulate afterwards.
            if self.kline[PriceIndexes.IND_PRICE_VOL.value] is nan:
                self.kline[PriceIndexes.IND_PRICE_VOL.value] = kline[PriceIndexes.IND_PRICE_VOL.value]
            else:
                self.kline[PriceIndexes.IND_PRICE_VOL.value] += kline[PriceIndexes.IND_PRICE_VOL.value]

            self.kline[PriceIndexes.IND_PRICE_CLOSE.value] = kline[PriceIndexes.IND_PRICE_CLOSE.value]

            # High/low: keep the running extremes.
            if self.kline[PriceIndexes.IND_PRICE_HIGH.value] is nan or \
                    self.kline[PriceIndexes.IND_PRICE_HIGH.value] < kline[PriceIndexes.IND_PRICE_HIGH.value]:
                self.kline[PriceIndexes.IND_PRICE_HIGH.value] = kline[PriceIndexes.IND_PRICE_HIGH.value]

            if self.kline[PriceIndexes.IND_PRICE_LOW.value] is nan or \
                    self.kline[PriceIndexes.IND_PRICE_LOW.value] > kline[PriceIndexes.IND_PRICE_LOW.value]:
                self.kline[PriceIndexes.IND_PRICE_LOW.value] = kline[PriceIndexes.IND_PRICE_LOW.value]
        except TypeError as e:
            self.logger.error(f"Fail to update kline with {kline} : {e}") 
Example 30
Project: OctoBot-Trading   Author: Drakkar-Software   File: exchange_market_status_fixer.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def is_ms_valid(value, zero_valid=False):
    """True when value is a usable market-status number: present (not None and
    not the module's shared `nan` sentinel object) and positive — or merely
    non-negative when zero_valid is set."""
    if value is None or value is nan:
        return False
    return value >= 0 if zero_valid else value > 0
Example 31
Project: OctoBot-Trading   Author: Drakkar-Software   File: trading_mode_orders.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def add_dusts_to_quantity_if_necessary(quantity, price, symbol_market, current_symbol_holding):
    """
    Adds remaining quantity to the order if the remaining quantity is too small
    :param quantity: planned order quantity
    :param price: order price
    :param symbol_market: market status dict (Ecmsc keys)
    :param current_symbol_holding: total amount of the asset currently held
    :return: current_symbol_holding when the leftover would be un-sellable dust,
        otherwise the unchanged quantity
    """
    # Amount that would remain in the portfolio after this order.
    remaining_portfolio_amount = float("{1:.{0}f}".format(CURRENCY_DEFAULT_MAX_PRICE_DIGITS,
                                                          current_symbol_holding - quantity))
    remaining_max_total_order_price = remaining_portfolio_amount * price

    symbol_market_limits = symbol_market[Ecmsc.LIMITS.value]

    limit_amount = symbol_market_limits[Ecmsc.LIMITS_AMOUNT.value]
    limit_cost = symbol_market_limits[Ecmsc.LIMITS_COST.value]

    # Repair missing/invalid exchange limits before relying on them.
    if not (is_valid(limit_amount, Ecmsc.LIMITS_AMOUNT_MIN.value) and
            is_valid(limit_cost, Ecmsc.LIMITS_COST_MIN.value)):
        fixed_market_status = ExchangeMarketStatusFixer(symbol_market, price).get_market_status()
        limit_amount = fixed_market_status[Ecmsc.LIMITS.value][Ecmsc.LIMITS_AMOUNT.value]
        limit_cost = fixed_market_status[Ecmsc.LIMITS.value][Ecmsc.LIMITS_COST.value]

    # nan default: comparisons below are then False, so dust is NOT added.
    min_quantity = get_value_or_default(limit_amount, Ecmsc.LIMITS_AMOUNT_MIN.value, math.nan)
    min_cost = get_value_or_default(limit_cost, Ecmsc.LIMITS_COST_MIN.value, math.nan)

    # check with 40% more than remaining total not to require huge market moves to sell this asset
    min_cost_to_consider = min_cost * 1.4
    min_quantity_to_consider = min_quantity * 1.4

    if remaining_max_total_order_price < min_cost_to_consider \
            or remaining_portfolio_amount < min_quantity_to_consider:
        return current_symbol_holding
    else:
        return quantity 
Example 32
Project: OctoBot-Trading   Author: Drakkar-Software   File: __test_exchange_market_status_fixer.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_exchange_market_status_fixer_without_price_amount(self):
        """The fixer must derive the missing price limits: min/max cost divided
        by min/max amount yields the price bounds (0.05/0.03 and 1e7/1e4).
        NOTE(review): this calls .market_status() while the module elsewhere
        uses .get_market_status() — confirm the API name."""
        ms = {
            Ecmsc.PRECISION.value: self._get_precision(5, 5, 5),
            Ecmsc.LIMITS.value: self._get_limits(nan, None, 0.03, 1e4, 0.05, 1e7)
        }

        assert ExchangeMarketStatusFixer(ms).market_status() == {
            Ecmsc.PRECISION.value: self._get_precision(5, 5, 5),
            Ecmsc.LIMITS.value: self._get_limits(0.05 / 0.03, 1e7 / 1e4, 0.03, 1e4, 0.05, 1e7)
        }

    # Limits 
Example 33
Project: OctoBot-Trading   Author: Drakkar-Software   File: __test_exchange_market_status_fixer.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_fix_market_status_limits(self):
        """check_market_status_limits must reject limit sets containing None,
        NaN or zero entries and accept a fully-populated positive set."""
        assert not check_market_status_limits(self._get_limits(4, None, None, 1000, 56, 45))
        assert not check_market_status_limits(self._get_limits(9, None, 5066, 1000, 56, nan))
        assert not check_market_status_limits(self._get_limits(8, nan, 789, 1000, nan, 45))
        assert not check_market_status_limits(self._get_limits(0, 0, 789, 1000, 0, 45))
        assert check_market_status_limits(self._get_limits(12, 2752783, 242, 1000, 56, 45)) 
Example 34
Project: OctoBot-Trading   Author: Drakkar-Software   File: __test_exchange_market_status_fixer.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_check_market_status_values(self):
        """check_market_status_values must reject value lists containing None,
        NaN or zero and accept an all-positive list."""
        assert not check_market_status_values([78272, None, None, 5e-10, 100, 0.12])
        assert not check_market_status_values([78272, None, nan, 5e-10, 100, 0.12])
        assert not check_market_status_values([78272, nan, nan, 5e-10, 100, 0.12])
        assert not check_market_status_values([78272, 0, 0, 5e-10, 100, 0.12])
        assert check_market_status_values([17, 78272, 79, 5e-10, 145, 100]) 
Example 35
Project: arbc-solidity   Author: OffchainLabs   File: floatlib.py    Apache License 2.0 5 votes vote down vote up
def gt(vm):
    """Emit float greater-than; leaves a boolean result on the emulator stack.

    NOTE(review): depends on cmp()'s three-way result encoding and its NaN
    sentinel (see the "cmp nan" marker below) — confirm the exact stack
    layout against cmp() before reordering these instructions.
    """
    cmp(vm)
    # cmp nan
    vm.push(1)
    vm.sgt()
    vm.bitwise_or()
    vm.iszero() 
Example 36
Project: arbc-solidity   Author: OffchainLabs   File: floatlib.py    Apache License 2.0 5 votes vote down vote up
def lt(vm):
    """Emit float less-than; leaves a boolean result on the emulator stack.

    Mirror of gt() with -1/slt instead of 1/sgt.
    NOTE(review): depends on cmp()'s three-way result encoding and its NaN
    sentinel (see the "cmp nan" marker below) — confirm against cmp().
    """
    cmp(vm)
    # cmp nan
    vm.push(-1)
    vm.slt()
    vm.bitwise_or()
    vm.iszero() 
Example 37
Project: TheVentureCity   Author: dksmith01   File: growth_accounting.py    GNU General Public License v3.0 5 votes vote down vote up
def add_period_n_cum_inc_per_cohort_cust_columns(cohort_df, since_col, unit):
    """Pivot cum_inc_per_cohort_cust into one column per period: for every
    distinct value n of since_col, add a '<unit> n' column holding the
    cumulative figure on matching rows and NaN everywhere else.
    Works on (and returns) a copy of cohort_df."""
    c_df = cohort_df.copy()
    for period in c_df[since_col].unique():
        column = unit + ' %s' % period
        # Zero out non-matching rows, then turn those zeros into NaN.
        c_df[column] = c_df['cum_inc_per_cohort_cust'] * (c_df[since_col] == period)
        c_df[column] = c_df[column].replace(0, np.nan)
    return c_df
    


### Calculate the user retention by cohort defined by any weekly or monthly time period 
Example 38
Project: scitail   Author: allenai   File: features_field.py    Apache License 2.0 5 votes vote down vote up
def as_array(self, padding_lengths: Dict[str, int]) -> numpy.array:
        """Pad self.features to padding_lengths['num_features'] entries
        (padding with NaN) and return them as a float32 numpy array."""
        target_length = padding_lengths['num_features']
        padded_features = pad_sequence_to_length(self.features, target_length,
                                                 (lambda: math.nan))
        return numpy.asarray(padded_features, dtype=numpy.float32)
Example 39
Project: scitail   Author: allenai   File: features_field.py    Apache License 2.0 5 votes vote down vote up
def empty_field(self):
        """Return a FeaturesField of equal length filled with NaN placeholders."""
        return FeaturesField([math.nan for _ in self.features])
Example 40
Project: graphql-core-next   Author: graphql-python   File: test_is_finite.py    MIT License 5 votes vote down vote up
def describe_is_finite():
    """pytest-describe suite for is_finite(): only real numeric values that
    are neither NaN nor infinite (and not booleans) count as finite."""
    def null_is_not_finite():
        assert is_finite(None) is False

    def booleans_are_not_finite():
        # they should not be considered as integers 0 and 1
        assert is_finite(False) is False
        assert is_finite(True) is False

    def strings_are_not_finite():
        assert is_finite("string") is False

    def ints_are_finite():
        assert is_finite(0) is True
        assert is_finite(1) is True
        assert is_finite(-1) is True
        # BUG FIX: was ``1 >> 100``, which is just 0 (already covered above);
        # a large shifted-left int exercises the arbitrary-precision path.
        assert is_finite(1 << 100) is True

    def floats_are_finite():
        assert is_finite(0.0) is True
        assert is_finite(1.5) is True
        assert is_finite(-1.5) is True
        assert is_finite(1e100) is True
        assert is_finite(-1e100) is True
        assert is_finite(1e-100) is True

    def nan_is_not_finite():
        assert is_finite(nan) is False

    def inf_is_not_finite():
        assert is_finite(inf) is False
        assert is_finite(-inf) is False

    def undefined_is_not_finite():
        assert is_finite(INVALID) is False
Example 41
Project: quilt   Author: Richienb   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def isfalse(variable):
    """
    Check if a variable is essentially "False"

    :type variable: variable
    :param variable: The variable to check

    NaN counts as "False". The original membership test only matched the
    ``math.nan`` singleton by identity, so ``float('nan')`` slipped through;
    NaN is now detected explicitly with math.isnan.
    """
    # NaN never compares equal to anything (including itself), so the list
    # membership test below cannot catch it by equality — test it explicitly.
    if isinstance(variable, float) and math.isnan(variable):
        return True

    # Return the answer
    return variable in [0, 0.0, False, [], {}, "", (), None]
Example 42
Project: redis-memory-analyzer   Author: gamenet   File: Set.py    MIT License 5 votes vote down vote up
def analyze(self, keys, total=0):
        """Build per-pattern memory/TTL statistics for Redis sets.

        *keys* maps a key pattern to the list of key names matching it.
        Returns a dict with a ``headers`` row and a ``data`` table sorted by
        the 'Total' column (index 8), with an aggregate 'Total:' row appended.
        """
        report = {
            'headers': ['Match', "Count", "Avg Count", "Value mem", "Real", "Ratio", "System*", "Encoding", "Total", "TTL Min", "TTL Max", "TTL Avg."],
            'data': []
        }

        bar = tqdm(total=total, mininterval=1, desc="Processing Set patterns", leave=False)

        for pattern, entries in keys.items():
            aggregated = SetAggregator(
                progress_iterator((SetStatEntry(name, self.redis) for name in entries), bar),
                len(entries))

            used = aggregated.valueUsedBytes
            report['data'].append([
                pattern,
                len(entries),
                aggregated.fieldAvgCount,
                used,
                aggregated.valueAlignedBytes,
                # Guard against division by zero for empty/unused patterns.
                aggregated.valueAlignedBytes / (used if used > 0 else 1),
                aggregated.system,
                aggregated.encoding,
                aggregated.total,
                aggregated.ttlMin,
                aggregated.ttlMax,
                aggregated.ttlAvg,
            ])

        # Largest 'Total' first, then the aggregate row at the bottom.
        report['data'].sort(key=lambda row: row[8], reverse=True)
        report['data'].append(make_total_row(report['data'], ['Total:', sum, 0, sum, sum, 0, sum, '', sum, min, max, math.nan]))

        bar.close()

        return report
Example 43
Project: redis-memory-analyzer   Author: gamenet   File: List.py    MIT License 5 votes vote down vote up
def analyze(self, keys, total=0):
        """Build per-pattern memory/TTL statistics for Redis lists.

        *keys* maps a key pattern to the list of key names matching it.
        Returns a dict with a ``headers`` row and a ``data`` table sorted by
        the 'Ratio' column (index 8), with an aggregate 'Total:' row appended.
        """
        report = {
            'headers': ['Match', "Count", "Avg Count", "Min Count", "Max Count", "Stdev Count", "Value mem", "Real", "Ratio", "System", "Encoding", "Total", 'TTL Min', 'TTL Max', 'TTL Avg'],
            'data': []
        }

        bar = tqdm(total=total, mininterval=1, desc="Processing List patterns", leave=False)

        for pattern, entries in keys.items():
            aggregated = ListAggregator(
                progress_iterator((ListStatEntry(name, self.redis) for name in entries), bar),
                len(entries))

            used = aggregated.valueUsedBytes
            report['data'].append([
                pattern,
                len(entries),
                aggregated.fieldAvgCount,
                aggregated.fieldMinCount,
                aggregated.fieldMaxCount,
                aggregated.fieldStdev,
                used,
                aggregated.valueAlignedBytes,
                # Guard against division by zero for empty/unused patterns.
                aggregated.valueAlignedBytes / (used if used > 0 else 1),
                aggregated.system,
                aggregated.encoding,
                aggregated.valueAlignedBytes + aggregated.system,
                aggregated.ttlMin,
                aggregated.ttlMax,
                aggregated.ttlAvg,
            ])
            bar.update()

        # Largest 'Ratio' first, then the aggregate row at the bottom.
        report['data'].sort(key=lambda row: row[8], reverse=True)
        report['data'].append(make_total_row(report['data'], ['Total:', sum, 0, 0, 0, 0, sum, sum, 0, sum, '', sum, min, max, math.nan]))

        bar.close()

        return report
Example 44
Project: carme   Author: CarmeLabs   File: cleandata.py    MIT License 5 votes vote down vote up
def removeColumns(df, columns):
    """For each positional column index in *columns*: NaN-out the values of
    that column on rows where its companion column (the next column,
    position ``column + 1``) is 0, then drop the companion column.

    :param df: the DataFrame to clean (mutated in place, also returned)
    :param columns: iterable of integer column positions
    :returns: the cleaned DataFrame

    BUG FIX: the original per-row chained indexing
    (``df.at[index][df[column + 1]]`` / assignment through ``df.at[index][...]``)
    neither addressed a cell correctly nor wrote back to the frame; it is
    replaced with a single vectorised boolean-mask assignment.
    NOTE(review): as in the original, positions in *columns* are interpreted
    against the frame as it shrinks — passing multiple indices shifts later
    positions; confirm callers pass one index at a time.
    """
    for column in columns:
        value_col = df.columns[column]
        flag_col = df.columns[column + 1]
        df.loc[df[flag_col] == 0, value_col] = math.nan
        df = df.drop(columns=[flag_col])
    return df
Example 45
Project: recommandation-film   Author: sambiak   File: utilisateur.py    GNU General Public License v3.0 5 votes vote down vote up
def theta(self, x, etapes):
        """Run ``etapes`` gradient-descent steps on this user's theta and return it.

        Builds a 1 x nombre_films() rating row seeded with NaN (unrated films),
        fills in the user's known ratings, then iterates the gradient step.
        """
        ratings = np.array([[math.nan] * nombre_films()])
        for film_key, note in self.films.items():
            ratings[0][self.conv.renvoyer_index(film_key)] = note
        for _ in range(etapes):
            self._theta = etape_du_gradient(ratings, 0.0001, self._theta, x)
        return self._theta
Example 46
Project: recommandation-film   Author: sambiak   File: utilisateur.py    GNU General Public License v3.0 5 votes vote down vote up
def theta(self, x, etapes):
        """Run ``etapes`` gradient-descent steps on this user's theta and return it.

        Starts from a 1 x nombre_films() NaN row (unrated films), writes the
        user's known ratings into it, then applies the gradient step repeatedly.
        """
        ratings = np.full((1, nombre_films()), math.nan)
        for film_key in self.films:
            ratings[0, self.conv.renvoyer_index(film_key)] = self.films[film_key]
        for _ in range(etapes):
            self._theta = etape_du_gradient(ratings, 0.0001, self._theta, x)
        return self._theta
Example 47
Project: torchsim   Author: GoodAI   File: ball_env.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, params: BallEnvironmentParams, name: str = "BallEnvironment"):
        """Build the bouncing-ball environment graph.

        Wiring: ball bitmap -> ``outputs.data``; ball one-hot label ->
        switch input 0; a constant NaN vector (size ``params.n_shapes``) ->
        switch input 1; switch output -> ``outputs.label``.
        NOTE(review): presumably the switch lets the label be replaced by
        NaNs (e.g. to hide labels during testing) — confirm against
        SwitchNode's semantics.
        """
        super().__init__(params, name)

        # Translate environment-level params into the ball node's own params.
        bouncing_params = SimpleBouncingBallNodeParams(sx=params.env_size[0],
                                                       sy=params.env_size[1],
                                                       ball_radius=params.ball_radius,
                                                       ball_shapes=params.shapes,
                                                       dir_x=1,
                                                       dir_y=2,
                                                       noise_amplitude=params.noise_amplitude,
                                                       switch_next_shape_after=params.switch_shape_after,
                                                       random_position_direction_switch_after=
                                                       params.random_position_direction_switch_after
                                                       )

        ball_node = SimpleBouncingBallNode(bouncing_params)

        self.add_node(ball_node)
        self.ball_node = ball_node

        # The rendered bitmap is the environment's observation stream.
        Connector.connect(ball_node.outputs.bitmap, self.outputs.data.input)

        # Two-way switch: real labels vs. a NaN placeholder vector.
        switch_node = SwitchNode(2)
        self.add_node(switch_node)
        self.switch_node = switch_node

        nan_node = ConstantNode(params.n_shapes, math.nan)
        self.add_node(nan_node)

        Connector.connect(ball_node.outputs.label_one_hot, switch_node.inputs[0])
        Connector.connect(nan_node.outputs.output, switch_node.inputs[1])
        Connector.connect(switch_node.outputs.output, self.outputs.label.input)
Example 48
Project: torchsim   Author: GoodAI   File: space_engineers_env.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, params: SeEnvironmentParams, name: str="SEEnvironment"):
        """Build the Space Engineers objects-dataset environment graph.

        Wiring: dataset image -> ``outputs.data``; dataset label -> switch
        input 0; a constant NaN vector (size ``params.n_shapes``) -> switch
        input 1; switch output -> ``outputs.label``.
        NOTE(review): presumably the switch lets the label be replaced by
        NaNs (mirrors BallEnvironment above) — confirm against SwitchNode.

        :raises ValueError: if ``params.env_size`` is not (24, 24, 3), the
            only dataset size wired up here.
        """
        super().__init__(params, name)

        env_params = DatasetSeObjectsParams()
        if params.env_size == (24, 24, 3):
            env_params.dataset_size = SeDatasetSize.SIZE_24
        else:
            raise ValueError(f"Param env_size {params.env_size} is not supported.")

        # Restrict the dataset to the requested object classes.
        env_params.class_filter = params.shapes
        se_node = DatasetSeObjectsNode(env_params)

        self.add_node(se_node)
        self.se_node = se_node

        # The dataset image is the environment's observation stream.
        Connector.connect(se_node.outputs.image_output, self.outputs.data.input)

        # Two-way switch: real labels vs. a NaN placeholder vector.
        switch_node = SwitchNode(2)
        self.add_node(switch_node)
        self.switch_node = switch_node

        nan_node = ConstantNode(params.n_shapes, math.nan)
        self.add_node(nan_node)

        Connector.connect(se_node.outputs.task_to_agent_label, switch_node.inputs[0])
        Connector.connect(nan_node.outputs.output, switch_node.inputs[1])
        Connector.connect(switch_node.outputs.output, self.outputs.label.input)
Example 49
Project: TSDK   Author: xinlingqudongX   File: tools.py    MIT License 5 votes vote down vote up
def encrypt(self,code):
        """Base64-encode *code* (a port of a JS-style implementation).

        Characters past the end of the string are treated as code point 0,
        which is how padding ('=') is decided.  Quirk preserved from the
        original: a literal NUL character in the final group is therefore
        indistinguishable from missing input and triggers padding.
        Only works for code points <= 255.
        """
        alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
        chunks = []
        pos = 0
        while pos < len(code):
            # Take up to three code points; missing ones become 0.
            first = ord(code[pos])
            second = ord(code[pos + 1]) if pos + 1 < len(code) else 0
            third = ord(code[pos + 2]) if pos + 2 < len(code) else 0
            pos += 3
            # Repack 3 x 8 bits into 4 x 6-bit alphabet indices.
            idx0 = first >> 2
            idx1 = (3 & first) << 4 | second >> 4
            idx2 = (15 & second) << 2 | third >> 6
            idx3 = 63 & third
            if second == 0:
                # Only one input char in this group: two '=' pads.
                idx2 = idx3 = 64
            elif third == 0:
                # Two input chars: one '=' pad.
                idx3 = 64
            chunks.append(alphabet[idx0] + alphabet[idx1] + alphabet[idx2] + alphabet[idx3])
        return ''.join(chunks)
Example 50
Project: freecad.turns   Author: joelgraff   File: line_segment.py    GNU Lesser General Public License v2.1 5 votes vote down vote up
def get_intersection(self, segment):
        """
        Calculate the intersection of two (infinite) lines through the given
        segments, via the cross-product / determinant form.

        :param segment: the other line segment
        :returns: (x, y) intersection point, or (nan, nan) when the lines
            are parallel (zero determinant)
        """

        # Line through self as coefficients (dx, -dy, c).
        _lhs = TupleMath.subtract(self.end, self.start)[0:2]
        _lhs = TupleMath.multiply(_lhs, (1.0, -1.0))

        _lhs += (sum(TupleMath.multiply(_lhs, self.start)),)

        # Line through the other segment, same construction.
        _rhs = TupleMath.subtract(segment.end, segment.start)[0:2]
        # BUG FIX: the original multiplied _lhs here (copy-paste error),
        # corrupting the second line's coefficients.
        _rhs = TupleMath.multiply(_rhs, (1.0, -1.0))

        _rhs += (sum(TupleMath.multiply(_rhs, segment.start)),)

        _determinant = TupleMath.cross(_lhs, _rhs, (0, 0, 1))[2]

        # Zero determinant -> parallel lines, no unique intersection.
        if not _determinant:
            return (math.nan, math.nan)

        _intersection = (
            TupleMath.cross(_lhs, _rhs, (1, 0, 0))[0],
            TupleMath.cross(_lhs, _rhs, (0, 1, 0))[1]
        )

        return TupleMath.scale(_intersection, 1.0 / _determinant)
Example 51
Project: altair-transform   Author: altair-viz   File: vegaexpr.py    MIT License 5 votes vote down vote up
def vectorize(func: Callable) -> Callable:
    """Wrap *func* so it broadcasts element-wise over pandas Series arguments.

    If no argument is a Series, *func* is called directly. Otherwise the
    wrapper evaluates *func* once per label in the union of all Series
    indexes; Series missing a label contribute NaN, scalars are repeated.
    Returns a Series over the union index. Annotations are widened to
    ``Union[pd.Series, original]``.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        series_args = [
            arg
            for arg in itertools.chain(args, kwargs.values())
            if isinstance(arg, pd.Series)
        ]
        if not series_args:
            return func(*args, **kwargs)
        else:
            # BUG FIX: use Index.union explicitly. The original
            # ``reduce(operator.or_, ...)`` relied on Index.__or__ acting as
            # a set union, which pandas deprecated (1.4) and changed to an
            # elementwise logical operation in 2.0.
            index = reduce(lambda a, b: a.union(b), [s.index for s in series_args])

            def _get(x, i):
                # Labels absent from a Series become NaN; scalars pass through.
                return x.get(i, math.nan) if isinstance(x, pd.Series) else x

            return pd.Series(
                [
                    func(
                        *(_get(arg, i) for arg in args),
                        **{k: _get(v, i) for k, v in kwargs.items()},
                    )
                    for i in index
                ],
                index=index,
            )

    if hasattr(func, "__annotations__"):
        wrapper.__annotations__ = {
            key: Union[pd.Series, val] for key, val in func.__annotations__.items()
        }
    return wrapper


# Type Checking Functions 
Example 52
Project: aetherling   Author: David-Durst   File: pnr.py    MIT License 5 votes vote down vote up
def fix_clock_ae(x_str):
    """Render an Aetherling slack figure as a clock-rate string (MHz).

    "nan" (string or float NaN) renders as a red X.  Otherwise *x_str* is a
    number with a two-character unit suffix (e.g. "ns"): positive slack means
    the base period was met; negative slack stretches the period by |slack|.
    """
    if str(x_str) == "nan":
        return "\\red{X}"
    slack = float(x_str[:-2])
    period = base_ns_ae if slack > 0 else base_ns_ae - slack
    return str(round(1000 / period))
Example 53
Project: aetherling   Author: David-Durst   File: pnr.py    MIT License 5 votes vote down vote up
def fix_clock_hth(x_str):
    """Render a Halide-to-Hardware slack figure as a clock-rate string (MHz).

    "nan" (string or float NaN) renders as a red X.  Otherwise *x_str* is a
    number with a two-character unit suffix (e.g. "ns"): positive slack means
    the base period was met; negative slack stretches the period by |slack|.
    """
    if str(x_str) == "nan":
        return "\\red{X}"
    slack = float(x_str[:-2])
    period = base_ns_hth if slack > 0 else base_ns_hth - slack
    return str(round(1000 / period))
Example 54
Project: aetherling   Author: David-Durst   File: pnr.py    MIT License 5 votes vote down vote up
def fix_clock_sp(x_str):
    """Render a Spatial slack figure as a clock-rate string (MHz).

    "nan" (string or float NaN) renders as a red X.  Otherwise *x_str* is a
    number with a two-character unit suffix (e.g. "ns"): positive slack means
    the base period was met; negative slack stretches the period by |slack|.
    """
    if str(x_str) == "nan":
        return "\\red{X}"
    slack = float(x_str[:-2])
    period = base_ns_sp if slack > 0 else base_ns_sp - slack
    return str(round(1000 / period))
Example 55
Project: aetherling   Author: David-Durst   File: pnr_graphs.py    MIT License 5 votes vote down vote up
def fix_clock(x_str):
    """Render a slack figure as a clock-rate string (MHz).

    "nan" (string or float NaN) renders as a red X.  Otherwise *x_str* is a
    number with a two-character unit suffix (e.g. "ns"): positive slack means
    the base period was met; negative slack stretches the period by |slack|.
    """
    if str(x_str) == "nan":
        return "\\red{X}"
    slack = float(x_str[:-2])
    period = base_ns if slack > 0 else base_ns - slack
    return str(round(1000 / period))
Example 56
Project: malib   Author: ying-wen   File: test_tabular_input.py    MIT License 5 votes vote down vote up
def test_record_misc_stat_nan(self):
        """Recording a None stat must produce NaN for every summary metric."""
        self.tabular.record_misc_stat('none', None)

        correct = {
            'noneAverage': math.nan,
            'noneStd': math.nan,
            'noneMedian': math.nan,
            'noneMin': math.nan,
            'noneMax': math.nan
        }
        for k, v in self.tabular.as_dict.items():
            # BUG FIX: the original asserted ``correct[k] is math.nan``, which
            # inspects the expectation dict itself and is vacuously true; the
            # recorded value ``v`` was never checked.  Keep the key check
            # (unexpected keys must still fail) and verify the value is NaN.
            assert k in correct
            assert math.isnan(v)
Example 57
Project: Python-PHP-Bridge   Author: blyxxyz   File: __init__.py    ISC License 5 votes vote down vote up
def decode(self, data: dict) -> Any:
        """Convert a JSON message from the PHP bridge into a Python value.

        Scalars pass through; PHP's special doubles map to math.inf/-inf/nan;
        arrays become Array objects (recursively decoded); objects and
        resources resolve through the bridge's registries; 'bytes' payloads
        are base64-decoded and surrogate-escaped back into str.
        :raises RuntimeError: for an unrecognised type tag.
        """
        kind = data['type']
        payload = data['value']
        if kind in {'string', 'integer', 'NULL', 'boolean'}:
            return payload
        if kind == 'double':
            if payload == 'INF':
                return math.inf
            if payload == '-INF':
                return -math.inf
            if payload == 'NAN':
                return math.nan
            return payload
        if kind == 'array':
            if isinstance(payload, list):
                return Array.list(map(self.decode, payload))
            if isinstance(payload, dict):
                return Array((key, self.decode(item))
                             for key, item in payload.items())
        elif kind == 'object':
            cls = self.get_class(payload['class'])
            return self.get_object(cls, payload['hash'])
        elif kind == 'resource':
            return self.get_resource(payload['type'], payload['hash'])
        elif kind == 'bytes':
            # PHP's strings are just byte arrays.  Decoding to bytes would be
            # problematic: the payload might be a legitimate string, and some
            # binary data could be valid unicode by accident.
            raw = base64.b64decode(payload)
            return raw.decode(errors='surrogateescape')
        raise RuntimeError("Unknown type {!r}".format(kind))
Example 58
Project: opentaps_seas   Author: opentaps   File: hnum.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def to_num(cls, val, error=False):
        """Coerce *val* to a number.

        None becomes NaN.  Strings are stripped to digits, sign and decimal
        point, then parsed as float; on parse failure the original string is
        returned (or the exception re-raised when *error* is True).  Other
        values pass through unchanged.

        BUG FIX: the strip pattern was ``[^0-9-]``, which also removed the
        decimal point — "74.5" parsed as 745.0.  The point is now kept.
        """
        if val is None:
            return math.nan
        if isinstance(val, str):
            try:
                val = float(re.sub(r"[^0-9.-]", "", val))
            except Exception:
                if error:
                    raise
                pass
        return val

    # Private constructor
Example 59
Project: dump1090-exporter   Author: claws   File: exporter.py    MIT License 5 votes vote down vote up
def process_stats(
        self, stats: dict, time_periods: Sequence[str] = ("last1min",)
    ) -> None:
        """ Process dump1090 statistics into exported metrics.

        :param stats: a dict containing dump1090 statistics data.
        :param time_periods: the dump1090 period buckets to export
            (e.g. "last1min"); periods missing from *stats* are logged
            and skipped.
        """
        metrics = self.metrics["stats"]  # type: Dict[str, Dict[str, Gauge]]

        for time_period in time_periods:
            try:
                tp_stats = stats[time_period]
            except KeyError:
                logger.exception(f"Problem extracting time period: {time_period}")
                continue

            labels = dict(time_period=time_period)

            for key in metrics:
                # An empty key means the metric lives at the top level of the
                # time-period dict rather than under a sub-dict.
                d = tp_stats[key] if key else tp_stats
                for name, metric in metrics[key].items():
                    try:
                        value = d[name]
                        # 'accepted' values are in a list
                        if isinstance(value, list):
                            value = value[0]
                    except KeyError:
                        # 'signal' and 'peak_signal' are not present if
                        # there are no aircraft.
                        if name not in ["peak_signal", "signal"]:
                            key_str = " {} ".format(key) if key else " "
                            logger.warning(
                                f"Problem extracting{key_str}item '{name}' from: {d}"
                            )
                        # Export NaN so the gauge still receives a sample.
                        value = math.nan
                    metric.set(labels, value)
Example 60
Project: yTermPlayer   Author: TimeTraveller-San   File: music_api.py    GNU General Public License v3.0 5 votes vote down vote up
def get_prev_index(self):
        """Return the index of the previous song, or math.nan when at the start.

        A non-int ``self.index`` is reset to 0 first.  Also stores the result
        in ``self.prev_index``.

        BUG FIX: the original validated via ``assert``/``except
        AssertionError``, which is silently stripped under ``python -O``;
        an explicit isinstance check preserves the behaviour unconditionally.
        """
        if not isinstance(self.index, int):
            self.index = 0
        if self.index <= 0:
            self.prev_index = math.nan
        else:
            self.prev_index = self.index - 1
        return self.prev_index
Example 61
Project: hail   Author: hail-is   File: test_expr.py    MIT License 5 votes vote down vote up
def test_agg_minmax(self):
        """agg.min/agg.max must ignore NaN and missing values."""
        nan_value = float('nan')
        missing = hl.null(hl.tfloat32)
        n = 200
        for agg_fn in (agg.min, agg.max):
            # Only index 1 holds a real number; every other slot is NaN / NA.
            nan_array = hl.array([0. if idx == 1 else nan_value for idx in range(n)])
            na_array = hl.array([0. if idx == 1 else missing for idx in range(n)])
            tbl = hl.utils.range_table(n)
            self.assertEqual(tbl.aggregate(agg_fn(nan_array[tbl.idx])), 0.)
            self.assertEqual(tbl.aggregate(agg_fn(na_array[tbl.idx])), 0.)
Example 62
Project: hail   Author: hail-is   File: test_expr.py    MIT License 5 votes vote down vote up
def test_str_parsing(self):
        """String parsing: booleans, NaN/inf spellings, ints and floats."""
        for text in ('true', 'True', 'TRUE'):
            self.assertTrue(hl.eval(hl.bool(text)))

        for text in ('false', 'False', 'FALSE'):
            self.assertFalse(hl.eval(hl.bool(text)))

        all_float_fns = (hl.float, hl.float32, hl.float64, hl.parse_float32, hl.parse_float64)

        for text in ('nan', 'Nan', 'naN', 'NaN'):
            for conv in all_float_fns:
                self.assertTrue(hl.eval(hl.is_nan(conv(text))))
                self.assertTrue(hl.eval(hl.is_nan(conv('+' + text))))
                self.assertTrue(hl.eval(hl.is_nan(conv('-' + text))))

        for text in ('inf', 'Inf', 'iNf', 'InF', 'infinity', 'InfiNitY', 'INFINITY'):
            for conv in all_float_fns:
                self.assertTrue(hl.eval(hl.is_infinite(conv(text))))
                self.assertTrue(hl.eval(hl.is_infinite(conv('+' + text))))
                self.assertTrue(hl.eval(hl.is_infinite(conv('-' + text))))
                self.assertTrue(hl.eval(conv('-' + text) < 0.0))

        int_fns = (hl.int32, hl.int64, hl.parse_int32, hl.parse_int64)
        typed_float_fns = (hl.float32, hl.float64, hl.parse_float32, hl.parse_float64)

        for text in ('0', '1', '-5', '12382421'):
            for conv in int_fns:
                self.assertEqual(hl.eval(conv(hl.literal(text))), int(text))
            for conv in typed_float_fns:
                self.assertEqual(hl.eval(conv(hl.literal(text))), float(text))

        for text in ('-1.5', '0.0', '2.5'):
            for conv in typed_float_fns:
                self.assertEqual(hl.eval(conv(hl.literal(text))), float(text))
            # Non-integral strings must not parse as ints.
            for conv in (hl.parse_int32, hl.parse_int64):
                self.assertEqual(hl.eval(conv(hl.literal(text))), None)

        for text in ('abc', '1abc', ''):
            for conv in (hl.parse_float32, hl.parse_float64, hl.parse_int32, hl.parse_int64):
                self.assertEqual(hl.eval(conv(hl.literal(text))), None)
Example 63
Project: hail   Author: hail-is   File: test_expr.py    MIT License 5 votes vote down vote up
def test_nan_roundtrip(self):
        """NaN and ±inf must survive a literal round trip through hail."""
        values = [math.nan, math.inf, -math.inf, 0, 1]
        evaluated = hl.eval(hl.literal(values))
        self.assertTrue(math.isnan(evaluated[0]))
        self.assertTrue(math.isinf(evaluated[1]))
        self.assertTrue(math.isinf(evaluated[2]))
        self.assertEqual(evaluated[-2:], [0, 1])
Example 64
Project: dowel   Author: rlworkgroup   File: test_tabular_input.py    MIT License 5 votes vote down vote up
def test_record_misc_stat_nan(self):
        """Recording a None stat must produce NaN for every summary metric."""
        self.tabular.record_misc_stat('none', None)

        correct = {
            'noneAverage': math.nan,
            'noneStd': math.nan,
            'noneMedian': math.nan,
            'noneMin': math.nan,
            'noneMax': math.nan
        }
        for k, v in self.tabular.as_dict.items():
            # BUG FIX: the original asserted ``correct[k] is math.nan``, which
            # inspects the expectation dict itself and is vacuously true; the
            # recorded value ``v`` was never checked.  Keep the key check
            # (unexpected keys must still fail) and verify the value is NaN.
            assert k in correct
            assert math.isnan(v)
Example 65
Project: sensibility   Author: naturalness   File: test_clamp.py    Apache License 2.0 5 votes vote down vote up
def test_clamp_nan() -> None:
    """Clamp must reject NaN input by raising FloatingPointError."""
    import math
    with pytest.raises(FloatingPointError):
        clamp(math.nan)
Example 66
Project: texar-pytorch   Author: asyml   File: regression.py    Apache License 2.0 5 votes vote down vote up
def value(self) -> float:
        """Return the Pearson correlation of the accumulated (x, y) sums.

        0.0 when no samples were seen; NaN when either variable has zero
        variance (correlation undefined).
        """
        if self.count == 0:
            return 0.0
        n = self.count
        covariance = self.xy_sum - self.x_sum * self.y_sum / n
        variance_x = self.x2_sum - self.x_sum ** 2 / n
        variance_y = self.y2_sum - self.y_sum ** 2 / n
        if variance_x == 0.0 or variance_y == 0.0:
            return math.nan
        return covariance / math.sqrt(variance_x * variance_y)
Example 67
Project: nanovna-saver   Author: mihtjel   File: SITools.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self,
                 value: Union[Number, str] = 0,
                 unit: str = "",
                 fmt=Format()):
        """Wrap a number (or parseable string) with a unit and display format.

        Strings are parsed via self.parse() after seeding the value with NaN;
        numeric inputs are stored as Decimal using the class-wide context.
        NOTE(review): ``fmt=Format()`` is a default evaluated once at class
        definition — all instances created without *fmt* share one Format
        object; assumed intentional (treated as immutable config) — confirm.
        """
        # Sanity bounds on the format configuration.
        assert 3 <= fmt.max_nr_digits <= 30
        assert -8 <= fmt.min_offset <= fmt.max_offset <= 8
        assert fmt.parse_clamp_min < fmt.parse_clamp_max
        self._unit = unit
        self.fmt = fmt
        if isinstance(value, str):
            # Seed with NaN so a failed parse leaves a defined value.
            self._value = math.nan
            self.parse(value)
        else:
            self._value = decimal.Decimal(value, context=Value.CTX)
Example 68
Project: RTX   Author: RTXteam   File: QueryNCBIeUtils.py    MIT License 4 votes vote down vote up
def normalized_google_distance(mesh1_str, mesh2_str, mesh1=True, mesh2=True):
        """
        returns the normalized Google distance for two MeSH terms
        :param mesh1_str: search string for the first term
        :param mesh2_str: search string for the second term
        :param mesh1: flag if mesh1_str is a MeSH term
        :param mesh2: flag if mesh2_str is a MeSH term
        :returns: NGD, as a float (or math.nan if any counts are zero or
            missing — note the docstring's "None if HTTP error" does not match
            the code below, which maps None counts to math.nan)
        """

        if mesh1:  # checks mesh flag then converts to mesh term search
            mesh1_str_decorated = mesh1_str + '[MeSH Terms]'
        else:
            mesh1_str_decorated = mesh1_str

        if mesh2:  # checks mesh flag then converts to mesh term search
            mesh2_str_decorated = mesh2_str + '[MeSH Terms]'
        else:
            mesh2_str_decorated = mesh2_str

        if mesh1 and mesh2:
            # Single joint query returns all three counts at once.
            [nij, ni, nj] = QueryNCBIeUtils.get_pubmed_hits_count('({mesh1}) AND ({mesh2})'.format(mesh1=mesh1_str_decorated,
                                                                                     mesh2=mesh2_str_decorated),joint=True)
            # NOTE(review): on failure, get_pubmed_hits_count apparently
            # returns the offending decorated term(s) as strings; [:-12]
            # strips the 12-char '[MeSH Terms]' suffix and the query is
            # retried undecorated.  The equality conditions look inverted
            # (they strip only when the decorated term equals ni/nj) —
            # confirm against get_pubmed_hits_count's contract.
            if type(ni) == str:
                if mesh1_str_decorated == ni:
                    mesh1_str_decorated = ni[:-12]
                if mesh2_str_decorated == nj:
                    mesh2_str_decorated = nj[:-12]
                [nij, ni, nj] = QueryNCBIeUtils.get_pubmed_hits_count('({mesh1}) AND ({mesh2})'.format(mesh1=mesh1_str_decorated,
                                                                                         mesh2=mesh2_str_decorated), joint=True)

        else:
            # Mixed/plain terms: three separate count queries.
            nij = QueryNCBIeUtils.get_pubmed_hits_count('({mesh1}) AND ({mesh2})'.format(mesh1=mesh1_str_decorated,
                                                                                         mesh2=mesh2_str_decorated))
            ni = QueryNCBIeUtils.get_pubmed_hits_count('{mesh1}'.format(mesh1=mesh1_str_decorated))
            nj = QueryNCBIeUtils.get_pubmed_hits_count('{mesh2}'.format(mesh2=mesh2_str_decorated))
            # A zero count for a decorated term may mean the decoration was
            # wrong; retry with the raw (undecorated) term(s).
            if (ni == 0 and mesh1) or (nj == 0 and mesh2):
                if (ni == 0 and mesh1):
                    mesh1_str_decorated = mesh1_str
                if (nj == 0 and mesh2):
                    mesh2_str_decorated = mesh2_str
                nij = QueryNCBIeUtils.get_pubmed_hits_count('({mesh1}) AND ({mesh2})'.format(mesh1=mesh1_str_decorated,
                                                                                         mesh2=mesh2_str_decorated))
                ni = QueryNCBIeUtils.get_pubmed_hits_count('{mesh1}'.format(mesh1=mesh1_str_decorated))
                nj = QueryNCBIeUtils.get_pubmed_hits_count('{mesh2}'.format(mesh2=mesh2_str_decorated))
        N = 2.7e+7 * 20  # from PubMed home page there are 27 million articles; avg 20 MeSH terms per article
        if ni is None or nj is None or nij is None:
            return math.nan
        if ni == 0 or nj == 0 or nij == 0:
            return math.nan
        # Standard NGD formula over log article counts.
        numerator = max(math.log(ni), math.log(nj)) - math.log(nij)
        denominator = math.log(N) - min(math.log(ni), math.log(nj))
        ngd = numerator/denominator
        return ngd
Example 69
Project: RTX   Author: RTXteam   File: QueryNCBIeUtils.py    MIT License 4 votes vote down vote up
def multi_normalized_google_distance(name_list, mesh_flags = None):
        """
        returns the normalized Google distance for a list of n MeSH Terms
        :param name_list: a list of strings containing search terms for each node
        :param mesh_flags: a list of boolean values indicating which terms need [MeSH Terms] appended to it.
        :returns: NGD, as a float (or math.nan if any counts are zero or
            missing, or None on mismatched input list lengths)
        """

        if mesh_flags is None:
            mesh_flags = [True]*len(name_list)
        elif len(name_list) != len(mesh_flags):
            print('Warning: mismatching lengths for input lists of names and flags returning None...')
            return None

        search_string='('

        if sum(mesh_flags) == len(mesh_flags):
            # All terms are MeSH: one query returns the joint count plus the
            # individual term counts (n_terms=len(name_list)).
            search_string += '[MeSH Terms]) AND ('.join(name_list) + '[MeSH Terms])'
            counts = QueryNCBIeUtils.multi_pubmed_hits_count(search_string, n_terms=len(name_list))
        else:
            # Mixed terms: build the joint query manually, then fetch each
            # term's count with a separate single-term query.
            for a in range(len(name_list)):
                search_string += name_list[a]
                if mesh_flags[a]:
                    search_string += "[MeSH Terms]"
                if a < len(name_list)-1:
                    search_string += ') AND ('
            search_string += ')'
            counts = QueryNCBIeUtils.multi_pubmed_hits_count(search_string, n_terms =1)
            for a in range(len(name_list)):
                name = name_list[a]
                if mesh_flags[a]:
                    name += "[MeSH Terms]"
                counts += QueryNCBIeUtils.multi_pubmed_hits_count(name, n_terms = 1)

        # NOTE(review): a string in counts[1] apparently signals that some
        # decorated terms failed ('null_flag' meaning all of them); the
        # per-term counts are then re-fetched, undecorated for the terms that
        # failed.  Confirm against multi_pubmed_hits_count's contract.
        if type(counts[1]) == str:
            if counts[1] == 'null_flag':
                missed_names = [name + '[MeSH Terms]' for name in name_list]
            else:
                missed_names = counts[1:]
            counts = [counts[0]]
            for name in name_list:
                name_decorated = name + '[MeSH Terms]'
                if name_decorated in missed_names:
                    counts += QueryNCBIeUtils.multi_pubmed_hits_count(name, n_terms=1)
                else:
                    counts += QueryNCBIeUtils.multi_pubmed_hits_count(name_decorated, n_terms=1)

        N = 2.7e+7 * 20  # from PubMed home page there are 27 million articles; avg 20 MeSH terms per article
        if None in counts:
            return math.nan
        if 0 in counts:
            return math.nan
        # Multi-term NGD: counts[0] is the joint count, counts[1:] the
        # individual term counts.
        numerator = max([math.log(x) for x in counts[1:]]) - math.log(counts[0])
        denominator = math.log(N) - min([ math.log(x) for x in counts[1:]])
        ngd = numerator/denominator
        return ngd
Example 70
Project: graphql-core-next   Author: graphql-python   File: test_is_nullish.py    MIT License 4 votes vote down vote up
def describe_is_nullish():
    """pytest-describe suite for is_nullish(): only None, INVALID and NaN
    are nullish; falsy values and NaN-like irreflexive objects are not.
    (Inner function names are the collected test names — do not rename.)"""
    def null_is_nullish():
        assert is_nullish(None) is True

    def falsy_objects_are_not_nullish():
        assert is_nullish("") is False
        assert is_nullish(0) is False
        assert is_nullish([]) is False
        assert is_nullish({}) is False

    def truthy_objects_are_not_nullish():
        assert is_nullish("str") is False
        assert is_nullish(1) is False
        assert is_nullish([0]) is False
        assert is_nullish({None: None}) is False

    def inf_is_not_nullish():
        assert is_nullish(inf) is False
        assert is_nullish(-inf) is False

    def undefined_is_nullish():
        assert is_nullish(INVALID) is True

    def nan_is_nullish():
        assert is_nullish(nan) is True

    def irreflexive_objects_are_not_nullish():
        # Numpy arrays operate element-wise and the comparison operator returns arrays.
        # Similar to math.nan, they are therefore not equal to themselves. However, we
        # only want math.nan to be considered nullish, not values like numpy arrays.

        class IrreflexiveValue:
            def __eq__(self, other):
                return False

            def __bool__(self):
                return False

        value = IrreflexiveValue()
        assert value != value
        assert not value

        assert is_nullish(value) is False
Example 71
Project: redis-memory-analyzer   Author: gamenet   File: KeyString.py    MIT License 4 votes vote down vote up
def analyze(self, keys, total=0):
        """Aggregate memory and TTL statistics for string keys, per pattern.

        :param keys: mapping of key pattern -> list of dicts with at least
            ``name`` and ``ttl`` entries (one per matching Redis key).
        :param total: total key count, used only to size the progress bar.
        :return: dict with ``headers`` (column names) and ``data`` (one row
            per pattern, sorted by key count, plus a trailing totals row).
        """
        key_stat = {
            'headers': ['Match', "Count", "Useful", "Real", "Ratio", "Encoding", "Min", "Max", "Avg", "TTL Min", "TTL Max", "TTL Avg."],
            'data': []
        }

        progress = tqdm(total=total,
                        mininterval=1,
                        desc="Processing keys",
                        leave=False)

        for pattern, data in keys.items():
            # tee() lets one pass over the StringEntry objects feed several
            # independent aggregations without re-creating the entries.
            used_bytes_iter, aligned_iter, encoding_iter, ttl_iter = tee(
                    progress_iterator((StringEntry(value=x["name"], ttl=x["ttl"]) for x in data), progress), 4)

            total_elements = len(data)
            if total_elements == 0:
                continue

            aligned = sum(obj.aligned for obj in aligned_iter)
            used_bytes_generator = (obj.useful_bytes for obj in used_bytes_iter)
            useful_iter, min_iter, max_iter, mean_iter = tee(used_bytes_generator, 4)

            prefered_encoding = pref_encoding((obj.encoding for obj in encoding_iter), redis_encoding_id_to_str)
            min_value = min(min_iter)
            # statistics.mean() is pointless for a single element; fall back
            # to the minimum, which equals the only value.
            if total_elements < 2:
                avg = min_value
            else:
                avg = statistics.mean(mean_iter)

            used_user = sum(useful_iter)

            ttls = [obj.ttl for obj in ttl_iter]
            min_ttl = min(ttls)
            max_ttl = max(ttls)
            avg_ttl = statistics.mean(ttls) if len(ttls) > 1 else min(ttls)

            # Guard the Ratio against zero useful bytes (same pattern as the
            # Hash analyzer) so a pattern of empty values cannot raise
            # ZeroDivisionError.
            stat_entry = [
                pattern, total_elements, used_user, aligned,
                aligned / (used_user if used_user > 0 else 1), prefered_encoding,
                min_value, max(max_iter), avg, min_ttl, max_ttl, avg_ttl
            ]
            key_stat['data'].append(stat_entry)

        key_stat['data'].sort(key=lambda x: x[1], reverse=True)
        key_stat['data'].append(make_total_row(key_stat['data'], ['Total:', sum, sum, sum, 0, '', 0, 0, 0, min, max, math.nan]))

        progress.close()

        return key_stat
Example 72
Project: redis-memory-analyzer   Author: gamenet   File: Hash.py    MIT License 4 votes vote down vote up
def analyze(self, keys, total=0):
        """Build per-pattern hash statistics and append a totals row.

        :param keys: mapping of key pattern -> list of key descriptors.
        :param total: total key count, used only to size the progress bar.
        :return: dict with ``headers`` and ``data`` rows, sorted by the
            "Total aligned" column, plus a trailing totals row.
        """
        key_stat = {
            'headers': ['Match', "Count", "Avg field count", "Key mem", "Real", "Ratio", "Value mem", "Real", "Ratio",
                        "System", "Encoding", "Total mem", "Total aligned", "TTL Min", "TTL Max", "TTL Avg."],
            'data': []
        }

        progress = tqdm(total=total,
                        mininterval=1,
                        desc="Processing Hash patterns",
                        leave=False)

        for pattern, data in keys.items():
            entries = progress_iterator((HashStatEntry(x, self.redis) for x in data), progress)
            agg = HashAggregator(entries, len(data))

            # Substitute 1 for a zero byte count so the ratios stay defined.
            field_divisor = agg.fieldUsedBytes if agg.fieldUsedBytes > 0 else 1
            value_divisor = agg.valueUsedBytes if agg.valueUsedBytes > 0 else 1

            key_stat['data'].append([
                pattern,
                len(data),
                agg.fieldAvgCount,
                agg.fieldUsedBytes,
                agg.fieldAlignedBytes,
                agg.fieldAlignedBytes / field_divisor,
                agg.valueUsedBytes,
                agg.valueAlignedBytes,
                agg.valueAlignedBytes / value_divisor,
                agg.system,
                agg.encoding,
                agg.fieldUsedBytes + agg.valueUsedBytes,
                agg.fieldAlignedBytes + agg.valueAlignedBytes + agg.system,
                agg.ttlMin,
                agg.ttlMax,
                agg.ttlAvg,
            ])

        # Sort patterns by "Total aligned" (column index 12), largest first.
        key_stat['data'].sort(key=lambda row: row[12], reverse=True)
        key_stat['data'].append(
            make_total_row(key_stat['data'], ['Total:', sum, 0, sum, sum, 0, sum, sum, 0, sum, '', sum, sum, min, max, math.nan]))

        progress.close()

        return key_stat
Example 73
Project: torchsim   Author: GoodAI   File: l1_topology.py    Apache License 2.0 4 votes vote down vote up
def __init__(self):
        """Wire up a ball environment, an SP reconstruction layer, and a
        label switch (real label vs. NaN constant) on the 'cuda' device."""
        super().__init__('cuda')

        # Environment / expert hyperparameters.
        noise_amplitude: float = 0
        env_size: Tuple[int, int] = (27, 27)
        ball_radius: int = 5
        switch_shape_after = 200

        sp_n_cluster_centers = 200  # free

        ball_env_params = BallEnvironmentParams(
            switch_shape_after=switch_shape_after,
            noise_amplitude=noise_amplitude,
            ball_radius=ball_radius,
            env_size=env_size
        )

        ball_env = BallEnvironment(ball_env_params)
        self.add_node(ball_env)
        self.ball_env = ball_env

        # topmost layer
        ep_sp = ExpertParams()
        ep_sp.flock_size = 1
        ep_sp.n_cluster_centers = sp_n_cluster_centers
        # Input size is the flattened environment image (width * height).
        sp_reconstruction_layer = SpReconstructionLayer(env_size[0] * env_size[1],
                                                        ball_env_params.n_shapes,
                                                        sp_params=ep_sp, name="L0")
        self.add_node(sp_reconstruction_layer)
        self.sp_reconstruction_layer = sp_reconstruction_layer

        # Switch chooses between the environment's label (input 0) and a
        # constant NaN vector (input 1) — presumably to withhold labels when
        # not training; confirm against where switch_node is toggled.
        switch_node = SwitchNode(2)
        self.add_node(switch_node)
        self.switch_node = switch_node

        nan_node = ConstantNode(ball_env_params.n_shapes, math.nan)
        self.add_node(nan_node)

        Connector.connect(ball_env.outputs.data, sp_reconstruction_layer.inputs.data)

        Connector.connect(ball_env.outputs.label, switch_node.inputs[0])
        Connector.connect(nan_node.outputs.output, switch_node.inputs[1])
        Connector.connect(switch_node.outputs.output, sp_reconstruction_layer.inputs.label)

        self.is_training = True
Example 74
Project: aetherling   Author: David-Durst   File: pnr.py    MIT License 4 votes vote down vote up
def get_latex_from_results_str(results_file):
    """Render place-and-route results from a CSV as LaTeX tables.

    For every (system, application) pair the rows are normalized — missing
    parallelism entries filled in, sorted by parallelism, output columns
    selected — and emitted as one ``to_latex`` table.  Afterwards one
    comparison table per application merges the Aetherling results with the
    other two systems' results.

    :param results_file: path or buffer of the CSV produced by the PnR runs.
    :return: the concatenated LaTeX source as one string.
    """
    results = pd.read_csv(results_file)
    results['Clock Rate'] = nan
    results_tex_str = ""
    applications = ["map", "conv2d", "conv2d_b2b", "conv2d_b2b_3x3_repeat", "pyramid", "sharpen", "camera"]
    application_lengths = [200, 16, 16, 16, 64, 16, 200]
    # Row index of the parallelism == 1 entry, per application, for Aetherling
    # (systems[0]) vs. every other system.
    index_of_p_1_row_ae = [3, 2, 2, 2, 2, 2, 3]
    index_of_p_1_row_other = 0
    application_parallelisms = [[frac(1,8), frac(1,4), frac(1,2) , frac(1,1),frac(2,1),frac(4,1),frac(5,1),frac(8,1),frac(10,1),frac(20,1),frac(200,1)],
                                [frac(1,9), frac(1,3), frac(1,1), frac(2,1),frac(4,1),frac(8,1),frac(16,1)],
                                [frac(1,9), frac(1,3), frac(1,1), frac(2,1),frac(4,1),frac(8,1),frac(16,1)],
                                [frac(1,9), frac(1,3), frac(1,1), frac(2,1),frac(4,1),frac(8,1),frac(16,1)],
                                [frac(1,9), frac(1,3), frac(1,1), frac(2,1),frac(4,1),frac(8,1),frac(16,1),frac(32,1),frac(64,1)],
                                [frac(1,9), frac(1,3), frac(1,1), frac(2,1),frac(4,1),frac(8,1),frac(16,1)],
                                [frac(1,8), frac(1,4), frac(1,2) , frac(1,1),frac(2,1),frac(4,1),frac(5,1),frac(10,1),frac(20,1),frac(200,1)]]
    application_parallelisms_others = [[frac(1,1), frac(2,1), frac(4,1), frac(8, 1)],
                                       [frac(1,1), frac(2,1), frac(4,1), frac(8, 1)],
                                       [frac(1,1), frac(2,1), frac(4,1), frac(8, 1)],
                                       [frac(1,1), frac(2,1), frac(4,1), frac(8, 1)],
                                       [frac(1,1), frac(2,1), frac(4,1), frac(8, 1)],
                                       [frac(1,1), frac(2,1), frac(4,1), frac(8, 1)],
                                       [frac(1,1), frac(2,1), frac(4,1), frac(8, 1)]]
    per_system_per_application_results = []
    for i, system in enumerate(systems):
        per_system_results = []
        for j, app in enumerate(applications):
            start_per_app_per_system = results[(results.System == system) & (results.Application == app)]
            paper_parallelism = fix_parallelism(start_per_app_per_system, application_lengths[j])
            filled_in = add_missing_parallelisms(paper_parallelism, system, app, application_parallelisms[j] if i == 0 else application_parallelisms_others[j])
            sorted_by_parallelism = filled_in.sort_values("Parallelism")
            results_only_selected_columns = get_output_columns(sorted_by_parallelism, index_of_p_1_row_ae[j] if i == 0 else index_of_p_1_row_other, system)
            per_system_results.append(results_only_selected_columns)
        per_system_per_application_results.append(per_system_results)

    # Per-(system, application) tables.
    for i, system_per_app in enumerate(per_system_per_application_results):
        for j, app_pd in enumerate(system_per_app):
            results_tex_str += "System {}, App {}\n".format(systems[i], applications[j])
            results_tex_str += app_pd.to_latex(index=False, escape=False)
    # Per-application comparison tables across all systems.
    for app_idx in range(len(applications)):
        results_tex_str += "Comparison for App {}\n".format(applications[app_idx])
        ae_res = per_system_per_application_results[0][app_idx]
        # Restrict Aetherling rows to the parallelisms available for the other
        # systems.  Index by app_idx (was hard-coded to 0, which only worked
        # because every per-app list currently has identical contents).
        ae_res_for_comp = ae_res[ae_res.Parallelism.isin([int_if_not_nan(x) for x in application_parallelisms_others[app_idx]])]
        results_merged = merge_columns(
            ae_res_for_comp,
            per_system_per_application_results[1][app_idx],
            per_system_per_application_results[2][app_idx],
        ).reindex()  # NOTE(review): no-arg reindex() is effectively a copy — was reset_index() intended?
        results_tex_str += results_merged.to_latex(index=False, escape=False)
    return results_tex_str
Example 75
Project: COMETSC   Author: MSingerLab   File: hgmd.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def batch_fold_change(marker_exp, c_list, coi):
    """Applies log2 fold change to a gene expression matrix, gene by gene.

    :param marker_exp: A DataFrame whose rows are cell identifiers, columns are
        gene identifiers, and values are float values representing gene
        expression.
    :param c_list: A Series whose indices are cell identifiers, and whose
        values are the cluster which that cell is part of.
    :param coi: The cluster of interest.
    :returns: DataFrame with columns ``gene_1`` (gene id),
        ``Log2FoldChange`` and ``Log2FoldChangeAbs``.
    :rtype: pandas.DataFrame
    """

    def fold_change(col, c_list, coi):
        # A small epsilon keeps the ratio finite when a cluster mean is zero.
        mean0 = np.mean(col[c_list == coi]) + .000001
        mean1 = np.mean(col[c_list != coi]) + .000001
        # NOTE(review): after the epsilon, these zero checks can only fire if
        # a raw mean is exactly -1e-6; kept as a safety net.
        if mean0 == 0:
            return math.nan
        if mean1 == 0:
            return math.nan
        val = mean0 / mean1
        # A negative ratio (possible with negative expression values) would
        # make the subsequent log undefined, so take its magnitude.
        if val < 0:
            return abs(val)
        return val

    fc = marker_exp.apply(
        lambda col: math.log(fold_change(col, c_list, coi), 2)
    )
    # |log2(x)| is just the elementwise absolute value of fc, so reuse it
    # instead of recomputing every fold change a second time.
    fca = fc.abs()

    output = pd.DataFrame()
    output['gene_1'] = fc.index
    output['Log2FoldChange'] = fc.values
    output['Log2FoldChangeAbs'] = fca.values
    return output
Example 76
Project: hail   Author: hail-is   File: test_expr.py    MIT License 4 votes vote down vote up
def test_max(self):
        """Check hl.max / hl.nanmax over scalars, arrays and sets.

        Each (expression, expected) pair exercises missing-value (hl.null)
        and NaN propagation; all expressions are evaluated in a single
        hl.eval call and compared entry-wise, treating NaN == NaN as equal.
        """
        exprs_and_results = [
            (hl.max(1, 2), 2),
            (hl.max(1.0, 2), 2.0),
            (hl.max([1, 2]), 2),
            (hl.max([1.0, 2]), 2.0),
            (hl.max(0, 1.0, 2), 2.0),
            (hl.nanmax(0, 1.0, 2), 2.0),
            (hl.max(0, 1, 2), 2),
            (hl.max([0, 10, 2, 3, 4, 5, 6, ]), 10),
            (hl.max(0, 10, 2, 3, 4, 5, 6), 10),
            (hl.max([-5, -4, hl.null(tint32), -3, -2, hl.null(tint32)]), -2),
            (hl.max([float('nan'), -4, float('nan'), -3, -2, hl.null(tint32)]), float('nan')),
            (hl.max(0.1, hl.null('float'), 0.0), 0.1),
            (hl.max(0.1, hl.null('float'), float('nan')), float('nan')),
            (hl.max(hl.null('float'), float('nan')), float('nan')),
            (hl.max(0.1, hl.null('float'), float('nan'), filter_missing=False), None),
            (hl.nanmax(0.1, hl.null('float'), float('nan')), 0.1),
            (hl.max(hl.null('float'), float('nan')), float('nan')),
            (hl.nanmax(hl.null('float'), float('nan')), float('nan')),
            (hl.nanmax(hl.null('float'), float('nan'), 1.1, filter_missing=False), None),
            (hl.max([0.1, hl.null('float'), 0.0]), 0.1),
            (hl.max([hl.null('float'), float('nan')]), float('nan')),
            (hl.max([0.1, hl.null('float'), float('nan')]), float('nan')),
            (hl.max([0.1, hl.null('float'), float('nan')], filter_missing=False), None),
            (hl.nanmax([0.1, hl.null('float'), float('nan')]), 0.1),
            (hl.nanmax([float('nan'), 1.1, 0.1, hl.null('float'), 0.0]), 1.1),
            (hl.max([float('nan'), 1.1, 0.1, hl.null('float'), float('nan')]), float('nan')),
            (hl.max([float('nan'), 1.1, 0.1, hl.null('float'), float('nan')], filter_missing=False), None),
            (hl.nanmax([float('nan'), 1.1, 0.1, hl.null('float'), float('nan')]), 1.1),
            (hl.nanmax([hl.null('float'), float('nan'), 1.1], filter_missing=False), None),
            (hl.max({0.1, hl.null('float'), 0.0}), 0.1),
            (hl.max({hl.null('float'), float('nan')}), float('nan')),
            (hl.nanmax({float('nan'), 1.1, 0.1, hl.null('float'), 0.0}), 1.1),
            (hl.nanmax({hl.null('float'), float('nan'), 1.1}, filter_missing=False), None),
        ]

        r = hl.eval(hl.tuple(x[0] for x in exprs_and_results))
        for i in range(len(r)):
            actual = r[i]
            expected = exprs_and_results[i][1]
            # NaN != NaN, so an explicit isnan check treats two NaNs as equal.
            assert actual == expected or (
                    actual is not None
                    and expected is not None
                    and (math.isnan(actual) and math.isnan(expected))), \
                f'{i}: {actual}, {expected}'
Example 77
Project: hail   Author: hail-is   File: test_expr.py    MIT License 4 votes vote down vote up
def test_min(self):
        """Check hl.min / hl.nanmin over scalars, arrays and sets.

        Mirror image of test_max: each (expression, expected) pair exercises
        missing-value (hl.null) and NaN propagation; results are evaluated in
        one hl.eval call and compared entry-wise with NaN == NaN as equal.
        """
        exprs_and_results = [
            (hl.min(1, 2), 1),
            (hl.min(1.0, 2), 1.0),
            (hl.min([1, 2]), 1),
            (hl.min([1.0, 2]), 1.0),
            (hl.min(0, 1.0, 2), 0.0),
            (hl.nanmin(0, 1.0, 2), 0.0),
            (hl.min(0, 1, 2), 0),
            (hl.min([10, 10, 2, 3, 4, 5, 6]), 2),
            (hl.min(0, 10, 2, 3, 4, 5, 6), 0),
            (hl.min([-5, -4, hl.null(tint32), -3, -2, hl.null(tint32)]), -5),
            (hl.min([float('nan'), -4, float('nan'), -3, -2, hl.null(tint32)]), float('nan')),
            (hl.min(-0.1, hl.null('float'), 0.0), -0.1),
            (hl.min(0.1, hl.null('float'), float('nan')), float('nan')),
            (hl.min(hl.null('float'), float('nan')), float('nan')),
            (hl.min(0.1, hl.null('float'), float('nan'), filter_missing=False), None),
            (hl.nanmin(-0.1, hl.null('float'), float('nan')), -0.1),
            (hl.min(hl.null('float'), float('nan')), float('nan')),
            (hl.nanmin(hl.null('float'), float('nan')), float('nan')),
            (hl.nanmin(hl.null('float'), float('nan'), 1.1, filter_missing=False), None),
            (hl.min([-0.1, hl.null('float'), 0.0]), -0.1),
            (hl.min([hl.null('float'), float('nan')]), float('nan')),
            (hl.min([0.1, hl.null('float'), float('nan')]), float('nan')),
            (hl.min([0.1, hl.null('float'), float('nan')], filter_missing=False), None),
            (hl.nanmin([-0.1, hl.null('float'), float('nan')]), -0.1),
            (hl.nanmin([float('nan'), -1.1, 0.1, hl.null('float'), 0.0]), -1.1),
            (hl.min([float('nan'), 1.1, 0.1, hl.null('float'), float('nan')]), float('nan')),
            (hl.min([float('nan'), 1.1, 0.1, hl.null('float'), float('nan')], filter_missing=False), None),
            (hl.nanmin([float('nan'), 1.1, 0.1, hl.null('float'), float('nan')]), 0.1),
            (hl.nanmin([hl.null('float'), float('nan'), 1.1], filter_missing=False), None),
            (hl.min({-0.1, hl.null('float'), 0.0}), -0.1),
            (hl.min({hl.null('float'), float('nan')}), float('nan')),
            (hl.nanmin({float('nan'), 1.1, -0.1, hl.null('float'), 0.0}), -0.1),
            (hl.nanmin({hl.null('float'), float('nan'), 1.1}, filter_missing=False), None),
        ]

        r = hl.eval(hl.tuple(x[0] for x in exprs_and_results))
        for i in range(len(r)):
            actual = r[i]
            expected = exprs_and_results[i][1]
            # NaN != NaN, so an explicit isnan check treats two NaNs as equal.
            assert actual == expected or (
                    actual is not None
                    and expected is not None
                    and (math.isnan(actual) and math.isnan(expected))), \
                f'{i}: {actual}, {expected}'
Example 78
Project: elektronn3   Author: ELEKTRONN   File: _trainer_multi.py    MIT License 4 votes vote down vote up
def run(self, max_steps: int = 1, max_runtime=3600 * 24 * 7) -> None:
        """Train the network for ``max_steps`` steps.

        After each training epoch, validation performance is measured and
        visualizations are computed and logged to tensorboard."""
        self.start_time = datetime.datetime.now()
        self.end_time = self.start_time + datetime.timedelta(seconds=max_runtime)
        self._save_model(suffix='_initial', verbose=False)
        self._lr_nhood.clear()
        self._lr_nhood.append(self.optimizer.param_groups[0]['lr'])  # LR of the first training step
        while not self.terminate:
            try:
                stats, file_stats, misc, tr_sample_images = self._train(max_steps, max_runtime)
                self.epoch += 1

                # Without a validation set, report NaN so downstream logging
                # still finds a 'val_loss' entry.  Note NaN never compares
                # < best_val_loss, so no "best" checkpoint is saved then.
                if self.valid_dataset is None:
                    stats['val_loss'] = nan
                    val_sample_images = None
                else:
                    valid_stats, val_sample_images = self._validate()
                    stats.update(valid_stats)

                # Log to stdout and text log file
                self._log_basic(stats, misc)
                # Render visualizations and log to tensorboard
                self._log_to_tensorboard(stats, misc, tr_sample_images, val_sample_images, file_stats=file_stats)
                # Legacy non-tensorboard logging to files
                self._log_to_history_tracker(stats, misc)

                # Save trained model state
                self._save_model(val_loss=stats['val_loss'], verbose=False)  # Not verbose because it can get spammy.
                # TODO: Support other metrics for determining what's the "best" model?
                if stats['val_loss'] < self.best_val_loss:
                    self.best_val_loss = stats['val_loss']
                    self._save_model(suffix=f'_best{self.step}', val_loss=stats['val_loss'])
            except KeyboardInterrupt:
                # Ctrl-C drops into an IPython shell if configured (the shell
                # may set self.terminate); otherwise it stops training.
                if self.ipython_shell:
                    IPython.embed(header=self._shell_info)
                else:
                    break
                if self.terminate:
                    break
            except Exception as e:
                logger.exception('Unhandled exception during training:')
                if self.ignore_errors:
                    # Just print the traceback and try to carry on with training.
                    # This can go wrong in unexpected ways, so don't leave the training unattended.
                    pass
                elif self.ipython_shell:
                    print("\nEntering Command line such that Exception can be "
                          "further inspected by user.\n\n")
                    IPython.embed(header=self._shell_info)
                    if self.terminate:
                        break
                else:
                    raise e
        self._save_model(suffix='_final')
Example 79
Project: elektronn3   Author: ELEKTRONN   File: trainer.py    MIT License 4 votes vote down vote up
def run(self, max_steps: int = 1, max_runtime=3600 * 24 * 7) -> None:
        """Train the network for ``max_steps`` steps.

        After each training epoch, validation performance is measured and
        visualizations are computed and logged to tensorboard."""
        self.start_time = datetime.datetime.now()
        self.end_time = self.start_time + datetime.timedelta(seconds=max_runtime)
        self._save_model(suffix='_initial', verbose=False)
        self._lr_nhood.clear()
        self._lr_nhood.append(self.optimizer.param_groups[0]['lr'])  # LR of the first training step
        while not self.terminate:
            try:
                stats, misc, tr_sample_images = self._train(max_steps, max_runtime)
                self.epoch += 1

                # Without a validation set, report NaN so downstream logging
                # still finds a 'val_loss' entry.  Note NaN never compares
                # < best_val_loss, so no "best" checkpoint is saved then.
                if self.valid_dataset is None:
                    stats['val_loss'] = nan
                    val_sample_images = None
                else:
                    valid_stats, val_sample_images = self._validate()
                    stats.update(valid_stats)

                # Log to stdout and text log file
                self._log_basic(stats, misc)
                # Render visualizations and log to tensorboard
                self._log_to_tensorboard(stats, misc, tr_sample_images, val_sample_images)
                # Legacy non-tensorboard logging to files
                self._log_to_history_tracker(stats, misc)

                # Save trained model state
                self._save_model(val_loss=stats['val_loss'], verbose=False)  # Not verbose because it can get spammy.
                # TODO: Support other metrics for determining what's the "best" model?
                if stats['val_loss'] < self.best_val_loss:
                    self.best_val_loss = stats['val_loss']
                    self._save_model(suffix='_best', val_loss=stats['val_loss'])
            except KeyboardInterrupt:
                # Ctrl-C drops into an IPython shell if configured (the shell
                # may set self.terminate); otherwise it stops training.
                if self.ipython_shell:
                    IPython.embed(header=self._shell_info)
                else:
                    break
                if self.terminate:
                    break
            except Exception as e:
                logger.exception('Unhandled exception during training:')
                if self.ignore_errors:
                    # Just print the traceback and try to carry on with training.
                    # This can go wrong in unexpected ways, so don't leave the training unattended.
                    pass
                elif self.ipython_shell:
                    print("\nEntering Command line such that Exception can be "
                          "further inspected by user.\n\n")
                    IPython.embed(header=self._shell_info)
                    if self.terminate:
                        break
                else:
                    raise e
        self._save_model(suffix='_final')
        if self.tb is not None:
            self.tb.close()  # Ensure that everything is flushed
Example 80
Project: loss-visualization   Author: cfellicious   File: analyze.py    GNU General Public License v3.0 4 votes vote down vote up
def create_loss_landscape(net=None, vectors=None, dir=None, steps=0,
                          wrongly_classified_images=None, wrongly_classified_labels=None, mean_path=None):
    """Compute loss and accuracy over a 2-D grid of weight perturbations.

    :param net: network whose weights are perturbed along the two directions.
    :param vectors: pair of direction vectors spanning the 2-D plane.
    :param dir: directory used to cache/load the generated grids.
        NOTE(review): shadows the ``dir`` builtin, but renaming would change
        the keyword interface, so it is kept.
    :param steps: number of grid points per axis.
    :param wrongly_classified_images: images evaluated at each grid point.
    :param wrongly_classified_labels: labels for those images.
    :param mean_path: path of the mean file forwarded to the loss computation.
    :return: ``(loss_matrix, accuracy_matrix)``, each of shape (steps, steps).
    """
    # With debug truthy, cached grids in `dir` are reused when both .npy
    # files exist; they are regenerated (and saved) otherwise.
    debug = 1
    start_time = time.time()
    # if not debug, calculate the grid and save the new data
    if not debug or not(os.path.exists(os.path.join(dir,'vector_grid1.npy')) and
                        os.path.exists(os.path.join(dir,'vector_grid2.npy'))):
        # Each grid spans the direction vector and its negation.
        vector_grid1 = create_grid(np.vstack((vectors[0], np.negative(vectors[0]))), steps)
        vector_grid2 = create_grid(np.vstack((vectors[1], np.negative(vectors[1]))), steps)

        np.save(os.path.join(dir, 'vector_grid1'), vector_grid1)
        np.save(os.path.join(dir, 'vector_grid2'), vector_grid2)
    else:
        vector_grid1 = np.load(os.path.join(dir, 'vector_grid1.npy'))
        vector_grid2 = np.load(os.path.join(dir, 'vector_grid2.npy'))

    end_time = time.time() - start_time
    print('Duration : ' + str(end_time))

    # Save the initial weights of the network
    layer_weights = save_network_weights(net=net)

    loss_matrix = np.zeros((steps, steps))
    accuracy_matrix = np.zeros((steps, steps))
    for x_idx in range(0, steps):
        for y_idx in range(0, steps):
            print(x_idx, y_idx)
            # Modify the network values
            net = update_net_params(net, layer_weights, vector_grid1[x_idx, :], vector_grid2[y_idx, :])

            # Calculate the loss for the entire testing database

            loss, accuracy = compute_loss_for_db(net=net, wrongly_classified_images=wrongly_classified_images,
                                                 wrongly_classified_labels=wrongly_classified_labels,
                                                 mean_file_path=mean_path)

            # A zero loss would make log() blow up, so store NaN instead.
            # NOTE(review): -log(loss) is positive only for 0 < loss < 1 —
            # confirm the range of compute_loss_for_db's return value.
            if loss == 0:
                loss = math.nan
            else:
                loss = -(math.log(loss))
            # Save the loss value to a matrix
            print(loss)
            loss_matrix[x_idx][y_idx] = loss
            accuracy_matrix[x_idx][y_idx] = accuracy

    return loss_matrix, accuracy_matrix