Python pandas.compat.long() Examples
The following are 30 code examples of pandas.compat.long().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module
pandas.compat, or try the search function.
Example #1
Source File: test_comparisons.py From vnpy_crypto with MIT License | 6 votes |
def test_compare_invalid(self): # GH 8058 val = Timestamp('20130101 12:01:02') assert not val == 'foo' assert not val == 10.0 assert not val == 1 assert not val == long(1) assert not val == [] assert not val == {'foo': 1} assert not val == np.float64(1) assert not val == np.int64(1) assert val != 'foo' assert val != 10.0 assert val != 1 assert val != long(1) assert val != [] assert val != {'foo': 1} assert val != np.float64(1) assert val != np.int64(1)
Example #2
Source File: test_grouping.py From recruit with Apache License 2.0 | 6 votes |
def test_multiindex_columns_empty_level(self): lst = [['count', 'values'], ['to filter', '']] midx = MultiIndex.from_tuples(lst) df = DataFrame([[long(1), 'A']], columns=midx) grouped = df.groupby('to filter').groups assert grouped['A'] == [0] grouped = df.groupby([('to filter', '')]).groups assert grouped['A'] == [0] df = DataFrame([[long(1), 'A'], [long(2), 'B']], columns=midx) expected = df.groupby('to filter').groups result = df.groupby([('to filter', '')]).groups assert result == expected df = DataFrame([[long(1), 'A'], [long(2), 'A']], columns=midx) expected = df.groupby('to filter').groups result = df.groupby([('to filter', '')]).groups tm.assert_dict_equal(result, expected)
Example #3
Source File: test_grouping.py From vnpy_crypto with MIT License | 6 votes |
def test_multiindex_columns_empty_level(self): lst = [['count', 'values'], ['to filter', '']] midx = MultiIndex.from_tuples(lst) df = DataFrame([[long(1), 'A']], columns=midx) grouped = df.groupby('to filter').groups assert grouped['A'] == [0] grouped = df.groupby([('to filter', '')]).groups assert grouped['A'] == [0] df = DataFrame([[long(1), 'A'], [long(2), 'B']], columns=midx) expected = df.groupby('to filter').groups result = df.groupby([('to filter', '')]).groups assert result == expected df = DataFrame([[long(1), 'A'], [long(2), 'A']], columns=midx) expected = df.groupby('to filter').groups result = df.groupby([('to filter', '')]).groups tm.assert_dict_equal(result, expected)
Example #4
Source File: test_grouping.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 6 votes |
def test_multiindex_columns_empty_level(self): lst = [['count', 'values'], ['to filter', '']] midx = MultiIndex.from_tuples(lst) df = DataFrame([[long(1), 'A']], columns=midx) grouped = df.groupby('to filter').groups assert grouped['A'] == [0] grouped = df.groupby([('to filter', '')]).groups assert grouped['A'] == [0] df = DataFrame([[long(1), 'A'], [long(2), 'B']], columns=midx) expected = df.groupby('to filter').groups result = df.groupby([('to filter', '')]).groups assert result == expected df = DataFrame([[long(1), 'A'], [long(2), 'A']], columns=midx) expected = df.groupby('to filter').groups result = df.groupby([('to filter', '')]).groups tm.assert_dict_equal(result, expected)
Example #5
Source File: test_comparisons.py From recruit with Apache License 2.0 | 6 votes |
def test_compare_invalid(self): # GH#8058 val = Timestamp('20130101 12:01:02') assert not val == 'foo' assert not val == 10.0 assert not val == 1 assert not val == long(1) assert not val == [] assert not val == {'foo': 1} assert not val == np.float64(1) assert not val == np.int64(1) assert val != 'foo' assert val != 10.0 assert val != 1 assert val != long(1) assert val != [] assert val != {'foo': 1} assert val != np.float64(1) assert val != np.int64(1)
Example #6
Source File: test_comparisons.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 6 votes |
def test_compare_invalid(self): # GH#8058 val = Timestamp('20130101 12:01:02') assert not val == 'foo' assert not val == 10.0 assert not val == 1 assert not val == long(1) assert not val == [] assert not val == {'foo': 1} assert not val == np.float64(1) assert not val == np.int64(1) assert val != 'foo' assert val != 10.0 assert val != 1 assert val != long(1) assert val != [] assert val != {'foo': 1} assert val != np.float64(1) assert val != np.int64(1)
Example #7
Source File: sorting.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def is_int64_overflow_possible(shape):
    # Multiply out the shape with arbitrary-precision integers; if the
    # product reaches _INT64_MAX, flattening group labels into a single
    # int64 key could overflow.
    total = long(1)
    for dim in shape:
        total = total * long(dim)
    return total >= _INT64_MAX
Example #8
Source File: sorting.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def is_int64_overflow_possible(shape):
    # Multiply out the shape with arbitrary-precision integers; if the
    # product reaches _INT64_MAX, flattening group labels into a single
    # int64 key could overflow.
    total = long(1)
    for dim in shape:
        total = total * long(dim)
    return total >= _INT64_MAX
Example #9
Source File: common.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def _long_prod(vals): result = long(1) for x in vals: result *= x return result
Example #10
Source File: json.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def _get_data_from_filepath(self, filepath_or_buffer):
    """
    Normalize read_json input.

    read_json accepts a filepath (string-like), a file-like object
    (e.g. open file object, StringIO), or a raw JSON string.  A path
    that exists (or any input read with compression) is opened into a
    file-like object here; every other input is returned untouched.
    """
    data = filepath_or_buffer

    is_path = False
    if isinstance(data, compat.string_types):
        # gh-5874: os.path.exists can raise on an over-long string,
        # which is then treated as JSON content rather than a path
        try:
            is_path = os.path.exists(filepath_or_buffer)
        except (TypeError, ValueError):
            is_path = False

    if is_path or self.compression is not None:
        data, _ = _get_handle(filepath_or_buffer, 'r',
                              encoding=self.encoding,
                              compression=self.compression)
        # remember that we own this handle so it can be closed later
        self.should_close = True
        self.open_stream = data

    return data
Example #11
Source File: test_timestamp.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def test_compare_invalid(self): # GH 8058 val = Timestamp('20130101 12:01:02') assert not val == 'foo' assert not val == 10.0 assert not val == 1 assert not val == long(1) assert not val == [] assert not val == {'foo': 1} assert not val == np.float64(1) assert not val == np.int64(1) assert val != 'foo' assert val != 10.0 assert val != 1 assert val != long(1) assert val != [] assert val != {'foo': 1} assert val != np.float64(1) assert val != np.int64(1) # ops testing df = DataFrame(np.random.randn(5, 2)) a = df[0] b = Series(np.random.randn(5)) b.name = Timestamp('2000-01-01') tm.assert_series_equal(a / b, 1 / (b / a))
Example #12
Source File: test_timestamp.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def test_delta_preserve_nanos(self): val = Timestamp(long(1337299200000000123)) result = val + timedelta(1) assert result.nanosecond == val.nanosecond
Example #13
Source File: stata.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def __init__(self, value):
    # Keep the raw value, then normalize the lookup key: values below
    # 2**31 become longs to avoid hash differences on 32-bit platforms
    # (GH #8968); anything larger is looked up as a float.
    self._value = value
    if value < 2147483648:
        key = compat.long(value)
    else:
        key = float(value)
    self._str = self.MISSING_VALUES[key]
Example #14
Source File: common.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def _long_prod(vals): result = long(1) for x in vals: result *= x return result
Example #15
Source File: test_datetimelike.py From coffeegrindsize with MIT License | 5 votes |
def test_unbox_scalar(self):
    # _unbox_scalar turns both a real scalar and NaT into a plain
    # integer, and rejects anything else with ValueError.
    values = np.arange(10, dtype='i8') * 24 * 3600 * 10**9
    arr = self.array_cls(values, freq='D')

    unboxed = arr._unbox_scalar(arr[0])
    assert isinstance(unboxed, (int, compat.long))

    unboxed = arr._unbox_scalar(pd.NaT)
    assert isinstance(unboxed, (int, compat.long))

    with pytest.raises(ValueError):
        arr._unbox_scalar('foo')
Example #16
Source File: test_dtypes.py From coffeegrindsize with MIT License | 5 votes |
def test_iter_python_types(self): # GH-19909 # TODO(Py2): Remove long cat = Categorical([1, 2]) assert isinstance(list(cat)[0], (int, long)) assert isinstance(cat.tolist()[0], (int, long))
Example #17
Source File: test_io.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def test_convert_sql_column_longs(self):
    # An all-long object column converts to int64 ...
    longs = np.array([long(1), long(2), long(3), long(4)], dtype='O')
    converted = lib.convert_sql_column(longs)
    tm.assert_numpy_array_equal(converted,
                                np.array([1, 2, 3, 4], dtype='i8'))

    # ... but a None forces float64 with NaN in its place.
    with_null = np.array([long(1), long(2), long(3), None, long(4)],
                         dtype='O')
    converted = lib.convert_sql_column(with_null)
    tm.assert_numpy_array_equal(converted,
                                np.array([1, 2, 3, np.nan, 4], dtype='f8'))
Example #18
Source File: stata.py From elasticintel with GNU General Public License v3.0 | 5 votes |
def _dtype_to_stata_type(dtype, column): """ Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 244 are strings of this length Pandas Stata 251 - chr(251) - for int8 byte 252 - chr(252) - for int16 int 253 - chr(253) - for int32 long 254 - chr(254) - for float32 float 255 - chr(255) - for double double If there are dates to convert, then dtype will already have the correct type inserted. """ # TODO: expand to handle datetime to integer conversion if dtype.type == np.string_: return chr(dtype.itemsize) elif dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? itemsize = max_len_string_array(_ensure_object(column.values)) return chr(max(itemsize, 1)) elif dtype == np.float64: return chr(255) elif dtype == np.float32: return chr(254) elif dtype == np.int32: return chr(253) elif dtype == np.int16: return chr(252) elif dtype == np.int8: return chr(251) else: # pragma : no cover raise NotImplementedError("Data type %s not supported." % dtype)
Example #19
Source File: stata.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def _dtype_to_stata_type_117(dtype, column, force_strl): """ Converts dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 2045 are strings of this length Pandas Stata 32768 - for object strL 65526 - for int8 byte 65527 - for int16 int 65528 - for int32 long 65529 - for float32 float 65530 - for double double If there are dates to convert, then dtype will already have the correct type inserted. """ # TODO: expand to handle datetime to integer conversion if force_strl: return 32768 if dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? itemsize = max_len_string_array(ensure_object(column.values)) itemsize = max(itemsize, 1) if itemsize <= 2045: return itemsize return 32768 elif dtype == np.float64: return 65526 elif dtype == np.float32: return 65527 elif dtype == np.int32: return 65528 elif dtype == np.int16: return 65529 elif dtype == np.int8: return 65530 else: # pragma : no cover raise NotImplementedError("Data type %s not supported." % dtype)
Example #20
Source File: internals.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def _can_hold_element(self, element):
    # Array-likes: accept anything whose inferred dtype is float, int
    # or complex.
    inferred = maybe_infer_dtype_type(element)
    if inferred is not None:
        return issubclass(inferred.type,
                          (np.floating, np.integer, np.complexfloating))
    # Scalars: numeric but not boolean (bool is an int subclass).
    is_numeric = isinstance(
        element, (float, int, complex, np.float_, np.int_, compat.long))
    return is_numeric and not isinstance(element, (bool, np.bool_))
Example #21
Source File: json.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def _get_data_from_filepath(self, filepath_or_buffer):
    """
    Normalize read_json input.

    The function read_json accepts a filepath (string-like), a
    file-like object (e.g. open file object, StringIO), or a raw JSON
    string.  A path that exists (or any input read with compression)
    is opened into a file-like object here; every other input is
    returned untouched.
    """
    data = filepath_or_buffer

    is_path = False
    if isinstance(data, compat.string_types):
        # gh-5874: os.path.exists can raise on an over-long string,
        # which is then treated as JSON content rather than a path
        try:
            is_path = os.path.exists(filepath_or_buffer)
        except (TypeError, ValueError):
            is_path = False

    if is_path or self.compression is not None:
        data, _ = _get_handle(filepath_or_buffer, 'r',
                              encoding=self.encoding,
                              compression=self.compression)
        # remember that we own this handle so it can be closed later
        self.should_close = True
        self.open_stream = data

    return data
Example #22
Source File: sorting.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def is_int64_overflow_possible(shape):
    # Multiply out the shape with arbitrary-precision integers; if the
    # product reaches _INT64_MAX, flattening group labels into a single
    # int64 key could overflow.
    total = long(1)
    for dim in shape:
        total = total * long(dim)
    return total >= _INT64_MAX
Example #23
Source File: blocks.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def _can_hold_element(self, element):
    # Array-likes: accept anything whose inferred dtype is float, int
    # or complex.
    inferred = maybe_infer_dtype_type(element)
    if inferred is not None:
        return issubclass(inferred.type,
                          (np.floating, np.integer, np.complexfloating))
    # Scalars: numeric but not boolean (bool is an int subclass).
    is_numeric = isinstance(
        element, (float, int, complex, np.float_, np.int_, compat.long))
    return is_numeric and not isinstance(element, (bool, np.bool_))
Example #24
Source File: blocks.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def _can_hold_element(self, element):
    # Array-likes: real numeric dtypes only — datetime64/timedelta64
    # are explicitly excluded.
    inferred = maybe_infer_dtype_type(element)
    if inferred is not None:
        if issubclass(inferred.type, (np.datetime64, np.timedelta64)):
            return False
        return issubclass(inferred.type, (np.floating, np.integer))
    # Scalars: numeric, but not booleans or any date/time-like value.
    is_numeric = isinstance(
        element, (float, int, np.floating, np.int_, compat.long))
    return is_numeric and not isinstance(
        element, (bool, np.bool_, datetime, timedelta,
                  np.datetime64, np.timedelta64))
Example #25
Source File: test_comparisons.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def test_comparison(self): # 5-18-2012 00:00:00.000 stamp = long(1337299200000000000) val = Timestamp(stamp) assert val == val assert not val != val assert not val < val assert val <= val assert not val > val assert val >= val other = datetime(2012, 5, 18) assert val == other assert not val != other assert not val < other assert val <= other assert not val > other assert val >= other other = Timestamp(stamp + 100) assert val != other assert val != other assert val < other assert val <= other assert other > val assert other >= val
Example #26
Source File: test_convert_to.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def test_to_dict_box_scalars(self, orient, item_getter): # 14216, 23753 # make sure that we are boxing properly df = DataFrame({'a': [1, 2], 'b': [.1, .2]}) result = df.to_dict(orient=orient) assert isinstance(item_getter(result, 'a', 0), (int, long)) assert isinstance(item_getter(result, 'b', 0), float)
Example #27
Source File: test_constructors.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def test_scalar_conversion(self): # Pass in scalar is disabled scalar = Series(0.5) assert not isinstance(scalar, float) # Coercion assert float(Series([1.])) == 1.0 assert int(Series([1.])) == 1 assert long(Series([1.])) == 1
Example #28
Source File: test_dtypes.py From predictive-maintenance-using-machine-learning with Apache License 2.0 | 5 votes |
def test_iter_python_types(self): # GH-19909 # TODO(Py2): Remove long cat = Categorical([1, 2]) assert isinstance(list(cat)[0], (int, long)) assert isinstance(cat.tolist()[0], (int, long))
Example #29
Source File: test_datetimelike.py From recruit with Apache License 2.0 | 5 votes |
def test_unbox_scalar(self):
    # _unbox_scalar turns both a real scalar and NaT into a plain
    # integer, and rejects anything else with ValueError.
    values = np.arange(10, dtype='i8') * 24 * 3600 * 10**9
    arr = self.array_cls(values, freq='D')

    unboxed = arr._unbox_scalar(arr[0])
    assert isinstance(unboxed, (int, compat.long))

    unboxed = arr._unbox_scalar(pd.NaT)
    assert isinstance(unboxed, (int, compat.long))

    with pytest.raises(ValueError):
        arr._unbox_scalar('foo')
Example #30
Source File: stata.py From Computable with MIT License | 5 votes |
def __init__(self, fname, data, convert_dates=None, write_index=True,
             encoding="latin-1", byteorder=None):
    # Initialize a StataWriter for writing *data* (a DataFrame) to the
    # .dta file *fname*.
    #
    # Parameters (as used below):
    #   fname         : path of the output file, opened in binary mode
    #   data          : frame handed to _prepare_pandas for conversion
    #   convert_dates : mapping of date conversions, stored for later use
    #   write_index   : whether the frame's index is written, stored flag
    #   encoding      : text encoding, passed to the parent initializer
    #   byteorder     : output endianness; defaults to sys.byteorder
    super(StataWriter, self).__init__(encoding)
    self._convert_dates = convert_dates
    self._write_index = write_index
    # attach nobs, nvars, data, varlist, typlist
    self._prepare_pandas(data)

    if byteorder is None:
        byteorder = sys.byteorder
    # _set_endianness normalizes the byteorder string; presumably to
    # the '<' / '>' form — TODO confirm against its definition
    self._byteorder = _set_endianness(byteorder)
    # fall back to the class default encoding when none was resolved
    self._file = _open_file_binary_write(
        fname, self._encoding or self._default_encoding
    )
    # maps Stata type ordinals 253/252 to Python integer constructors
    # (NOTE(review): np.long is removed in modern NumPy — verify the
    # pinned numpy version before upgrading)
    self.type_converters = {253: np.long, 252: int}