Python pandas.Index() Examples
The following are 30 code examples of pandas.Index().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module pandas, or try the search function.
Example #1
Source File: portfolio.py From tensortrade with Apache License 2.0 | 7 votes |
def on_next(self, data: dict):
    """Record one step of portfolio performance from a streamed data dict.

    Builds a single-row DataFrame (indexed by the current clock step),
    appends it to the running performance history, and updates the
    net-worth bookkeeping. Notifies the performance listener, if any,
    with the new row.
    """
    # Lazily discover which keys of the incoming dict to track.
    if not self._keys:
        self._keys = self.find_keys(data)

    index = pd.Index([self.clock.step], name="step")
    performance_data = {k: data[k] for k in self._keys}
    performance_data['base_symbol'] = self.base_instrument.symbol
    performance_step = pd.DataFrame(performance_data, index=index)

    net_worth = data['net_worth']

    if self._performance is None:
        # First observation: start the history and record the baseline.
        self._performance = performance_step
        self._initial_net_worth = net_worth
        self._net_worth = net_worth
    else:
        # FIX: DataFrame.append was deprecated in pandas 1.4 and removed
        # in 2.0; pd.concat is the forward-compatible equivalent.
        self._performance = pd.concat([self._performance, performance_step])
        self._net_worth = net_worth

    if self._performance_listener:
        self._performance_listener(performance_step)
Example #2
Source File: test_heatmap.py From mmvec with BSD 3-Clause "New" or "Revised" License | 6 votes |
def setUp(self):
    """Build the taxonomy fixture and its expected parsed counterpart.

    ``self.taxa`` holds full semicolon-delimited lineage strings for
    seven features (ids A-G); ``self.exp`` holds the last rank of each
    of those lineages, under the same index and series name.
    """
    self.taxa = pd.Series([
        'k__Bacteria; p__Proteobacteria; c__Deltaproteobacteria; '
        'o__Desulfobacterales; f__Desulfobulbaceae; g__; s__',
        'k__Bacteria; p__Cyanobacteria; c__Chloroplast; o__Streptophyta',
        'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
        'o__Rickettsiales; f__mitochondria; g__Lardizabala; s__biternata',
        'k__Archaea; p__Euryarchaeota; c__Methanomicrobia; '
        'o__Methanosarcinales; f__Methanosarcinaceae; g__Methanosarcina',
        'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
        'o__Rickettsiales; f__mitochondria; g__Pavlova; s__lutheri',
        'k__Archaea; p__[Parvarchaeota]; c__[Parvarchaea]; o__WCHD3-30',
        'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
        'o__Sphingomonadales; f__Sphingomonadaceae'],
        index=pd.Index([c for c in 'ABCDEFG'], name='feature-id'),
        name='Taxon')
    # Expected output: the terminal rank of each lineage above.
    self.exp = pd.Series(
        ['s__', 'o__Streptophyta', 's__biternata', 'g__Methanosarcina',
         's__lutheri', 'o__WCHD3-30', 'f__Sphingomonadaceae'],
        index=pd.Index([c for c in 'ABCDEFG'], name='feature-id'),
        name='Taxon')
Example #3
Source File: test_indexing.py From recordlinkage with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_index_names_pandas023(self, index_class):
    """Compatibility test for MultiIndex name behaviour.

    Pandas changed how MultiIndex names behave; see
    https://github.com/pandas-dev/pandas/pull/18882 and
    https://github.com/J535D165/recordlinkage/issues/55. When both
    inputs carry the same index name, the resulting pair index must not
    end up with two identical level names.
    """
    # Give each frame an explicitly named index.
    frame_a = pd.DataFrame(self.a, index=pd.Index(self.a.index, name='index'))
    frame_b = pd.DataFrame(self.b, index=pd.Index(self.b.index, name='index'))

    # Linking two frames: level names must differ when present.
    pairs_link = index_class._link_index(frame_a, frame_b)
    if pairs_link.names[0] is not None:
        assert pairs_link.names[0] != pairs_link.names[1]

    # Deduplicating a single frame: same requirement.
    # NOTE(review): this guard re-checks pairs_link.names rather than
    # pairs_dedup.names — possibly a copy-paste slip; confirm upstream.
    pairs_dedup = index_class._dedup_index(frame_a)
    if pairs_link.names[0] is not None:
        assert pairs_dedup.names[0] != pairs_dedup.names[1]
Example #4
Source File: test_indexing.py From recordlinkage with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_index_names_link(self, index_class):
    """Linking must propagate both input index names (including None
    and non-string names) and must not rename the inputs in place."""
    # (name for frame a, name for frame b) combinations to exercise.
    name_pairs = [('index1', 'index2'), ('index1', None),
                  (None, 'index2'), (None, None),
                  (10, 'index2'), (10, 11)]

    for left_name, right_name in name_pairs:
        left_index = pd.Index(self.a.index, name=left_name)
        right_index = pd.Index(self.b.index, name=right_name)
        frame_a = pd.DataFrame(self.a, index=left_index)
        frame_b = pd.DataFrame(self.b, index=right_index)

        pairs = index_class.index((frame_a, frame_b))
        assert pairs.names == [left_name, right_name]

        # Check for in-place editing (not the intention).
        assert frame_a.index.name == left_name
        assert frame_b.index.name == right_name
Example #5
Source File: test_indexing.py From recordlinkage with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_lower_triangular(self, index_class):
    """Every produced pair must lie strictly below the diagonal of the
    full record-by-record comparison matrix."""
    frame = pd.DataFrame(self.a, index=pd.Index(self.a.index, name='index'))
    pairs = index_class.index(frame)

    # Build the complete strict lower triangle (diagonal excluded) ...
    values = frame.index.values
    tril_codes = np.tril_indices(len(frame.index), k=-1)
    full_pairs = pd.MultiIndex(levels=[values, values],
                               codes=tril_codes,
                               verify_integrity=False)

    # ... and verify the produced pairs are a subset of it.
    assert len(pairs.difference(full_pairs)) == 0
Example #6
Source File: test_indexing.py From recordlinkage with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_index_names_dedup(self, index_class):
    """Deduplication suffixes the input index name (string, None, or
    int) so the two levels of the pair index are distinguishable."""
    names = ['dedup', None, 'index', int(1)]
    expected_names = [
        ['dedup_1', 'dedup_2'],
        [None, None],
        ['index_1', 'index_2'],
        ['1_1', '1_2'],
    ]

    for name, expected in zip(names, expected_names):
        renamed = pd.Index(self.a.index).rename(name)
        frame = pd.DataFrame(self.a, index=renamed)

        pairs = index_class.index((frame))
        assert pairs.names == expected
        # The input frame's index name must be left untouched.
        assert frame.index.name == name
Example #7
Source File: test_indexing.py From recordlinkage with BSD 3-Clause "New" or "Revised" License | 6 votes |
def test_duplicated_index_names_dedup(self, index_class):
    """A single shared index name is suffixed to make the two levels of
    the dedup pair index unique; custom suffixes are honoured too."""
    frame = pd.DataFrame(self.a, index=pd.Index(self.a.index, name='index'))

    # Default suffixes.
    pairs = index_class.index(frame)
    assert pairs.names == ['index_1', 'index_2']
    # Check for in-place editing (not the intention).
    assert frame.index.name == 'index'

    # Custom suffixes.
    index_class.suffixes = ['_a', '_b']
    pairs = index_class.index(frame)
    assert pairs.names == ['index_a', 'index_b']
    assert frame.index.name == 'index'
Example #8
Source File: test_visualizers.py From mmvec with BSD 3-Clause "New" or "Revised" License | 6 votes |
def setUp(self):
    """Build ranks/taxonomy/metabolite fixtures for visualizer tests.

    ``self.ranks`` is a 3x4 conditional-rank matrix (rows m1-m3,
    columns A-D) wrapped as a QIIME 2 Artifact; ``self.taxa`` and
    ``self.metabolites`` are categorical metadata columns keyed by
    'feature-id'.
    """
    # Rank matrix: built feature-major (A-D rows) then transposed so
    # metabolites m1-m3 become the rows.
    _ranks = pd.DataFrame([[4.1, 1.3, 2.1],
                           [0.1, 0.3, 0.2],
                           [2.2, 4.3, 3.2],
                           [-6.3, -4.4, 2.1]],
                          index=pd.Index([c for c in 'ABCD'], name='id'),
                          columns=['m1', 'm2', 'm3']).T
    self.ranks = Artifact.import_data('FeatureData[Conditional]', _ranks)
    # Full lineage strings for features A-D.
    self.taxa = CategoricalMetadataColumn(pd.Series([
        'k__Bacteria; p__Proteobacteria; c__Deltaproteobacteria; '
        'o__Desulfobacterales; f__Desulfobulbaceae; g__; s__',
        'k__Bacteria; p__Cyanobacteria; c__Chloroplast; o__Streptophyta',
        'k__Bacteria; p__Proteobacteria; c__Alphaproteobacteria; '
        'o__Rickettsiales; f__mitochondria; g__Lardizabala; s__biternata',
        'k__Archaea; p__Euryarchaeota; c__Methanomicrobia; '
        'o__Methanosarcinales; f__Methanosarcinaceae; g__Methanosarcina'],
        index=pd.Index([c for c in 'ABCD'], name='feature-id'),
        name='Taxon'))
    # Pathway labels for metabolites m1-m3.
    self.metabolites = CategoricalMetadataColumn(pd.Series([
        'amino acid', 'carbohydrate', 'drug metabolism'],
        index=pd.Index(['m1', 'm2', 'm3'], name='feature-id'),
        name='Super Pathway'))
Example #9
Source File: test_api.py From recruit with Apache License 2.0 | 6 votes |
def test_ordered_api(self): # GH 9347 cat1 = Categorical(list('acb'), ordered=False) tm.assert_index_equal(cat1.categories, Index(['a', 'b', 'c'])) assert not cat1.ordered cat2 = Categorical(list('acb'), categories=list('bca'), ordered=False) tm.assert_index_equal(cat2.categories, Index(['b', 'c', 'a'])) assert not cat2.ordered cat3 = Categorical(list('acb'), ordered=True) tm.assert_index_equal(cat3.categories, Index(['a', 'b', 'c'])) assert cat3.ordered cat4 = Categorical(list('acb'), categories=list('bca'), ordered=True) tm.assert_index_equal(cat4.categories, Index(['b', 'c', 'a'])) assert cat4.ordered
Example #10
Source File: numpy_records.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def _index_to_records(self, df):
    """Split ``df``'s index into (names, value arrays, metadata).

    The index names — and, when the index is timezone-aware, the tz —
    are recorded in the metadata dict so the index can be rebuilt on
    the read path.
    """
    idx = df.index
    tz = None

    if isinstance(idx, MultiIndex):
        # Multi-level indexes are flattened by a dedicated helper.
        ix_vals, names, tz = _multi_index_to_records(idx, len(df) == 0)
    else:
        ix_vals = [idx.values]
        names = list(idx.names)
        if names[0] is None:
            # Unnamed indexes are persisted under a default name.
            names = ['index']
            log.info("Index has no name, defaulting to 'index'")
        if isinstance(idx, DatetimeIndex) and idx.tz is not None:
            tz = get_timezone(idx.tz)

    metadata = {}
    if tz is not None:
        metadata['index_tz'] = tz
    metadata['index'] = names
    return names, ix_vals, metadata
Example #11
Source File: numpy_records.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def _index_from_records(self, recarr):
    """Rebuild a pandas (Multi)Index from a numpy record array.

    The index column names — and, optionally, per-level timezone info —
    were stored in ``recarr.dtype.metadata`` by the save path.
    """
    index = recarr.dtype.metadata['index']

    if len(index) == 1:
        # Single-level index: one named column; restore tz if recorded.
        rtn = Index(np.copy(recarr[str(index[0])]), name=index[0])
        if isinstance(rtn, DatetimeIndex) and 'index_tz' in recarr.dtype.metadata:
            # Stored values are UTC; convert back to the original tz.
            rtn = rtn.tz_localize('UTC').tz_convert(recarr.dtype.metadata['index_tz'])
    else:
        level_arrays = []
        index_tz = recarr.dtype.metadata.get('index_tz', [])
        for level_no, index_name in enumerate(index):
            # build each index level separately to ensure we end up with the right index dtype
            level = Index(np.copy(recarr[str(index_name)]))
            if level_no < len(index_tz):
                tz = index_tz[level_no]
                if tz is not None:
                    if not isinstance(level, DatetimeIndex) and len(level) == 0:
                        # index type information got lost during save as the index was empty, cast back
                        level = DatetimeIndex([], tz=tz)
                    else:
                        # Stored as UTC; convert to the recorded tz.
                        level = level.tz_localize('UTC').tz_convert(tz)
            level_arrays.append(level)
        rtn = MultiIndex.from_arrays(level_arrays, names=index)
    return rtn
Example #12
Source File: test_chunkstore.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def test_update(chunkstore_lib):
    """Updating with overlapping plus new rows merges them into a single
    frame and keeps the store's length/chunk-count bookkeeping in sync."""
    def date_frame(values, dates):
        # Small helper: a one-column frame indexed by named dates.
        return DataFrame(data={'data': values},
                         index=pd.Index(data=dates, name='date'))

    original = date_frame([1, 2, 3],
                          [dt(2016, 1, 1), dt(2016, 1, 2), dt(2016, 1, 3)])
    update = date_frame([20, 30, 40],
                        [dt(2016, 1, 2), dt(2016, 1, 3), dt(2016, 1, 4)])
    expected = date_frame([1, 20, 30, 40],
                          [dt(2016, 1, 1), dt(2016, 1, 2),
                           dt(2016, 1, 3), dt(2016, 1, 4)])

    chunkstore_lib.write('chunkstore_test', original, chunk_size='D')
    chunkstore_lib.update('chunkstore_test', update)

    assert_frame_equal(chunkstore_lib.read('chunkstore_test'), expected)
    assert (chunkstore_lib.get_info('chunkstore_test')['len'] == len(expected))
    assert (chunkstore_lib.get_info('chunkstore_test')['chunk_count'] == len(expected))
Example #13
Source File: test_chunkstore.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def test_update_chunk_range(chunkstore_lib):
    """An update restricted to a chunk range replaces only the rows
    inside that range, dropping the rest of the affected chunk."""
    def date_frame(values, dates):
        # Small helper: a one-column frame indexed by named dates.
        return DataFrame(data={'data': values},
                         index=pd.Index(data=dates, name='date'))

    original = date_frame([1, 2, 3],
                          [dt(2015, 1, 1), dt(2015, 1, 2), dt(2015, 1, 3)])
    update = date_frame([30], [dt(2015, 1, 2)])
    expected = date_frame([30, 3], [dt(2015, 1, 2), dt(2015, 1, 3)])

    chunkstore_lib.write('chunkstore_test', original, chunk_size='M')
    chunkstore_lib.update('chunkstore_test', update,
                          chunk_range=DateRange(dt(2015, 1, 1), dt(2015, 1, 2)))
    assert_frame_equal(chunkstore_lib.read('chunkstore_test'), expected)
Example #14
Source File: test_chunkstore.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def test_append_before(chunkstore_lib):
    """Appending rows dated *before* the stored data prepends them in
    date order rather than failing or appending at the end."""
    def date_frame(values, dates):
        # Small helper: a one-column frame indexed by named dates.
        return DataFrame(data={'data': values},
                         index=pd.Index(data=dates, name='date'))

    stored = date_frame([1, 2, 3],
                        [dt(2016, 1, 1), dt(2016, 1, 2), dt(2016, 1, 3)])
    earlier = date_frame([20, 30, 40],
                         [dt(2015, 1, 2), dt(2015, 1, 3), dt(2015, 1, 4)])
    expected = date_frame([20, 30, 40, 1, 2, 3],
                          [dt(2015, 1, 2), dt(2015, 1, 3), dt(2015, 1, 4),
                           dt(2016, 1, 1), dt(2016, 1, 2), dt(2016, 1, 3)])

    chunkstore_lib.write('chunkstore_test', stored, chunk_size='D')
    chunkstore_lib.append('chunkstore_test', earlier)
    assert_frame_equal(chunkstore_lib.read('chunkstore_test'), expected)
Example #15
Source File: test_chunkstore.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def test_update_series(chunkstore_lib):
    """Series round-trip: update merges overlapping and new points and
    reads back as a Series, not a frame."""
    def date_series(values, dates):
        # Small helper: a named Series indexed by named dates.
        return Series(data=values,
                      index=pd.Index(data=dates, name='date'),
                      name='data')

    original = date_series([1, 2, 3],
                           [dt(2016, 1, 1), dt(2016, 1, 2), dt(2016, 1, 3)])
    update = date_series([20, 30, 40],
                         [dt(2016, 1, 2), dt(2016, 1, 3), dt(2016, 1, 4)])
    expected = date_series([1, 20, 30, 40],
                           [dt(2016, 1, 1), dt(2016, 1, 2),
                            dt(2016, 1, 3), dt(2016, 1, 4)])

    chunkstore_lib.write('chunkstore_test', original, chunk_size='D')
    chunkstore_lib.update('chunkstore_test', update)
    assert_series_equal(chunkstore_lib.read('chunkstore_test'), expected)
Example #16
Source File: test_utils.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def create_test_data(size=5, index=True, multiindex=True, random_data=True,
                     random_ids=True, date_offset=0, use_hours=False, cols=1):
    """Build a DataFrame with ``cols`` data columns for tests.

    Depending on the flags the frame gets a (date, id) MultiIndex, a
    plain named date Index, or the dates as an ordinary 'date' column.
    Data is either random floats or a simple 0..size-1 range.
    """
    data = {}
    for col_no in range(cols):
        label = 'data' + str(col_no)
        if random_data:
            data[label] = [random.random() * random.randint(-100, 100)
                           for _ in range(size)]
        else:
            data[label] = range(size)

    # One timestamp per row, stepped by days or by hours.
    dates = [dt(2016, 1, 1) + timedelta(
                 days=0 if use_hours else n + date_offset,
                 hours=n + date_offset if use_hours else 0)
             for n in range(size)]

    if not index:
        # No index requested: expose the dates as a regular column.
        data.update({'date': dates})
        return DataFrame(data=data)

    if multiindex:
        names = ['date', 'id']
        if random_ids:
            tuples = [(date, random.randint(1, size)) for date in dates]
        else:
            tuples = [(date, 1) for date in dates]
        if tuples:
            mi = MultiIndex.from_tuples(tuples, names=names)
        else:
            # Empty input: build an empty two-level index explicitly.
            mi = MultiIndex([[]] * 2, [[]] * 2, names=names)
        return DataFrame(data=data, index=mi)

    return DataFrame(data=data, index=Index(data=dates, name='date'))
Example #17
Source File: test_chunkstore.py From arctic with GNU Lesser General Public License v2.1 | 6 votes |
def test_update_no_overlap(chunkstore_lib):
    """Updating with rows that pre-date the stored range inserts them
    before the existing data instead of overwriting anything."""
    def date_frame(values, dates):
        # Small helper: a one-column frame indexed by named dates.
        return DataFrame(data={'data': values},
                         index=pd.Index(data=dates, name='date'))

    stored = date_frame([1, 2, 3],
                        [dt(2016, 1, 1), dt(2016, 1, 2), dt(2016, 1, 3)])
    earlier = date_frame([20, 30, 40],
                         [dt(2015, 1, 2), dt(2015, 1, 3), dt(2015, 1, 4)])
    expected = date_frame([20, 30, 40, 1, 2, 3],
                          [dt(2015, 1, 2), dt(2015, 1, 3), dt(2015, 1, 4),
                           dt(2016, 1, 1), dt(2016, 1, 2), dt(2016, 1, 3)])

    chunkstore_lib.write('chunkstore_test', stored, chunk_size='D')
    chunkstore_lib.update('chunkstore_test', earlier)
    assert_frame_equal(chunkstore_lib.read('chunkstore_test'), expected)
Example #18
Source File: test_frame.py From recruit with Apache License 2.0 | 6 votes |
def test_constructor_ndarray(self, float_frame):
    """SparseDataFrame construction from ndarrays: bare 2-d input,
    1-d input with labels, and length-mismatch errors."""
    # 2-d input with neither index nor columns.
    sp = SparseDataFrame(float_frame.values)

    # 1-d input with explicit index/columns round-trips column 'A'.
    sp = SparseDataFrame(float_frame['A'].values,
                         index=float_frame.index, columns=['A'])
    tm.assert_sp_frame_equal(sp, float_frame.reindex(columns=['A']))

    # reindex rejects the level argument here.
    pytest.raises(TypeError, float_frame.reindex, columns=['A'], level=1)

    # Mismatched index / column lengths must raise.
    with pytest.raises(ValueError, match="^Index length"):
        SparseDataFrame(float_frame.values, index=float_frame.index[:-1])
    with pytest.raises(ValueError, match="^Column length"):
        SparseDataFrame(float_frame.values, columns=float_frame.columns[:-1])

    # GH 9272
Example #19
Source File: test_integer.py From recruit with Apache License 2.0 | 6 votes |
def test_astype_index(self, all_data, dropna):
    """Casting an object Index to an integer EA dtype must match the
    explicit object -> dtype two-step conversion."""
    sample = all_data[:10]
    # Optionally drop missing values before building the index.
    other = sample[~sample.isna()] if dropna else sample

    target_dtype = sample.dtype
    idx = pd.Index(np.array(other))
    assert isinstance(idx, ABCIndexClass)

    result = idx.astype(target_dtype)
    expected = idx.astype(object).astype(target_dtype)
    tm.assert_index_equal(result, expected)
Example #20
Source File: test_integer.py From recruit with Apache License 2.0 | 6 votes |
def test_preserve_dtypes(op):
    """Reductions on an Int64 column: scalar reductions return plain
    ints; groupby reductions keep the Int64 dtype for the EA column.

    TODO(#22346): preserve Int64 dtype for ops that enable it (mean
    would actually work here, but generally returns a float).
    """
    frame = pd.DataFrame({
        "A": ['a', 'b', 'b'],
        "B": [1, None, 3],
        "C": integer_array([1, None, 3], dtype='Int64'),
    })

    # Scalar reduction over the EA column.
    scalar = getattr(frame.C, op)()
    assert isinstance(scalar, int)

    # Grouped reduction: C stays Int64, B becomes float.
    grouped = getattr(frame.groupby("A"), op)()
    expected = pd.DataFrame({
        "B": np.array([1.0, 3.0]),
        "C": integer_array([1, 3], dtype="Int64"),
    }, index=pd.Index(['a', 'b'], name='A'))
    tm.assert_frame_equal(grouped, expected)
Example #21
Source File: test_missing.py From recruit with Apache License 2.0 | 6 votes |
def test_nan_handling(self):
    """NaNs are represented as code -1 and never become a category."""
    cat = Categorical(["a", "b", np.nan, "a"])
    tm.assert_index_equal(cat.categories, Index(["a", "b"]))
    tm.assert_numpy_array_equal(cat._codes,
                                np.array([0, 1, -1, 0], dtype=np.int8))

    # Assigning NaN over an existing value flips that code to -1.
    cat[1] = np.nan
    tm.assert_index_equal(cat.categories, Index(["a", "b"]))
    tm.assert_numpy_array_equal(cat._codes,
                                np.array([0, -1, -1, 0], dtype=np.int8))

    # A freshly-built categorical with a NaN behaves the same way.
    cat = Categorical(["a", "b", np.nan, "a"])
    tm.assert_index_equal(cat.categories, Index(["a", "b"]))
    tm.assert_numpy_array_equal(cat._codes,
                                np.array([0, 1, -1, 0], dtype=np.int8))
Example #22
Source File: test_indexing.py From recruit with Apache License 2.0 | 6 votes |
def test_categories_assigments(self):
    """Categories may be reassigned only with a same-length list;
    longer or shorter replacements raise ValueError."""
    cat = Categorical(["a", "b", "c", "a"])

    # Same-length assignment remaps the values one-to-one.
    cat.categories = [1, 2, 3]
    tm.assert_numpy_array_equal(cat.__array__(),
                                np.array([1, 2, 3, 1], dtype=np.int64))
    tm.assert_index_equal(cat.categories, Index([1, 2, 3]))

    # Lengthening is rejected ...
    with pytest.raises(ValueError):
        cat.categories = [1, 2, 3, 4]

    # ... and so is shortening.
    with pytest.raises(ValueError):
        cat.categories = [1, 2]

    # Combinations of sorted/unique:
Example #23
Source File: groupby.py From recruit with Apache License 2.0 | 5 votes |
def test_groupby_extension_no_sort(self, data_for_grouping):
    """Unsorted groupby over an extension-array key matches an unsorted
    factorization of that key."""
    frame = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
                          "B": data_for_grouping})
    result = frame.groupby("B", sort=False).A.mean()

    # Expected index: groups in order of first appearance.
    _, uniques = pd.factorize(data_for_grouping, sort=False)
    expected = pd.Series([1, 3, 4],
                         index=pd.Index(uniques, name="B"),
                         name="A")
    self.assert_series_equal(result, expected)
Example #24
Source File: groupby.py From recruit with Apache License 2.0 | 5 votes |
def test_groupby_extension_agg(self, as_index, data_for_grouping):
    """Groupby mean over an extension-array key works both with the key
    as the result index and reset into a column."""
    frame = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4],
                          "B": data_for_grouping})
    result = frame.groupby("B", as_index=as_index).A.mean()

    # Sorted factorization gives the expected group order.
    _, uniques = pd.factorize(data_for_grouping, sort=True)
    expected = pd.Series([3, 1, 4],
                         index=pd.Index(uniques, name="B"),
                         name="A")
    if as_index:
        self.assert_series_equal(result, expected)
    else:
        expected = expected.reset_index()
        self.assert_frame_equal(result, expected)
Example #25
Source File: pickle_compat.py From recruit with Apache License 2.0 | 5 votes |
def load_newobj_ex(self):
    """Handle the pickle NEWOBJ_EX opcode.

    Pops kwargs, args, and the class from the unpickling stack. For
    ``Index`` subclasses the instance is made via ``object.__new__``
    (compat shim); everything else goes through ``cls.__new__`` with
    the pickled args/kwargs.
    """
    kwargs = self.stack.pop()
    args = self.stack.pop()
    cls = self.stack.pop()

    if issubclass(cls, Index):
        # compat: bypass the class's own __new__ for Index subclasses
        new_obj = object.__new__(cls)
    else:
        new_obj = cls.__new__(cls, *args, **kwargs)

    self.append(new_obj)
Example #26
Source File: test_frame.py From recruit with Apache License 2.0 | 5 votes |
def test_ctor_reindex(self):
    """Constructing a SparseDataFrame with data shorter than the given
    index must raise ValueError (no silent reindex/fill)."""
    four_row_index = pd.Index([0, 1, 2, 3])
    with pytest.raises(ValueError, match=''):
        # Only two values supplied for a four-element index.
        pd.SparseDataFrame({"A": [1, 2]}, index=four_row_index)
Example #27
Source File: test_internals.py From recruit with Apache License 2.0 | 5 votes |
def test_merge(self):
    """Merging two blocks interleaves their rows according to their
    placements within the shared column index."""
    ref_cols = Index(['e', 'a', 'b', 'd', 'f'])

    avals = randn(2, 10)
    bvals = randn(2, 10)
    ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
    bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))

    merged = ablock.merge(bblock)

    # Combined placements come out sorted ...
    tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
                                np.array([0, 1, 2, 3], dtype=np.int64))
    # ... with a's rows landing at 0/2 and b's at 1/3.
    tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
    tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))

    # TODO: merge with mixed type?
Example #28
Source File: test_internals.py From recruit with Apache License 2.0 | 5 votes |
def test_get(self):
    """BlockManager.get returns the correct row for each column, via
    both the plain lookup and the fastpath."""
    cols = Index(list('abc'))
    values = np.random.rand(3, 3)
    block = make_block(values=values.copy(), placement=np.arange(3))
    mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])

    for row, name in enumerate('abc'):
        assert_almost_equal(mgr.get(name, fastpath=False), values[row])
        assert_almost_equal(mgr.get(name).internal_values(), values[row])
Example #29
Source File: test_internals.py From recruit with Apache License 2.0 | 5 votes |
def test_reindex_items(self):
    """Reindexing a non-consolidated manager consolidates the result
    while preserving every selected column's values."""
    # mgr is not consolidated; it has two f8-typed blocks (f8 & f8-2).
    mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
                     'f: bool; g: f8-2')

    selection = ['g', 'c', 'a', 'd']
    reindexed = mgr.reindex_axis(selection, axis=0)

    assert reindexed.nblocks == 2
    tm.assert_index_equal(reindexed.items, pd.Index(selection))

    # Each column must survive the reindex untouched, whichever access
    # path is used.
    for col in selection:
        assert_almost_equal(mgr.get(col, fastpath=False),
                            reindexed.get(col, fastpath=False))
        assert_almost_equal(mgr.get(col).internal_values(),
                            reindexed.get(col).internal_values())
Example #30
Source File: test_frame.py From recruit with Apache License 2.0 | 5 votes |
def test_set_value(self, float_frame):
    """``set_value``/``get_value`` (deprecated, hence the FutureWarning
    checks): setting a value at a new label grows the frame, converts
    the index to object dtype, and returns a new object.
    """
    # ok, as the index gets converted to object
    frame = float_frame.copy()
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        res = frame.set_value('foobar', 'B', 1.5)
    assert res.index.dtype == 'object'

    # NOTE: this mutates the shared float_frame fixture's index in
    # place (res aliases float_frame here) before the next set_value.
    res = float_frame
    res.index = res.index.astype(object)

    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        res = float_frame.set_value('foobar', 'B', 1.5)
    # set_value on a new row label returns a *new* frame with the row
    # appended, leaving the original object distinct.
    assert res is not float_frame
    assert res.index[-1] == 'foobar'
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        assert res.get_value('foobar', 'B') == 1.5

    # Setting a value in a new *column* likewise returns a new frame
    # with the column appended.
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        res2 = res.set_value('foobar', 'qux', 1.5)
    assert res2 is not res
    tm.assert_index_equal(res2.columns,
                          pd.Index(list(float_frame.columns) + ['qux']))
    with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
        assert res2.get_value('foobar', 'qux') == 1.5