Python pandas.io.common._stringify_path() Examples

The following are 30 code examples of pandas.io.common._stringify_path(). Each example is taken from an open-source project; the source file and license are noted above each snippet. You may also want to check out all other available functions and classes of the module pandas.io.common.
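_stringify_path is a small private helper that converts path-like objects (pathlib.Path, py.path.local, or any object implementing __fspath__) into plain strings, so the I/O routines below only have to handle strings and buffers. As a quick orientation, here is a hedged usage sketch; the helper is private and the import location is version-dependent, so it may not exist under this name in newer pandas releases:

from pathlib import Path
from pandas.io.common import _stringify_path  # private helper; present in the pandas versions shown below

print(_stringify_path("data/raw.csv"))       # plain strings pass through unchanged
print(_stringify_path(Path("foo") / "bar"))  # path-like objects become str, e.g. 'foo/bar' (or 'foo\\bar' on Windows)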
Example #1
Source File: pytables.py    From elasticintel with GNU General Public License v3.0
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
           append=None, **kwargs):
    """ store this object, close it if we opened it """

    if append:
        f = lambda store: store.append(key, value, **kwargs)
    else:
        f = lambda store: store.put(key, value, **kwargs)

    path_or_buf = _stringify_path(path_or_buf)
    if isinstance(path_or_buf, string_types):
        with HDFStore(path_or_buf, mode=mode, complevel=complevel,
                      complib=complib) as store:
            f(store)
    else:
        f(path_or_buf) 
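Because to_hdf stringifies path_or_buf before deciding whether it is a path or an already-open store, the public DataFrame.to_hdf API accepts pathlib.Path objects as well as strings. A minimal usage sketch, assuming PyTables is installed; the file name is illustrative:

import pandas as pd
from pathlib import Path

df = pd.DataFrame({"a": [1, 2, 3]})
df.to_hdf(Path("example_store.h5"), key="a", mode="w")  # the Path is converted to str internally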
Example #2
Source File: pytables.py    From vnpy_crypto with MIT License
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
           append=None, **kwargs):
    """ store this object, close it if we opened it """

    if append:
        f = lambda store: store.append(key, value, **kwargs)
    else:
        f = lambda store: store.put(key, value, **kwargs)

    path_or_buf = _stringify_path(path_or_buf)
    if isinstance(path_or_buf, string_types):
        with HDFStore(path_or_buf, mode=mode, complevel=complevel,
                      complib=complib) as store:
            f(store)
    else:
        f(path_or_buf) 
Example #3
Source File: pytables.py    From Splunking-Crime with GNU Affero General Public License v3.0
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
           append=None, **kwargs):
    """ store this object, close it if we opened it """

    if append:
        f = lambda store: store.append(key, value, **kwargs)
    else:
        f = lambda store: store.put(key, value, **kwargs)

    path_or_buf = _stringify_path(path_or_buf)
    if isinstance(path_or_buf, string_types):
        with HDFStore(path_or_buf, mode=mode, complevel=complevel,
                      complib=complib) as store:
            f(store)
    else:
        f(path_or_buf) 
Example #4
Source File: pytables.py    From Splunking-Crime with GNU Affero General Public License v3.0
def __init__(self, path, mode=None, complevel=None, complib=None,
                 fletcher32=False, **kwargs):
        try:
            import tables  # noqa
        except ImportError as ex:  # pragma: no cover
            raise ImportError('HDFStore requires PyTables, "{ex}" problem '
                              'importing'.format(ex=str(ex)))

        if complib is not None and complib not in tables.filters.all_complibs:
            raise ValueError(
                "complib only supports {libs} compression.".format(
                    libs=tables.filters.all_complibs))

        if complib is None and complevel is not None:
            complib = tables.filters.default_complib

        self._path = _stringify_path(path)
        if mode is None:
            mode = 'a'
        self._mode = mode
        self._handle = None
        self._complevel = complevel if complevel else 0
        self._complib = complib
        self._fletcher32 = fletcher32
        self._filters = None
        self.open(mode=mode, **kwargs) 
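Since the constructor stringifies path, HDFStore itself can also be opened with a path-like object. A hedged sketch, assuming PyTables and the blosc compressor are available; the file name is illustrative:

import pandas as pd
from pathlib import Path

with pd.HDFStore(Path("example_store.h5"), mode="a", complevel=9, complib="blosc") as store:
    store.put("df", pd.DataFrame({"a": [1, 2]}))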
Example #5
Source File: test_common.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def test_stringify_path_localpath(self):
        path = os.path.join('foo', 'bar')
        abs_path = os.path.abspath(path)
        lpath = LocalPath(path)
        assert icom._stringify_path(lpath) == abs_path 
Example #6
Source File: test_common.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def test_stringify_path_fspath(self):
        p = CustomFSPath('foo/bar.csv')
        result = icom._stringify_path(p)
        assert result == 'foo/bar.csv' 
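CustomFSPath here is a pandas test helper; any object implementing the os.PathLike protocol (i.e. defining __fspath__) is treated the same way. A minimal stand-in, shown only to illustrate the protocol:

import os

class FSPathLike:
    """Illustrative os.PathLike implementation, analogous to the CustomFSPath test helper."""
    def __init__(self, path):
        self.path = path

    def __fspath__(self):
        return self.path

assert os.fspath(FSPathLike("foo/bar.csv")) == "foo/bar.csv"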
Example #7
Source File: excel.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def write(self, writer, sheet_name='Sheet1', startrow=0,
              startcol=0, freeze_panes=None, engine=None):
        """
        writer : string or ExcelWriter object
            File path or existing ExcelWriter
        sheet_name : string, default 'Sheet1'
            Name of sheet which will contain DataFrame
        startrow :
            upper left cell row to dump data frame
        startcol :
            upper left cell column to dump data frame
        freeze_panes : tuple of integer (length 2), default None
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen
        engine : string, default None
            write engine to use if writer is a path - you can also set this
            via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
            and ``io.excel.xlsm.writer``.
        """
        from pandas.io.excel import ExcelWriter
        from pandas.io.common import _stringify_path

        if isinstance(writer, ExcelWriter):
            need_save = False
        else:
            writer = ExcelWriter(_stringify_path(writer), engine=engine)
            need_save = True

        formatted_cells = self.get_formatted_cells()
        writer.write_cells(formatted_cells, sheet_name,
                           startrow=startrow, startcol=startcol,
                           freeze_panes=freeze_panes)
        if need_save:
            writer.save() 
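Because writer is stringified before an ExcelWriter is constructed, the public DataFrame.to_excel method, which routes through a writer like this one, also accepts path-like objects. A hedged usage sketch, assuming an Excel engine such as openpyxl is installed; the file name is illustrative:

import pandas as pd
from pathlib import Path

pd.DataFrame({"a": [1, 2]}).to_excel(Path("report.xlsx"), sheet_name="Sheet1", index=False)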
Example #8
Source File: pytables.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def __init__(self, path, mode=None, complevel=None, complib=None,
                 fletcher32=False, **kwargs):

        if 'format' in kwargs:
            raise ValueError('format is not a defined argument for HDFStore')

        try:
            import tables  # noqa
        except ImportError as ex:  # pragma: no cover
            raise ImportError('HDFStore requires PyTables, "{ex!s}" problem '
                              'importing'.format(ex=ex))

        if complib is not None and complib not in tables.filters.all_complibs:
            raise ValueError(
                "complib only supports {libs} compression.".format(
                    libs=tables.filters.all_complibs))

        if complib is None and complevel is not None:
            complib = tables.filters.default_complib

        self._path = _stringify_path(path)
        if mode is None:
            mode = 'a'
        self._mode = mode
        self._handle = None
        self._complevel = complevel if complevel else 0
        self._complib = complib
        self._fletcher32 = fletcher32
        self._filters = None
        self.open(mode=mode, **kwargs) 
Example #9
Source File: feather_format.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def read_feather(path, columns=None, use_threads=True):
    """
    Load a feather-format object from the file path

    .. versionadded 0.20.0

    Parameters
    ----------
    path : string file path, or file-like object
    columns : sequence, default None
        If not provided, all columns are read

        .. versionadded 0.24.0
    nthreads : int, default 1
        Number of CPU threads to use when reading to pandas.DataFrame

       .. versionadded 0.21.0
       .. deprecated 0.24.0
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads

       .. versionadded 0.24.0

    Returns
    -------
    type of object stored in file

    """

    feather, pyarrow = _try_import()
    path = _stringify_path(path)

    if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):
        int_use_threads = int(use_threads)
        if int_use_threads < 1:
            int_use_threads = 1
        return feather.read_feather(path, columns=columns,
                                    nthreads=int_use_threads)

    return feather.read_feather(path, columns=columns,
                                use_threads=bool(use_threads)) 
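At the public level, the same stringification means pd.read_feather accepts Path objects. A hedged round-trip sketch, assuming pyarrow is installed; the file name is illustrative:

import pandas as pd
from pathlib import Path

path = Path("measurements.feather")
pd.DataFrame({"a": [1, 2, 3]}).to_feather(path)  # write a small file so the read below has something to load
df = pd.read_feather(path, columns=["a"])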
Example #10
Source File: excel.py    From Splunking-Crime with GNU Affero General Public License v3.0
def write(self, writer, sheet_name='Sheet1', startrow=0,
              startcol=0, freeze_panes=None, engine=None):
        """
        writer : string or ExcelWriter object
            File path or existing ExcelWriter
        sheet_name : string, default 'Sheet1'
            Name of sheet which will contain DataFrame
        startrow :
            upper left cell row to dump data frame
        startcol :
            upper left cell column to dump data frame
        freeze_panes : tuple of integer (length 2), default None
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen
        engine : string, default None
            write engine to use if writer is a path - you can also set this
            via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
            and ``io.excel.xlsm.writer``.
        """
        from pandas.io.excel import ExcelWriter
        from pandas.io.common import _stringify_path

        if isinstance(writer, ExcelWriter):
            need_save = False
        else:
            writer = ExcelWriter(_stringify_path(writer), engine=engine)
            need_save = True

        formatted_cells = self.get_formatted_cells()
        writer.write_cells(formatted_cells, sheet_name,
                           startrow=startrow, startcol=startcol,
                           freeze_panes=freeze_panes)
        if need_save:
            writer.save() 
Example #11
Source File: test_common.py    From recruit with Apache License 2.0
def test_stringify_path_localpath(self):
        path = os.path.join('foo', 'bar')
        abs_path = os.path.abspath(path)
        lpath = LocalPath(path)
        assert icom._stringify_path(lpath) == abs_path 
Example #12
Source File: feather_format.py    From Splunking-Crime with GNU Affero General Public License v3.0
def read_feather(path, nthreads=1):
    """
    Load a feather-format object from the file path

    .. versionadded 0.20.0

    Parameters
    ----------
    path : string file path, or file-like object
    nthreads : int, default 1
        Number of CPU threads to use when reading to pandas.DataFrame

       .. versionadded 0.21.0

    Returns
    -------
    type of object stored in file

    """

    feather = _try_import()
    path = _stringify_path(path)

    if feather.__version__ < LooseVersion('0.4.0'):
        return feather.read_dataframe(path)

    return feather.read_dataframe(path, nthreads=nthreads) 
Example #13
Source File: test_common.py    From elasticintel with GNU General Public License v3.0
def test_stringify_path_pathlib(self):
        tm._skip_if_no_pathlib()

        rel_path = common._stringify_path(Path('.'))
        assert rel_path == '.'
        redundant_path = common._stringify_path(Path('foo//bar'))
        assert redundant_path == os.path.join('foo', 'bar') 
Example #14
Source File: test_common.py    From elasticintel with GNU General Public License v3.0
def test_stringify_path_localpath(self):
        tm._skip_if_no_localpath()

        path = os.path.join('foo', 'bar')
        abs_path = os.path.abspath(path)
        lpath = LocalPath(path)
        assert common._stringify_path(lpath) == abs_path 
Example #15
Source File: test_common.py    From elasticintel with GNU General Public License v3.0
def test_stringify_path_fspath(self):
        p = CustomFSPath('foo/bar.csv')
        result = common._stringify_path(p)
        assert result == 'foo/bar.csv' 
Example #16
Source File: excel.py    From elasticintel with GNU General Public License v3.0
def write(self, writer, sheet_name='Sheet1', startrow=0,
              startcol=0, freeze_panes=None, engine=None):
        """
        writer : string or ExcelWriter object
            File path or existing ExcelWriter
        sheet_name : string, default 'Sheet1'
            Name of sheet which will contain DataFrame
        startrow :
            upper left cell row to dump data frame
        startcol :
            upper left cell column to dump data frame
        freeze_panes : tuple of integer (length 2), default None
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen
        engine : string, default None
            write engine to use if writer is a path - you can also set this
            via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
            and ``io.excel.xlsm.writer``.
        """
        from pandas.io.excel import ExcelWriter
        from pandas.io.common import _stringify_path

        if isinstance(writer, ExcelWriter):
            need_save = False
        else:
            writer = ExcelWriter(_stringify_path(writer), engine=engine)
            need_save = True

        formatted_cells = self.get_formatted_cells()
        writer.write_cells(formatted_cells, sheet_name,
                           startrow=startrow, startcol=startcol,
                           freeze_panes=freeze_panes)
        if need_save:
            writer.save() 
Example #17
Source File: feather_format.py    From elasticintel with GNU General Public License v3.0
def read_feather(path, nthreads=1):
    """
    Load a feather-format object from the file path

    .. versionadded 0.20.0

    Parameters
    ----------
    path : string file path, or file-like object
    nthreads : int, default 1
        Number of CPU threads to use when reading to pandas.DataFrame

       .. versionadded 0.21.0

    Returns
    -------
    type of object stored in file

    """

    feather = _try_import()
    path = _stringify_path(path)

    if feather.__version__ < LooseVersion('0.4.0'):
        return feather.read_dataframe(path)

    return feather.read_dataframe(path, nthreads=nthreads) 
Example #18
Source File: pickle.py    From elasticintel with GNU General Public License v3.0
def to_pickle(obj, path, compression='infer', protocol=pkl.HIGHEST_PROTOCOL):
    """
    Pickle (serialize) object to input file path

    Parameters
    ----------
    obj : any object
    path : string
        File path
    compression : {'infer', 'gzip', 'bz2', 'xz', None}, default 'infer'
        a string representing the compression to use in the output file

        .. versionadded:: 0.20.0
    protocol : int
        Int which indicates which protocol should be used by the pickler,
        default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
        values for this parameter depend on the version of Python. For Python
        2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value.
        For Python >= 3.4, 4 is a valid value. A negative value for the
        protocol parameter is equivalent to setting its value to
        HIGHEST_PROTOCOL.

        .. [1] https://docs.python.org/3/library/pickle.html
        .. versionadded:: 0.21.0


    """
    path = _stringify_path(path)
    inferred_compression = _infer_compression(path, compression)
    f, fh = _get_handle(path, 'wb',
                        compression=inferred_compression,
                        is_text=False)
    if protocol < 0:
        protocol = pkl.HIGHEST_PROTOCOL
    try:
        pkl.dump(obj, f, protocol=protocol)
    finally:
        for _f in fh:
            _f.close() 
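The same normalization is why DataFrame.to_pickle and pd.read_pickle accept Path objects, and why compression can be inferred from the stringified file extension. A hedged sketch with an illustrative file name:

import pandas as pd
from pathlib import Path

path = Path("frame.pkl.gz")                  # .gz extension -> gzip compression inferred
pd.DataFrame({"a": [1, 2]}).to_pickle(path)
df = pd.read_pickle(path)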
Example #19
Source File: feather_format.py    From vnpy_crypto with MIT License
def read_feather(path, nthreads=1):
    """
    Load a feather-format object from the file path

    .. versionadded 0.20.0

    Parameters
    ----------
    path : string file path, or file-like object
    nthreads : int, default 1
        Number of CPU threads to use when reading to pandas.DataFrame

       .. versionadded 0.21.0

    Returns
    -------
    type of object stored in file

    """

    feather = _try_import()
    path = _stringify_path(path)

    if LooseVersion(feather.__version__) < LooseVersion('0.4.0'):
        return feather.read_dataframe(path)

    return feather.read_dataframe(path, nthreads=nthreads) 
Example #20
Source File: test_common.py    From recruit with Apache License 2.0
def test_stringify_path_pathlib(self):
        rel_path = icom._stringify_path(Path('.'))
        assert rel_path == '.'
        redundant_path = icom._stringify_path(Path('foo//bar'))
        assert redundant_path == os.path.join('foo', 'bar') 
Example #21
Source File: test_common.py    From recruit with Apache License 2.0
def test_stringify_path_fspath(self):
        p = CustomFSPath('foo/bar.csv')
        result = icom._stringify_path(p)
        assert result == 'foo/bar.csv' 
Example #22
Source File: excel.py    From vnpy_crypto with MIT License
def write(self, writer, sheet_name='Sheet1', startrow=0,
              startcol=0, freeze_panes=None, engine=None):
        """
        writer : string or ExcelWriter object
            File path or existing ExcelWriter
        sheet_name : string, default 'Sheet1'
            Name of sheet which will contain DataFrame
        startrow :
            upper left cell row to dump data frame
        startcol :
            upper left cell column to dump data frame
        freeze_panes : tuple of integer (length 2), default None
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen
        engine : string, default None
            write engine to use if writer is a path - you can also set this
            via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
            and ``io.excel.xlsm.writer``.
        """
        from pandas.io.excel import ExcelWriter
        from pandas.io.common import _stringify_path

        if isinstance(writer, ExcelWriter):
            need_save = False
        else:
            writer = ExcelWriter(_stringify_path(writer), engine=engine)
            need_save = True

        formatted_cells = self.get_formatted_cells()
        writer.write_cells(formatted_cells, sheet_name,
                           startrow=startrow, startcol=startcol,
                           freeze_panes=freeze_panes)
        if need_save:
            writer.save() 
Example #23
Source File: test_common.py    From vnpy_crypto with MIT License
def test_stringify_path_fspath(self):
        p = CustomFSPath('foo/bar.csv')
        result = common._stringify_path(p)
        assert result == 'foo/bar.csv' 
Example #24
Source File: excel.py    From recruit with Apache License 2.0
def write(self, writer, sheet_name='Sheet1', startrow=0,
              startcol=0, freeze_panes=None, engine=None):
        """
        writer : string or ExcelWriter object
            File path or existing ExcelWriter
        sheet_name : string, default 'Sheet1'
            Name of sheet which will contain DataFrame
        startrow :
            upper left cell row to dump data frame
        startcol :
            upper left cell column to dump data frame
        freeze_panes : tuple of integer (length 2), default None
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen
        engine : string, default None
            write engine to use if writer is a path - you can also set this
            via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
            and ``io.excel.xlsm.writer``.
        """
        from pandas.io.excel import ExcelWriter
        from pandas.io.common import _stringify_path

        if isinstance(writer, ExcelWriter):
            need_save = False
        else:
            writer = ExcelWriter(_stringify_path(writer), engine=engine)
            need_save = True

        formatted_cells = self.get_formatted_cells()
        writer.write_cells(formatted_cells, sheet_name,
                           startrow=startrow, startcol=startcol,
                           freeze_panes=freeze_panes)
        if need_save:
            writer.save() 
Example #25
Source File: test_common.py    From vnpy_crypto with MIT License
def test_stringify_path_localpath(self):
        path = os.path.join('foo', 'bar')
        abs_path = os.path.abspath(path)
        lpath = LocalPath(path)
        assert common._stringify_path(lpath) == abs_path 
Example #26
Source File: test_common.py    From vnpy_crypto with MIT License
def test_stringify_path_pathlib(self):
        rel_path = common._stringify_path(Path('.'))
        assert rel_path == '.'
        redundant_path = common._stringify_path(Path('foo//bar'))
        assert redundant_path == os.path.join('foo', 'bar') 
Example #27
Source File: test_common.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def test_stringify_path_pathlib(self):
        rel_path = icom._stringify_path(Path('.'))
        assert rel_path == '.'
        redundant_path = icom._stringify_path(Path('foo//bar'))
        assert redundant_path == os.path.join('foo', 'bar') 
Example #28
Source File: pytables.py    From recruit with Apache License 2.0
def __init__(self, path, mode=None, complevel=None, complib=None,
                 fletcher32=False, **kwargs):

        if 'format' in kwargs:
            raise ValueError('format is not a defined argument for HDFStore')

        try:
            import tables  # noqa
        except ImportError as ex:  # pragma: no cover
            raise ImportError('HDFStore requires PyTables, "{ex!s}" problem '
                              'importing'.format(ex=ex))

        if complib is not None and complib not in tables.filters.all_complibs:
            raise ValueError(
                "complib only supports {libs} compression.".format(
                    libs=tables.filters.all_complibs))

        if complib is None and complevel is not None:
            complib = tables.filters.default_complib

        self._path = _stringify_path(path)
        if mode is None:
            mode = 'a'
        self._mode = mode
        self._handle = None
        self._complevel = complevel if complevel else 0
        self._complib = complib
        self._fletcher32 = fletcher32
        self._filters = None
        self.open(mode=mode, **kwargs) 
Example #29
Source File: feather_format.py    From recruit with Apache License 2.0
def read_feather(path, columns=None, use_threads=True):
    """
    Load a feather-format object from the file path

    .. versionadded 0.20.0

    Parameters
    ----------
    path : string file path, or file-like object
    columns : sequence, default None
        If not provided, all columns are read

        .. versionadded 0.24.0
    nthreads : int, default 1
        Number of CPU threads to use when reading to pandas.DataFrame

       .. versionadded 0.21.0
       .. deprecated 0.24.0
    use_threads : bool, default True
        Whether to parallelize reading using multiple threads

       .. versionadded 0.24.0

    Returns
    -------
    type of object stored in file

    """

    feather, pyarrow = _try_import()
    path = _stringify_path(path)

    if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):
        int_use_threads = int(use_threads)
        if int_use_threads < 1:
            int_use_threads = 1
        return feather.read_feather(path, columns=columns,
                                    nthreads=int_use_threads)

    return feather.read_feather(path, columns=columns,
                                use_threads=bool(use_threads)) 
Example #30
Source File: sasreader.py    From recruit with Apache License 2.0
def read_sas(filepath_or_buffer, format=None, index=None, encoding=None,
             chunksize=None, iterator=False):
    """
    Read SAS files stored as either XPORT or SAS7BDAT format files.

    Parameters
    ----------
    filepath_or_buffer : string or file-like object
        Path to the SAS file.
    format : string {'xport', 'sas7bdat'} or None
        If None, file format is inferred from file extension. If 'xport' or
        'sas7bdat', uses the corresponding format.
    index : identifier of index column, defaults to None
        Identifier of column that should be used as index of the DataFrame.
    encoding : string, default is None
        Encoding for text data.  If None, text data are stored as raw bytes.
    chunksize : int
        Read file `chunksize` lines at a time, returns iterator.
    iterator : bool, defaults to False
        If True, returns an iterator for reading the file incrementally.

    Returns
    -------
    DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
    or XportReader
    """
    if format is None:
        buffer_error_msg = ("If this is a buffer object rather "
                            "than a string name, you must specify "
                            "a format string")
        filepath_or_buffer = _stringify_path(filepath_or_buffer)
        if not isinstance(filepath_or_buffer, compat.string_types):
            raise ValueError(buffer_error_msg)
        fname = filepath_or_buffer.lower()
        if fname.endswith(".xpt"):
            format = "xport"
        elif fname.endswith(".sas7bdat"):
            format = "sas7bdat"
        else:
            raise ValueError("unable to infer format of SAS file")

    if format.lower() == 'xport':
        from pandas.io.sas.sas_xport import XportReader
        reader = XportReader(filepath_or_buffer, index=index,
                             encoding=encoding,
                             chunksize=chunksize)
    elif format.lower() == 'sas7bdat':
        from pandas.io.sas.sas7bdat import SAS7BDATReader
        reader = SAS7BDATReader(filepath_or_buffer, index=index,
                                encoding=encoding,
                                chunksize=chunksize)
    else:
        raise ValueError('unknown SAS format')

    if iterator or chunksize:
        return reader

    data = reader.read()
    reader.close()
    return data
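Because filepath_or_buffer is stringified before the extension check, pd.read_sas can infer the format from a pathlib.Path as well as from a string. A hedged usage sketch; the file name is illustrative and the file must already exist:

import pandas as pd
from pathlib import Path

df = pd.read_sas(Path("example.sas7bdat"), encoding="latin-1")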