Python csv Module Examples

The following are 20 code examples that use the Python csv module (documented at https://docs.python.org/3/library/csv.html). You can go to the original project or source file by following the links above each example.
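For orientation, the snippet below is a minimal sketch of the two csv interfaces most of these examples build on, csv.reader and csv.DictReader; the file name example.csv is only a placeholder.

import csv

# Read rows as lists of strings with csv.reader.
with open('example.csv', newline='') as f:
    for row in csv.reader(f, delimiter=',', quotechar='"'):
        print(row)

# Read rows as dictionaries keyed by the header row with csv.DictReader.
with open('example.csv', newline='') as f:
    for record in csv.DictReader(f):
        print(record)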
Example #1
Source File: utils.py    From pyDataverse with MIT License
def json_to_dict(data):
    """Convert JSON to a dict().

    See more about the json module at
    https://docs.python.org/3.5/library/json.html

    Parameters
    ----------
    data : string
        Data as a json-formatted string.

    Returns
    -------
    dict
        Data as Python Dictionary.

    """
    try:
        return json.loads(data)
    except Exception as e:
        raise e 
Example #2
Source File: utils.py    From pyDataverse with MIT License
def dict_to_json(data):
    """Convert dict() to JSON-formatted string.

    See more about the json module at
    https://docs.python.org/3.5/library/json.html

    Parameters
    ----------
    data : dict
        Data as Python Dictionary.

    Returns
    -------
    string
        Data as a json-formatted string.

    """
    try:
        return json.dumps(data, ensure_ascii=True, indent=2)
    except Exception as e:
        raise e 
Example #3
Source File: utils.py    From pyDataverse with MIT License
def read_file(filename, mode='r'):
    """Read in a file.

    Parameters
    ----------
    filename : string
        Filename with full path.
    mode : string
        Read mode of file. Defaults to `r`. See more at
        https://docs.python.org/3.5/library/functions.html#open

    Returns
    -------
    string
        Returns data as string.

    """
    try:
        with open(filename, mode) as f:
            data = f.read()
        return data
    except IOError:
        print('An error occurred trying to read the file {}.'.format(filename))
    except Exception as e:
        raise e 
Example #4
Source File: utils.py    From pyDataverse with MIT License
def write_file(filename, data, mode='w'):
    """Write data in a file.

    Parameters
    ----------
    filename : string
        Filename with full path.
    data : string
        Data to be stored.
    mode : string
        Write mode of file. Defaults to `w`. See more at
        https://docs.python.org/3.5/library/functions.html#open

    """
    try:
        with open(filename, mode) as f:
            f.write(data)
    except IOError:
        print('An error occurred trying to write the file {}.'.format(filename))
    except Exception as e:
        raise e 
Example #5
Source File: utils.py    From pyDataverse with MIT License
def read_file_json(filename):
    """Read in a json file.

    See more about the json module at
    https://docs.python.org/3.5/library/json.html

    Parameters
    ----------
    filename : string
        Filename with full path.

    Returns
    -------
    dict
        Data as Python Dictionary.

    """
    try:
        return json_to_dict(read_file(filename, 'r'))
    except Exception as e:
        raise e 
Example #6
Source File: utils.py    From pyDataverse with MIT License
def read_file_csv(filename):
    """Read in CSV file.

    See more at `csv.reader() <https://docs.python.org/3.5/library/csv.html>`_.

    Parameters
    ----------
    filename : string
        Full filename with path of file.

    Returns
    -------
    list
        List of rows from the CSV file, where each row is a list of column values.

    """
    try:
        with open(filename, newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            # Consume the reader while the file is still open; returning the
            # reader object itself would fail once the `with` block closes it.
            return list(reader)
    except Exception as e:
        raise e 
Example #7
Source File: csv.py    From jc with MIT License
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        List of dictionaries. Each dictionary represents a row in the csv file:

        [
          {
            csv file converted to a Dictionary
            https://docs.python.org/3/library/csv.html
          }
        ]
    """

    # No further processing
    return proc_data 
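The row-per-dictionary schema described in the docstring above is the structure that csv.DictReader yields directly; a minimal sketch, where data.csv is a hypothetical input file:

import csv

# Hypothetical input file; each row becomes a dict keyed by the header line.
with open('data.csv', newline='') as f:
    raw_structured_data = list(csv.DictReader(f))

# raw_structured_data now matches the schema above and could be passed to process().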
Example #8
Source File: generator_utils.py    From pycsvw with Apache License 2.0
def read_csv(handle):
    """ Read CSV file
    :param handle: File-like object of the CSV file
    :return: csv.reader object
    """

    # These functions are to handle unicode in Python 2 as described in:
    # https://docs.python.org/2/library/csv.html#examples
    def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
        """ csv.py doesn't do Unicode; encode temporarily as UTF-8."""
        csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
                                dialect=dialect, **kwargs)
        for row in csv_reader:
            # decode UTF-8 back to Unicode, cell by cell:
            yield [unicode(cell, 'utf-8') for cell in row]

    def utf_8_encoder(unicode_csv_data):
        """ Encode with UTF-8."""
        for line in unicode_csv_data:
            yield line.encode('utf-8')

    return unicode_csv_reader(handle) if PY2 else csv.reader(handle) 
Example #9
Source File: __init__.py    From indra with BSD 2-Clause "Simplified" License
def write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
                       quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
                       encoding='utf-8'):
    # Python 3 version
    if sys.version_info[0] >= 3:
        # Open the file in text mode with given encoding
        # Set newline arg to '' (see https://docs.python.org/3/library/csv.html)
        with open(filename, 'w', newline='', encoding=encoding) as f:
            # Next, get the csv writer, with unicode delimiter and quotechar
            csv_writer = csv.writer(f, delimiter=delimiter, quotechar=quotechar,
                                quoting=quoting, lineterminator=lineterminator)
            # Write the rows to the file
            csv_writer.writerows(rows)
    # Python 2 version
    else:
        # Open the file, no encoding specified
        with open(filename, 'w') as f:
            # Next, get the csv writer, passing delimiter and quotechar as
            # bytestrings rather than unicode
            csv_writer = csv.writer(f, delimiter=delimiter.encode(encoding),
                                quotechar=quotechar.encode(encoding),
                                quoting=quoting, lineterminator=lineterminator)
            for row in rows:
                csv_writer.writerow([unicode(cell).encode(encoding)
                                     for cell in row]) 
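A usage sketch for write_unicode_csv() above; the file name and rows are illustrative only:

rows = [['name', 'city'], ['Zoë', 'Zürich'], ['José', 'São Paulo']]
write_unicode_csv('people.csv', rows, delimiter=',', quotechar='"')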
Example #10
Source File: cli.py    From whatportis with MIT License
def unicode_csv_reader(data):
    """
    Handle Unicode CSV data
    See: https://docs.python.org/2/library/csv.html
    """

    def utf_8_encoder(unicode_csv_data):
        for line in unicode_csv_data:
            yield line.encode("utf-8")

    csv_reader = csv.reader(utf_8_encoder(data))
    for row in csv_reader:
        yield [unicode(cell, "utf-8") for cell in row] 
Example #11
Source File: io.py    From qiime2 with BSD 3-Clause "New" or "Revised" License
def _format(self, value):
        if isinstance(value, str):
            return value
        elif isinstance(value, float):
            # Use fixed precision or scientific notation as necessary (both are
            # roundtrippable in the metadata file format), with up to 15 digits
            # *total* precision (i.e. before and after the decimal point),
            # rounding if necessary. Trailing zeros or decimal points will not
            # be included in the formatted string (e.g. 42.0 will be formatted
            # as "42"). A precision of 15 digits is used because that is within
            # the 64-bit floating point spec (things get weird after that).
            #
            # Using repr() and str() each have their own predefined precision
            # which varies across Python versions. Using the string formatting
            # presentation types (e.g. %g, %f) without specifying a precision
            # will usually default to 6 digits past the decimal point, which
            # seems a little low.
            #
            # References:
            #
            # - https://stackoverflow.com/a/2440786/3776794
            # - https://stackoverflow.com/a/2440708/3776794
            # - https://docs.python.org/3/library/string.html#
            #       format-specification-mini-language
            # - https://stackoverflow.com/a/20586479/3776794
            # - https://drj11.wordpress.com/2007/07/03/python-poor-printing-
            #       of-floating-point/
            return '{0:.15g}'.format(value)
        else:
            raise NotImplementedError


# Credit: https://stackoverflow.com/a/4703508/3776794 
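A quick illustration of the '{0:.15g}' behaviour that the comment above describes: trailing zeros and decimal points are dropped, and scientific notation is used only when needed.

>>> '{0:.15g}'.format(42.0)
'42'
>>> '{0:.15g}'.format(1 / 3)
'0.333333333333333'
>>> '{0:.15g}'.format(0.000001)
'1e-06'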
Example #12
Source File: io.py    From qiime2 with BSD 3-Clause "New" or "Revised" License
def write(self, filepath):
        # Newline settings based on recommendation from csv docs:
        #     https://docs.python.org/3/library/csv.html#id3

        # Do NOT write a BOM, hence utf-8 not utf-8-sig
        with open(filepath, 'w', newline='', encoding='utf-8') as fh:
            tsv_writer = csv.writer(fh, dialect='excel-tab', strict=True)

            md = self._metadata
            header = [md.id_header]
            types_directive = ['#q2:types']

            if isinstance(md, Metadata):
                for name, props in md.columns.items():
                    header.append(name)
                    types_directive.append(props.type)
            elif isinstance(md, MetadataColumn):
                header.append(md.name)
                types_directive.append(md.type)
            else:
                raise NotImplementedError

            tsv_writer.writerow(header)
            tsv_writer.writerow(types_directive)

            df = md.to_dataframe()
            df.fillna('', inplace=True)
            df = df.applymap(self._format)
            tsv_writer.writerows(df.itertuples(index=True)) 
Example #13
Source File: ScribusGeneratorBackend.py    From ScribusGenerator with MIT License
def getCsvData(self, csvfile):
        # Read CSV file and return a 2-dimensional list containing the data.
        # TODO: check to replace with https://docs.python.org/3/library/csv.html#csv.DictReader
        # Note: file() is the Python 2 built-in; on Python 3 this would be open().
        reader = csv.reader(file(csvfile), delimiter=self.__dataObject.getCsvSeparator(),
                            skipinitialspace=True, doublequote=True)
        result = []
        for row in reader:
            if(len(row) > 0): # strip empty lines in source CSV
                rowlist = []
                for col in row:
                    rowlist.append(col)
                result.append(rowlist)
        return result 
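A minimal sketch of the csv.DictReader replacement that the TODO above refers to; get_csv_data_dicts and its separator parameter are hypothetical names standing in for the original method and self.__dataObject.getCsvSeparator():

import csv

def get_csv_data_dicts(csv_path, separator=','):
    # Hypothetical DictReader-based variant: one dict per row, keyed by
    # the header line, instead of a plain 2-dimensional list.
    with open(csv_path, newline='') as handle:
        reader = csv.DictReader(handle, delimiter=separator,
                                skipinitialspace=True, doublequote=True)
        return list(reader)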
Example #14
Source File: utils.py    From text with BSD 3-Clause "New" or "Revised" License
def unicode_csv_reader(unicode_csv_data, **kwargs):
    r"""Since the standard csv library does not handle unicode in Python 2, we need a wrapper.
    Borrowed and slightly modified from the Python docs:
    https://docs.python.org/2/library/csv.html#csv-examples

    Arguments:
        unicode_csv_data: unicode csv data (see example below)

    Examples:
        >>> from torchtext.utils import unicode_csv_reader
        >>> import io
        >>> with io.open(data_path, encoding="utf8") as f:
        >>>     reader = unicode_csv_reader(f)

    """

    # Fix field larger than field limit error
    maxInt = sys.maxsize
    while True:
        # decrease the maxInt value by factor 10
        # as long as the OverflowError occurs.
        try:
            csv.field_size_limit(maxInt)
            break
        except OverflowError:
            maxInt = int(maxInt / 10)
    csv.field_size_limit(maxInt)

    for line in csv.reader(unicode_csv_data, **kwargs):
        yield line 
Example #15
Source File: utils.py    From django-htk with MIT License
def import_organization_customers_from_csv_file(organization_customer, csv_file):
    import csv
    allowed_columns = (
        'last_name, first_name',
        'first_name',
        'last_name',
        'email',
        'address',
        'address_city',
        'address_state',
        'address_zipcode',
        'mailing_address',
        'mailing_address2',
        'mailing_city',
        'mailing_state',
        'mailing_zipcode',
    )
    allowed_columns_dict = { k : True for k in allowed_columns }
    column_mapping = {}
    customers = []
    # https://docs.python.org/2/library/csv.html#csv.DictReader
    # csv.DictReader reads the first row as fieldnames
    reader = csv.DictReader(csv_file.file.read().splitlines())
    for row in reader:
        customer_data = { k : v.strip() for k, v in row.items() }
        if 'last_name, first_name' in customer_data:
            (last_name, first_name,) = [name.strip() for name in customer_data['last_name, first_name'].split(',', 1)]
            customer_data['first_name'] = first_name
            customer_data['last_name'] = last_name
        first_name = customer_data.get('first_name', '')
        last_name = customer_data.get('last_name', '')
        name = '%s%s%s' % (first_name, ' ' if first_name else '', last_name,)
        customer_data['name'] = name
        if 'mailing_address' in customer_data and 'mailing_address2' in customer_data:
            mailing_address2 = customer_data.pop('mailing_address2', '')
            if mailing_address2:
                customer_data['mailing_address'] += ', %s' % mailing_address2
        customer = create_organization_customer_member(organization_customer, customer_data)
        customers.append(customer)
    return customers 
Example #16
Source File: __init__.py    From indra with BSD 2-Clause "Simplified" License
def read_unicode_csv(filename, delimiter=',', quotechar='"',
                     quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
                     encoding='utf-8', skiprows=0):
    # Python 3 version
    if sys.version_info[0] >= 3:
        # Open the file in text mode with given encoding
        # Set newline arg to '' (see https://docs.python.org/3/library/csv.html)
        with open(filename, 'r', newline='', encoding=encoding) as f:
            generator = read_unicode_csv_fileobj(f, delimiter=delimiter,
                                            quotechar=quotechar,
                                            quoting=quoting,
                                            lineterminator=lineterminator,
                                            encoding=encoding,
                                            skiprows=skiprows)
            for row in generator:
                yield row
    # Python 2 version
    else:
        # Open the file in binary mode
        with open(filename, 'rb') as f:
            generator = read_unicode_csv_fileobj(f, delimiter=delimiter,
                                            quotechar=quotechar,
                                            quoting=quoting,
                                            lineterminator=lineterminator,
                                            encoding=encoding,
                                            skiprows=skiprows)
            for row in generator:
                yield row 
Example #17
Source File: utils.py    From audio with BSD 2-Clause "Simplified" License
def unicode_csv_reader(unicode_csv_data: TextIOWrapper, **kwargs: Any) -> Any:
    r"""Since the standard csv library does not handle unicode in Python 2, we need a wrapper.
    Borrowed and slightly modified from the Python docs:
    https://docs.python.org/2/library/csv.html#csv-examples
    Args:
        unicode_csv_data (TextIOWrapper): unicode csv data (see example below)

    Examples:
        >>> from torchaudio.datasets.utils import unicode_csv_reader
        >>> import io
        >>> with io.open(data_path, encoding="utf8") as f:
        >>>     reader = unicode_csv_reader(f)
    """

    # Fix field larger than field limit error
    maxInt = sys.maxsize
    while True:
        # decrease the maxInt value by factor 10
        # as long as the OverflowError occurs.
        try:
            csv.field_size_limit(maxInt)
            break
        except OverflowError:
            maxInt = int(maxInt / 10)
    csv.field_size_limit(maxInt)

    for line in csv.reader(unicode_csv_data, **kwargs):
        yield line 
Example #18
Source File: simple_reader_knowledge_flexible.py    From OpenBookQA with Apache License 2.0
def read_tsv_file_to_json_flexible(file_name):
    """Reads a tsv file to json
    See https://docs.python.org/3/library/csv.html for options and formats, etc.
    """
    import csv
    with open(file_name, newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter='\t')

        for row in reader:
            yield row 
Example #19
Source File: simple_reader_knowledge_flexible.py    From OpenBookQA with Apache License 2.0
def read_csv_file_to_json_flexible(file_name):
    """Reads a csv file to json
    See https://docs.python.org/3/library/csv.html for options and formats, etc.
    """
    import csv
    with open(file_name, newline='') as csvfile:
        reader = csv.DictReader(csvfile)

        for row in reader:
            yield row 
Example #20
Source File: test_process.py    From kobo-predict with BSD 2-Clause "Simplified" License
def _get_csv_(self):
        # todo: get the csv.reader to handle unicode as done here:
        # http://docs.python.org/library/csv.html#examples
        url = reverse('csv_export', kwargs={
            'username': self.user.username, 'id_string': self.xform.id_string})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        actual_csv = self._get_response_content(response)
        actual_lines = actual_csv.split("\n")
        return csv.reader(actual_lines)