Python Code Examples for write csv

The following 60 Python code examples relate to "write csv". They are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
Example 1
Project: xgbmagic   Author: mirri66   File: xgbmagic.py    License: MIT License 6 votes vote down vote up
def write_csv(self, filename, include_actual=False):
        """
        Write prediction results to a CSV file.

        :param filename: path of the CSV file to create
        :param include_actual: if actual values are known for the test set,
            also write them in an 'actual' column
        """
        # 'w' + newline='' is the correct mode for csv.writer on Python 3
        # (the original 'wb' raises TypeError because csv writes str, not bytes)
        with open(filename, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            headers = [self.id_column, self.target_column]
            if include_actual:
                headers.append('actual')
            writer.writerow(headers)
            try:
                for idx, value in enumerate(self.output):
                    test_id = self.test_df[self.id_column][idx]
                    test_output = self.output[idx]
                    to_write = [test_id, test_output]
                    if include_actual:
                        to_write.append(self.test_df[self.target_column][idx])
                    writer.writerow(to_write)
                print('results written to ' + filename)
            except (KeyError, IndexError) as exc:
                # narrow except: report what went wrong instead of silently
                # swallowing every error (the original used a bare except)
                print('write_csv failed: {}'.format(exc))
Example 2
Project: toggle.sg-download   Author: 0x776b7364   File: get_episode_URLs.py    License: MIT License 6 votes vote down vote up
def writeCsv(title, urllist):
    """
    Export urllist to <title>.csv.

    Each entry in urllist is a (url, title) pair.  The file contains one
    "url,title" line per entry, a blank line, then all URLs on a single
    space-separated line (for import into download_toggle_video.py).
    """
    print("[i] Exporting to CSV ...")
    outputfile = title + ".csv"

    # context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on error); print() form works on
    # both Python 2 and 3
    with open(outputfile, "w") as text_file:
        # writing a table of URL,title
        for url in urllist:
            text_file.write(url[0] + "," + " ".join(url[1].split()) + "\n")

        text_file.write("\n")

        # writing URLs into a single line for import into download_toggle_video.py
        text_file.write("".join(url[0] + " " for url in urllist))
Example 3
Project: spatial_access   Author: GeoDaCenter   File: p2p.py    License: GNU General Public License v3.0 6 votes vote down vote up
def write_csv(self, outfile=None):
        """
        Write the transit matrix to csv.

        Note: Use write_tmx (as opposed to this method) to
        save the transit matrix unless exporting for external use.

        Arguments:
            outfile: optional filename.
        Raises:
            WriteCSVFailedException: filename does not have correct extension.
        """
        if not outfile:
            outfile = self._get_output_filename(self.network_type, extension='csv')
        # endswith is the correct suffix test; the original substring check
        # ('.csv' not in outfile) wrongly accepted names like 'data.csv.bak'
        if not outfile.endswith('.csv'):
            raise WriteCSVFailedException('given filename does not have the correct extension (.csv)')
        self.matrix_interface.write_csv(outfile)
Example 4
Project: nlp-architect   Author: NervanaSystems   File: data.py    License: Apache License 2.0 6 votes vote down vote up
def write_to_csv(output, np_feature_vectors, np_dic, np_list):
    """
    Write data to csv file

    Args:
        output (str): output file path
        np_feature_vectors (:obj:`np.ndarray`): numpy vectors
        np_dic (dict): dict, keys: the noun phrase, value: the features
        np_list (list): features list
    """
    # newline='' prevents the csv module inserting blank lines on Windows
    with open(output, "w", encoding="utf-8", newline="") as out_file:
        writer = csv.writer(out_file, delimiter=",", quotechar='"')
        for i, _ in enumerate(np_feature_vectors):
            np_vector = numpy.append(np_feature_vectors[i], np_dic[np_list[i]])
            writer.writerow(np_vector)
    # report success only after the file has actually been written
    # (the original printed "is saved" before writing anything)
    print("prepared data CSV file is saved in {0}".format(output))
Example 5
Project: lidar   Author: giswqs   File: slicing.py    License: MIT License 6 votes vote down vote up
def write_dep_csv(dep_list, csv_file):
    """Write depression statistics to a CSV file, one row per depression.

    :param dep_list: iterable of depression objects (id, level, count, ...)
    :param csv_file: path of the CSV file to create
    """
    header_fields = ["id", "level", "count", "area", "volume", "avg-depth",
                     "max-depth", "min-elev", "max-elev", "children-id",
                     "region-id", "perimeter", "major-axis", "minor-axis",
                     "elongatedness", "eccentricity", "orientation",
                     "area-bbox-ratio"]
    # 'with' guarantees the handle is closed; also avoid naming the file
    # handle 'csv', which shadowed the csv module in the original
    with open(csv_file, "w") as out:
        out.write(",".join(header_fields) + "\n")
        row_fmt = ("{},{},{},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{},{},"
                   "{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f},{:.2f}")
        for dep in dep_list:
            # children ids may contain commas; replace them with ':' so the
            # row keeps a fixed 18-column layout
            line = row_fmt.format(dep.id, dep.level, dep.count, dep.size,
                                  dep.volume, dep.meanDepth, dep.maxDepth,
                                  dep.minElev, dep.bndElev,
                                  str(dep.inNbrId).replace(",", ":"),
                                  dep.regionId, dep.perimeter, dep.major_axis,
                                  dep.minor_axis, dep.elongatedness,
                                  dep.eccentricity, dep.orientation,
                                  dep.area_bbox_ratio)
            out.write(line + "\n")


# extracting individual level image


# extracting individual level image 
Example 6
Project: coiltraine   Author: felipecode   File: coil_logger.py    License: MIT License 6 votes vote down vote up
def write_on_csv(checkpoint_name, output):
    """
    Append one line of network outputs to <checkpoint_name>.csv, so results
    are faster to load and check.

    Args:
        checkpoint_name: the name of the checkpoint being written
        output: sequence of numbers being written on the file

    Returns:
        None
    """
    root_path = "_logs"

    full_path_name = os.path.join(root_path, EXPERIMENT_BATCH_NAME,
                                  EXPERIMENT_NAME, PROCESS_NAME + '_csv')

    file_name = os.path.join(full_path_name, str(checkpoint_name) + '.csv')

    with open(file_name, 'a+') as f:
        # join handles any output length; the original indexed output[0]
        # unconditionally and raised IndexError on an empty sequence
        f.write(",".join("%f" % value for value in output))
        f.write("\n")
Example 7
Project: me-trpo   Author: thanard   File: env_helpers.py    License: MIT License 6 votes vote down vote up
def write_to_csv(data, timesteps, path):
    """Save per-timestep values to a CSV file.

    :param data: dict mapping column name -> sequence of values (converted
        in place to 2D np arrays by make_values_np_array)
    :param timesteps: sequence of timestep indices, one per row
    :param path: output CSV file path
    """
    # Make it 2D np array
    make_values_np_array(data)
    # Save to csv; columns are sorted by name so the output order is stable
    import csv
    header = sorted(data.keys())
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['timesteps'] + header)
        for i, timestep in enumerate(timesteps):
            writer.writerow([str(timestep)] + [str(data[h][i]) for h in header])
Example 8
Project: python_mozetl   Author: mozilla   File: utils.py    License: MIT License 6 votes vote down vote up
def write_csv_to_s3(dataframe, bucket, key, header=True):
    """Upload a dataframe to S3 by staging a CSV file in a temp directory.

    :param dataframe: dataframe to serialize
    :param bucket: destination S3 bucket name
    :param key: destination S3 object key
    :param header: whether to include the column header row
    """
    # mkdtemp already creates the directory, so the original's
    # exists()/makedirs() check was redundant
    path = tempfile.mkdtemp()
    try:
        filepath = os.path.join(path, "temp.csv")
        write_csv(dataframe, filepath, header)

        # create the s3 resource for this transaction
        s3 = boto3.client("s3", region_name="us-west-2")

        # write the contents of the file to right location
        upload_file_to_s3(s3, filepath, bucket, key)

        logger.info("Successfully wrote {} to {}".format(key, bucket))
    finally:
        # clean up the temporary directory even if the upload raised
        # (the original leaked it on failure)
        shutil.rmtree(path)
Example 9
Project: fileflow   Author: industrydive   File: dataframe_utils.py    License: Apache License 2.0 6 votes vote down vote up
def clean_and_write_dataframe_to_csv(data, filename):
    """
    Cleans a dataframe of np.NaNs and saves to file via pandas.to_csv

    :param data: data to write to CSV
    :type data: :class:`pandas.DataFrame`
    :param filename: Path to file to write CSV to. if None, string of data
        will be returned
    :type filename: str | None
    :return: If the filename is None, returns the string of data. Otherwise
        returns None.
    :rtype: str | None
    """
    # cleans np.NaN values
    data = data.where((pd.notnull(data)), None)
    # If filename=None, to_csv will return a string.
    # NOTE: 'dtype' and 'skipinitialspace' are read_csv options, not to_csv
    # options -- passing them raises TypeError on current pandas, so they
    # were dropped; na_rep must be a string, so cleaned cells are written
    # as empty fields.
    result = data.to_csv(path_or_buf=filename, encoding='utf-8', index=False,
                         na_rep='', quoting=csv.QUOTE_ALL)
    logging.info("Dataframe of shape %s has been stored." % str(data.shape))

    return result
Example 10
Project: introduction_to_python_TEAMLAB_MOOC   Author: TeamLab   File: stock_data_crawler.py    License: MIT License 6 votes vote down vote up
def write_csv_file_by_result(stock_data, filename):
    """Write a two-dimensional list of stock data to a CSV file.

    Args:
        stock_data (list): two-dimensional list of stock records to save
        filename (str): name of the file the data is written to

    Examples:
        >>> import stock_data_crawler as sdc
        >>> url = 'http://finance.google.com/finance/historical?q=KRX:005930&startdate=2013-01-01&enddate=2015-12-30&output=csv'
        >>> stock_data = sdc.get_stock_data(url)
        >>> high_data = sdc.get_attribute_data(stock_data, "High", 2014, 12)
        >>> sdc.write_csv_file_by_result(stock_data,"example.csv")
        >>> f = open("example.csv", "r", encoding="utf8")
        >>> f.read()[:100]
        '\ufeffDate,Open,High,Low,Close,Volume\n30-Dec-15,1260000.00,1272000.00,1254000.00,1260000.00,203349\n29-Dec'
        >>> f.close()
    """
    # utf-8-sig prepends the BOM ('\ufeff') that the doctest above expects,
    # which also keeps the file Excel-friendly
    with open(filename, "w", encoding="utf-8-sig", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(stock_data)
Example 11
Project: Deep-Exemplar-based-Colorization   Author: msracver   File: parse_log.py    License: MIT License 6 votes vote down vote up
def write_csv(output_filename, dict_list, delimiter, verbose=False):
    """Write a list of dicts to a CSV file.

    :param output_filename: path of the CSV file to create
    :param dict_list: list of row dicts; the first dict's keys become the
        header fields
    :param delimiter: field delimiter character
    :param verbose: if True, print progress messages
    """

    if not dict_list:
        if verbose:
            print('Not writing %s; no lines to write' % output_filename)
        return

    # Pass the delimiter straight to DictWriter instead of mutating the
    # shared csv.excel dialect class, which changed it process-wide in the
    # original; newline='' prevents blank lines on Windows.
    with open(output_filename, 'w', newline='') as f:
        dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
                                     delimiter=delimiter)
        dict_writer.writeheader()
        dict_writer.writerows(dict_list)
    if verbose:
        print('Wrote %s' % output_filename)
Example 12
Project: agentless-system-crawler   Author: cloudviz   File: formatters.py    License: Apache License 2.0 6 votes vote down vote up
def write_in_csv_format(iostream, frame):
    """
    Writes frame data and metadata into iostream in csv format.

    :param iostream: a CStringIO used to buffer the formatted features.
    :param frame: a BaseFrame object to be written into iostream
    :return: None
    """
    # one metadata line first, then one line per feature
    metadata_json = json.dumps(frame.metadata, separators=(',', ':'))
    iostream.write('%s\t%s\t%s\n' %
                   ('metadata', json.dumps('metadata'), metadata_json))
    for key, val, feature_type in frame.data:
        # namedtuple-like values are converted to plain dicts before dumping
        entry = val if isinstance(val, dict) else val._asdict()
        entry_json = json.dumps(entry, separators=(',', ':'))
        iostream.write('%s\t%s\t%s\n' %
                       (feature_type, json.dumps(key), entry_json))
Example 13
Project: python_mozetl   Author: mozilla   File: utils.py    License: MIT License 6 votes vote down vote up
def write_csv(dataframe, path, header=True):
    """ Write a dataframe to local disk.

    Disclaimer: Do not write csv files larger than driver memory. This
    is ~15GB for ec2 c3.xlarge (due to caching overhead).
    """

    # NOTE: Before spark 2.1, toLocalIterator will timeout on some dataframes
    # because rdd materialization can take a long time. Instead of using
    # an iterator over all partitions, collect everything into driver memory.
    logger.info("Writing {} rows to {}".format(dataframe.count(), path))

    # binary mode for Python 2, text mode with newline='' for Python 3
    if six.PY2:
        fout = open(path, "wb")
    else:
        fout = open(path, "w", newline="")
    with fout:
        writer = csv.writer(fout)

        if header:
            writer.writerow(dataframe.columns)

        for row in dataframe.collect():
            if six.PY2:
                # Python 2 csv needs utf-8 encoded bytes
                row = [text_type(cell).encode("utf-8") for cell in row]
            writer.writerow(row)
Example 14
Project: ibis   Author: ibis-project   File: pandas_interop.py    License: Apache License 2.0 6 votes vote down vote up
def write_csv(self, path):
        """Serialize self.df to a temporary CSV file and upload it to HDFS.

        :param path: destination HDFS path
        :return: the destination path, for convenience
        """
        with tempfile.NamedTemporaryFile() as f:
            # Write the DataFrame to the temporary file path
            if options.verbose:
                util.log(
                    'Writing DataFrame to temporary file {}'.format(f.name)
                )

            # no header/index; NULLs are marked explicitly so the load side
            # can tell them apart from empty strings
            csv_kwargs = dict(
                header=False,
                index=False,
                sep=',',
                quoting=csv.QUOTE_NONE,
                escapechar='\\',
                na_rep='#NULL',
            )
            self.df.to_csv(f.name, **csv_kwargs)
            f.seek(0)

            if options.verbose:
                util.log('Writing CSV to: {0}'.format(path))

            self.hdfs.put(path, f.name)
        return path
Example 15
Project: sonata   Author: AllenInstitute   File: csv_adaptors.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def write_csv(path, spiketrain_reader, mode='w', sort_order=SortOrder.none, include_header=True,
              include_population=True, units='ms', **kwargs):
    """Write spike trains to a space-delimited CSV file.

    :param path: output file path; parent directories are created as needed
    :param spiketrain_reader: object yielding (time, population, node_id)
        spike tuples
    :param mode: file open mode
    :param sort_order: order in which spikes are read
    :param include_header: write the column-name header row
    :param include_population: include the population column in the output
    :param units: time units to convert spike times into
    """
    path_dir = os.path.dirname(path)
    if path_dir and not os.path.exists(path_dir):
        os.makedirs(path_dir)

    conv_factor = find_conversion(spiketrain_reader.units, units)
    with open(path, mode=mode) as f:
        # single writer: the two branches of the original were identical
        # except for which columns they emitted
        csv_writer = csv.writer(f, delimiter=' ')
        if include_header:
            if include_population:
                csv_writer.writerow(csv_headers)
            else:
                csv_writer.writerow([c for c in csv_headers if c != col_population])
        for spk in spiketrain_reader.spikes(sort_order=sort_order):
            if include_population:
                csv_writer.writerow([spk[0]*conv_factor, spk[1], spk[2]])
            else:
                csv_writer.writerow([spk[0]*conv_factor, spk[2]])
Example 16
Project: python-data-sci-basics   Author: katychuang   File: my_utils.py    License: MIT License 6 votes vote down vote up
def write_min_max_csv(filename, data_sample):
    """Write only the records holding the min or max price to a file.

    :param filename: output path, written via write_to_file
    :param data_sample: iterable of records; column index 2 holds the price
    """
    # find min & max price from data_sample; renamed so the min/max
    # builtins are not shadowed (a defect in the original)
    min_price = find_max_min(data_sample, 2, "min")
    max_price = find_max_min(data_sample, 2, "max")

    extremes = [record for record in data_sample
                if float(record[2]) in (min_price, max_price)]

    write_to_file(filename, extremes)


#csv with just 2 columns 
Example 17
Project: aurum-datadiscovery   Author: mitdbg   File: query_processing_benchmarks.py    License: MIT License 6 votes vote down vote up
def write_results_to_csv_one_query(name, results, csv=False, dat=False):
    """Format benchmark results for one query and hand them to write_csv.

    :param name: output name passed through to write_csv
    :param results: dict mapping x-axis value -> list of (5p, median, 95p)
        tuples; only the first tuple of each entry is written
    :param csv: emit comma-separated output with a CSV header
    :param dat: emit space-separated output with a gnuplot-style header
    """
    from collections import OrderedDict
    ordered = OrderedDict(sorted(results.items()))

    if csv:
        header = "x_axis,q2_5p,q2_median,q2_95p,q3_5p,q3_median,q3_95p,q4_5p,q4_median,q4_95p"
        separator = ','
    elif dat:
        header = "# x_axis q2_5p q2_median q2_95p q3_5p q3_median q3_95p q4_5p q4_median q4_95p"
        separator = ' '
    else:
        header = None
        separator = None

    lines = [header]
    for x_value, measurements in ordered.items():
        fivep_2, median_2, ninetyp_2 = measurements[0]
        lines.append(separator.join(
            [str(x_value), str(fivep_2), str(median_2), str(ninetyp_2)]))

    write_csv(name, lines)
Example 18
Project: napari   Author: napari   File: io.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def write_csv(
    filename: str,
    data: Union[List, np.ndarray],
    column_names: Optional[List[str]] = None,
):
    """Write a csv file.

    Parameters
    ----------
    filename : str
        Filename for saving csv.
    data : list or ndarray
        Table values, contained in a list of lists or an ndarray.
    column_names : list, optional
        List of column names for table data.
    """
    # newline='' is required so the csv module controls line endings itself
    with open(filename, mode='w', newline='') as csvfile:
        table_writer = csv.writer(
            csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL,
        )
        if column_names is not None:
            table_writer.writerow(column_names)
        table_writer.writerows(data)
Example 19
Project: marvin   Author: sdss   File: __init__.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def write_csv(self, path=None, filename=None, model=None, overwrite=None, **kwargs):
        ''' Writes the datamodels out to CSV

        Dispatches on *model* to the matching datamodel object(s); 'vac'
        is accepted but currently writes nothing.
        '''

        assert model in self.model_map + [None], 'model must be drp, dap, query, or vac'

        common = dict(path=path, filename=filename, overwrite=overwrite)
        if model == 'query':
            # the query datamodel takes no extra kwargs and needs db access
            self.querydm.write_csv(db=True, **common)
        elif model == 'dap':
            for dm in (self.dapdm.properties, self.dapdm.models):
                dm.write_csv(**common, **kwargs)
        elif model == 'drp_cube':
            for dm in (self.drpcubedm.spectra, self.drpcubedm.datacubes):
                dm.write_csv(**common, **kwargs)
        elif model == 'drp_rss':
            for dm in (self.drprssdm.spectra, self.drprssdm.rss):
                dm.write_csv(**common, **kwargs)
        elif model == 'vac':
            pass
Example 20
Project: temporalCNN   Author: charlotte-pel   File: save.py    License: GNU General Public License v3.0 6 votes vote down vote up
def write_predictions_csv(test_file, p_test):
    """Store the predictions p_test in test_file.

    INPUT:
        -test_file: csv file where to store the results
        -p_test: predictions
            (either predicted class
                or class probability distribution output by the Softmax layer)
    """
    print("len(p_test.shape)", len(p_test.shape))
    if len(p_test.shape) == 1:
        # 1D input: saving class only [integer]
        np.savetxt(test_file, p_test.astype(int), delimiter=',', fmt='%i')
        return
    # 2D input: saving proba [float]
    np.savetxt(test_file, p_test, delimiter=',', fmt='%1.6f')
		

#EOF 
Example 21
Project: csv-to-sqlite   Author: zblesk   File: csv_to_sqlite.py    License: MIT License 6 votes vote down vote up
def write_csv(files, output, options):
    """Import a list of CSV files into an SQLite database.

    :param files: iterable of CSV file paths
    :param output: path of the SQLite database to create/extend
    :param options: import options (typing style etc.)
    """
    write_out("Output file: " + output)
    conn = sqlite3.connect(output)
    write_out("Typing style: " + options.typing_style)
    totalRowsInserted = 0
    startTime = time.perf_counter()
    try:
        with click.progressbar(files) as _files:
            actual = files if write_out.verbose else _files
            for file in actual:
                try:
                    file = file.strip()
                    write_out("Processing " + file)
                    with CsvFileInfo(file, options) as info:
                        info.determine_types()
                        totalRowsInserted += info.save_to_db(conn)
                except Exception as exc:
                    # best-effort import: report the failing table, keep going
                    print("Error on table {0}: \n {1}".format(file, exc))
        print("Written {0} rows into {1} tables in {2:.3f} seconds".format(
            totalRowsInserted, len(files), time.perf_counter() - startTime))
        conn.commit()
    finally:
        # the original leaked the connection; always release it
        conn.close()
Example 22
Project: instarecon   Author: vergl4s   File: instarecon.py    License: MIT License 6 votes vote down vote up
def write_output_csv(self, filename=None):
        """Writes output for each target as csv in filename.

        :param filename: destination path; '~' is expanded. No-op if falsy.
        """
        if filename:
            filename = os.path.expanduser(filename)

            print("# Saving output csv file")

            # collect every host's lines, separated by a newline row
            output_as_lines = []
            for host in self.targets:
                for line in host.print_as_csv_lines():
                    output_as_lines.append(line)
                output_as_lines.append(["\n"])

            # Open the file once, after collecting all lines -- the original
            # re-opened and rewrote it inside the per-host loop.  'w' with
            # newline='' is required for csv.writer on Python 3 ('wb' raises
            # TypeError).  The unused 'output_written' flag was dropped.
            with open(filename, "w", newline="") as f:
                writer = csv.writer(f)
                for line in output_as_lines:
                    writer.writerow(line)
Example 23
Project: faster-rcnn-scenarios   Author: djdam   File: parse_log.py    License: MIT License 6 votes vote down vote up
def write_csv(output_filename, dict_list, delimiter, verbose=False):
    """Write a list of dicts to a CSV file.

    :param output_filename: path of the CSV file to create
    :param dict_list: list of row dicts; the first dict's keys become the
        header fields
    :param delimiter: field delimiter character
    :param verbose: if True, print progress messages
    """

    if not dict_list:
        if verbose:
            print('Not writing %s; no lines to write' % output_filename)
        return

    # Pass the delimiter straight to DictWriter instead of mutating the
    # shared csv.excel dialect class, which changed it process-wide in the
    # original; newline='' prevents blank lines on Windows.
    with open(output_filename, 'w', newline='') as f:
        dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
                                     delimiter=delimiter)
        dict_writer.writeheader()
        dict_writer.writerows(dict_list)
    if verbose:
        print('Wrote %s' % output_filename)
Example 24
Project: Learning-Python-for-Forensics-Second-Edition   Author: PacktPublishing   File: file_lister.py    License: MIT License 6 votes vote down vote up
def write_csv(conn, target, custodian_id):
    """
    The write_csv function generates a CSV report from the
        Files table
    :param conn: The Sqlite3 database connection object
    :param target: The output filepath
    :param custodian_id: The custodian ID
    :return: None
    """
    cur = conn.cursor()
    # parameterized query -- the original interpolated custodian_id with
    # str.format, which is an SQL-injection vector
    sql = "SELECT * FROM Files where custodian = ?"
    cur.execute(sql, (custodian_id,))

    cols = [description[0] for description in cur.description]
    logger.info('Writing CSV report')
    # newline='' prevents the csv module doubling line breaks on Windows
    with open(target, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(cols)

        for entry in cur:
            csv_writer.writerow(entry)
        csv_file.flush()
    logger.info('CSV report completed: ' + target)
Example 25
Project: pyfx   Author: jmelett   File: portfolio.py    License: MIT License 6 votes vote down vote up
def write_to_csv(self, position):
        """Append one closed position as a CSV row, writing the header row
        first when the output file does not exist yet.

        :param position: closed position object exposing the trade
            attributes listed below
        """
        add_headings = not os.path.isfile(self.csv_out_file)
        # newline='' stops the csv module doubling line breaks on Windows
        # ('a' is equivalent to the original 'at' text-append mode)
        with open(self.csv_out_file, 'a', newline='') as fh:
            items = OrderedDict(
                open_time=position.open_time,
                close_time=position.close_time,
                instrument=position.instrument,
                side=position.side,
                open_price=position.open_price,
                close_price=position.close_price,
                profit_cash=position.profit_cash,
                profit_pips=position.profit_pips,
                max_profit_pips=position.max_profit_pips,
                max_loss_pips=position.max_loss_pips,
            )

            writer = csv.writer(fh)
            if add_headings:
                writer.writerow(items.keys())
            writer.writerow(items.values())
Example 26
Project: Deep-Learning-Based-Structural-Damage-Detection   Author: QinganZhao   File: parse_log.py    License: MIT License 6 votes vote down vote up
def write_csv(output_filename, dict_list, delimiter, verbose=False):
    """Write a list of dicts to a CSV file.

    :param output_filename: path of the CSV file to create
    :param dict_list: list of row dicts; the first dict's keys become the
        header fields
    :param delimiter: field delimiter character
    :param verbose: if True, print progress messages
    """

    if not dict_list:
        if verbose:
            print('Not writing %s; no lines to write' % output_filename)
        return

    # Pass the delimiter straight to DictWriter instead of mutating the
    # shared csv.excel dialect class, which changed it process-wide in the
    # original; newline='' prevents blank lines on Windows.
    with open(output_filename, 'w', newline='') as f:
        dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
                                     delimiter=delimiter)
        dict_writer.writeheader()
        dict_writer.writerows(dict_list)
    if verbose:
        print('Wrote %s' % output_filename)
Example 27
Project: opentaps_seas   Author: opentaps   File: models.py    License: GNU Lesser General Public License v3.0 6 votes vote down vote up
def write_csv_data(qs, output, columns, with_header=True, convert_field=None, convert_uom='uom_id', convert_to=None):
    """Write queryset rows to CSV, optionally converting one value field
    into a different unit of measure.

    :param qs: iterable of model instances
    :param output: file-like object the csv is written to
    :param columns: list of single-entry {field_name: header_label} dicts
    :param with_header: write the header row first
    :param convert_field: name of the field whose value should be converted
    :param convert_uom: field holding the source unit-of-measure id
    :param convert_to: target unit of measure
    """
    writer = csv.writer(output)
    # conversion is active only when all three convert parameters are given
    convert = bool(convert_field and convert_uom and convert_to)
    header = [list(c.values())[0] for c in columns]
    fields = [list(c.keys())[0] for c in columns]
    if with_header:
        writer.writerow(header)
    for d in qs:
        row = []
        for f in fields:
            val = d.__dict__.get(f)
            if convert and convert_field == f:
                # look up the row's own unit and convert into the target one
                f_uom = UnitOfMeasure.get(d.__dict__.get(convert_uom))
                val = f_uom.convert_amount_to(val, convert_to)
            row.append(val)
        writer.writerow(row)
Example 28
Project: Phonopy-Spectroscopy   Author: JMSkelton   File: TextExport.py    License: MIT License 6 votes vote down vote up
def WriteDataCSV(dataRows, filePath):
    """ Write row-wise data to an Excel-compatible CSV file. """

    outputWriter = None

    # Workaround for a bug in the csv module, where using the csv.writer on
    # Windows inserts extra blank lines between rows.

    if sys.platform.startswith("win"):
        # Try to open the file with newline = '' set (Python >= 3).
        # If this is not possible, issue a RuntimeWarning.

        if sys.version_info.major >= 3:
            outputWriter = open(filePath, 'w', newline='')
        else:
            warnings.warn("CSV files output from Python < 3 on Windows platforms may have blank lines between rows.", RuntimeWarning)

    # idiomatic identity test against None (the original used '== None');
    # also dropped the stray C-style semicolons
    if outputWriter is None:
        outputWriter = open(filePath, 'w')

    outputWriterCSV = csv.writer(outputWriter, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)

    for row in dataRows:
        outputWriterCSV.writerow(row)

    outputWriter.close()
Example 29
Project: iAI   Author: aimuch   File: parse_log.py    License: MIT License 6 votes vote down vote up
def write_csv(output_filename, dict_list, delimiter, verbose=False):
    """Write a list of dicts to a CSV file.

    :param output_filename: path of the CSV file to create
    :param dict_list: list of row dicts; the first dict's keys become the
        header fields
    :param delimiter: field delimiter character
    :param verbose: if True, print progress messages
    """

    if not dict_list:
        if verbose:
            print('Not writing %s; no lines to write' % output_filename)
        return

    # Pass the delimiter straight to DictWriter instead of mutating the
    # shared csv.excel dialect class, which changed it process-wide in the
    # original; newline='' prevents blank lines on Windows.
    with open(output_filename, 'w', newline='') as f:
        dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
                                     delimiter=delimiter)
        dict_writer.writeheader()
        dict_writer.writerows(dict_list)
    if verbose:
        print('Wrote %s' % output_filename)
Example 30
Project: hpgan   Author: ebarsoum   File: split_h36m_data.py    License: MIT License 6 votes vote down vote up
def write_to_csv(self, items, file_path):
        '''
        Write file path and its target pair in a CSV file format.

        Args:
            items: list of paths and their corresponding label if provided.
            file_path(str): target file path.
        '''
        # Python 2 needs binary mode, Python 3 needs newline=''; the two
        # duplicated branches of the original differed only in these
        # open() arguments
        if sys.version_info[0] < 3:
            open_kwargs = {'mode': 'wb'}
        else:
            open_kwargs = {'mode': 'w', 'newline': ''}
        with open(file_path, **open_kwargs) as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            for item in items:
                writer.writerow(item)
Example 31
Project: tyssue   Author: DamCB   File: csv.py    License: GNU General Public License v3.0 6 votes vote down vote up
def write_storm_csv(
    filename, points, coords=["x", "y", "z"], split_by=None, **csv_args
):
    """
    Saves a point cloud array in the storm format.

    :param filename: output path (a '.csv' suffix is appended if missing,
        or used as a prefix when split_by is given)
    :param points: DataFrame of point coordinates; NaN rows are dropped
    :param coords: names of the coordinate columns to export
    :param split_by: optional column name; when given, one file is written
        per distinct value of that column
    :param csv_args: extra keyword arguments forwarded to DataFrame.to_csv
    """
    columns = ["frame", "x [nm]", "y [nm]", "z [nm]", "uncertainty_xy", "uncertainty_z"]
    points = points.dropna()
    storm_points = pd.DataFrame(np.zeros((points.shape[0], 6)), columns=columns)
    storm_points[["x [nm]", "y [nm]", "z [nm]"]] = points[coords].values
    storm_points["frame"] = 1
    storm_points[["uncertainty_xy", "uncertainty_z"]] = 2.1
    # tab separated values are faster and more portable than excel
    if split_by is None:
        if not filename.endswith(".csv"):
            filename = filename + ".csv"
        storm_points.to_csv(filename, **csv_args)
    elif split_by in points.columns:
        # BUG FIX: DataFrame.columns is an Index, not a callable -- the
        # original `points.columns()` raised TypeError on this branch
        storm_points[split_by] = points[split_by]
        # separated files by the column split_by
        storm_points.groupby(split_by).apply(
            lambda df: df.to_csv(
                "{}_{}.csv".format(filename, df[split_by].iloc[0]), **csv_args
            )
        )
Example 32
Project: openelections-data-or   Author: openelections   File: lane_2000_primary_parser.py    License: MIT License 6 votes vote down vote up
def writeCSV(allCanvasses):
	"""Write normalised canvass results for every precinct to `outfile`.

	Relies on module-level names: outfile, headers, county, office_lookup,
	candidate_lookup and normaliseName.  (The original defined an inner
	`listGet` helper that was never called; it has been removed.)
	"""
	with open(outfile, 'wb') as csvfile:
		w = unicodecsv.writer(csvfile, encoding='utf-8')
		w.writerow(headers)

		for canvass in allCanvasses:
			for precinct, results in canvass.results.iteritems():
				for index, result in enumerate(results):
					normalisedOffice = office_lookup[canvass.office]  # Normalise the office
					candidate = canvass.candidates[index]
					normalisedCandidate = candidate_lookup.get(candidate, normaliseName(candidate))  # Normalise the candidate

					row = [county, precinct, normalisedOffice, canvass.district,
							canvass.party, normalisedCandidate, result]

					# print(row) is equivalent to the Py2 print statement for
					# a single argument, and keeps the module importable on Py3
					print(row)
					w.writerow(row)
Example 33
Project: WorkControl   Author: lyk19940625   File: get_features_into_CSV.py    License: Apache License 2.0 6 votes vote down vote up
def write_into_csv(self, path_faces_personX, path_csv_from_photos):
        """Extract 128D face features for every photo in a directory and
        write them as rows of a CSV file.

        :param path_faces_personX: directory containing one person's photos
        :param path_csv_from_photos: output CSV path
        """
        photos_list = os.listdir(path_faces_personX)
        with open(path_csv_from_photos, "w", newline="") as csvfile:
            writer = csv.writer(csvfile)
            if photos_list:
                # iterate the photos directly instead of range(len(...));
                # the original's `i += 1` inside the for loop was a no-op
                for photo in photos_list:
                    # call return_128d_features() to obtain the 128D features
                    print("正在读的人脸图像:", path_faces_personX + "/" + photo)
                    features_128d = self.return_128d_features(path_faces_personX + "/" + photo)
                    #  print(features_128d)
                    # skip photos in which no face was detected
                    if features_128d == 0:
                        continue
                    writer.writerow(features_128d)
            else:
                print("Warning: Empty photos in "+path_faces_personX+'/')
                writer.writerow("")
Example 34
Project: cadasta-platform   Author: Cadasta   File: shape.py    License: GNU Affero General Public License v3.0 6 votes vote down vote up
def write_csv_row_and_shp(self, entity, metadatum):
        """Append one entity's attributes as a CSV row (creating the CSV
        lazily on first use) and, for 'locations', also write the shapefile
        layer.

        :param entity: object whose attribute values are exported
        :param metadatum: per-resource dict carrying 'title',
            'attr_columns' and, once created here, 'csv_file'/'csv_writer'
        """
        if self.is_standalone:
            # Create CSV file if not yet created
            if not metadatum.get('csv_file'):
                fn = os.path.join(self.dir_path, metadatum['title'] + '.csv')
                f = open(fn, 'w+', newline='')
                # NOTE(review): the handle is stashed in metadatum and not
                # closed here -- presumably closed by the caller when the
                # export finishes; confirm against call sites
                metadatum['csv_file'] = f
                w = csv.writer(f)
                metadatum['csv_writer'] = w
                w.writerow(metadatum['attr_columns'].keys())

            attr_values = self.get_attr_values(entity, metadatum)
            # start from the column template so every column is present,
            # then overlay this entity's actual values
            data = metadatum['attr_columns'].copy()
            data.update(attr_values)
            writer = metadatum['csv_writer']
            writer.writerow(list(data.values()))

        if metadatum['title'] == 'locations':
            self.write_shp_layer(entity)
Example 35
Project: open_model_zoo   Author: opencv   File: main.py    License: Apache License 2.0 6 votes vote down vote up
def write_csv_result(csv_file, processing_info, metric_results):
    """Append per-metric result rows for one evaluation run to *csv_file*.

    A header row is written first when the file does not exist yet.
    """
    is_new_file = not check_file_existence(csv_file)
    model, launcher, device, tags, dataset = processing_info
    columns = ['model', 'launcher', 'device', 'dataset', 'tags',
               'metric_name', 'metric_type', 'metric_value']
    base_row = {
        'model': model,
        'launcher': launcher,
        'device': device.upper(),
        'tags': ' '.join(tags) if tags else '',
        'dataset': dataset
    }

    with open(csv_file, 'a+', newline='') as out:
        writer = DictWriter(out, fieldnames=columns)
        if is_new_file:
            writer.writeheader()
        # One row per metric, sharing the run-identifying columns.
        for metric in metric_results:
            row = dict(base_row)
            row['metric_name'] = metric['name']
            row['metric_type'] = metric['type']
            row['metric_value'] = metric['value']
            writer.writerow(row)
Example 36
Project: SPN.pytorch   Author: yeezhu   File: voc.py    License: MIT License 6 votes vote down vote up
def write_object_labels_csv(file, labeled_data):
    """Write image-level object labels to a CSV file.

    :param file: destination CSV path
    :param labeled_data: mapping of image name -> per-category label sequence
    """
    print('[dataset] write file %s' % file)
    # newline='' prevents the csv module from emitting blank lines on Windows.
    with open(file, 'w', newline='') as csvfile:
        fieldnames = ['name']
        fieldnames.extend(object_categories)
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()
        for (name, labels) in labeled_data.items():
            example = {'name': name}
            # Iterate over all categories instead of a hard-coded 20 so this
            # stays correct if object_categories ever changes size.
            for i in range(len(fieldnames) - 1):
                example[fieldnames[i + 1]] = int(labels[i])
            writer.writerow(example)
    # The 'with' block closes the file; the redundant close() was removed.
Example 37
Project: coiltraine   Author: felipecode   File: coil_logger.py    License: MIT License 6 votes vote down vote up
def write_on_error_csv(error_file_name, output):
    """
    Keep the errors written so runs can be quickly recovered.

    Args
        error_file_name: the name of the checkpoint being written
        output: numeric value appended to the error file

    Returns:
        None
    """
    root_path = "_logs"
    experiment_dir = os.path.join(root_path, EXPERIMENT_BATCH_NAME,
                                  EXPERIMENT_NAME)
    error_csv = os.path.join(experiment_dir,
                             str(error_file_name) + '_error' + '.csv')

    # Append one value per line so the error history accumulates across calls.
    with open(error_csv, 'a+') as error_file:
        error_file.write("%f" % output)
        error_file.write("\n")
Example 38
Project: dax   Author: VUIIS   File: xnat_tools_utils.py    License: MIT License 6 votes vote down vote up
def write_csv(csv_string, csv_file, exe_name=''):
    """
    Method to write the report as a csv file
     with the values from REDCap

    :param csv_string: data to write in the csv (iterable of lines)
    :param csv_file: csv filepath
    :param exe_name: name of executable running the function for error
    :return: None
    """
    print('INFO: Writing report ...')
    # BUG FIX: os.path has no 'basedir' attribute; dirname() is the correct
    # call for the parent folder. Also allow bare filenames (dirname == '').
    basedir = os.path.dirname(csv_file)
    if basedir and not os.path.exists(basedir):
        err = 'Path %s not found for report. Give an existing parent folder.'
        raise XnatToolsUserError(exe_name, err % csv_file)
    with open(csv_file, 'w') as output_file:
        for line in csv_string:
            output_file.write(line)
Example 39
Project: pyresttest   Author: svanoort   File: resttest.py    License: Apache License 2.0 6 votes vote down vote up
def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config=TestConfig()):
    """ Writes benchmark to file as csv """
    csv_out = csv.writer(file_out)

    # Summary rows first: name, group, failure count.
    for label, value in (('Benchmark', benchmark_result.name),
                         ('Benchmark Group', benchmark_result.group),
                         ('Failures', benchmark_result.failures)):
        csv_out.writerow((label, value))

    # Raw timing data, if any was collected.
    if benchmark_result.results:
        csv_out.writerow(('Results', ''))
        csv_out.writerows(metrics_to_tuples(benchmark_result.results))
    # Aggregated statistics, if any were computed.
    if benchmark_result.aggregates:
        csv_out.writerow(('Aggregates', ''))
        csv_out.writerows(benchmark_result.aggregates)

# Method to call when writing benchmark file
Example 40
Project: music163-spiders   Author: zyingzhou   File: get_songs.py    License: MIT License 6 votes vote down vote up
def write_to_csv(items, artist_name):
    """Append one artist's song list to music163_songs.csv.

    :param items: parsed elements whose <a> tag carries the song id and title
    :param artist_name: artist display name, written as the first row
    """
    # newline='' stops the csv module from writing blank lines on Windows.
    with open("music163_songs.csv", "a", encoding='utf-8', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["歌手名字", artist_name])

        for item in items:
            writer.writerow([item.a['href'].replace('/song?id=', ''), item.a.text])

            # debug display
            # print('song id:', item.a['href'].replace('/song?id=', ''))
            # song_name = item.b['title']
            # print('song name:', song_name)
    # Redundant close() removed: the 'with' statement already closed the file.


# 获取歌手id和歌手姓名 
Example 41
Project: diluvian   Author: aschampion   File: util.py    License: MIT License 6 votes vote down vote up
def write_keras_history_to_csv(history, filename):
    """Write Keras history to a CSV file.

    If the file already exists it will be overwritten.

    Parameters
    ----------
    history : keras.callbacks.History
    filename : str
    """
    # Python 2 needs binary mode; Python 3 wants text mode with newline=''.
    if sys.version_info[0] < 3:
        open_args, open_kwargs = (['wb', ], {})
    else:
        open_args, open_kwargs = (['w', ], {'newline': '', 'encoding': 'utf8', })
    with open(filename, *open_args, **open_kwargs) as csvfile:
        writer = csv.writer(csvfile)
        # One column per metric, in sorted name order, plus the epoch index.
        metric_names = sorted(history.history.keys())
        writer.writerow(['epoch'] + metric_names)
        epoch_values = [history.history[name] for name in metric_names]
        for row in zip(history.epoch, *epoch_values):
            writer.writerow(row)
Example 42
Project: wildcat.pytorch   Author: durandtibo   File: voc.py    License: MIT License 6 votes vote down vote up
def write_object_labels_csv(file, labeled_data):
    """Write image-level object labels to a CSV file.

    :param file: destination CSV path
    :param labeled_data: mapping of image name -> per-category label sequence
    """
    print('[dataset] write file %s' % file)
    # newline='' prevents the csv module from emitting blank lines on Windows.
    with open(file, 'w', newline='') as csvfile:
        fieldnames = ['name']
        fieldnames.extend(object_categories)
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

        writer.writeheader()
        for (name, labels) in labeled_data.items():
            example = {'name': name}
            # Iterate over all categories instead of a hard-coded 20 so this
            # stays correct if object_categories ever changes size.
            for i in range(len(fieldnames) - 1):
                example[fieldnames[i + 1]] = int(labels[i])
            writer.writerow(example)
    # The 'with' block closes the file; the redundant close() was removed.
Example 43
Project: alpg   Author: GENETX   File: writer.py    License: GNU General Public License v3.0 6 votes vote down vote up
def writeCsvRow(fname, hnum, data):
	"""Write one household's column into outputFolder/fname.

	The first household (hnum == 0) creates the file with one rounded
	value per line; later households append their value to each existing
	line, separated by ';'.
	"""
	target = outputFolder + '/' + fname
	if hnum == 0:
		# First column: create the file, one rounded value per row.
		with open(target, 'w') as f:
			for value in data:
				f.write(str(round(value)) + '\n')
	else:
		# Later columns: rewrite every line with the new value appended.
		with open(target, 'r+') as f:
			existing = f.readlines()
			f.seek(0)
			f.truncate()
			for idx, line in enumerate(existing):
				f.write(line.rstrip() + ';' + str(round(data[idx])) + '\n')
Example 44
Project: scantron   Author: rackerlabs   File: masscan_json_to_csv.py    License: Apache License 2.0 6 votes vote down vote up
def write_results_to_csv_file(results_list, masscan_csv_file_name):
    """Writes results to a .csv file.  Attempts to extract column names, falls back to CSV_FIELD_NAMES."""

    print(f"Writing results to: {masscan_csv_file_name}")

    # newline='' keeps the csv module from inserting blank lines on Windows.
    with open(masscan_csv_file_name, "w", newline='') as csvfile:
        try:
            # Column names come from the first result dict when available.
            field_names = results_list[0].keys()
        except IndexError:
            # Empty result set: fall back to the static column list.
            field_names = CSV_FIELD_NAMES

        writer = csv.DictWriter(csvfile, fieldnames=field_names)
        writer.writeheader()
        writer.writerows(results_list)

    print(f"Done writing results to: {masscan_csv_file_name}")
Example 45
Project: iTunes_Backup_Reader   Author: jfarley248   File: writer.py    License: MIT License 6 votes vote down vote up
def writeToCsv(backup_list, application_list, output_file, logger):
    """Dump parsed backup metadata and application records to two CSVs.

    Creates <output_file>Backups.csv (header plus one backup row) and
    <output_file>Applications.csv (header plus one row per application).
    """
    backup_columns = ["Device_Name", "Product_Name", "Product_Model", "Phone_Number",
                      "iOS_Version", "Last_Backup_Completion",
                      "Last_Backup_Write_Completed", "User_Computers", "Passcode_Set", "Encrypted",
                      "GUID", "ICCID", "IMEI", "MEID", "Serial_Num",
                      "Full_Backup", "Version", "iTunes_Version"]
    app_columns = ["Device_Name", "Device_SN", "App_Name", "AppleID", "User_Full_Name", "Purchase_Date",
                   "Is_Possibly_Sideloaded", "App_Version", "Is_Auto_Download", "Is_Purchased_Redownload",
                   "Publisher", "Full_App_Name"]

    with open(output_file + "Backups.csv", 'w', newline='') as backup_handle:
        backup_writer = csv.writer(backup_handle, quoting=csv.QUOTE_ALL)
        # Header row plus the single backup record in one call.
        backup_writer.writerows([backup_columns, backup_list])

    with open(output_file + "Applications.csv", 'w', newline='') as app_handle:
        app_writer = csv.writer(app_handle, quoting=csv.QUOTE_ALL)
        app_writer.writerow(app_columns)
        app_writer.writerows(application_list)
Example 46
Project: cdlib   Author: GiulioRossetti   File: io.py    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
def write_community_csv(communities, path, delimiter=","):
    """
    Save community structure to comma separated value (csv) file.

    :param communities: a NodeClustering object
    :param path: output filename
    :param delimiter: column delimiter

    :Example:

    >>> import networkx as nx
    >>> from cdlib import algorithms, readwrite
    >>> g = nx.karate_club_graph()
    >>> coms = algorithms.louvain(g)
    >>> readwrite.write_community_csv(coms, "communities.csv", ",")

    """
    with open(path, "w") as out:
        # One line per community: its node ids joined by the delimiter.
        for community in communities.communities:
            out.write("%s\n" % delimiter.join(str(node) for node in community))
Example 47
Project: openprescribing   Author: ebmdatalab   File: utils.py    License: MIT License 6 votes vote down vote up
def write_csv_response(cursor, filename):
    """
    Writes a cursor to a CSV file.
    NB: Use StreamingHTTPResponse instead to handle big files?
    https://docs.djangoproject.com/en/1.7/howto/outputting-csv/
    """
    response = HttpResponse(content_type="text/csv")
    csv_name = "%s.csv" % filename  # TODO: Include date here
    response["Content-Disposition"] = 'attachment; filename="%s"' % csv_name
    writer = csv.writer(response)
    # BUG FIX: the previous code encoded every cell to bytes before handing
    # it to csv.writer; on Python 3 that serialises cells as "b'...'"
    # literals. csv.writer expects text, so write plain str values instead.
    writer.writerow([str(col[0]) for col in cursor.description])
    writer.writerows([str(item) for item in row] for row in cursor)
    return response
Example 48
Project: pysnyk   Author: snyk-labs   File: api-demo-10-project-deps-licenses-report.py    License: MIT License 6 votes vote down vote up
def write_all_project_output_csv(all_project_info, output_path):
    # using tab delimitation because I used ',' for the licenses
    # NOTE(review): despite the comment above, the format string actually
    # separates fields with two spaces, not tabs — preserved as-is.

    with open(output_path, "w") as output_csv:
        row_format = "%s  %s  %s  %s"
        for project in all_project_info:
            # Per-project header row.
            header = row_format % (
                project["project_name"],
                "License(s)",
                "License Issue(s)",
                "Application and Path",
            )
            output_csv.write("%s\n" % header)

            # One row per flattened dependency.
            for dep in project["flat_deps_list"]:
                dep_row = row_format % (
                    dep["pkgId"],
                    dep["licenses"],
                    dep["license_issues"],
                    dep["path"],
                )
                output_csv.write("%s\n" % dep_row)

            output_csv.write("\n")  # empty row to separate projects
Example 49
Project: PyU4V   Author: dell   File: file_handler.py    License: Apache License 2.0 6 votes vote down vote up
def write_dict_to_csv_file(
        file_path, dictionary, delimiter=',', quotechar='|'):
    """Write dictionary data to CSV spreadsheet.

    Each dictionary key becomes a column header; each key's sequence of
    values fills that column.  Columns shorter than the longest one are
    padded with empty strings instead of raising IndexError.

    :param file_path: path including name of the file to be written to -- str
    :param dictionary: data to be written to file -- dict
    :param delimiter: delimiter kwarg for csv writer object -- str
    :param quotechar: quotechar kwarg for csv writer object -- str
    """
    columns = list(dictionary.keys())
    # The longest column determines the number of data rows.
    num_values = max((len(dictionary.get(column)) for column in columns),
                     default=0)

    data_for_file = [columns]
    for i in range(num_values):
        csv_line = []
        for column in columns:
            values = dictionary.get(column)
            # ROBUSTNESS: pad short columns rather than crashing on ragged
            # input (the original indexed every column unconditionally).
            csv_line.append(values[i] if i < len(values) else '')
        data_for_file.append(csv_line)

    write_to_csv_file(file_path, data_for_file, delimiter, quotechar)
Example 50
Project: python-examples   Author: furas   File: get_reviews.py    License: MIT License 6 votes vote down vote up
def write_in_csv(items, filename='results.csv',
                  headers=['hotel name', 'review title', 'review body',
                           'review date', 'contributions', 'helpful vote',
                           'user name', 'user location', 'rating'],
                  mode='w'):
    """Write review dicts to a CSV file.

    :param items: iterable of dicts keyed by *headers*
    :param filename: destination CSV path
    :param headers: column names (list default is safe here: never mutated)
    :param mode: 'w' to create with a header row, 'a' to append rows only
    """
    print('--- CSV ---')

    # newline='' avoids extra blank lines from the csv module on Windows.
    with open(filename, mode, newline='') as csvfile:
        csv_file = csv.DictWriter(csvfile, headers)

        if mode == 'w':  # don't write headers if you append to existing file
            csv_file.writeheader()

        csv_file.writerows(items)

#----------------------------------------------------------------------
# DATABASE
# --------------------------------------------------------------------- 
Example 51
Project: indra   Author: sorgerlab   File: __init__.py    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
def write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
                       quoting=csv.QUOTE_MINIMAL, lineterminator='\n',
                       encoding='utf-8'):
    """Write *rows* to *filename* as CSV, handling unicode on Python 2 and 3."""
    if sys.version_info[0] >= 3:
        # Python 3: text mode with explicit encoding; newline='' per the
        # csv module documentation.
        with open(filename, 'w', newline='', encoding=encoding) as handle:
            writer = csv.writer(handle, delimiter=delimiter,
                                quotechar=quotechar, quoting=quoting,
                                lineterminator=lineterminator)
            writer.writerows(rows)
    else:
        # Python 2: the csv module is byte-oriented, so the delimiter,
        # quotechar and every cell must be encoded by hand.
        with open(filename, 'w') as handle:
            writer = csv.writer(handle, delimiter=delimiter.encode(encoding),
                                quotechar=quotechar.encode(encoding),
                                quoting=quoting, lineterminator=lineterminator)
            for row in rows:
                writer.writerow([unicode(cell).encode(encoding)
                                 for cell in row])
Example 52
Project: export-dynamodb   Author: travistrle   File: main.py    License: GNU General Public License v3.0 6 votes vote down vote up
def write_to_csv_file(data, filename):
    """
    Write to a csv file.
    :param data: dict with 'keys' (column names) and 'items' (row dicts);
                 None is a no-op
    :param filename: destination CSV path
    :return: None
    """
    if data is None:
        return

    print("Writing to csv file.")
    # newline='' prevents blank lines between rows on Windows (csv docs).
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=data['keys'],
                                quotechar='"')
        writer.writeheader()
        writer.writerows(data['items'])
Example 53
Project: ml-fairness-gym   Author: google   File: download_movielens.py    License: Apache License 2.0 6 votes vote down vote up
def write_csv_output(dataframes, directory):
  """Write the users/movies/ratings dataframes as CSVs under *directory*."""
  movies, users, ratings = dataframes
  file_util.makedirs(directory)

  # This column isn't necessary downstream, so drop it before export.
  del movies['tag_id']

  def _open_for_write(name):
    # All three exports share the same open-for-writing pattern.
    return file_util.open(os.path.join(directory, name), 'w')

  users.to_csv(_open_for_write('users.csv'), index=False, columns=['userId'])
  movies.to_csv(_open_for_write('movies.csv'), index=False)
  ratings.to_csv(_open_for_write('ratings.csv'), index=False)
Example 54
Project: open-source-library-data-collector   Author: sendgrid   File: utils.py    License: MIT License 6 votes vote down vote up
def write_records_to_csv(filepath, records, headers=None):
    """Write a list of lists to a CSV (comma separated values) file, where
       each sub-list is a row of data.

    :param filepath: Path to the file to write, including export name
    :type filepath:  basestring

    :param records: List of lists to put in CSV. Each sub-list is made of
                    things that can be written, like strings or numbers
    :type records:  list

    :param headers: List of column headers as strings. If not provided,
                    no header is written.
    :type headers: list
    """

    # Create any intermediary folders if necessary, works in Python 3.2+.
    # BUG FIX: guard against a bare filename — os.makedirs('') raises.
    parent_dir = os.path.dirname(filepath)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    # newline='' keeps the csv module from writing blank lines on Windows.
    with open(filepath, 'w', newline='') as fp:
        writer = csv.writer(fp)
        if headers:
            writer.writerow(headers)
        writer.writerows(records)
Example 55
Project: Python-Digital-Forensics-Cookbook   Author: PacktPublishing   File: total_virus.py    License: MIT License 6 votes vote down vote up
def write_csv(data, output):
    """Flatten VirusTotal scan results and write them to *output* as CSV.

    Exits with status 4 when there are no results to report.
    """
    if data == []:
        print("[-] No output results to write")
        sys.exit(4)

    print("[+] Writing output for {} domains with results to {}".format(
        len(data), output))

    columns = ["URL", "Scan Date", "Service",
               "Detected", "Result", "VirusTotal Link"]
    # One output row per (url, scanning service) pair.
    flattened = [
        {"URL": entry.get("url", ""),
         "Scan Date": entry.get("scan_date", ""),
         "VirusTotal Link": entry.get("permalink", ""),
         "Service": service,
         "Detected": entry["scans"][service]["detected"],
         "Result": entry["scans"][service]["result"]}
        for entry in data
        for service in entry["scans"]
    ]

    with open(output, "w", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=columns)
        writer.writeheader()
        writer.writerows(flattened)
Example 56
Project: binance-downloader   Author: bullsignals   File: api.py    License: MIT License 6 votes vote down vote up
def write_to_csv(self, output=None):
        """Write k-lines retrieved from Binance into a csv file

        :param output: output file path. If none, will be stored in ./downloaded
            directory with a timestamped filename based on symbol pair and interval
        :return: None
        """
        if not self.download_successful:
            log.warn("Not writing to output file since no data was received from API")
            return
        if self.kline_df is None:
            raise ValueError("Must read in data from Binance before writing to disk!")

        # Fall back to the default timestamped path when none was supplied.
        destination = output or self.output_file

        with open(destination, "w") as csv_file:
            # Ensure 9 decimal places (most prices are to 8 places)
            self.kline_df.to_csv(csv_file, index=False, float_format="%.9f")
        log.notice(f"Done writing {destination} for {len(self.kline_df)} lines")
Example 57
Project: emotion-recognition-using-speech   Author: x4nth055   File: emotion_recognition.py    License: MIT License 6 votes vote down vote up
def write_csv(self):
        """
        Write available CSV files in `self.train_desc_files` and `self.test_desc_files`
        determined by `self._set_metadata_filenames()` method.
        """
        for train_csv_file, test_csv_file in zip(self.train_desc_files, self.test_desc_files):
            # Skip regeneration when both files already exist and overriding
            # is disabled (not a safe existence check against races).
            if (os.path.isfile(train_csv_file) and os.path.isfile(test_csv_file)
                    and not self.override_csv):
                continue
            # Dispatch to the dataset-specific CSV writer based on filename.
            if self.emodb_name in train_csv_file:
                write_emodb_csv(self.emotions, train_name=train_csv_file,
                                test_name=test_csv_file, verbose=self.verbose)
                if self.verbose:
                    print("[+] Writed EMO-DB CSV File")
            elif self.tess_ravdess_name in train_csv_file:
                write_tess_ravdess_csv(self.emotions, train_name=train_csv_file,
                                       test_name=test_csv_file, verbose=self.verbose)
                if self.verbose:
                    print("[+] Writed TESS & RAVDESS DB CSV File")
            elif self.custom_db_name in train_csv_file:
                write_custom_csv(emotions=self.emotions, train_name=train_csv_file,
                                 test_name=test_csv_file, verbose=self.verbose)
                if self.verbose:
                    print("[+] Writed Custom DB CSV File")
Example 58
def writeCsv(fileURI, list_, withListOfRows=1, append=0):
    '''
    Write rows (or note onset/duration pairs) to a CSV file.

    TODO: move to utilsLyrics

    :param fileURI: destination file path
    :param list_: rows to write; when withListOfRows is falsy, elements are
                  note objects with onsetTime and noteDuration attributes
    :param withListOfRows: truthy -> each element is already a row sequence
    :param append: truthy -> append to the file instead of overwriting
    '''
    from csv import writer

    # BUG FIX: the original opened in binary mode ('wb'/'ab'), which breaks
    # the csv module on Python 3 (it writes text). Use text mode with
    # newline='' as the csv docs recommend, and a with-block for cleanup.
    mode = 'a' if append else 'w'
    with open(fileURI, mode, newline='') as fout:
        w = writer(fout)
        print('writing to csv file {}...'.format(fileURI))
        for row in list_:
            if withListOfRows:
                w.writerow(row)
            else:
                w.writerow([row.onsetTime, row.noteDuration])
Example 59
Project: finance-dl   Author: jbms   File: venmo.py    License: GNU General Public License v2.0 6 votes vote down vote up
def write_csv(self, csv_result):
        """Parse downloaded Venmo CSV bytes and merge the valid rows into
        transactions.csv inside the output directory.

        Rows without a Datetime value are logged and dropped.
        """
        reader = csv.DictReader(io.StringIO(csv_result.decode(), newline=''))
        field_names = reader.fieldnames

        # Keep only rows that look like real transactions (have a date).
        valid_rows = []
        for row in reader:
            if row['Datetime'] == '':
                logging.info('Invalid date in row: {}'.format(row))
            else:
                valid_rows.append(row)

        def get_sort_key(row):
            return parse_csv_date(row['Datetime']).timestamp()

        csv_merge.merge_into_file(
            filename=os.path.join(self.output_directory, 'transactions.csv'),
            field_names=field_names,
            data=valid_rows,
            sort_by=get_sort_key)
Example 60
Project: nototools   Author: googlefonts   File: report_coverage_data.py    License: Apache License 2.0 6 votes vote down vote up
def write_block_coverage_csv(block_data, names, msg, out_file=sys.stdout):
    """Write per-Unicode-block coverage counts and percentages as CSV.

    *msg* is accepted for interface compatibility but unused — CSV output
    has nowhere to put a free-form message.
    """
    block_data.sort()
    writer = csv.writer(out_file, delimiter=",")

    # Header: fixed columns plus a count/pct pair per font name.
    header = ["range", "block name", "count"]
    for font_name in names:
        header += [font_name + " count", font_name + " pct"]
    writer.writerow(header)

    for start, end, block_name, block_cps, covered_cps_lists in block_data:
        total = len(block_cps)
        row = ["%04x-%04x" % (start, end), block_name, total]
        for covered_cps in covered_cps_lists:
            covered_count = len(covered_cps)
            row += [covered_count, "%d%%" % int(100.0 * covered_count / total)]
        writer.writerow(row)