Python csv.writer() Examples

The following are 30 code examples showing how to use csv.writer(). They are extracted from open source projects; where available, the project, author, source file, and license are listed above each example so you can locate the original code.


You may also want to check out the other functions and classes available in the csv module.
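
Before the project examples, here is a minimal, self-contained sketch of typical csv.writer() usage (not taken from any of the projects below); the file name rows.csv and the sample rows are placeholders for illustration.

import csv
import io

# Write to a file: open with newline='' so the csv module controls line endings.
rows = [["year", "month", "count"], [2023, 1, 42], [2023, 2, 17]]
with open("rows.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
    writer.writerows(rows)

# Write to an in-memory buffer, as several of the examples below do with io.StringIO.
buffer = io.StringIO()
writer = csv.writer(buffer)
writer.writerow(["id", "path"])
writer.writerow([1, "images/0001.jpg"])
print(buffer.getvalue())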

Example 1
Project: comport   Author: codeforamerica   File: models.py    License: BSD 3-Clause "New" or "Revised" License
def get_denominator_csv(self):
        output = io.StringIO()

        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)

        writer.writerow(["year", "month", "officers out on service"])

        values = sorted(self.denominator_values,
                        key=lambda x: (x.year, x.month))

        for value in values:
            row = [
                value.year,
                value.month,
                value.officers_out_on_service
            ]
            writer.writerow(row)

        return output.getvalue() 
Example 2
Project: vergeml   Author: mme   File: env.py    License: MIT License
def write(self, epoch, step, data):
        if not self.ks:
            return

        # Make sure that keys have no underscores.
        data = {k.replace('_', '-'):v for k, v in data.items()}

        row = [epoch, step]
        for k in self.ks:
            if k in data:
                row.append(_toscalar(data[k]))
                self.prev[k] = data[k]
            elif k in self.prev:
                row.append(_toscalar(self.prev[k]))
            else:
                row.append(None)

        self.writer.writerow(row) 
Example 3
def save_id_to_path_mapping(self):
    """Saves mapping from submission IDs to original filenames.

    This mapping is saved as CSV file into target directory.
    """
    if not self.id_to_path_mapping:
      return
    with open(self.local_id_to_path_mapping_file, 'w') as f:
      writer = csv.writer(f)
      writer.writerow(['id', 'path'])
      for k, v in sorted(iteritems(self.id_to_path_mapping)):
        writer.writerow([k, v])
    cmd = ['gsutil', 'cp', self.local_id_to_path_mapping_file,
           os.path.join(self.target_dir, 'id_to_path_mapping.csv')]
    if subprocess.call(cmd) != 0:
      logging.error('Can\'t copy id_to_path_mapping.csv to target directory') 
Example 4
Project: neural-fingerprinting   Author: StephanZheng   File: master.py    License: BSD 3-Clause "New" or "Revised" License
def _save_sorted_results(self, run_stats, scores, image_count, filename):
    """Saves sorted (by score) results of the evaluation.

    Args:
      run_stats: dictionary with runtime statistics for submissions,
        can be generated by WorkPiecesBase.compute_work_statistics
      scores: dictionary mapping submission ids to scores
      image_count: dictionary with number of images processed by submission
      filename: output filename
    """
    with open(filename, 'w') as f:
      writer = csv.writer(f)
      writer.writerow(['SubmissionID', 'ExternalTeamId', 'Score',
                       'MedianTime', 'ImageCount'])
      get_second = lambda x: x[1]
      for s_id, score in sorted(iteritems(scores),
                                key=get_second, reverse=True):
        external_id = self.submissions.get_external_id(s_id)
        stat = run_stats.get(
            s_id, collections.defaultdict(lambda: float('NaN')))
        writer.writerow([s_id, external_id, score,
                         stat['median_eval_time'],
                         image_count[s_id]]) 
Example 5
def generate_bi_graphemes_dictionary(label_list):
    freqs = Counter()
    for label in label_list:
        label = label.split(' ')
        for i in label:
            for pair in split_every(2, i):
                if len(pair) == 2:
                    freqs[pair] += 1


    with open('resources/unicodemap_en_baidu_bi_graphemes.csv', 'w') as bigram_label:
        bigramwriter = csv.writer(bigram_label, delimiter = ',')
        baidu_labels = list('\' abcdefghijklmnopqrstuvwxyz')
        for index, key in enumerate(baidu_labels):
            bigramwriter.writerow((key, index+1))
        for index, key in enumerate(freqs.keys()):
            bigramwriter.writerow((key, index+len(baidu_labels)+1)) 
Example 6
Project: Servo   Author: fpsw   File: order.py    License: BSD 2-Clause "Simplified" License
def download_results(request):
    import csv
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="orders.csv"'

    writer = csv.writer(response)
    header = [
        'CODE',
        'CUSTOMER',
        'CREATED_AT',
        'ASSIGNED_TO',
        'CHECKED_IN',
        'LOCATION'
    ]
    writer.writerow(header)

    for o in request.session['order_queryset']:
        row = [o.code, o.customer, o.created_at,
               o.user, o.checkin_location, o.location]
        coded = [unicode(s).encode('utf-8') for s in row]

        writer.writerow(coded)

    return response 
Example 7
Project: python-toolbox-for-rapid   Author: Esri   File: CreateSubsetFile.py    License: Apache License 2.0
def execute(self, parameters, messages):
        """The source code of the tool."""
        in_drainage_line = parameters[0].valueAsText
        out_csv_file = parameters[1].valueAsText

        fields = ['NextDownID', 'HydroID']

        list_all = []

        '''The script line below makes sure that rows in the subset file are
           arranged in descending order of NextDownID of stream segments'''
        for row in sorted(arcpy.da.SearchCursor(in_drainage_line, fields), reverse=True):
            list_all.append([row[1]])

        with open(out_csv_file,'wb') as csvfile:
            connectwriter = csv.writer(csvfile, dialect='excel')
            for row_list in list_all:
                out = row_list
                connectwriter.writerow(out)

        return 
Example 8
Project: pywr   Author: pywr   File: recorders.py    License: GNU General Public License v3.0
def reset(self):
        import csv
        kwargs = {"newline": "", "encoding": "utf-8"}
        mode = "wt"

        if self.complib == "gzip":
            import gzip
            self._fh = gzip.open(self.csvfile, mode, self.complevel, **kwargs)
        elif self.complib in ("bz2", "bzip2"):
            import bz2
            self._fh = bz2.open(self.csvfile, mode, self.complevel, **kwargs)
        elif self.complib is None:
            self._fh = open(self.csvfile, mode, **kwargs)
        else:
            raise KeyError("Unexpected compression library: {}".format(self.complib))
        self._writer = csv.writer(self._fh, **self.csv_kwargs)
        # Write header data
        row = ["Datetime"] + [name for name in self._node_names]
        self._writer.writerow(row) 
Example 9
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: prepare_dataset.py    License: MIT License
def get_data():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("original/*.jpg")]
    jpg_files = [s + ".jpg" for s in filenames]
    txt_files = [s + ".txt" for s in filenames]

    for file in txt_files:
        boxes = []
        with open(file, "r", encoding="utf-8", newline="") as lines:
            for line in csv.reader(lines):
                boxes.append([line[0], line[1], line[6], line[7]])
        with open('mlt/label/' + file.split('/')[1], "w+") as labelFile:
            wr = csv.writer(labelFile)
            wr.writerows(boxes)

    for jpg in jpg_files:
        shutil.copy(jpg, 'mlt/image/') 
Example 10
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: main.py    License: MIT License
def process_txt():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("test_result/*.txt")]
    old_files = [s + ".txt" for s in filenames]
    for old_file in old_files:
        new = []
        with open(old_file, "r") as old:
            for line in csv.reader(old):
                if not line:
                    continue
                if not line[0]:
                    continue
                if line[0][0] == ' ' or line[0][-1] == ' ':
                    line[0] = line[0].strip()
                if ' ' in line[0]:
                    line = line[0].split(' ')
                new.append(line)
        with open('task2_result/' + old_file.split('/')[1], "w+") as newfile:
            wr = csv.writer(newfile, delimiter = '\n')
            new = [[s[0].upper()] for s in new]
            wr.writerows(new) 
Example 11
Project: ICDAR-2019-SROIE   Author: zzzDavid   File: main.py    License: MIT License
def for_task3():
    filenames = [os.path.splitext(f)[0] for f in glob.glob("boundingbox/*.txt")]
    box_files = [s + ".txt" for s in filenames]
    for boxfile in box_files:
        box = []
        with open(boxfile,'r') as boxes:
            for line in csv.reader(boxes):
                box.append([int(string, 10) for string in line[0:8]])
        words = []
        with open('test_result/'+ boxfile.split('/')[1], 'r') as prediction:
            for line in csv.reader(prediction):
                words.append(line)
        words = [s if len(s)!=0 else [' '] for s in words]
        new = []
        for line in zip(box,words):
            a,b = line
            new.append(a+b)
        with open('for_task3/'+ boxfile.split('/')[1], 'w+') as newfile:
            csv_out = csv.writer(newfile)
            for line in new:
                csv_out.writerow(line) 
Example 12
Project: dump1090-tools   Author: mutability   File: adsb-polar.py    License: ISC License
def write(self, filename):
        with closing(open(filename + '.new', 'w')) as w:
            c = csv.writer(w)
            c.writerow(['bearing_start','bearing_end','bin_start','bin_end','samples','unique'])
            for b_low,b_high,histo in self.values():
                # make sure we write at least one value per sector,
                # it makes things a little easier when plotting
                first = True
                for h_low,h_high,count,unique in histo.values():
                    if unique or first:
                        c.writerow(['%f' % b_low,
                                    '%f' % b_high,
                                    '%f' % h_low,
                                    '%f' % h_high,
                                    '%d' % count,
                                    '%d' % unique])
                        first = False
        os.rename(filename + '.new', filename) 
Example 13
Project: razzy-spinner   Author: rafasashi   File: common.py    License: GNU General Public License v3.0
def outf_writer_compat(outfile, encoding, errors, gzip_compress=False):
    """
    Identify appropriate CSV writer given the Python version
    """
    if compat.PY3:
        if gzip_compress:
            outf = gzip.open(outfile, 'wt', encoding=encoding, errors=errors)
        else:
            outf = open(outfile, 'w', encoding=encoding, errors=errors)
        writer = csv.writer(outf)
    else:
        if gzip_compress:
            outf = gzip.open(outfile, 'wb')
        else:
            outf = open(outfile, 'wb')
        writer = compat.UnicodeWriter(outf, encoding=encoding, errors=errors)
    return (writer, outf) 
Example 14
Project: pydfs-lineup-optimizer   Author: DimaKudosh   File: lineup_exporter.py    License: MIT License
def export(self, filename, render_func=None):
        if not self.lineups:
            return
        total_players = 0
        with open(filename, 'r') as csvfile:
            lines = list(csv.reader(csvfile))
            for i, lineup in enumerate(self.lineups, start=1):
                if i >= len(lines):
                    lines.append([])
                players_list = [(render_func or self.render_player)(player) for player in lineup.lineup]
                if not total_players:
                    total_players = len(players_list)
                lines[i] = players_list + lines[i][total_players:]
            for line_order in range(i, len(lines) - 1):
                lines[line_order] = [''] * total_players + lines[line_order][total_players:]
        with open(filename, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(lines) 
Example 15
Project: incubator-spot   Author: apache   File: utils.py    License: Apache License 2.0
def create_csv_file(cls, full_path_file, content, delimiter=','):
        with open(full_path_file, 'w+') as u_file:
            writer = csv.writer(u_file, quoting=csv.QUOTE_NONE, delimiter=delimiter)
            writer.writerows(content) 
Example 16
Project: vergeml   Author: mme   File: ls.py    License: MIT License
def _output_table(output, theader, tdata, left_align):

    if not tdata:
        print("No matching trained models found.", file=sys.stderr)

    if output == 'table':
        if not tdata:
            return
        tdata.insert(0, theader)
        print(DISPLAY.table(tdata, left_align=left_align).getvalue(fit=True))

    elif output == 'json':
        res = []
        for row in tdata:
            res.append(dict(zip(theader, row)))
        print(json.dumps(res))

    elif output == 'csv':
        buffer = io.StringIO()

        writer = csv.writer(buffer)
        writer.writerow(theader)
        for row in tdata:
            writer.writerow(row)
        val = buffer.getvalue()
        val = val.replace('\r', '')
        print(val.strip()) 
Example 17
Project: vergeml   Author: mme   File: env.py    License: MIT License
def __init__(self, stats, stats_file):
        self.ks = [k['name'] for k in stats if k['log']]
        self.prev = {}

        if not self.ks:
            return

        self.file = open(stats_file, "w", newline='')
        self.writer = csv.writer(self.file)
        self.writer.writerow(["epoch", "step"] + self.ks) 
Example 18
Project: vergeml   Author: mme   File: env.py    License: MIT License
def end(self):
        if not self.ks:
            return

        self.file.flush()
        self.file.close()
        self.writer = None 
Example 19
Project: cgp-cnn   Author: sg-nm   File: cgp.py    License: MIT License
def evolution(self, max_eval=100, mutation_rate=0.01, log_file='./log.txt'):
        with open(log_file, 'w') as fw:
            writer = csv.writer(fw, lineterminator='\n')

            eval_flag = np.empty(self.lam)

            self._evaluation([self.pop[0]], np.array([True]))
            print(self._log_data(net_info_type='active_only'))

            while self.num_eval < max_eval:
                self.num_gen += 1

                # reproduction
                for i in range(self.lam):
                    self.pop[i+1].copy(self.pop[0])    # copy a parent
                    eval_flag[i] = self.pop[i+1].mutation(mutation_rate)    # mutation

                # evaluation and selection
                evaluations = self._evaluation(self.pop[1:], eval_flag=eval_flag)
                best_arg = evaluations.argmax()
                if evaluations[best_arg] >= self.pop[0].eval:
                    self.pop[0].copy(self.pop[best_arg+1])

                # display and save log
                if eval_flag.sum() > 0:
                    print(self._log_data(net_info_type='active_only'))
                    writer.writerow(self._log_data(net_info_type='full'))

    # Modified CGP (used for the GECCO 2017 paper):
    #   At each iteration:
    #     - Generate lambda individuals in which at least one active node changes (i.e., forced mutation)
    #     - Mutate the best individual with a neutral mutation (leaving the active nodes unchanged)
    #       if the best individual is not updated.
Example 20
Project: cgp-cnn   Author: sg-nm   File: cgp.py    License: MIT License
def modified_evolution(self, max_eval=100, mutation_rate=0.01, log_file='./log.txt'):
        with open(log_file, 'w') as fw:
            writer = csv.writer(fw, lineterminator='\n')

            eval_flag = np.empty(self.lam)

            active_num = self.pop[0].count_active_node()
            while active_num < self.pop[0].net_info.min_active_num or active_num > self.pop[0].net_info.max_active_num:
                self.pop[0].mutation(1.0)
                active_num = self.pop[0].count_active_node()
            self._evaluation([self.pop[0]], np.array([True]))
            print(self._log_data(net_info_type='active_only'))

            while self.num_eval < max_eval:
                self.num_gen += 1

                # reproduction
                for i in range(self.lam):
                    eval_flag[i] = False
                    self.pop[i + 1].copy(self.pop[0])  # copy a parent
                    active_num = self.pop[i + 1].count_active_node()

                    # forced mutation
                    while not eval_flag[i] or active_num < self.pop[i + 1].net_info.min_active_num \
                            or active_num > self.pop[i + 1].net_info.max_active_num:
                        self.pop[i + 1].copy(self.pop[0])  # copy a parent
                        eval_flag[i] = self.pop[i + 1].mutation(mutation_rate)  # mutation
                        active_num = self.pop[i + 1].count_active_node()

                # evaluation and selection
                evaluations = self._evaluation(self.pop[1:], eval_flag=eval_flag)
                best_arg = evaluations.argmax()
                if evaluations[best_arg] > self.pop[0].eval:
                    self.pop[0].copy(self.pop[best_arg + 1])
                else:
                    self.pop[0].neutral_mutation(mutation_rate)  # neutral mutation

                # display and save log
                print(self._log_data(net_info_type='active_only'))
                writer.writerow(self._log_data(net_info_type='full')) 
Example 21
Project: L.E.S.M.A   Author: NatanaelAntonioli   File: L.E.S.M.A. - Fabrica de Noobs Speedtest.py    License: Apache License 2.0
def csv(self, delimiter=','):
		"""Return data in CSV format"""

		data = self.dict()
		out = StringIO()
		writer = csv.writer(out, delimiter=delimiter, lineterminator='')
		writer.writerow([data['server']['id'], data['server']['sponsor'],
						 data['server']['name'], data['timestamp'],
						 data['server']['d'], data['ping'], data['download'],
						 data['upload']])
		return out.getvalue() 
Example 22
Project: neural-fingerprinting   Author: StephanZheng   File: master.py    License: BSD 3-Clause "New" or "Revised" License
def save_dict_to_file(filename, dictionary):
  """Saves dictionary as CSV file."""
  with open(filename, 'w') as f:
    writer = csv.writer(f)
    for k, v in iteritems(dictionary):
      writer.writerow([str(k), str(v)]) 
Example 23
Project: twstock   Author: mlouielu   File: fetch.py    License: MIT License
def to_csv(url, path):
    data = fetch_data(url)
    with open(path, 'w', newline='', encoding='utf_8') as csvfile:
        writer = csv.writer(csvfile,
                            delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(data[0]._fields)
        for d in data:
            writer.writerow([_ for _ in d]) 
Example 24
Project: facebook-discussion-tk   Author: internaut   File: analyze_noun_counts.py    License: MIT License
def write_output_to_file(label, sum_counts, append=False):
    print(">> writing output to file '%s'" % output_file)

    fmode = 'a' if append else 'w'
    with open(output_file, fmode + 'b') as f:
        writer = csv.writer(f)
        for noun, count in sum_counts:
            writer.writerow([label.encode('utf-8'), noun.encode('utf-8'), count])

    print(">> done") 
Example 25
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: benchmark.py    License: Apache License 2.0
def write_csv(log_loc, args):
    for net in args.networks:
        with open(log_loc + '/' + net.name + '.csv', 'wb') as f:
            w = csv.writer(f)
            w.writerow(['num_gpus', 'img_processed_per_sec'])
            w.writerows(net.gpu_speedup.items()) 
Example 26
Project: comport   Author: codeforamerica   File: models.py    License: BSD 3-Clause "New" or "Revised" License
def get_uof_csv(self):
        output = io.StringIO()

        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)

        uof_class = getattr(importlib.import_module("comport.data.models"), "UseOfForceIncident{}".format(self.short_name))

        csv_schema = uof_class.get_csv_schema()
        csv_headers = [col[0] for col in csv_schema]
        csv_vars = [col[1] for col in csv_schema]

        writer.writerow(csv_headers)

        use_of_force_incidents = uof_class.query.all()

        for incident in use_of_force_incidents:
            values = []
            for incident_var in csv_vars:
                incident_value = getattr(incident, incident_var)
                if incident_var.endswith("_date"):
                    incident_value = coalesce_date(incident_value)
                values.append(incident_value)

            writer.writerow(values)

        return output.getvalue() 
Example 27
Project: comport   Author: codeforamerica   File: models.py    License: BSD 3-Clause "New" or "Revised" License
def get_ois_csv(self):
        output = io.StringIO()

        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)

        ois_class = getattr(importlib.import_module("comport.data.models"), "OfficerInvolvedShooting{}".format(self.short_name))

        csv_schema = ois_class.get_csv_schema()
        csv_headers = [col[0] for col in csv_schema]
        csv_vars = [col[1] for col in csv_schema]

        writer.writerow(csv_headers)

        officer_involved_shootings = ois_class.query.all()

        for incident in officer_involved_shootings:
            values = []
            for incident_var in csv_vars:
                incident_value = getattr(incident, incident_var)
                if incident_var.endswith("_date"):
                    incident_value = coalesce_date(incident_value)
                values.append(incident_value)

            writer.writerow(values)

        return output.getvalue() 
Example 28
Project: comport   Author: codeforamerica   File: models.py    License: BSD 3-Clause "New" or "Revised" License
def get_complaint_csv(self):
        output = io.StringIO()

        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)

        complaint_class = getattr(importlib.import_module("comport.data.models"), "CitizenComplaint{}".format(self.short_name))

        csv_schema = complaint_class.get_csv_schema()
        csv_headers = [col[0] for col in csv_schema]
        csv_vars = [col[1] for col in csv_schema]

        writer.writerow(csv_headers)

        complaints = complaint_class.query.all()

        for complaint in complaints:
            values = []
            for incident_var in csv_vars:
                incident_value = getattr(complaint, incident_var)
                if incident_var.endswith("_date"):
                    incident_value = coalesce_date(incident_value)
                values.append(incident_value)

            writer.writerow(values)

        return output.getvalue() 
Example 29
Project: comport   Author: codeforamerica   File: models.py    License: BSD 3-Clause "New" or "Revised" License
def get_pursuits_csv(self):
        output = io.StringIO()

        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)

        pursuits_class = getattr(importlib.import_module("comport.data.models"), "Pursuit{}".format(self.short_name))

        csv_schema = pursuits_class.get_csv_schema()
        csv_headers = [col[0] for col in csv_schema]
        csv_vars = [col[1] for col in csv_schema]

        writer.writerow(csv_headers)

        incidents = pursuits_class.query.all()

        for incident in incidents:
            values = []
            for incident_var in csv_vars:
                incident_value = getattr(incident, incident_var)
                if incident_var.endswith("_date"):
                    incident_value = coalesce_date(incident_value)
                values.append(incident_value)

            writer.writerow(values)

        return output.getvalue() 
Example 30
Project: comport   Author: codeforamerica   File: models.py    License: BSD 3-Clause "New" or "Revised" License
def get_assaults_csv(self):
        output = io.StringIO()

        writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)

        assaults_class = getattr(importlib.import_module("comport.data.models"), "AssaultOnOfficer{}".format(self.short_name))

        csv_schema = assaults_class.get_csv_schema()
        csv_headers = [col[0] for col in csv_schema]
        csv_vars = [col[1] for col in csv_schema]

        writer.writerow(csv_headers)

        incidents = assaults_class.query.all()

        for incident in incidents:
            values = []
            for incident_var in csv_vars:
                incident_value = getattr(incident, incident_var)
                if incident_var.endswith("_date"):
                    incident_value = coalesce_date(incident_value)
                values.append(incident_value)

            writer.writerow(values)

        return output.getvalue()