Python csv.reader() Examples
The following are 30 code examples of csv.reader().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module csv, or try the search function.
Example #1
Source File: geoloc.py From incubator-spot with Apache License 2.0 | 7 votes |
def get_ip_geo_localization(self, ip):
    """Look up the geo-localization and domain for an IP address.

    Finds the matching row in the IP localization CSV via a bisect over
    the precomputed range list, then extracts the location and domain
    columns.

    :param ip: IP address string (may be None or empty).
    :return: dict with "geo_loc" and "domain" keys, or None when ip is
             None/empty.
    """
    self._logger.debug("Getting {0} geo localization ".format(ip))
    # BUG FIX: check for None BEFORE calling .strip(); the original
    # evaluated ip.strip() first and crashed with AttributeError on None.
    if ip is not None and ip.strip() != "":
        result = linecache.getline(
            self._ip_localization_file,
            bisect.bisect(self._ip_localization_ranges, Util.ip_to_int(ip)))
        # BUG FIX: str.strip returns a new string; the original discarded
        # the return value, leaving the trailing newline in the last field.
        result = result.strip('\n')
        reader = csv.reader([result])
        # next(reader) works on Python 2 and 3 (reader.next() is py2-only).
        row = next(reader)
        # NOTE(review): assumes columns 4-5 hold location parts, 8 the
        # extra location field and 9 the domain — confirm against the
        # localization file schema.
        geo_loc = ";".join(row[4:6]) + " " + ";".join(row[8:9])
        domain = row[9:10][0]
        return {"geo_loc": geo_loc, "domain": domain}
Example #2
Source File: osm_tags.py From labuildings with BSD 3-Clause "New" or "Revised" License | 6 votes |
def csv_to_json(mapping_name, csv_file):
    """Load an OSM tag-mapping CSV into the module-level `mappings` dict.

    Each row maps a source value (column 0) to up to three key/value
    pairs (columns 1-2, 3-4, 5-6); empty cells mean "no mapping".

    :param mapping_name: name under which to store the mapping.
    :param csv_file: open file object (or iterable of CSV lines).
    """
    reader = csv.reader(csv_file)
    # next(reader) is Python 2/3 compatible (reader.next() is py2-only).
    next(reader)  # skip header row
    mappings[mapping_name] = {}
    for row in reader:
        if row[1] != '' and row[2] != '':
            mappings[mapping_name][row[0]] = {'key1': row[1], 'val1': row[2]}
            if len(row) > 4 and row[3] != '' and row[4] != '':
                mappings[mapping_name][row[0]]['key2'] = row[3]
                mappings[mapping_name][row[0]]['val2'] = row[4]
                if len(row) > 6 and row[5] != '' and row[6] != '':
                    mappings[mapping_name][row[0]]['key3'] = row[5]
                    mappings[mapping_name][row[0]]['val3'] = row[6]
Example #3
Source File: Train.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0 | 6 votes |
def accumulate_result(validate_lst, prob):
    """Average prediction probabilities per validation-list index.

    Reads `validate_lst` (CSV, first column is an integer group index, one
    row per prediction) and averages the corresponding rows of `prob`
    within each group.

    :param validate_lst: path to the validation CSV file.
    :param prob: numpy array of shape (num_rows, num_classes).
    :return: dict mapping group index -> (1, num_classes) mean array.
    """
    sum_result = {}
    cnt_result = {}
    size = prob.shape[0]
    # `with` closes the file (original leaked the handle from
    # csv.reader(open(...))); next(fi) replaces py2-style fi.__next__().
    with open(validate_lst) as f:
        fi = csv.reader(f)
        for i in range(size):
            line = next(fi)
            idx = int(line[0])
            if idx not in cnt_result:
                cnt_result[idx] = 0.
                sum_result[idx] = np.zeros((1, prob.shape[1]))
            cnt_result[idx] += 1
            sum_result[idx] += prob[i, :]
    for i in cnt_result.keys():
        sum_result[i][:] /= cnt_result[i]
    return sum_result
Example #4
Source File: parse_geoplanet.py From gazetteer with MIT License | 6 votes |
def parse_geoplanet_places_csv(csv_file):
    """Print one 'P'-prefixed TSV line per place in a GeoPlanet places file.

    Output columns: marker 'P', WOE_ID, ISO, continent WOEID, name fields,
    a timestamp derived from column 18, and a WKT POINT(lon lat) built
    from columns 5 and 4.

    :param csv_file: path to the tab-separated GeoPlanet places file.
    :return: the input path (unchanged), as the original did.
    """
    # Python 3 port: text mode with newline='' replaces the original
    # Python 2 'rb' mode (csv.reader needs str, not bytes, on py3), and
    # print() replaces the py2 print statement.
    with open(csv_file, newline='') as f:
        csv_reader = csv.reader(f, dialect='excel-tab', quoting=csv.QUOTE_NONE)
        for row in csv_reader:
            out_line = ['P', row[0], row[1], row[6], row[7], row[8], row[10],
                        row[18] + " 00:00:00+00",
                        "POINT(" + row[5] + " " + row[4] + ")"]
            print("\t".join(out_line))
    return csv_file
Example #5
Source File: predictor.py From ernest with Apache License 2.0 | 6 votes |
def __init__(self, training_data_in=None, data_file=None):
    '''
    Initialize the Predictor with some training data.

    The training data should be a list of [mcs, input_fraction, time];
    alternatively a data file can be given whose space-delimited rows each
    contain one comma-separated "mc,scale,time" triple (rows starting
    with '#' are comments).

    :param training_data_in: optional list of [mc, scale, time] triples.
        Defaults to None (fix for the original's shared mutable default
        argument `training_data_in=[]`); passing a list behaves as before.
    :param data_file: optional path to a training-data file.
    '''
    self.training_data = []
    if training_data_in:
        self.training_data.extend(training_data_in)
    if data_file:
        # Text mode with newline='' replaces the Python 2 'rb' mode:
        # csv.reader on Python 3 requires str lines, not bytes.
        with open(data_file, 'r', newline='') as csvfile:
            reader = csv.reader(csvfile, delimiter=' ')
            for row in reader:
                # Skip blank lines (would raise IndexError) and comments.
                if not row or row[0][0] == '#':
                    continue
                parts = row[0].split(',')
                mc = int(parts[0])
                scale = float(parts[1])
                time = float(parts[2])
                self.training_data.append([mc, scale, time])
Example #6
Source File: test_imagenet_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def load_images(input_dir, metadata_file_path, batch_shape):
    """Retrieve numpy arrays of images and labels, read from a directory.

    :param input_dir: directory containing <ImageId>.png files.
    :param metadata_file_path: CSV with 'ImageId' and 'TrueLabel' columns.
    :param batch_shape: (num_images, height, width, channels).
    :return: (images, labels) — images scaled to [0, 1], labels int32.
    """
    num_images = batch_shape[0]
    with open(metadata_file_path) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)
    row_idx_image_id = header_row.index('ImageId')
    row_idx_true_label = header_row.index('TrueLabel')
    images = np.zeros(batch_shape)
    labels = np.zeros(num_images, dtype=np.int32)
    # range() replaces xrange, which no longer exists on Python 3.
    for idx in range(num_images):
        row = rows[idx]
        filepath = os.path.join(input_dir, row[row_idx_image_id] + '.png')
        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(
                Image.open(f).convert('RGB')).astype(np.float) / 255.0
        images[idx, :, :, :] = image
        labels[idx] = int(row[row_idx_true_label])
    return images, labels
Example #7
Source File: report_output_writer.py From aws-ops-automator with Apache License 2.0 | 6 votes |
def csv_to_dict_list(s):
    """Parse a CSV string into a list of dicts keyed by the header row.

    :param s: CSV text whose first row is the header, or None.
    :return: list of {column: value} dicts (empty list if only a header
             is present), or None when `s` is None or has no rows at all.
    """
    if s is None:
        return None
    reader = csv.reader(io.StringIO(s))
    try:
        cols = next(reader)
    except StopIteration:
        # No header row at all (e.g. empty string) — matches the
        # original's "cols is None" path.
        return None
    # Idiomatic replacement for the original while-True / StopIteration
    # loop; short rows still raise IndexError exactly as before.
    return [{cols[i]: row[i] for i in range(len(cols))} for row in reader]
Example #8
Source File: imports.py From gazetteer with MIT License | 6 votes |
def import_gazetteer(f, limit):
    """Import up to `limit` gazetteer features from a tab-separated file.

    Expected columns: 0 url, 1 preferred name, 2 feature-type code,
    4 admin1, 5 admin2, 6 latitude, 7 longitude. Rows whose url already
    exists are skipped.

    :param f: open file object with tab-separated rows.
    :param limit: maximum number of new features to save.
    """
    t = csv.reader(f, delimiter="\t")
    i = 0
    for row in t:
        ft = Feature()
        if Feature.objects.filter(url=row[0]).count() > 0:
            # print() function replaces the Python 2 print statement.
            print("duplicate row " + row[0])
        else:
            ft.url = row[0]
            ft.preferred_name = row[1]
            try:
                fcode = FeatureType.objects.get(code=row[2])
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            # NOTE(review): ideally catch FeatureType.DoesNotExist here.
            except Exception:
                fcode = None
            ft.feature_type = fcode
            ft.admin1 = row[4]
            ft.admin2 = row[5]
            # Point takes (x=longitude, y=latitude).
            ft.geometry = Point(float(row[7]), float(row[6]))
            ft.save()
            print("saved " + ft.preferred_name)
            i += 1
        if i > limit:
            break
Example #9
Source File: info.py From xalpha with MIT License | 6 votes |
def _basic_init(self):
    """Download the index CSV from self._url and build the price table.

    The last CSV row supplies the normalization factor (column 3) and the
    index name (column 2). Rows after the header are parsed into date,
    normalized net value, total value and a zero comment column, then
    reversed into chronological order and filtered to open trading dates
    up to yesterday.
    """
    raw = rget(self._url)
    rows = list(csv.reader(raw.text.splitlines(), delimiter=","))
    factor = float(rows[-1][3])
    dates, netvalues, totvalues = [], [], []
    for row in rows[1:]:
        dates.append(dt.datetime.strptime(row[0], "%Y-%m-%d"))
        totvalues.append(float(row[3]))
        netvalues.append(float(row[3]) / factor)
    frame = pd.DataFrame(
        data={
            "date": dates,
            "netvalue": netvalues,
            "totvalue": totvalues,
            "comment": [0 for _ in dates],
        }
    )
    # Source is newest-first; flip to chronological order.
    frame = frame.iloc[::-1].reset_index(drop=True)
    self.price = frame[frame["date"].isin(opendate)]
    self.price = self.price[self.price["date"] <= yesterdaydash()]
    self.name = rows[-1][2]
Example #10
Source File: run_attacks_and_defenses.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def __init__(self, filename):
    """Initializes instance of DatasetMetadata.

    Reads a CSV with 'ImageId', 'TrueLabel' and 'TargetClass' columns and
    populates the true-label and target-class lookup tables keyed by
    image id. Raises IOError on a malformed file.
    """
    self._true_labels = {}
    self._target_classes = {}
    with open(filename) as f:
        reader = csv.reader(f)
        header = next(reader)
        try:
            id_col = header.index('ImageId')
            label_col = header.index('TrueLabel')
            target_col = header.index('TargetClass')
        except ValueError:
            raise IOError('Invalid format of dataset metadata.')
        for row in reader:
            # Skip partial or empty lines.
            if len(row) < len(header):
                continue
            try:
                image_id = row[id_col]
                self._true_labels[image_id] = int(row[label_col])
                self._target_classes[image_id] = int(row[target_col])
            except (IndexError, ValueError):
                raise IOError('Invalid format of dataset metadata')
Example #11
Source File: graphs.py From DOTA_models with Apache License 2.0 | 6 votes |
def _get_vocab_freqs():
    """Returns vocab frequencies.

    Returns:
      List of integers, length=FLAGS.vocab_size.

    Raises:
      ValueError: if the length of the frequency file is not equal to the
        vocab size, or if the file is not found.
    """
    path = FLAGS.vocab_freq_path or os.path.join(FLAGS.data_dir,
                                                 'vocab_freq.txt')
    # Guard clause: no frequency file available.
    if not tf.gfile.Exists(path):
        if FLAGS.vocab_freq_path:
            raise ValueError('vocab_freq_path not found')
        # Fall back to uniform frequencies.
        return [1] * FLAGS.vocab_size
    # Get pre-calculated frequencies of words (last CSV column per row).
    with tf.gfile.Open(path) as f:
        reader = csv.reader(f, quoting=csv.QUOTE_NONE)
        freqs = [int(row[-1]) for row in reader]
    if len(freqs) != FLAGS.vocab_size:
        raise ValueError('Frequency file length %d != vocab size %d' %
                         (len(freqs), FLAGS.vocab_size))
    return freqs
Example #12
Source File: get_metadata.py From InsightAgent with Apache License 2.0 | 6 votes |
def parse_topology_file(self):
    """Parse the topology CSV and ship bidirectional relations in batches.

    Each data row's first cell is the key; every other cell produces the
    pair "key@@@@value" and "value@@@@key". Accumulated pairs are flushed
    via self._send_data whenever their JSON size reaches BYTES_PER_FLUSH,
    and once more at the end.
    """
    topology_list = []
    if os.path.isfile(self.file_path):
        with open(self.file_path) as topology_file:
            topology_file_csv = csv.reader(topology_file)
            for row in topology_file_csv:
                # BUG FIX: bytearray(str) needs an encoding on Python 3;
                # measure the encoded JSON length instead.
                map_size = len(json.dumps(topology_list).encode("utf-8"))
                if map_size >= BYTES_PER_FLUSH:
                    self._send_data(topology_list)
                    topology_list = []
                if topology_file_csv.line_num == 1:
                    continue  # skip header row
                key = ""
                # range() replaces xrange, which does not exist on Python 3.
                for index in range(len(row)):
                    if index == 0:
                        key = row[index]
                        continue
                    value1 = str(key) + "@@@@" + str(row[index])
                    value2 = str(row[index]) + "@@@@" + str(key)
                    if value1 not in topology_list:
                        topology_list.append(value1)
                    if value2 not in topology_list:
                        topology_list.append(value2)
        self._send_data(topology_list)
Example #13
Source File: prepare_dataset.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def get_data():
    """Convert original/*.jpg + *.txt annotations into the mlt/ layout.

    For every original/<name>.txt (CSV rows of 8 box coordinates plus
    text), writes mlt/label/<name>.txt keeping only columns 0, 1, 6, 7
    (the top-left and bottom-left corners), and copies each matching
    .jpg into mlt/image/.
    """
    filenames = [os.path.splitext(f)[0] for f in glob.glob("original/*.jpg")]
    jpg_files = [s + ".jpg" for s in filenames]
    txt_files = [s + ".txt" for s in filenames]
    # Renamed loop variable: the original used `file`, shadowing the builtin.
    for txt_path in txt_files:
        boxes = []
        with open(txt_path, "r", encoding="utf-8", newline="") as lines:
            for line in csv.reader(lines):
                boxes.append([line[0], line[1], line[6], line[7]])
        # os.path.basename is separator-safe; the original's
        # split('/')[1] broke on Windows-style paths.
        label_path = os.path.join('mlt', 'label', os.path.basename(txt_path))
        with open(label_path, "w+") as labelFile:
            wr = csv.writer(labelFile)
            wr.writerows(boxes)
    for jpg in jpg_files:
        shutil.copy(jpg, 'mlt/image/')
Example #14
Source File: __init__.py From vergeml with MIT License | 6 votes |
def load_predictions(env, nclasses):
    """Load (y_test, y_score) arrays from <stats_dir>/predictions.csv.

    Each CSV row holds `nclasses` score columns followed by `nclasses`
    one-hot label columns.

    :param env: environment object providing stats_dir().
    :param nclasses: number of classes (half the expected column count).
    :return: (y_test, y_score) numpy arrays.
    :raises FileNotFoundError: if the predictions file does not exist.
    """
    path = os.path.join(env.stats_dir(), "predictions.csv")
    if not os.path.exists(path):
        # BUG FIX: the original raised FileExistsError, which signals the
        # OPPOSITE condition (path already exists). FileNotFoundError is
        # correct; both subclass OSError, so broad handlers still work.
        raise FileNotFoundError(path)
    with open(path, newline='') as csvfile:
        y_score = []
        y_test = []
        csv_reader = csv.reader(csvfile, dialect="excel")
        for row in csv_reader:
            assert len(row) == nclasses * 2
            y_score.append(list(map(float, row[:nclasses])))
            y_test.append(list(map(float, row[nclasses:])))
    y_score = np.array(y_score)
    y_test = np.array(y_test)
    return y_test, y_score
Example #15
Source File: test_Export.py From URS with MIT License | 6 votes |
def test_write_csv(self):
    """Round-trip check: _write_csv output parses back to the same dict."""
    filename = os.path.join(sys.path[0], "test_csv_writing.csv")
    overview = {"this": [1, 2], "is": [3, 4], "a": [5, 6], "test": [7, 8]}
    Export.Export._write_csv(filename, overview)
    with open(filename, "r") as test_csv:
        reader = csv.reader(test_csv)
        # Header row supplies the keys, in file order.
        parsed = {header: [] for header in next(reader)}
        for row in reader:
            for idx, key in enumerate(parsed):
                parsed[key].append(int(row[idx]))
    assert parsed == overview
    os.remove(filename)
Example #16
Source File: main.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def process_txt():
    """Normalize test_result/*.txt into task2_result/.

    For each file: drop empty rows, strip surrounding whitespace from the
    first field, split a multi-word first field into separate rows' worth
    of words, then write the uppercased first token of every row (one per
    line) to task2_result/<same name>.
    """
    txt_paths = [os.path.splitext(f)[0] + ".txt"
                 for f in glob.glob("test_result/*.txt")]
    for old_path in txt_paths:
        cleaned = []
        with open(old_path, "r") as old:
            for line in csv.reader(old):
                if not line or not line[0]:
                    continue
                text = line[0]
                if text[0] == ' ' or text[-1] == ' ':
                    text = text.strip()
                if ' ' in text:
                    line = text.split(' ')
                else:
                    line = [text] + line[1:]
                cleaned.append(line)
        with open('task2_result/' + old_path.split('/')[1], "w+") as out_file:
            writer = csv.writer(out_file, delimiter='\n')
            writer.writerows([[entry[0].upper()] for entry in cleaned])
Example #17
Source File: main.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def for_task3():
    """Join boundingbox/*.txt coordinates with test_result/*.txt text.

    For each bounding-box file, parses the first 8 CSV cells of every row
    as integers, pairs them row-by-row with the recognized text from the
    matching test_result file (blank rows become [' ']), and writes the
    combined rows to for_task3/<same name>.
    """
    box_paths = [os.path.splitext(f)[0] + ".txt"
                 for f in glob.glob("boundingbox/*.txt")]
    for box_path in box_paths:
        coords = []
        with open(box_path, 'r') as boxes:
            for line in csv.reader(boxes):
                coords.append([int(cell, 10) for cell in line[0:8]])
        texts = []
        with open('test_result/' + box_path.split('/')[1], 'r') as prediction:
            for line in csv.reader(prediction):
                texts.append(line)
        # Empty prediction rows become a single-space placeholder.
        texts = [entry if entry else [' '] for entry in texts]
        with open('for_task3/' + box_path.split('/')[1], 'w+') as out_file:
            writer = csv.writer(out_file)
            for coord_row, text_row in zip(coords, texts):
                writer.writerow(coord_row + text_row)
Example #18
Source File: main.py From ICDAR-2019-SROIE with MIT License | 6 votes |
def draw():
    """Draw each for_task3/*.txt box and label onto its source image.

    Reads rows of "x1,y1,...,x4,y4,text", draws the rectangle (using
    corners 0-1 and 4-5) and the uppercased text, then writes the result
    to task2_result_draw/<name>.jpg.
    """
    filenames = [os.path.splitext(f)[0] for f in glob.glob("for_task3/*.txt")]
    txt_files = [s + ".txt" for s in filenames]
    for txt in txt_files:
        image = cv2.imread(
            'test_original/' + txt.split('/')[1].split('.')[0] + '.jpg',
            cv2.IMREAD_COLOR)
        with open(txt, 'r') as txt_file:
            for line in csv.reader(txt_file):
                box = [int(string, 10) for string in line[0:8]]
                if len(line) < 9:
                    # BUG FIX: the original only printed and then fell
                    # through to line[8], raising IndexError on rows with
                    # no text column; skip such rows instead.
                    print(txt)
                    continue
                cv2.rectangle(image, (box[0], box[1]), (box[4], box[5]),
                              (0, 255, 0), 2)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, line[8].upper(), (box[0], box[1]), font,
                            0.5, (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite(
            'task2_result_draw/' + txt.split('/')[1].split('.')[0] + '.jpg',
            image)
Example #19
Source File: common_voice.py From fine-lm with MIT License | 6 votes |
def _collect_data(directory):
    """Traverses directory collecting input and target files.

    Args:
      directory: base path to extracted audio and transcripts.

    Returns:
      list of (media_base, media_filepath, label) tuples
    """
    data_files = []
    transcripts = [
        filename for filename in os.listdir(directory)
        if filename.endswith(".csv")
    ]
    for transcript in transcripts:
        transcript_path = os.path.join(directory, transcript)
        with open(transcript_path, "r") as transcript_file:
            transcript_reader = csv.reader(transcript_file)
            # next(...) is Python 2/3 compatible; the original's
            # transcript_reader.next() is Python 2 only.
            _ = next(transcript_reader)  # Skip headers.
            for transcript_line in transcript_reader:
                media_name, label = transcript_line[0:2]
                filename = os.path.join(directory, media_name)
                data_files.append((media_name, filename, label))
    return data_files
Example #20
Source File: barter_model.py From indras_net with GNU General Public License v3.0 | 6 votes |
def fetch_agents_from_file(self, filenm, agent_type):
    """
    Read in a list of bartering agents from a csv file
    """
    # Detection range for all agents; falls back to global knowledge when
    # the "max_detect" property is absent.
    max_detect = self.props.get("max_detect", ebm.GLOBAL_KNOWLEDGE)
    with open(filenm) as f:
        reader = csv.reader(f)
        for row in reader:
            # NOTE(review): assumes row[0] is the agent name, followed by
            # repeating (good, quantity, utility-expression) triples of
            # width STEP — confirm against the CSV files actually used.
            agent = agent_type(row[0], max_detect=max_detect)
            self.add_agent(agent)
            for i in range(1, len(row) - 2, STEP):
                good = row[i]
                self.market.add_good(good)
                # SECURITY: eval() executes arbitrary code from the CSV
                # cell; only load trusted agent files. The cell is expected
                # to contain the body of a utility function of qty.
                agent.endow(good, int(row[i + 1]),
                            eval("lambda qty: " + row[i + 2]))
    logging.info("Goods = " + str(self.market))
Example #21
Source File: plot_threshold_vs_success.py From pointnet-registration-framework with MIT License | 6 votes |
def read_csv(folder_name):
    """Read <folder_name>/test.csv and return (rot_err, trans_err) lists.

    Each CSV row logs: [0] serial no, [1] time taken, [2] iterations,
    [3] translation error, [4] rotation error — except PNLK result
    folders (method name encoded at folder_name[5:9]), where [1] is the
    translation error and [2] the rotation error.
    """
    rows = []
    # Each results folder contains a test.csv file with the full log.
    with open(os.path.join(folder_name, 'test.csv')) as csvfile:
        for raw_row in csv.reader(csvfile):
            rows.append([float(cell) for cell in raw_row])
    # Column layout depends on the method encoded in the folder name.
    if folder_name[5:9] == 'PNLK':
        rot_col, trans_col = 2, 1
    else:
        rot_col, trans_col = 4, 3
    rot_err = [row[rot_col] for row in rows]
    trans_err = [row[trans_col] for row in rows]
    return rot_err, trans_err
Example #22
Source File: CreateWeightTableFromWRFGeogrid.py From python-toolbox-for-rapid with Apache License 2.0 | 5 votes |
def csvToList(self, csv_file, delimiter=','):
    """Reads in a CSV file and returns the contents as list,
    where every row is stored as a sublist, and each element
    in the sublist represents 1 cell in the table.

    :param csv_file: path to the CSV file.
    :param delimiter: field delimiter (default ',').
    :return: list of rows, each a list of cell strings.
    """
    # Text mode with newline='' replaces the Python 2 'rb' mode:
    # csv.reader on Python 3 requires str lines (bytes raise an error),
    # and newline='' lets the csv module handle embedded newlines.
    with open(csv_file, 'r', newline='') as csv_con:
        reader = csv.reader(csv_con, delimiter=delimiter)
        return list(reader)
Example #23
Source File: prepare.py From DeepLung with GNU General Public License v3.0 | 5 votes |
def splitvaltestcsv():
    """Split the val annotation CSV into val/test annotation files.

    Rows whose series id (column 0) matches a .mhd file in the test data
    directory go to <test_annos_path>/annotations.csv; the rest are
    written back over the val annotations file.
    """
    testfiles = [f[:-4] for f in os.listdir(config['test_data_path'])
                 if f.endswith('.mhd')]
    valcsvlines = []
    testcsvlines = []
    import csv
    # `with` guarantees the files are closed even on error (the original
    # used manual open/close).
    with open(config['val_annos_path'], 'r', newline='') as valf:
        for line in csv.reader(valf):
            if line[0] in testfiles:
                testcsvlines.append(line)
            else:
                valcsvlines.append(line)
    # newline='' stops the csv module writing blank lines between rows
    # on Windows.
    with open(config['test_annos_path'] + 'annotations.csv', 'w',
              newline='') as testf:
        csv.writer(testf).writerows(testcsvlines)
    with open(config['val_annos_path'], 'w', newline='') as valf:
        csv.writer(valf).writerows(valcsvlines)
Example #24
Source File: category_util.py From object_detector_app with MIT License | 5 votes |
def load_categories_from_csv_file(csv_path):
    """Loads categories from a csv file.

    The CSV file should have one comma delimited numeric category id and
    string category name pair per line, e.g.:
      0,"cat"
      1,"dog"

    Args:
      csv_path: Path to the csv file to be parsed into categories.

    Returns:
      categories: A list of dictionaries representing all possible
        categories, each with an integer 'id' field and a string 'name'
        field.

    Raises:
      ValueError: If the csv file is incorrectly formatted.
    """
    categories = []
    with tf.gfile.Open(csv_path, 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=',', quotechar='"'):
            # Blank lines are tolerated; anything else must be an id/name pair.
            if not row:
                continue
            if len(row) != 2:
                raise ValueError('Expected 2 fields per row in csv: %s' %
                                 ','.join(row))
            categories.append({'id': int(row[0]), 'name': row[1]})
    return categories
Example #25
Source File: test_recorders.py From pywr with GNU General Public License v3.0 | 5 votes |
def test_loading_csv_recorder_from_json(tmpdir):
    """
    Test the CSV Recorder which is loaded from json
    """
    filename = 'csv_recorder.json'
    # This is a bit horrible, but need to edit the JSON dynamically
    # so that the output.h5 is written in the temporary directory
    path = os.path.join(os.path.dirname(__file__), 'models')
    with open(os.path.join(path, filename), 'r') as f:
        data = f.read()
    data = json.loads(data)
    # Make an absolute, but temporary, path for the recorder
    url = data['recorders']['model_out']['url']
    data['recorders']['model_out']['url'] = str(tmpdir.join(url))
    model = Model.load(data, path=path)
    csvfile = tmpdir.join('output.csv')
    model.run()
    # Timestepper index used to reconstruct the expected Datetime column.
    periods = model.timestepper.datetime_index
    import csv
    with open(str(csvfile), 'r') as fh:
        # Sniff the dialect from the first 1KB, then rewind and parse.
        dialect = csv.Sniffer().sniff(fh.read(1024))
        fh.seek(0)
        reader = csv.reader(fh, dialect)
        for irow, row in enumerate(reader):
            if irow == 0:
                # Header row: fixed column names.
                expected = ['Datetime', 'inpt', 'otpt']
                actual = row
            else:
                # Data rows: first cell is the ISO timestamp for this step.
                dt = periods[irow-1].to_timestamp()
                expected = [dt.isoformat()]
                actual = [row[0]]
                # NOTE(review): all value columns are expected to be 10.0
                # (within float tolerance) — confirm against the model JSON.
                assert np.all((np.array([float(v) for v in row[1:]]) - 10.0) < 1e-12)
            assert expected == actual
Example #26
Source File: UpdateWeightTable.py From python-toolbox-for-rapid with Apache License 2.0 | 5 votes |
def csv_to_list(self, csv_file, delimiter=','):
    """Reads in a CSV file and returns the contents as list,
    where every row is stored as a sublist, and each element
    in the sublist represents 1 cell in the table.

    :param csv_file: path to the CSV file.
    :param delimiter: field delimiter (default ',').
    :return: list of rows, each a list of cell strings.
    """
    # Text mode with newline='' replaces the Python 2 'rb' mode:
    # csv.reader on Python 3 requires str lines, not bytes.
    with open(csv_file, 'r', newline='') as csv_con:
        reader = csv.reader(csv_con, delimiter=delimiter)
        return list(reader)
Example #27
Source File: CreateWeightTableFromECMWFRunoff.py From python-toolbox-for-rapid with Apache License 2.0 | 5 votes |
def csvToList(self, csv_file, delimiter=','):
    """Reads in a CSV file and returns the contents as list,
    where every row is stored as a sublist, and each element
    in the sublist represents 1 cell in the table.

    :param csv_file: path to the CSV file.
    :param delimiter: field delimiter (default ',').
    :return: list of rows, each a list of cell strings.
    """
    # Text mode with newline='' replaces the Python 2 'rb' mode:
    # csv.reader on Python 3 requires str lines, not bytes.
    with open(csv_file, 'r', newline='') as csv_con:
        reader = csv.reader(csv_con, delimiter=delimiter)
        return list(reader)
Example #28
Source File: parse_geoplanet.py From gazetteer with MIT License | 5 votes |
def parse_geoplanet_admins_csv(csv_file):
    """Print one 'Q'-prefixed TSV line per admin row in a GeoPlanet file.

    Same layout as the places parser but marked 'Q': WOE_ID, ISO, name
    fields, a timestamp derived from column 18, and a WKT
    POINT(lon lat) built from columns 5 and 4.

    :param csv_file: path to the tab-separated GeoPlanet admins file.
    :return: the input path (unchanged), as the original did.
    """
    # Python 3 port: text mode with newline='' replaces the original
    # Python 2 'rb' mode (csv.reader needs str on py3), and print()
    # replaces the py2 print statement.
    with open(csv_file, newline='') as f:
        csv_reader = csv.reader(f, dialect='excel-tab', quoting=csv.QUOTE_NONE)
        for row in csv_reader:
            out_line = ['Q', row[0], row[1], row[6], row[7], row[8], row[10],
                        row[18] + " 00:00:00+00",
                        "POINT(" + row[5] + " " + row[4] + ")"]
            print("\t".join(out_line))
    return csv_file
Example #29
Source File: test.py From SVM-w-SMO with MIT License | 5 votes |
def readData(filename, header=True):
    """Read a comma-separated data file into numpy arrays.

    :param filename: path to the CSV file.
    :param header: if True, treat the first row as a header and return it
        separately instead of as data.
    :return: tuple (data, header_row) of numpy arrays; header_row wraps
        None when header=False or the file is empty.
    """
    # BUG FIX: the original did `data, header = [], None`, rebinding the
    # `header` PARAMETER to None so `if header:` was always False and the
    # header row was never skipped or captured. A separate variable
    # preserves the parameter's meaning.
    data, header_row = [], None
    # Text mode with newline='' replaces the Python 2 'rb' mode, and
    # next(...) replaces the py2-only spamreader.next().
    with open(filename, 'r', newline='') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        if header:
            header_row = next(spamreader, None)  # tolerate empty files
        for row in spamreader:
            data.append(row)
    return (np.array(data), np.array(header_row))
Example #30
Source File: kitti.py From vod-converter with MIT License | 5 votes |
def _get_detections(self, detections_fpath):
    """Parse a KITTI-style label file into a list of detection dicts.

    Each line is space-delimited: column 0 is the class label and
    columns 4-7 are the bounding box (x1, y1, x2, y2).

    :param detections_fpath: path to the label file.
    :return: list of dicts with 'label', 'left', 'right', 'top', 'bottom'.
    """
    detections = []
    with open(detections_fpath) as f:
        for row in csv.reader(f, delimiter=' '):
            x1, y1, x2, y2 = (float(cell) for cell in row[4:8])
            detections.append({
                'label': row[0],
                'left': x1,
                'right': x2,
                'top': y1,
                'bottom': y2,
            })
    return detections