Python numpy.genfromtxt() Examples

The following are 30 code examples showing how to use numpy.genfromtxt(). They are extracted from open source projects; the project, author, file name, and license for each example are listed above it.
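As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of the genfromtxt options that recur throughout: a delimited source, column names taken from the header row, and a fill value for missing entries.

import io
import numpy as np

# Two data rows, each with one missing value.
csv = io.StringIO("x,y,z\n1,2.5,\n4,,6.0\n")
data = np.genfromtxt(csv, delimiter=",", names=True, filling_values=0.0)
print(data["x"])  # [1. 4.]
print(data["y"])  # [2.5 0. ]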

Example 1
Project: lirpg   Author: Hwhitetooth   File: plot.py    License: MIT License
def load_results(file):
    if not os.path.exists(file):
        return None
    with open(file, 'r') as f:
        lines = [line for line in f]
    if len(lines) < 2:
        return None
    keys = [name.strip() for name in lines[0].split(',')]
    data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
    if data.ndim == 1:
        data = data.reshape(1, -1)
    assert data.ndim == 2
    assert data.shape[-1] == len(keys)
    result = {}
    for idx, key in enumerate(keys):
        result[key] = data[:, idx]
    return result 
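A hypothetical usage sketch for the helper above; the file name and column names are made up, but the format (a comma-separated header row followed by numeric rows) is what the function expects.

# Write a tiny CSV log, then read it back (assumes load_results, np, and os are in scope).
with open('progress.csv', 'w') as f:
    f.write('reward,length\n1.0,200\n2.5,180\n')
results = load_results('progress.csv')
print(results['reward'])  # [1.  2.5]
print(results['length'])  # [200. 180.]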
Example 2
Project: trees   Author: gdanezis   File: malware.py    License: Apache License 2.0
def read_data(labelsname, distancename):
    ## Extract labels
    rawlabels = np.genfromtxt(labelsname, delimiter=',', dtype=None)
    labelmap = {}
    row_len = 0
    for row in rawlabels:
        row_len = max(row_len, len(row)-1)
        name = row[0]
        labelmap[name] = list(row)[1:]

    ## Extract distances
    rawdistances = np.genfromtxt(distancename, delimiter=',', dtype=None)
    names = rawdistances[0][1:]
    distances = np.array(rawdistances[1:, 1:], dtype=float)
    labels = np.zeros((len(names), row_len))
    
    for i, name in enumerate(names):
        labels[i, 0:len(labelmap[name])] = labelmap[name]

    del labelmap
    return distances, labels, names 
Example 3
Project: HardRLWithYoutube   Author: MaxSobolMark   File: plot.py    License: MIT License
def load_results(file):
    if not os.path.exists(file):
        return None
    with open(file, 'r') as f:
        lines = [line for line in f]
    if len(lines) < 2:
        return None
    keys = [name.strip() for name in lines[0].split(',')]
    data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
    if data.ndim == 1:
        data = data.reshape(1, -1)
    assert data.ndim == 2
    assert data.shape[-1] == len(keys)
    result = {}
    for idx, key in enumerate(keys):
        result[key] = data[:, idx]
    return result 
Example 4
Project: jwalk   Author: jwplayer   File: io.py    License: Apache License 2.0
def load_edges(fpath, delimiter=None, has_header=False):
    """Load edges in CSV format as numpy ndarray of strings.

    Args:
        fpath (str): edges file
        delimiter (str): alternative argument name for sep (default=None)
        has_header (bool): True if has header row

    Returns:
        np.ndarray: array of edges
    """
    if PANDAS_INSTALLED:
        header = 'infer' if has_header else None
        df = pd.read_csv(fpath, delimiter=delimiter, header=header)
        edges = df.values
    else:
        logger.warning("Pandas not installed. Using numpy to load csv, which "
                       "is slower.")
        header = 1 if has_header else 0
        edges = np.genfromtxt(fpath, delimiter=delimiter, skip_header=header,
                              dtype=object)
    return edges.astype('str') 
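A hypothetical usage sketch; the file name and edge list are made up. Either code path (pandas or the numpy fallback) yields an array of edges with one row per line.

# Two directed edges, no header row.
with open('edges.csv', 'w') as f:
    f.write('a,b\nb,c\n')
edges = load_edges('edges.csv', delimiter=',')
print(edges.shape)  # (2, 2) array of string edges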
Example 5
Project: pypoisson   Author: mmolero   File: ply_from_array.py    License: MIT License
def points_normals_from(filename):
    array = np.genfromtxt(filename)
    return array[:,0:3], array[:,3:6] 
Example 6
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_skip_footer(self):
        data = ["# %i" % i for i in range(1, 6)]
        data.append("A, B, C")
        data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
        data[-1] = "99,99"
        kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
        test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
        ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
                        dtype=[(_, float) for _ in "ABC"])
        assert_equal(test, ctrl) 
Example 7
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_skip_footer_with_invalid(self):
        with suppress_warnings() as sup:
            sup.filter(ConversionWarning)
            basestr = '1 1\n2 2\n3 3\n4 4\n5  \n6  \n7  \n'
            # Footer too small to get rid of all invalid values
            assert_raises(ValueError, np.genfromtxt,
                          TextIO(basestr), skip_footer=1)
    #        except ValueError:
    #            pass
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            #
            a = np.genfromtxt(TextIO(basestr), skip_footer=3)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            #
            basestr = '1 1\n2  \n3 3\n4 4\n5  \n6 6\n7 7\n'
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=3, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) 
Example 8
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_dtype_with_object(self):
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
                             converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)

        ndtype = [('nest', [('idx', int), ('code', object)])]
        with assert_raises_regex(NotImplementedError,
                                 'Nested fields.* not supported.*'):
            test = np.genfromtxt(TextIO(data), delimiter=";",
                                 dtype=ndtype, converters=converters) 
Example 9
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_replace_space(self):
        # Test the 'replace_space' option
        txt = "A.A, B (B), C:C\n1, 2, 3.14"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl) 
Example 10
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_replace_space_known_dtype(self):
        # Test the 'replace_space' (and related) options when dtype != None
        txt = "A.A, B (B), C:C\n1, 2, 3"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl) 
Example 11
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_names_with_usecols_bug1636(self):
        # Make sure we pick up the right names w/ usecols
        data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
        ctrl_names = ("A", "C", "E")
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=(0, 2, 4), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        #
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        #
        test = np.genfromtxt(TextIO(data),
                             dtype=int, delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names) 
Example 12
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_utf8_file(self):
        utf8 = b"\xcf\x96"
        with temppath() as path:
            with open(path, "wb") as f:
                f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
            test = np.genfromtxt(path, dtype=None, comments=None,
                                 delimiter=',', encoding="UTF-8")
            ctl = np.array([
                     ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
                     ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
                     dtype=np.unicode)
            assert_array_equal(test, ctl)

            # test a mixed dtype
            with open(path, "wb") as f:
                f.write(b"0,testNonethe" + utf8)
            test = np.genfromtxt(path, dtype=None, comments=None,
                                 delimiter=',', encoding="UTF-8")
            assert_equal(test['f0'], 0)
            assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) 
Example 13
Project: assaytools   Author: choderalab   File: platereader.py    License: GNU Lesser General Public License v2.1
def read_emission_spectra_text(filename):
    """
    Read text-formatted emission spectra.

    Parameters
    ----------
    filename : str
       The Tecan Infinite output filen to be read.

    Returns
    -------
    SRC_280 : numpy.array
    SRC_280_x : numpy.array
    SRC_280_x_num : numpy.array

    Examples
    --------

    """

    SRC_280 = np.genfromtxt(filename, dtype='str')
    SRC_280_x = SRC_280[0,:]
    SRC_280_x_num = re.findall(r'\d+', str(SRC_280_x)[1:-1])

    return [SRC_280, SRC_280_x, SRC_280_x_num] 
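The docstring's Examples section is empty upstream; as a hypothetical sketch, reading a whitespace-delimited Tecan export might look like this (file name and layout are assumptions).

SRC_280, SRC_280_x, SRC_280_x_num = read_emission_spectra_text('spectra.txt')
print(SRC_280.shape)      # full table as a string array
print(SRC_280_x_num[:5])  # numeric tokens pulled out of the first row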
Example 14
Project: SfmLearner-Pytorch   Author: ClementPinard   File: sequence_folders.py    License: MIT License
def crawl_folders(self, sequence_length):
        sequence_set = []
        demi_length = (sequence_length-1)//2
        shifts = list(range(-demi_length, demi_length + 1))
        shifts.pop(demi_length)
        for scene in self.scenes:
            intrinsics = np.genfromtxt(scene/'cam.txt').astype(np.float32).reshape((3, 3))
            imgs = sorted(scene.files('*.jpg'))
            if len(imgs) < sequence_length:
                continue
            for i in range(demi_length, len(imgs)-demi_length):
                sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': []}
                for j in shifts:
                    sample['ref_imgs'].append(imgs[i+j])
                sequence_set.append(sample)
        random.shuffle(sequence_set)
        self.samples = sequence_set 
Example 15
Project: SfmLearner-Pytorch   Author: ClementPinard   File: pose_evaluation_utils.py    License: MIT License
def read_scene_data(data_root, sequence_set, seq_length=3, step=1):
    data_root = Path(data_root)
    im_sequences = []
    poses_sequences = []
    indices_sequences = []
    demi_length = (seq_length - 1) // 2
    shift_range = np.array([step*i for i in range(-demi_length, demi_length + 1)]).reshape(1, -1)

    sequences = set()
    for seq in sequence_set:
        corresponding_dirs = set((data_root/'sequences').dirs(seq))
        sequences = sequences | corresponding_dirs

    print('getting test metadata for these sequences : {}'.format(sequences))
    for sequence in tqdm(sequences):
        poses = np.genfromtxt(data_root/'poses'/'{}.txt'.format(sequence.name)).astype(np.float64).reshape(-1, 3, 4)
        imgs = sorted((sequence/'image_2').files('*.png'))
        # construct 5-snippet sequences
        tgt_indices = np.arange(demi_length, len(imgs) - demi_length).reshape(-1, 1)
        snippet_indices = shift_range + tgt_indices
        im_sequences.append(imgs)
        poses_sequences.append(poses)
        indices_sequences.append(snippet_indices)
    return im_sequences, poses_sequences, indices_sequences 
Example 16
Project: SfmLearner-Pytorch   Author: ClementPinard   File: depth_evaluation_utils.py    License: MIT License
def get_displacements_from_speed(root, date, scene, indices, tgt_index):
    """get displacement magnitudes by integrating over speed values.
    Might be a good alternative if the GPS is not good enough"""
    if len(indices) == 0:
        return []
    oxts_root = root/date/scene/'oxts'
    with open(oxts_root/'timestamps.txt') as f:
        timestamps = np.array([datetime.datetime.strptime(ts[:-3], "%Y-%m-%d %H:%M:%S.%f").timestamp() for ts in f.read().splitlines()])
    speeds = np.zeros((len(indices), 3))
    for i, index in enumerate(indices):
        oxts_data = np.genfromtxt(oxts_root/'data'/'{:010d}.txt'.format(index))
        speeds[i] = oxts_data[[6,7,10]]
    displacements = np.zeros((len(indices), 3))
    # Perform the integration operation, using trapezoidal method
    for i0, (i1, i2) in enumerate(zip(indices, indices[1:])):
        displacements[i0 + 1] = displacements[i0] + 0.5*(speeds[i0] + speeds[i0 + 1]) * (timestamps[i1] - timestamps[i2])
    # Set the origin of displacements at tgt_index
    displacements -= displacements[tgt_index]
    # Finally, get the displacement magnitude relative to tgt and discard the middle value (which is supposed to be 0)
    displacements_mag = np.linalg.norm(displacements, axis=1)
    return np.concatenate([displacements_mag[:tgt_index], displacements_mag[tgt_index + 1:]]) 
Example 17
Project: rl_graph_generation   Author: bowenliu16   File: plot.py    License: BSD 3-Clause "New" or "Revised" License
def load_results(file):
    if not os.path.exists(file):
        return None
    with open(file, 'r') as f:
        lines = [line for line in f]
    if len(lines) < 2:
        return None
    keys = [name.strip() for name in lines[0].split(',')]
    data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
    if data.ndim == 1:
        data = data.reshape(1, -1)
    assert data.ndim == 2
    assert data.shape[-1] == len(keys)
    result = {}
    for idx, key in enumerate(keys):
        result[key] = data[:, idx]
    return result 
Example 18
Project: lambda-packs   Author: ryfeus   File: test_io.py    License: MIT License
def test_commented_header(self):
        # Check that names can be retrieved even if the line is commented out.
        data = TextIO("""
#gender age weight
M   21  72.100000
F   35  58.330000
M   33  21.99
        """)
        # The # is part of the first name and should be deleted automatically.
        test = np.genfromtxt(data, names=True, dtype=None)
        ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
                        dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
        assert_equal(test, ctrl)
        # Ditto, but we should get rid of the first element
        data = TextIO(b"""
# gender age weight
M   21  72.100000
F   35  58.330000
M   33  21.99
        """)
        test = np.genfromtxt(data, names=True, dtype=None)
        assert_equal(test, ctrl) 
Example 19
Project: lambda-packs   Author: ryfeus   File: test_io.py    License: MIT License
def test_dtype_with_object(self):
        # Test using an explicit dtype with an object
        data = """ 1; 2001-01-01
                   2; 2002-01-31 """
        ndtype = [('idx', int), ('code', np.object)]
        func = lambda s: strptime(s.strip(), "%Y-%m-%d")
        converters = {1: func}
        test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
                             converters=converters)
        control = np.array(
            [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
            dtype=ndtype)
        assert_equal(test, control)

        ndtype = [('nest', [('idx', int), ('code', np.object)])]
        try:
            test = np.genfromtxt(TextIO(data), delimiter=";",
                                 dtype=ndtype, converters=converters)
        except NotImplementedError:
            pass
        else:
            errmsg = "Nested dtype involving objects should be supported."
            raise AssertionError(errmsg) 
Example 20
Project: deep-learning-note   Author: wdxtub   File: utils.py    License: MIT License
def get_nasa_data():
    data = np.genfromtxt('data/NASA/airfoil_self_noise.dat')
    data = (data - data.mean(axis=0)) / data.std(axis=0)
    return torch.tensor(data[:1500, :-1], dtype=torch.float32), torch.tensor(data[:1500, -1], dtype=torch.float32) 
Example 21
Project: deep-learning-note   Author: wdxtub   File: 39_sgd_raw.py    License: MIT License
def get_nasa_data():
    data = np.genfromtxt('data/NASA/airfoil_self_noise.dat')
    data = (data - data.mean(axis=0)) / data.std(axis=0)
    return torch.tensor(data[:1500, :-1], dtype=torch.float32), torch.tensor(data[:1500, -1], dtype=torch.float32) 
Example 22
Project: coco-json-converter   Author: hazirbas   File: generate_coco_json.py    License: GNU General Public License v3.0
def __init__(self, datapath, imageres="480p"):
        self.info = {"year" : 2016,
                     "version" : "1.0",
                     "description" : "A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation (DAVIS)",
                     "contributor" : "F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, A. Sorkine-Hornung ",
                     "url" : "http://davischallenge.org/",
                     "date_created" : "2016"
                    }
        self.licenses = [{"id": 1,
                          "name": "Attribution-NonCommercial",
                          "url": "http://creativecommons.org/licenses/by-nc-sa/2.0/"
                         }]
        self.type = "instances"
        self.datapath = datapath
        self.seqs = yaml.load(open(os.path.join(self.datapath, "Annotations", "db_info.yml"),
                                   "r")
                             )["sequences"]

        self.categories = [{"id": seqId+1, "name": seq["name"], "supercategory": seq["name"]}
                              for seqId, seq in enumerate(self.seqs)]
        self.cat2id = {cat["name"]: catId+1 for catId, cat in enumerate(self.categories)}

        for s in ["train", "trainval", "val"]:
            imlist = np.genfromtxt( os.path.join(self.datapath, "ImageSets", imageres, s + ".txt"), dtype=str)
            images, annotations = self.__get_image_annotation_pairs__(imlist)
            json_data = {"info" : self.info,
                         "images" : images,
                         "licenses" : self.licenses,
                         "type" : self.type,
                         "annotations" : annotations,
                         "categories" : self.categories}

            with open(os.path.join(self.datapath, "Annotations", imageres + "_" +
                                   s+".json"), "w") as jsonfile:
                json.dump(json_data, jsonfile, sort_keys=True, indent=4) 
Example 23
Project: BMSG-GAN   Author: akanimax   File: generate_loss_plots.py    License: MIT License
def read_loss_log(file_name, delimiter='\t'):
    """
    read and load the loss values from a loss.log file
    :param file_name: path of the loss.log file
    :param delimiter: delimiter used to delimit the two columns
    :return: loss_val => numpy array [Iterations x 2]
    """
    from numpy import genfromtxt
    losses = genfromtxt(file_name, delimiter=delimiter)
    return losses 
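A hypothetical usage sketch: plot both columns of a tab-separated loss.log. Which loss each column holds is an assumption here, so the labels are kept generic.

import matplotlib.pyplot as plt

losses = read_loss_log('loss.log')       # numpy array, [Iterations x 2]
plt.plot(losses[:, 0], label='loss column 0')
plt.plot(losses[:, 1], label='loss column 1')
plt.legend()
plt.savefig('loss_plot.png')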
Example 24
Project: MeanShift_py   Author: mattnedrich   File: mean_shift_runner.py    License: MIT License
def load_points(filename):
    data = genfromtxt(filename, delimiter=',')
    return data 
Example 25
def load_digits(dataset_path):
	# Build the dataset and then split it into data and labels
	data = np.genfromtxt(dataset_path, delimiter=",", dtype="uint8")
	target = data[:, 0]
	data = data[:, 1:].reshape(data.shape[0], 28, 28)

	# Return a tuple of the data and targets
	return (data, target) 
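A hypothetical usage sketch, assuming an MNIST-style CSV where each row holds a label followed by 784 pixel values (one flattened 28x28 image).

data, target = load_digits('digits.csv')
print(data.shape)    # (n_samples, 28, 28)
print(target[:10])   # first ten labels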
Example 26
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_commented_header(self):
        # Check that names can be retrieved even if the line is commented out.
        data = TextIO("""
#gender age weight
M   21  72.100000
F   35  58.330000
M   33  21.99
        """)
        # The # is part of the first name and should be deleted automatically.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
                        dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
        assert_equal(test, ctrl)
        # Ditto, but we should get rid of the first element
        data = TextIO(b"""
# gender age weight
M   21  72.100000
F   35  58.330000
M   33  21.99
        """)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
            test = np.genfromtxt(data, names=True, dtype=None)
            assert_(w[0].category is np.VisibleDeprecationWarning)
        assert_equal(test, ctrl) 
Example 27
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_names_and_comments_none(self):
        # Tests case when names is true but comments is None (gh-10780)
        data = TextIO('col1 col2\n 1 2\n 3 4')
        test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True)
        control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
        assert_equal(test, control) 
Example 28
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_invalid_converter(self):
        strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
                                     (b'r' not in x.lower() and x.strip() or 0.0))
        strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
                                    (b'%' not in x.lower() and x.strip() or 0.0))
        s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
                   "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
                   "D02N03,10/10/2004,R 1,,7,145.55")
        kwargs = dict(
            converters={2: strip_per, 3: strip_rand}, delimiter=",",
            dtype=None)
        assert_raises(ConverterError, np.genfromtxt, s, **kwargs) 
Example 29
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_userconverters_with_explicit_dtype(self):
        # Test user_converters w/ explicit (standard) dtype
        data = TextIO('skip,skip,2001-01-01,1.0,skip')
        test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
                             usecols=(2, 3), converters={2: bytes})
        control = np.array([('2001-01-01', 1.)],
                           dtype=[('', '|S10'), ('', float)])
        assert_equal(test, control) 
Example 30
Project: recruit   Author: Frank-qlu   File: test_io.py    License: Apache License 2.0
def test_utf8_userconverters_with_explicit_dtype(self):
        utf8 = b'\xcf\x96'
        with temppath() as path:
            with open(path, 'wb') as f:
                f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
            test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
                                 usecols=(2, 3), converters={2: np.unicode},
                                 encoding='UTF-8')
        control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
                           dtype=[('', '|U11'), ('', float)])
        assert_equal(test, control)