Python numpy.asarray() Examples

The following are 30 code examples of numpy.asarray(), drawn from open-source projects. Each example is attributed to its original project and source file. You may also want to check out all available functions and classes of the numpy module.
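Before the examples, a quick sketch of the core behavior (standard NumPy semantics, assuming numpy is imported as np throughout): numpy.asarray() converts its input to an ndarray, but unlike numpy.array() it does not copy when the input is already an ndarray of a compatible dtype.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.asarray(a)                    # already an ndarray: no copy is made
print(b is a)                        # True

c = np.asarray([1, 2, 3])            # a list is converted to a new ndarray
d = np.asarray(a, dtype='float32')   # dtype mismatch forces a copy
print(d is a)                        # False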
Example #1
Source File: input_helpers.py    From deep-siamese-text-similarity with MIT License
def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """
        Generates a batch iterator for a dataset.
        """
        data = np.asarray(data)
        data_size = len(data)
        # ceiling division: yields a final partial batch, but never an
        # empty one when data_size is an exact multiple of batch_size
        num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
        for epoch in range(num_epochs):
            # Shuffle the data at each epoch
            if shuffle:
                shuffle_indices = np.random.permutation(np.arange(data_size))
                shuffled_data = data[shuffle_indices]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = min((batch_num + 1) * batch_size, data_size)
                yield shuffled_data[start_index:end_index]
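A quick usage sketch (hedged: helper stands in for an instance of the class this method belongs to, and the toy data is made up):

batches = helper.batch_iter(list(range(10)), batch_size=4, num_epochs=1, shuffle=False)
for batch in batches:
    print(batch)   # [0 1 2 3], then [4 5 6 7], then [8 9]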
Example #2
Source File: theano_backend.py    From Att-ChemdNER with Apache License 2.0
def variable(value, dtype=None, name=None):
    '''Instantiates a variable and returns it.

    # Arguments
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.

    # Returns
        A variable instance (with Keras metadata included).
    '''
    if dtype is None:
        dtype = floatx()
    if hasattr(value, 'tocoo'):
        _assert_sparse_module()
        variable = th_sparse_module.as_sparse_variable(value)
    else:
        value = np.asarray(value, dtype=dtype)
        variable = theano.shared(value=value, name=name, strict=False)
    variable._keras_shape = value.shape
    variable._uses_learning_phase = False
    return variable 
Example #3
Source File: features.py    From vergeml with MIT License
def transform(self, sample):
        if not self.model:
            if not self.architecture.startswith("@"):
                self.preprocess_input = get_preprocess_input(self.architecture)
                self.model = get_imagenet_architecture(self.architecture, self.variant, self.image_size, self.alpha, self.output_layer)
            else:
                # TODO get image size!
                self.model = get_custom_architecture(self.architecture, self.trainings_dir, self.output_layer)
                self.preprocess_input = generic_preprocess_input

        x = sample.x
        # TODO better resize
        x = x.convert('RGB')
        x = resize_image(x, self.image_size, self.image_size, 'antialias', 'aspect-fill')
        # x = x.resize((self.image_size, self.image_size))
        x = np.asarray(x)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        features = self.model.predict(x)
        features = features.flatten()
        sample.x = features
        sample = super().transform(sample)
        return sample 
Example #4
Source File: common.py    From Att-ChemdNER with Apache License 2.0
def cast_to_floatx(x):
    '''Cast a Numpy array to the default Keras float type.

    # Arguments
        x: Numpy array.

    # Returns
        The same Numpy array, cast to its new type.

    # Example
    ```python
        >>> from keras import backend as K
        >>> K.floatx()
        'float32'
        >>> arr = numpy.array([1.0, 2.0], dtype='float64')
        >>> arr.dtype
        dtype('float64')
        >>> new_arr = K.cast_to_floatx(arr)
        >>> new_arr
        array([ 1.,  2.], dtype=float32)
        >>> new_arr.dtype
        dtype('float32')
    ```
    '''
    return np.asarray(x, dtype=_FLOATX) 
Example #5
Source File: features.py    From vergeml with MIT License
def transform(self, sample):
        if not self.model:
            if not self.architecture.startswith("@"):
                _, self.preprocess_input, self.model = \
                    get_imagenet_architecture(self.architecture, self.variant, self.size, self.alpha, self.output_layer)
            else:
                self.model = get_custom_architecture(self.architecture, self.trainings_dir, self.output_layer)
                self.preprocess_input = generic_preprocess_input

        x = sample.x
        x = x.convert('RGB')
        x = resize_image(x, self.image_size, self.image_size, 'antialias', 'aspect-fill')
        #x = x.resize((self.image_size, self.image_size))
        x = np.asarray(x)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        features = self.model.predict(x)
        features = features.flatten()
        sample.x = features
        sample.y = None
        return sample 
Example #6
Source File: input_helpers.py    From deep-siamese-text-similarity with MIT License
def loadW2V(self, emb_path, type="bin"):
        print("Loading W2V data...")
        num_keys = 0
        if type == "textgz":
            # this seems faster than gensim non-binary load
            for line in gzip.open(emb_path):
                l = line.strip().split()
                st = l[0].lower()
                self.pre_emb[st] = np.asarray(l[1:])
            num_keys = len(self.pre_emb)
        # elif, not a bare if: otherwise the binary branch below would also
        # run (and overwrite pre_emb) whenever type == "textgz"
        elif type == "text":
            # this seems faster than gensim non-binary load
            for line in open(emb_path):
                l = line.strip().split()
                st = l[0].lower()
                self.pre_emb[st] = np.asarray(l[1:])
            num_keys = len(self.pre_emb)
        else:
            self.pre_emb = Word2Vec.load_word2vec_format(emb_path, binary=True)
            self.pre_emb.init_sims(replace=True)
            num_keys = len(self.pre_emb.vocab)
        print("loaded word2vec len ", num_keys)
        gc.collect()
Example #7
Source File: input_helpers.py    From deep-siamese-text-similarity with MIT License
def getTsvData(self, filepath):
        print("Loading training data from " + filepath)
        x1 = []
        x2 = []
        y = []
        # each line holds two sentences plus a label, tab-separated
        for line in open(filepath):
            l = line.strip().split("\t")
            if len(l) < 3:  # need both sentences and the label in l[2]
                continue
            # randomly swap sentence order to avoid positional bias
            if random() > 0.5:
                x1.append(l[0].lower())
                x2.append(l[1].lower())
            else:
                x1.append(l[1].lower())
                x2.append(l[0].lower())
            y.append(int(l[2]))
        return np.asarray(x1), np.asarray(x2), np.asarray(y)
Example #8
Source File: dataset_tool.py    From disentangling_conditional_gans with MIT License
def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
    print('Loading CelebA from "%s"' % celeba_dir)
    glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
    image_filenames = sorted(glob.glob(glob_pattern))
    expected_images = 202599
    if len(image_filenames) != expected_images:
        error('Expected to find %d images' % expected_images)
    
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            assert img.shape == (218, 178, 3)
            img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
            img = img.transpose(2, 0, 1) # HWC => CHW
            tfr.add_image(img)

Example #9
Source File: dataset_wrappers.py    From mmdetection with Apache License 2.0
def __init__(self, dataset, oversample_thr):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.CLASSES = dataset.CLASSES

        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        repeat_indices = []
        for dataset_index, repeat_factor in enumerate(repeat_factors):
            repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
        self.repeat_indices = repeat_indices

        flags = []
        if hasattr(self.dataset, 'flag'):
            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
                flags.extend([flag] * int(math.ceil(repeat_factor)))
            assert len(flags) == len(repeat_indices)
        self.flag = np.asarray(flags, dtype=np.uint8) 
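The expansion is plain arithmetic: each repeat factor is rounded up with ceil, and the dataset index is duplicated that many times. A self-contained illustration (made-up factors, not mmdetection code):

import math

repeat_factors = [1.0, 2.5, 0.3]
repeat_indices = []
for dataset_index, repeat_factor in enumerate(repeat_factors):
    repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
print(repeat_indices)   # [0, 1, 1, 1, 2]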
Example #10
Source File: structures.py    From mmdetection with Apache License 2.0
def areas(self):
        """Compute areas of masks.

        This function is modified from
        https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387
        It only works with polygons, using the shoelace formula.

        Returns:
            ndarray: areas of each instance
        """  # noqa: E501
        area = []
        for polygons_per_obj in self.masks:
            area_per_obj = 0
            for p in polygons_per_obj:
                area_per_obj += self._polygon_area(p[0::2], p[1::2])
            area.append(area_per_obj)
        return np.asarray(area) 
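The _polygon_area helper is not shown here; based on the detectron2 code this is adapted from, it is presumably the shoelace formula, along the lines of:

def _polygon_area(self, x, y):
    # shoelace formula: half the absolute sum of cross products of
    # consecutive vertices (x and y are the polygon's coordinate vectors)
    return 0.5 * np.abs(
        np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))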
Example #11
Source File: chainer_alex.py    From mlimages with MIT License
def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break 
Example #12
Source File: parse_result.py    From deep-learning-note with MIT License
def build_example(line):
    parts = line.split(' ')
    label = int(parts[0])
    if label > 1:
        label = 1

    indice_list = []
    items = parts[1:]
    for item in items:
        index = int(item.split(':')[0])
        if index >= input_dim:
            continue
        indice_list += [[0, index]]

    value_list = [1 for i in range(len(indice_list))]
    shape_list = [1, input_dim]

    indice_list = numpy.asarray(indice_list)
    value_list = numpy.asarray(value_list)
    shape_list = numpy.asarray(shape_list)
    return indice_list, value_list, shape_list, label


# This must be placed inside the `with` block, otherwise the exported graph will not contain the variables and parameters
Example #13
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def color_overlap(color1, *args):
    '''
    color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
      of color1 followed by any additional colors (overlaid left to right). This respects alpha
      values when calculating the results.
    Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
    '''
    args = list(args)
    args.insert(0, color1)
    rgba = np.asarray([0.5,0.5,0.5,0])
    for c in args:
        c = to_rgba(c)
        a = c[...,3]
        a0 = rgba[...,3]
        if   np.isclose(a0, 0).all(): rgba = np.ones(rgba.shape) * c
        elif np.isclose(a,  0).all(): continue
        else:                         rgba = times(a, c) + times(1-a, rgba)
    return rgba 
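A worked example of the blending rule above, computed by hand (assuming `times` is elementwise multiplication): overlaying a half-transparent red on opaque white blends toward pink.

white = np.asarray([1.0, 1.0, 1.0, 1.0])
red50 = np.asarray([1.0, 0.0, 0.0, 0.5])
out = 0.5 * red50 + (1 - 0.5) * white   # times(a, c) + times(1-a, rgba)
print(out)   # [1.   0.5  0.5  0.75]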
Example #14
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
      they are used to scale z.

    Note that this function can automatically rescale data into log-space if the colormap is a
    neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
    optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:        
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u) 
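A minimal usage sketch with a standard matplotlib colormap (assumes neuropythy's dependencies such as pimms are importable; the unit handling and log-space colormaps are neuropythy-specific and not exercised here):

zs = np.asarray([0.0, 0.5, 1.0, np.nan])
colors = apply_cmap(zs, 'viridis')   # one RGBA row per value; NaNs fall to the colormap's 'under' color
print(colors.shape)                  # (4, 4)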
Example #15
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def images_from_filemap(fmap):
    '''
    images_from_filemap(fmap) yields a persistent map of MRImages tracked by the given subject with
      the given name and path; in freesurfer subjects these are renamed and converted from their
      typical freesurfer filenames (such as 'ribbon') to forms that conform to the neuropythy naming
      conventions (such as 'gray_mask'). To access data by their original names, use the filemap.
    '''
    imgmap = fmap.data_tree.image
    def img_loader(k): return lambda:imgmap[k]
    imgs = {k:img_loader(k) for k in six.iterkeys(imgmap)}
    def _make_mask(val, eq=True):
        rib = imgmap['ribbon']
        img = np.asarray(rib.dataobj)
        arr = (img == val) if eq else (img != val)
        arr.setflags(write=False)
        return type(rib)(arr, rib.affine, rib.header)
    imgs['lh_gray_mask']  = lambda:_make_mask(3)
    imgs['lh_white_mask'] = lambda:_make_mask(2)
    imgs['rh_gray_mask']  = lambda:_make_mask(42)
    imgs['rh_white_mask'] = lambda:_make_mask(41)
    imgs['brain_mask']    = lambda:_make_mask(0, False)
    # merge in with the typical images
    return pimms.merge(fmap.data_tree.image, pimms.lazy_map(imgs)) 
Example #16
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def image_dimensions(images):
        '''
        sub.image_dimensions is a tuple of the default size of an anatomical image for the given
        subject.
        '''
        if images is None or len(images) == 0: return None
        if pimms.is_lazy_map(images):
            # look for an image that isn't lazy...
            key = next((k for k in images.iterkeys() if not images.is_lazy(k)), None)
            if key is None: key = next(images.iterkeys(), None)
        else:
            key = next(images.iterkeys(), None)
        img = images[key]
        if img is None: return None
        if is_image(img): img = img.dataobj
        return np.asarray(img).shape 
Example #17
Source File: images.py    From neuropythy with GNU Affero General Public License v3.0
def parse_dataobj(self, dataobj, hdat={}):
        # first, see if we have a specified shape/size
        ish = next((hdat[k] for k in ('image_size', 'image_shape', 'shape') if k in hdat), None)
        if ish is Ellipsis: ish = None
        # make a numpy array of the appropriate dtype
        dtype = self.parse_type(hdat, dataobj=dataobj)
        try:    dataobj = dataobj.dataobj
        except Exception: pass
        if   dataobj is not None: arr = np.asarray(dataobj).astype(dtype)
        elif ish:                 arr = np.zeros(ish,       dtype=dtype)
        else:                     arr = np.zeros([1,1,1,0], dtype=dtype)
        # reshape to the requested shape if need-be
        if ish and ish != arr.shape: arr = np.reshape(arr, ish)
        # then reshape to a valid (4D) shape
        sh = arr.shape
        if   len(sh) == 2: arr = np.reshape(arr, (sh[0], 1, 1, sh[1]))
        elif len(sh) == 1: arr = np.reshape(arr, (sh[0], 1, 1))
        elif len(sh) == 3: arr = np.reshape(arr, sh)
        elif len(sh) != 4: raise ValueError('Cannot convert n-dimensional array to image if n > 4')
        # and return
        return arr 
Example #18
Source File: tracker.py    From kalman_filter_multi_object_tracking with MIT License
def __init__(self, prediction, trackIdCount):
        """Initialize variables used by Track class
        Args:
            prediction: predicted centroids of object to be tracked
            trackIdCount: identification of each track object
        Return:
            None
        """
        self.track_id = trackIdCount  # identification of each track object
        self.KF = KalmanFilter()  # KF instance to track this object
        self.prediction = np.asarray(prediction)  # predicted centroids (x,y)
        self.skipped_frames = 0  # number of frames skipped undetected
        self.trace = []  # trace path 
Example #19
Source File: filter.py    From fenics-topopt with MIT License
def filter_volume_sensitivities(self, _xPhys, dv, ft):
        if ft == 0:
            pass
        elif ft == 1:
            dv[:] = np.asarray(self.H * (dv[np.newaxis].T / self.Hs))[:, 0] 
Example #20
Source File: xrft.py    From xrft with MIT License
def _cross_spectrum(daft1, daft2, dim, N, density):
    cs = (daft1 * np.conj(daft2)).real

    if density:
        cs /= (np.asarray(N).prod())**2
        for i in dim:
            cs /= daft1['freq_' + i + '_spacing']

    return cs 
Example #21
Source File: bio_utils.py    From models with MIT License
def sequence_to_int(sequences, max_len):
    # encode every read as a fixed-length integer vector
    seqs_enc = np.asarray([nucleotide_to_int(read, max_len) for read in sequences], 'uint8')
    if type(sequences) is not list:
        # non-list input: flatten the per-read encodings into one 1-D array
        seqs_enc = np.asarray(list(itertools.chain(*seqs_enc)))
    return seqs_enc
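nucleotide_to_int comes from the same module and is not shown; a hypothetical sketch of such an encoder (the base-to-integer mapping and padding scheme are assumptions):

def nucleotide_to_int(read, max_len):
    # hypothetical mapping: A/C/G/T to 0-3, anything else (e.g. N) to 4
    table = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    enc = [table.get(base, 4) for base in read[:max_len].upper()]
    enc += [4] * (max_len - len(enc))   # pad short reads to max_len
    return enc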
Example #22
Source File: dataloader.py    From models with MIT License
def __init__(self, fasta_file, split_char=' ', id_field=0):

        seq_dict = self.read_fasta(fasta_file, split_char, id_field)
        self.length = len(seq_dict)

        sequences = sorted(seq_dict.items(), key=lambda kv: len(kv[1]))  # sort by sequence length
        self.identifier, self.seqs = zip(*sequences)
        self.seqs = [np.asarray([seq]) for seq in self.seqs] 
Example #23
Source File: logger.py    From Random-Erasing with Apache License 2.0
def plot(self, names=None):
        names = self.names if names is None else names
        numbers = self.numbers
        for name in names:
            x = np.arange(len(numbers[name]))
            plt.plot(x, np.asarray(numbers[name]))
        plt.legend([self.title + '(' + name + ')' for name in names])
        plt.grid(True)
Example #24
Source File: utils.py    From deep-learning-note with MIT License
def read_birth_life_data(filename):
    """
    Read in birth_life_2010.txt and return:
        data: the data as a NumPy array
        n_samples: the number of samples
    """
    text = open(filename, 'r').readlines()[1:]
    data = [line[:-1].split('\t') for line in text]
    births = [float(line[1]) for line in data]
    lifes = [float(line[2]) for line in data]
    data = list(zip(births, lifes))
    n_samples = len(data)
    data = np.asarray(data, dtype=np.float32)
    return data, n_samples 
Example #25
Source File: 18_basic_tfrecord.py    From deep-learning-note with MIT License
def get_image_binary(filename):
    image = Image.open(filename)
    image = np.asarray(image, np.uint8)
    shape = np.array(image.shape, np.int32)
    return shape.tobytes(), image.tobytes() 
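The byte strings round-trip with standard NumPy calls (the filename below is illustrative):

shape_bytes, image_bytes = get_image_binary('photo.png')   # hypothetical file
shape = np.frombuffer(shape_bytes, np.int32)
image = np.frombuffer(image_bytes, np.uint8).reshape(shape)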
Example #26
Source File: register_retinotopy.py    From neuropythy with GNU Affero General Public License v3.0
def _guess_surf_file(fl):
    # MGH/MGZ files
    try: return np.asarray(fsmgh.load(fl).dataobj).flatten()
    except Exception: pass
    # FreeSurfer Curv files
    try: return fsio.read_morph_data(fl)
    except Exception: pass
    # Nifti files
    try: return np.squeeze(nib.load(fl).dataobj)
    except Exception: pass
    raise ValueError('Could not determine filetype for: %s' % fl) 
Example #27
Source File: __init__.py    From neuropythy with GNU Affero General Public License v3.0
def to_java_ints(m):
    '''
    to_java_ints(m) yields a java array object for the vector or matrix m.
    '''
    global _java
    if _java is None: _init_registration()
    m = np.asarray(m)
    dims = len(m.shape)
    if dims > 2: raise ValueError('1D and 2D arrays supported only')
    bindat = serialize_numpy(m, 'i')
    return (_java.jvm.nben.util.Numpy.int2FromBytes(bindat) if dims == 2
            else _java.jvm.nben.util.Numpy.int1FromBytes(bindat)) 
Example #28
Source File: __init__.py    From neuropythy with GNU Affero General Public License v3.0
def to_java_array(m):
    '''
    to_java_array(m) yields to_java_ints(m) if m is an array of integers and to_java_doubles(m) if
    m is anything else. The numpy array m is tested via numpy.issubdtype(m.dtype, numpy.int64).
    '''
    if not hasattr(m, '__iter__'): return m
    m = np.asarray(m)
    if np.issubdtype(m.dtype, np.dtype(int).type) or all(isinstance(x, num.Integral) for x in m):
        return to_java_ints(m)
    else:
        return to_java_doubles(m) 
Example #29
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def to_rgba(val):
    '''
    to_rgba(val) is identical to matplotlib.colors.to_rgba(val) except that it operates over lists
      as well as individual elements to yield matrices of rgba values. In addition, it always yields
      numpy vectors or matrices.
    '''
    if pimms.is_npmatrix(val) and val.shape[1] == 4: return val
    try: return np.asarray(matplotlib.colors.to_rgba(val))
    except Exception: return np.asarray([matplotlib.colors.to_rgba(u) for u in val]) 
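For example (standard matplotlib color names):

print(to_rgba('red'))             # [1. 0. 0. 1.]
print(to_rgba(['red', 'blue']))   # a 2x4 matrix, one RGBA row per color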
Example #30
Source File: 1_generate_text.py    From deep-learning-note with MIT License
def sample(preds, temperature=1.0):
    # given the model's predictions, sample the index of the next character
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
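Temperature controls how peaked the sampling distribution is: values below 1.0 sharpen it toward the argmax, values above 1.0 flatten it toward uniform. A self-contained illustration with made-up probabilities:

preds = [0.1, 0.2, 0.7]
for t in (0.1, 1.0, 10.0):
    draws = [sample(preds, temperature=t) for _ in range(1000)]
    # low t: counts concentrate on index 2; high t: roughly uniform
    print(t, np.bincount(draws, minlength=3))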