Python numpy.asarray() Examples

The following are 60 code examples showing how to use numpy.asarray(), drawn from open-source Python projects.

You may also check out all available functions and classes of the numpy module, or try the search function.
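
At its core, numpy.asarray() converts its argument (lists, tuples, scalars, or existing arrays) to an ndarray and, unlike numpy.array(), avoids copying when the input is already an ndarray of the requested dtype. A minimal illustration:

import numpy as np

a = np.asarray([1.0, 2.0, 3.0])      # builds a new array from the list
b = np.asarray(a)                    # no copy: b is the same object as a
c = np.asarray(a, dtype='float32')   # copies, because the requested dtype differs

print(a is b)  # True
print(a is c)  # False

This no-copy behaviour is why asarray() appears so often at function boundaries in the examples below: it normalizes inputs cheaply.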

Example 1
Project: vergeml   Author: mme   File: features.py    License: MIT License
def transform(self, sample):
        if not self.model:
            if not self.architecture.startswith("@"):
                _, self.preprocess_input, self.model = \
                    get_imagenet_architecture(self.architecture, self.variant, self.size, self.alpha, self.output_layer)
            else:
                self.model = get_custom_architecture(self.architecture, self.trainings_dir, self.output_layer)
                self.preprocess_input = generic_preprocess_input

        x = sample.x
        x = x.convert('RGB')
        x = resize_image(x, self.image_size, self.image_size, 'antialias', 'aspect-fill')
        #x = x.resize((self.image_size, self.image_size))
        x = np.asarray(x)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        features = self.model.predict(x)
        features = features.flatten()
        sample.x = features
        sample.y = None
        return sample 
Example 2
Project: vergeml   Author: mme   File: features.py    License: MIT License
def transform(self, sample):
        if not self.model:
            if not self.architecture.startswith("@"):
                self.preprocess_input = get_preprocess_input(self.architecture)
                self.model = get_imagenet_architecture(self.architecture, self.variant, self.image_size, self.alpha, self.output_layer)
            else:
                # TODO get image size!
                self.model = get_custom_architecture(self.architecture, self.trainings_dir, self.output_layer)
                self.preprocess_input = generic_preprocess_input

        x = sample.x
        # TODO better resize
        x = x.convert('RGB')
        x = resize_image(x, self.image_size, self.image_size, 'antialias', 'aspect-fill')
        # x = x.resize((self.image_size, self.image_size))
        x = np.asarray(x)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        features = self.model.predict(x)
        features = features.flatten()
        sample.x = features
        sample = super().transform(sample)
        return sample 
Example 3
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0
def variable(value, dtype=None, name=None):
    '''Instantiates a variable and returns it.

    # Arguments
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.

    # Returns
        A variable instance (with Keras metadata included).
    '''
    if dtype is None:
        dtype = floatx()
    if hasattr(value, 'tocoo'):
        _assert_sparse_module()
        variable = th_sparse_module.as_sparse_variable(value)
    else:
        value = np.asarray(value, dtype=dtype)
        variable = theano.shared(value=value, name=name, strict=False)
    variable._keras_shape = value.shape
    variable._uses_learning_phase = False
    return variable 
Example 4
Project: Att-ChemdNER   Author: lingluodlut   File: common.py    License: Apache License 2.0
def cast_to_floatx(x):
    '''Cast a Numpy array to the default Keras float type.

    # Arguments
        x: Numpy array.

    # Returns
        The same Numpy array, cast to its new type.

    # Example
    ```python
        >>> from keras import backend as K
        >>> K.floatx()
        'float32'
        >>> arr = numpy.array([1.0, 2.0], dtype='float64')
        >>> arr.dtype
        dtype('float64')
        >>> new_arr = K.cast_to_floatx(arr)
        >>> new_arr
        array([ 1.,  2.], dtype=float32)
        >>> new_arr.dtype
        dtype('float32')
    ```
    '''
    return np.asarray(x, dtype=_FLOATX) 
Example 5
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    License: MIT License
def loadW2V(self,emb_path, type="bin"):
        print("Loading W2V data...")
        num_keys = 0
        if type=="textgz":
            # this seems faster than gensim non-binary load
            for line in gzip.open(emb_path):
                l = line.strip().split()
                st=l[0].lower()
                self.pre_emb[st]=np.asarray(l[1:])
            num_keys=len(self.pre_emb)
        if type=="text":
            # this seems faster than gensim non-binary load
            for line in open(emb_path):
                l = line.strip().split()
                st=l[0].lower()
                self.pre_emb[st]=np.asarray(l[1:])
            num_keys=len(self.pre_emb)
        else:
            self.pre_emb = Word2Vec.load_word2vec_format(emb_path,binary=True)
            self.pre_emb.init_sims(replace=True)
            num_keys=len(self.pre_emb.vocab)
        print("loaded word2vec len ", num_keys)
        gc.collect() 
Example 6
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    License: MIT License
def getTsvData(self, filepath):
        print("Loading training data from "+filepath)
        x1=[]
        x2=[]
        y=[]
        # positive samples from file
        for line in open(filepath):
            l=line.strip().split("\t")
            if len(l)<2:
                continue
            if random() > 0.5:
                x1.append(l[0].lower())
                x2.append(l[1].lower())
            else:
                x1.append(l[1].lower())
                x2.append(l[0].lower())
            y.append(int(l[2]))
        return np.asarray(x1),np.asarray(x2),np.asarray(y) 
Example 7
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    License: MIT License
def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
        """
        Generates a batch iterator for a dataset.
        """
        data = np.asarray(data)
        print(data)
        print(data.shape)
        data_size = len(data)
        num_batches_per_epoch = int(np.ceil(data_size / float(batch_size)))  # ceil avoids yielding an empty final batch
        for epoch in range(num_epochs):
            # Shuffle the data at each epoch
            if shuffle:
                shuffle_indices = np.random.permutation(np.arange(data_size))
                shuffled_data = data[shuffle_indices]
            else:
                shuffled_data = data
            for batch_num in range(num_batches_per_epoch):
                start_index = batch_num * batch_size
                end_index = min((batch_num + 1) * batch_size, data_size)
                yield shuffled_data[start_index:end_index] 
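
A hypothetical call site for the iterator above (here `helper` stands for an instance of the surrounding class, and the zipped-tuple layout is an assumption, not confirmed by the project):

batches = helper.batch_iter(list(zip(x1, x2, y)), batch_size=64, num_epochs=10)
for batch in batches:
    x1_batch, x2_batch, y_batch = zip(*batch)  # unpack one shuffled mini-batch
    # ... feed the batch to the training step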
Example 8
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    License: MIT License
def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
    print('Loading CelebA from "%s"' % celeba_dir)
    glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
    image_filenames = sorted(glob.glob(glob_pattern))
    expected_images = 202599
    if len(image_filenames) != expected_images:
        error('Expected to find %d images' % expected_images)
    
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            assert img.shape == (218, 178, 3)
            img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
            img = img.transpose(2, 0, 1) # HWC => CHW
            tfr.add_image(img)

#---------------------------------------------------------------------------- 
Example 9
Project: mmdetection   Author: open-mmlab   File: dataset_wrappers.py    License: Apache License 2.0
def __init__(self, dataset, oversample_thr):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.CLASSES = dataset.CLASSES

        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
        repeat_indices = []
        for dataset_index, repeat_factor in enumerate(repeat_factors):
            repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
        self.repeat_indices = repeat_indices

        flags = []
        if hasattr(self.dataset, 'flag'):
            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
                flags.extend([flag] * int(math.ceil(repeat_factor)))
            assert len(flags) == len(repeat_indices)
        self.flag = np.asarray(flags, dtype=np.uint8) 
Example 10
Project: mmdetection   Author: open-mmlab   File: structures.py    License: Apache License 2.0
def areas(self):
        """Compute areas of masks.

        This function is adapted from
        https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387
        and only works with polygon masks, using the shoelace formula.

        Return:
            ndarray: areas of each instance
        """  # noqa: W501
        area = []
        for polygons_per_obj in self.masks:
            area_per_obj = 0
            for p in polygons_per_obj:
                area_per_obj += self._polygon_area(p[0::2], p[1::2])
            area.append(area_per_obj)
        return np.asarray(area) 
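
The `_polygon_area` helper is not shown above. A minimal sketch of the shoelace formula it applies (the vectorized form here is an assumption about the helper, not the project's verbatim code):

import numpy as np

def _polygon_area(x, y):
    # x, y: vertex coordinates of a simple polygon, in order
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))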
Example 11
Project: mlimages   Author: icoxfog417   File: chainer_alex.py    License: MIT License
def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break 
Example 12
Project: deep-learning-note   Author: wdxtub   File: parse_result.py    License: MIT License
def build_example(line):
    parts = line.split(' ')
    label = int(parts[0])
    if label > 1:
        label = 1

    indice_list = []
    items = parts[1:]
    for item in items:
        index = int(item.split(':')[0])
        if index >= input_dim:
            continue
        indice_list += [[0, index]]

    value_list = [1 for i in range(len(indice_list))]
    shape_list = [1, input_dim]

    indice_list = numpy.asarray(indice_list)
    value_list = numpy.asarray(value_list)
    shape_list = numpy.asarray(shape_list)
    return indice_list, value_list, shape_list, label


# Be sure to run this inside the `with` block, otherwise the exported graph will not contain the variables and parameters
Example 13
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def color_overlap(color1, *args):
    '''
    color_overlap(color1, color2...) yields the rgba value associated with overlaying color2 on top
      of color1 followed by any additional colors (overlaid left to right). This respects alpha
      values when calculating the results.
    Note that colors may be lists of colors, in which case a matrix of RGBA values is yielded.
    '''
    args = list(args)
    args.insert(0, color1)
    rgba = np.asarray([0.5,0.5,0.5,0])
    for c in args:
        c = to_rgba(c)
        a = c[...,3]
        a0 = rgba[...,3]
        if   np.isclose(a0, 0).all(): rgba = np.ones(rgba.shape) * c
        elif np.isclose(a,  0).all(): continue
        else:                         rgba = times(a, c) + times(1-a, rgba)
    return rgba 
Example 14
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
      they are used to scale z.

    Note that this function can automatically rescale data into log-space if the colormap is a
    neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
    optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:        
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u) 
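
A hypothetical usage with a standard matplotlib colormap, mapping five values onto RGBA rows:

colors = apply_cmap(np.linspace(0, 1, 5), 'viridis')        # shape (5, 4): one RGBA row per value
colors = apply_cmap(values, 'viridis', vmin=0.0, vmax=1.0)  # scale against a fixed range instead of the data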
Example 15
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def images_from_filemap(fmap):
    '''
    images_from_filemap(fmap) yields a persistent map of MRImages tracked by the given subject with
      the given name and path; in freesurfer subjects these are renamed and converted from their
      typical freesurfer filenames (such as 'ribbon') to forms that conform to the neuropythy naming
      conventions (such as 'gray_mask'). To access data by their original names, use the filemap.
    '''
    imgmap = fmap.data_tree.image
    def img_loader(k): return lambda:imgmap[k]
    imgs = {k:img_loader(k) for k in six.iterkeys(imgmap)}
    def _make_mask(val, eq=True):
        rib = imgmap['ribbon']
        img = np.asarray(rib.dataobj)
        arr = (img == val) if eq else (img != val)
        arr.setflags(write=False)
        return type(rib)(arr, rib.affine, rib.header)
    imgs['lh_gray_mask']  = lambda:_make_mask(3)
    imgs['lh_white_mask'] = lambda:_make_mask(2)
    imgs['rh_gray_mask']  = lambda:_make_mask(42)
    imgs['rh_white_mask'] = lambda:_make_mask(41)
    imgs['brain_mask']    = lambda:_make_mask(0, False)
    # merge in with the typical images
    return pimms.merge(fmap.data_tree.image, pimms.lazy_map(imgs)) 
Example 16
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def image_dimensions(images):
        '''
        sub.image_dimensions is a tuple of the default size of an anatomical image for the given
        subject.
        '''
        if images is None or len(images) == 0: return None
        if pimms.is_lazy_map(images):
            # look for an image that isn't lazy...
            key = next((k for k in images.iterkeys() if not images.is_lazy(k)), None)
            if key is None: key = next(images.iterkeys(), None)
        else:
            key = next(images.iterkeys(), None)
        img = images[key]
        if img is None: return None
        if is_image(img): img = img.dataobj
        return np.asarray(img).shape 
Example 17
Project: neuropythy   Author: noahbenson   File: images.py    License: GNU Affero General Public License v3.0
def parse_dataobj(self, dataobj, hdat={}):
        # first, see if we have a specified shape/size
        ish = next((hdat[k] for k in ('image_size', 'image_shape', 'shape') if k in hdat), None)
        if ish is Ellipsis: ish = None
        # make a numpy array of the appropriate dtype
        dtype = self.parse_type(hdat, dataobj=dataobj)
        try:    dataobj = dataobj.dataobj
        except Exception: pass
        if   dataobj is not None: arr = np.asarray(dataobj).astype(dtype)
        elif ish:                 arr = np.zeros(ish,       dtype=dtype)
        else:                     arr = np.zeros([1,1,1,0], dtype=dtype)
        # reshape to the requested shape if need-be
        if ish and ish != arr.shape: arr = np.reshape(arr, ish)
        # then reshape to a valid (4D) shape
        sh = arr.shape
        if   len(sh) == 2: arr = np.reshape(arr, (sh[0], 1, 1, sh[1]))
        elif len(sh) == 1: arr = np.reshape(arr, (sh[0], 1, 1))
        elif len(sh) == 3: arr = np.reshape(arr, sh)
        elif len(sh) != 4: raise ValueError('Cannot convert n-dimensional array to image if n > 4')
        # and return
        return arr 
Example 18
Project: vergeml   Author: mme   File: image.py    License: MIT License
def transform(self, sample):
        sample.x = np.asarray(sample.x)
        sample.y = None
        return sample 
Example 19
Project: vergeml   Author: mme   File: labeled_image.py    License: MIT License
def transform(self, sample):
        onehot = np.array([float(label in sample.y) for label in self.meta['labels']])
        sample.x = np.asarray(sample.x)
        sample.y = onehot
        return sample 
Example 20
Project: fenics-topopt   Author: zfergus   File: filter.py    License: MIT License
def filter_variables(self, x, xPhys, ft):
        if ft == 0:
            xPhys[:] = x
        elif ft == 1:
            xPhys[:] = np.asarray(self.H * x[np.newaxis].T / self.Hs)[:, 0] 
Example 21
Project: fenics-topopt   Author: zfergus   File: filter.py    License: MIT License
def filter_compliance_sensitivities(self, xPhys, dc, ft):
        if ft == 0:
            dc[:] = (np.asarray((self.H * (xPhys * dc))[np.newaxis].T /
                self.Hs)[:, 0] / np.maximum(0.001, xPhys))
        elif ft == 1:
            dc[:] = np.asarray(self.H * (dc[np.newaxis].T / self.Hs))[:, 0] 
Example 22
Project: fenics-topopt   Author: zfergus   File: filter.py    License: MIT License
def filter_volume_sensitivities(self, _xPhys, dv, ft):
        if ft == 0:
            pass
        elif ft == 1:
            dv[:] = np.asarray(self.H * (dv[np.newaxis].T / self.Hs))[:, 0] 
Example 23
Project: fenics-topopt   Author: zfergus   File: filter.py    License: MIT License
def filter_variables(self, x, xPhys, ft):
        if ft == 0:
            xPhys[:] = x
        elif ft == 1:
            xPhys[:] = np.asarray(self.H * x[np.newaxis].T / self.Hs)[:, 0] 
Example 24
Project: fenics-topopt   Author: zfergus   File: filter.py    License: MIT License
def filter_compliance_sensitivities(self, xPhys, dc, ft):
        if ft == 0:
            dc[:] = (np.asarray((self.H * (xPhys * dc))[np.newaxis].T /
                self.Hs)[:, 0] / np.maximum(0.001, xPhys))
        elif ft == 1:
            dc[:] = np.asarray(self.H * (dc[np.newaxis].T / self.Hs))[:, 0] 
Example 25
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0
def set_value(x, value):
    x.set_value(np.asarray(value, dtype=x.dtype)) 
Example 26
Project: Att-ChemdNER   Author: lingluodlut   File: theano_backend.py    License: Apache License 2.0
def batch_set_value(tuples):
    for x, value in tuples:
        x.set_value(np.asarray(value, dtype=x.dtype)) 
Example 27
Project: xrft   Author: xgcm   File: xrft.py    License: MIT License
def _power_spectrum(daft, dim, N, density):

    ps = (daft * np.conj(daft)).real

    if density:
        ps /= (np.asarray(N).prod()) ** 2
        for i in dim:
            ps /= daft['freq_' + i + '_spacing']

    return ps 
Example 28
Project: xrft   Author: xgcm   File: xrft.py    License: MIT License
def _cross_spectrum(daft1, daft2, dim, N, density):
    cs = (daft1 * np.conj(daft2)).real

    if density:
        cs /= (np.asarray(N).prod())**2
        for i in dim:
            cs /= daft1['freq_' + i + '_spacing']

    return cs 
Example 29
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    License: MIT License
def getTsvDataCharBased(self, filepath):
        print("Loading training data from "+filepath)
        x1=[]
        x2=[]
        y=[]
        # positive samples from file
        for line in open(filepath):
            l=line.strip().split("\t")
            if len(l)<2:
                continue
            if random() > 0.5:
                x1.append(l[0].lower())
                x2.append(l[1].lower())
            else:
                x1.append(l[1].lower())
                x2.append(l[0].lower())
            y.append(1)  # np.array([0,1]))
        # generate random negative samples
        combined = np.asarray(x1+x2)
        shuffle_indices = np.random.permutation(np.arange(len(combined)))
        combined_shuff = combined[shuffle_indices]
        for i in range(len(combined)):  # `xrange` in the original Python 2 source
            x1.append(combined[i])
            x2.append(combined_shuff[i])
            y.append(0)  # np.array([1,0]))
        return np.asarray(x1),np.asarray(x2),np.asarray(y) 
Example 30
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    License: MIT License
def getTsvTestData(self, filepath):
        print("Loading testing/labelled data from "+filepath)
        x1=[]
        x2=[]
        y=[]
        # positive samples from file
        for line in open(filepath):
            l=line.strip().split("\t")
            if len(l)<3:
                continue
            x1.append(l[1].lower())
            x2.append(l[2].lower())
            y.append(int(l[0])) #np.array([0,1]))
        return np.asarray(x1),np.asarray(x2),np.asarray(y) 
Example 31
Project: deep-siamese-text-similarity   Author: dhwajraj   File: input_helpers.py    License: MIT License
def getTestDataSet(self, data_path, vocab_path, max_document_length):
        x1_temp,x2_temp,y = self.getTsvTestData(data_path)

        # Build vocabulary
        vocab_processor = MyVocabularyProcessor(max_document_length,min_frequency=0)
        vocab_processor = vocab_processor.restore(vocab_path)
        print(len(vocab_processor.vocabulary_))

        x1 = np.asarray(list(vocab_processor.transform(x1_temp)))
        x2 = np.asarray(list(vocab_processor.transform(x2_temp)))
        # Randomly shuffle data
        del vocab_processor
        gc.collect()
        return x1,x2, y 
Example 32
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    License: MIT License
def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None):
    print('Loading LSUN dataset from "%s"' % lmdb_dir)
    import lmdb # pip install lmdb
    import cv2 # pip install opencv-python
    import io
    with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
        total_images = txn.stat()['entries']
        if max_images is None:
            max_images = total_images
        with TFRecordExporter(tfrecord_dir, max_images) as tfr:
            for idx, (key, value) in enumerate(txn.cursor()):
                try:
                    try:
                        img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1)
                        if img is None:
                            raise IOError('cv2.imdecode failed')
                        img = img[:, :, ::-1] # BGR => RGB
                    except IOError:
                        img = np.asarray(PIL.Image.open(io.BytesIO(value)))
                    crop = np.min(img.shape[:2])
                    img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
                    img = PIL.Image.fromarray(img, 'RGB')
                    img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose(2, 0, 1) # HWC => CHW
                    tfr.add_image(img)
                except:
                    print(sys.exc_info()[1])
                if tfr.cur_images == max_images:
                    break
        
#---------------------------------------------------------------------------- 
Example 33
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset_tool.py    License: MIT License
def create_from_images(tfrecord_dir, image_dir, label_dir, shuffle):
    print('Loading images from "%s"' % image_dir)
    image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
    if len(image_filenames) == 0:
        error('No input images found')
        
    img = np.asarray(PIL.Image.open(image_filenames[0]))
    resolution = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        error('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')

    try:
        with open(label_dir, 'rb') as file:
            labels = pickle.load(file)
    except:
        error('Label file was not found')
    
    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        reordered_names = []
        for idx in range(order.size):
            image_filename = image_filenames[order[idx]]
            img = np.asarray(PIL.Image.open(image_filename))
            if channels == 1:
                img = img[np.newaxis, :, :] # HW => CHW
            else:
                img = img.transpose(2, 0, 1) # HWC => CHW
            tfr.add_image(img)
            reordered_names.append(os.path.basename(image_filename))
        reordered_labels = []
        for key in reordered_names:
            reordered_labels += [labels[key]]
        reordered_labels = np.stack(reordered_labels, 0)
        tfr.add_labels(reordered_labels)

#---------------------------------------------------------------------------- 
Example 34
Project: mmdetection   Author: open-mmlab   File: test_masks.py    License: Apache License 2.0
def test_polygon_mask_index():
    raw_masks = dummy_raw_polygon_masks((3, 28, 28))
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
    # index by integer
    polygon_masks[0]
    # index by list
    polygon_masks[[0, 1]]
    # index by ndarray
    polygon_masks[np.asarray([0, 1])]
    with pytest.raises(ValueError):
        # invalid index
        polygon_masks[torch.Tensor([1, 2])] 
Example 35
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def get_mc_predictions(model, X, nb_iter=50, batch_size=256):
    """
    TODO
    :param model:
    :param X:
    :param nb_iter:
    :param batch_size:
    :return:
    """
    output_dim = model.layers[-1].output.shape[-1].value
    get_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-1].output]
    )

    def predict():
        n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
        output = np.zeros(shape=(len(X), output_dim))
        for i in range(n_batches):
            output[i * batch_size:(i + 1) * batch_size] = \
                get_output([X[i * batch_size:(i + 1) * batch_size], 1])[0]
        return output

    preds_mc = []
    for i in tqdm(range(nb_iter)):
        preds_mc.append(predict())

    return np.asarray(preds_mc) 
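
In context this collects Monte Carlo dropout passes; a hypothetical way to consume the result (`model` and `X_test` are assumed names):

preds_mc = get_mc_predictions(model, X_test, nb_iter=50)  # shape: (nb_iter, len(X_test), output_dim)
mean_pred = preds_mc.mean(axis=0)   # averaged class probabilities
spread = preds_mc.std(axis=0)       # per-class predictive uncertainty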
Example 36
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def mle_single(data, x, k=10):
    data = np.asarray(data, dtype=np.float32)
    x = np.asarray(x, dtype=np.float32)
    if x.ndim == 1:
        x = x.reshape((-1, x.shape[0]))
    # dim = x.shape[1]

    k = min(k, len(data)-1)
    f = lambda v: - k / np.sum(np.log(v/v[-1]))
    a = cdist(x, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a[0]

# lid of a batch of query points X 
Example 37
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def mle_batch(data, batch, k):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)

    k = min(k, len(data)-1)
    f = lambda v: - k / np.sum(np.log(v/v[-1]))
    a = cdist(batch, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a

# mean distance of x to its k nearest neighbours 
Example 38
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def kmean_batch(data, batch, k):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)

    k = min(k, len(data)-1)
    f = lambda v: np.mean(v)
    a = cdist(batch, data)
    a = np.apply_along_axis(np.sort, axis=1, arr=a)[:,1:k+1]
    a = np.apply_along_axis(f, axis=1, arr=a)
    return a

# mean distance of x to its k nearest neighbours 
Example 39
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def kmean_pca_batch(data, batch, k=10):
    data = np.asarray(data, dtype=np.float32)
    batch = np.asarray(batch, dtype=np.float32)
    a = np.zeros(batch.shape[0])
    for i in np.arange(batch.shape[0]):
        tmp = np.concatenate((data, [batch[i]]))
        tmp_pca = PCA(n_components=2).fit_transform(tmp)
        a[i] = mle_single(tmp_pca[:-1], tmp_pca[-1], k=k)
    return a 
Example 40
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    License: BSD 3-Clause "New" or "Revised" License
def test_to_categorical_no_nb_classes_arg(self):
        vec = np.asarray([0, 1, 2])
        cat = np.asarray([[1, 0, 0],
                          [0, 1, 0],
                          [0, 0, 1]])
        self.assertTrue(np.all(utils.to_categorical(vec) == cat)) 
Example 41
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    License: BSD 3-Clause "New" or "Revised" License
def test_to_categorical_with_nb_classes_arg(self):
        vec = np.asarray([0])
        cat = np.asarray([[1, 0, 0]])
        self.assertTrue(np.all(utils.to_categorical(vec, 3) == cat)) 
Example 42
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    License: BSD 3-Clause "New" or "Revised" License
def test_random_targets_vector(self):
        # Test utils.random_targets with a vector of labels as the input
        gt_labels = np.asarray([0, 1, 2, 3])
        rt = utils.random_targets(gt_labels, 5)

        # Make sure random_targets returns a one-hot encoded labels
        self.assertTrue(len(rt.shape) == 2)
        rt_labels = np.argmax(rt, axis=1)

        # Make sure all labels are different from the correct labels
        self.assertTrue(np.all(rt_labels != gt_labels)) 
Example 43
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    License: BSD 3-Clause "New" or "Revised" License
def test_random_targets_one_hot(self):
        # Test utils.random_targets with one-hot encoded labels as the input
        gt = np.asarray([[0, 0, 1, 0, 0],
                         [1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 0],
                         [1, 0, 0, 0, 0]])
        gt_labels = np.argmax(gt, axis=1)
        rt = utils.random_targets(gt, 5)

        # Make sure random_targets returns a one-hot encoded labels
        self.assertTrue(len(rt.shape) == 2)
        rt_labels = np.argmax(rt, axis=1)

        # Make sure all labels are different from the correct labels
        self.assertTrue(np.all(rt_labels != gt_labels)) 
Example 44
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    License: BSD 3-Clause "New" or "Revised" License
def test_random_targets_one_hot_single_label(self):
        # Test utils.random_targets with a single one-hot encoded label
        gt = np.asarray([0, 0, 1, 0, 0])
        gt = gt.reshape((1, 5))
        gt_labels = np.argmax(gt, axis=1)
        rt = utils.random_targets(gt, 5)

        # Make sure random_targets returns a one-hot encoded labels
        self.assertTrue(len(rt.shape) == 2)
        rt_labels = np.argmax(rt, axis=1)

        # Make sure all labels are different from the correct labels
        self.assertTrue(np.all(rt_labels != gt_labels)) 
Example 45
Project: neural-fingerprinting   Author: StephanZheng   File: attacks_tf.py    License: BSD 3-Clause "New" or "Revised" License
def ZERO():
    return np.asarray(0., dtype=np_dtype) 
Example 46
Project: mlimages   Author: icoxfog417   File: chainer_alex.py    License: MIT License
def train(epoch=10, batch_size=32, gpu=False):
    if gpu:
        cuda.check_cuda_available()
    xp = cuda.cupy if gpu else np

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, image_property=IMAGE_PROP)

    # make mean image
    if not os.path.isfile(MEAN_IMAGE_FILE):
        print("make mean image...")
        td.make_mean_image(MEAN_IMAGE_FILE)
    else:
        td.mean_image_file = MEAN_IMAGE_FILE

    # train model
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)
    epoch = epoch
    batch_size = batch_size

    print("Now our model is {0} classification task.".format(len(label_def)))
    print("begin training the model. epoch:{0} batch size:{1}.".format(epoch, batch_size))

    if gpu:
        model.to_gpu()

    for i in range(epoch):
        print("epoch {0}/{1}: (learning rate={2})".format(i + 1, epoch, optimizer.lr))
        td.shuffle(overwrite=True)

        for x_batch, y_batch in td.generate_batches(batch_size):
            x = chainer.Variable(xp.asarray(x_batch))
            t = chainer.Variable(xp.asarray(y_batch))

            optimizer.update(model, x, t)
            print("loss: {0}, accuracy: {1}".format(float(model.loss.data), float(model.accuracy.data)))

        serializers.save_npz(MODEL_FILE, model)
        optimizer.lr *= 0.97 
Example 47
Project: tensorflow-DeepFM   Author: ChenglongChen   File: metrics.py    License: MIT License
def gini(actual, pred):
    assert (len(actual) == len(pred))
    all = np.asarray(np.c_[actual, pred, np.arange(len(actual))], dtype=float)  # `np.float` in the original; removed in modern NumPy
    all = all[np.lexsort((all[:, 2], -1 * all[:, 1]))]
    totalLosses = all[:, 0].sum()
    giniSum = all[:, 0].cumsum().sum() / totalLosses

    giniSum -= (len(actual) + 1) / 2.
    return giniSum / len(actual) 
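
This Gini implementation is usually paired with a normalized variant that divides by the score of a perfect ranking; a one-line sketch (the name `gini_norm` is an assumption here):

def gini_norm(actual, pred):
    # a perfect ranking scores 1.0; a random ordering scores ~0.0
    return gini(actual, pred) / gini(actual, actual)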
Example 48
Project: models   Author: kipoi   File: dataloader_read_fasta.py    License: MIT License
def read_fasta( fasta_file, split_char=' ', id_field=0 ):
    '''
        Reads in fasta file containing multiple sequences.
        Returns dictionary holding multiple sequences or only single 
        sequence, depending on input file
        In order to retrieve the protein identifier, the header is split 
        after split_char and the field at position id_field is chosen as
        identifier.
    '''
    sequences = dict()
    with open( fasta_file, 'r' ) as fasta_f:
        for line in fasta_f:
            # get uniprot ID from header and create new entry
            if line.startswith('>'):
                uniprot_id = line.replace('>', '').strip().split(split_char)[id_field]
                sequences[ uniprot_id ] = ''
            else:
                # replace all white-space chars and join seqs spanning multiple lines
                sequences[ uniprot_id ] += ''.join( line.split() ).upper()

    sequences = sorted(sequences.items(), key=lambda kv: len( sequences[kv[0]] ) )
    identifier, seqs = zip(*sequences)
    seqs = [ np.asarray([seq]) for seq in seqs ]
    #seqs = np.concatenate( seqs )
    print(seqs)
    return { "inputs": seqs,
             "metadata":
                { "id":  identifier }
            } 
Example 49
Project: models   Author: kipoi   File: dataloader.py    License: MIT License
def __init__(self, fasta_file, split_char=' ', id_field=0):

        seq_dict = self.read_fasta(fasta_file, split_char, id_field)
        self.length = len(seq_dict)

        sequences = sorted(seq_dict.items(), key=lambda kv: len(seq_dict[kv[0]]))
        self.identifier, self.seqs = zip(*sequences)
        self.seqs = [np.asarray([seq]) for seq in self.seqs] 
Example 50
Project: models   Author: kipoi   File: bio_utils.py    License: MIT License
def sequence_to_int(sequences, max_len):
    if type(sequences) is list:
        seqs_enc = np.asarray([nucleotide_to_int(read, max_len) for read in sequences], 'uint8')
    else:
        seqs_enc = np.asarray([nucleotide_to_int(read, max_len) for read in sequences], 'uint8')
        seqs_enc = list(itertools.chain(*seqs_enc))
        seqs_enc = np.asarray(seqs_enc)

    return seqs_enc 
Example 51
Project: Random-Erasing   Author: zhunzhong07   File: logger.py    License: Apache License 2.0
def plot_overlap(logger, names=None):
    names = logger.names if names is None else names
    numbers = logger.numbers
    for _, name in enumerate(names):
        x = np.arange(len(numbers[name]))
        if name in ['Train Acc.', 'Valid Acc.']:
            plt.plot(x, 100-np.asarray(numbers[name], dtype='float'))
        else:
            plt.plot(x, np.asarray(numbers[name]))
    return [logger.title + '(' + name + ')' for name in names] 
Example 52
Project: Random-Erasing   Author: zhunzhong07   File: logger.py    License: Apache License 2.0
def plot(self, names=None):
        names = self.names if names is None else names
        numbers = self.numbers
        for _, name in enumerate(names):
            x = np.arange(len(numbers[name]))
            plt.plot(x, np.asarray(numbers[name]))
        plt.legend([self.title + '(' + name + ')' for name in names])
        plt.grid(True) 
Example 53
Project: kalman_filter_multi_object_tracking   Author: srianant   File: tracker.py    License: MIT License
def __init__(self, prediction, trackIdCount):
        """Initialize variables used by Track class
        Args:
            prediction: predicted centroids of object to be tracked
            trackIdCount: identification of each track object
        Return:
            None
        """
        self.track_id = trackIdCount  # identification of each track object
        self.KF = KalmanFilter()  # KF instance to track this object
        self.prediction = np.asarray(prediction)  # predicted centroids (x,y)
        self.skipped_frames = 0  # number of frames skipped undetected
        self.trace = []  # trace path 
Example 54
Project: deep-learning-note   Author: wdxtub   File: 1_generate_text.py    License: MIT License
def sample(preds, temperature=1.0):
    # given the model's predictions, sample the index of the next character
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas) 
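
A hypothetical call site, assuming `model` outputs a softmax over the character vocabulary: lower temperatures make the sampling more conservative, higher ones more diverse.

preds = model.predict(x, verbose=0)[0]       # probability distribution over characters
next_index = sample(preds, temperature=0.5)  # index of the sampled next character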
Example 55
Project: deep-learning-note   Author: wdxtub   File: utils.py    License: MIT License
def read_birth_life_data(filename):
    """
    Read in birth_life_2010.txt and return:
    data in the form of NumPy array
    n_samples: number of samples
    """
    text = open(filename, 'r').readlines()[1:]
    data = [line[:-1].split('\t') for line in text]
    births = [float(line[1]) for line in data]
    lifes = [float(line[2]) for line in data]
    data = list(zip(births, lifes))
    n_samples = len(data)
    data = np.asarray(data, dtype=np.float32)
    return data, n_samples 
Example 56
Project: deep-learning-note   Author: wdxtub   File: 18_basic_tfrecord.py    License: MIT License
def get_image_binary(filename):
    image = Image.open(filename)
    image = np.asarray(image, np.uint8)
    shape = np.array(image.shape, np.int32)
    return shape.tobytes(), image.tobytes() 
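
The byte strings can be turned back into an image with numpy.frombuffer; a minimal sketch, assuming `shape_bytes` and `image_bytes` came from the function above:

shape = np.frombuffer(shape_bytes, dtype=np.int32)                 # e.g. [height, width, channels]
image = np.frombuffer(image_bytes, dtype=np.uint8).reshape(shape)  # restore the pixel array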
Example 57
Project: neuropythy   Author: noahbenson   File: register_retinotopy.py    License: GNU Affero General Public License v3.0
def _guess_surf_file(fl):
    # MGH/MGZ files
    try: return np.asarray(fsmgh.load(fl).dataobj).flatten()
    except Exception: pass
    # FreeSurfer Curv files
    try: return fsio.read_morph_data(fl)
    except Exception: pass
    # Nifti files
    try: return np.squeeze(nib.load(fl).dataobj)
    except Exception: pass
    raise ValueError('Could not determine filetype for: %s' % fl) 
Example 58
Project: neuropythy   Author: noahbenson   File: __init__.py    License: GNU Affero General Public License v3.0
def to_java_ints(m):
    '''
    to_java_ints(m) yields a java array object for the vector or matrix m.
    '''
    global _java
    if _java is None: _init_registration()
    m = np.asarray(m)
    dims = len(m.shape)
    if dims > 2: raise ValueError('1D and 2D arrays supported only')
    bindat = serialize_numpy(m, 'i')
    return (_java.jvm.nben.util.Numpy.int2FromBytes(bindat) if dims == 2
            else _java.jvm.nben.util.Numpy.int1FromBytes(bindat)) 
Example 59
Project: neuropythy   Author: noahbenson   File: __init__.py    License: GNU Affero General Public License v3.0
def to_java_array(m):
    '''
    to_java_array(m) yields to_java_ints(m) if m is an array of integers and to_java_doubles(m) if
    m is anything else. The numpy array m is tested via numpy.issubdtype(m.dtype, numpy.int64).
    '''
    if not hasattr(m, '__iter__'): return m
    m = np.asarray(m)
    if np.issubdtype(m.dtype, np.dtype(int).type) or all(isinstance(x, num.Integral) for x in m):
        return to_java_ints(m)
    else:
        return to_java_doubles(m) 
Example 60
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def to_rgba(val):
    '''
    to_rgba(val) is identical to matplotlib.colors.to_rgba(val) except that it operates over lists
      as well as individual elements to yield matrices of rgba values. In addition, it always yields
      numpy vectors or matrices.
    '''
    if pimms.is_npmatrix(val) and val.shape[1] == 4: return val
    try: return np.asarray(matplotlib.colors.to_rgba(val))
    except Exception: return np.asarray([matplotlib.colors.to_rgba(u) for u in val])