Python numpy.rollaxis() Examples

The following are 30 code examples showing how to use numpy.rollaxis(). They are extracted from open source projects; the originating project, author, source file, and license are listed with each example.

You may also want to check out all other available functions and classes of the numpy module.
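Before the examples, a minimal sketch (not taken from any of the projects below) of what numpy.rollaxis(a, axis, start) does: it rolls the given axis until it lies in the position given by start (default 0, i.e. the front), leaving the relative order of the other axes unchanged. The shapes below are illustrative only.

import numpy as np

a = np.zeros((3, 4, 5, 6))

# Move axis 2 to the front; the remaining axes keep their relative order.
print(np.rollaxis(a, 2).shape)     # (5, 3, 4, 6)

# Roll axis 0 until it lies before position 3.
print(np.rollaxis(a, 0, 3).shape)  # (4, 5, 3, 6)

# numpy.moveaxis is the newer, more explicit way to write the first call.
print(np.moveaxis(a, 2, 0).shape)  # (5, 3, 4, 6)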

Example 1
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: capsulenet.py    License: Apache License 2.0
def apply_transform(x,
                    transform_matrix,
                    channel_axis=0,
                    fill_mode='nearest',
                    cval=0.):
    x = np.rollaxis(x, channel_axis, 0)
    final_affine_matrix = transform_matrix[:2, :2]
    final_offset = transform_matrix[:2, 2]
    channel_images = [ndi.interpolation.affine_transform(
        x_channel,
        final_affine_matrix,
        final_offset,
        order=0,
        mode=fill_mode,
        cval=cval) for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_axis + 1)
    return x 
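The pair of rollaxis calls in apply_transform moves the channel axis to the front so the scipy affine transform can be applied to each channel separately, then moves it back. A minimal shape-only sketch of that round trip, assuming a channels-last (H, W, C) input purely for illustration:

import numpy as np

x = np.zeros((28, 28, 3))                        # assumed (H, W, C) image
channel_axis = 2                                 # hypothetical channels-last layout

x_cf = np.rollaxis(x, channel_axis, 0)           # -> (3, 28, 28), channels first
x_back = np.rollaxis(x_cf, 0, channel_axis + 1)  # -> (28, 28, 3), original layout
print(x_cf.shape, x_back.shape)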
Example 2
Project: paramz   Author: sods   File: param.py    License: BSD 3-Clause "New" or "Revised" License
def _indices(self, slice_index=None):
        # get an int-array containing all indices in the first axis.
        if slice_index is None:
            slice_index = self._current_slice_
        indices = np.indices(self._realshape_, dtype=int)
        indices = indices[(slice(None),)+slice_index]
        indices = np.rollaxis(indices, 0, indices.ndim).reshape(-1, self._realndim_)
        return indices 
Example 3
Project: face_classification   Author: oarriaga   File: data_augmentation.py    License: MIT License
def _do_random_crop(self, image_array):
        """IMPORTANT: random crop only works for classification since the
        current implementation does not transform bounding boxes"""
        height = image_array.shape[0]
        width = image_array.shape[1]
        x_offset = np.random.uniform(0, self.translation_factor * width)
        y_offset = np.random.uniform(0, self.translation_factor * height)
        offset = np.array([x_offset, y_offset])
        scale_factor = np.random.uniform(self.zoom_range[0],
                                         self.zoom_range[1])
        crop_matrix = np.array([[scale_factor, 0],
                                [0, scale_factor]])

        image_array = np.rollaxis(image_array, axis=-1, start=0)
        image_channel = [ndi.interpolation.affine_transform(image_channel,
                         crop_matrix, offset=offset, order=0, mode='nearest',
                         cval=0.0) for image_channel in image_array]

        image_array = np.stack(image_channel, axis=0)
        image_array = np.rollaxis(image_array, 0, 3)
        return image_array 
Example 4
Project: face_classification   Author: oarriaga   File: data_augmentation.py    License: MIT License
def do_random_rotation(self, image_array):
        """IMPORTANT: random rotation only works for classification since the
        current implementation does not transform bounding boxes"""
        height = image_array.shape[0]
        width = image_array.shape[1]
        x_offset = np.random.uniform(0, self.translation_factor * width)
        y_offset = np.random.uniform(0, self.translation_factor * height)
        offset = np.array([x_offset, y_offset])
        scale_factor = np.random.uniform(self.zoom_range[0],
                                         self.zoom_range[1])
        crop_matrix = np.array([[scale_factor, 0],
                                [0, scale_factor]])

        image_array = np.rollaxis(image_array, axis=-1, start=0)
        image_channel = [ndi.interpolation.affine_transform(image_channel,
                         crop_matrix, offset=offset, order=0, mode='nearest',
                         cval=0.0) for image_channel in image_array]

        image_array = np.stack(image_channel, axis=0)
        image_array = np.rollaxis(image_array, 0, 3)
        return image_array 
Example 5
Project: lambda-packs   Author: ryfeus   File: _cubic.py    License: MIT License
def __init__(self, x, y, axis=0, extrapolate=None):
        x = _asarray_validated(x, check_finite=False, as_inexact=True)
        y = _asarray_validated(y, check_finite=False, as_inexact=True)

        axis = axis % y.ndim

        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        yp = np.rollaxis(y, axis)

        dk = self._find_derivatives(xp, yp)
        data = np.hstack((yp[:, None, ...], dk[:, None, ...]))

        _b = BPoly.from_derivatives(x, data, orders=None)
        super(PchipInterpolator, self).__init__(_b.c, _b.x,
                                                extrapolate=extrapolate)
        self.axis = axis 
Example 6
Project: lambda-packs   Author: ryfeus   File: _bvp.py    License: MIT License
def create_spline(y, yp, x, h):
    """Create a cubic spline given values and derivatives.

    Formulas for the coefficients are taken from interpolate.CubicSpline.

    Returns
    -------
    sol : PPoly
        Constructed spline as a PPoly instance.
    """
    from scipy.interpolate import PPoly

    n, m = y.shape
    c = np.empty((4, n, m - 1), dtype=y.dtype)
    slope = (y[:, 1:] - y[:, :-1]) / h
    t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
    c[0] = t / h
    c[1] = (slope - yp[:, :-1]) / h - t
    c[2] = yp[:, :-1]
    c[3] = y[:, :-1]
    c = np.rollaxis(c, 1)

    return PPoly(c, x, extrapolate=True, axis=1) 
Example 7
Project: lambda-packs   Author: ryfeus   File: nanfunctions.py    License: MIT License
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear'):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage

    """
    if axis is None or a.ndim == 1:
        part = a.ravel()
        result = _nanpercentile1d(part, q, overwrite_input, interpolation)
    else:
        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis fills in collapsed axis with results.
        # Move that axis to the beginning to match percentile's
        # convention.
        if q.ndim != 0:
            result = np.rollaxis(result, axis)

    if out is not None:
        out[...] = result
    return result 
Example 8
Project: auto-alt-text-lambda-api   Author: abhisuri97   File: nanfunctions.py    License: MIT License
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear'):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage

    """
    if axis is None:
        part = a.ravel()
        result = _nanpercentile1d(part, q, overwrite_input, interpolation)
    else:
        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis fills in collapsed axis with results.
        # Move that axis to the beginning to match percentile's
        # convention.
        if q.ndim != 0:
            result = np.rollaxis(result, axis)

    if out is not None:
        out[...] = result
    return result 
Example 9
Project: deepJDOT   Author: bbdamodaran   File: DatasetLoad.py    License: MIT License
def SVHN_dataload():
    import numpy as np
    import scipy.io as sio
    import os
    pathname = 'data/SVHN'
    fn = 'train_32x32.mat'
    loaddata = sio.loadmat(os.path.join(pathname, fn))
    Traindata = loaddata['X']
    trainlabel = loaddata['y']
    Traindata = np.rollaxis(Traindata, 3, 0)
    fn = 'test_32x32.mat'
    loadtdata = sio.loadmat(os.path.join(pathname, fn))
    Testdata = loadtdata['X']
    testlabel = loadtdata['y']
    Testdata = np.rollaxis(Testdata, 3, 0)
    return Traindata, trainlabel, Testdata, testlabel


# %% MNIST-M load 
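SVHN's .mat files store the image tensor with the sample index last, so the rollaxis(..., 3, 0) calls above move that axis to the front to get the usual samples-first layout. A shape-only sketch with a dummy sample count:

import numpy as np

X = np.zeros((32, 32, 3, 100))   # samples-last layout; 100 is a dummy count
X = np.rollaxis(X, 3, 0)         # -> (100, 32, 32, 3), samples first
print(X.shape)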
Example 10
Project: deepJDOT   Author: bbdamodaran   File: DatasetLoad.py    License: MIT License
def synthetic_digits_small_dataload():
    import os
    import scipy.io as sio
    import numpy as np

    filepath = '/home/damodara/OT/DA/datasets/SynthDigits'
    train_fname = os.path.join(filepath, 'synth_train_32x32_small.mat')
    loaddata = sio.loadmat(train_fname)
    Traindata = loaddata['X']
    train_label = loaddata['y']
    #
    test_fname = os.path.join(filepath, 'synth_test_32x32_small.mat')
    loaddata = sio.loadmat(test_fname)
    Testdata = loaddata['X']
    test_label = loaddata['y']
    Traindata = np.rollaxis(Traindata, 3, 0)
    Testdata = np.rollaxis(Testdata, 3, 0)

    return Traindata, train_label, Testdata, test_label 
Example 11
Project: deepJDOT   Author: bbdamodaran   File: DatasetLoad.py    License: MIT License
def synthetic_digits_dataload():
    import os
    import scipy.io as sio
    import numpy as np

    filepath = '/home/damodara/OT/DA/datasets/SynthDigits'
    train_fname = os.path.join(filepath, 'synth_train_32x32.mat')
    loaddata = sio.loadmat(train_fname)
    Traindata = loaddata['X']
    train_label = loaddata['y']
    #
    test_fname = os.path.join(filepath, 'synth_test_32x32.mat')
    loaddata = sio.loadmat(test_fname)
    Testdata = loaddata['X']
    test_label = loaddata['y']

    Traindata = np.rollaxis(Traindata, 3, 0)
    Testdata = np.rollaxis(Testdata, 3, 0)
    return Traindata, train_label, Testdata, test_label


# %% stl9 
Example 12
Project: DeepDIVA   Author: DIVA-DIA   File: misc.py    License: GNU Lesser General Public License v3.0
def multi_label_img_to_multi_hot(np_array):
    """
    TODO: There must be a faster way of doing this + adjust to correct input format (see gt_tensor_to_one_hot)
    Convert ground truth label image to multi-one-hot encoded matrix of size image height x image width x #classes

    Parameters
    -------
    np_array: numpy array
        RGB image [W x H x C]
    Returns
    -------
    numpy array of size [#C x W x H]
        sparse one-hot encoded multi-class matrix, where #C is the number of classes
    """
    im_np = np_array[:, :, 2].astype(np.int8)
    nb_classes = len(int_to_one_hot(im_np.max(), ''))

    class_dict = {x: int_to_one_hot(x, nb_classes) for x in np.unique(im_np)}
    # create the one hot matrix
    one_hot_matrix = np.asanyarray(
        [[class_dict[im_np[i, j]] for j in range(im_np.shape[1])] for i in range(im_np.shape[0])])

    return np.rollaxis(one_hot_matrix.astype(np.uint8), 2, 0) 
Example 13
Project: DeepDIVA   Author: DIVA-DIA   File: misc.py    License: GNU Lesser General Public License v3.0
def multi_one_hot_to_output(matrix):
    """
    This function converts the multi-one-hot encoded matrix to an image like it was provided in the ground truth

    Parameters
    -------
    tensor of size [#C x W x H]
        sparse one-hot encoded multi-class matrix, where #C is the number of classes
    Returns
    -------
    np_array: numpy array
        RGB image [C x W x H]
    """
    # TODO: fix input and output dims (see one_hot_to_output)
    # create RGB
    matrix = np.rollaxis(np.char.mod('%d', matrix.numpy()), 0, 3)
    zeros = (32 - matrix.shape[2]) * '0'
    B = np.array([[int('{}{}'.format(zeros, ''.join(matrix[i][j])), 2) for j in range(matrix.shape[1])] for i in
                  range(matrix.shape[0])])

    RGB = np.dstack((np.zeros(shape=(matrix.shape[0], matrix.shape[1], 2), dtype=np.int8), B))

    return RGB 
Example 14
Project: Emotion   Author: petercunha   File: data_augmentation.py    License: MIT License
def _do_random_crop(self, image_array):
        """IMPORTANT: random crop only works for classification since the
        current implementation does not transform bounding boxes"""
        height = image_array.shape[0]
        width = image_array.shape[1]
        x_offset = np.random.uniform(0, self.translation_factor * width)
        y_offset = np.random.uniform(0, self.translation_factor * height)
        offset = np.array([x_offset, y_offset])
        scale_factor = np.random.uniform(self.zoom_range[0],
                                        self.zoom_range[1])
        crop_matrix = np.array([[scale_factor, 0],
                                [0, scale_factor]])

        image_array = np.rollaxis(image_array, axis=-1, start=0)
        image_channel = [ndi.interpolation.affine_transform(image_channel,
                        crop_matrix, offset=offset, order=0, mode='nearest',
                        cval=0.0) for image_channel in image_array]

        image_array = np.stack(image_channel, axis=0)
        image_array = np.rollaxis(image_array, 0, 3)
        return image_array 
Example 15
Project: Emotion   Author: petercunha   File: data_augmentation.py    License: MIT License
def do_random_rotation(self, image_array):
        """IMPORTANT: random rotation only works for classification since the
        current implementation does not transform bounding boxes"""
        height = image_array.shape[0]
        width = image_array.shape[1]
        x_offset = np.random.uniform(0, self.translation_factor * width)
        y_offset = np.random.uniform(0, self.translation_factor * height)
        offset = np.array([x_offset, y_offset])
        scale_factor = np.random.uniform(self.zoom_range[0],
                                        self.zoom_range[1])
        crop_matrix = np.array([[scale_factor, 0],
                                [0, scale_factor]])

        image_array = np.rollaxis(image_array, axis=-1, start=0)
        image_channel = [ndi.interpolation.affine_transform(image_channel,
                        crop_matrix, offset=offset, order=0, mode='nearest',
                        cval=0.0) for image_channel in image_array]

        image_array = np.stack(image_channel, axis=0)
        image_array = np.rollaxis(image_array, 0, 3)
        return image_array 
Example 16
Project: DDPAE-video-prediction   Author: jthsieh   File: video_transforms.py    License: MIT License
def __call__(self, arr):
    if isinstance(arr, np.ndarray):
      video = torch.from_numpy(np.rollaxis(arr, axis=-1, start=-3))

      if self.scale:
        return video.float().div(255)
      else:
        return video.float()
    else:
      raise NotImplementedError 
Example 17
def read_image(img_path, image_dims=None, mean=None):
    """
    Reads an image from file path or URL, optionally resizing to given image dimensions and
    subtracting mean.
    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean file to subtract, or None
    :return: loaded image, in RGB format
    """

    import urllib

    filename = img_path.split("/")[-1]
    if img_path.startswith('http'):
        urllib.urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2) # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean # subtract mean

    return img 
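The rollaxis call in read_image converts the (h, w, c) array that cv2.imread returns into (c, h, w) before a batch axis is prepended. A shape-only sketch with illustrative dimensions:

import numpy as np

img = np.zeros((224, 224, 3), dtype=np.uint8)  # (h, w, c) as loaded
img = np.rollaxis(img, 2)                      # -> (3, 224, 224), i.e. (c, h, w)
img = img[np.newaxis, :]                       # -> (1, 3, 224, 224), i.e. (n, c, h, w)
print(img.shape)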
Example 18
def read_image(img_path, image_dims=None, mean=None):
    """
    Reads an image from file path or URL, optionally resizing to given image dimensions and
    subtracting mean.
    :param img_path: path to file, or url to download
    :param image_dims: image dimensions to resize to, or None
    :param mean: mean file to subtract, or None
    :return: loaded image, in RGB format
    """

    import urllib

    filename = img_path.split("/")[-1]
    if img_path.startswith('http'):
        urllib.urlretrieve(img_path, filename)
        img = cv2.imread(filename)
    else:
        img = cv2.imread(img_path)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    if image_dims is not None:
        img = cv2.resize(img, image_dims)  # resize to image_dims to fit model
    img = np.rollaxis(img, 2) # change to (c, h, w) order
    img = img[np.newaxis, :]  # extend to (n, c, h, w)
    if mean is not None:
        mean = np.array(mean)
        if mean.shape == (3,):
            mean = mean[np.newaxis, :, np.newaxis, np.newaxis]  # extend to (n, c, 1, 1)
        img = img.astype(np.float32) - mean # subtract mean

    return img 
Example 19
Project: IntroToDeepLearning   Author: robb-brown   File: TensorFlowInterface.py    License: MIT License
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
	# Output summary
	try:
		W = layer.output
	except:
		W = layer
	wp = W.eval(feed_dict=feed_dict);
	if len(np.shape(wp)) < 4:		# Fully connected layer, has no shape
		temp = np.zeros(np.product(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
		fields = np.reshape(temp,[1]+fieldShape)
	else:			# Convolutional layer already has shape
		wp = np.rollaxis(wp,3,0)
		features, channels, iy,ix = np.shape(wp)
		if channel is not None:
			fields = wp[:,channel,:,:]
		else:
			fields = np.reshape(wp,[features*channels,iy,ix])

	perRow = int(math.floor(math.sqrt(fields.shape[0])))
	perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
	fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
	tiled = []
	for i in range(0,perColumn*perRow,perColumn):
		tiled.append(np.hstack(fields2[i:i+perColumn]))

	tiled = np.vstack(tiled)
	if figOffset is not None:
		mpl.figure(figOffset); mpl.clf(); 

	mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar(); 
Example 20
Project: IntroToDeepLearning   Author: robb-brown   File: TensorFlowInterface.py    License: MIT License
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
	# Output summary
	W = layer.output
	wp = W.eval(feed_dict=feed_dict);
	if len(np.shape(wp)) < 4:		# Fully connected layer, has no shape
		temp = np.zeros(np.product(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
		fields = np.reshape(temp,[1]+fieldShape)
	else:			# Convolutional layer already has shape
		wp = np.rollaxis(wp,3,0)
		features, channels, iy,ix = np.shape(wp)
		if channel is not None:
			fields = wp[:,channel,:,:]
		else:
			fields = np.reshape(wp,[features*channels,iy,ix])

	perRow = int(math.floor(math.sqrt(fields.shape[0])))
	perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
	fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
	tiled = []
	for i in range(0,perColumn*perRow,perColumn):
		tiled.append(np.hstack(fields2[i:i+perColumn]))

	tiled = np.vstack(tiled)
	if figOffset is not None:
		mpl.figure(figOffset); mpl.clf();

	mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar(); 
Example 21
Project: ACAN   Author: miraiaroha   File: losses.py    License: MIT License
def get_ohem_label(self, pred, label):
        n, c, h, w = pred.size()
        if self.ignore_index is None:
            self.ignore_index = c + 1

        input_label = label.data.cpu().numpy().ravel().astype(np.int32)
        x = np.rollaxis(pred.data.cpu().numpy(), 1).reshape((c, -1))
        input_prob = np.exp(x - x.max(axis=0, keepdims=True))
        input_prob /= input_prob.sum(axis=0, keepdims=True)

        valid_flag = input_label != self.ignore_index
        valid_inds = np.where(valid_flag)[0]
        valid_label = input_label[valid_flag]
        num_valid = valid_flag.sum()
        if self.min_kept >= num_valid:
            print('Labels: {}'.format(num_valid))
        elif num_valid > 0:
            valid_prob = input_prob[:,valid_flag]
            valid_prob = valid_prob[valid_label, np.arange(len(valid_label), dtype=np.int32)]
            threshold = self.thresh
            if self.min_kept > 0:
                index = valid_prob.argsort()
                threshold_index = index[ min(len(index), self.min_kept) - 1 ]
                if valid_prob[threshold_index] > self.thresh:
                    threshold = valid_prob[threshold_index]
            kept_flag = valid_prob <= threshold
            valid_kept_inds = valid_inds[kept_flag]
            valid_inds = valid_kept_inds

        self.ohem_ratio = len(valid_inds) / num_valid
        #print('Max prob: {:.4f}, hard ratio: {:.4f} = {} / {} '.format(input_prob.max(), self.ohem_ratio, len(valid_inds), num_valid))
        valid_kept_label = input_label[valid_inds].copy()
        input_label.fill(self.ignore_index)
        input_label[valid_inds] = valid_kept_label
        #valid_flag_new = input_label != self.ignore_index
        # print(np.sum(valid_flag_new))
        label = torch.from_numpy(input_label.reshape(label.size())).long().cuda()
        return label 
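The expression np.rollaxis(pred, 1).reshape((c, -1)) brings the class axis to the front and flattens the batch and spatial axes, so each column holds the class scores for one pixel. A shape-only sketch with made-up sizes:

import numpy as np

pred = np.zeros((2, 19, 65, 65))              # (n, c, h, w), made-up sizes
c = pred.shape[1]
flat = np.rollaxis(pred, 1).reshape((c, -1))  # -> (19, 2 * 65 * 65)
print(flat.shape)                             # (19, 8450)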
Example 22
Project: End-to-end-ASR-Pytorch   Author: Alexander-H-Liu   File: ctc.py    License: MIT License
def full_compute(self, g, r_prev):
        '''Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))
           This function computes all possible tokens for c (memory inefficient)'''
        prefix_length = len(g)
        last_char = g[-1] if prefix_length > 0 else 0

        # init. r
        r = np.full((self.input_length, 2, self.odim),
                    self.logzero, dtype=np.float32)

        # start from len(g) because it is impossible for CTC to generate |y|>|X|
        start = max(1, prefix_length)

        if prefix_length == 0:
            r[0, 0, :] = self.x[0, :]    # if g = <sos>

        psi = r[start-1, 0, :]

        phi = np.logaddexp(r_prev[:, 0], r_prev[:, 1])

        for t in range(start, self.input_length):
            # prev_blank
            prev_blank = np.full((self.odim), r_prev[t-1, 1], dtype=np.float32)
            # prev_nonblank
            prev_nonblank = np.full(
                (self.odim), r_prev[t-1, 0], dtype=np.float32)
            prev_nonblank[last_char] = self.logzero

            phi = np.logaddexp(prev_nonblank, prev_blank)
            # P(h|current step is non-blank) = [ P(prev. step = y) + P()]*P(c)
            r[t, 0, :] = np.logaddexp(r[t-1, 0, :], phi) + self.x[t, :]
            # P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)
            r[t, 1, :] = np.logaddexp(
                r[t-1, 1, :], r[t-1, 0, :]) + self.x[t, self.blank]
            psi = np.logaddexp(psi, phi+self.x[t, :])

        #psi[self.eos] = np.logaddexp(r_prev[-1,0], r_prev[-1,1])
        return psi, np.rollaxis(r, 2) 
Example 23
Project: End-to-end-ASR-Pytorch   Author: Alexander-H-Liu   File: ctc.py    License: MIT License
def cheap_compute(self, g, r_prev, candidates):
        '''Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))
           This function considers only those tokens in candidates for c (memory efficient)'''
        prefix_length = len(g)
        odim = len(candidates)
        last_char = g[-1] if prefix_length > 0 else 0

        # init. r
        r = np.full((self.input_length, 2, len(candidates)),
                    self.logzero, dtype=np.float32)

        # start from len(g) because it is impossible for CTC to generate |y|>|X|
        start = max(1, prefix_length)

        if prefix_length == 0:
            r[0, 0, :] = self.x[0, candidates]    # if g = <sos>

        psi = r[start-1, 0, :]
        # Phi = (prev_nonblank,prev_blank)
        sum_prev = np.logaddexp(r_prev[:, 0], r_prev[:, 1])
        phi = np.repeat(sum_prev[..., None],odim,axis=-1)
        # Handle edge case : last tok of prefix in candidates
        if  prefix_length>0 and last_char in candidates:
            phi[:,candidates.index(last_char)] = r_prev[:,1]

        for t in range(start, self.input_length):
            # prev_blank
            # prev_blank = np.full((odim), r_prev[t-1, 1], dtype=np.float32)
            # prev_nonblank
            # prev_nonblank = np.full((odim), r_prev[t-1, 0], dtype=np.float32)
            # phi = np.logaddexp(prev_nonblank, prev_blank)
            # P(h|current step is non-blank) =  P(prev. step = y)*P(c)
            r[t, 0, :] = np.logaddexp( r[t-1, 0, :], phi[t-1]) + self.x[t, candidates]
            # P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)
            r[t, 1, :] = np.logaddexp( r[t-1, 1, :], r[t-1, 0, :]) + self.x[t, self.blank]
            psi = np.logaddexp(psi, phi[t-1,]+self.x[t, candidates])

        # P(end of sentence) = P(g)
        if self.eos in candidates:
            psi[candidates.index(self.eos)] = sum_prev[-1]
        return psi, np.rollaxis(r, 2) 
Example 24
Project: Recipes   Author: Lasagne   File: inception_v3.py    License: MIT License
def preprocess(im):
    # Expected input: RGB uint8 image
    # Input to network should be bc01, 299x299 pixels, scaled to [-1, 1].
    import skimage.transform
    import numpy as np

    im = skimage.transform.resize(im, (299, 299), preserve_range=True)
    im = (im - 128) / 128.
    im = np.rollaxis(im, 2)[np.newaxis].astype('float32')

    return im 
Example 25
Project: pytorch-segmentation-toolbox   Author: speedinghzl   File: loss.py    License: MIT License
def find_threshold(self, np_predict, np_target):
        # downsample 1/8
        factor = self.factor
        predict = nd.zoom(np_predict, (1.0, 1.0, 1.0/factor, 1.0/factor), order=1)
        target = nd.zoom(np_target, (1.0, 1.0/factor, 1.0/factor), order=0)

        n, c, h, w = predict.shape
        min_kept = self.min_kept // (factor*factor) #int(self.min_kept_ratio * n * h * w)

        input_label = target.ravel().astype(np.int32)
        input_prob = np.rollaxis(predict, 1).reshape((c, -1))

        valid_flag = input_label != self.ignore_label
        valid_inds = np.where(valid_flag)[0]
        label = input_label[valid_flag]
        num_valid = valid_flag.sum()
        if min_kept >= num_valid:
            threshold = 1.0
        elif num_valid > 0:
            prob = input_prob[:,valid_flag]
            pred = prob[label, np.arange(len(label), dtype=np.int32)]
            threshold = self.thresh
            if min_kept > 0:
                k_th = min(len(pred), min_kept)-1
                new_array = np.partition(pred, k_th)
                new_threshold = new_array[k_th]
                if new_threshold > self.thresh:
                    threshold = new_threshold
        return threshold 
Example 26
Project: pytorch-segmentation-toolbox   Author: speedinghzl   File: loss.py    License: MIT License
def generate_new_target(self, predict, target):
        np_predict = predict.data.cpu().numpy()
        np_target = target.data.cpu().numpy()
        n, c, h, w = np_predict.shape

        threshold = self.find_threshold(np_predict, np_target)

        input_label = np_target.ravel().astype(np.int32)
        input_prob = np.rollaxis(np_predict, 1).reshape((c, -1))

        valid_flag = input_label != self.ignore_label
        valid_inds = np.where(valid_flag)[0]
        label = input_label[valid_flag]
        num_valid = valid_flag.sum()

        if num_valid > 0:
            prob = input_prob[:,valid_flag]
            pred = prob[label, np.arange(len(label), dtype=np.int32)]
            kept_flag = pred <= threshold
            valid_inds = valid_inds[kept_flag]
            print('Labels: {} {}'.format(len(valid_inds), threshold))

        label = input_label[valid_inds].copy()
        input_label.fill(self.ignore_label)
        input_label[valid_inds] = label
        new_target = torch.from_numpy(input_label.reshape(target.size())).long().cuda(target.get_device())

        return new_target 
Example 27
Project: me-ica   Author: ME-ICA   File: funcs.py    License: GNU Lesser General Public License v2.1
def concat_images(images, check_affines=True):
    ''' Concatenate images in list to single image, along last dimension

    Parameters
    ----------
    images : sequence
       sequence of ``SpatialImage`` or of filenames
    check_affines : {True, False}, optional
       If True, then check that all the affines for `images` are nearly
       the same, raising a ``ValueError`` otherwise.  Default is True

    Returns
    -------
    concat_img : ``SpatialImage``
       New image resulting from concatenating `images` across last
       dimension
    '''
    n_imgs = len(images)
    img0 = images[0]
    is_filename = False
    if not hasattr(img0, 'get_data'):
        img0 = load(img0)
        is_filename = True
    i0shape = img0.shape
    affine = img0.get_affine()
    header = img0.get_header()
    out_shape = (n_imgs, ) + i0shape
    out_data = np.empty(out_shape)
    for i, img in enumerate(images):
        if is_filename:
            img = load(img)
        if check_affines:
            if not np.all(img.get_affine() == affine):
                raise ValueError('Affines do not match')
        out_data[i] = img.get_data()
    out_data = np.rollaxis(out_data, 0, len(i0shape)+1)
    klass = img0.__class__
    return klass(out_data, affine, header) 
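Here rollaxis moves the image index from the front to the end, so the images end up concatenated along the last dimension as the docstring promises. A shape-only sketch with made-up dimensions:

import numpy as np

out_data = np.zeros((5, 64, 64, 30))    # 5 stacked 3-D volumes
out_data = np.rollaxis(out_data, 0, 4)  # -> (64, 64, 30, 5), concat axis last
print(out_data.shape)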
Example 28
Project: me-ica   Author: ME-ICA   File: test_affines.py    License: GNU Lesser General Public License v2.1
def test_apply_affine():
    rng = np.random.RandomState(20110903)
    aff = np.diag([2, 3, 4, 1])
    pts = rng.uniform(size=(4,3))
    assert_array_equal(apply_affine(aff, pts),
                       pts * [[2, 3, 4]])
    aff[:3,3] = [10, 11, 12]
    assert_array_equal(apply_affine(aff, pts),
                       pts * [[2, 3, 4]] + [[10, 11, 12]])
    aff[:3,:] = rng.normal(size=(3,4))
    exp_res = np.concatenate((pts.T, np.ones((1,4))), axis=0)
    exp_res = np.dot(aff, exp_res)[:3,:].T
    assert_array_equal(apply_affine(aff, pts), exp_res)
    # Check we get the same result as the previous implementation
    assert_almost_equal(validated_apply_affine(aff, pts), apply_affine(aff, pts))
    # Check that lists work for inputs
    assert_array_equal(apply_affine(aff.tolist(), pts.tolist()), exp_res)
    # Check that it's the same as a banal implementation in the simple case
    aff = np.array([[0,2,0,10],[3,0,0,11],[0,0,4,12],[0,0,0,1]])
    pts = np.array([[1,2,3],[2,3,4],[4,5,6],[6,7,8]])
    exp_res = (np.dot(aff[:3,:3], pts.T) + aff[:3,3:4]).T
    assert_array_equal(apply_affine(aff, pts), exp_res)
    # That points can be reshaped and you'll get the same shape output
    pts = pts.reshape((2,2,3))
    exp_res = exp_res.reshape((2,2,3))
    assert_array_equal(apply_affine(aff, pts), exp_res)
    # That ND also works
    for N in range(2,6):
        aff = np.eye(N)
        nd = N-1
        aff[:nd,:nd] = rng.normal(size=(nd,nd))
        pts = rng.normal(size=(2,3,nd))
        res = apply_affine(aff, pts)
        # crude apply
        new_pts = np.ones((N,6))
        new_pts[:-1,:] = np.rollaxis(pts, -1).reshape((nd,6))
        exp_pts = np.dot(aff, new_pts)
        exp_pts = np.rollaxis(exp_pts[:-1,:], 0, 2)
        exp_res = exp_pts.reshape((2,3,nd))
        assert_array_almost_equal(res, exp_res) 
Example 29
Project: tangent   Author: google   File: tangents.py    License: Apache License 2.0
def trollaxis(z, a, axis, start=0):
  d[z] = numpy.rollaxis(d[a], axis, start) 
Example 30
Project: pyGSTi   Author: pyGSTio   File: spamvec.py    License: Apache License 2.0
def __init__(self, vec, basis, truncate=False):
        """
        Initialize a CPTPSPAMVec object.

        Parameters
        ----------
        vec : array_like or SPAMVec
            a 1D numpy array representing the SPAM operation.  The
            shape of this array sets the dimension of the SPAM op.

        basis : {"std", "gm", "pp", "qt"} or Basis
            The basis `vec` is in.  Needed because this parameterization
            requires we construct the density matrix corresponding to
            the Liouville vector `vec`.

        truncate : bool, optional
            Whether or not a non-positive, trace=1 `vec` should
            be truncated to force a successful construction.
        """
        vector = SPAMVec.convert_to_vector(vec)
        basis = _Basis.cast(basis, len(vector))

        self.basis = basis
        self.basis_mxs = basis.elements  # shape (len(vec), dmDim, dmDim)
        self.basis_mxs = _np.rollaxis(self.basis_mxs, 0, 3)  # shape (dmDim, dmDim, len(vec))
        assert(self.basis_mxs.shape[-1] == len(vector))

        # set self.params and self.dmDim
        self._set_params_from_vector(vector, truncate)

        #scratch space
        self.Lmx = _np.zeros((self.dmDim, self.dmDim), 'complex')

        DenseSPAMVec.__init__(self, vector, "densitymx", "prep")