Python numpy.fliplr() Examples

The following are 29 code examples of numpy.fliplr(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
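As a quick reference before the project examples, here is a minimal standalone sketch of the basic behaviour: numpy.fliplr() reverses the order of the columns (the second axis) of an array with at least two dimensions, which is the same as the slice a[:, ::-1].

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])

# fliplr reverses the columns (a left-right flip); rows are untouched.
print(np.fliplr(a))
# [[3 2 1]
#  [6 5 4]]

# Equivalent slicing form; note that fliplr requires ndim >= 2.
assert np.array_equal(np.fliplr(a), a[:, ::-1])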
Example #1
Source File: movie.py    From kvae with MIT License    7 votes
def save_movie_to_frame(images, filename, idx=0, cmap='Blues'):
    # Collect to single image
    image = movie_to_frame(images[idx])

    # Flip it
    # image = np.fliplr(image)
    # image = np.flipud(image)

    f = plt.figure(figsize=[12, 12])
    plt.imshow(image, cmap=plt.cm.get_cmap(cmap), interpolation='none', vmin=0, vmax=1)

    plt.axis('image')
    plt.xticks([])
    plt.yticks([])
    plt.savefig(filename, format='png', bbox_inches='tight', dpi=80)
    plt.close(f) 
Example #2
Source File: landmark_augment.py    From face_landmark_dnn with MIT License    6 votes
def __flip(self, image, landmarks, run_prob=0.5):
        """
        Do image flop. Only for horizontal
        Args:
            image: a numpy type
            landmarks: face landmarks with format [(x1, y1), (x2, y2), ...]
            run_prob: probability to do this operate. 0.0-1.0
        Return:
            an image and landmarks will be returned
        Raises:
            Unsupport count of landmarks
        """
        if np.random.rand() >= run_prob:
            return image, landmarks
        image = np.fliplr(image)
        landmarks[:, 0] = image.shape[1] - landmarks[:, 0]
        landmarks = LandmarkHelper.flip(landmarks, landmarks.shape[0])
        return image, landmarks 
Example #3
Source File: data_loader.py    From Keras-GAN with MIT License    6 votes
def load_data(self, domain, batch_size=1, is_testing=False):
        data_type = "train%s" % domain if not is_testing else "test%s" % domain
        path = glob('./datasets/%s/%s/*' % (self.dataset_name, data_type))

        batch_images = np.random.choice(path, size=batch_size)

        imgs = []
        for img_path in batch_images:
            img = self.imread(img_path)
            if not is_testing:
                img = scipy.misc.imresize(img, self.img_res)

                if np.random.random() > 0.5:
                    img = np.fliplr(img)
            else:
                img = scipy.misc.imresize(img, self.img_res)
            imgs.append(img)

        imgs = np.array(imgs)/127.5 - 1.

        return imgs 
Example #4
Source File: operator.py    From scarlet with MIT License    6 votes
def prox_soft_symmetry(X, step, strength=1):
    """Soft version of symmetry
    Using a `strength` that varies from 0 to 1,
    with 0 meaning no symmetry enforced at all and
    1  being completely symmetric, the user can customize
    the level of symmetry required for a component
    """
    pads = [[0, 0], [0, 0]]
    slices = [slice(None), slice(None)]
    if X.shape[0] % 2 == 0:
        pads[0][1] = 1
        slices[0] = slice(0, X.shape[0])
    if X.shape[1] % 2 == 0:
        pads[1][1] = 1
        slices[1] = slice(0, X.shape[1])

    X = fft.fast_zero_pad(X, pads)
    Xs = np.fliplr(np.flipud(X))
    X = 0.5 * strength * (X + Xs) + (1 - strength) * X
    return X[tuple(slices)] 
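A minimal sketch of the blend used above, in plain NumPy rather than scarlet's API, on an odd-sized array so that no zero-padding is needed: Xs = np.fliplr(np.flipud(X)) is the 180-degree rotation of X, and the weighted average interpolates between the original and its fully symmetrized version.

import numpy as np

X = np.arange(9, dtype=float).reshape(3, 3)
Xs = np.fliplr(np.flipud(X))  # 180-degree rotation of X
for strength in (0.0, 0.5, 1.0):
    blended = 0.5 * strength * (X + Xs) + (1 - strength) * X
    symmetric = np.allclose(blended, np.fliplr(np.flipud(blended)))
    print(strength, symmetric)  # only strength=1.0 gives a fully point-symmetric array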
Example #5
Source File: geometry.py    From connecting_the_dots with MIT License    6 votes
def decompose_projection_matrix(P, return_t=True):
  if P.shape[0] != 3 or P.shape[1] != 4:
    raise Exception('P has to be 3x4')
  M = P[:, :3]
  C = -np.linalg.inv(M) @ P[:, 3:]

  R,K = np.linalg.qr(np.flipud(M).T)
  K = np.flipud(K.T)
  K = np.fliplr(K)
  R = np.flipud(R.T)

  T = np.diag(np.sign(np.diag(K)))
  K = K @ T
  R = T @ R

  if np.linalg.det(R) < 0:
    R *= -1

  K /= K[2,2]
  if return_t:
    return K, R, cameracenter_to_translation(R, C)
  else:
    return K, R, C 
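The flipped QR calls above implement an RQ decomposition of the 3x3 camera submatrix M: applying np.linalg.qr to np.flipud(M).T and un-flipping the factors yields an upper-triangular K and an orthogonal R with K @ R equal to M (the sign and scale fixing happens afterwards). A small standalone check of that identity, using a random stand-in matrix rather than a real projection matrix:

import numpy as np

M = np.random.rand(3, 3)              # stand-in for P[:, :3]
Q, U = np.linalg.qr(np.flipud(M).T)   # np.flipud(M).T == Q @ U, with U upper triangular
K = np.fliplr(np.flipud(U.T))         # upper-triangular factor of M
R = np.flipud(Q.T)                    # orthogonal factor of M
assert np.allclose(K @ R, M)
assert np.allclose(K, np.triu(K))     # K really is upper triangular
assert np.allclose(R @ R.T, np.eye(3))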
Example #6
Source File: dicomwrappers.py    From me-ica with GNU Lesser General Public License v2.1    6 votes
def rotation_matrix(self):
        ''' Return rotation matrix between array indices and mm

        Note that we swap the two columns of the 'ImageOrientPatient'
        when we create the rotation matrix.  This takes into account
        the slightly odd ij transpose construction of the DICOM
        orientation fields - see doc/theory/dicom_orientation.rst.
        '''
        iop = self.image_orient_patient
        s_norm = self.slice_normal
        if None in (iop, s_norm):
            return None
        R = np.eye(3)
        # np.fliplr(iop) gives matrix F in
        # doc/theory/dicom_orientation.rst The fliplr accounts for the
        # fact that the first column in ``iop`` refers to changes in
        # column index, and the second to changes in row index.
        R[:,:2] = np.fliplr(iop)
        R[:,2] = s_norm
        # check this is in fact a rotation matrix
        assert np.allclose(np.eye(3),
                           np.dot(R, R.T),
                           atol=1e-6)
        return R 
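For a two-column matrix such as ImageOrientPatient, the np.fliplr call above is exactly the column swap the comment describes; a toy check with made-up direction cosines (not real DICOM data):

import numpy as np

iop = np.array([[1.0, 0.0],
                [0.0, 1.0],
                [0.0, 0.0]])
assert np.array_equal(np.fliplr(iop), iop[:, ::-1])       # columns reversed
assert np.array_equal(np.fliplr(iop)[:, 0], iop[:, 1])    # i.e. the two columns swapped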
Example #7
Source File: test_orientations.py    From me-ica with GNU Lesser General Public License v2.1    6 votes
def test_flip_axis():
    a = np.arange(24).reshape((2,3,4))
    assert_array_equal(
        flip_axis(a),
        np.flipud(a))
    assert_array_equal(
        flip_axis(a, axis=0),
        np.flipud(a))
    assert_array_equal(
        flip_axis(a, axis=1),
        np.fliplr(a))
    # check accepts array-like
    assert_array_equal(
        flip_axis(a.tolist(), axis=0),
        np.flipud(a))
    # third dimension
    b = a.transpose()
    b = np.flipud(b)
    b = b.transpose()
    assert_array_equal(flip_axis(a, axis=2), b) 
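For reference, NumPy's own generalization of these helpers is np.flip (available since NumPy 1.12): np.flipud and np.fliplr are the axis-0 and axis-1 special cases, and axis 2 corresponds to the third-dimension branch tested above.

import numpy as np

a = np.arange(24).reshape((2, 3, 4))
assert np.array_equal(np.flip(a, axis=0), np.flipud(a))
assert np.array_equal(np.flip(a, axis=1), np.fliplr(a))
assert np.array_equal(np.flip(a, axis=2), a[:, :, ::-1])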
Example #8
Source File: densepose_methods.py    From Parsing-R-CNN with MIT License    6 votes
def get_symmetric_densepose(self, I, U, V, x, y, Mask):
        # This is a function to get the mirror symmetric UV labels.
        Labels_sym = np.zeros(I.shape)
        U_sym = np.zeros(U.shape)
        V_sym = np.zeros(V.shape)
        for i in (range(24)):
            if i + 1 in I:
                Labels_sym[I == (i + 1)] = self.Index_Symmetry_List[i]
                jj = np.where(I == (i + 1))
                U_loc = (U[jj] * 255).astype(np.int64)
                V_loc = (V[jj] * 255).astype(np.int64)
                V_sym[jj] = self.UV_symmetry_transformations['V_transforms'][0, i][V_loc, U_loc]
                U_sym[jj] = self.UV_symmetry_transformations['U_transforms'][0, i][V_loc, U_loc]
        Mask_flip = np.fliplr(Mask)
        Mask_flipped = np.zeros(Mask.shape)

        for i in (range(14)):
            Mask_flipped[Mask_flip == (i + 1)] = self.SemanticMaskSymmetries[i + 1]
        [y_max, x_max] = Mask_flip.shape
        y_sym = y
        x_sym = x_max - x

        return Labels_sym, U_sym, V_sym, x_sym, y_sym, Mask_flipped 
Example #9
Source File: datasets.py    From StackGAN with MIT License    6 votes
def transform(self, images):
        if self._aug_flag:
            transformed_images =\
                np.zeros([images.shape[0], self._imsize, self._imsize, 3])
            ori_size = images.shape[1]
            for i in range(images.shape[0]):
                h1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
                w1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
                cropped_image =\
                    images[i][w1: w1 + self._imsize, h1: h1 + self._imsize, :]
                if random.random() > 0.5:
                    transformed_images[i] = np.fliplr(cropped_image)
                else:
                    transformed_images[i] = cropped_image
            return transformed_images
        else:
            return images 
Example #10
Source File: network.py    From Training-Neural-Networks-for-Event-Based-End-to-End-Robot-Control with GNU General Public License v3.0    6 votes
def __init__(self):
		nest.ResetKernel()
		nest.SetKernelStatus({"local_num_threads" : 1, "resolution" : p.time_resolution})
		self.spike_generators_l = nest.Create("poisson_generator", p.resolution[0]//2*p.resolution[1], params=p.poisson_params)
		self.spike_generators_r = nest.Create("poisson_generator", p.resolution[0]//2*p.resolution[1], params=p.poisson_params)
		self.neuron_l = nest.Create("iaf_psc_alpha", params=p.iaf_params)
		self.neuron_r = nest.Create("iaf_psc_alpha", params=p.iaf_params)
		self.spike_detector_l = nest.Create("spike_detector", params={"withtime": True})
		self.spike_detector_r = nest.Create("spike_detector", params={"withtime": True})
		self.multimeter_l = nest.Create("multimeter", params={"withtime":True, "record_from":["V_m"]})
		self.multimeter_r = nest.Create("multimeter", params={"withtime":True, "record_from":["V_m"]})
		weights_l = np.fliplr(p.weights_l.T).reshape(p.weights_l.size)
		weights_r = np.fliplr(p.weights_r.T).reshape(p.weights_r.size)
		for i in range(weights_l.size):
			syn_dict = {"model": "static_synapse", 
						"weight": weights_l[i]}
			nest.Connect([self.spike_generators_l[i]], self.neuron_l, syn_spec=syn_dict)
		for i in range(weights_r.size):
			syn_dict = {"model": "static_synapse", 
						"weight": weights_r[i]}
			nest.Connect([self.spike_generators_r[i]], self.neuron_r, syn_spec=syn_dict)
		nest.Connect(self.neuron_l, self.spike_detector_l)
		nest.Connect(self.neuron_r, self.spike_detector_r)
		nest.Connect(self.multimeter_l, self.neuron_l)
		nest.Connect(self.multimeter_r, self.neuron_r) 
Example #11
Source File: utils.py    From Residual_Image_Learning_GAN with MIT License    6 votes
def center_crop(x, crop_h, crop_w=None, resize_w=64):

    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    j = int(round((h - crop_h)/2.))
    i = int(round((w - crop_w)/2.))

    rate = np.random.uniform(0, 1, size=1)

    if rate < 0.5:
        x = np.fliplr(x)

    # first crop to 178x178, then resize to [resize_w, resize_w]
    return scipy.misc.imresize(x[20:218-20, 0: 178], [resize_w, resize_w])

    # Alternative crop method:

    # return scipy.misc.imresize(x[j:j+crop_h, i:i+crop_w],
    #                            [resize_w, resize_w]) 
Example #12
Source File: image_processing.py    From text-to-image with MIT License    6 votes
def load_image_array(image_file, image_size):
	img = skimage.io.imread(image_file)
	# GRAYSCALE
	if len(img.shape) == 2:
		img_new = np.ndarray( (img.shape[0], img.shape[1], 3), dtype = 'uint8')
		img_new[:,:,0] = img
		img_new[:,:,1] = img
		img_new[:,:,2] = img
		img = img_new

	img_resized = skimage.transform.resize(img, (image_size, image_size))

	# FLIP HORIZONTALLY WITH A PROBABILITY OF 0.5
	if random.random() > 0.5:
		img_resized = np.fliplr(img_resized)
	
	
	return img_resized.astype('float32') 
Example #13
Source File: inky212x104.py    From inky-phat with MIT License    6 votes
def update(self):
        if self.inky_colour is None:
            raise RuntimeError("You must specify which colour of Inky pHAT you're using: inkyphat.set_colour('red', 'black' or 'yellow')")

        self._display_init()

        x1, x2 = self.update_x1, self.update_x2
        y1, y2 = self.update_y1, self.update_y2

        region = self.buffer[y1:y2, x1:x2]

        if self.v_flip:
            region = numpy.fliplr(region)

        if self.h_flip:
            region = numpy.flipud(region)

        buf_red = numpy.packbits(numpy.where(region == RED, 1, 0)).tolist()
        if self.inky_version == 1:
            buf_black = numpy.packbits(numpy.where(region == 0, 0, 1)).tolist()
        else:
            buf_black = numpy.packbits(numpy.where(region == BLACK, 0, 1)).tolist()

        self._display_update(buf_black, buf_red)
        self._display_fini() 
Example #14
Source File: create_svhn_dataset.py    From stn-ocr with GNU General Public License v3.0    5 votes
def fade_image(self, image, fade_percentage=0.4):
        def interpolate_width(data):
            num_interpolation_pixels = int(data.shape[1] * fade_percentage)
            interpolation_start = data.shape[1] - num_interpolation_pixels
            for i in range(num_interpolation_pixels):
                data[:, interpolation_start + i, 3] *= (num_interpolation_pixels - i) / num_interpolation_pixels
            return data

        def interpolate_height(data):
            num_interpolation_pixels = int(data.shape[0] * fade_percentage)
            interpolation_start = data.shape[0] - num_interpolation_pixels
            for i in range(num_interpolation_pixels):
                data[interpolation_start + i, :, 3] *= (num_interpolation_pixels - i) / num_interpolation_pixels
            return data

        image_data = np.asarray(image).copy().astype(np.float64)

        # create horizontal alpha mask
        image_data = interpolate_width(image_data)

        image_data = np.fliplr(image_data)
        image_data = interpolate_width(image_data)
        image_data = np.fliplr(image_data)

        # create vertical alpha mask
        image_data = interpolate_height(image_data)

        image_data = np.flipud(image_data)
        image_data = interpolate_height(image_data)
        image_data = np.flipud(image_data)

        image = Image.fromarray(image_data.astype(np.uint8), mode='RGBA')
        return image 
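The fliplr/flipud sandwich above is a common pattern: a routine written to fade only the right (or bottom) edge is reused for the opposite edge by flipping, applying it, and flipping back. A reduced sketch of the idea, with a hypothetical fade_right helper and the alpha channel and PIL round-trip omitted:

import numpy as np

def fade_right(data, fade_percentage=0.4):
    out = data.astype(np.float64).copy()
    n = int(out.shape[1] * fade_percentage)
    start = out.shape[1] - n
    for i in range(n):
        out[:, start + i] *= (n - i) / n
    return out

img = np.ones((4, 10))
# Same routine, opposite edge: flip, fade the "right" side, flip back.
faded_left = np.fliplr(fade_right(np.fliplr(img)))
assert faded_left[0, 0] < 1.0 and faded_left[0, -1] == 1.0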
Example #15
Source File: transforms.py    From ACAN with MIT License    5 votes
def __call__(self, img):
        """
        Args:
            img (numpy.ndarray (C x H x W)): Image to be flipped.

        Returns:
            img (numpy.ndarray (C x H x W)): flipped image.
        """
        if not(_is_numpy_image(img)):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))

        if self.do_flip:
            return np.fliplr(img)
        else:
            return img 
Example #16
Source File: test_base_execute.py    From mars with Apache License 2.0    5 votes
def testFlipExecution(self):
        a = arange(8, chunk_size=2).reshape((2, 2, 2))

        t = flip(a, 0)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.flip(np.arange(8).reshape(2, 2, 2), 0)
        np.testing.assert_equal(res, expected)

        t = flip(a, 1)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.flip(np.arange(8).reshape(2, 2, 2), 1)
        np.testing.assert_equal(res, expected)

        t = flipud(a)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.flipud(np.arange(8).reshape(2, 2, 2))
        np.testing.assert_equal(res, expected)

        t = fliplr(a)

        res = self.executor.execute_tensor(t, concat=True)[0]
        expected = np.fliplr(np.arange(8).reshape(2, 2, 2))
        np.testing.assert_equal(res, expected) 
Example #17
Source File: reader.py    From variance-networks with Apache License 2.0    5 votes
def batch_iterator_train_crop_flip(data, y, batchsize, shuffle=False, PIXELS=32, PAD_CROP=4):
    data = data.transpose((0, 3, 1, 2))
    n_samples = data.shape[0]
    indx = np.random.permutation(xrange(n_samples))
    for i in range((n_samples + batchsize - 1) // batchsize):
        sl = slice(i * batchsize, (i + 1) * batchsize)
        X_batch = data[indx[sl]]
        y_batch = y[indx[sl]]

        # pad and crop settings
        trans_1 = random.randint(0, (PAD_CROP*2))
        trans_2 = random.randint(0, (PAD_CROP*2))
        crop_x1 = trans_1
        crop_x2 = (PIXELS + trans_1)
        crop_y1 = trans_2
        crop_y2 = (PIXELS + trans_2)

        # flip left-right choice
        flip_lr = random.randint(0,1)

        # set empty copy to hold augmented images so that we don't overwrite
        X_batch_aug = np.copy(X_batch)

        # for each image in the batch do the augmentation
        for j in range(X_batch.shape[0]):
            # for each image channel
            for k in range(X_batch.shape[1]):
                # pad and crop images
                img_pad = np.pad(
                    X_batch_aug[j, k], pad_width=((PAD_CROP, PAD_CROP), (PAD_CROP, PAD_CROP)), mode='constant')
                X_batch_aug[j, k] = img_pad[crop_x1:crop_x2, crop_y1:crop_y2]

                # flip left-right if chosen
                if flip_lr == 1:
                    X_batch_aug[j, k] = np.fliplr(X_batch_aug[j,k])

        X_batch_aug = X_batch_aug.transpose((0, 2, 3, 1))
        yield X_batch_aug, y_batch 
Example #18
Source File: augmentation.py    From open-solution-salt-identification with MIT License    5 votes
def per_channel_fliplr(x):
    x_ = x.copy()
    for i, channel in enumerate(x):
        x_[i, :, :] = np.fliplr(channel)
    return x_ 
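Assuming x has shape (channels, height, width), as the loop suggests, flipping every channel left-right is the same as a single flip along the last (width) axis:

import numpy as np

x = np.random.rand(3, 4, 5)                    # (C, H, W)
looped = np.stack([np.fliplr(c) for c in x])   # what per_channel_fliplr computes
assert np.array_equal(looped, np.flip(x, axis=2))
assert np.array_equal(looped, x[..., ::-1])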
Example #19
Source File: augmentation.py    From open-solution-salt-identification with MIT License    5 votes
def test_time_augmentation_transform(image, tta_parameters):
    if tta_parameters['ud_flip']:
        image = np.flipud(image)
    if tta_parameters['lr_flip']:
        image = np.fliplr(image)
    if tta_parameters['color_shift']:
        tta_intensity = reseed(tta_intensity_seq, deterministic=False)
        image = tta_intensity.augment_image(image)
    image = rotate(image, tta_parameters['rotation'])
    return image 
Example #20
Source File: utils.py    From DeepLab_v3 with MIT License    5 votes
def flip_image_and_label(image, label):

    image_flipped = np.fliplr(image)
    label_flipped = np.fliplr(label)

    return image_flipped, label_flipped 
Example #21
Source File: identification.py    From kaggle-humpback with BSD 2-Clause "Simplified" License    5 votes
def __getitem__(self, index):
        example = self.df_examples.iloc[index % len(self.df_examples)]

        filepath = example['filepath']
        image = misc.imread(example['filepath'], mode='RGB')
        assert image.ndim == 3
        assert image.shape[-1] == 3

        if 'category' in example:
            category = example['category']
        else:
            category = None

        if self.split == 'train' and index > len(self.df_examples):
            category = category + self.num_category
            image = np.fliplr(image)
            box = [1.0 - example['x'], example['y'], example['w'], example['h']]
            landmark = [1.0 - example['xr'], example['yr'], 1.0 - example['xn'], example['yn'],
                        1.0 - example['xl'], example['yl'], 1.0 - example['xd'], example['yd']]
        else:
            box = [example['x'], example['y'], example['w'], example['h']]
            landmark = [example['xl'], example['yl'], example['xn'], example['yn'],
                        example['xr'], example['yr'], example['xd'], example['yd']]

        if self.transform is not None:
            image = self.transform(image, box, landmark, example['Image'])
            
        if category is not None:
            return {'image': image,
                    'label': category,
                    'id': example['Id'],
                    'key': example['Image']}
        else:
            return {'image': image,
                    'id': example['Id'],
                    'key': example['Image']} 
Example #22
Source File: operator.py    From scarlet with MIT License    5 votes
def prox_sdss_symmetry(X, step):
    """SDSS/HSC symmetry operator

    This function uses the *minimum* of the two
    symmetric pixels in the update.
    """
    Xs = np.fliplr(np.flipud(X))
    X[:] = np.min([X, Xs], axis=0)
    return X 
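A small check of the property the docstring describes: the element-wise minimum of X and its 180-degree rotation is always itself symmetric under that rotation, because flipping simply swaps the two arguments of the minimum.

import numpy as np

X = np.random.rand(5, 5)
Xs = np.fliplr(np.flipud(X))     # 180-degree rotation of X
Y = np.min([X, Xs], axis=0)      # the update applied by prox_sdss_symmetry
assert np.allclose(Y, np.fliplr(np.flipud(Y)))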
Example #23
Source File: test_transformers.py    From deepchem with MIT License    5 votes
def test_flipping(self):
    # Check flip
    dt = DataTransforms(self.d)
    flip_lr = dt.flip(direction="lr")
    flip_ud = dt.flip(direction="ud")
    check_lr = np.fliplr(self.d)
    check_ud = np.flipud(self.d)
    assert np.allclose(flip_ud, check_ud)
    assert np.allclose(flip_lr, check_lr) 
Example #24
Source File: transformers.py    From deepchem with MIT License    5 votes
def flip(self, direction="lr"):
    """ Flips the image
          Parameters:
              direction - "lr" denotes left-right fliplr
                          "ud" denotes up-down flip
    """
    if direction == "lr":
      return np.fliplr(self.Image)
    elif direction == "ud":
      return np.flipud(self.Image)
    else:
      raise ValueError(
          "Invalid flip command : Enter either lr (for left to right flip) or ud (for up to down flip)"
      ) 
Example #25
Source File: cifar10.py    From ResNeXt-Tensorflow with MIT License    5 votes
def _random_flip_leftright(batch):
    for i in range(len(batch)):
        if bool(random.getrandbits(1)):
            batch[i] = np.fliplr(batch[i])
    return batch 
Example #26
Source File: util.py    From rcan-tensorflow with MIT License    5 votes
def rotate(images):
    images = np.append(images, [np.fliplr(image) for image in images], axis=0)  # horizontal (left-right) flip
    images = np.append(images, [np.rot90(image) for image in images], axis=0)   # 90 degree
    return images 
Example #27
Source File: train.py    From alpha-zero with MIT License    5 votes
def augment_data(self, game_state, training_data, row, column):
        """Loop for each self-play game.

        Runs MCTS for each game state and plays a move based on the MCTS output.
        Stops when the game is over and prints out a winner.

        Args:
            game_state: An object containing the state, pis and value.
            training_data: A list to store self play states, pis and vs.
            row: An integer indicating the length of the board row.
            column: An integer indicating the length of the board column.
        """
        state = deepcopy(game_state[0])
        psa_vector = deepcopy(game_state[1])

        if CFG.game == 2 or CFG.game == 1:
            training_data.append([state, psa_vector, game_state[2]])
        else:
            psa_vector = np.reshape(psa_vector, (row, column))

            # Augment data by rotating and flipping the game state.
            for i in range(4):
                training_data.append([np.rot90(state, i),
                                      np.rot90(psa_vector, i).flatten(),
                                      game_state[2]])

                training_data.append([np.fliplr(np.rot90(state, i)),
                                      np.fliplr(
                                          np.rot90(psa_vector, i)).flatten(),
                                      game_state[2]]) 
Example #28
Source File: flip_image.py    From gradio-UI with Apache License 2.0    5 votes
def flip2(image):
    start = time()
    return np.fliplr(image), time() - start 
Example #29
Source File: cifar10.py    From TripleGAN-Tensorflow with MIT License    5 votes
def _random_flip_leftright(batch):
    for i in range(len(batch)):
        if bool(random.getrandbits(1)):
            batch[i] = np.fliplr(batch[i])
    return batch