Python PIL.Image.FLIP_LEFT_RIGHT Examples

The following are 30 code examples of PIL.Image.FLIP_LEFT_RIGHT(). Each example is taken from an open-source project; the source file, project name, and license are listed above the code. You may also want to check out the other available functions and classes of the PIL.Image module.
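Before the project examples, here is a minimal, self-contained sketch of the constant in use. The file names are placeholders, and note that Pillow 9.1 and later also expose the same flag as Image.Transpose.FLIP_LEFT_RIGHT.

from PIL import Image

# Minimal sketch: mirror an image horizontally ('input.jpg' is a placeholder path).
img = Image.open('input.jpg')
mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
# On Pillow >= 9.1 the enum form is preferred:
# mirrored = img.transpose(Image.Transpose.FLIP_LEFT_RIGHT)
mirrored.save('input_flipped.jpg')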
Example #1
Source File: predict.py    From Custom-vision-service-iot-edge-raspberry-pi with MIT License
def update_orientation(image):
    exif_orientation_tag = 0x0112
    if hasattr(image, '_getexif'):
        exif = image._getexif()
        if exif != None and exif_orientation_tag in exif:
            orientation = exif.get(exif_orientation_tag, 1)
            log_msg('Image has EXIF Orientation: ' + str(orientation))
            # orientation is 1 based, shift to zero based and flip/transpose based on 0-based values
            orientation -= 1
            if orientation >= 4:
                image = image.transpose(Image.TRANSPOSE)
            if orientation == 2 or orientation == 3 or orientation == 6 or orientation == 7:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            if orientation == 1 or orientation == 2 or orientation == 5 or orientation == 6:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
    return image 
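Example #1 (repeated in Example #2 below for the amd64 build) undoes the EXIF Orientation tag by combining TRANSPOSE, FLIP_TOP_BOTTOM and FLIP_LEFT_RIGHT. As a hedged aside, recent Pillow releases (6.0 and later) ship ImageOps.exif_transpose, which performs the same normalization in a single call; a minimal sketch with a placeholder path:

from PIL import Image, ImageOps

# Sketch only: normalize EXIF orientation with Pillow's built-in helper.
img = Image.open('photo.jpg')          # 'photo.jpg' is a placeholder
img = ImageOps.exif_transpose(img)     # returns the image with its orientation applied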
Example #2
Source File: predict-amd64.py    From Custom-vision-service-iot-edge-raspberry-pi with MIT License
def update_orientation(image):
    exif_orientation_tag = 0x0112
    if hasattr(image, '_getexif'):
        exif = image._getexif()
        if exif != None and exif_orientation_tag in exif:
            orientation = exif.get(exif_orientation_tag, 1)
            log_msg('Image has EXIF Orientation: ' + str(orientation))
            # orientation is 1 based, shift to zero based and flip/transpose based on 0-based values
            orientation -= 1
            if orientation >= 4:
                image = image.transpose(Image.TRANSPOSE)
            if orientation == 2 or orientation == 3 or orientation == 6 or orientation == 7:
                image = image.transpose(Image.FLIP_TOP_BOTTOM)
            if orientation == 1 or orientation == 2 or orientation == 5 or orientation == 6:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
    return image 
Example #3
Source File: data_utils.py    From EvolutionaryGAN with MIT License
def FlipImgs(xmb, ymb):
    bs, ci, hi, wi = xmb.shape
    xmb_out = np.zeros((bs,ci,hi,wi),dtype='uint8')
    ymb_out = np.zeros((bs,ci,hi,wi),dtype='uint8')
    for i in xrange(bs):
        x_img = xmb[i,:,:,:]
        y_img = ymb[i,:,:,:]
        if np.random.rand(1) > 0.5:
            x_img = convert_img_back(x_img)
            y_img = convert_img_back(y_img)

            x_img = Image.fromarray(x_img)
            y_img = Image.fromarray(y_img)

            x_img = x_img.transpose(Image.FLIP_LEFT_RIGHT)
            y_img = y_img.transpose(Image.FLIP_LEFT_RIGHT)

            x_img = convert_img(np.array(x_img))
            y_img = convert_img(np.array(y_img))
        xmb_out[i,:,:,:] = x_img
        ymb_out[i,:,:,:] = y_img
    xmb_out = xmb_out.reshape([bs,ci,hi,wi])
    ymb_out = ymb_out.reshape([bs,ci,hi,wi])
    return xmb_out, ymb_out
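Example #3 round-trips each CHW array through PIL just to mirror it. As an aside (convert_img and convert_img_back are project helpers not shown here), the same left-right flip can be done directly on the NumPy array by reversing the width axis; a small sketch:

import numpy as np

# Sketch: mirror a (C, H, W) uint8 array without going through PIL.
# Reversing the last (width) axis is equivalent to Image.FLIP_LEFT_RIGHT.
x = np.arange(2 * 3 * 4, dtype='uint8').reshape(2, 3, 4)   # toy (C, H, W) array
x_flipped = x[:, :, ::-1].copy()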
Example #4
Source File: pil_aug_transforms.py    From openseg.pytorch with MIT License
def __call__(self, img, labelmap=None, maskmap=None):
        assert isinstance(img, Image.Image)
        assert labelmap is None or isinstance(labelmap, Image.Image)
        assert maskmap is None or isinstance(maskmap, Image.Image)

        if random.random() > self.ratio:
            return img, labelmap, maskmap

        img = img.transpose(Image.FLIP_LEFT_RIGHT)
        if labelmap is not None:
            labelmap = labelmap.transpose(Image.FLIP_LEFT_RIGHT)

        if maskmap is not None:
            maskmap = maskmap.transpose(Image.FLIP_LEFT_RIGHT)

        return img, labelmap, maskmap 
Example #5
Source File: kinetics.py    From 2D-kinectics with MIT License
def pil_random_crop(img, scale_size, output_size, params=None):
    img = pilresize(img, scale_size)
    th = output_size
    tw = output_size
    if params is None:
        w, h = img.size
        if w == tw and h == th:
            return img
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        flip = random.random()<0.5
    else:
        i,j,flip = params
    img = img.crop((j, i, j + tw, i + th))
    if flip:
        img = img.transpose(Image.FLIP_LEFT_RIGHT)

    return img, [i, j, flip] 
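Example #5 returns the sampled crop offsets and flip decision so the same augmentation can be replayed on every frame of a clip. A hedged usage sketch (the frame list and the sizes 256/224 are assumptions, and note that the example's early `return img` branch skips the params):

# Sketch: apply one randomly sampled crop/flip to all frames of a clip,
# assuming `frames` is a list of PIL images and pil_random_crop is defined as above.
first, params = pil_random_crop(frames[0], scale_size=256, output_size=224)
clip = [first] + [pil_random_crop(f, 256, 224, params)[0] for f in frames[1:]]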
Example #6
Source File: image.py    From Deep-Feature-Flow-Segmentation with MIT License
def flip_and_save_func(image_path):
    """
    flip the image by the path and save the flipped image with suffix 'flip'
    :param path: the path of specific image
    :return: the path of saved image
    """
    [image_name, image_ext] = os.path.splitext(os.path.basename(image_path))
    image_dir = os.path.dirname(image_path)
    saved_image_path = os.path.join(image_dir, image_name + '_flip' + image_ext)
    try:
        flipped_image = Image.open(saved_image_path)
    except:
        flipped_image = Image.open(image_path)
        flipped_image = flipped_image.transpose(Image.FLIP_LEFT_RIGHT)
        flipped_image.save(saved_image_path, 'png')
    return saved_image_path 
Example #7
Source File: imdb.py    From Deep-Feature-Flow-Segmentation with MIT License
def flip_and_save(self, image_path):
        """
        flip the image by the path and save the flipped image with suffix 'flip'
        :param path: the path of specific image
        :return: the path of saved image
        """
        [image_name, image_ext] = os.path.splitext(os.path.basename(image_path))
        image_dir = os.path.dirname(image_path)
        saved_image_path = os.path.join(image_dir, image_name + '_flip' + image_ext)
        try:
            flipped_image = Image.open(saved_image_path)
        except:
            flipped_image = Image.open(image_path)
            flipped_image = flipped_image.transpose(Image.FLIP_LEFT_RIGHT)
            flipped_image.save(saved_image_path, 'png')
        return saved_image_path 
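Examples #6 and #7 cache the mirrored copy on disk: if the '_flip' file already opens, it is reused, otherwise it is created next to the original. A hedged usage sketch of the module-level version from Example #6, with a placeholder path:

# Sketch: build (or reuse) a horizontally flipped twin of an image on disk.
flipped_path = flip_and_save_func('data/frame_000001.png')   # placeholder path
print(flipped_path)   # e.g. data/frame_000001_flip.png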
Example #8
Source File: icon.py    From Waveshare-E-Ink with MIT License
def __init__(self, image, icon_file, xstart=14, ystart=14):
    icon = Image.open(icon_file).convert('RGBA')
    icon = icon.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_90)

    self._name = icon_file
    self._image = image
    self._width, self._height = icon.size
    self._image.paste(icon, (xstart, ystart, xstart+self._width, ystart+self._height), mask=icon) 
Example #9
Source File: seg_dataset.py    From imgclsmob with MIT License
def _sync_transform(self, image, mask):
        # random mirror
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask 
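The resize/pad bookkeeping in Example #9 is easier to follow with concrete numbers; a sketch of the same arithmetic, with base_size=520 and crop_size=480 chosen purely for illustration:

# Sketch of Example #9's scale-then-pad arithmetic with illustrative numbers.
base_size, crop_size = 520, 480
short_size = 300                     # suppose randint(260, 1040) drew 300
w, h = 640, 480                      # original landscape image
oh = short_size                      # shorter edge (h) is scaled to short_size
ow = int(1.0 * w * oh / h)           # 400: longer edge scaled to keep the aspect ratio
padw = crop_size - ow                # 80 extra columns padded on the right
padh = crop_size - oh                # 180 extra rows padded at the bottom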
Example #10
Source File: seg_dataset.py    From imgclsmob with MIT License
def _train_sync_transform(self, image, mask, ctx=mx.cpu()):
        # Random mirror:
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        # Random scale (short edge):
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if w > h:
            oh = short_size
            ow = int(float(w * oh) / h)
        else:
            ow = short_size
            oh = int(float(h * ow) / w)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # Pad crop:
        crop_size = self.crop_size
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # Random crop crop_size:
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # Gaussian blur as in PSP:
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(radius=random.random()))
        # Final transform:
        image, mask = self._img_transform(image, ctx=ctx), self._mask_transform(mask, ctx=ctx)
        return image, mask 
Example #11
Source File: utils.py    From training_results_v0.5 with Apache License 2.0
def __call__(self, image, bboxes):
        if random.random() < self.p:
            bboxes[:, 0], bboxes[:, 2] = 1.0 - bboxes[:, 2], 1.0 - bboxes[:, 0]
            return image.transpose(Image.FLIP_LEFT_RIGHT), bboxes
        return image, bboxes

# Do data augmentation
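Example #11 mirrors normalized [xmin, ymin, xmax, ymax] boxes together with the image: the new xmin is 1 - old xmax and the new xmax is 1 - old xmin. A small numeric sketch (the box values are illustrative):

import numpy as np

# Sketch: mirror normalized boxes the same way Example #11 does.
bboxes = np.array([[0.2, 0.3, 0.5, 0.6]])                  # xmin, ymin, xmax, ymax
bboxes[:, 0], bboxes[:, 2] = 1.0 - bboxes[:, 2], 1.0 - bboxes[:, 0]
# -> [[0.5, 0.3, 0.8, 0.6]]: the x-extent is mirrored, the y-extent is unchanged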
Example #12
Source File: sampler.py    From Counting-ICCV-DSSINet with MIT License
def i_flip(img, flip):
    if flip:
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img 
Example #13
Source File: N_modules.py    From DualResidualNetworks with MIT License
def DataAugmentation(im_input, label):
    if random.random() > 0.5:
        label    = label.transpose(   Image.FLIP_LEFT_RIGHT)
        im_input = im_input.transpose(Image.FLIP_LEFT_RIGHT)
#    if random.random() > 0.5:
#        label    = label.transpose(   Image.FLIP_TOP_BOTTOM)
#        im_input = im_input.transpose(Image.FLIP_TOP_BOTTOM)
#    if random.random() > 0.5:
#        angle    = random.choice([90, 180, 270])
#        label    = label.rotate(angle)
#        im_input = im_input.rotate(angle)
    return im_input, label 
Example #14
Source File: transforms.py    From binseg_pytoch with Apache License 2.0
def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be flipped.

        Returns:
            PIL.Image: Randomly flipped image.
        """
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_LEFT_RIGHT)
        return img 
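Example #14 is the classic RandomHorizontalFlip transform written by hand. For comparison (an aside, not part of the project), torchvision ships the same behaviour ready-made:

from torchvision import transforms

# Sketch: torchvision's built-in equivalent of Example #14.
flip = transforms.RandomHorizontalFlip(p=0.5)   # flips a PIL image with probability p
# augmented = flip(img)                         # where img is a PIL.Image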
Example #15
Source File: base_dataset.py    From everybody_dance_now_pytorch with GNU Affero General Public License v3.0
def __flip(img, flip):
    if flip:
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img 
Example #16
Source File: transforms.py    From GST-video with MIT License
def __call__(self, img):

		img_group,label = img

		if self.scale_worker is not None:
			img_group = self.scale_worker(img_group)

		image_w, image_h = img_group[0].size
		crop_w, crop_h = self.crop_size

		offsets = GroupMultiScaleCrop.fill_fix_offset(False, image_w, image_h, crop_w, crop_h)
		oversample_group = list()
		for o_w, o_h in offsets:
			normal_group = list()
			flip_group = list()
			for i, img in enumerate(img_group):
				crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
				normal_group.append(crop)
				flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)

				if img.mode == 'L' and i % 2 == 0:
					flip_group.append(ImageOps.invert(flip_crop))
				else:
					flip_group.append(flip_crop)

			oversample_group.extend(normal_group)
			oversample_group.extend(flip_group)
		return oversample_group,label 
Example #17
Source File: joint_transforms.py    From cross-season-segmentation with MIT License
def __call__(self, img, mask):
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(
                Image.FLIP_LEFT_RIGHT)
        return img, mask 
Example #18
Source File: transforms.py    From pnn.pytorch.update with MIT License
def __call__(self, input):
		if random.random() < 0.5:
			input['img'] = input['img'].transpose(Image.FLIP_LEFT_RIGHT)
			input['tgt'] = input['tgt'].transpose(Image.FLIP_LEFT_RIGHT)
			input['loc'][0] = input['loc'][0] - math.ceil(input['img'].size[0]/2)
		return input 
Example #19
Source File: seg_dataset.py    From imgclsmob with MIT License
def _sync_transform(self, image, mask):
        # random mirror
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask 
Example #20
Source File: functional.py    From Global-Second-order-Pooling-Convolutional-Networks with MIT License
def hflip(img):
    """Horizontally flip the given PIL Image.

    Args:
        img (PIL Image): Image to be flipped.

    Returns:
        PIL Image: Horizontally flipped image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_LEFT_RIGHT) 
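Example #20 is the torchvision-style functional form: a type check plus the transpose call. A short usage sketch, assuming hflip is defined as above and 'cat.png' is a placeholder path:

from PIL import Image

# Sketch: hflip produces exactly the same pixels as calling transpose directly.
img = Image.open('cat.png')
assert list(hflip(img).getdata()) == list(img.transpose(Image.FLIP_LEFT_RIGHT).getdata())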
Example #21
Source File: cityscapescoarse.py    From PyTorch-Encoding with MIT License
def _sync_transform(self, img, mask):
        # random mirror
        if random.random() < 0.5:
            img  = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        base_size = 2048
        crop_size = 720
        # random scale (long edge drawn from base_size*0.5 to base_size*2.0)
        long_size = random.randint(int(base_size*0.5), int(base_size*2.0))
        w, h = img.size
        if h > w:
            oh = long_size
            ow = int(1.0 * w * oh / h)
            short_size = ow
        else:
            ow = long_size
            oh = int(1.0 * h * ow / w)
            short_size = oh
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # random rotate -10~10, mask using NN rotate
        deg = random.uniform(-10,10)
        img = img.rotate(deg, resample=Image.BILINEAR)
        mask = mask.rotate(deg, resample=Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img  = ImageOps.expand(img,  border=(0,0,padw,padh), fill=0)
            mask = ImageOps.expand(mask, border=(0,0,padw,padh), fill=0)
        # random crop of crop_size (720)
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size) 
        img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
        mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
        # gaussian blur as in PSP ?
        if random.random() < 0.5:
            img = img.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        return img, mask 
Example #22
Source File: base.py    From PyTorch-Encoding with MIT License
def _sync_transform(self, img, mask):
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        w, h = img.size
        long_size = random.randint(int(self.base_size*0.5), int(self.base_size*2.0))
        if h > w:
            oh = long_size
            ow = int(1.0 * w * long_size / h + 0.5)
            short_size = ow
        else:
            ow = long_size
            oh = int(1.0 * h * long_size / w + 0.5)
            short_size = oh
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
        mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
        # final transform
        return img, self._mask_transform(mask) 
Example #23
Source File: joint_transforms.py    From pytorch-semantic-segmentation with MIT License
def __call__(self, img, mask):
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT)
        return img, mask 
Example #24
Source File: data_utils.py    From conditional-motion-propagation with MIT License
def image_flow_aug(img1, img2, flow, flip_horizon=True):
    if flip_horizon:
        if random.random() < 0.5:
            img1 = img1.transpose(Image.FLIP_LEFT_RIGHT)
            img2 = img2.transpose(Image.FLIP_LEFT_RIGHT)
            flow = flow[:,::-1,:].copy()
            flow[:,:,0] = -flow[:,:,0]
    return img1, img2, flow 
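Example #24 shows the extra care optical flow needs: mirroring the image reverses the width axis of the flow field and also negates its horizontal (u) component, because the displacements now point the other way. A tiny numeric sketch, assuming the flow is stored as (H, W, 2) with channel 0 holding the horizontal component:

import numpy as np

# Sketch: flip a toy 1x2 flow field the way Example #24 does.
flow = np.array([[[1.0, 0.0], [3.0, 0.0]]])   # shape (1, 2, 2): (u, v) per pixel
flow = flow[:, ::-1, :].copy()                # reverse the width axis
flow[:, :, 0] = -flow[:, :, 0]                # negate the horizontal component
# -> [[[-3.0, 0.0], [-1.0, 0.0]]]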
Example #25
Source File: pascal_voc_loader.py    From sunets with MIT License
def r_flip(self, img, lbl):
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_LEFT_RIGHT), lbl.transpose(Image.FLIP_LEFT_RIGHT)
        return img, lbl 
Example #26
Source File: coco_loader.py    From sunets with MIT License
def r_flip(self, img, lbl):
        if random.random() < 0.5:
            return img.transpose(Image.FLIP_LEFT_RIGHT), lbl.transpose(Image.FLIP_LEFT_RIGHT)
        return img, lbl 
Example #27
Source File: main.py    From LEDNet with MIT License
def __call__(self, input, target):
        # do something to both images
        input =  Resize(self.height, Image.BILINEAR)(input)
        target = Resize(self.height, Image.NEAREST)(target)

        if(self.augment):
            # Random hflip
            hflip = random.random()
            if (hflip < 0.5):
                input = input.transpose(Image.FLIP_LEFT_RIGHT)
                target = target.transpose(Image.FLIP_LEFT_RIGHT)
            
            # Random translation 0-2 pixels (fill the rest with padding)
            transX = random.randint(-2, 2) 
            transY = random.randint(-2, 2)

            input = ImageOps.expand(input, border=(transX,transY,0,0), fill=0)
            target = ImageOps.expand(target, border=(transX,transY,0,0), fill=255) #pad label filling with 255
            input = input.crop((0, 0, input.size[0]-transX, input.size[1]-transY))
            target = target.crop((0, 0, target.size[0]-transX, target.size[1]-transY))   

        input = ToTensor()(input)
        if (self.enc):
            target = Resize(int(self.height/8), Image.NEAREST)(target)
        target = ToLabel()(target)
        target = Relabel(255, 19)(target)

        return input, target 
Example #28
Source File: segbase.py    From gluon-cv with Apache License 2.0
def _sync_transform(self, img, mask):
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        long_size = random.randint(int(self.base_size*0.5), int(self.base_size*2.0))
        w, h = img.size
        if h > w:
            oh = long_size
            ow = int(1.0 * w * long_size / h + 0.5)
            short_size = ow
        else:
            ow = long_size
            oh = int(1.0 * h * long_size / w + 0.5)
            short_size = oh
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1+crop_size, y1+crop_size))
        mask = mask.crop((x1, y1, x1+crop_size, y1+crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            img = img.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Example #29
Source File: spatial_transforms.py    From TKP with Apache License 2.0
def __call__(self, img):
        """
        Args:
            img (PIL.Image): Image to be flipped.
        Returns:
            PIL.Image: Randomly flipped image.
        """
        if self.p < 0.5:
            return img.transpose(Image.FLIP_LEFT_RIGHT)
        return img 
Example #30
Source File: datasets.py    From deep-head-pose with Apache License 2.0
def __getitem__(self, index):
        img = Image.open(os.path.join(self.data_dir, self.X_train[index] + self.img_ext))
        img = img.convert(self.image_mode)
        txt_path = os.path.join(self.data_dir, self.y_train[index] + self.annot_ext)

        # We get the pose in radians
        annot = open(txt_path, 'r')
        line = annot.readline().split(' ')
        pose = [float(line[1]), float(line[2]), float(line[3])]
        # And convert to degrees.
        yaw = pose[0] * 180 / np.pi
        pitch = pose[1] * 180 / np.pi
        roll = pose[2] * 180 / np.pi
        # Fix the roll in AFLW
        roll *= -1

        # Augment
        # Flip?
        rnd = np.random.random_sample()
        if rnd < 0.5:
            yaw = -yaw
            roll = -roll
            img = img.transpose(Image.FLIP_LEFT_RIGHT)

        # Bin values
        bins = np.array(range(-99, 102, 3))
        labels = torch.LongTensor(np.digitize([yaw, pitch, roll], bins) - 1)
        cont_labels = torch.FloatTensor([yaw, pitch, roll])

        if self.transform is not None:
            img = self.transform(img)

        return img, labels, cont_labels, self.X_train[index]
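Example #30 negates yaw and roll (but not pitch) when the face image is mirrored, then bins each angle into 3-degree classes with np.digitize. A quick numeric sketch of that binning step (the angle values are illustrative):

import numpy as np

# Sketch: the same 3-degree binning used in Example #30.
bins = np.array(range(-99, 102, 3))                 # -99, -96, ..., 99
labels = np.digitize([30.0, -10.0, 5.0], bins) - 1
# -> array([43, 29, 34]): class indices for yaw, pitch, roll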