Python torch.flip() Examples

The following are 30 code examples of torch.flip(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
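As a quick reference before the project examples: torch.flip(input, dims) returns a copy of input with the order of elements reversed along each dimension listed in dims. A minimal sketch (not taken from any of the projects below):

import torch

x = torch.arange(6).reshape(2, 3)   # tensor([[0, 1, 2],
                                    #         [3, 4, 5]])

# Reverse the last dimension (a left/right flip).
torch.flip(x, dims=[1])             # tensor([[2, 1, 0],
                                    #         [5, 4, 3]])

# Reverse both dimensions at once.
torch.flip(x, dims=[0, 1])          # tensor([[5, 4, 3],
                                    #         [2, 1, 0]])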
Example #1
Source File: data_helper.py    From openseg.pytorch with MIT License
def _reverse_data_dict(self, data_dict):
        result = {}
        for k, x in data_dict.items():

            if not isinstance(x, torch.Tensor):
                result[k] = x
                continue

            new_x = torch.flip(x, [len(x.shape) - 1])

            # since direction_label_map, direction_multilabel_map will not appear in inputs, we omit the flipping
            if k == 'offsetmap_w':
                new_x = -new_x
            elif k == 'angle_map':
                new_x = x.clone()
                mask = (x > 0) & (x < 180)
                new_x[mask] = 180 - x[mask]
                mask = (x < 0) & (x > -180)
                new_x[mask] = - (180 + x[mask])

            result[k] = new_x

        return result 
Example #2
Source File: ctc.py    From neural_sp with Apache License 2.0
def _flip_label_probability(log_probs, xlens):
    """Flips a label probability matrix.
    This function rotates a label probability matrix and flips it.
    ``log_probs[i, b, l]`` stores log probability of label ``l`` at ``i``-th
    input in ``b``-th batch.
    The rotated matrix ``r`` is defined as
    ``r[i, b, l] = log_probs[i + xlens[b], b, l]``

    Args:
        log_probs (FloatTensor): `[T, B, vocab]`
        xlens (LongTensor): `[B]`
    Returns:
        FloatTensor: `[T, B, vocab]`

    """
    xmax, bs, vocab = log_probs.size()
    rotate = (torch.arange(xmax, dtype=torch.int64)[:, None] + xlens) % xmax
    return torch.flip(log_probs[rotate[:, :, None],
                                torch.arange(bs, dtype=torch.int64)[None, :, None],
                                torch.arange(vocab, dtype=torch.int64)[None, None, :]], dims=[0]) 
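The rotate-then-flip indexing above reverses each sequence within its own valid length while leaving the padded tail in place. A reduced 2-D sketch of the same trick (toy values, not from neural_sp):

import torch

# Toy scores: T=4 time steps, B=2 sequences with lengths 3 and 4.
log_probs = torch.tensor([[1., 10.],
                          [2., 20.],
                          [3., 30.],
                          [0., 40.]])   # column 0 is padded at t=3
xlens = torch.tensor([3, 4])

T, B = log_probs.shape
rotate = (torch.arange(T)[:, None] + xlens) % T   # [T, B] gather indices
flipped = torch.flip(log_probs[rotate, torch.arange(B)[None, :]], dims=[0])
# flipped[:, 0] -> [3., 2., 1., 0.]   (valid prefix reversed, padding stays last)
# flipped[:, 1] -> [40., 30., 20., 10.]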
Example #3
Source File: ctc.py    From neural_sp with Apache License 2.0
def _flip_path_probability(cum_log_prob, xlens, path_lens):
    """Flips a path probability matrix.
    This function rotates a path probability matrix and flips it.
    ``cum_log_prob[i, b, t]`` stores the log probability at the ``i``-th input and
    at time ``t`` in an output sequence of the ``b``-th batch.
    The rotated matrix ``r`` is defined as
    ``r[i, j, k] = cum_log_prob[i + xlens[j], j, k + path_lens[j]]``

    Args:
        cum_log_prob (FloatTensor): `[T, B, 2*L+1]`
        xlens (LongTensor): `[B]`
        path_lens (LongTensor): `[B]`
    Returns:
        FloatTensor: `[T, B, 2*L+1]`

    """
    xmax, bs, max_path_len = cum_log_prob.size()
    rotate_input = ((torch.arange(xmax, dtype=torch.int64)[:, None] + xlens) % xmax)
    rotate_label = ((torch.arange(max_path_len, dtype=torch.int64) + path_lens[:, None]) % max_path_len)
    return torch.flip(cum_log_prob[rotate_input[:, :, None],
                                   torch.arange(bs, dtype=torch.int64)[None, :, None],
                                   rotate_label], dims=[0, 2]) 
Example #4
Source File: listmle.py    From pt-ranking.github.io with MIT License
def forward(ctx, input):
		'''
		In the forward pass we receive a context object and a Tensor containing the input;
		we must return a Tensor containing the output, and we can use the context object to cache objects for use in the backward pass.
		Specifically, ctx is a context object that can be used to stash information for backward computation.
		You can cache arbitrary objects for use in the backward pass using the ctx.save_for_backward method.
		:param ctx:
		:param input: i.e., batch_preds of [batch, ranking_size], each row represents the relevance predictions for documents within a ltr_adhoc
		:return: [batch, ranking_size], each row represents the log_cumsum_exp value
		'''

		m, _ = torch.max(input, dim=1, keepdim=True)    #a transformation aiming for higher stability when computing softmax() with exp()
		y = input - m
		y = torch.exp(y)
		y_cumsum_t2h = torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])    #row-wise cumulative sum, from tail to head
		fd_output = torch.log(y_cumsum_t2h) + m # corresponding to the '-m' operation

		ctx.save_for_backward(input, fd_output)

		return fd_output 
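The flip -> cumsum -> flip pattern in the forward pass is simply a tail-to-head cumulative sum. A short standalone sketch of the idiom (variable names here are illustrative, not part of the ListMLE code):

import torch

scores = torch.tensor([[0.5, 2.0, 1.0, 3.0]])

# Suffix (tail-to-head) cumulative sum via flip -> cumsum -> flip.
suffix_sum = torch.flip(torch.cumsum(torch.flip(scores, dims=[1]), dim=1), dims=[1])
# suffix_sum -> tensor([[6.5, 6.0, 4.0, 3.0]])

# The same pattern, with the row max subtracted and added back, gives the
# numerically stable tail-to-head log-cumsum-exp computed above.
m, _ = torch.max(scores, dim=1, keepdim=True)
y = torch.exp(scores - m)
log_cumsum_exp = torch.log(
    torch.flip(torch.cumsum(torch.flip(y, dims=[1]), dim=1), dims=[1])) + m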
Example #5
Source File: ctc.py    From neural_sp with Apache License 2.0
def _flip_path(path, path_lens):
    """Flips label sequence.
    This function rotates a label sequence and flips it.
    ``path[b, t]`` stores a label at time ``t`` in ``b``-th batch.
    The rotated matrix ``r`` is defined as
    ``r[b, t] = path[b, t + path_lens[b]]``
    .. ::
       a b c d .     . a b c d    d c b a .
       e f . . .  -> . . . e f -> f e . . .
       g h i j k     g h i j k    k j i h g

    Args:
        path (FloatTensor): `[B, 2*L+1]`
        path_lens (LongTensor): `[B]`
    Returns:
        FloatTensor: `[B, 2*L+1]`

    """
    bs = path.size(0)
    max_path_len = path.size(1)
    rotate = (torch.arange(max_path_len) + path_lens[:, None]) % max_path_len
    return torch.flip(path[torch.arange(bs, dtype=torch.int64)[:, None], rotate], dims=[1]) 
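A small usage sketch of _flip_path on the sequences drawn in the docstring, with the letter labels replaced by integers and 0 used as padding:

import torch

# a b c d .   /   e f . . .   /   g h i j k
path = torch.tensor([[1, 2, 3, 4, 0],
                     [5, 6, 0, 0, 0],
                     [7, 8, 9, 10, 11]])
path_lens = torch.tensor([4, 2, 5])

_flip_path(path, path_lens)
# tensor([[ 4,  3,  2,  1,  0],
#         [ 6,  5,  0,  0,  0],
#         [11, 10,  9,  8,  7]])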
Example #6
Source File: burgerFiniteDifference.py    From ar-pde-cnn with MIT License
def conditionalUpwind(self, u):
        """
        Upwind scheme:
        https://en.wikipedia.org/wiki/Upwind_scheme
        Args:
            u (torch.Tensor): [B, C, H]
        Returns:
            grad_u: [B, C, H]
        """
        u_shape = u.shape
        u = u.view(-1, 1, *u_shape[-1:])

        u1 = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            self.weight, stride=1, padding=0, bias=None) / (self.dx)

        u2 = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            -torch.flip(self.weight, dims=[-1]), stride=1, padding=0, bias=None) / (self.dx)

        # Select the upwind difference according to the sign of u.
        u = torch.where(u > 0, u1, u2)

        return u.view(u_shape)
Example #7
Source File: image.py    From packnet-sfm with MIT License
def flip_model(model, image, flip):
    """
    Flip input image and flip output inverse depth map

    Parameters
    ----------
    model : nn.Module
        Module to be used
    image : torch.Tensor [B,3,H,W]
        Input image
    flip : bool
        True if the flip is happening

    Returns
    -------
    inv_depths : list of torch.Tensor [B,1,H,W]
        List of predicted inverse depth maps
    """
    if flip:
        return [flip_lr(inv_depth) for inv_depth in model(flip_lr(image))]
    else:
        return model(image)

Example #8
Source File: image.py    From packnet-sfm with MIT License
def flip_lr(image):
    """
    Flip image horizontally

    Parameters
    ----------
    image : torch.Tensor [B,3,H,W]
        Image to be flipped

    Returns
    -------
    image_flipped : torch.Tensor [B,3,H,W]
        Flipped image
    """
    assert image.dim() == 4, 'You need to provide a [B,C,H,W] image to flip'
    return torch.flip(image, [3]) 
Example #9
Source File: loss.py    From real-world-sr with MIT License
def forward(self, x, y):
        if self.rotations:
            k_rot = random.choice([-1, 0, 1])
            x = torch.rot90(x, k_rot, [2, 3])
            y = torch.rot90(y, k_rot, [2, 3])
        if self.flips:
            if random.choice([True, False]):
                x = torch.flip(x, (2,))
                y = torch.flip(y, (2,))
            if random.choice([True, False]):
                x = torch.flip(x, (3,))
                y = torch.flip(y, (3,))
        return self.loss(x, y) 
Example #10
Source File: model.py    From conv-emotion with MIT License
def _reverse_seq(self, X, mask):
        """
        X -> seq_len, batch, dim
        mask -> batch, seq_len
        """
        X_ = X.transpose(0,1)
        mask_sum = torch.sum(mask, 1).int()

        xfs = []
        for x, c in zip(X_, mask_sum):
            xf = torch.flip(x[:c], [0])
            xfs.append(xf)

        return pad_sequence(xfs) 
Example #11
Source File: manipulations.py    From heat with MIT License
def fliplr(a):
    """
        Flip array in the left/right direction. If a.ndim > 2, flip along dimension 1.

        Parameters
        ----------
        a: ht.DNDarray
            Input array to be flipped, must be at least 2-D

        Returns
        -------
        res: ht.DNDarray
            The flipped array.

        Examples
        --------
        >>> a = ht.array([[0,1],[2,3]])
        >>> ht.fliplr(a)
        tensor([[1, 0],
                [3, 2]])

        >>> b = ht.array([[0,1,2],[3,4,5]], split=0)
        >>> ht.fliplr(b)
        (1/2) tensor([[2, 1, 0]])
        (2/2) tensor([[5, 4, 3]])
    """
    return flip(a, 1) 
Example #12
Source File: segmenter.py    From gandissect with MIT License
def raw_seg_prediction(self, tensor_images, downsample=1):
        '''
        Generates a segmentation by applying multiresolution voting on
        the segmentation model, using a set of resolutions (rounded to
        multiples of 32 pixels) taken from the example benchmark code.
        '''
        y, x = tensor_images.shape[2:]
        b = len(tensor_images)
        tensor_images = (tensor_images + 1) / 2 * 255
        tensor_images = torch.flip(tensor_images, (1,)) # BGR!!!?
        tensor_images -= torch.tensor([102.9801, 115.9465, 122.7717]).to(
                   dtype=tensor_images.dtype, device=tensor_images.device
                   )[None,:,None,None]
        seg_shape = (y // downsample, x // downsample)
        # We want these to be multiples of 32 for the model.
        sizes = [(s, s) for s in self.segsizes]
        pred = {category: torch.zeros(
            len(tensor_images), len(self.segmodel.labeldata[category]),
            seg_shape[0], seg_shape[1]).cuda()
            for category in ['object', 'material']}
        part_pred = {partobj_index: torch.zeros(
            len(tensor_images), len(partindex),
            seg_shape[0], seg_shape[1]).cuda()
            for partobj_index, partindex in enumerate(self.part_index)}
        for size in sizes:
            if size == tensor_images.shape[2:]:
                resized = tensor_images
            else:
                resized = torch.nn.AdaptiveAvgPool2d(size)(tensor_images)
            r_pred = self.segmodel(
                dict(img=resized), seg_size=seg_shape)
            for k in pred:
                pred[k] += r_pred[k]
            for k in part_pred:
                part_pred[k] += r_pred['part'][k]
        return pred, part_pred 
Example #13
Source File: segmenter.py    From gandissect with MIT License
def raw_seg_prediction(self, tensor_images, downsample=1):
        '''
        Generates a segmentation by applying multiresolution voting on
        the segmentation model, using a set of resolutions (rounded to
        multiples of 32 pixels) taken from the example benchmark code.
        '''
        y, x = tensor_images.shape[2:]
        b = len(tensor_images)
        # Flip the RGB order if specified.
        if self.bgr:
           tensor_images = torch.flip(tensor_images, (1,))
        # Transform from our [-1..1] range to torch standard [0..1] range
        # and then apply normalization.
        tensor_images = ((tensor_images + 1) / 2
                ).sub_(self.imagemean[None,:,None,None].to(tensor_images.device)
                ).div_(self.imagestd[None,:,None,None].to(tensor_images.device))
        # Output shape can be downsampled.
        seg_shape = (y // downsample, x // downsample)
        # We want these to be multiples of 32 for the model.
        sizes = [(s, s) for s in self.segsizes]
        pred = torch.zeros(
            len(tensor_images), (self.num_underlying_classes),
            seg_shape[0], seg_shape[1]).cuda()
        for size in sizes:
            if size == tensor_images.shape[2:]:
                resized = tensor_images
            else:
                resized = torch.nn.AdaptiveAvgPool2d(size)(tensor_images)
            raw_pred = self.segmodel(
                dict(img_data=resized), segSize=seg_shape)
            softmax_pred = torch.empty_like(raw_pred)
            for catindex in self.category_indexes.values():
                softmax_pred[:, catindex] = torch.nn.functional.softmax(
                        raw_pred[:, catindex], dim=1)
            pred += softmax_pred
        return pred 
Example #14
Source File: dnn_models.py    From SincNet with MIT License
def flip(x, dim):
    xsize = x.size()
    dim = x.dim() + dim if dim < 0 else dim
    x = x.contiguous()
    x = x.view(-1, *xsize[dim:])
    x = x.view(x.size(0), x.size(1), -1)[:, getattr(torch.arange(x.size(1)-1, 
                      -1, -1), ('cpu','cuda')[x.is_cuda])().long(), :]
    return x.view(xsize) 
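This helper predates torch.flip and reverses a single dimension via index selection; on recent PyTorch versions it should match torch.flip directly, as this small check sketches (assuming the flip helper above is in scope):

import torch

x = torch.randn(2, 3, 4)
assert torch.equal(flip(x, 0), torch.flip(x, dims=[0]))
assert torch.equal(flip(x, 2), torch.flip(x, dims=[2]))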
Example #15
Source File: dnn_models.py    From SincNet with MIT License
def sinc(band,t_right):
    y_right= torch.sin(2*math.pi*band*t_right)/(2*math.pi*band*t_right)
    y_left= flip(y_right,0)

    y=torch.cat([y_left,Variable(torch.ones(1)).cuda(),y_right])

    return y 
Example #16
Source File: dnn_models.py    From SincNet with MIT License
def forward(self, waveforms):
        """
        Parameters
        ----------
        waveforms : `torch.Tensor` (batch_size, 1, n_samples)
            Batch of waveforms.
        Returns
        -------
        features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
            Batch of sinc filters activations.
        """

        self.n_ = self.n_.to(waveforms.device)

        self.window_ = self.window_.to(waveforms.device)

        low = self.min_low_hz  + torch.abs(self.low_hz_)
        
        high = torch.clamp(low + self.min_band_hz + torch.abs(self.band_hz_),self.min_low_hz,self.sample_rate/2)
        band=(high-low)[:,0]
        
        f_times_t_low = torch.matmul(low, self.n_)
        f_times_t_high = torch.matmul(high, self.n_)

        band_pass_left=((torch.sin(f_times_t_high)-torch.sin(f_times_t_low))/(self.n_/2))*self.window_ # Equivalent of Eq.4 of the reference paper (SPEAKER RECOGNITION FROM RAW WAVEFORM WITH SINCNET). I just have expanded the sinc and simplified the terms. This way I avoid several useless computations. 
        band_pass_center = 2*band.view(-1,1)
        band_pass_right= torch.flip(band_pass_left,dims=[1])
        
        
        band_pass=torch.cat([band_pass_left,band_pass_center,band_pass_right],dim=1)

        
        band_pass = band_pass / (2*band[:,None])
        

        self.filters = (band_pass).view(
            self.out_channels, 1, self.kernel_size)

        return F.conv1d(waveforms, self.filters, stride=self.stride,
                        padding=self.padding, dilation=self.dilation,
                         bias=None, groups=1) 
Example #17
Source File: burgerFiniteDifference.py    From ar-pde-cnn with MIT License
def __call__(self, u):
        """
        Args:
            u (torch.Tensor): (B, C, H)
        Returns:
            grad_u: (B, C, H)
        """
        u_shape = u.shape
        u = u.view(-1, 1, *u_shape[-1:])

        flux = u**2/2.0
        
        edge_flux = self.calcEdgeFlux(flux)
        edge_flux_r = self.calcEdgeFlux(torch.flip(flux, dims=[-1]))
        edge_flux_r = torch.flip(edge_flux_r, dims=[-1])

        flux_grad = (edge_flux[:,:,1:] - edge_flux[:,:,:-1])/self.dx
        flux_grad_r = (edge_flux_r[:,:,1:] - edge_flux_r[:,:,:-1])/self.dx
        
        # with torch.no_grad():
        #     grad = F.conv1d(F.pad(u, (1,1), mode='circular'), 
        #         self.weight, stride=1, padding=0, bias=None) / (self.dx)

        flux_grad = torch.where(u < 0, flux_grad, flux_grad_r)

        return flux_grad.view(u_shape) 
Example #18
Source File: augmentations.py    From FATE with Apache License 2.0
def horisontal_flip(images, targets):
    images = torch.flip(images, [-1])
    targets[:, 2] = 1 - targets[:, 2]  # mirror the normalized box x-coordinate
    return images, targets 
Example #19
Source File: test_both_side_detection.py    From Complex-YOLOv3 with GNU General Public License v3.0
def detect_and_draw(model, bev_maps, Tensor, is_front=True):

    # If back side bev, flip around vertical axis
    if not is_front:
        bev_maps = torch.flip(bev_maps, [2, 3])
    imgs = Variable(bev_maps.type(Tensor))

    # Get Detections
    img_detections = []
    with torch.no_grad():
        detections = model(imgs)
        detections = utils.non_max_suppression_rotated_bbox(detections, opt.conf_thres, opt.nms_thres)

    img_detections.extend(detections)

    # Only supports single batch
    display_bev = np.zeros((cnf.BEV_WIDTH, cnf.BEV_WIDTH, 3))
    
    bev_map = bev_maps[0].numpy()
    display_bev[:, :, 2] = bev_map[0, :, :]  # r_map
    display_bev[:, :, 1] = bev_map[1, :, :]  # g_map
    display_bev[:, :, 0] = bev_map[2, :, :]  # b_map

    display_bev *= 255
    display_bev = display_bev.astype(np.uint8)

    for detections in img_detections:
        if detections is None:
            continue
        # Rescale boxes to original image
        detections = utils.rescale_boxes(detections, opt.img_size, display_bev.shape[:2])
        for x, y, w, l, im, re, conf, cls_conf, cls_pred in detections:
            yaw = np.arctan2(im, re)
            # Draw rotated box
            bev_utils.drawRotatedBox(display_bev, x, y, w, l, yaw, cnf.colors[int(cls_pred)])

    return display_bev, img_detections 
Example #20
Source File: kitti_yolo_dataset.py    From Complex-YOLOv3 with GNU General Public License v3.0
def horisontal_flip(self, images, targets):
        images = torch.flip(images, [-1])
        targets[:, 2] = 1 - targets[:, 2] # horizontal flip
        targets[:, 6] = - targets[:, 6] # yaw angle flip

        return images, targets 
Example #21
Source File: test.py    From pytorch-saltnet with MIT License
def predict(model, batch, flipped_batch, use_gpu):
    image_ids, inputs = batch['image_id'], batch['input']
    if use_gpu:
        inputs = inputs.cuda()
    outputs, _, _ = model(inputs)
    probs = torch.sigmoid(outputs)

    if flipped_batch is not None:
        flipped_image_ids, flipped_inputs = flipped_batch['image_id'], flipped_batch['input']
        # assert image_ids == flipped_image_ids
        if use_gpu:
            flipped_inputs = flipped_inputs.cuda()
        flipped_outputs, _, _ = model(flipped_inputs)
        flipped_probs = torch.sigmoid(flipped_outputs)

        probs += torch.flip(flipped_probs, (3,))  # flip back and add
        probs *= 0.5

    probs = probs.squeeze(1).cpu().numpy()
    if args.resize:
        probs = np.swapaxes(probs, 0, 2)
        probs = cv2.resize(probs, (orig_img_size, orig_img_size), interpolation=cv2.INTER_LINEAR)
        probs = np.swapaxes(probs, 0, 2)
    else:
        probs = probs[:, y0:y1, x0:x1]
    return probs 
Example #22
Source File: util.py    From allennlp with Apache License 2.0
def masked_flip(padded_sequence: torch.Tensor, sequence_lengths: List[int]) -> torch.Tensor:
    """
    Flips a padded tensor along the time dimension without affecting masked entries.

    # Parameters

    padded_sequence : `torch.Tensor`
        The tensor to flip along the time dimension.
        Assumed to be of dimensions (batch size, num timesteps, ...)
    sequence_lengths : `List[int]`
        A list containing the lengths of each unpadded sequence in the batch.

    # Returns

    `torch.Tensor`
        A `torch.Tensor` of the same shape as padded_sequence.
    """
    assert padded_sequence.size(0) == len(
        sequence_lengths
    ), f"sequence_lengths length ${len(sequence_lengths)} does not match batch size ${padded_sequence.size(0)}"
    num_timesteps = padded_sequence.size(1)
    flipped_padded_sequence = torch.flip(padded_sequence, [1])
    sequences = [
        flipped_padded_sequence[i, num_timesteps - length :]
        for i, length in enumerate(sequence_lengths)
    ]
    return torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True) 
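A small usage sketch of masked_flip on a padded batch, assuming the function above is in scope (2 sequences, 4 timesteps, feature size 1; values chosen for illustration):

import torch

padded = torch.tensor([[[1.], [2.], [0.], [0.]],
                       [[1.], [2.], [3.], [4.]]])
masked_flip(padded, [2, 4])
# tensor([[[2.], [1.], [0.], [0.]],
#         [[4.], [3.], [2.], [1.]]])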
Example #23
Source File: vpg.py    From autonomous-learning-library with MIT License
def _compute_discounted_returns(self, rewards):
        returns = rewards.clone()
        t = len(returns) - 1
        discounted_return = 0
        for reward in torch.flip(rewards, dims=(0,)):
            discounted_return = reward + self.discount_factor * discounted_return
            returns[t] = discounted_return
            t -= 1
        return returns 
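For concreteness, the same backward loop run standalone with the discount factor hard-coded to 0.5 (illustrative values only):

import torch

rewards = torch.tensor([1., 0., 2.])
discount_factor = 0.5

returns = rewards.clone()
t = len(returns) - 1
discounted_return = 0
for reward in torch.flip(rewards, dims=(0,)):
    discounted_return = reward + discount_factor * discounted_return
    returns[t] = discounted_return
    t -= 1
# returns -> tensor([1.5000, 1.0000, 2.0000])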
Example #24
Source File: random_flip.py    From torchio with MIT License
def apply_transform(self, sample: Subject) -> dict:
        axes_to_flip_hot = self.get_params(self.axes, self.flip_probability)
        random_parameters_dict = {'axes': axes_to_flip_hot}
        items = sample.get_images_dict(intensity_only=False).items()
        for image_name, image_dict in items:
            data = image_dict[DATA]
            is_2d = data.shape[-3] == 1
            dims = []
            for dim, flip_this in enumerate(axes_to_flip_hot):
                if not flip_this:
                    continue
                actual_dim = dim + 1  # images are 4D
                # If the user is using 2D images and they use (0, 1) for axes,
                # they probably mean (1, 2). This should make this transform
                # more user-friendly.
                if is_2d:
                    actual_dim += 1
                if actual_dim > 3:
                    message = (
                        f'Image "{image_name}" with shape {data.shape} seems to'
                        ' be 2D, so all axes must be in (0, 1),'
                        f' but they are {self.axes}'
                    )
                    raise RuntimeError(message)
                dims.append(actual_dim)
            data = torch.flip(data, dims=dims)
            image_dict[DATA] = data
        sample.add_transform(self, random_parameters_dict)
        return sample 
Example #25
Source File: utils.py    From STE-NVAN with MIT License
def __getitem__(self,ID):
        sub_info = self.info[self.info[:,1] == ID] 

        if self.cam_type == 'normal':
            tracks_pool = list(np.random.choice(sub_info[:,0],self.track_per_class))
        elif self.cam_type == 'two_cam':
            unique_cam = np.random.permutation(np.unique(sub_info[:,2]))[:2]
            tracks_pool = list(np.random.choice(sub_info[sub_info[:,2]==unique_cam[0],0],1))+\
                list(np.random.choice(sub_info[sub_info[:,2]==unique_cam[1],0],1))
        elif self.cam_type == 'cross_cam':
            unique_cam = np.random.permutation(np.unique(sub_info[:,2]))
            while len(unique_cam) < self.track_per_class:
                unique_cam = np.append(unique_cam,unique_cam)
            unique_cam = unique_cam[:self.track_per_class]
            tracks_pool = []
            for i in range(self.track_per_class):
                tracks_pool += list(np.random.choice(sub_info[sub_info[:,2]==unique_cam[i],0],1))

        one_id_tracks = []
        for track_pool in tracks_pool:
            idx = np.random.choice(track_pool.shape[1],track_pool.shape[0])
            number = track_pool[np.arange(len(track_pool)),idx]
            imgs = [self.transform(Image.open(path)) for path in self.imgs[number]]
            imgs = torch.stack(imgs,dim=0)

            random_p = random.random()
            if random_p  < self.flip_p:
                imgs = torch.flip(imgs,dims=[3])
            one_id_tracks.append(imgs)
        return torch.stack(one_id_tracks,dim=0), ID*torch.ones(self.track_per_class,dtype=torch.int64) 
Example #26
Source File: sampling_utils.py    From pt-ranking.github.io with MIT License
def batch_global_unique_count(batch_std_labels, max_rele_lavel, descending=True):
    ''' Count, per row of batch_std_labels, the number of entries equal to each relevance level in [0, max_rele_lavel], ordered by level (descending by default). '''
    batch_asc_std_labels, _ = torch.sort(batch_std_labels, dim=1)
    global_uni_elements = torch.arange(max_rele_lavel+1).type(tensor) # default ascending order

    asc_uni_cnts = torch.cat([(batch_asc_std_labels == e).sum(dim=1, keepdim=True) for e in global_uni_elements], dim=1) # row-wise count per element

    if descending:
        des_uni_cnts = torch.flip(asc_uni_cnts, dims=[1])
        return des_uni_cnts
    else:
        return asc_uni_cnts 
Example #27
Source File: graph.py    From PPGNet with MIT License
def forward(self, line_feat):
        num_st, num_ed, c, s = line_feat.size()
        output_st2ed = line_feat.view(num_st * num_ed, c, s)
        output_ed2st = torch.flip(output_st2ed, (2, ))
        output_st2ed = self.dblock(output_st2ed)
        output_ed2st = self.dblock(output_ed2st)
        adjacency_matrix1 = self.connectivity_inference(output_st2ed).view(num_st, num_ed)
        adjacency_matrix2 = self.connectivity_inference(output_ed2st).view(num_st, num_ed)

        return torch.min(adjacency_matrix1, adjacency_matrix2) 
Example #28
Source File: variational_rnn.py    From fastNLP with Apache License 2.0
def flip(x, dims):
        indices = [slice(None)] * x.dim()
        for dim in dims:
            indices[dim] = torch.arange(
                x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
        return x[tuple(indices)] 
Example #29
Source File: spatial_gru.py    From MatchZoo-py with Apache License 2.0
def forward(self, inputs):
        """
        Perform SpatialGRU on the word interaction matrix.

        :param inputs: input tensors.
        """

        batch_size, channels, left_length, right_length = inputs.shape

        # inputs = [L, R, B, C]
        inputs = inputs.permute([2, 3, 0, 1])
        if self._direction == 'rb':
            # inputs = [R, L, B, C]
            inputs = torch.flip(inputs, [0, 1])

        # states = [L+1, R+1, B, U]
        states = [
            [torch.zeros([batch_size, self._units]).type_as(inputs)
             for j in range(right_length + 1)] for i in range(left_length + 1)
        ]

        # Calculate h_ij
        # h_ij = [B, U]
        for i in range(left_length):
            for j in range(right_length):
                states[i + 1][j + 1] = self.calculate_recurrent_unit(inputs, states, i, j)
        return states[left_length][right_length] 
Example #30
Source File: Inference.py    From pneumothorax-segmentation with MIT License
def inference_model(model, loader, device, use_flip):
    mask_dict = {}
    for image_ids, images in tqdm(loader):
        masks = inference_image(model, images, device)
        if use_flip:
            flipped_imgs = torch.flip(images, dims=(3,))
            flipped_masks = inference_image(model, flipped_imgs, device)
            flipped_masks = np.flip(flipped_masks, axis=2)
            masks = (masks + flipped_masks) / 2
        for name, mask in zip(image_ids, masks):
            mask_dict[name] = mask.astype(np.float32)
    return mask_dict