Python torch.reshape() Examples

The following are 30 code examples of torch.reshape(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module torch, or try the search function.
Example #1
Source File: loss.py    From PyMIC with Apache License 2.0
def reshape_prediction_and_ground_truth(predict, soft_y):
    """
    reshape input variables of shape [B, C, D, H, W] to [voxel_n, C]
    """
    tensor_dim = len(predict.size())
    num_class  = list(predict.size())[1]
    if(tensor_dim == 5):
        soft_y  = soft_y.permute(0, 2, 3, 4, 1)
        predict = predict.permute(0, 2, 3, 4, 1)
    elif(tensor_dim == 4):
        soft_y  = soft_y.permute(0, 2, 3, 1)
        predict = predict.permute(0, 2, 3, 1)
    else:
        raise ValueError("{0:}D tensor not supported".format(tensor_dim))
    
    predict = torch.reshape(predict, (-1, num_class)) 
    soft_y  = torch.reshape(soft_y,  (-1, num_class))
      
    return predict, soft_y 
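To make the shapes concrete, here is a minimal, hypothetical usage sketch (the tensor sizes are made up, and reshape_prediction_and_ground_truth is assumed to be in scope):

# Hypothetical usage: flatten a 5D prediction and soft label to [voxel_n, C].
import torch

predict = torch.rand(2, 4, 8, 16, 16)    # [B, C, D, H, W]
soft_y  = torch.rand(2, 4, 8, 16, 16)
p2d, y2d = reshape_prediction_and_ground_truth(predict, soft_y)
print(p2d.shape, y2d.shape)              # torch.Size([4096, 4]) twice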
Example #2
Source File: recurrent.py    From Tagger with BSD 3-Clause "New" or "Revised" License
def forward(self, x, state):
        c, h = state

        gates = self.gates(torch.cat([x, h], 1))

        if self.layer_norm is not None:
            combined = self.layer_norm(
                torch.reshape(gates, [-1, 4, self.output_size]))
        else:
            combined = torch.reshape(gates, [-1, 4, self.output_size])

        i, j, f, o = torch.unbind(combined, 1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)

        new_c = f * c + i * torch.tanh(j)

        if self.activation is None:
            # Do not use tanh activation
            new_h = o * new_c
        else:
            new_h = o * self.activation(new_c)

        return new_h, (new_c, new_h) 
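The reshape above turns the fused gate projection of shape [batch, 4 * output_size] into [batch, 4, output_size] so torch.unbind can split out the four gates. A standalone sketch of that step, assuming output_size = 8:

# Standalone sketch of the gate split used above (output_size is assumed).
import torch

output_size = 8
gates = torch.randn(3, 4 * output_size)              # fused gate projection
combined = torch.reshape(gates, [-1, 4, output_size])
i, j, f, o = torch.unbind(combined, 1)               # four [3, 8] tensors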
Example #3
Source File: my_net2d.py    From PyMIC with Apache License 2.0
def forward(self, x):
        x_shape = list(x.shape)
        if(len(x_shape) == 5):
          [N, C, D, H, W] = x_shape
          new_shape = [N*D, C, H, W]
          x = torch.transpose(x, 1, 2)
          x = torch.reshape(x, new_shape)
        x0 = self.in_conv(x)
        x1 = self.down1(x0)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        
        x = self.up1(x4, x3)
        x = self.up2(x, x2)
        x = self.up3(x, x1)
        x = self.up4(x, x0)
        output = self.out_conv(x)

        if(len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
        return output 
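This network folds the depth axis of a 5D volume into the batch axis so 2D convolutions can process each slice; Examples #4 to #7 reuse the same pattern. A minimal round-trip sketch of the fold:

# Round-trip sketch: fold depth into batch, then restore the 5D layout.
import torch

N, C, D, H, W = 2, 3, 5, 8, 8
x = torch.rand(N, C, D, H, W)
folded = torch.reshape(torch.transpose(x, 1, 2), [N * D, C, H, W])
restored = torch.transpose(torch.reshape(folded, [N, D, C, H, W]), 1, 2)
assert torch.equal(x, restored)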
Example #4
Source File: unet2d5.py    From PyMIC with Apache License 2.0
def forward(self, x):
        x_shape = list(x.shape)
        if(self.dim == 2 and len(x_shape) == 5):
            [N, C, D, H, W] = x_shape
            new_shape = [N*D, C, H, W]
            x = torch.transpose(x, 1, 2)
            x = torch.reshape(x, new_shape)
        output = self.conv(x)
        if(self.downsample):
            output_d = self.down_layer(output)
        else:
            output_d = None 
        if(self.dim == 2 and len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
            if(self.downsample):
                new_shape = [N, D] + list(output_d.shape)[1:]
                output_d = torch.reshape(output_d, new_shape)
                output_d = torch.transpose(output_d, 1, 2)

        return output, output_d 
Example #5
Source File: unet2d5.py    From PyMIC with Apache License 2.0
def forward(self, x1, x2):
        x1_shape = list(x1.shape)
        x2_shape = list(x2.shape)
        if(self.dim == 2 and len(x1_shape) == 5):
            [N, C, D, H, W] = x1_shape
            new_shape = [N*D, C, H, W]
            x1 = torch.transpose(x1, 1, 2)
            x1 = torch.reshape(x1, new_shape)
            [N, C, D, H, W] = x2_shape
            new_shape = [N*D, C, H, W]
            x2 = torch.transpose(x2, 1, 2)
            x2 = torch.reshape(x2, new_shape)

        x1 = self.up(x1)
        output = torch.cat([x2, x1], dim=1)
        output = self.conv(output)
        if(self.dim == 2 and len(x1_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
        return output 
Example #6
Source File: unet2d.py    From PyMIC with Apache License 2.0
def forward(self, x):
        x_shape = list(x.shape)
        if(len(x_shape) == 5):
          [N, C, D, H, W] = x_shape
          new_shape = [N*D, C, H, W]
          x = torch.transpose(x, 1, 2)
          x = torch.reshape(x, new_shape)
        x0 = self.in_conv(x)
        x1 = self.down1(x0)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        
        x = self.up1(x4, x3)
        x = self.up2(x, x2)
        x = self.up3(x, x1)
        x = self.up4(x, x0)
        output = self.out_conv(x)

        if(len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
        return output 
Example #7
Source File: unet2d_scse.py    From PyMIC with Apache License 2.0
def forward(self, x):
        x_shape = list(x.shape)
        if(len(x_shape) == 5):
          [N, C, D, H, W] = x_shape
          new_shape = [N*D, C, H, W]
          x = torch.transpose(x, 1, 2)
          x = torch.reshape(x, new_shape)
        x0 = self.in_conv(x)
        x1 = self.down1(x0)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        
        x = self.up1(x4, x3)
        x = self.up2(x, x2)
        x = self.up3(x, x1)
        x = self.up4(x, x0)
        output = self.out_conv(x)

        if(len(x_shape) == 5):
            new_shape = [N, D] + list(output.shape)[1:]
            output = torch.reshape(output, new_shape)
            output = torch.transpose(output, 1, 2)
        return output 
Example #8
Source File: losses.py    From centerpose with MIT License
def create_window(window_size, channel=3, sigma=1.5, gauss='original', n=2):
    if gauss == 'original':
        _1D_window = gaussian(window_size, sigma).unsqueeze(1)
        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
        return window
    elif gauss == 'butterworth':
        _1D_window = butterworth(window_size, sigma, n).unsqueeze(1)
        _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
        window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
        return window
    else:
        g = _fspecial_gauss(window_size, sigma)
        g = torch.reshape(g, (1, 1, window_size, window_size))
        # 2019.06.05.
        # https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853
        g = tile(g, 0, 3)
        return g 
Example #9
Source File: utility.py    From hmd with MIT License
def photometricLossgray(colorImg_gray, depthImg, albedoImg_gray, 
                        mask, lighting_est, device, K, thres):
    
    N,C,H,W = colorImg_gray.size()
    
    # color loss
    normals, _ = lighting.depthToNormalBatch(depthImg, device, K, thres)
    SHs     = lighting.normalToSHBatch(normals,device)
    
    SHs    = torch.reshape(SHs, (N, H*W, 9))
    lighting_est = torch.reshape(lighting_est, (N, 9, 1))
    
    # bmm of SHs [N, H*W, 9] with lighting [N, 9, 1] gives [N, H*W, 1], then reshape to [N, H, W]
    color_shading = torch.bmm(SHs, lighting_est) # N H*W 1   
    color_shading = torch.reshape(color_shading, (N, H, W))
    
    mask1 = torch.reshape(mask[:,0,:,:], (N,H,W)) # one layer mask
    color_pre  = mask1 * (color_shading * albedoImg_gray) # N*H*W
    colorImg_gray_mask = mask1 * colorImg_gray # mask
    
    colorloss = F.l1_loss(color_pre, colorImg_gray_mask) # NHW size directly
        
    return colorloss, color_pre

# comes from hmr-src/util/image.py
Example #10
Source File: losses.py    From centerpose with MIT License
def _fspecial_gauss(window_size, sigma=1.5):
    # Function to mimic the 'fspecial' gaussian MATLAB function.
    coords = np.arange(0, window_size, dtype=np.float32)
    coords -= (window_size - 1) / 2.0

    g = coords ** 2
    g *= (-0.5 / (sigma ** 2))
    g = np.reshape(g, (1, -1)) + np.reshape(g, (-1, 1))
    g = torch.from_numpy(np.reshape(g, (1, -1)))
    g = torch.softmax(g, dim=1)
    g = g / g.sum()
    return g


# 2019.05.26. butterworth filter.
# ref: http://www.cnblogs.com/laumians-notes/p/8592968.html 
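Since _fspecial_gauss is self-contained, the kernel construction from the else branch of Example #8 can be sketched directly; window_size here is an arbitrary choice:

# Sketch: build a 5x5 Gaussian kernel and reshape it to conv2d weight layout.
import torch

window_size = 5
g = _fspecial_gauss(window_size, sigma=1.5)           # shape [1, 25]
g = torch.reshape(g, (1, 1, window_size, window_size))
print(g.shape, float(g.sum()))                        # torch.Size([1, 1, 5, 5]) 1.0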
Example #11
Source File: test_tensorboard.py    From PyMIC with Apache License 2.0
def soft_dice_loss(predict, soft_y, num_class, softmax = True):
    soft_y  = soft_y.permute(0, 2, 3, 4, 1)
    soft_y  = torch.reshape(soft_y, (-1, num_class))
    predict = predict.permute(0, 2, 3, 4, 1)
    predict = torch.reshape(predict, (-1, num_class))
    if(softmax):
        predict = nn.Softmax(dim = -1)(predict)
    y_vol = torch.sum(soft_y, dim = 0)
    p_vol = torch.sum(predict, dim = 0)
    intersect = torch.sum(soft_y * predict, dim = 0)
    dice_score = (2.0 * intersect + 1e-5)/ (y_vol + p_vol + 1e-5)
    dice_score = torch.mean(dice_score)
    return 1.0 - dice_score 
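A minimal sketch of calling this loss, with illustrative shapes and a one-hot target (soft_dice_loss from above is assumed to be in scope):

# Sketch: dice loss between a random prediction and a one-hot ground truth.
import torch
import torch.nn.functional as F

num_class = 3
predict = torch.randn(2, num_class, 4, 8, 8)          # [B, C, D, H, W]
labels  = torch.randint(0, num_class, (2, 4, 8, 8))
soft_y  = F.one_hot(labels, num_class).permute(0, 4, 1, 2, 3).float()
loss = soft_dice_loss(predict, soft_y, num_class)
print(float(loss))                                    # a value in (0, 1)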
Example #12
Source File: plotting.py    From latent_ode with MIT License
def plot_vector_field(ax, odefunc, latent_dim, device):
	# Code borrowed from https://github.com/rtqichen/ffjord/blob/29c016131b702b307ceb05c70c74c6e802bb8a44/diagnostics/viz_toy.py
	K = 13j
	y, x = np.mgrid[-6:6:K, -6:6:K]
	K = int(K.imag)
	zs = torch.from_numpy(np.stack([x, y], -1).reshape(K * K, 2)).to(device, torch.float32)
	if latent_dim > 2:
		# Plots dimensions 0 and 2
		zs = torch.cat((zs, torch.zeros(K * K, latent_dim-2)), 1)
	dydt = odefunc(0, zs)
	dydt = -dydt.cpu().detach().numpy()
	if latent_dim > 2:
		dydt = dydt[:,:2]

	mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
	dydt = (dydt / mag)
	dydt = dydt.reshape(K, K, 2)

	ax.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], #color = dydt[:, :, 0],
		cmap="coolwarm", linewidth=2)

	# ax.quiver(
	# 	x, y, dydt[:, :, 0], dydt[:, :, 1],
	# 	np.exp(logmag), cmap="coolwarm", pivot="mid", scale = 100,
	# )
	ax.set_xlim(-6, 6)
	ax.set_ylim(-6, 6)
	#ax.axis("off") 
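A hypothetical driver for this plot, substituting the simple linear field dz/dt = -z for a trained ODE function:

# Hypothetical driver: visualize the field of dz/dt = -z on the CPU.
import matplotlib.pyplot as plt
import torch

fig, ax = plt.subplots()
odefunc = lambda t, z: -z            # stands in for a trained ODE network
plot_vector_field(ax, odefunc, latent_dim=2, device=torch.device("cpu"))
fig.savefig("vector_field.png")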
Example #13
Source File: smpl_torch_batch.py    From SMPL with MIT License
def rodrigues(r):
    """
    Rodrigues' rotation formula that turns an axis-angle tensor into a rotation
    matrix in a batched manner.

    Parameter:
    ----------
    r: Axis-angle rotation tensor of shape [batch_size * angle_num, 1, 3].

    Return:
    -------
    Rotation matrix of shape [batch_size * angle_num, 3, 3].

    """
    eps = r.clone().normal_(std=1e-8)
    theta = torch.norm(r + eps, dim=(1, 2), keepdim=True)  # dim cannot be tuple
    theta_dim = theta.shape[0]
    r_hat = r / theta
    cos = torch.cos(theta)
    z_stick = torch.zeros(theta_dim, dtype=torch.float64).to(r.device)
    m = torch.stack(
      (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
       -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), dim=1)
    m = torch.reshape(m, (-1, 3, 3))
    i_cube = (torch.eye(3, dtype=torch.float64).unsqueeze(dim=0) \
             + torch.zeros((theta_dim, 3, 3), dtype=torch.float64)).to(r.device)
    A = r_hat.permute(0, 2, 1)
    dot = torch.matmul(A, r_hat)
    R = cos * i_cube + (1 - cos) * dot + torch.sin(theta) * m
    return R 
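A quick numerical check: a rotation of pi/2 about the z-axis should map the x-axis onto the y-axis (float64 matches the dtype hard-coded above):

# Sanity check: rotate the x-axis by pi/2 about z; expect the y-axis.
import math
import torch

r = torch.tensor([[[0.0, 0.0, math.pi / 2]]], dtype=torch.float64)  # [1, 1, 3]
R = rodrigues(r)                                                    # [1, 3, 3]
v = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float64)
print(R[0] @ v)                      # approximately tensor([0., 1., 0.])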
Example #14
Source File: smpl_torch_batch.py    From densebody_pytorch with GNU General Public License v3.0
def rodrigues(r):
    """
    Rodrigues' rotation formula that turns an axis-angle tensor into a rotation
    matrix in a batched manner.

    Parameter:
    ----------
    r: Axis-angle rotation tensor of shape [batch_size * angle_num, 1, 3].

    Return:
    -------
    Rotation matrix of shape [batch_size * angle_num, 3, 3].

    """
    eps = r.clone().normal_(std=1e-8)
    theta = torch.norm(r + eps, dim=(1, 2), keepdim=True)  # dim cannot be tuple
    theta_dim = theta.shape[0]
    r_hat = r / theta
    cos = torch.cos(theta)
    z_stick = torch.zeros(theta_dim, dtype=r.dtype).to(r.device)
    m = torch.stack(
      (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
       -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), dim=1)
    m = torch.reshape(m, (-1, 3, 3))
    i_cube = (torch.eye(3, dtype=r.dtype).unsqueeze(dim=0) \
             + torch.zeros((theta_dim, 3, 3), dtype=r.dtype)).to(r.device)
    A = r_hat.permute(0, 2, 1)
    dot = torch.matmul(A, r_hat)
    R = cos * i_cube + (1 - cos) * dot + torch.sin(theta) * m
    return R 
Example #15
Source File: loss.py    From PyMIC with Apache License 2.0
def distance_loss(predict, soft_y, lab_distance, softmax = True):
    """
    Get the distance-based loss.
    lab_distance is the unsigned distance transform of the foreground contour.
    """
    tensor_dim = len(predict.size())
    num_class  = list(predict.size())[1]
    if(softmax):
        predict = nn.Softmax(dim = 1)(predict)
    if(tensor_dim == 5):
        lab_distance  = lab_distance.permute(0, 2, 3, 4, 1)
        predict = predict.permute(0, 2, 3, 4, 1)
        soft_y  = soft_y.permute(0, 2, 3, 4, 1)
    elif(tensor_dim == 4):
        lab_distance  = lab_distance.permute(0, 2, 3, 1)
        predict = predict.permute(0, 2, 3, 1)
        soft_y  = soft_y.permute(0, 2, 3, 1)
    else:
        raise ValueError("{0:}D tensor not supported".format(tensor_dim))

    lab_distance  = torch.reshape(lab_distance,  (-1, num_class))
    predict = torch.reshape(predict, (-1, num_class))
    soft_y  = torch.reshape(soft_y, (-1, num_class))

    # mis_seg  = torch.abs(predict - soft_y)
    dis_sum  = torch.sum(lab_distance * predict, dim = 0)
    vox_sum  = torch.sum(predict, dim = 0)
    avg_dis  = (dis_sum + 1e-5)/(vox_sum + 1e-5)
    avg_dis  = torch.mean(avg_dis)
    return avg_dis 
Example #16
Source File: smpl_torch_batch.py    From densebody_pytorch with GNU General Public License v3.0
def _lR2G(self, lRs, J):
    batch_num = lRs.shape[0]
    results = []    # results correspond to G' terms in original paper.
    results.append(
      self.with_zeros(torch.cat((lRs[:, 0], torch.reshape(J[:, 0, :], (-1, 3, 1))), dim=2))
    )
    for i in range(1, self.kintree_table.shape[1]):
      results.append(
        torch.matmul(
          results[self.parent[i]],
          self.with_zeros(
            torch.cat(
              (lRs[:, i], torch.reshape(J[:, i, :] - J[:, self.parent[i], :], (-1, 3, 1))),
              dim=2
            )
          )
        )
      )
    
    stacked = torch.stack(results, dim=1)
    deformed_joint = \
        torch.matmul(
          stacked,
          torch.reshape(
            torch.cat((J, torch.zeros((batch_num, 24, 1), dtype=self.data_type).to(self.device)), dim=2),
            (batch_num, 24, 4, 1)
          )
        ) 
    results = stacked - self.pack(deformed_joint)
    return results, lRs 
Example #17
Source File: model.py    From ACAN with MIT License
def decode_ord(self, y):
        batch_size, prob, height, width = y.shape
        y = torch.reshape(y, (batch_size, prob//2, 2, height, width))
        denominator = torch.sum(torch.exp(y), 2)
        pred_score = torch.div(torch.exp(y[:, :, 1, :, :]), denominator)
        return pred_score 
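The reshape regroups 2*K ordinal channels into K (negative, positive) pairs, and the exp/sum division is a softmax over each pair. An equivalent direct sketch with illustrative shapes:

# Sketch: the decoding above equals a softmax over each channel pair.
import torch

y = torch.randn(2, 10, 4, 4)                     # 10 channels = 5 pairs
pairs = torch.reshape(y, (2, 5, 2, 4, 4))
pred_score = torch.softmax(pairs, dim=2)[:, :, 1, :, :]
assert pred_score.shape == (2, 5, 4, 4)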
Example #18
Source File: loss.py    From PyMIC with Apache License 2.0
def reshape_tensor_to_2D(x):
    """
    reshape input variables of shape [B, C, D, H, W] to [voxel_n, C]
    """
    tensor_dim = len(x.size())
    num_class  = list(x.size())[1]
    if(tensor_dim == 5):
        x_perm  = x.permute(0, 2, 3, 4, 1)
    elif(tensor_dim == 4):
        x_perm  = x.permute(0, 2, 3, 1)
    else:
        raise ValueError("{0:}D tensor not supported".format(tensor_dim))
    
    y = torch.reshape(x_perm, (-1, num_class)) 
    return y 
Example #19
Source File: recurrent.py    From Tagger with BSD 3-Clause "New" or "Revised" License
def forward(self, x, state):
        c, h = state

        gates = self.gates(torch.cat([x, h], 1))
        combined = torch.reshape(gates, [-1, 5, self.output_size])
        i, j, f, o, t = torch.unbind(combined, 1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
        t = torch.sigmoid(t)

        new_c = f * c + i * torch.tanh(j)
        tmp_h = o * torch.tanh(new_c)
        new_h = t * tmp_h + (1.0 - t) * self.trans(x)

        return new_h, (new_c, new_h) 
Example #20
Source File: pooling.py    From fastNLP with Apache License 2.0
def forward(self, x):
        r"""
        :param torch.Tensor x: [N, C, L] input tensor
        :return: torch.Tensor x: [N, C*k] result after k-max pooling
        """
        x, index = torch.topk(x, self.k, dim=-1, sorted=False)
        x = torch.reshape(x, (x.size(0), -1))
        return x 
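A standalone sketch of the same k-max pooling, assuming k = 2:

# Sketch: keep the top-2 values per channel, then flatten to [N, C*k].
import torch

x = torch.tensor([[[1.0, 4.0, 2.0, 3.0]]])       # [N=1, C=1, L=4]
topk, _ = torch.topk(x, 2, dim=-1, sorted=False)
flat = torch.reshape(topk, (topk.size(0), -1))
print(flat)                                      # the values 4.0 and 3.0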
Example #21
Source File: attention.py    From Tagger with BSD 3-Clause "New" or "Revised" License
def combine_heads(x):
        batch = x.shape[0]
        heads = x.shape[1]
        length = x.shape[2]
        channels = x.shape[3]

        y = torch.transpose(x, 2, 1)

        return torch.reshape(y, [batch, length, heads * channels]) 
Example #22
Source File: attention.py    From Tagger with BSD 3-Clause "New" or "Revised" License
def split_heads(x, heads):
        batch = x.shape[0]
        length = x.shape[1]
        channels = x.shape[2]

        y = torch.reshape(x, [batch, length, heads, channels // heads])
        return torch.transpose(y, 2, 1) 
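split_heads and combine_heads are inverses of each other; a round-trip check with arbitrary sizes:

# Round trip: split into 4 heads, recombine, and recover the input.
import torch

x = torch.randn(2, 7, 16)            # [batch, length, channels]
assert torch.equal(combine_heads(split_heads(x, 4)), x)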
Example #23
Source File: losses.py    From Tagger with BSD 3-Clause "New" or "Revised" License
def forward(self, logits, labels):
        shape = labels.shape
        logits = torch.reshape(logits, [-1, logits.shape[-1]])
        labels = torch.reshape(labels, [-1])

        log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
        batch_idx = torch.arange(labels.shape[0], device=logits.device)
        loss = log_probs[batch_idx, labels]

        if not self.smoothing:
            return -torch.reshape(loss, shape)

        n = logits.shape[-1] - 1.0
        p = 1.0 - self.smoothing
        q = self.smoothing / n

        if log_probs.dtype != torch.float16:
            sum_probs = torch.sum(log_probs, dim=-1)
            loss = p * loss + q * (sum_probs - loss)
        else:
            # Prevent FP16 overflow
            sum_probs = torch.sum(log_probs.to(torch.float32), dim=-1)
            loss = loss.to(torch.float32)
            loss = p * loss + q * (sum_probs - loss)
            loss = loss.to(torch.float16)

        loss = -torch.reshape(loss, shape)

        if self.normalize:
            normalizing = -(p * math.log(p) + n * q * math.log(q + 1e-20))
            return loss - normalizing
        else:
            return loss 
Example #24
Source File: detect_face.py    From facenet-pytorch with MIT License
def bbreg(boundingbox, reg):
    if reg.shape[1] == 1:
        reg = torch.reshape(reg, (reg.shape[2], reg.shape[3]))

    w = boundingbox[:, 2] - boundingbox[:, 0] + 1
    h = boundingbox[:, 3] - boundingbox[:, 1] + 1
    b1 = boundingbox[:, 0] + reg[:, 0] * w
    b2 = boundingbox[:, 1] + reg[:, 1] * h
    b3 = boundingbox[:, 2] + reg[:, 2] * w
    b4 = boundingbox[:, 3] + reg[:, 3] * h
    boundingbox[:, :4] = torch.stack([b1, b2, b3, b4]).permute(1, 0)

    return boundingbox 
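A hypothetical call with a single 10x10 box and small regression offsets (bbreg from above assumed in scope):

# Sketch: refine one bounding box (the last column is a detection score).
import torch

boxes = torch.tensor([[0.0, 0.0, 9.0, 9.0, 0.9]])
reg   = torch.tensor([[0.1, 0.1, -0.1, -0.1]])
print(bbreg(boxes.clone(), reg))     # the box becomes [1., 1., 8., 8., 0.9]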
Example #25
Source File: models.py    From fastNLP with Apache License 2.0
def forward(self, chars, bigrams, seq_len, target):
        if self.debug:

            print_info('chars:{}'.format(chars.size()))
            print_info('bigrams:{}'.format(bigrams.size()))
            print_info('seq_len:{}'.format(seq_len.size()))
            print_info('target:{}'.format(target.size()))
        embed_char = self.char_embed(chars)

        if self.use_bigram:

            embed_bigram = self.bigram_embed(bigrams)

            embedding = torch.cat([embed_char, embed_bigram], dim=-1)
        else:

            embedding = embed_char

        embedding = self.embed_dropout(embedding)

        encoded_h, encoded_c = self.encoder(embedding, seq_len)

        encoded_h = self.output_dropout(encoded_h)

        pred = self.output(encoded_h)

        mask = seq_len_to_mask(seq_len)

        # pred = self.crf(pred)

        # batch_size, sent_len = pred.shape[0], pred.shape[1]
        # loss = self.loss_func(pred.reshape(batch_size * sent_len, -1), target.reshape(batch_size * sent_len))
        if self.debug:
            print('debug mode:finish')
            exit(1208)
        if self.training:
            loss = self.crf(pred, target, mask)
            return {'loss': loss}
        else:
            pred, path = self.crf.viterbi_decode(pred, mask)
            return {'pred': pred} 
Example #26
Source File: smpl_torch.py    From SMPL with MIT License
def rodrigues(r):
    """
    Rodrigues' rotation formula that turns an axis-angle tensor into a rotation
    matrix in a batched manner.

    Parameter:
    ----------
    r: Axis-angle rotation tensor of shape [batch_size, 1, 3].

    Return:
    -------
    Rotation matrix of shape [batch_size, 3, 3].

    """
    #r = r.to(self.device)
    eps = r.clone().normal_(std=1e-8)
    theta = torch.norm(r + eps, dim=(1, 2), keepdim=True)  # dim cannot be tuple
    theta_dim = theta.shape[0]
    r_hat = r / theta
    cos = torch.cos(theta)
    z_stick = torch.zeros(theta_dim, dtype=torch.float64).to(r.device)
    m = torch.stack(
      (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick,
       -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), dim=1)
    m = torch.reshape(m, (-1, 3, 3))
    i_cube = (torch.eye(3, dtype=torch.float64).unsqueeze(dim=0) \
             + torch.zeros((theta_dim, 3, 3), dtype=torch.float64)).to(r.device)
    A = r_hat.permute(0, 2, 1)
    dot = torch.matmul(A, r_hat)
    R = cos * i_cube + (1 - cos) * dot + torch.sin(theta) * m
    return R 
Example #27
Source File: SMIL_torch_batch.py    From SMPL with MIT License
def regress_joints(self, vertices):
        """The J_regressor matrix transforms vertices to joints."""
        # Given the template + pose blend shapes.
        batch_size = vertices.shape[0]

        # We could get the result as torch.matmul(self.J_regressor, vertices) or
        #  torch.stack([self.J_regressor.mm(verts) for verts in vertices]) in case J_regressor is sparse.
        # But it turns out there is a solution faster than both of the above:
        batch_vertices = vertices.transpose(0, 1).reshape(self.J_regressor.shape[1], -1)
        batch_results = self.J_regressor.mm(batch_vertices)
        batch_results = batch_results.reshape(self.J_regressor.shape[0], batch_size, -1).transpose(0, 1)
        return batch_results 
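The trick above folds the batch into the column dimension so a single mm covers all samples. A standalone sketch checking it against a plain batched matmul (matrix sizes are made up, using SMPL-like dimensions):

# Sketch: one mm over folded columns equals a batched matmul.
import torch

J_regressor = torch.rand(24, 6890)               # [joints, vertices]
vertices = torch.rand(2, 6890, 3)                # [batch, vertices, xyz]
folded = vertices.transpose(0, 1).reshape(6890, -1)
fast = J_regressor.mm(folded).reshape(24, 2, -1).transpose(0, 1)
slow = torch.matmul(J_regressor, vertices)
assert torch.allclose(fast, slow, atol=1e-4)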
Example #28
Source File: SMIL_torch_batch.py    From SMPL with MIT License
def rodrigues(self, r):
        """
        Rodrigues' rotation formula that turns an axis-angle tensor into a rotation
        matrix in a batched manner.

        Parameter:
        ----------
        r: Axis-angle rotation tensor of shape [N, 1, 3].

        Return:
        -------
        Rotation matrix of shape [N, 3, 3].
        """
        theta = torch.norm(r, dim=(1, 2), keepdim=True)
        # avoid division by zero
        torch.max(theta, theta.new_full((1,), torch.finfo(theta.dtype).tiny), out=theta)
        # The .tiny constant has to be copied to the GPU, but regress_joints is such a bottleneck that this cost is negligible.

        r_hat = r / theta
        z_stick = torch.zeros_like(r_hat[:, 0, 0])
        m = torch.stack(
            (z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1],
             r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0],
             -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick), dim=1)
        m = m.reshape(-1, 3, 3)

        dot = torch.bmm(r_hat.transpose(1, 2), r_hat)  # Batched outer product.
        # torch.matmul or torch.stack([torch.ger(r, r) for r in r_hat.squeeze(1)]) would work too.
        cos = theta.cos()
        R = cos * self.eye + (1 - cos) * dot + theta.sin() * m
        return R 
Example #29
Source File: sparse_image_warp_pytorch.py    From SpecAugment with Apache License 2.0
def create_dense_flows(flattened_flows, batch_size, image_height, image_width):
    # possibly .view
    return torch.reshape(flattened_flows, [batch_size, image_height, image_width, 2]) 
Example #30
Source File: sparse_image_warp_pytorch.py    From SpecAugment with Apache License 2.0
def flatten_grid_locations(grid_locations, image_height, image_width):
    return np.reshape(grid_locations, [image_height * image_width, 2])
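A round-trip sketch combining Examples #29 and #30 on a toy 2x3 grid (both functions assumed in scope):

# Round trip: flatten grid locations, then restore the dense layout.
import numpy as np
import torch

h, w = 2, 3
grid = np.stack(np.meshgrid(np.arange(h), np.arange(w), indexing="ij"), -1)
flat = flatten_grid_locations(grid, h, w)        # numpy array of shape [6, 2]
dense = create_dense_flows(torch.from_numpy(flat), 1, h, w)
assert dense.shape == (1, 2, 3, 2)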