Python torch.autograd() Examples

The following are 30 code examples of torch.autograd(), extracted from open source projects. You can go to the original project or source file by following the references above each example. You may also want to check out all available functions and classes of the module torch.
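For readers new to the package: since PyTorch 0.4 the Variable wrapper has been merged into Tensor, so many of the examples below use an older but still readable style. A minimal sketch of the core autograd API, independent of any of the projects below:

import torch

# A toy scalar function y = sum(x ** 2); autograd records operations on tensors
# created with requires_grad=True.
x = torch.randn(3, requires_grad=True)
y = (x ** 2).sum()

# Option 1: backward() accumulates dy/dx into x.grad.
y.backward()
print(x.grad)                                  # equals 2 * x

# Option 2: torch.autograd.grad returns the gradient directly as a tuple.
x.grad = None
(grad_x,) = torch.autograd.grad((x ** 2).sum(), x)
print(grad_x)                                  # also equals 2 * x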
Example #1
Source File: export.py    From robosat with MIT License
def main(args):
    dataset = load_config(args.dataset)

    num_classes = len(dataset["common"]["classes"])
    net = UNet(num_classes)

    def map_location(storage, _):
        return storage.cpu()

    chkpt = torch.load(args.checkpoint, map_location=map_location)
    net = torch.nn.DataParallel(net)
    net.load_state_dict(chkpt["state_dict"])

    # Todo: make input channels configurable, not hard-coded to three channels for RGB
    batch = torch.autograd.Variable(torch.randn(1, 3, args.image_size, args.image_size))

    torch.onnx.export(net, batch, args.model) 
Example #2
Source File: sliced_sm.py    From ncsn with GNU General Public License v3.0
def sliced_score_matching(energy_net, samples, n_particles=1):
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)

    logp = -energy_net(dup_samples).sum()
    grad1 = autograd.grad(logp, dup_samples, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean() 
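A minimal usage sketch for the function above. The toy energy network, its dimensions, and the batch shape are illustrative assumptions; any module that maps a batch of samples to one scalar energy per sample would do.

import torch
import torch.nn as nn
from torch import autograd  # the function above calls autograd.grad through this alias

class ToyEnergyNet(nn.Module):
    # Maps a batch of D-dimensional samples to one scalar energy per sample.
    def __init__(self, dim=2):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(dim, 32), nn.Tanh(), nn.Linear(32, 1))

    def forward(self, x):
        return self.net(x).squeeze(-1)

energy_net = ToyEnergyNet(dim=2)
samples = torch.randn(16, 2)
loss, loss1, loss2 = sliced_score_matching(energy_net, samples, n_particles=2)
loss.backward()  # create_graph=True above lets gradients reach energy_net's parameters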
Example #3
Source File: seq2sql.py    From SQL_Database_Optimization with BSD 3-Clause "New" or "Revised" License
def reinforce_backward(self, score, rewards):
        agg_score, sel_score, cond_score = score

        cur_reward = rewards[:]
        eof = self.SQL_TOK.index('<END>')
        for t in range(len(cond_score[1])):
            reward_inp = torch.FloatTensor(cur_reward).unsqueeze(1)
            if self.gpu:
                reward_inp = reward_inp.cuda()
            cond_score[1][t].reinforce(reward_inp)

            for b in range(len(rewards)):
                if cond_score[1][t][b].data.cpu().numpy()[0] == eof:
                    cur_reward[b] = 0
        torch.autograd.backward(cond_score[1], [None for _ in cond_score[1]])
        return 
Example #4
Source File: prunable_nn_test.py    From prunnable-layers-pytorch with GNU General Public License v3.0
def test_pruneFeatureMap_ShouldPruneRightParams(self):
        dropped_index = 0
        output = self.module(self.input)
        torch.autograd.backward(output, self.upstream_gradient)

        old_weight_size = self.module.weight.size()
        old_bias_size = self.module.bias.size()
        old_out_channels = self.module.out_channels
        old_weight_values = self.module.weight.data.cpu().numpy()

        # ensure that the chosen index is dropped
        self.module.prune_feature_map(dropped_index)

        # check bias size
        self.assertEqual(self.module.bias.size()[0], (old_bias_size[0]-1))
        # check output channels
        self.assertEqual(self.module.out_channels, old_out_channels-1)

        _, *other_old_weight_sizes = old_weight_size
        # check weight size
        self.assertEqual(self.module.weight.size(), (old_weight_size[0]-1, *other_old_weight_sizes))
        # check weight value
        expected = np.delete(old_weight_values, dropped_index , 0)
        self.assertTrue(np.array_equal(self.module.weight.data.cpu().numpy(), expected)) 
Example #5
Source File: rnn.py    From slot-filling with MIT License
def forward(self, inputs, hidden=None):  
        if hidden is None and self.mode != "jordan":
        # if hidden is None:
            batch_size = inputs.size(0)
            # print(batch_size)
            hidden = torch.autograd.Variable(torch.zeros(batch_size,
                                                       self.hidden_size))
            if self.cuda:
                hidden = hidden.cuda()

        output_forward, hidden_forward = self._forward(inputs, hidden)
        output_forward = torch.stack(output_forward, dim=0)
        if not self.bidirectional:
            if self.batch_first:
                output_forward = output_forward.transpose(0,1)
            return output_forward, hidden_forward

        output_reversed, hidden_reversed = self._reversed_forward(inputs, hidden)
        hidden = torch.cat([hidden_forward, hidden_reversed], dim=hidden_forward.dim() - 1)
        output_reversed = torch.stack(output_reversed, dim=0)
        output = torch.cat([output_forward, output_reversed],
                                dim=output_reversed.data.dim() - 1)
        if self.batch_first:
            output = output.transpose(0,1)
        return output, hidden 
Example #6
Source File: bn.py    From pytorch-segmentation-toolbox with MIT License
def forward(self, x):
        if x.get_device() == self.devices[0]:
            # Master mode
            extra = {
                "is_master": True,
                "master_queue": self.master_queue,
                "worker_queues": self.worker_queues,
                "worker_ids": self.worker_ids
            }
        else:
            # Worker mode
            extra = {
                "is_master": False,
                "master_queue": self.master_queue,
                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
            }

        return inplace_abn_sync(x, self.weight, self.bias, autograd.Variable(self.running_mean),
                                autograd.Variable(self.running_var), extra, self.training, self.momentum, self.eps,
                                self.activation, self.slope) 
Example #7
Source File: __init__.py    From yolo2-pytorch with GNU Lesser General Public License v3.0
def forward(self, x):
        device_id = x.get_device() if torch.cuda.is_available() else None
        feature = self.dnn(x)
        rows, cols = feature.size()[-2:]
        cells = rows * cols
        _feature = feature.permute(0, 2, 3, 1).contiguous().view(feature.size(0), cells, self.anchors.size(0), -1)
        sigmoid = F.sigmoid(_feature[:, :, :, :3])
        iou = sigmoid[:, :, :, 0]
        ij = torch.autograd.Variable(utils.ensure_device(meshgrid(rows, cols).view(1, -1, 1, 2), device_id))
        center_offset = sigmoid[:, :, :, 1:3]
        center = ij + center_offset
        size_norm = _feature[:, :, :, 3:5]
        anchors = torch.autograd.Variable(utils.ensure_device(self.anchors.view(1, 1, -1, 2), device_id))
        size = torch.exp(size_norm) * anchors
        size2 = size / 2
        yx_min = center - size2
        yx_max = center + size2
        logits = _feature[:, :, :, 5:] if _feature.size(-1) > 5 else None
        return feature, iou, center_offset, size_norm, yx_min, yx_max, logits 
Example #8
Source File: LSTM_classifierX3_CUDA-xsub.py    From Action-Recognition with MIT License
def evaluate_stocha_val_acc(model):
    model.eval()
    acc_cum = 0
    N = dvd_sub.shape[0]
    for i in range(N):
        X = dvd_sub[i, :, :]
        X = autograd.Variable(torch.from_numpy(X).float().cuda())
        X = X.view(len(X), 1, -1)
        y_pred = model(X)
        y_pred = y_pred.data.cpu().max(1)[1].numpy()[0]
        if y_pred == dvl_sub[1][i]:
            acc_cum += 1
    return acc_cum*100/N


# ## observations
# * better to use the log_softmax instead of softmax
# * decrease lr successively to get better results

# In[19]:


#training function 
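The observations in the example above recommend log_softmax over softmax for classification outputs. A minimal sketch of that pattern, with illustrative shapes only (the model producing the logits is not shown):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10, requires_grad=True)   # batch of 4, 10 classes
targets = torch.tensor([1, 0, 3, 9])

# log_softmax + nll_loss is the numerically stable pairing;
# it is equivalent to F.cross_entropy(logits, targets).
log_probs = F.log_softmax(logits, dim=1)
loss = F.nll_loss(log_probs, targets)
loss.backward()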
Example #9
Source File: train_confusion.py    From glc with Apache License 2.0
def get_C_hat_transpose():
    probs = []
    net.eval()
    for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract 10 because we added 10 to gold so we could identify which example is gold in train_phase2
        data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
                       torch.autograd.Variable((target - num_classes).cuda(), volatile=True)

        # forward
        output = net(data)
        pred = F.softmax(output)
        probs.extend(list(pred.data.cpu().numpy()))

    probs = np.array(probs, dtype=np.float32)
    preds = np.argmax(probs, axis=1)
    C_hat = np.zeros([num_classes, num_classes])
    for i in range(len(train_data_gold.train_labels)):
        C_hat[int(np.rint(train_data_gold.train_labels[i] - num_classes)), preds[i]] += 1

    C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
    C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01  # smoothing

    return C_hat.T.astype(np.float32) 
Example #10
Source File: train_1bw.py    From RAdam with Apache License 2.0
def evaluate(data_loader, lm_model, criterion, limited = 76800):
    print('evaluating')
    lm_model.eval()

    iterator = data_loader.get_tqdm()

    lm_model.init_hidden()
    total_loss = 0
    total_len = 0
    for word_t, label_t in iterator:
        label_t = label_t.view(-1)
        tmp_len = label_t.size(0)
        output = lm_model.log_prob(word_t)
        total_loss += tmp_len * utils.to_scalar(criterion(autograd.Variable(output), label_t))
        total_len += tmp_len

        if limited >=0 and total_len > limited:
            break

    ppl = math.exp(total_loss / total_len)
    print('PPL: ' + str(ppl))

    return ppl 
Example #11
Source File: normalization.py    From weakalign with MIT License
def normalize_image(image, forward=True, mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]):
        im_size = image.size()
        mean=torch.FloatTensor(mean).unsqueeze(1).unsqueeze(2)
        std=torch.FloatTensor(std).unsqueeze(1).unsqueeze(2)
        if image.is_cuda:
            mean = mean.cuda()
            std = std.cuda()
        if isinstance(image,torch.autograd.variable.Variable):
            mean = Variable(mean,requires_grad=False)
            std = Variable(std,requires_grad=False)
        if forward:
            if len(im_size)==3:
                result = image.sub(mean.expand(im_size)).div(std.expand(im_size))
            elif len(im_size)==4:
                result = image.sub(mean.unsqueeze(0).expand(im_size)).div(std.unsqueeze(0).expand(im_size))
        else:
            if len(im_size)==3:
                result = image.mul(std.expand(im_size)).add(mean.expand(im_size))
            elif len(im_size)==4:
                result = image.mul(std.unsqueeze(0).expand(im_size)).add(mean.unsqueeze(0).expand(im_size))
                
        return  result 
Example #12
Source File: trpo.py    From pytorchrl with MIT License
def pearlmutter_hvp(kl_func, all_obs, old_dist, policy, v):
    """
    TODO (ewei) add docstring here.

    Parameters
    ----------
    see docstring of finite_diff_hvp function.

    Returns
    -------
    see docstring of finite_diff_hvp function.
    """
    policy.zero_grad()
    kl_div = kl_func(policy, all_obs, old_dist)
    param_grads = torch.autograd.grad(kl_div, policy.ordered_params(),
        create_graph=True)
    flat_grad = torch.cat([grad.view(-1) for grad in param_grads])
    gradient_vector_product = torch.sum(flat_grad * Variable(v))
    hessian_vector_product = torch.autograd.grad(gradient_vector_product,
        policy.ordered_params())
    flat_hvp = torch.cat([product.contiguous().view(-1) for product in hessian_vector_product])
    return flat_hvp.data 
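Since kl_func, policy, and old_dist are defined elsewhere in that project, here is a self-contained sketch of the same double-backprop (Pearlmutter) trick on a toy quadratic; the matrix A and the vector v are illustrative assumptions.

import torch

# Hessian-vector product of f(x) = 0.5 * x^T A x; the Hessian is A, so we expect H v = A v.
A = torch.tensor([[3.0, 1.0], [1.0, 2.0]])
x = torch.randn(2, requires_grad=True)
v = torch.randn(2)

f = 0.5 * x @ (A @ x)
(grad,) = torch.autograd.grad(f, x, create_graph=True)  # first backprop, graph kept
gvp = torch.dot(grad, v)                                 # scalar: gradient . v
(hvp,) = torch.autograd.grad(gvp, x)                     # second backprop yields H v

print(torch.allclose(hvp, A @ v))                        # True, up to float error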
Example #13
Source File: actor_critic.py    From pytorchrl with MIT License
def finish_episode():
    R = 0
    saved_actions = model.saved_actions
    value_loss = 0
    rewards = []
    for r in model.rewards[::-1]:
        R = r + args.gamma * R
        rewards.insert(0, R)
    rewards = torch.Tensor(rewards)
    rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    for (action, value), r in zip(saved_actions, rewards):
        reward = r - value.data[0,0]
        action.reinforce(reward)
        value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
    optimizer.zero_grad()
    final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
    gradients = [torch.ones(1)] + [None] * len(saved_actions)
    autograd.backward(final_nodes, gradients)
    optimizer.step()
    del model.rewards[:]
    del model.saved_actions[:] 
Example #14
Source File: train.py    From ACME with GNU General Public License v3.0
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP"""
    # Random weight term for interpolation between real and fake samples
    alpha = torch.cuda.FloatTensor(np.random.random((real_samples.size(0), 1)))
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = D(interpolates)
    fake = torch.autograd.Variable(torch.cuda.FloatTensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
    # Get gradient w.r.t. interpolates
    gradients = torch.autograd.grad(
        outputs=d_interpolates,  # critic scores on the interpolated samples
        inputs=interpolates,     # the interpolated samples themselves
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty 
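A rough sketch of where this penalty typically enters the critic update in WGAN-GP training. The critic D, the batches, and the coefficient lambda_gp below are placeholder assumptions, and CUDA is required because the function above allocates torch.cuda.FloatTensor internally.

import torch
import torch.nn as nn

D = nn.Sequential(nn.Linear(8, 64), nn.LeakyReLU(0.2), nn.Linear(64, 1)).cuda()
optimizer_D = torch.optim.Adam(D.parameters(), lr=1e-4, betas=(0.5, 0.9))
lambda_gp = 10.0                                   # penalty coefficient from the WGAN-GP paper

real_samples = torch.randn(32, 8, device='cuda')
fake_samples = torch.randn(32, 8, device='cuda')   # normally produced by the generator

gp = compute_gradient_penalty(D, real_samples, fake_samples)
d_loss = -torch.mean(D(real_samples)) + torch.mean(D(fake_samples)) + lambda_gp * gp

optimizer_D.zero_grad()
d_loss.backward()
optimizer_D.step()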
Example #15
Source File: bn.py    From CCNet with MIT License
def forward(self, x):
        if x.get_device() == self.devices[0]:
            # Master mode
            extra = {
                "is_master": True,
                "master_queue": self.master_queue,
                "worker_queues": self.worker_queues,
                "worker_ids": self.worker_ids
            }
        else:
            # Worker mode
            extra = {
                "is_master": False,
                "master_queue": self.master_queue,
                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
            }

        return inplace_abn_sync(x, self.weight, self.bias, autograd.Variable(self.running_mean),
                                autograd.Variable(self.running_var), extra, self.training, self.momentum, self.eps,
                                self.activation, self.slope) 
Example #16
Source File: seq2sql.py    From SQLNet with BSD 3-Clause "New" or "Revised" License
def reinforce_backward(self, score, rewards):
        agg_score, sel_score, cond_score = score

        cur_reward = rewards[:]
        eof = self.SQL_TOK.index('<END>')
        for t in range(len(cond_score[1])):
            reward_inp = torch.FloatTensor(cur_reward).unsqueeze(1)
            if self.gpu:
                reward_inp = reward_inp.cuda()
            cond_score[1][t].reinforce(reward_inp)

            for b in range(len(rewards)):
                if cond_score[1][t][b].data.cpu().numpy()[0] == eof:
                    cur_reward[b] = 0
        torch.autograd.backward(cond_score[1], [None for _ in cond_score[1]])
        return 
Example #17
Source File: bn.py    From pytorch-0.4-yolov3 with MIT License
def forward(self, x): 
        nB = x.data.size(0)
        nC = x.data.size(1)
        nH = x.data.size(2)
        nW = x.data.size(3)
        samples = nB*nH*nW
        y = x.view(nB, nC, nH*nW).transpose(1,2).contiguous().view(-1,nC)
        if self.training:
            print('forward in training mode on autograd')
            m = Variable(y.mean(0).data, requires_grad=False)
            v = Variable(y.var(0).data, requires_grad=False)
            self.running_mean = (1-self.momentum)*self.running_mean + self.momentum * m.data.view(-1)
            self.running_var = (1-self.momentum)*self.running_var + self.momentum * v.data.view(-1)
            m = m.repeat(samples, 1)
            v = v.repeat(samples, 1)*(samples-1.0)/samples
        else:
            m = Variable(self.running_mean.repeat(samples, 1), requires_grad=False)
            v = Variable(self.running_var.repeat(samples, 1), requires_grad=False)
        w = self.weight.repeat(samples, 1)
        b = self.bias.repeat(samples, 1)
        y = (y - m)/(v+self.eps).sqrt() * w + b 
        y = y.view(nB, nH*nW, nC).transpose(1,2).contiguous().view(nB, nC, nH, nW) 
        return y 
Example #18
Source File: net_utils.py    From cascade-rcnn_Pytorch with MIT License
def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2 # random.randint(1, 8)
    H = 5 # random.randint(1, 8)
    W = 4 # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_offcial = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_offcial.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_offcial, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]


    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum() 
Example #19
Source File: test.py    From block with Apache License 2.0
def test_torch():
    import torch
    from torch.autograd import Variable

    torch.manual_seed(0)

    nx, nineq, neq = 4, 6, 7
    Q = torch.randn(nx, nx)
    G = torch.randn(nineq, nx)
    A = torch.randn(neq, nx)
    D = torch.diag(torch.rand(nineq))

    K_ = torch.cat((
        torch.cat((Q, torch.zeros(nx, nineq).type_as(Q), G.t(), A.t()), 1),
        torch.cat((torch.zeros(nineq, nx).type_as(Q), D,
                   torch.eye(nineq).type_as(Q),
                   torch.zeros(nineq, neq).type_as(Q)), 1),
        torch.cat((G, torch.eye(nineq).type_as(Q), torch.zeros(
            nineq, nineq + neq).type_as(Q)), 1),
        torch.cat((A, torch.zeros((neq, nineq + nineq + neq))), 1)
    ))

    K = block((
        (Q,   0, G.t(), A.t()),
        (0,   D,   'I',     0),
        (G, 'I',     0,     0),
        (A,   0,     0,     0)
    ))

    assert (K - K_).norm() == 0.0
    K = block((
        (Variable(Q),   0, G.t(), Variable(A.t())),
        (0,   Variable(D),   'I',     0),
        (Variable(G), 'I',     0,     0),
        (A,   0,     0,     0)
    ))

    assert (K.data - K_).norm() == 0.0 
Example #20
Source File: pytorch_test_synthesize_motion.py    From Auto_Conditioned_RNN_motion with MIT License
def init_hidden(self, batch):
        #c batch*(3*1024)
        c0 = torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        c1= torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        c2 = torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        h0 = torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        h1= torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        h2= torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        return  ([h0,h1,h2], [c0,c1,c2])
    
    #in_frame b*In_frame_size
    #vec_h [b*1024,b*1024,b*1024] vec_c [b*1024,b*1024,b*1024]
    #out_frame b*In_frame_size
    #vec_h_new [b*1024,b*1024,b*1024] vec_c_new [b*1024,b*1024,b*1024] 
Example #21
Source File: prunable_nn_test.py    From prunnable-layers-pytorch with GNU General Public License v3.0
def test_getTaylorEstimates_ShouldGiveValidValueAndSize(self):
        output = self.module(self.input)
        torch.autograd.backward(output, self.upstream_gradient)

        # ensure input and output are different
        self.assertFalse(np.array_equal(self.input.data.cpu().numpy(), output.data.cpu().numpy()))

        estimates = self.module.taylor_estimates.data.cpu()
        size = estimates.size()

        # ensure sane size
        self.assertEqual(size, torch.FloatTensor(self.input_shape[1]).size())
        # ensure not zero
        self.assertFalse(np.array_equal(estimates.numpy(), torch.zeros(size).numpy())) 
Example #22
Source File: worker_scheduler.py    From pase with MIT License
def _get_gen_grads(self, loss_):
        # grads = torch.autograd.grad(outputs=loss_, inputs=self.model.frontend.parameters())
        self.model.frontend.zero_grad()
        loss_.backward()
        # grads = self.model.frontend.grad()
        for params in self.model.frontend.parameters():

            try:
                grads_ = torch.cat([grads_, params.grad.view(-1)], 0)
            except:
                grads_ = params.grad.view(-1)

        return grads_ / grads_.norm() 
Example #23
Source File: dataset.py    From RAdam with Apache License 2.0
def __next__(self):
        if self.cur_idx >= self.index_length:
            self.open_next()

        word_t = autograd.Variable(self.x[self.cur_idx]).cuda()
        # label_t = autograd.Variable(self.y[self.cur_idx]).cuda()
        label_t = self.y[self.cur_idx].cuda()

        self.cur_idx += 1

        return word_t, label_t 
Example #24
Source File: pytorch_test_synthesize_motion.py    From Auto_Conditioned_RNN_motion with MIT License
def forward(self, initial_seq, generate_frames_number):
        
        batch=initial_seq.size()[0]
        
        
        #initialize vec_h vec_m #set as 0
        (vec_h, vec_c) = self.init_hidden(batch)
        
        out_seq = torch.autograd.Variable(torch.FloatTensor(  np.zeros((batch,1))   ).cuda())

        out_frame=torch.autograd.Variable(torch.FloatTensor(  np.zeros((batch,self.out_frame_size))  ).cuda())
        
        
        for i in range(initial_seq.size()[1]):
            in_frame=initial_seq[:,i]
            
            (out_frame, vec_h,vec_c) = self.forward_lstm(in_frame, vec_h, vec_c)
    
            out_seq = torch.cat((out_seq, out_frame),1)
        
        for i in range(generate_frames_number):
            
            in_frame=out_frame
            
            (out_frame, vec_h,vec_c) = self.forward_lstm(in_frame, vec_h, vec_c)
    
            out_seq = torch.cat((out_seq, out_frame),1)
    
        return out_seq[:, 1: out_seq.size()[1]]
    
    #cuda tensor out_seq batch*(seq_len*frame_size)
    #cuda tensor groundtruth_seq batch*(seq_len*frame_size) 
Example #25
Source File: pytorch_test_synthesize_motion.py    From Auto_Conditioned_RNN_motion with MIT License
def generate_seq(initial_seq_np, generate_frames_number, model, save_dance_folder):

    #set hip_x and hip_z as the difference from the future frame to current frame
    dif = initial_seq_np[:, 1:initial_seq_np.shape[1]] - initial_seq_np[:, 0: initial_seq_np.shape[1]-1]
    initial_seq_dif_hip_x_z_np = initial_seq_np[:, 0:initial_seq_np.shape[1]-1].copy()
    initial_seq_dif_hip_x_z_np[:,:,Hip_index*3]=dif[:,:,Hip_index*3]
    initial_seq_dif_hip_x_z_np[:,:,Hip_index*3+2]=dif[:,:,Hip_index*3+2]
    
    
    initial_seq  = torch.autograd.Variable(torch.FloatTensor(initial_seq_dif_hip_x_z_np.tolist()).cuda() )
 
    predict_seq = model.forward(initial_seq, generate_frames_number)
    
    batch=initial_seq_np.shape[0]
   
    for b in range(batch):
        
        out_seq=np.array(predict_seq[b].data.tolist()).reshape(-1,In_frame_size)
        last_x=0.0
        last_z=0.0
        for frame in range(out_seq.shape[0]):
            out_seq[frame,Hip_index*3]=out_seq[frame,Hip_index*3]+last_x
            last_x=out_seq[frame,Hip_index*3]
            
            out_seq[frame,Hip_index*3+2]=out_seq[frame,Hip_index*3+2]+last_z
            last_z=out_seq[frame,Hip_index*3+2]
            
        read_bvh.write_traindata_to_bvh(save_dance_folder+"out"+"%02d"%b+".bvh", out_seq)
    return np.array(predict_seq.data.tolist()).reshape(batch, -1, In_frame_size)



#input a list of dances [dance1, dance2, dance3]
#return a list of dance indices; the occurrence count of a dance's index is proportional to the length of the dance
Example #26
Source File: pytorch_train_aclstm.py    From Auto_Conditioned_RNN_motion with MIT License
def init_hidden(self, batch):
        #c batch*(3*1024)
        c0 = torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        c1= torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        c2 = torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        h0 = torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        h1= torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        h2= torch.autograd.Variable(torch.FloatTensor(np.zeros((batch, self.hidden_size)) ).cuda())
        return  ([h0,h1,h2], [c0,c1,c2])
    
    #in_frame b*In_frame_size
    #vec_h [b*1024,b*1024,b*1024] vec_c [b*1024,b*1024,b*1024]
    #out_frame b*In_frame_size
    #vec_h_new [b*1024,b*1024,b*1024] vec_c_new [b*1024,b*1024,b*1024] 
Example #27
Source File: pytorch_train_aclstm.py    From Auto_Conditioned_RNN_motion with MIT License
def forward(self, real_seq, condition_num=5, groundtruth_num=5):
        
        batch=real_seq.size()[0]
        seq_len=real_seq.size()[1]
        
        condition_lst=self.get_condition_lst(condition_num, groundtruth_num, seq_len)
        
        #initialize vec_h vec_m #set as 0
        (vec_h, vec_c) = self.init_hidden(batch)
        
        out_seq = torch.autograd.Variable(torch.FloatTensor(  np.zeros((batch,1))   ).cuda())

        out_frame=torch.autograd.Variable(torch.FloatTensor(  np.zeros((batch,self.out_frame_size))  ).cuda())
        
        
        for i in range(seq_len):
            
            if(condition_lst[i]==1):##input groundtruth frame
                in_frame=real_seq[:,i]
            else:
                in_frame=out_frame
            
            (out_frame, vec_h,vec_c) = self.forward_lstm(in_frame, vec_h, vec_c)
    
            out_seq = torch.cat((out_seq, out_frame),1)
    
        return out_seq[:, 1: out_seq.size()[1]]
    
    #cuda tensor out_seq batch*(seq_len*frame_size)
    #cuda tensor groundtruth_seq batch*(seq_len*frame_size) 
Example #28
Source File: dataset.py    From MSMARCO with MIT License
def __iter__(self):
        """
        Generate batches from data.
        All outputs are in torch.autograd.Variable, for convenience.
        """

        if self.shuffle:
            np.random.shuffle(self.idx)

        for start_ind in range(0, self.n_samples - 1, self.batch_size):
            batch_idx = self.idx[start_ind:start_ind+self.batch_size]

            qids = [self.data[ind][0] for ind in batch_idx]
            passages = [self.data[ind][1][0] for ind in batch_idx]
            c_passages = [self.data[ind][1][1] for ind in batch_idx]
            queries = [self.data[ind][2][0] for ind in batch_idx]
            c_queries = [self.data[ind][2][1] for ind in batch_idx]
            answers = [self.data[ind][3] for ind in batch_idx]
            mappings = [self.data[ind][4] for ind in batch_idx]

            passages = self.process_batch_for_length(
                    passages, c_passages)
            queries = self.process_batch_for_length(
                    queries, c_queries)

            answers = Variable(self.tensor_type(answers))

            batch = (qids,
                     passages, queries,
                     answers,
                     mappings)
            yield batch
        return 
Example #29
Source File: loss_function.py    From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def BCE_bootstrap_with_logits(input, target, ishard=False, beta=0.95, weight=None, size_average=True):
    r"""Function that measures Binary Cross Entropy between target and output
    logits with prediction consistency (bootstrap)

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        ishard: Choose soft/hard bootstrap mode
        beta: Weight between ``gt`` label and prediction. In the paper, 0.8 for hard and 0.95 for soft bootstrapping
        weight (Variable, optional): a manual rescaling weight
                if provided it's repeated to match input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if the field
                sizeAverage is set to False, the losses are instead summed
                for each minibatch. Default: ``True``

    Examples::

         >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
         >>> target = autograd.Variable(torch.FloatTensor(3).random_(2))
         >>> loss = BCE_bootstrap_with_logits(input, target)
         >>> loss.backward()
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
    input_prob = torch.sigmoid(input)
    if ishard:
        target = target * beta + (input_prob>0.5) * (1-beta)
    else:
        target = target * beta + input_prob * (1-beta)
    print(target)
    max_val = (-input).clamp(min=0)
    loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()

    if weight is not None:
        loss = loss * weight

    if size_average:
        return loss.mean()
    else:
        return loss.sum() 
Example #30
Source File: crf.py    From PyTorch-Elmo-BiLSTMCRF with MIT License
def decode(self,
               emissions: Union[Variable, torch.FloatTensor],
               mask: Optional[Union[Variable, torch.ByteTensor]] = None) -> List[List[int]]:
        """Find the most likely tag sequence using Viterbi algorithm.
        Arguments
        ---------
        emissions : :class:`~torch.autograd.Variable` or :class:`~torch.FloatTensor`
            Emission score tensor of size ``(seq_length, batch_size, num_tags)``.
        mask : :class:`~torch.autograd.Variable` or :class:`torch.ByteTensor`
            Mask tensor of size ``(seq_length, batch_size)``.
        Returns
        -------
        list
            List of list containing the best tag sequence for each batch.
        """
        if emissions.dim() != 3:
            raise ValueError(f'emissions must have dimension of 3, got {emissions.dim()}')
        if emissions.size(2) != self.num_tags:
            raise ValueError(
                f'expected last dimension of emissions is {self.num_tags}, '
                f'got {emissions.size(2)}'
            )
        if mask is not None and emissions.size()[:2] != mask.size():
            raise ValueError(
                'the first two dimensions of emissions and mask must match, '
                f'got {tuple(emissions.size()[:2])} and {tuple(mask.size())}'
            )

        if isinstance(emissions, Variable):
            emissions = emissions.data
        if mask is None:
            mask = self._new(emissions.size()[:2]).fill_(1).byte()
        elif isinstance(mask, Variable):
            mask = mask.data

        return self._viterbi_decode(emissions, mask)
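A brief usage sketch for the shapes the docstring describes. The constructor call CRF(num_tags) is an assumption about this module's API; the tag count, sequence length, and batch size are illustrative.

import torch

num_tags, seq_length, batch_size = 5, 7, 3
crf = CRF(num_tags)                                # assumed constructor signature

emissions = torch.randn(seq_length, batch_size, num_tags)
mask = torch.ones(seq_length, batch_size).byte()   # 1 = real token, 0 = padding

best_paths = crf.decode(emissions, mask=mask)      # list of batch_size tag sequences
print(len(best_paths), len(best_paths[0]))         # 3 sequences, each of length 7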