Python torch.autograd() Examples

The following are 30 code examples of the torch.autograd module, extracted from open source projects. You can go to the original project or source file via the Source Project line above each example. You may also want to check out all available functions/classes of the torch module.
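Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the tensor names are illustrative) of the two entry points most of the examples rely on, torch.autograd.grad and torch.autograd.backward:

import torch

# A toy scalar function y = sum(x ** 2), differentiated both ways.
x = torch.randn(3, requires_grad=True)
y = (x ** 2).sum()

# torch.autograd.grad returns the gradients directly, without
# touching the .grad attributes of the inputs.
(dy_dx,) = torch.autograd.grad(y, x)
print(dy_dx)   # equals 2 * x

# torch.autograd.backward accumulates gradients into x.grad instead.
y = (x ** 2).sum()
torch.autograd.backward(y)
print(x.grad)  # also equals 2 * x

torch.autograd.grad is what the Hessian-vector-product and score-matching examples below build on (via create_graph=True), while torch.autograd.backward underlies the pruning and REINFORCE examples.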
Example #1
Source Project: slot-filling   Author: llhthinker   File: rnn.py    License: MIT License 6 votes
def forward(self, inputs, hidden=None):  
        if hidden is None and self.mode != "jordan":
        # if hidden is None:
            batch_size = inputs.size(0)
            # print(batch_size)
            hidden = torch.autograd.Variable(torch.zeros(batch_size,
                                                       self.hidden_size))
            if self.cuda:
                hidden = hidden.cuda()

        output_forward, hidden_forward = self._forward(inputs, hidden)
        output_forward = torch.stack(output_forward, dim=0)
        if not self.bidirectional:
            if self.batch_first:
                output_forward = output_forward.transpose(0,1)
            return output_forward, hidden_forward

        output_reversed, hidden_reversed = self._reversed_forward(inputs, hidden)
        hidden = torch.cat([hidden_forward, hidden_reversed], dim=hidden_forward.dim() - 1)
        output_reversed = torch.stack(output_reversed, dim=0)
        output = torch.cat([output_forward, output_reversed],
                                dim=output_reversed.data.dim() - 1)
        if self.batch_first:
            output = output.transpose(0,1)
        return output, hidden 
Example #2
Source Project: prunnable-layers-pytorch   Author: alexfjw   File: prunable_nn_test.py    License: GNU General Public License v3.0 6 votes
def test_pruneFeatureMap_ShouldPruneRightParams(self):
        dropped_index = 0
        output = self.module(self.input)
        torch.autograd.backward(output, self.upstream_gradient)

        old_weight_size = self.module.weight.size()
        old_bias_size = self.module.bias.size()
        old_out_channels = self.module.out_channels
        old_weight_values = self.module.weight.data.cpu().numpy()

        # ensure that the chosen index is dropped
        self.module.prune_feature_map(dropped_index)

        # check bias size
        self.assertEqual(self.module.bias.size()[0], (old_bias_size[0]-1))
        # check output channels
        self.assertEqual(self.module.out_channels, old_out_channels-1)

        _, *other_old_weight_sizes = old_weight_size
        # check weight size
        self.assertEqual(self.module.weight.size(), (old_weight_size[0]-1, *other_old_weight_sizes))
        # check weight value
        expected = np.delete(old_weight_values, dropped_index, 0)
        self.assertTrue(np.array_equal(self.module.weight.data.cpu().numpy(), expected)) 
Example #3
Source Project: pytorch-segmentation-toolbox   Author: speedinghzl   File: bn.py    License: MIT License 6 votes
def forward(self, x):
        if x.get_device() == self.devices[0]:
            # Master mode
            extra = {
                "is_master": True,
                "master_queue": self.master_queue,
                "worker_queues": self.worker_queues,
                "worker_ids": self.worker_ids
            }
        else:
            # Worker mode
            extra = {
                "is_master": False,
                "master_queue": self.master_queue,
                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
            }

        return inplace_abn_sync(x, self.weight, self.bias, autograd.Variable(self.running_mean),
                                autograd.Variable(self.running_var), extra, self.training, self.momentum, self.eps,
                                self.activation, self.slope) 
Example #4
Source Project: SQL_Database_Optimization   Author: llSourcell   File: seq2sql.py    License: BSD 3-Clause "New" or "Revised" License 6 votes
def reinforce_backward(self, score, rewards):
        agg_score, sel_score, cond_score = score

        cur_reward = rewards[:]
        eof = self.SQL_TOK.index('<END>')
        for t in range(len(cond_score[1])):
            reward_inp = torch.FloatTensor(cur_reward).unsqueeze(1)
            if self.gpu:
                reward_inp = reward_inp.cuda()
            cond_score[1][t].reinforce(reward_inp)

            for b in range(len(rewards)):
                if cond_score[1][t][b].data.cpu().numpy()[0] == eof:
                    cur_reward[b] = 0
        torch.autograd.backward(cond_score[1], [None for _ in cond_score[1]])
        return 
Example #5
Source Project: robosat   Author: mapbox   File: export.py    License: MIT License 6 votes
def main(args):
    dataset = load_config(args.dataset)

    num_classes = len(dataset["common"]["classes"])
    net = UNet(num_classes)

    def map_location(storage, _):
        return storage.cpu()

    chkpt = torch.load(args.checkpoint, map_location=map_location)
    net = torch.nn.DataParallel(net)
    net.load_state_dict(chkpt["state_dict"])

    # Todo: make input channels configurable, not hard-coded to three channels for RGB
    batch = torch.autograd.Variable(torch.randn(1, 3, args.image_size, args.image_size))

    torch.onnx.export(net, batch, args.model) 
Example #6
Source Project: yolo2-pytorch   Author: ruiminshen   File: __init__.py    License: GNU Lesser General Public License v3.0 6 votes
def forward(self, x):
        device_id = x.get_device() if torch.cuda.is_available() else None
        feature = self.dnn(x)
        rows, cols = feature.size()[-2:]
        cells = rows * cols
        _feature = feature.permute(0, 2, 3, 1).contiguous().view(feature.size(0), cells, self.anchors.size(0), -1)
        sigmoid = F.sigmoid(_feature[:, :, :, :3])
        iou = sigmoid[:, :, :, 0]
        ij = torch.autograd.Variable(utils.ensure_device(meshgrid(rows, cols).view(1, -1, 1, 2), device_id))
        center_offset = sigmoid[:, :, :, 1:3]
        center = ij + center_offset
        size_norm = _feature[:, :, :, 3:5]
        anchors = torch.autograd.Variable(utils.ensure_device(self.anchors.view(1, 1, -1, 2), device_id))
        size = torch.exp(size_norm) * anchors
        size2 = size / 2
        yx_min = center - size2
        yx_max = center + size2
        logits = _feature[:, :, :, 5:] if _feature.size(-1) > 5 else None
        return feature, iou, center_offset, size_norm, yx_min, yx_max, logits 
Example #7
Source Project: Action-Recognition   Author: Naman-ntc   File: LSTM_classifierX3_CUDA-xsub.py    License: MIT License 6 votes
def evaluate_stocha_val_acc(model):
    model.eval()
    acc_cum = 0
    N = dvd_sub.shape[0]
    for i in range(N):
        X = dvd_sub[i, :, :]
        X = autograd.Variable(torch.from_numpy(X).float().cuda())
        X = X.view(len(X), 1, -1)
        y_pred = model(X)
        y_pred = y_pred.data.cpu().max(1)[1].numpy()[0]
        if y_pred == dvl_sub[1][i]:
            acc_cum += 1
    return acc_cum*100/N


# ## observations
# * better to use log_softmax instead of softmax (see the sketch below)
# * decrease lr successively to get better results
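The first observation can be illustrated with a minimal sketch (hypothetical shapes; F.log_softmax feeding F.nll_loss is the numerically stable way to take the log of a softmax):

import torch
import torch.nn.functional as F

logits = torch.randn(8, 10)            # hypothetical: batch of 8, 10 classes
targets = torch.randint(0, 10, (8,))

# Stable: the log-probabilities are computed in one fused op.
loss = F.nll_loss(F.log_softmax(logits, dim=1), targets)

# Mathematically equivalent, but log(softmax(...)) can under/overflow
# for extreme logits.
loss_naive = F.nll_loss(torch.log(F.softmax(logits, dim=1)), targets)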

Example #8
Source Project: glc   Author: mmazeika   File: train_confusion.py    License: Apache License 2.0 6 votes
def get_C_hat_transpose():
    probs = []
    net.eval()
    for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract num_classes because we added num_classes to the gold labels so we could identify which examples are gold in train_phase2
        data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
                       torch.autograd.Variable((target - num_classes).cuda(), volatile=True)

        # forward
        output = net(data)
        pred = F.softmax(output)
        probs.extend(list(pred.data.cpu().numpy()))

    probs = np.array(probs, dtype=np.float32)
    preds = np.argmax(probs, axis=1)
    C_hat = np.zeros([num_classes, num_classes])
    for i in range(len(train_data_gold.train_labels)):
        C_hat[int(np.rint(train_data_gold.train_labels[i] - num_classes)), preds[i]] += 1

    C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
    C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01  # smoothing

    return C_hat.T.astype(np.float32) 
Example #9
Source Project: pytorch-0.4-yolov3   Author: andy-yun   File: bn.py    License: MIT License 6 votes
def forward(self, x): 
        nB = x.data.size(0)
        nC = x.data.size(1)
        nH = x.data.size(2)
        nW = x.data.size(3)
        samples = nB*nH*nW
        y = x.view(nB, nC, nH*nW).transpose(1,2).contiguous().view(-1,nC)
        if self.training:
            print('forward in training mode on autograd')
            m = Variable(y.mean(0).data, requires_grad=False)
            v = Variable(y.var(0).data, requires_grad=False)
            self.running_mean = (1-self.momentum)*self.running_mean + self.momentum * m.data.view(-1)
            self.running_var = (1-self.momentum)*self.running_var + self.momentum * v.data.view(-1)
            m = m.repeat(samples, 1)
            v = v.repeat(samples, 1)*(samples-1.0)/samples
        else:
            m = Variable(self.running_mean.repeat(samples, 1), requires_grad=False)
            v = Variable(self.running_var.repeat(samples, 1), requires_grad=False)
        w = self.weight.repeat(samples, 1)
        b = self.bias.repeat(samples, 1)
        y = (y - m)/(v+self.eps).sqrt() * w + b 
        y = y.view(nB, nH*nW, nC).transpose(1,2).contiguous().view(nB, nC, nH, nW) 
        return y 
Example #10
Source Project: SQLNet   Author: xiaojunxu   File: seq2sql.py    License: BSD 3-Clause "New" or "Revised" License 6 votes
def reinforce_backward(self, score, rewards):
        agg_score, sel_score, cond_score = score

        cur_reward = rewards[:]
        eof = self.SQL_TOK.index('<END>')
        for t in range(len(cond_score[1])):
            reward_inp = torch.FloatTensor(cur_reward).unsqueeze(1)
            if self.gpu:
                reward_inp = reward_inp.cuda()
            cond_score[1][t].reinforce(reward_inp)

            for b in range(len(rewards)):
                if cond_score[1][t][b].data.cpu().numpy()[0] == eof:
                    cur_reward[b] = 0
        torch.autograd.backward(cond_score[1], [None for _ in cond_score[1]])
        return 
Example #11
Source Project: RAdam   Author: LiyuanLucasLiu   File: train_1bw.py    License: Apache License 2.0 6 votes
def evaluate(data_loader, lm_model, criterion, limited = 76800):
    print('evaluating')
    lm_model.eval()

    iterator = data_loader.get_tqdm()

    lm_model.init_hidden()
    total_loss = 0
    total_len = 0
    for word_t, label_t in iterator:
        label_t = label_t.view(-1)
        tmp_len = label_t.size(0)
        output = lm_model.log_prob(word_t)
        total_loss += tmp_len * utils.to_scalar(criterion(autograd.Variable(output), label_t))
        total_len += tmp_len

        if limited >=0 and total_len > limited:
            break

    ppl = math.exp(total_loss / total_len)
    print('PPL: ' + str(ppl))

    return ppl 
Example #12
Source Project: weakalign   Author: ignacio-rocco   File: normalization.py    License: MIT License 6 votes
def normalize_image(image, forward=True, mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225]):
        im_size = image.size()
        mean=torch.FloatTensor(mean).unsqueeze(1).unsqueeze(2)
        std=torch.FloatTensor(std).unsqueeze(1).unsqueeze(2)
        if image.is_cuda:
            mean = mean.cuda()
            std = std.cuda()
        if isinstance(image,torch.autograd.variable.Variable):
            mean = Variable(mean,requires_grad=False)
            std = Variable(std,requires_grad=False)
        if forward:
            if len(im_size)==3:
                result = image.sub(mean.expand(im_size)).div(std.expand(im_size))
            elif len(im_size)==4:
                result = image.sub(mean.unsqueeze(0).expand(im_size)).div(std.unsqueeze(0).expand(im_size))
        else:
            if len(im_size)==3:
                result = image.mul(std.expand(im_size)).add(mean.expand(im_size))
            elif len(im_size)==4:
                result = image.mul(std.unsqueeze(0).expand(im_size)).add(mean.unsqueeze(0).expand(im_size))
                
        return result
Example #13
Source Project: pytorchrl   Author: nosyndicate   File: trpo.py    License: MIT License 6 votes
def pearlmutter_hvp(kl_func, all_obs, old_dist, policy, v):
    """
    TODO (ewei) add docstring here.

    Parameters
    ----------
    see docstring of finite_diff_hvp function.

    Returns
    -------
    see docstring of finite_diff_hvp function.
    """
    policy.zero_grad()
    kl_div = kl_func(policy, all_obs, old_dist)
    param_grads = torch.autograd.grad(kl_div, policy.ordered_params(),
        create_graph=True)
    flat_grad = torch.cat([grad.view(-1) for grad in param_grads])
    gradient_vector_product = torch.sum(flat_grad * Variable(v))
    hessian_vector_product = torch.autograd.grad(gradient_vector_product,
        policy.ordered_params())
    flat_hvp = torch.cat([product.contiguous().view(-1) for product in hessian_vector_product])
    return flat_hvp.data 
Example #14
Source Project: pytorchrl   Author: nosyndicate   File: actor_critic.py    License: MIT License 6 votes
def finish_episode():
    R = 0
    saved_actions = model.saved_actions
    value_loss = 0
    rewards = []
    for r in model.rewards[::-1]:
        R = r + args.gamma * R
        rewards.insert(0, R)
    rewards = torch.Tensor(rewards)
    rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    for (action, value), r in zip(saved_actions, rewards):
        reward = r - value.data[0,0]
        action.reinforce(reward)
        value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
    optimizer.zero_grad()
    final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
    gradients = [torch.ones(1)] + [None] * len(saved_actions)
    autograd.backward(final_nodes, gradients)
    optimizer.step()
    del model.rewards[:]
    del model.saved_actions[:] 
Example #15
Source Project: ACME   Author: hwang1996   File: train.py    License: GNU General Public License v3.0 6 votes
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP"""
    # Random weight term for interpolation between real and fake samples
    alpha = torch.cuda.FloatTensor(np.random.random((real_samples.size(0), 1)))
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = D(interpolates)
    fake = torch.autograd.Variable(torch.cuda.FloatTensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
    # Get gradient w.r.t. interpolates
    gradients = torch.autograd.grad(
        outputs=d_interpolates,  # critic scores on the interpolated samples
        inputs=interpolates,     # the interpolated samples themselves
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty 
Example #16
Source Project: CCNet   Author: speedinghzl   File: bn.py    License: MIT License 6 votes
def forward(self, x):
        if x.get_device() == self.devices[0]:
            # Master mode
            extra = {
                "is_master": True,
                "master_queue": self.master_queue,
                "worker_queues": self.worker_queues,
                "worker_ids": self.worker_ids
            }
        else:
            # Worker mode
            extra = {
                "is_master": False,
                "master_queue": self.master_queue,
                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
            }

        return inplace_abn_sync(x, self.weight, self.bias, autograd.Variable(self.running_mean),
                                autograd.Variable(self.running_var), extra, self.training, self.momentum, self.eps,
                                self.activation, self.slope) 
Example #17
Source Project: ncsn   Author: ermongroup   File: sliced_sm.py    License: GNU General Public License v3.0 6 votes
def sliced_score_matching(energy_net, samples, n_particles=1):
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)

    logp = -energy_net(dup_samples).sum()
    grad1 = autograd.grad(logp, dup_samples, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean() 
Example #18
def BCE_bootstrap_with_logits(input, target, ishard=False, beta=0.95, weight=None, size_average=True):
    r"""Function that measures Binary Cross Entropy between target and output
    logits with prediction consistency (bootstrapping)

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        ishard: Choose soft/hard bootstrap mode
        beta: Weight between the ``gt`` label and the prediction. The paper uses 0.8 for hard and 0.95 for soft bootstrapping
        weight (Variable, optional): a manual rescaling weight
                if provided it's repeated to match input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if ``size_average``
                is set to ``False``, the losses are instead summed
                for each minibatch. Default: ``True``

    Examples::

         >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
         >>> target = autograd.Variable(torch.FloatTensor(3).random_(2))
         >>> loss = BCE_bootstrap_with_logits(input, target)
         >>> loss.backward()
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
    input_prob = torch.sigmoid(input)
    if ishard:
        target = target * beta + (input_prob>0.5) * (1-beta)
    else:
        target = target * beta + input_prob * (1-beta)
    print(target)
    max_val = (-input).clamp(min=0)
    loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()

    if weight is not None:
        loss = loss * weight

    if size_average:
        return loss.mean()
    else:
        return loss.sum() 
Example #19
Source Project: cascade-rcnn_Pytorch   Author: guoruoqian   File: net_utils.py    License: MIT License 5 votes
def compare_grid_sample():
    # do gradcheck
    N = random.randint(1, 8)
    C = 2 # random.randint(1, 8)
    H = 5 # random.randint(1, 8)
    W = 4 # random.randint(1, 8)
    input = Variable(torch.randn(N, C, H, W).cuda(), requires_grad=True)
    input_p = input.clone().data.contiguous()

    grid = Variable(torch.randn(N, H, W, 2).cuda(), requires_grad=True)
    grid_clone = grid.clone().contiguous()

    out_official = F.grid_sample(input, grid)
    grad_outputs = Variable(torch.rand(out_official.size()).cuda())
    grad_outputs_clone = grad_outputs.clone().contiguous()
    grad_inputs = torch.autograd.grad(out_official, (input, grid), grad_outputs.contiguous())
    grad_input_off = grad_inputs[0]


    crf = RoICropFunction()
    grid_yx = torch.stack([grid_clone.data[:,:,:,1], grid_clone.data[:,:,:,0]], 3).contiguous().cuda()
    out_stn = crf.forward(input_p, grid_yx)
    grad_inputs = crf.backward(grad_outputs_clone.data)
    grad_input_stn = grad_inputs[0]
    pdb.set_trace()

    delta = (grad_input_off.data - grad_input_stn).sum() 
Example #20
Source Project: prunnable-layers-pytorch   Author: alexfjw   File: prunable_nn_test.py    License: GNU General Public License v3.0 5 votes
def test_getTaylorEstimates_ShouldGiveValidValueAndSize(self):
        output = self.module(self.input)
        torch.autograd.backward(output, self.upstream_gradient)

        # ensure input and output are different
        self.assertFalse(np.array_equal(self.input.data.cpu().numpy(), output.data.cpu().numpy()))

        estimates = self.module.taylor_estimates.data.cpu()
        size = estimates.size()

        # ensure sane size
        self.assertEqual(size, torch.FloatTensor(self.input_shape[1]).size())
        # ensure not zero
        self.assertFalse(np.array_equal(estimates.numpy(), torch.zeros(size).numpy())) 
Example #21
Source Project: pytorch-segmentation-toolbox   Author: speedinghzl   File: encoding.py    License: MIT License 5 votes
def allreduce(*inputs):
    """Cross GPU all reduce autograd operation for calculate mean and
    variance in SyncBN.
    """
    return AllReduce.apply(*inputs) 
Example #22
Source Project: pytorch-segmentation-toolbox   Author: speedinghzl   File: bn.py    License: MIT License 5 votes
def forward(self, x):
        return inplace_abn(x, self.weight, self.bias, autograd.Variable(self.running_mean),
                           autograd.Variable(self.running_var), self.training, self.momentum, self.eps,
                           self.activation, self.slope) 
Example #23
Source Project: yolo2-pytorch   Author: ruiminshen   File: __init__.py    License: GNU Lesser General Public License v3.0 5 votes
def loss(anchors, data, pred, threshold):
    iou = pred['iou']
    device_id = iou.get_device() if torch.cuda.is_available() else None
    rows, cols = pred['feature'].size()[-2:]
    iou_matrix, _iou, _, _data = iou_match(pred['yx_min'].data, pred['yx_max'].data, data)
    anchors = utils.ensure_device(anchors, device_id)
    positive = fit_positive(rows, cols, *(data[key] for key in 'yx_min, yx_max'.split(', ')), anchors)
    negative = ~positive & (_iou < threshold)
    _center_offset, _size_norm = fill_norm(*(_data[key] for key in 'yx_min, yx_max'.split(', ')), anchors)
    positive, negative, _iou, _center_offset, _size_norm, _cls = (torch.autograd.Variable(t) for t in (positive, negative, _iou, _center_offset, _size_norm, _data['cls']))
    _positive = torch.unsqueeze(positive, -1)
    loss = {}
    # iou
    loss['foreground'] = F.mse_loss(iou[positive], _iou[positive], size_average=False)
    loss['background'] = torch.sum(square(iou[negative]))
    # bbox
    loss['center'] = F.mse_loss(pred['center_offset'][_positive], _center_offset[_positive], size_average=False)
    loss['size'] = F.mse_loss(pred['size_norm'][_positive], _size_norm[_positive], size_average=False)
    # cls
    if 'logits' in pred:
        logits = pred['logits']
        if len(_cls.size()) > 3:
            loss['cls'] = F.mse_loss(F.softmax(logits, -1)[_positive], _cls[_positive], size_average=False)
        else:
            loss['cls'] = F.cross_entropy(logits[_positive].view(-1, logits.size(-1)), _cls[positive].view(-1))
    # normalize
    cnt = float(np.multiply.reduce(positive.size()))
    for key in loss:
        loss[key] /= cnt
    return loss, dict(iou=_iou, data=_data, positive=positive, negative=negative) 
Example #24
Source Project: yolo2-pytorch   Author: ruiminshen   File: checksum_torch.py    License: GNU Lesser General Public License v3.0 5 votes
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f))
    torch.manual_seed(args.seed)
    cache_dir = utils.get_cache_dir(config)
    model_dir = utils.get_model_dir(config)
    category = utils.get_category(config, cache_dir if os.path.exists(cache_dir) else None)
    anchors = utils.get_anchors(config)
    anchors = torch.from_numpy(anchors).contiguous()
    path, step, epoch = utils.train.load_model(model_dir)
    state_dict = torch.load(path, map_location=lambda storage, loc: storage)
    dnn = utils.parse_attr(config.get('model', 'dnn'))(model.ConfigChannels(config, state_dict), anchors, len(category))
    dnn.load_state_dict(state_dict)
    height, width = tuple(map(int, config.get('image', 'size').split()))
    tensor = torch.randn(1, 3, height, width)
    # Checksum
    for key, var in dnn.state_dict().items():
        a = var.cpu().numpy()
        print('\t'.join(map(str, [key, a.shape, utils.abs_mean(a), hashlib.md5(a.tostring()).hexdigest()])))
    output = dnn(torch.autograd.Variable(tensor, volatile=True)).data
    for key, a in [
        ('tensor', tensor.cpu().numpy()),
        ('output', output.cpu().numpy()),
    ]:
        print('\t'.join(map(str, [key, a.shape, utils.abs_mean(a), hashlib.md5(a.tostring()).hexdigest()]))) 
Example #25
Source Project: Action-Recognition   Author: Naman-ntc   File: LSTM_classifierX4_CUDA.py    License: MIT License 5 votes
def init_hidden3(self):
        # the first is the hidden h
        # the second is the cell  c
        return (autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda())) 
Example #26
Source Project: Action-Recognition   Author: Naman-ntc   File: LSTM_classifierX4_CUDA.py    License: MIT License 5 votes
def init_hidden2_1(self):
        # the first is the hidden h
        # the second is the cell  c
        return (autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda())) 
Example #27
Source Project: Action-Recognition   Author: Naman-ntc   File: LSTM_classifierX4_CUDA.py    License: MIT License 5 votes
def init_hidden2_2(self):
        # the first is the hidden h
        # the second is the cell  c
        return (autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda())) 
Example #28
Source Project: Action-Recognition   Author: Naman-ntc   File: LSTM_classifierX4_CUDA.py    License: MIT License 5 votes
def init_hidden2_3(self):
        # the first is the hidden h
        # the second is the cell  c
        return (autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda())) 
Example #29
Source Project: Action-Recognition   Author: Naman-ntc   File: LSTM_classifierX3_CUDA-xsub.py    License: MIT License 5 votes
def init_hidden3(self):
        # the first is the hidden h
        # the second is the cell  c
        return (autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda())) 
Example #30
Source Project: Action-Recognition   Author: Naman-ntc   File: LSTM_classifierX3_CUDA-xsub.py    License: MIT License 5 votes
def init_hidden2_1(self):
        # the first is the hidden h
        # the second is the cell  c
        return (autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda()),
                autograd.Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim).cuda()))