Python torch.randn() Examples
The following are 30 code examples of torch.randn().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module torch, or try the search function.
Example #1
Source File: UpdateFunction.py From nmp_qc with MIT License | 6 votes |
def init_duvenaud(self, params):
    """Initialize Duvenaud update-function parameters.

    Builds a single weight tensor H of shape (|deg|, in, out), one slice
    per non-zero node degree.
    """
    # Degree-0 nodes receive a zero message, so no update weights are
    # needed for them.
    args = {
        'deg': [d for d in params['deg'] if d != 0],
        'in': params['in'],
        'out': params['out'],
    }
    weights = torch.nn.Parameter(
        torch.randn(len(args['deg']), args['in'], args['out']))
    return nn.ParameterList([weights]), nn.ModuleList([]), args

# GG-NN, Li et al.
Example #2
Source File: googlenet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    """Run the GoogLeNet forward pass: stem, inception stages, pooled head."""
    # Stem convolutions.
    h = self.pre_layers(x)
    # Inception stage 3, then spatial downsampling.
    h = self.maxpool(self.b3(self.a3(h)))
    # Inception stage 4, then spatial downsampling.
    for stage in (self.a4, self.b4, self.c4, self.d4, self.e4):
        h = stage(h)
    h = self.maxpool(h)
    # Inception stage 5, then average pooling.
    h = self.avgpool(self.b5(self.a5(h)))
    # Flatten to (batch, features) before the classifier.
    h = h.view(h.size(0), -1)
    return self.linear(h)

# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size())
Example #3
Source File: vgg.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _make_layers(self, cfg): layers = [] in_channels = 3 for x in cfg: if x == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), nn.BatchNorm2d(x), nn.ReLU(inplace=True)] in_channels = x layers += [nn.AvgPool2d(kernel_size=1, stride=1)] return nn.Sequential(*layers) # net = VGG('VGG11') # x = torch.randn(2,3,32,32) # print(net(Variable(x)).size())
Example #4
Source File: ReadoutFunction.py From nmp_qc with MIT License | 6 votes |
def init_duvenaud(self, params):
    """Initialize Duvenaud readout parameters.

    One weight matrix W per message-passing layer (input width may differ
    per layer), plus an NNet mapping the readout to the target dimension.
    """
    args = {'out': params['out']}
    learn_args = [
        nn.Parameter(torch.randn(params['in'][layer], params['out']))
        for layer in range(params['layers'])
    ]
    # learn_modules.append(nn.Linear(params['out'], params['target']))
    learn_modules = [NNet(n_in=params['out'], n_out=params['target'])]
    return nn.ParameterList(learn_args), nn.ModuleList(learn_modules), args

# GG-NN, Li et al.
Example #5
Source File: vgg.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _make_layers(self, cfg): layers = [] in_channels = 3 for x in cfg: if x == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), nn.BatchNorm2d(x), nn.ReLU(inplace=True)] in_channels = x layers += [nn.AvgPool2d(kernel_size=1, stride=1)] return nn.Sequential(*layers) # net = VGG('VGG11') # x = torch.randn(2,3,32,32) # print(net(Variable(x)).size())
Example #6
Source File: test_backbones.py From mmdetection with Apache License 2.0 | 6 votes |
def test_res2net_backbone():
    """Smoke-test the Res2Net backbone: bad depth rejected, shapes correct."""
    with pytest.raises(KeyError):
        # Res2Net depth should be in [50, 101, 152]
        Res2Net(depth=18)

    # Res2Net-50 with scales=4, base_width=26.
    model = Res2Net(depth=50, scales=4, base_width=26)
    for module in model.modules():
        if is_block(module):
            assert module.scales == 4
    model.init_weights()
    model.train()

    features = model(torch.randn(1, 3, 224, 224))
    expected_shapes = [
        torch.Size([1, 256, 56, 56]),
        torch.Size([1, 512, 28, 28]),
        torch.Size([1, 1024, 14, 14]),
        torch.Size([1, 2048, 7, 7]),
    ]
    assert len(features) == len(expected_shapes)
    for feat, shape in zip(features, expected_shapes):
        assert feat.shape == shape
Example #7
Source File: test_batch_consistency.py From audio with BSD 2-Clause "Simplified" License | 6 votes |
def test_batch_InverseMelScale(self):
    """Batched InverseMelScale should (approximately) match per-item runs."""
    n_mels, n_stft = 32, 5
    spec = torch.randn(2, n_mels, 32) ** 2  # power spectrogram-like input

    # Transform the single item, then tile it into a batch of 3.
    expected = torchaudio.transforms.InverseMelScale(n_stft, n_mels)(spec).repeat(3, 1, 1, 1)
    # Tile into a batch first, then transform; shape = (3, 2, n_mels, 32).
    computed = torchaudio.transforms.InverseMelScale(n_stft, n_mels)(spec.repeat(3, 1, 1, 1))

    # InverseMelScale runs SGD from random initialization, so the two paths
    # cannot match exactly; tolerance is deliberately very loose.
    self.assertEqual(computed, expected, atol=1.0, rtol=1e-5)
Example #8
Source File: test_backbones.py From mmdetection with Apache License 2.0 | 6 votes |
def test_resnext_backbone():
    """Smoke-test the ResNeXt backbone: bad depth rejected, shapes correct."""
    with pytest.raises(KeyError):
        # ResNeXt depth should be in [50, 101, 152]
        ResNeXt(depth=18)

    # ResNeXt-50 with 32 groups and base_width 4.
    model = ResNeXt(depth=50, groups=32, base_width=4)
    for module in model.modules():
        if is_block(module):
            assert module.conv2.groups == 32
    model.init_weights()
    model.train()

    features = model(torch.randn(1, 3, 224, 224))
    expected_shapes = [
        torch.Size([1, 256, 56, 56]),
        torch.Size([1, 512, 28, 28]),
        torch.Size([1, 1024, 14, 14]),
        torch.Size([1, 2048, 7, 7]),
    ]
    assert len(features) == len(expected_shapes)
    for feat, shape in zip(features, expected_shapes):
        assert feat.shape == shape
Example #9
Source File: vgg.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _make_layers(self, cfg): layers = [] in_channels = 3 for x in cfg: if x == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1), nn.BatchNorm2d(x), nn.ReLU(inplace=True)] in_channels = x layers += [nn.AvgPool2d(kernel_size=1, stride=1)] return nn.Sequential(*layers) # net = VGG('VGG11') # x = torch.randn(2,3,32,32) # print(net(Variable(x)).size())
Example #10
Source File: googlenet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    """GoogLeNet forward: stem, inception blocks 3-5 with pooling, classifier."""
    stages = (
        self.pre_layers,
        self.a3, self.b3, self.maxpool,
        self.a4, self.b4, self.c4, self.d4, self.e4, self.maxpool,
        self.a5, self.b5, self.avgpool,
    )
    out = x
    for stage in stages:
        out = stage(out)
    # Flatten all feature maps per sample before the final linear layer.
    out = out.view(out.size(0), -1)
    return self.linear(out)

# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size())
Example #11
Source File: two_stage.py From mmdetection with Apache License 2.0 | 6 votes |
def forward_dummy(self, img):
    """Used for computing network flops.

    See `mmdetection/tools/get_flops.py`. Runs the backbone, optionally the
    RPN head, and the RoI head on 1000 random dummy proposals.
    """
    outs = ()
    # backbone
    feats = self.extract_feat(img)
    # rpn (only when the detector is configured with one)
    if self.with_rpn:
        outs += (self.rpn_head(feats),)
    # 1000 random proposals, placed on the same device as the input.
    proposals = torch.randn(1000, 4).to(img.device)
    # roi head
    outs += (self.roi_head.forward_dummy(feats, proposals),)
    return outs
Example #12
Source File: hgnn.py From hgraph2graph with MIT License | 6 votes |
def translate(self, tensors, cond, num_decode, enum_root):
    """Decode ``num_decode`` outputs conditioned on ``cond``.

    Encodings are tiled to the requested batch size and concatenated with
    fresh Gaussian noise plus the condition vector before decoding.
    """
    assert enum_root
    tensors = make_cuda(tensors)
    root_vecs, tree_vecs, graph_vecs = self.encode(tensors)

    cond = cond.view(1, 1, -1)
    tree_cond = cond.expand(num_decode, tree_vecs.size(1), -1)
    graph_cond = cond.expand(num_decode, graph_vecs.size(1), -1)

    if enum_root:
        # Tile the encodings up to num_decode rows, wrapping with the remainder.
        repeat, modulo = divmod(num_decode, len(root_vecs))
        root_vecs = torch.cat([root_vecs] * repeat + [root_vecs[:modulo]], dim=0)
        tree_vecs = torch.cat([tree_vecs] * repeat + [tree_vecs[:modulo]], dim=0)
        graph_vecs = torch.cat([graph_vecs] * repeat + [graph_vecs[:modulo]], dim=0)

    # Per-sample latent noise, broadcast along the sequence dimension.
    z_tree = torch.randn(num_decode, 1, self.latent_size).expand(-1, tree_vecs.size(1), -1).cuda()
    z_graph = torch.randn(num_decode, 1, self.latent_size).expand(-1, graph_vecs.size(1), -1).cuda()
    z_tree_vecs = self.W_tree(torch.cat([tree_vecs, z_tree, tree_cond], dim=-1))
    z_graph_vecs = self.W_graph(torch.cat([graph_vecs, z_graph, graph_cond], dim=-1))
    return self.decoder.decode((root_vecs, z_tree_vecs, z_graph_vecs))
Example #13
Source File: hgnn.py From hgraph2graph with MIT License | 6 votes |
def translate(self, tensors, num_decode, enum_root, greedy=True):
    """Decode ``num_decode`` outputs from the encoded inputs.

    When ``enum_root`` is set, the encodings are tiled to the requested
    batch size before Gaussian latent noise is appended and decoded.
    """
    tensors = make_cuda(tensors)
    root_vecs, tree_vecs, graph_vecs = self.encode(tensors)
    all_smiles = []

    if enum_root:
        # Tile the encodings up to num_decode rows, wrapping with the remainder.
        repeat, modulo = divmod(num_decode, len(root_vecs))
        root_vecs = torch.cat([root_vecs] * repeat + [root_vecs[:modulo]], dim=0)
        tree_vecs = torch.cat([tree_vecs] * repeat + [tree_vecs[:modulo]], dim=0)
        graph_vecs = torch.cat([graph_vecs] * repeat + [graph_vecs[:modulo]], dim=0)

    batch_size = len(root_vecs)
    # Per-sample latent noise, broadcast along the sequence dimension.
    z_tree = torch.randn(batch_size, 1, self.latent_size).expand(-1, tree_vecs.size(1), -1).cuda()
    z_graph = torch.randn(batch_size, 1, self.latent_size).expand(-1, graph_vecs.size(1), -1).cuda()
    z_tree_vecs = self.W_tree(torch.cat([tree_vecs, z_tree], dim=-1))
    z_graph_vecs = self.W_graph(torch.cat([graph_vecs, z_graph], dim=-1))
    return self.decoder.decode((root_vecs, z_tree_vecs, z_graph_vecs), greedy=greedy)
Example #14
Source File: hgnn.py From hgraph2graph with MIT License | 6 votes |
def translate(self, tensors, cond, num_decode, enum_root):
    """Conditionally decode ``num_decode`` outputs.

    The condition vector is broadcast over the batch and sequence
    dimensions and concatenated with tiled encodings plus fresh noise.
    """
    assert enum_root
    tensors = make_cuda(tensors)
    root_vecs, tree_vecs, graph_vecs = self.encode(tensors)

    cond = cond.view(1, 1, -1)
    tree_cond = cond.expand(num_decode, tree_vecs.size(1), -1)
    graph_cond = cond.expand(num_decode, graph_vecs.size(1), -1)

    if enum_root:
        # Repeat encodings to fill num_decode rows; remainder wraps around.
        full, rest = divmod(num_decode, len(root_vecs))
        root_vecs = torch.cat([root_vecs] * full + [root_vecs[:rest]], dim=0)
        tree_vecs = torch.cat([tree_vecs] * full + [tree_vecs[:rest]], dim=0)
        graph_vecs = torch.cat([graph_vecs] * full + [graph_vecs[:rest]], dim=0)

    # Sample one latent vector per decode and broadcast over sequence steps.
    z_tree = torch.randn(num_decode, 1, self.latent_size).expand(-1, tree_vecs.size(1), -1).cuda()
    z_graph = torch.randn(num_decode, 1, self.latent_size).expand(-1, graph_vecs.size(1), -1).cuda()
    z_tree_vecs = self.W_tree(torch.cat([tree_vecs, z_tree, tree_cond], dim=-1))
    z_graph_vecs = self.W_graph(torch.cat([graph_vecs, z_graph, graph_cond], dim=-1))
    return self.decoder.decode((root_vecs, z_tree_vecs, z_graph_vecs))
Example #15
Source File: hgnn.py From hgraph2graph with MIT License | 6 votes |
def translate(self, tensors, num_decode, enum_root, greedy=True):
    """Decode ``num_decode`` outputs, optionally enumerating over roots."""
    tensors = make_cuda(tensors)
    root_vecs, tree_vecs, graph_vecs = self.encode(tensors)
    all_smiles = []

    if enum_root:
        # Repeat encodings to fill num_decode rows; remainder wraps around.
        full, rest = divmod(num_decode, len(root_vecs))
        root_vecs = torch.cat([root_vecs] * full + [root_vecs[:rest]], dim=0)
        tree_vecs = torch.cat([tree_vecs] * full + [tree_vecs[:rest]], dim=0)
        graph_vecs = torch.cat([graph_vecs] * full + [graph_vecs[:rest]], dim=0)

    batch_size = len(root_vecs)
    # Sample one latent vector per item and broadcast over sequence steps.
    z_tree = torch.randn(batch_size, 1, self.latent_size).expand(-1, tree_vecs.size(1), -1).cuda()
    z_graph = torch.randn(batch_size, 1, self.latent_size).expand(-1, graph_vecs.size(1), -1).cuda()
    z_tree_vecs = self.W_tree(torch.cat([tree_vecs, z_tree], dim=-1))
    z_graph_vecs = self.W_graph(torch.cat([graph_vecs, z_graph], dim=-1))
    return self.decoder.decode((root_vecs, z_tree_vecs, z_graph_vecs), greedy=greedy)
Example #16
Source File: googlenet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def forward(self, x):
    """GoogLeNet forward pass producing class scores for a batch of images."""
    out = self.pre_layers(x)
    # Stage 3 inception blocks followed by spatial downsampling.
    out = self.maxpool(self.b3(self.a3(out)))
    # Stage 4 inception blocks followed by spatial downsampling.
    out = self.e4(self.d4(self.c4(self.b4(self.a4(out)))))
    out = self.maxpool(out)
    # Stage 5 and average pooling.
    out = self.avgpool(self.b5(self.a5(out)))
    out = out.view(out.size(0), -1)  # flatten per sample
    return self.linear(out)

# net = GoogLeNet()
# x = torch.randn(1,3,32,32)
# y = net(Variable(x))
# print(y.size())
Example #17
Source File: 11_dropout_raw.py From deep-learning-note with MIT License | 5 votes |
def dropout(X, drop_prob):
    """Apply inverted dropout to ``X``.

    Each element is zeroed independently with probability ``drop_prob`` and
    the survivors are scaled by 1/keep_prob so the expected value is
    unchanged.

    Args:
        X: input tensor of any shape; converted to float.
        drop_prob: probability in [0, 1] of dropping each element.

    Returns:
        A new tensor with the same shape as ``X``.
    """
    X = X.float()
    assert 0 <= drop_prob <= 1
    keep_prob = 1 - drop_prob
    # Dropping everything: return zeros and avoid dividing by keep_prob == 0.
    if keep_prob == 0:
        return torch.zeros_like(X)
    # BUG FIX: the mask must be drawn from a *uniform* distribution so that
    # P(keep) == keep_prob. The original used torch.randn (standard normal),
    # which keeps elements with probability Phi(keep_prob) instead — e.g.
    # drop_prob=0 still dropped ~16% of elements.
    mask = (torch.rand(X.shape) < keep_prob).float()
    return mask * X / keep_prob
Example #18
Source File: resnet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test():
    """Smoke test: run one random CIFAR-sized batch through ResNet-18."""
    model = ResNet18()
    batch = Variable(torch.randn(1, 3, 32, 32))
    print(model(batch).size())

# test()
Example #19
Source File: 9_weight_decay_raw.py From deep-learning-note with MIT License | 5 votes |
def init_params(n_inputs=None):
    """Create freshly initialized linear-regression parameters.

    Generalized: the feature count can now be passed explicitly instead of
    being read from the module-level ``num_inputs`` constant; calling with
    no arguments behaves exactly as before.

    Args:
        n_inputs: number of input features; defaults to the module-level
            ``num_inputs`` for backward compatibility.

    Returns:
        [w, b]: w is (n_inputs, 1) Gaussian-initialized, b is a zero
        scalar; both track gradients.
    """
    if n_inputs is None:
        n_inputs = num_inputs  # module-level constant defined by the script
    w = torch.randn((n_inputs, 1), requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    return [w, b]
Example #20
Source File: test_batch_consistency.py From audio with BSD 2-Clause "Simplified" License | 5 votes |
def test_batch_Resample(self):
    """Resample on a tiled batch must equal resampling then tiling."""
    waveform = torch.randn(2, 2786)
    # Transform the single item, then tile it into a batch of 3.
    expected = torchaudio.transforms.Resample()(waveform).repeat(3, 1, 1)
    # Tile into a batch first, then transform the whole batch at once.
    computed = torchaudio.transforms.Resample()(waveform.repeat(3, 1, 1))
    self.assertEqual(computed, expected)
Example #21
Source File: kaldi_compatibility_impl.py From audio with BSD 2-Clause "Simplified" License | 5 votes |
def test_sliding_window_cmn(self):
    """sliding_window_cmn should be numerically compatible with apply-cmvn-sliding"""
    kwargs = {
        'cmn_window': 600,
        'min_cmn_window': 100,
        'center': False,
        'norm_vars': False,
    }
    features = torch.randn(40, 10, dtype=self.dtype, device=self.device)
    result = F.sliding_window_cmn(features, **kwargs)
    # Run Kaldi's reference binary on the same tensor and compare outputs.
    command = ['apply-cmvn-sliding'] + _convert_args(**kwargs) + ['ark:-', 'ark:-']
    kaldi_result = _run_kaldi(command, 'ark', features)
    self.assert_equal(result, expected=kaldi_result)
Example #22
Source File: test_batch_consistency.py From audio with BSD 2-Clause "Simplified" License | 5 votes |
def test_batch_MelScale(self):
    """MelScale on a tiled batch must equal tiling the single-item result."""
    spec = torch.randn(2, 31, 2786)
    # Single item through the transform, then tiled into a batch of 3.
    expected = torchaudio.transforms.MelScale()(spec).repeat(3, 1, 1, 1)
    # Whole batch through the transform; shape = (3, 2, 201, 1394).
    computed = torchaudio.transforms.MelScale()(spec.repeat(3, 1, 1, 1))
    self.assertEqual(computed, expected)
Example #23
Source File: test_transforms.py From audio with BSD 2-Clause "Simplified" License | 5 votes |
def test_compute_deltas(self):
    """ComputeDeltas must preserve the input spectrogram's shape."""
    n_channels = 13
    n_mfcc = n_channels * 3
    n_frames = 1021
    win_length = 2 * 7 + 1  # 7 frames of context on each side
    spec = torch.randn(n_channels, n_mfcc, n_frames)
    deltas = transforms.ComputeDeltas(win_length=win_length)(spec)
    self.assertTrue(deltas.shape == spec.shape, (deltas.shape, spec.shape))
Example #24
Source File: MessageFunction.py From nmp_qc with MIT License | 5 votes |
def init_ggnn(self, params):
    """Initialize GG-NN message-function parameters.

    One weight matrix A per edge label, stacked into a single
    (|e_label|, in, out) parameter tensor.
    """
    args = {
        'e_label': params['e_label'],
        'in': params['in'],
        'out': params['out'],
    }
    # Define a parameter matrix A for each edge label.
    edge_weights = nn.Parameter(
        torch.randn(len(params['e_label']), params['in'], params['out']))
    return nn.ParameterList([edge_weights]), nn.ModuleList([]), args

# Battaglia et al. (2016), Interaction Networks
Example #25
Source File: shufflenet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test():
    """Smoke test: ShuffleNet-G2 on one random 32x32 RGB image."""
    model = ShuffleNetG2()
    inp = Variable(torch.randn(1, 3, 32, 32))
    print(model(inp))

# test()
Example #26
Source File: densenet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test_densenet():
    """Smoke test: run one random CIFAR-sized input through densenet_cifar."""
    model = densenet_cifar()
    inp = torch.randn(1, 3, 32, 32)
    print(model(Variable(inp)))

# test_densenet()
Example #27
Source File: dcgan_generator.py From Pytorch-Project-Template with MIT License | 5 votes |
def main():
    """Sanity-check the DCGAN generator with a random latent batch."""
    config = edict(json.load(open('../../configs/dcgan_exp_0.json')))
    # Latent noise of shape (batch_size, g_input_size, 1, 1).
    noise = torch.autograd.Variable(
        torch.randn(config.batch_size, config.g_input_size, 1, 1))
    print(noise.shape)
    generator = Generator(config)
    fake = generator(noise)
    print(fake.shape)
Example #28
Source File: preact_resnet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test():
    """Smoke test: PreAct-ResNet-18 on one random CIFAR-sized batch."""
    model = PreActResNet18()
    out = model(Variable(torch.randn(1, 3, 32, 32)))
    print(out.size())

# test()
Example #29
Source File: senet.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test():
    """Smoke test: SENet-18 on one random CIFAR-sized batch."""
    model = SENet18()
    out = model(Variable(torch.randn(1, 3, 32, 32)))
    print(out.size())

# test()
Example #30
Source File: dpn.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def test():
    """Smoke test: DPN-92 on one random 32x32 RGB image."""
    model = DPN92()
    inp = Variable(torch.randn(1, 3, 32, 32))
    print(model(inp))

# test()