Python torch.nn.functional.selu() Examples
The following are 25 code examples of torch.nn.functional.selu(), collected from open-source projects. Each example is preceded by a reference to its original project and source file. You may also want to check out the other available functions and classes of the torch.nn.functional module.
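For reference, SELU is the scaled exponential linear unit: selu(x) = scale * (max(0, x) + min(0, alpha * (exp(x) - 1))), with alpha ≈ 1.6733 and scale ≈ 1.0507. Before the project examples, here is a minimal standalone sketch of calling it directly; the tensor values are arbitrary and chosen only for illustration:

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
y = F.selu(x)  # negatives saturate toward -scale*alpha (about -1.7581); positives are scaled by about 1.0507
print(y)       # approximately tensor([-1.5202, -0.6918,  0.0000,  0.5254,  2.1014])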
Example #1
Source File: test_nn_activations.py From numpy-ml with GNU General Public License v3.0
def test_selu_grad(N=50):
    from numpy_ml.neural_nets.activations import SELU

    N = np.inf if N is None else N

    mine = SELU()
    gold = torch_gradient_generator(F.selu)

    i = 0
    while i < N:
        n_ex = np.random.randint(1, 100)
        n_dims = np.random.randint(1, 100)
        z = random_tensor((n_ex, n_dims))
        assert_almost_equal(mine.grad(z), gold(z), decimal=6)
        print("PASSED")
        i += 1
Example #2
Source File: aac.py From doom-net-pytorch with MIT License
def forward(self, screen, variables):
    # cnn
    screen_features = F.selu(self.conv1(screen))
    screen_features = F.selu(self.conv2(screen_features))
    screen_features = F.selu(self.conv3(screen_features))
    screen_features = F.selu(self.conv4(screen_features))
    screen_features = F.selu(self.conv5(screen_features))
    screen_features = F.selu(self.conv6(screen_features))
    screen_features = screen_features.view(screen_features.size(0), -1)

    # features
    input = self.screen_features1(screen_features)
    input = self.batch_norm(input)
    input = F.selu(input)

    # action
    action = F.selu(self.action1(input))
    #action = torch.cat([action, variables], 1)
    action = self.action2(action)
    return action, input
Example #3
Source File: aac.py From doom-net-pytorch with MIT License
def forward(self, screen, variables):
    action_prob, input = super(AdvantageActorCritic, self).forward(screen, variables)
    if not self.training:
        _, action = action_prob.max(1, keepdim=True)
        return action, None

    # greedy actions
    if random.random() < 0.1:
        action = torch.LongTensor(action_prob.size(0), 1).random_(0, action_prob.size(1)).to(device)
    else:
        _, action = action_prob.max(1, keepdim=True)

    # value prediction - critic
    value = F.selu(self.value1(input))
    #value = torch.cat([value, variables], 1)
    value = self.value2(value)

    # save output for backprop
    action_prob = F.log_softmax(action_prob, dim=1)
    self.outputs.append(ModelOutput(action_prob.gather(-1, action), value))
    return action, value
Example #4
Source File: aac_depth.py From doom-net-pytorch with MIT License
def forward(self, screen, variables):
    # cnn
    screen_features = F.max_pool2d(screen, kernel_size=(20, 20), stride=(20, 20))
    screen_features = F.selu(self.conv1(screen_features))
    screen_features = F.selu(self.conv2(screen_features))
    screen_features = F.selu(self.conv3(screen_features))
    screen_features = screen_features.view(screen_features.size(0), -1)

    # features
    input = self.screen_features1(screen_features)
    input = self.batch_norm(input)
    input = F.selu(input)

    # action
    action = F.selu(self.action1(input))
    action = torch.cat([action, variables], 1)
    action = self.batch_norm_action(action)
    action = self.action2(action)
    return action, input
Example #5
Source File: aac_depth.py From doom-net-pytorch with MIT License
def forward(self, screen, variables):
    action_prob, input = super(AdvantageActorCriticDepth, self).forward(screen, variables)
    if not self.training:
        _, action = action_prob.max(1, keepdim=True)
        return action, None

    # greedy actions
    if random.random() < 0.1:
        action = torch.LongTensor(action_prob.size(0), 1).random_(0, action_prob.size(1)).to(device)
    else:
        _, action = action_prob.max(1, keepdim=True)

    # value prediction - critic
    value = F.selu(self.value1(input))
    value = torch.cat([value, variables], 1)
    value = self.batch_norm_value(value)
    value = self.value2(value)

    # save output for backprop
    action_prob = F.log_softmax(action_prob, dim=1)
    self.outputs.append(ModelOutput(action_prob.gather(-1, action), value))
    return action, value
Example #6
Source File: model.py From DeepRecommender with MIT License
def activation(input, kind):
    #print("Activation: {}".format(kind))
    if kind == 'selu':
        return F.selu(input)
    elif kind == 'relu':
        return F.relu(input)
    elif kind == 'relu6':
        return F.relu6(input)
    elif kind == 'sigmoid':
        return F.sigmoid(input)
    elif kind == 'tanh':
        return F.tanh(input)
    elif kind == 'elu':
        return F.elu(input)
    elif kind == 'lrelu':
        return F.leaky_relu(input)
    elif kind == 'swish':
        return input * F.sigmoid(input)
    elif kind == 'none':
        return input
    else:
        raise ValueError('Unknown non-linearity type')
Example #7
Source File: selu.py From onnx2keras with MIT License
def forward(self, x):
    from torch.nn import functional as F
    return F.selu(x)
Example #8
Source File: mpnn.py From deepchem with MIT License
def readout(h, h2):
    catted_reads = map(lambda x: torch.cat([h[x[0]], h2[x[1]]], 1), zip(h2.keys(), h.keys()))
    activated_reads = map(lambda x: F.selu(R(x)), catted_reads)
    readout = Variable(torch.zeros(1, 128))
    for read in activated_reads:
        readout = readout + read
    return F.tanh(readout)
Example #9
Source File: selu.py From onnx2keras with MIT License
def forward(self, x):
    x = self.selu(x)
    return x
Example #10
Source File: selu.py From onnx2keras with MIT License
def __init__(self):
    super(LayerSELUTest, self).__init__()
    self.selu = nn.SELU()
Example #11
Source File: test_pyprof_nvtx.py From apex with BSD 3-Clause "New" or "Revised" License
def test_selu(self):
    inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
    output = F.selu(inp)
Example #12
Source File: ba_tracknet.py From sanet_relocal_demo with GNU General Public License v3.0
def lambda_prediction(self, r, level):
    """
    Predict lambda weight for Levenberg-Marquardt update
    :param r: residual error with dim: (N, C, M)
    :param level: pyramid level used in this iteration, int
    :return: lambda weight, dim: (N, 6)
    """
    avg_r = torch.mean(torch.abs(r), dim=2)        # (N, C)
    lambda_fc = getattr(self, 'lambda_fc_' + str(level))
    lambda_w = F.selu(lambda_fc(avg_r)) + 2.0      # (N, 6)
    return lambda_w
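A side note on the + 2.0 offset above: since selu(x) is bounded below by -scale*alpha ≈ -1.7581, the predicted Levenberg-Marquardt damping weight stays strictly positive (above roughly 0.24) regardless of what the fully-connected layer outputs. A quick standalone check of that bound, not part of the project:

import torch
import torch.nn.functional as F

x = torch.linspace(-50.0, 50.0, steps=100001)
lam = F.selu(x) + 2.0
print(lam.min().item())  # about 0.2419, always > 0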
Example #13
Source File: ba_tracknet_mirror_b.py From sanet_relocal_demo with GNU General Public License v3.0
def lambda_prediction(self, r, level):
    """
    Predict lambda weight for Levenberg-Marquardt update
    :param r: residual error with dim: (N, C, M)
    :param level: pyramid level used in this iteration, int
    :return: lambda weight, dim: (N, 6)
    """
    avg_r = torch.mean(torch.abs(r), dim=2)        # (N, C)
    lambda_fc = getattr(self, 'lambda_fc_' + str(level))
    lambda_w = F.selu(lambda_fc(avg_r)) + 2.0      # (N, 6)
    return lambda_w
Example #14
Source File: ai.py From diffai with MIT License
def selu(self):
    return TaggedDomain(self.a.selu(), self.tag)
Example #15
Source File: ai.py From diffai with MIT License
def selu(self):
    return self.new(a.selu() for a in self.al)
Example #16
Source File: ai.py From diffai with MIT License
def selu(self):
    return self.applyMonotone(F.selu)
Example #17
Source File: hashNet.py From Triplet-deep-hash-pytorch with Apache License 2.0
def forward(self, x1, x2, y):
    # x1 = self.sm(self.fc(self.sma(x1)))
    # x2 = self.sm(self.fc(self.sma(x2)))
    # y = self.sm(self.fc(self.sma(y)))
    # x1 = self.sm(self.fc(x1))
    # x2 = self.sm(self.fc(x2))
    # y = self.sm(self.fc(y))
    x1 = F.selu(self.fc(self.sma(x1)))
    x2 = F.selu(self.fc(self.sma(x2)))
    y = F.selu(self.fc(self.sma(y)))
    return x1, x2, y
Example #18
Source File: selu.py From pytorch2keras with MIT License
def forward(self, x):
    from torch.nn import functional as F
    return F.selu(x)
Example #19
Source File: selu.py From pytorch2keras with MIT License
def forward(self, x):
    x = self.selu(x)
    return x
Example #20
Source File: selu.py From pytorch2keras with MIT License
def __init__(self):
    super(LayerTest, self).__init__()
    self.selu = nn.SELU()
Example #21
Source File: model.py From DeepMove with GNU General Public License v2.0
def forward(self, loc, tim):
    h1 = Variable(torch.zeros(1, 1, self.hidden_size))
    c1 = Variable(torch.zeros(1, 1, self.hidden_size))
    if self.use_cuda:
        h1 = h1.cuda()
        c1 = c1.cuda()

    loc_emb = self.emb_loc(loc)
    tim_emb = self.emb_tim(tim)
    x = torch.cat((loc_emb, tim_emb), 2)
    x = self.dropout(x)

    if self.rnn_type == 'GRU' or self.rnn_type == 'RNN':
        out, h1 = self.rnn(x, h1)
    elif self.rnn_type == 'LSTM':
        out, (h1, c1) = self.rnn(x, (h1, c1))
    out = out.squeeze(1)
    out = F.selu(out)
    out = self.dropout(out)

    y = self.fc(out)
    score = F.log_softmax(y, dim=1)  # log-probabilities, to be used with NLLLoss
    return score
Example #22
Source File: test_nn_activations.py From numpy-ml with GNU General Public License v3.0
def test_selu_activation(N=50):
    from numpy_ml.neural_nets.activations import SELU

    N = np.inf if N is None else N

    mine = SELU()
    gold = lambda z: F.selu(torch.FloatTensor(z)).numpy()

    i = 0
    while i < N:
        n_dims = np.random.randint(1, 100)
        z = random_stochastic_matrix(1, n_dims)
        assert_almost_equal(mine.fn(z), gold(z))
        print("PASSED")
        i += 1
Example #23
Source File: mpnn.py From deepchem with MIT License
def message_pass(g, h, k):
    for v in g.keys():
        neighbors = g[v]
        for neighbor in neighbors:
            e_vw = neighbor[0]  # feature variable
            w = neighbor[1]

            m_w = V[k](h[w])
            m_e_vw = E(e_vw)
            reshaped = torch.cat((h[v], m_w, m_e_vw), 1)
            h[v] = F.selu(U[k](reshaped))
Example #24
Source File: model.py From DeepRecommender with MIT License
def __init__(self, layer_sizes, nl_type='selu', is_constrained=True, dp_drop_prob=0.0, last_layer_activations=True):
    """
    Describes an AutoEncoder model
    :param layer_sizes: Encoder network description. Should start with feature size (e.g. dimensionality of x).
      For example: [10000, 1024, 512] will result in:
      - encoder 2 layers: 10000x1024 and 1024x512. Representation layer (z) will be 512
      - decoder 2 layers: 512x1024 and 1024x10000.
    :param nl_type: (default 'selu') Type of non-linearity
    :param is_constrained: (default: True) Should constrain decoder weights
    :param dp_drop_prob: (default: 0.0) Dropout drop probability
    :param last_layer_activations: (default: True) Whether to apply activations on last decoder layer
    """
    super(AutoEncoder, self).__init__()
    self._dp_drop_prob = dp_drop_prob
    self._last_layer_activations = last_layer_activations
    if dp_drop_prob > 0:
        self.drop = nn.Dropout(dp_drop_prob)
    self._last = len(layer_sizes) - 2
    self._nl_type = nl_type

    self.encode_w = nn.ParameterList(
        [nn.Parameter(torch.rand(layer_sizes[i + 1], layer_sizes[i])) for i in range(len(layer_sizes) - 1)])
    for ind, w in enumerate(self.encode_w):
        weight_init.xavier_uniform_(w)
    self.encode_b = nn.ParameterList(
        [nn.Parameter(torch.zeros(layer_sizes[i + 1])) for i in range(len(layer_sizes) - 1)])

    reversed_enc_layers = list(reversed(layer_sizes))

    self.is_constrained = is_constrained
    if not is_constrained:
        self.decode_w = nn.ParameterList(
            [nn.Parameter(torch.rand(reversed_enc_layers[i + 1], reversed_enc_layers[i]))
             for i in range(len(reversed_enc_layers) - 1)])
        for ind, w in enumerate(self.decode_w):
            weight_init.xavier_uniform_(w)
    self.decode_b = nn.ParameterList(
        [nn.Parameter(torch.zeros(reversed_enc_layers[i + 1])) for i in range(len(reversed_enc_layers) - 1)])

    print("******************************")
    print("******************************")
    print(layer_sizes)
    print("Dropout drop probability: {}".format(self._dp_drop_prob))
    print("Encoder pass:")
    for ind, w in enumerate(self.encode_w):
        print(w.data.size())
        print(self.encode_b[ind].size())
    print("Decoder pass:")
    if self.is_constrained:
        print('Decoder is constrained')
        for ind, w in enumerate(list(reversed(self.encode_w))):
            print(w.transpose(0, 1).size())
            print(self.decode_b[ind].size())
    else:
        for ind, w in enumerate(self.decode_w):
            print(w.data.size())
            print(self.decode_b[ind].size())
    print("******************************")
    print("******************************")
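A hypothetical instantiation matching the docstring above; the layer sizes and dropout value are illustrative, not taken from the project:

# Assumes the imports used elsewhere in model.py: torch, torch.nn as nn,
# and torch.nn.init as weight_init.
model = AutoEncoder(layer_sizes=[10000, 1024, 512],
                    nl_type='selu',
                    is_constrained=True,
                    dp_drop_prob=0.8)
# Encoder: 10000 -> 1024 -> 512; the decoder mirrors it back to 10000 and,
# because is_constrained=True, reuses the transposed encoder weights.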
Example #25
Source File: model.py From DeepMove with GNU General Public License v2.0
def forward(self, loc, tim, history_loc, history_tim, history_count, uid, target_len):
    h1 = Variable(torch.zeros(1, 1, self.hidden_size))
    c1 = Variable(torch.zeros(1, 1, self.hidden_size))
    if self.use_cuda:
        h1 = h1.cuda()
        c1 = c1.cuda()

    loc_emb = self.emb_loc(loc)
    tim_emb = self.emb_tim(tim)
    x = torch.cat((loc_emb, tim_emb), 2)
    x = self.dropout(x)

    loc_emb_history = self.emb_loc(history_loc).squeeze(1)
    tim_emb_history = self.emb_tim(history_tim).squeeze(1)
    count = 0
    loc_emb_history2 = Variable(torch.zeros(len(history_count), loc_emb_history.size()[-1])).cuda()
    tim_emb_history2 = Variable(torch.zeros(len(history_count), tim_emb_history.size()[-1])).cuda()
    for i, c in enumerate(history_count):
        if c == 1:
            tmp = loc_emb_history[count].unsqueeze(0)
        else:
            tmp = torch.mean(loc_emb_history[count:count + c, :], dim=0, keepdim=True)
        loc_emb_history2[i, :] = tmp
        tim_emb_history2[i, :] = tim_emb_history[count, :].unsqueeze(0)
        count += c

    history = torch.cat((loc_emb_history2, tim_emb_history2), 1)
    history = F.tanh(self.fc_attn(history))

    if self.rnn_type == 'GRU' or self.rnn_type == 'RNN':
        out_state, h1 = self.rnn(x, h1)
    elif self.rnn_type == 'LSTM':
        out_state, (h1, c1) = self.rnn(x, (h1, c1))
    out_state = out_state.squeeze(1)
    # out_state = F.selu(out_state)

    attn_weights = self.attn(out_state[-target_len:], history).unsqueeze(0)
    context = attn_weights.bmm(history.unsqueeze(0)).squeeze(0)
    out = torch.cat((out_state[-target_len:], context), 1)  # no need for fc_attn

    uid_emb = self.emb_uid(uid).repeat(target_len, 1)
    out = torch.cat((out, uid_emb), 1)
    out = self.dropout(out)

    y = self.fc_final(out)
    score = F.log_softmax(y, dim=1)
    return score