Python torch.nn.functional.prelu() Examples

The following are 9 code examples of torch.nn.functional.prelu(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module torch.nn.functional.
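For context, F.prelu(input, weight) computes PReLU(x) = max(0, x) + weight * min(0, x), where weight holds either a single learnable slope or one slope per channel. A minimal sketch of the call (my own example, not taken from the projects below):

import torch
import torch.nn.functional as F

x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
weight = torch.tensor([0.25])  # one slope shared across all elements
print(F.prelu(x, weight))      # tensor([-0.5000, -0.1250,  0.0000,  1.5000])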
Example #1
Source File: pytorch_to_caffe.py    From PytorchToCaffe with MIT License
def _relu(raw, input, inplace=False):
    # call the original F.relu; inplace is forced off so the input and
    # output map to distinct Caffe blobs
    x = raw(input, False)
    # register the op and its output blob with the tracer
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x
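Here raw is the original torch.nn.functional.relu, which the converter substitutes with this wrapper at trace time. A minimal, self-contained sketch of that substitution pattern (hypothetical names; the real tracer records Caffe layers rather than printing):

import torch
import torch.nn.functional as F

_raw_relu = F.relu  # keep a handle to the original implementation

def _traced_relu(input, inplace=False):
    x = _raw_relu(input, inplace=False)    # run the real op
    print('traced relu:', tuple(x.shape))  # stand-in for log.add_layer(...)
    return x

F.relu = _traced_relu        # install the wrapper
F.relu(torch.randn(2, 3))    # prints: traced relu: (2, 3)
F.relu = _raw_relu           # restore the original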
Example #2
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_prelu(self):
    inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
    weight = torch.randn(1, device='cuda', dtype=self.dtype)
    output = F.prelu(inp, weight)
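The weight tensor must contain either a single element (one slope shared by every channel) or input.size(1) elements (one slope per channel). A quick equivalence check (my own example, not from apex):

import torch
import torch.nn.functional as F

inp = torch.randn(1, 3, 32, 32)
w_shared = torch.tensor([0.25])    # one slope for all 3 channels
w_per_ch = torch.full((3,), 0.25)  # one slope per channel, same value
assert torch.equal(F.prelu(inp, w_shared), F.prelu(inp, w_per_ch))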
Example #3
Source File: pytorch_to_caffe.py    From PytorchToCaffe with MIT License
def _prelu(raw, input, weight):
    # call the original F.prelu, then record an equivalent Caffe PReLU layer
    x = raw(input, weight)
    bottom_blobs = [log.blobs(input)]
    name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = caffe_net.Layer_param(name=name, type='PReLU',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    if weight.size()[0] == 1:
        # a single slope shared across channels maps to Caffe's channel_shared
        layer.param.prelu_param.channel_shared = True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        # one learnable slope per channel
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x
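The channel_shared branch corresponds to PyTorch's default nn.PReLU(), whose weight has a single element; nn.PReLU(C) allocates one slope per channel. For illustration:

import torch.nn as nn

shared = nn.PReLU()    # weight.shape == torch.Size([1])  -> channel_shared in Caffe
per_ch = nn.PReLU(64)  # weight.shape == torch.Size([64]) -> per-channel slopes
print(shared.weight.shape, per_ch.weight.shape)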
Example #4
Source File: pytorch_emitter.py    From MMdnn with MIT License
def emit_PRelu(self, IR_node):
    # emit a line of PyTorch code that applies F.prelu with weights
    # looked up from the converted model's weight dictionary
    code = "{:<15} = F.prelu({}, torch.from_numpy(__weights_dict['{}']['weights']))".format(
        IR_node.variable_name,
        self.parent_variable_name(IR_node, [0]),
        IR_node.name)

    if self.weight_loaded:
        # PReLU slopes are stored under 'gamma' in the IR; expose them as 'weights'
        self.weights_dict[IR_node.name]['weights'] = self.weights_dict[IR_node.name]['gamma']

    return code
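For a node named, say, prelu_1 fed by conv_1 (hypothetical names), the format string above would emit a line like:

prelu_1         = F.prelu(conv_1, torch.from_numpy(__weights_dict['prelu_1']['weights']))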
Example #5
Source File: pytorch_to_caffe.py    From fast-reid with Apache License 2.0
def _relu(raw, input, inplace=False):
    # same tracing wrapper as Example #1: run the original F.relu with
    # inplace forced off, then record the op
    x = raw(input, False)
    name = log.add_layer(name='relu')
    log.add_blobs([x], name='relu_blob')
    layer = caffe_net.Layer_param(name=name, type='ReLU',
                                  bottom=[log.blobs(input)], top=[log.blobs(x)])
    log.cnet.add_layer(layer)
    return x 
Example #6
Source File: pytorch_to_caffe.py    From fast-reid with Apache License 2.0
def _prelu(raw, input, weight):
    # same PReLU tracing wrapper as Example #3
    x = raw(input, weight)
    bottom_blobs = [log.blobs(input)]
    name = log.add_layer(name='prelu')
    log.add_blobs([x], name='prelu_blob')
    layer = caffe_net.Layer_param(name=name, type='PReLU',
                                  bottom=bottom_blobs, top=[log.blobs(x)])
    if weight.size()[0] == 1:
        layer.param.prelu_param.channel_shared = True
        layer.add_data(weight.cpu().data.numpy()[0])
    else:
        layer.add_data(weight.cpu().data.numpy())
    log.cnet.add_layer(layer)
    return x 
Example #7
Source File: prelu.py    From onnx2keras with MIT License
def __init__(self, num_params=3):
    super(LayerPReLUTest, self).__init__()
    self.num_params = num_params
    self.prelu = nn.PReLU(num_params)
Example #8
Source File: prelu.py    From onnx2keras with MIT License
def forward(self, x):
    x = self.prelu(x)
    return x
Example #9
Source File: prelu.py    From onnx2keras with MIT License
def forward(self, x):
    from torch.nn import functional as F
    # sample random slopes; the numpy round-trip is equivalent to torch.rand(self.num_params)
    weights = torch.FloatTensor(torch.rand(self.num_params).numpy())
    return F.prelu(x, weight=weights)
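Examples #7 and #8 appear to come from the same onnx2keras test module, while Example #9 is presumably the forward of a separate functional-API test. Assembling the first two into one runnable sketch (imports and driver lines are my additions):

import torch
import torch.nn as nn

class LayerPReLUTest(nn.Module):
    def __init__(self, num_params=3):
        super(LayerPReLUTest, self).__init__()
        self.num_params = num_params
        self.prelu = nn.PReLU(num_params)

    def forward(self, x):
        return self.prelu(x)

model = LayerPReLUTest()
print(model(torch.randn(1, 3, 8, 8)).shape)  # torch.Size([1, 3, 8, 8])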