Python mxnet.ndarray.relu() Examples
The following are 15 code examples of mxnet.ndarray.relu(), collected from open-source projects. Each example notes the source file, project, and license it was taken from. You may also want to check out all available functions/classes of the module mxnet.ndarray.
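Before the project examples, here is a minimal, self-contained sketch of what mxnet.ndarray.relu() does when called directly on an NDArray (the input values are made up for illustration):

```python
import mxnet as mx

# A small NDArray with negative and positive entries (values are illustrative).
x = mx.nd.array([[-2.0, -0.5],
                 [0.5, 2.0]])

# relu() is the element-wise rectifier max(0, x): negatives become zero,
# positives pass through unchanged.
y = mx.nd.relu(x)
print(y)  # [[0.  0. ], [0.5 2. ]]
```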
Example #1
Source File: icnet.py From gluon-cv with Apache License 2.0
```python
def demo(self, x_low, x_high):
    self._up_kwargs['height'] = x_high.shape[2]
    self._up_kwargs['width'] = x_high.shape[3]

    import mxnet.ndarray as F
    x_low = F.contrib.BilinearResize2D(x_low, height=self._up_kwargs['height'],
                                       width=self._up_kwargs['width'])
    x_low = self.conv_low(x_low)
    x_high = self.conv_hign(x_high)
    x = x_low + x_high
    x = F.relu(x)

    x_low_cls = self.conv_low_cls(x_low)
    return x, x_low_cls
```
Example #2
Source File: oth_icnet.py From imgclsmob with MIT License
```python
def predict(self, x):
    import mxnet.ndarray as F

    x_sub1_out = self.conv_sub1(x)

    x_sub2 = F.contrib.BilinearResize2D(x, height=x.shape[2] // 2,
                                        width=x.shape[3] // 2)
    x = self.conv1(x_sub2)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x_sub2_out = self.layer2(x)

    x_sub4 = F.contrib.BilinearResize2D(x_sub2_out, height=x_sub2_out.shape[2] // 2,
                                        width=x_sub2_out.shape[3] // 2)
    x = self.layer3(x_sub4)
    x = self.layer4(x)
    x_sub4_out = self.psp_head(x)

    x_sub4_out = self.conv_sub4(x_sub4_out)
    x_sub2_out = self.conv_sub2(x_sub2_out)

    res = self.head(x_sub1_out, x_sub2_out, x_sub4_out)
    return res[0]
```
Example #3
Source File: entity_classify.py From dgl with Apache License 2.0
```python
def build_input_layer(self):
    return RelGraphConv(self.num_nodes, self.h_dim, self.num_rels, "basis",
                        self.num_bases, activation=F.relu,
                        self_loop=self.use_self_loop, dropout=self.dropout)
```
Example #4
Source File: entity_classify.py From dgl with Apache License 2.0
```python
def build_hidden_layer(self, idx):
    return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, "basis",
                        self.num_bases, activation=F.relu,
                        self_loop=self.use_self_loop, dropout=self.dropout)
```
Example #5
Source File: icnet.py From gluon-cv with Apache License 2.0
```python
def hybrid_forward(self, F, x):
    # large resolution branch
    x_sub1_out = self.conv_sub1(x)

    # medium resolution branch
    x_sub2 = F.contrib.BilinearResize2D(x, height=self._up_kwargs['height'] // 2,
                                        width=self._up_kwargs['width'] // 2)
    x = self.conv1(x_sub2)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x_sub2_out = self.layer2(x)

    # small resolution branch
    x_sub4 = F.contrib.BilinearResize2D(x_sub2_out, height=self._up_kwargs['height'] // 32,
                                        width=self._up_kwargs['width'] // 32)
    x = self.layer3(x_sub4)
    x = self.layer4(x)
    x_sub4_out = self.psp_head(x)

    # reduce conv
    x_sub4_out = self.conv_sub4(x_sub4_out)
    x_sub2_out = self.conv_sub2(x_sub2_out)

    # ICNet head
    res = self.head(x_sub1_out, x_sub2_out, x_sub4_out)
    return res
```
Example #6
Source File: icnet.py From gluon-cv with Apache License 2.0
```python
def predict(self, x):
    h, w = x.shape[2:]
    self._up_kwargs['height'] = h
    self._up_kwargs['width'] = w

    import mxnet.ndarray as F
    x_sub1_out = self.conv_sub1(x)

    x_sub2 = F.contrib.BilinearResize2D(x, height=self._up_kwargs['height'] // 2,
                                        width=self._up_kwargs['width'] // 2)
    x = self.conv1(x_sub2)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x_sub2_out = self.layer2(x)

    x_sub4 = F.contrib.BilinearResize2D(x_sub2_out, height=self._up_kwargs['height'] // 32,
                                        width=self._up_kwargs['width'] // 32)
    x = self.layer3(x_sub4)
    x = self.layer4(x)
    x_sub4_out = self.psp_head.demo(x)

    x_sub4_out = self.conv_sub4(x_sub4_out)
    x_sub2_out = self.conv_sub2(x_sub2_out)

    res = self.head.demo(x_sub1_out, x_sub2_out, x_sub4_out)
    return res[0]
```
Example #7
Source File: icnet.py From gluon-cv with Apache License 2.0
```python
def hybrid_forward(self, F, x_low, x_high):
    x_low = F.contrib.BilinearResize2D(x_low, height=self._up_kwargs['height'],
                                       width=self._up_kwargs['width'])
    x_low = self.conv_low(x_low)
    x_high = self.conv_hign(x_high)
    x = x_low + x_high
    x = F.relu(x)

    x_low_cls = self.conv_low_cls(x_low)
    return x, x_low_cls
```
Example #8
Source File: icnet.py From gluon-cv with Apache License 2.0
```python
def __init__(self, in_planes, out_planes, ksize, stride=1, pad=0, dilation=1,
             groups=1, has_bn=True, norm_layer=nn.BatchNorm, bn_eps=1e-5,
             has_relu=True, has_bias=False, **kwargs):
    super(ConvBnRelu, self).__init__()
    with self.name_scope():
        self.conv = nn.Conv2D(in_channels=in_planes, channels=out_planes,
                              kernel_size=ksize, padding=pad, strides=stride,
                              dilation=dilation, groups=groups, use_bias=has_bias)
        self.has_bn = has_bn
        self.has_relu = has_relu
        if self.has_bn:
            self.bn = norm_layer(in_channels=out_planes, epsilon=bn_eps)
        if self.has_relu:
            self.relu = nn.Activation('relu')
```
Example #9
Source File: icnet.py From gluon-cv with Apache License 2.0
```python
def demo(self, x):
    x = self.conv(x)
    if self.has_bn:
        x = self.bn(x)
    if self.has_relu:
        x = self.relu(x)
    return x
```
Example #10
Source File: custom_layers.py From d-SNE with Apache License 2.0
```python
def hybrid_forward(self, F, fts, ys, ftt, yt):
    """
    Semantic Alignment Loss
    :param F: Function
    :param yt: label for the target domain [N]
    :param ftt: features for the target domain [N, K]
    :param ys: label for the source domain [M]
    :param fts: features for the source domain [M, K]
    :return:
    """
    if self._fn:
        # Normalize ft
        fts = F.L2Normalization(fts, mode='instance')
        ftt = F.L2Normalization(ftt, mode='instance')

    fts_rpt = F.broadcast_to(fts.expand_dims(axis=0),
                             shape=(self._bs_tgt, self._bs_src, self._embed_size))
    ftt_rpt = F.broadcast_to(ftt.expand_dims(axis=1),
                             shape=(self._bs_tgt, self._bs_src, self._embed_size))

    dists = F.sum(F.square(ftt_rpt - fts_rpt), axis=2)

    yt_rpt = F.broadcast_to(yt.expand_dims(axis=1),
                            shape=(self._bs_tgt, self._bs_src)).astype('int32')
    ys_rpt = F.broadcast_to(ys.expand_dims(axis=0),
                            shape=(self._bs_tgt, self._bs_src)).astype('int32')

    y_same = F.equal(yt_rpt, ys_rpt).astype('float32')
    y_diff = F.not_equal(yt_rpt, ys_rpt).astype('float32')

    intra_cls_dists = dists * y_same
    inter_cls_dists = dists * y_diff

    max_dists = F.max(dists, axis=1, keepdims=True)
    max_dists = F.broadcast_to(max_dists, shape=(self._bs_tgt, self._bs_src))
    revised_inter_cls_dists = F.where(y_same, max_dists, inter_cls_dists)

    max_intra_cls_dist = F.max(intra_cls_dists, axis=1)
    min_inter_cls_dist = F.min(revised_inter_cls_dists, axis=1)

    loss = F.relu(max_intra_cls_dist - min_inter_cls_dist + self._margin)

    return loss
```
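In this example, F.relu serves as an element-wise hinge max(0, ·): the loss vanishes once the smallest inter-class distance exceeds the largest intra-class distance by at least the margin. A minimal, standalone sketch of that pattern, using made-up distance values, might look like this:

```python
import mxnet.ndarray as F

# Hypothetical per-sample distances, just for illustration.
max_intra_cls_dist = F.array([0.8, 0.2])  # hardest same-class distance per sample
min_inter_cls_dist = F.array([0.5, 1.5])  # easiest different-class distance per sample
margin = 1.0

# relu() acts as the hinge max(0, x): the loss is zero once the inter-class
# distance exceeds the intra-class distance by at least the margin.
loss = F.relu(max_intra_cls_dist - min_inter_cls_dist + margin)
print(loss)  # [1.3 0. ]
```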
Example #11
Source File: oth_icnet.py From imgclsmob with MIT License
```python
def hybrid_forward(self, F, x):
    # large resolution branch --> (1, 3, 480, 480)
    x_sub1_out = self.conv_sub1(x)  # --> (1, 64, 60, 60)

    # medium resolution branch --> (1, 3, 240, 240)
    x_sub2 = F.contrib.BilinearResize2D(x, height=self._up_kwargs['height'] // 2,
                                        width=self._up_kwargs['width'] // 2)
    x = self.conv1(x_sub2)  # --> (1, 128, 120, 120)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)  # --> (1, 128, 60, 60)
    x = self.layer1(x)  # --> (1, 256, 60, 60)
    x_sub2_out = self.layer2(x)  # --> (1, 512, 30, 30)

    # small resolution branch --> (1, 512, 15, 15)
    x_sub4 = F.contrib.BilinearResize2D(x_sub2_out, height=self._up_kwargs['height'] // 32,
                                        width=self._up_kwargs['width'] // 32)
    x = self.layer3(x_sub4)  # --> (1, 1024, 15, 15)
    x = self.layer4(x)  # --> (1, 2048, 15, 15)
    x_sub4_out = self.psp_head(x)  # --> (1, 512, 15, 15)

    # reduce conv
    x_sub4_out = self.conv_sub4(x_sub4_out)  # --> (1, 256, 15, 15)
    x_sub2_out = self.conv_sub2(x_sub2_out)  # --> (1, 256, 30, 30)

    # ICNet head
    res = self.head(x_sub1_out, x_sub2_out, x_sub4_out)
    return res
```
Example #12
Source File: oth_icnet.py From imgclsmob with MIT License
```python
def hybrid_forward(self, F, x_low, x_high):
    x_low = F.contrib.BilinearResize2D(x_low, height=self._up_kwargs['height'],
                                       width=self._up_kwargs['width'])
    x_low = self.conv_low(x_low)
    x_high = self.conv_hign(x_high)
    x = x_low + x_high
    x = F.relu(x)

    x_low_cls = self.conv_low_cls(x_low)
    return x, x_low_cls
```
Example #13
Source File: oth_icnet.py From imgclsmob with MIT License
```python
def demo(self, x_low, x_high):
    import mxnet.ndarray as F
    x_low = F.contrib.BilinearResize2D(x_low, height=x_high.shape[2],
                                       width=x_high.shape[3])
    x_low = self.conv_low(x_low)
    x_high = self.conv_hign(x_high)
    x = x_low + x_high
    x = F.relu(x)

    x_low_cls = self.conv_low_cls(x_low)
    return x, x_low_cls
```
Example #14
Source File: oth_icnet.py From imgclsmob with MIT License
```python
def __init__(self, in_planes, out_planes, ksize, stride=1, pad=0, dilation=1,
             groups=1, has_bn=True, norm_layer=nn.BatchNorm, bn_eps=1e-5,
             has_relu=True, has_bias=False, **kwargs):
    super(ConvBnRelu, self).__init__()
    with self.name_scope():
        self.conv = nn.Conv2D(in_channels=in_planes, channels=out_planes,
                              kernel_size=ksize, padding=pad, strides=stride,
                              dilation=dilation, groups=groups, use_bias=has_bias)
        self.has_bn = has_bn
        self.has_relu = has_relu
        if self.has_bn:
            self.bn = norm_layer(in_channels=out_planes, epsilon=bn_eps)
        if self.has_relu:
            self.relu = nn.Activation('relu')
```
Example #15
Source File: oth_icnet.py From imgclsmob with MIT License
```python
def demo(self, x):
    x = self.conv(x)
    if self.has_bn:
        x = self.bn(x)
    if self.has_relu:
        x = self.relu(x)
    return x
```