Python chainer.functions.relu() Examples

The following are 30 code examples of chainer.functions.relu(), collected from open-source projects. Each example notes its source file, project, and license. You may also want to check out all other available functions and classes of the module chainer.functions.
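For orientation, here is a minimal, self-contained sketch of the function itself (assuming only that Chainer and NumPy are installed): F.relu applies max(0, x) elementwise and returns a chainer.Variable. Note also that several examples below use Chainer v1-era APIs, such as the train= argument to F.dropout, which Chainer v2 and later replace with the chainer.using_config('train', ...) mechanism.

import numpy as np
import chainer.functions as F

# ReLU zeroes out negative entries, elementwise.
x = np.array([[-1.5, 0.0, 2.0]], dtype=np.float32)
y = F.relu(x)
print(y.data)  # -> [[0. 0. 2.]]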
Example #1
Source File: GoogleNet_with_loss.py    From chainer-compiler with MIT License
def forward(self, x):
        """Computes the output of the Inception module.
        Args:
            x (~chainer.Variable): Input variable.
        Returns:
            Variable: Output variable. Its array has the same spatial size and
            the same minibatch size as the input array. The channel dimension
            has size ``out1 + out3 + out5 + proj_pool``.
        """
        out1 = self.conv1(x)
        out3 = self.conv3(F.relu(self.proj3(x)))
        out5 = self.conv5(F.relu(self.proj5(x)))
        pool = self.projp(F.max_pooling_2d(
            x, 3, stride=1, pad=1))

        y = F.relu(F.concat((out1, out3, out5, pool), axis=1))
        return y 
Example #2
Source File: inception_resnet_v2.py    From nips17-adversarial-attack with MIT License
def __init__(self, scale=1.0, activation_fn=F.relu):
        super(Block8, self).__init__()
        with self.init_scope():
            self.Branch_0 = TFLoadableChain()
            with self.Branch_0.init_scope():
                self.Branch_0.Conv2d_1x1 = ConvBnRelu(192, 1)

            self.Branch_1 = TFLoadableChain()
            with self.Branch_1.init_scope():
                self.Branch_1.Conv2d_0a_1x1 = ConvBnRelu(192, 1)
                self.Branch_1.Conv2d_0b_1x3 = ConvBnRelu(224, (1, 3), pad=(0, 1))
                self.Branch_1.Conv2d_0c_3x1 = ConvBnRelu(256, (3, 1), pad=(1, 0))

                # NOTE: Conv2d_1x1 is built at the first iteration

        self.scale = scale
        self.activation_fn = activation_fn 
Example #3
Source File: inception_resnet_v2.py    From nips17-adversarial-attack with MIT License
def __init__(self, scale=1.0, activation_fn=F.relu):
        super(Block35, self).__init__()
        with self.init_scope():
            self.Branch_0 = TFLoadableChain()
            with self.Branch_0.init_scope():
                self.Branch_0.Conv2d_1x1 = ConvBnRelu(32, 1)

            self.Branch_1 = TFLoadableChain()
            with self.Branch_1.init_scope():
                self.Branch_1.Conv2d_0a_1x1 = ConvBnRelu(32, 1)
                self.Branch_1.Conv2d_0b_3x3 = ConvBnRelu(32, 3, pad=1)

            self.Branch_2 = TFLoadableChain()
            with self.Branch_2.init_scope():
                self.Branch_2.Conv2d_0a_1x1 = ConvBnRelu(32, 1)
                self.Branch_2.Conv2d_0b_3x3 = ConvBnRelu(48, 3, pad=1)
                self.Branch_2.Conv2d_0c_3x3 = ConvBnRelu(64, 3, pad=1)

                # NOTE: Conv2d_1x1 is built at the first iteration

        self.scale = scale
        self.activation_fn = activation_fn 
Example #4
Source File: spp_discriminator.py    From Semantic-Segmentation-using-Adversarial-Networks with MIT License
def __call__(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.spatial_pyramid_pooling_2d(h, 3, F.MaxPooling2D)
        h = F.tanh(self.fc4(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = F.tanh(self.fc5(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = self.fc6(h)
        return h 
Example #5
Source File: state_q_functions.py    From chainerrl with MIT License
def __call__(self, state):
        h = state
        for layer in self.hidden_layers:
            h = F.relu(layer(h))
        v = self.v(h)
        mu = self.mu(h)

        if self.scale_mu:
            mu = scale_by_tanh(mu, high=self.action_space.high,
                               low=self.action_space.low)

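        # Positive-definite matrix for the quadratic advantage term: with
        # off-diagonal outputs, build a lower-triangular (Cholesky) factor
        # and use mat = tril @ tril^T; otherwise square the diagonal only.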
        mat_diag = F.exp(self.mat_diag(h))
        if hasattr(self, 'mat_non_diag'):
            mat_non_diag = self.mat_non_diag(h)
            tril = lower_triangular_matrix(mat_diag, mat_non_diag)
            mat = F.matmul(tril, tril, transb=True)
        else:
            mat = F.expand_dims(mat_diag ** 2, axis=2)
        return QuadraticActionValue(
            mu, mat, v, min_action=self.action_space.low,
            max_action=self.action_space.high) 
Example #6
Source File: test_double_iqn.py    From chainerrl with MIT License
def make_q_func(self, env):
        obs_size = env.observation_space.low.size
        hidden_size = 64
        return iqn.StatelessRecurrentImplicitQuantileQFunction(
            psi=chainerrl.links.StatelessRecurrentSequential(
                L.Linear(obs_size, hidden_size),
                F.relu,
                L.NStepRNNTanh(1, hidden_size, hidden_size, 0),
            ),
            phi=chainerrl.links.Sequence(
                chainerrl.agents.iqn.CosineBasisLinear(32, hidden_size),
                F.relu,
            ),
            f=L.Linear(hidden_size, env.action_space.n,
                       initialW=chainer.initializers.LeCunNormal(1e-1)),
        ) 
Example #7
Source File: train_agent_chainer.py    From gym-malware with MIT License
def __init__(self, obs_size, n_actions, n_hidden_channels=[1024,256]):
        super(QFunction,self).__init__()
        net = []
        inpdim = obs_size
        for i,n_hid in enumerate(n_hidden_channels):
            net += [ ('l{}'.format(i), L.Linear( inpdim, n_hid ) ) ]
            net += [ ('norm{}'.format(i), L.BatchNormalization( n_hid ) ) ]
            net += [ ('_act{}'.format(i), F.relu ) ]
            inpdim = n_hid

        net += [('output', L.Linear( inpdim, n_actions) )]

        with self.init_scope():
            for n in net:
                if not n[0].startswith('_'):
                    setattr(self, n[0], n[1])

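        # Names starting with '_' mark plain functions (e.g. F.relu); only
        # the parameterized links are registered under init_scope above.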
        self.forward = net 
Example #8
Source File: test_iqn.py    From chainerrl with MIT License
def make_q_func(self, env):
        obs_size = env.observation_space.low.size
        hidden_size = 64
        return iqn.StatelessRecurrentImplicitQuantileQFunction(
            psi=chainerrl.links.StatelessRecurrentSequential(
                L.Linear(obs_size, hidden_size),
                F.relu,
                L.NStepRNNTanh(1, hidden_size, hidden_size, 0),
            ),
            phi=chainerrl.links.Sequence(
                chainerrl.agents.iqn.CosineBasisLinear(32, hidden_size),
                F.relu,
            ),
            f=L.Linear(hidden_size, env.action_space.n,
                       initialW=chainer.initializers.LeCunNormal(1e-1)),
        ) 
Example #9
Source File: MnihCNN_cis.py    From ssai-cnn with MIT License
def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 2, 1)
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = F.dropout(F.relu(self.fc4(h)), train=self.train)
        h = self.fc5(h)
        h = F.reshape(h, (x.data.shape[0], 3, 16, 16))
        h = self.channelwise_inhibited(h)

        if self.train:
            self.loss = F.softmax_cross_entropy(h, t, normalize=False)
            return self.loss
        else:
            self.pred = F.softmax(h)
            return self.pred 
Example #10
Source File: MnihCNN_rcis.py    From ssai-cnn with MIT License
def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 2, 1)
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = F.relu(self.fc4(h))
        h = self.fc5(h)
        h = F.reshape(h, (x.data.shape[0], 3, 16, 16))
        h = self.channelwise_inhibited(h)

        if self.train:
            self.loss = F.softmax_cross_entropy(h, t, normalize=False)
            return self.loss
        else:
            self.pred = F.softmax(h)
            return self.pred 
Example #11
Source File: train_dqn_batch_grasping.py    From chainerrl with MIT License
def __init__(self, n_actions, max_episode_steps):
        super().__init__()
        with self.init_scope():
            self.embed = L.EmbedID(max_episode_steps + 1, 3136)
            self.image2hidden = chainerrl.links.Sequence(
                L.Convolution2D(None, 32, 8, stride=4),
                F.relu,
                L.Convolution2D(None, 64, 4, stride=2),
                F.relu,
                L.Convolution2D(None, 64, 3, stride=1),
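                # Flatten conv features to 3136 = 64 x 7 x 7 (assuming the
                # standard 84x84 DQN input resolution).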
                functools.partial(F.reshape, shape=(-1, 3136)),
            )
            self.hidden2out = chainerrl.links.Sequence(
                L.Linear(None, 512),
                F.relu,
                L.Linear(None, n_actions),
                DiscreteActionValue,
            ) 
Example #12
Source File: Alex_with_loss.py    From chainer-compiler with MIT License
def forward(self, x, t):
        # def forward(self, x):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        #loss = h

        # chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss

# from https://github.com/chainer/chainer/blob/master/examples/imagenet/alex.py 
Example #13
Source File: FCN_32s.py    From ssai-cnn with MIT License
def __call__(self, x, t):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(F.relu(self.fc6(h)), ratio=0.5, train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), ratio=0.5, train=self.train)
        h = self.score_fr(h)
        h = self.upsample(h)

        return h 
Example #14
Source File: block_1d.py    From Deep_VoiceChanger with MIT License
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.relu, mode='none', bn=True, dr=None):
        super(ResBlock, self).__init__()
        initializer = chainer.initializers.GlorotUniform()
        initializer_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
        self.learnable_sc = in_channels != out_channels
        self.dr = dr
        self.bn = bn
        with self.init_scope():
            self.c1 = L.Convolution1D(in_channels,  out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            self.c2 = L.Convolution1D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
            if bn:
                self.b1 = L.BatchNormalization(out_channels)
                self.b2 = L.BatchNormalization(out_channels)
            if self.learnable_sc:
                self.c_sc = L.Convolution1D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc) 
Example #15
Source File: train_mnist.py    From chainer-compiler with MIT License
def forward(self, x):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2) 
Example #16
Source File: MLP_with_loss.py    From chainer-compiler with MIT License
def forward(self, x, t):
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        h3 = self.l3(h2)
        loss = F.softmax_cross_entropy(h3, t)
        # loss = h3
        return loss


Example #17
Source File: fcn8s.py    From Semantic-Segmentation-using-Adversarial-Networks with MIT License
def __call__(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        pool3 = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv4_1(pool3))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        pool4 = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv5_1(pool4))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.fc6(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = F.relu(self.fc7(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        score_fr = self.score_fr(h)

        upscore2 = self.upscore2(score_fr)
        score_pool4 = self.score_pool4(pool4)
        score_pool4c = f.crop_to_target(score_pool4, target=upscore2)
        fuse_pool4 = upscore2 + score_pool4c

        upscore_pool4 = self.upscore_pool4(fuse_pool4)
        score_pool3 = self.score_pool3(pool3)
        score_pool3c = f.crop_to_target(score_pool3, target=upscore_pool4)
        fuse_pool3 = upscore_pool4 + score_pool3c

        upscore8 = self.upscore8(fuse_pool3)
        score = f.crop_to_target(upscore8, target=x)

        return score 
Example #18
Source File: rec_multibp_resnet.py    From nips17-adversarial-attack with MIT License
def __call__(self, h, *h_skip):
        h = F.relu(self.b0(self.d0(h)))
        h = F.concat((h, *h_skip))
        h = F.relu(self.b1(self.c1(h)))
        h = F.relu(self.b2(self.c2(h)))

        return h 
Example #19
Source File: MnihCNN_single.py    From ssai-cnn with MIT License
def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = F.dropout(F.relu(self.fc4(h)), train=self.train)
        h = self.fc5(h)
        self.pred = F.reshape(h, (x.data.shape[0], 16, 16))

        if t is not None:
            self.loss = F.sigmoid_cross_entropy(self.pred, t, normalize=False)
            return self.loss
        else:
            self.pred = F.sigmoid(self.pred)
            return self.pred 
Example #20
Source File: fcn32s.py    From Semantic-Segmentation-using-Adversarial-Networks with MIT License
def __call__(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        h = F.relu(self.fc6(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        h = F.relu(self.fc7(h))
        h = F.dropout(h, ratio=.5, train=self.train)
        score_fr = self.score_fr(h)

        upscore = self.upscore(score_fr)
        score = f.crop_to_target(upscore, target=x)

        return score 
Example #21
Source File: rec_multibp_resnet.py    From nips17-adversarial-attack with MIT License
def __call__(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = F.relu(self.bn2(self.conv2(h)))
        h = self.bn3(self.conv3(h))

        return F.relu(h + x) 
Example #22
Source File: utils.py    From knmt with GNU General Public License v3.0
def __call__(self, x_input):
        # Flatten any extra leading axes so the linear layers see a 2-D
        # (batch, features) array.
        if len(x_input.data.shape) > 2:
            x = F.reshape(x_input, (-1, x_input.shape[-1]))
        else:
            x = x_input

        ff_output = self.lin2(F.relu(self.lin1(x)))

        # layer_reduce presumably handles the residual add and the
        # normalization of the feed-forward sublayer's output.
        norm_ff_output = self.layer_reduce(ff_output, x)

        # Restore the original shape if the input was flattened above.
        if len(x_input.data.shape) > 2:
            norm_ff_output = F.reshape(norm_ff_output, x_input.data.shape)

        return norm_ff_output

Example #23
Source File: dqn_head.py    From async-rl with MIT License
def __init__(self, n_input_channels=4, n_output_channels=256,
                 activation=F.relu, bias=0.1):
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.n_output_channels = n_output_channels

        layers = [
            L.Convolution2D(n_input_channels, 16, 8, stride=4, bias=bias),
            L.Convolution2D(16, 32, 4, stride=2, bias=bias),
            L.Linear(2592, n_output_channels, bias=bias),
        ]

        super(NIPSDQNHead, self).__init__(*layers) 
Example #24
Source File: dqn_head.py    From async-rl with MIT License
def __init__(self, n_input_channels=4, n_output_channels=512,
                 activation=F.relu, bias=0.1):
        self.n_input_channels = n_input_channels
        self.activation = activation
        self.n_output_channels = n_output_channels

        layers = [
            L.Convolution2D(n_input_channels, 32, 8, stride=4, bias=bias),
            L.Convolution2D(32, 64, 4, stride=2, bias=bias),
            L.Convolution2D(64, 64, 3, stride=1, bias=bias),
            L.Linear(3136, n_output_channels, bias=bias),
        ]

        super(NatureDQNHead, self).__init__(*layers) 
Example #25
Source File: v_function.py    From async-rl with MIT License
def __call__(self, state):
        h = state
        for layer in self[:-1]:
            h = F.relu(layer(h))
        h = self[-1](h)
        return h 
Example #26
Source File: policy.py    From async-rl with MIT License
def compute_logits(self, state):
        h = state
        for layer in self[:-1]:
            h = F.relu(layer(h))
        h = self[-1](h)
        return h 
Example #27
Source File: train_agent_chainer.py    From gym-malware with MIT License
def create_acer_agent(env):
    obs_dim = env.observation_space.shape[0]
    n_actions = env.action_space.n

    model = acer.ACERSeparateModel(
        pi=links.Sequence(
            L.Linear( obs_dim, 1024, initialW=LeCunNormal(1e-3)),
            F.relu,
            L.Linear( 1024, 512, initialW=LeCunNormal(1e-3)),
            F.relu,
            L.Linear( 512, n_actions, initialW=LeCunNormal(1e-3)),
            SoftmaxDistribution),
        q=links.Sequence(
            L.Linear( obs_dim, 1024, initialW=LeCunNormal(1e-3)),
            F.relu,
            L.Linear( 1024, 512, initialW=LeCunNormal(1e-3)),
            F.relu,
            L.Linear( 512, n_actions, initialW=LeCunNormal(1e-3)),
            DiscreteActionValue),
        )

    opt = rmsprop_async.RMSpropAsync( lr=7e-4, eps=1e-2, alpha=0.99)
    opt.setup( model )
    opt.add_hook( chainer.optimizer.GradientClipping(40) )

    replay_buffer = EpisodicReplayBuffer( 128 )
    agent = acer.ACER( model, opt, 
        gamma=0.95, # reward discount factor
        t_max=32, # update the model after this many local steps
        replay_buffer=replay_buffer,
        n_times_replay=4, # number of times experience replay is repeated for each update
        replay_start_size=64, # don't start replay unless we have this many experiences in the buffer
        disable_online_update=True, # rely only on experience buffer
        use_trust_region=True,  # enable trust region policy optimization
        trust_region_delta=0.1,  # a parameter for TRPO
        truncation_threshold=5.0, # truncate large importance weights
        beta=1e-2, # entropy regularization parameter
        phi= lambda obs: obs.astype(np.float32, copy=False) )

    return agent 
Example #28
Source File: VGG_single.py    From ssai-cnn with MIT License
def __call__(self, x, t):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2)

        h = F.relu(self.fc6(h))
        h = F.relu(self.fc7(h))
        h = self.fc8(h)

        self.pred = F.reshape(h, (x.data.shape[0], 1, 16, 16))

        if t is not None:
            self.loss = F.sigmoid_cross_entropy(self.pred, t, normalize=False)
            self.loss /= 16 * 16
            return self.loss
        else:
            self.pred = F.sigmoid(self.pred)
            return self.pred 
Example #29
Source File: MnihCNN_multi.py    From ssai-cnn with MIT License
def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 2, 1)
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = F.dropout(F.relu(self.fc4(h)), train=self.train)
        h = self.fc5(h)
        h = F.reshape(h, (x.data.shape[0], 3, 16, 16))

        if t is not None:
            self.loss = F.softmax_cross_entropy(h, t, normalize=False)
            return self.loss
        else:
            self.pred = F.softmax(h)
            return self.pred 
Example #30
Source File: cnn_model.py    From cgp-cnn with MIT License
def __call__(self, x, h, train):
        xp = chainer.cuda.get_array_module(x)
        param_num = 0
        for name, f in self.forward:
            if 'conv' in name:
                x = getattr(self, name)(x)
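                # Parameter count: out*in*kh*kw weights plus out biases
                # (W.shape is (out_ch, in_ch, kh, kw)).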
                param_num += (f.W.shape[0]*f.W.shape[2]*f.W.shape[3]*f.W.shape[1]+f.W.shape[0])
            elif 'bn' in name:
                x = getattr(self, name)(x, not train)
                param_num += x.data.shape[1]*2
            elif 'act' in name:
                x = f(x)
            else:
                print('undefined function in ResBlock __call__')
                exit(1)
        in_data = [x, h]
        # Match spatial sizes: pool the larger feature map down to the smaller.
        small_in_id, large_in_id = (0, 1) if in_data[0].shape[2] < in_data[1].shape[2] else (1, 0)
        pool_num = xp.floor(xp.log2(in_data[large_in_id].shape[2] / in_data[small_in_id].shape[2]))
        for _ in xp.arange(pool_num):
            in_data[large_in_id] = F.max_pooling_2d(in_data[large_in_id], self.pool_size, self.pool_size, 0, False)
        # Match channel counts: zero-pad the smaller feature map's channels.
        small_ch_id, large_ch_id = (0, 1) if in_data[0].shape[1] < in_data[1].shape[1] else (1, 0)
        pad_num = int(in_data[large_ch_id].shape[1] - in_data[small_ch_id].shape[1])
        tmp = in_data[large_ch_id][:, :pad_num, :, :]
        in_data[small_ch_id] = F.concat((in_data[small_ch_id], tmp * 0), axis=1)
        return (F.relu(in_data[0]+in_data[1]), param_num)

