Python torch.nn.Flatten() Examples

The following are 30 code examples of torch.nn.Flatten(), collected from open-source projects. You can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
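Before the examples, here is a minimal, self-contained sketch of what nn.Flatten() does (the tensor shapes are arbitrary, chosen for illustration): by default it collapses every dimension except the first (batch) dimension.

import torch
import torch.nn as nn

x = torch.rand(32, 64, 7, 7)            # (batch, channels, height, width)

flatten = nn.Flatten()                  # defaults: start_dim=1, end_dim=-1
print(flatten(x).shape)                 # torch.Size([32, 3136]), i.e. 64 * 7 * 7

# start_dim/end_dim select which dimensions are collapsed
flatten_hw = nn.Flatten(start_dim=2)    # keep the batch and channel dimensions
print(flatten_hw(x).shape)              # torch.Size([32, 64, 49])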
Example #1
Source File: ResNetV2.py    From Pytorch-Networks with MIT License
def __init__(self,block,block_list):
        super(ResNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,64,7,2,3,bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        b_ = block.expansion
        self.layer_1 = self._make_layer(block,64,64*b_,block_list[0],1)
        self.layer_2 = self._make_layer(block,64*b_,128*b_,block_list[1],2)
        self.layer_3 = self._make_layer(block,128*b_,256*b_,block_list[2],2)
        self.layer_4 = self._make_layer(block,256*b_,512*b_,block_list[3],2)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(512*b_,1000),
            nn.Softmax(dim = 1),)
        self._initialization() 
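The avgpool + Flatten + Linear head above is the standard global-average-pooling classifier: AdaptiveAvgPool2d((1,1)) reduces each feature map to a single value, and nn.Flatten() turns the resulting (batch, channels, 1, 1) tensor into (batch, channels) for the Linear layer. A minimal shape check (the 2048-channel feature map is an assumed example, not taken from the code above):

import torch
import torch.nn as nn

feat = torch.rand(8, 2048, 7, 7)                     # e.g. final ResNet-50 features
head = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),   # -> (8, 2048, 1, 1)
                     nn.Flatten(),                   # -> (8, 2048)
                     nn.Linear(2048, 1000))
print(head(feat).shape)                              # torch.Size([8, 1000])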
Example #2
Source File: train_enas_imagenet.py    From autogluon with Apache License 2.0
def __init__(self, features, blocks, dropout_rate=0.2, num_classes=1000, input_size=224):
        super().__init__()
        self.features = features
        # blocks
        self.blocks = blocks
        # head
        Conv2D = get_same_padding_conv2d(input_size//32)
        self.conv_head = nn.Sequential(
            Conv2D(320, 1280, kernel_size=3, stride=2),
            nn.BatchNorm2d(1280),
            nn.ReLU(True),
        )
        # pool + fc
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.flatten = nn.Flatten()
        self._dropout = nn.Dropout(dropout_rate) if dropout_rate > 0 else None
        self.fc = nn.Linear(1280, num_classes) 
Example #3
Source File: test_gradnorm_logger.py    From catalyst with Apache License 2.0
def __init__(self, input_shape: Tuple[int]):
        super().__init__()
        assert len(input_shape) == 3
        c, h, w = input_shape
        self.conv1 = nn.Conv2d(in_channels=c, out_channels=64, kernel_size=3)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2)
        self.flatten = nn.Flatten()

        for conv in [self.conv1, self.conv2]:
            h_kernel, w_kernel = conv.kernel_size
            h_stride, w_stride = conv.stride
            c = conv.out_channels
            h, w = self.conv2d_size_out(
                size=(h, w),
                kernel_size=(h_kernel, w_kernel),
                stride=(h_stride, w_stride),
            )

        self.fc1 = nn.Linear(in_features=c * h * w, out_features=10) 
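This snippet and the next call a conv2d_size_out helper that the excerpt does not show. A plausible reconstruction from the standard convolution output-size arithmetic (an assumption; the actual catalyst helper may differ in detail):

    @staticmethod
    def conv2d_size_out(size, kernel_size, stride):
        # Hypothetical reconstruction: spatial output size of a Conv2d with
        # no padding and no dilation: out = (in - kernel) // stride + 1
        h, w = size
        h_kernel, w_kernel = kernel_size
        h_stride, w_stride = stride
        return (h - h_kernel) // h_stride + 1, (w - w_kernel) // w_stride + 1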
Example #4
Source File: test_tracer_callback.py    From catalyst with Apache License 2.0
def __init__(self, input_shape: Tuple[int]):
        """
        Args:
            input_shape (Tuple[int]): Shape of input tensor.
        """
        super().__init__()
        assert len(input_shape) == 3
        c, h, w = input_shape
        self.conv1 = nn.Conv2d(in_channels=c, out_channels=64, kernel_size=3)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=2)
        self.flatten = nn.Flatten()

        for conv in [self.conv1, self.conv2]:
            h_kernel, w_kernel = conv.kernel_size
            h_stride, w_stride = conv.stride
            c = conv.out_channels
            h, w = self.conv2d_size_out(
                size=(h, w),
                kernel_size=(h_kernel, w_kernel),
                stride=(h_stride, w_stride),
            )

        self.fc1 = nn.Linear(in_features=c * h * w, out_features=10) 
Example #5
Source File: nfsp_agent_pytorch.py    From rlcard with MIT License
def __init__(self, action_num=2, state_shape=None, mlp_layers=None):
        ''' Initialize the policy network: a stack of fully connected
        layers with ReLU activations (none on the final layer), initialized
        with Xavier weights (matching sonnet.nets.MLP and TensorFlow defaults).

        Args:
            action_num (int): number of output actions
            state_shape (list): shape of state tensor for each sample
            mlp_layers (list): output size of each mlp layer, including the final one
        '''
        super(AveragePolicyNetwork, self).__init__()

        self.action_num = action_num
        self.state_shape = state_shape
        self.mlp_layers = mlp_layers

        # set up mlp w/ relu activations
        layer_dims = [np.prod(self.state_shape)] + self.mlp_layers
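        # flatten each state tensor to (batch, prod(state_shape)) so BatchNorm1d sees the expected feature count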
        mlp = [nn.Flatten()]
        mlp.append(nn.BatchNorm1d(layer_dims[0]))
        for i in range(len(layer_dims)-1):
            mlp.append(nn.Linear(layer_dims[i], layer_dims[i+1]))
            if i != len(layer_dims) - 2: # all but final have relu
                mlp.append(nn.ReLU())
        self.mlp = nn.Sequential(*mlp) 
Example #6
Source File: dqn_agent_pytorch.py    From rlcard with MIT License
def __init__(self, action_num=2, state_shape=None, mlp_layers=None):
        ''' Initialize the Q network

        Args:
            action_num (int): number of legal actions
            state_shape (list): shape of state tensor
            mlp_layers (list): output size of each fc layer
        '''
        super(EstimatorNetwork, self).__init__()

        self.action_num = action_num
        self.state_shape = state_shape
        self.mlp_layers = mlp_layers

        # build the Q network
        layer_dims = [np.prod(self.state_shape)] + self.mlp_layers
        fc = [nn.Flatten()]
        fc.append(nn.BatchNorm1d(layer_dims[0]))
        for i in range(len(layer_dims)-1):
            fc.append(nn.Linear(layer_dims[i], layer_dims[i+1], bias=True))
            fc.append(nn.Tanh())
        fc.append(nn.Linear(layer_dims[-1], self.action_num, bias=True))
        self.fc_layers = nn.Sequential(*fc) 
Example #7
Source File: patchy_san.py    From cogdl with MIT License
def build_model(self, num_channel, num_sample, num_neighbor, num_class):
        rep1, stride1 = 4, 4
        num_filter1, num_filter2 = 16, 8
        self.conv1 = nn.Conv1d(num_channel, num_filter1, rep1, stride=stride1, groups=1)
        self.conv2 = nn.Conv1d(num_filter1, num_filter2, num_neighbor, stride=1, groups=1)
        
        num_lin = (int(num_sample * num_neighbor / stride1) - num_neighbor + 1) * num_filter2
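        # flattened size after nn.Flatten(): conv1 (kernel == stride) yields length
        # ~num_sample * num_neighbor / stride1, conv2 (stride 1) shortens it by
        # num_neighbor - 1, times num_filter2 output channels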
        self.lin1 = torch.nn.Linear(num_lin, 128)
        self.lin2 = torch.nn.Linear(128, num_class)
        
        self.nn = nn.Sequential(
            self.conv1,
            nn.ReLU(),
            self.conv2,
            nn.ReLU(),
            nn.Flatten(),
            self.lin1,
            nn.ReLU(),
            nn.Dropout(0.2),
            self.lin2,
            nn.Softmax(dim=1),
        )
        
        self.criterion = nn.CrossEntropyLoss()
        # self.criterion = nn.NLLLoss() 
Example #8
Source File: ppo_atari_visual.py    From cleanrl with MIT License
def __init__(self, frames=4):
        super(Agent, self).__init__()
        self.network = nn.Sequential(
            Scale(1/255),
            layer_init(nn.Conv2d(frames, 32, 8, stride=4)),
            nn.ReLU(),
            layer_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            layer_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.ReLU(),
            nn.Flatten(),
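            # 3136 = 64 * 7 * 7: the conv stack's output size for the standard 84x84 Atari frame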
            layer_init(nn.Linear(3136, 512)),
            nn.ReLU()
        )
        self.actor = layer_init(nn.Linear(512, envs.action_space.n), std=0.01)
        self.critic = layer_init(nn.Linear(512, 1), std=1) 
Example #9
Source File: c51_atari.py    From cleanrl with MIT License
def __init__(self, frames=4, n_atoms=51, v_min=-10, v_max=10):
        super(QNetwork, self).__init__()
        self.n_atoms = n_atoms
        self.atoms = torch.linspace(v_min, v_max, steps=n_atoms).to(device)
        self.network = nn.Sequential(
            Scale(1/255),
            nn.Conv2d(frames, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
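            # n_atoms logits per action, to be reshaped to (batch, n_actions, n_atoms) and normalized over atoms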
            nn.Linear(512, env.action_space.n * n_atoms)
        ) 
Example #10
Source File: ppo_atari.py    From cleanrl with MIT License
def __init__(self, frames=4):
        super(Agent, self).__init__()
        self.network = nn.Sequential(
            Scale(1/255),
            layer_init(nn.Conv2d(frames, 32, 8, stride=4)),
            nn.ReLU(),
            layer_init(nn.Conv2d(32, 64, 4, stride=2)),
            nn.ReLU(),
            layer_init(nn.Conv2d(64, 64, 3, stride=1)),
            nn.ReLU(),
            nn.Flatten(),
            layer_init(nn.Linear(3136, 512)),
            nn.ReLU()
        )
        self.actor = layer_init(nn.Linear(512, envs.action_space.n), std=0.01)
        self.critic = layer_init(nn.Linear(512, 1), std=1) 
Example #11
Source File: MobileNet.py    From Pytorch-Networks with MIT License
def __init__(self,):
        super(MobileNet_V1,self).__init__()
        self.conv = nn.Sequential(BasicConv(3,32,3,2,1),
             DPConv(32,64,1),
             DPConv(64,128,2),
             DPConv(128,128,1),
             DPConv(128,256,2),
             DPConv(256,256,1),
             DPConv(256,512,2),

             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),
             DPConv(512,512,1),

             DPConv(512,1024,2),
             DPConv(1024,1024,1),)
        
        self.final = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(1024,1000),
            nn.Softmax(dim=1)
        ) 
Example #12
Source File: ResNeXt2016.py    From Pytorch-Networks with MIT License
def __init__(self,block,block_list,cardinality):
        super(ResNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,64,7,2,3,bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        b_ = block.expansion
        self.layer_1 = self._make_layer(block,64,128*b_,block_list[0],1,cardinality)
        self.layer_2 = self._make_layer(block,128*b_,256*b_,block_list[1],2,cardinality)
        self.layer_3 = self._make_layer(block,256*b_,512*b_,block_list[2],2,cardinality)
        self.layer_4 = self._make_layer(block,512*b_,1024*b_,block_list[3],2,cardinality)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1024*b_,1000),
            nn.Softmax(dim = 1),)
        self._initialization() 
Example #13
Source File: DenseNet2016.py    From Pytorch-Networks with MIT License
def __init__(self,k,block_list,num_init_features=64, bn_size=4, 
                 drop_rate=0, memory_efficient=False):
        super(DenseNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,num_init_features,7,2,3,bias=False),
            nn.BatchNorm2d(num_init_features),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        self.dense_body, self.final_channels = self._make_layers(num_init_features,
                                  bn_size,block_list,k,drop_rate, memory_efficient)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(self.final_channels,1000),
            nn.Softmax(dim = 1),)
        self._initialization() 
Example #14
Source File: EfficientNet2019.py    From Pytorch-Networks with MIT License
def __init__(self,in_dim,ratio):
        super(_SElayer,self).__init__()
        self.gap = nn.AdaptiveAvgPool2d((1,1))
        reduced_dim = max(1, in_dim//ratio)
        self.fc1 = nn.Sequential(nn.Flatten(),
                   nn.Linear(in_dim, reduced_dim),
                   Swish(),
                   nn.Linear(reduced_dim, in_dim),
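                   # channel gate; note the original squeeze-and-excitation block uses a sigmoid here, while this variant softmaxes across channels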
                   nn.Softmax(dim=1),) 
Example #15
Source File: Darknet2016.py    From Pytorch-Networks with MIT License
def __init__(self,block_config):
        super(_DarkNet,self).__init__()
        self.headconv = nn.Sequential(BasicConv(3,32,3,1,1))
        self.in_dim = 32     
        self.layers = self._make_layers(block_config)
        self.final = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(1024,1000),
            nn.Softmax(dim=1)
        ) 
Example #16
Source File: darknet.py    From Holocron with MIT License
def __init__(self, layout, num_classes=10, norm_layer=None):

        super().__init__(OrderedDict([
            ('features', DarknetBodyV3(layout, norm_layer)),
            ('global_pool', nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())),
            ('classifier', nn.Linear(layout[4][-1], num_classes))]))

        init_module(self, 'leaky_relu') 
Example #17
Source File: darknet.py    From Holocron with MIT License
def __init__(self, layout, num_classes=10, norm_layer=None):

        super().__init__(OrderedDict([
            ('features', DarknetBodyV2(layout, norm_layer)),
            ('classifier', conv1x1(layout[-1][0], num_classes)),
            ('global_pool', nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten()))]))

        init_module(self, 'leaky_relu') 
Example #18
Source File: darknet.py    From Holocron with MIT License
def __init__(self, layout, num_classes=10, norm_layer=None):

        super().__init__(OrderedDict([
            ('features', DarknetBodyV1(layout)),
            ('global_pool', nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten())),
            ('classifier', nn.Linear(layout[2][-1], num_classes))]))

        init_module(self, 'leaky_relu') 
Example #19
Source File: yolo.py    From Holocron with MIT License
def __init__(self, layout, num_classes=20, num_anchors=2, lambda_noobj=0.5, lambda_coords=5.):

        super().__init__()

        self.backbone = DarknetBodyV1(layout)

        self.block4 = nn.Sequential(
            conv3x3(1024, 1024),
            nn.LeakyReLU(inplace=True),
            conv3x3(1024, 1024, stride=2),
            nn.LeakyReLU(inplace=True),
            conv3x3(1024, 1024),
            nn.LeakyReLU(inplace=True),
            conv3x3(1024, 1024),
            nn.LeakyReLU(inplace=True))

        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1024 * 7 ** 2, 4096),
            nn.LeakyReLU(inplace=True),
            nn.Linear(4096, 7 ** 2 * (num_anchors * 5 + num_classes)))
        self.num_anchors = num_anchors
        self.num_classes = num_classes
        # Loss coefficients
        self.lambda_noobj = lambda_noobj
        self.lambda_coords = lambda_coords

        init_module(self, 'leaky_relu') 
Example #20
Source File: mobilenetv3.py    From pytorch-image-models with Apache License 2.0
def as_sequential(self):
        layers = [self.conv_stem, self.bn1, self.act1]
        layers.extend(self.blocks)
        layers.extend([self.global_pool, self.conv_head, self.act2])
        layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers) 
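In this and the similar as_sequential() examples below, the method rebuilds the model as one flat nn.Sequential; nn.Flatten() stands in for the tensor reshape that the model's regular forward() performs between global pooling and the classifier.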
Example #21
Source File: efficientnet.py    From pytorch-image-models with Apache License 2.0
def as_sequential(self):
        layers = [self.conv_stem, self.bn1, self.act1]
        layers.extend(self.blocks)
        layers.extend([self.conv_head, self.bn2, self.act2, self.global_pool])
        layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers) 
Example #22
Source File: resnet.py    From deeplab-pytorch with MIT License
def __init__(self, n_classes, n_blocks):
        super(ResNet, self).__init__()
        ch = [64 * 2 ** p for p in range(6)]
        self.add_module("layer1", _Stem(ch[0]))
        self.add_module("layer2", _ResLayer(n_blocks[0], ch[0], ch[2], 1, 1))
        self.add_module("layer3", _ResLayer(n_blocks[1], ch[2], ch[3], 2, 1))
        self.add_module("layer4", _ResLayer(n_blocks[2], ch[3], ch[4], 2, 1))
        self.add_module("layer5", _ResLayer(n_blocks[3], ch[4], ch[5], 2, 1))
        self.add_module("pool5", nn.AdaptiveAvgPool2d(1))
        self.add_module("flatten", nn.Flatten())
        self.add_module("fc", nn.Linear(ch[5], n_classes)) 
Example #23
Source File: ShuffleNet.py    From Pytorch-Networks with MIT License
def __init__(self,block_config,groups):
        super(_ShuffleNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(3,24,3,2,1,bias=False),
            nn.BatchNorm2d(24),
            nn.ReLU(inplace=True),)
        self.maxpool_1 = nn.MaxPool2d(3,2,1)
        self.layer_1 = self._make_layer(24,block_config[0][1],block_config[0][0],groups)
        self.layer_2 = self._make_layer(block_config[0][1],block_config[1][1],block_config[1][0],groups)
        self.layer_3 = self._make_layer(block_config[1][1],block_config[2][1],block_config[2][0],groups)
        self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
        self.fc_1 = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1536,1000),
            nn.Softmax(dim = 1),) 
Example #24
Source File: dqn_atari_visual.py    From cleanrl with MIT License
def __init__(self, frames=4):
        super(QNetwork, self).__init__()
        self.network = nn.Sequential(
            Scale(1/255),
            nn.Conv2d(frames, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
            Linear0(512, env.action_space.n)
        ) 
Example #25
Source File: dqn_atari.py    From cleanrl with MIT License
def __init__(self, frames=4):
        super(QNetwork, self).__init__()
        self.network = nn.Sequential(
            Scale(1/255),
            nn.Conv2d(frames, 32, 8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, 4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, stride=1),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(3136, 512),
            nn.ReLU(),
            nn.Linear(512, env.action_space.n)
        ) 
Example #26
Source File: test_balance.py    From torchgpipe with Apache License 2.0
def test_balance_by_time_loop_resets_input():
    # nn.Flatten was introduced in PyTorch 1.2.0, so the test defines a local shim.
    class Flatten(nn.Module):
        def forward(self, x):
            return x.flatten(1)

    model = nn.Sequential(nn.Conv2d(3, 2, 1), Flatten(), nn.Linear(128, 10))
    sample = torch.rand(10, 3, 8, 8)
    balance = balance_by_time(2, model, sample, device='cpu')
    assert balance == [1, 2] 
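On PyTorch 1.2.0 and later, the local Flatten shim above is interchangeable with nn.Flatten(), whose default start_dim=1 matches x.flatten(1). A quick equivalence check (shapes mirror the test's conv output):

import torch
import torch.nn as nn

x = torch.rand(10, 2, 8, 8)                         # shape of the conv output above
assert torch.equal(nn.Flatten()(x), x.flatten(1))   # both yield shape (10, 128)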
Example #27
Source File: mobilenetv3.py    From gen-efficientnet-pytorch with Apache License 2.0
def as_sequential(self):
        layers = [self.conv_stem, self.bn1, self.act1]
        layers.extend(self.blocks)
        layers.extend([
            self.global_pool, self.conv_head, self.act2,
            nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier])
        return nn.Sequential(*layers) 
Example #28
Source File: MnasNet2018.py    From Pytorch-Networks with MIT License
def __init__(self,in_dim,ratio):
        super(_SElayer,self).__init__()
        self.gap = nn.AdaptiveAvgPool2d((1,1))
        reduced_dim = max(1, in_dim//ratio)
        self.fc1 = nn.Sequential(nn.Flatten(),
                   nn.Linear(in_dim, reduced_dim),
                   nn.ReLU(inplace=True),
                   nn.Linear(reduced_dim, in_dim),
                   nn.Softmax(dim=1),) 
Example #29
Source File: MnasNet2018.py    From Pytorch-Networks with MIT License
def __init__(self):
        super(MnasNet_A1,self).__init__()
        self.HeadConv = _Conv(3,32,3,2,1)
        self.Seq_1 = _SepConv(32,16,3)

        self.MBConv6_1 = _MBConv(16,24,6,3,2)
        self.MBConv6_2 = _MBConv(24,24,6,3,1)

        self.MBConv3_1 = _MBConv(24,40,3,5,2,4)
        self.MBConv3_2 = _MBConv(40,40,3,5,1,4)
        self.MBConv3_3 = _MBConv(40,40,3,5,1,4)

        self.MBConv6_3 = _MBConv(40,80,6,3,2)
        self.MBConv6_4 = _MBConv(80,80,6,3,1)
        self.MBConv6_5 = _MBConv(80,80,6,3,1)
        self.MBConv6_6 = _MBConv(80,80,6,3,1)

        self.MBConv6_7 = _MBConv(80,112,6,3,1,4)
        self.MBConv6_8 = _MBConv(112,112,6,3,1,4)

        self.MBConv6_9 = _MBConv(112,160,6,5,2,4)
        self.MBConv6_10 = _MBConv(160,160,6,5,1,4)
        self.MBConv6_11 = _MBConv(160,160,6,5,1,4)
        
        self.MBConv6_12 = _MBConv(160,320,6,3,1)

        self.logits = nn.Sequential(
            nn.AdaptiveAvgPool2d((1,1)),
            nn.Flatten(),
            nn.Linear(320,1000),
            nn.Softmax(dim=1)) 
Example #30
Source File: ResNet2015.py    From Pytorch-Networks with MIT License
def __init__(self, cfg, logger):
        '''
        block, BLOCK_LIST, in_dim, 
        class_num, BASE=64, use_fc=True, CONV1=(7,2,3),
        MAX_POOL=True, pretrained=False
        '''
        super(ResNet,self).__init__()
        self.head_conv = nn.Sequential(
            nn.Conv2d(cfg.IN_DIM,cfg.BASE,cfg.CONV1[0],cfg.CONV1[1],cfg.CONV1[2],bias=False),
            nn.BatchNorm2d(cfg.BASE),
            nn.ReLU(inplace=True),)
        if cfg.MAX_POOL:
            self.maxpool_1 = nn.MaxPool2d(3,2,1)
        else:
            self.maxpool_1 = nn.Sequential()
        block = BottleNeck if cfg.BLOCK == 'bottleneck' else BasicBlock
        b_ = block.expansion
        self.layer_1 = self._make_layer(block,cfg.BASE,cfg.BASE*b_,cfg.BLOCK_LIST[0],cfg.STRIDE1,cfg.OPERATION)
        self.layer_2 = self._make_layer(block,cfg.BASE*b_,cfg.BASE*2*b_,cfg.BLOCK_LIST[1],2,cfg.OPERATION)
        self.layer_3 = self._make_layer(block,cfg.BASE*2*b_,cfg.BASE*4*b_,cfg.BLOCK_LIST[2],2,cfg.OPERATION)
        self.layer_4 = self._make_layer(block,cfg.BASE*4*b_,cfg.BASE*8*b_,cfg.BLOCK_LIST[3],2,cfg.OPERATION)

        final_feature = cfg.BASE*4*b_ if cfg.BLOCK_LIST[3] == 0 else cfg.BASE*8*b_
        if cfg.USE_FC:
            self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
            self.fc_1 = nn.Sequential(
                nn.Flatten(),
                nn.Linear(final_feature,cfg.CLASS_NUM),)
        else:
            self.avgpool_1 = nn.Sequential()
            self.fc_1 = nn.Sequential()
        if cfg.DROPOUT > 0: 
            self.dropout = nn.Dropout(p=cfg.DROPOUT)
        else:
            self.dropout = nn.Sequential()
        self.logger = logger
        self.pretrained = cfg.PRETRAINED
        self._initialization()