Python tf_util.dropout() Examples

The following are 30 code examples of tf_util.dropout(), drawn from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the tf_util module.
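For orientation, here is a minimal sketch of how tf_util.dropout is typically implemented in the PointNet-family tf_util.py that these projects vendor: a thin wrapper around tf.nn.dropout, gated by the is_training boolean tensor via tf.cond. Treat the exact signature as an assumption; it may vary slightly between projects.

import tensorflow as tf

def dropout(inputs, is_training, scope, keep_prob=0.5, noise_shape=None):
    """ Dropout layer that is only active while is_training is True. """
    with tf.variable_scope(scope):
        # tf.cond selects the dropout branch during training; at inference
        # the inputs pass through unchanged (inverted dropout does the
        # 1/keep_prob scaling on the training branch).
        outputs = tf.cond(is_training,
                          lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
                          lambda: inputs)
        return outputs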
Example #1
Source File: inception_v4_io.py    From DBNet with Apache License 2.0
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Inception_V4 regression model, input is BxWxHx3, output Bx2"""
    net = get_inception(299, 299)(net)

    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')

    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)

    return net 
Example #2
Source File: pointnet2_cls_msg.py    From dfc2019 with MIT License
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}

    l0_xyz = point_cloud
    l0_points = None

    # Set abstraction layers
    l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1,0.2,0.4], [16,32,128], [[32,32,64], [64,64,128], [64,96,128]], is_training, bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2,0.4,0.8], [32,64,128], [[64,64,128], [128,128,256], [128,128,256]], is_training, bn_decay, scope='layer2')
    l3_xyz, l3_points, _ = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points 
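Whichever get_model variant is used, the dropout layers above only fire when the is_training placeholder is fed True. A minimal sketch of the usual TF1.x wiring around Example #2 (batch size, point count, and the random batch are illustrative, not taken from the project):

import numpy as np
import tensorflow as tf

BATCH_SIZE, NUM_POINT = 16, 1024
pointclouds_pl = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
is_training_pl = tf.placeholder(tf.bool, shape=())
pred, end_points = get_model(pointclouds_pl, is_training_pl)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(BATCH_SIZE, NUM_POINT, 3).astype(np.float32)
    # Training-style forward pass: dropout active.
    sess.run(pred, feed_dict={pointclouds_pl: batch, is_training_pl: True})
    # Evaluation: dropout disabled, activations pass through unchanged.
    sess.run(pred, feed_dict={pointclouds_pl: batch, is_training_pl: False})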
Example #3
Source File: pointnet2_part_seg.py    From dfc2019 with MIT License
def get_model(point_cloud, is_training, bn_decay=None):
    """ Part segmentation PointNet, input is BxNx6 (XYZ NormalX NormalY NormalZ), output Bx50 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = tf.slice(point_cloud, [0,0,0], [-1,-1,3])
    l0_points = tf.slice(point_cloud, [0,0,3], [-1,-1,3])

    # Set Abstraction layers
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=64, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Feature Propagation layers
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer2')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, tf.concat([l0_xyz,l0_points],axis=-1), l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer3')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, 50, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points 
Example #4
Source File: pointnet2_cls_ssg.py    From dfc2019 with MIT License
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net, end_points 
Example #5
Source File: densenet169_io.py    From DBNet with Apache License 2.0
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Densenet169 regression model, input is BxWxHx3, output Bx2"""
    net = get_densenet(224, 224)(net)

    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')

    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)

    return net 
Example #6
Source File: pointnet2_cls_ssg.py    From scanobjectnn with MIT License
def get_model(point_cloud, is_training, bn_decay=None, num_class=NUM_CLASSES):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Set abstraction layers
    # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4).
    # So we only use NCHW for layer 1 until this issue can be resolved.
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True)
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')

    # Fully connected layers
    net = tf.reshape(l3_points, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2')
    net = tf_util.fully_connected(net, num_class, activation_fn=None, scope='fc3')

    return net, end_points 
Example #7
Source File: densenet169_io.py    From DBNet with Apache License 2.0
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide whether to allow the number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x], axis=3, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter 
Example #8
Source File: pointconv_util.py    From JSNet with MIT License
def nonlinear_transform(data_in, mlp, scope, is_training, bn_decay=None, weight_decay=None,
                        activation_fn=tf.nn.relu, is_dist=False):
    with tf.variable_scope(scope) as sc:

        net = data_in
        l = len(mlp)
        if l > 1:
            for i, out_ch in enumerate(mlp[0:(l - 1)]):
                net = tf_util.conv2d(net, out_ch, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=True, is_training=is_training, activation_fn=tf.nn.relu,
                                     scope='nonlinear{}'.format(i), bn_decay=bn_decay,
                                     weight_decay=weight_decay, is_dist=is_dist)

                # net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp_nonlinear{}'.format(i))
        net = tf_util.conv2d(net, mlp[-1], [1, 1],
                             padding='VALID', stride=[1, 1],
                             bn=False, is_training=is_training,
                             scope='nonlinear%d' % (l - 1), bn_decay=bn_decay,
                             activation_fn=tf.nn.sigmoid, weight_decay=weight_decay, is_dist=is_dist)

    return net 
Example #9
Source File: pointconv_util.py    From JSNet with MIT License
def weight_net(xyz, hidden_units, scope, is_training, bn_decay=None, weight_decay=None,
               activation_fn=tf.nn.relu, is_dist=False):
    with tf.variable_scope(scope) as sc:
        net = xyz
        for i, num_hidden_units in enumerate(hidden_units):
            if i != len(hidden_units) - 1:
                net = tf_util.conv2d(net, num_hidden_units, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=True, is_training=is_training, activation_fn=activation_fn,
                                     scope='wconv{}'.format(i), bn_decay=bn_decay,
                                     weight_decay=weight_decay, is_dist=is_dist)
            else:
                net = tf_util.conv2d(net, num_hidden_units, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=False, is_training=is_training, activation_fn=None,
                                     scope='wconv{}'.format(i), bn_decay=bn_decay,
                                     weight_decay=weight_decay, is_dist=is_dist)
            # net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='wconv_dp{}'.format(i))
    return net 
Example #10
Source File: tp8.py    From AlignNet-3D with BSD 3-Clause "New" or "Revised" License
def get_model(pcs1, pcs2, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = pcs1.get_shape()[0].value
    end_points = {}

    with tf.variable_scope("siamese"):
        embedding_output1, center_mean1, s1_pred_center1, s2_pred_center1, s2_pred_angle_logits1 = get_embedding_net(pcs1, is_training, end_points, bn_decay)
    with tf.variable_scope("siamese", reuse=tf.AUTO_REUSE):
        embedding_output2, center_mean2, s1_pred_center2, s2_pred_center2, s2_pred_angle_logits2 = get_embedding_net(pcs2, is_training, end_points, bn_decay)
    embedding_output_combined = tf.concat([embedding_output1, embedding_output2], axis=3)

    end_points['pred_s1_pc1centers'] = s1_pred_center1
    end_points['pred_s1_pc2centers'] = s1_pred_center2
    end_points['pred_s2_pc1centers'] = s2_pred_center1
    end_points['pred_s2_pc2centers'] = s2_pred_center2
    end_points['pred_pc1angle_logits'] = s2_pred_angle_logits1
    end_points['pred_pc2angle_logits'] = s2_pred_angle_logits2

    net = tf.reshape(embedding_output_combined, [batch_size, -1])
    net = get_mlp(net, [*cfg.model.options.remaining_transform_prediction[0], 3 + cfg.model.angles.num_bins * 2], '', is_training, bn_decay, dropout=cfg.model.options.remaining_transform_prediction[1])
    end_points['pred_translations'] = net[:, :3] + (s2_pred_center2 - s2_pred_center1)
    end_points['pred_remaining_angle_logits'] = net[:, 3:]

    return end_points 
Example #11
Source File: densenet169_pn.py    From DBNet with Apache License 2.0
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide whether to allow the number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = concatenate([concat_feat, x], axis=3, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter 
Example #12
Source File: resnet152_io.py    From DBNet with Apache License 2.0
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ ResNet152 regression model, input is BxWxHx3, output Bx2"""
    net = get_resnet(224, 224)(net)

    if not add_lstm:
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc_final')

    else:
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope='fc_lstm',
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)

    return net 
Example #13
Source File: model.py    From deep_gcns with MIT License
def build_mlp_pred_block(self, fusion, num_classes):
    self.mlp_builder.bn_decay = None
    out = self.mlp_builder.build(fusion,
                                 512,
                                 scope='seg/conv1',
                                 is_training=self.is_training)
    out = self.mlp_builder.build(out,
                                 256,
                                 scope='seg/conv2',
                                 is_training=self.is_training)
    out = tf_util.dropout(out,
                          keep_prob=0.7,
                          scope='dp1',
                          is_training=self.is_training)
    self.mlp_builder.bn = False
    out = self.mlp_builder.build(out,
                                 num_classes,
                                 scope='seg/conv3',
                                 activation_fn=None)
    pred = tf.squeeze(out, [2])

    return pred 
Example #14
Source File: pointconv_util.py    From JSNet with MIT License
def weight_net_hidden(xyz, hidden_units, scope, is_training, bn_decay=None, weight_decay=None,
                      activation_fn=tf.nn.relu, is_dist=False):
    with tf.variable_scope(scope) as sc:
        net = xyz
        for i, num_hidden_units in enumerate(hidden_units):
            net = tf_util.conv2d(net, num_hidden_units, [1, 1],
                                 padding='VALID', stride=[1, 1],
                                 bn=True, is_training=is_training, activation_fn=activation_fn,
                                 scope='wconv{}'.format(i), bn_decay=bn_decay,
                                 weight_decay=weight_decay, is_dist=is_dist)

            # net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='wconv_dp{}'.format(i))
    return net 
Example #15
Source File: densenet169_pm.py    From DBNet with Apache License 2.0
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 1x1 Convolution, AveragePooling, with optional compression and dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''

    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution2D(int(nb_filter * compression), (1, 1), name=conv_name_base, use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x 
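Note the opposite conventions in play here: keras.layers.Dropout takes the probability of dropping a unit, while tf_util.dropout takes the probability of keeping one. The two lines below express the same 30% drop rate (the scope name is arbitrary):

x = Dropout(0.3)(x)                              # Keras: drop 30% of units
net = tf_util.dropout(net, keep_prob=0.7,        # tf_util: keep 70% of units
                      is_training=is_training,
                      scope='dp_equiv')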
Example #16
Source File: pointnet2_sem_seg.py    From path_invariance_map_network with BSD 3-Clause "New" or "Revised" License
def get_model(point_cloud, is_training, num_class, bn_decay=None):
    """ Semantic segmentation PointNet, input is BxNx3, output Bxnum_class """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    l0_xyz = point_cloud
    l0_points = None
    end_points['l0_xyz'] = l0_xyz

    # Layer 1
    l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=1024, radius=0.1, nsample=32, mlp=[32,32,64], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1')
    l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2')
    l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=64, radius=0.4, nsample=32, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer3')
    l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=16, radius=0.8, nsample=32, mlp=[256,256,512], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer4')

    # Feature Propagation layers
    l3_points = pointnet_fp_module(l3_xyz, l4_xyz, l3_points, l4_points, [256,256], is_training, bn_decay, scope='fa_layer1')
    l2_points = pointnet_fp_module(l2_xyz, l3_xyz, l2_points, l3_points, [256,256], is_training, bn_decay, scope='fa_layer2')
    l1_points = pointnet_fp_module(l1_xyz, l2_xyz, l1_points, l2_points, [256,128], is_training, bn_decay, scope='fa_layer3')
    l0_points = pointnet_fp_module(l0_xyz, l1_xyz, l0_points, l1_points, [128,128,128], is_training, bn_decay, scope='fa_layer4')

    # FC layers
    net = tf_util.conv1d(l0_points, 128, 1, padding='VALID', bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    end_points['feats'] = net 
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1')
    net = tf_util.conv1d(net, num_class, 1, padding='VALID', activation_fn=None, scope='fc2')

    return net, end_points 
Example #17
Source File: ldgcnn_classifier.py    From ldgcnn with MIT License
def get_model(feature, is_training, bn_decay=None):
  # Fully connected layers: classifier
  layers = {}
  feature = tf.squeeze(feature)
  layer_name = 'ft_'
  
  # B: batch size; C: channels;
  # feature: B*C
  # net: B*512
  net = tf_util.fully_connected(feature, 512, bn=True, is_training=is_training,
                                scope=layer_name + 'fc2', bn_decay=bn_decay,
                                activation_fn = tf.nn.relu)
  layers[layer_name + 'fc2'] = net
  
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                         scope=layer_name + 'dp2')
  
  # net: B*256
  net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                scope=layer_name + 'fc3', bn_decay=bn_decay,
                                activation_fn = tf.nn.relu)
  layers[layer_name + 'fc3'] = net
  
  net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                        scope=layer_name + 'dp3')
  # net: B*40
  net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc4')
  layers[layer_name + 'fc4'] = net

  return net, layers 
Example #18
Source File: nvidia_io.py    From DBNet with Apache License 2.0
def get_model(net, is_training, bn_decay=None, separately=False):
    """ NVIDIA regression model, input is BxWxHx3, output Bx2"""
    batch_size = net.get_shape()[0].value

    for i, dim in enumerate([24, 36, 48, 64, 64]):
        scope = "conv" + str(i + 1)
        net = tf_util.conv2d(net, dim, [5, 5],
                             padding='VALID', stride=[1, 1],
                             bn=True, is_training=is_training,
                             scope=scope, bn_decay=bn_decay)

    net = tf.reshape(net, [batch_size, -1])
    for i, dim in enumerate([256, 100, 50, 10]):
        fc_scope = "fc" + str(i + 1)
        dp_scope = "dp" + str(i + 1)
        net = tf_util.fully_connected(net, dim, bn=True,
                                      is_training=is_training,
                                      scope=fc_scope, 
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope=dp_scope)

    net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc5')

    return net 
Example #19
Source File: ldgcnn.py    From ldgcnn with MIT License
def get_model(point_cloud, is_training, bn_decay=None):
    """ Classification PointNet, input is BxNx3, output Bx40 """
    batch_size = point_cloud.get_shape()[0].value
    layers = {}
    
    # Extract global feature
    net = calc_ldgcnn_feature(point_cloud, is_training, bn_decay)
    
    # MLP on global point cloud vector
    net = tf.reshape(net, [batch_size, -1]) 
    layers['global_feature'] = net
    
    # Fully connected layers: classifier
    # net: B*512
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    layers['fc1'] = net
    # Each element is kept or dropped independently, and the drop rate is 0.5.
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                           scope='dp1')
    
    # net: B*256
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    layers['fc2'] = net
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dp2')
    
    # net: B*40
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')
    layers['fc3'] = net
    return net, layers
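As the comment in this example says, each element is kept or dropped independently. tf.nn.dropout, which tf_util.dropout wraps, implements inverted dropout: survivors are scaled by 1/keep_prob so that the expected activation matches inference, where nothing is dropped or scaled. A standalone TF1.x check (not from any project above):

import tensorflow as tf

x = tf.ones((1, 8))
y = tf.nn.dropout(x, keep_prob=0.5)  # TF1.x API, as wrapped by tf_util
with tf.Session() as sess:
    # Roughly half the entries are zeroed; survivors equal 1/0.5 = 2.0.
    print(sess.run(y))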
Example #20
Source File: nvidia_pm.py    From DBNet with Apache License 2.0
def get_model(net, is_training, bn_decay=None, separately=False):
    """ NVIDIA regression model, input is BxWxHx3, output Bx2"""
    batch_size = net[0].get_shape()[0].value
    img_net, fmap_net = net
    for i, dim in enumerate([24, 36, 48, 64, 64]):
        scope_img = "image_conv" + str(i + 1)
        scope_fmap = "fmap_conv" + str(i + 1)
        img_net = tf_util.conv2d(img_net, dim, [5, 5],
                                 padding='VALID', stride=[1, 1],
                                 bn=True, is_training=is_training,
                                 scope=scope_img, bn_decay=bn_decay)
        fmap_net = tf_util.conv2d(fmap_net, dim, [5, 5],
                                  padding='VALID', stride=[1, 1],
                                  bn=True, is_training=is_training,
                                  scope=scope_fmap, bn_decay=bn_decay)
    net = tf.reshape(tf.stack([img_net, fmap_net]), [batch_size, -1])
    for i, dim in enumerate([256, 100, 50, 10]):
        fc_scope = "fc" + str(i + 1)
        dp_scope = "dp" + str(i + 1)
        net = tf_util.fully_connected(net, dim, bn=True,
                                      is_training=is_training,
                                      scope=fc_scope, 
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope=dp_scope)

    net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc5')

    return net 
Example #21
Source File: densenet169_pm.py    From DBNet with Apache License 2.0
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Densenet169 regression model, input is BxWxHx3, output Bx2"""
    batch_size = net[0].get_shape()[0].value
    img_net, fmap_net = net[0], net[1]

    img_net = get_densenet(224, 224)(img_net)
    fmap_net = get_densenet(224, 224)(fmap_net)

    net = tf.reshape(tf.stack([img_net, fmap_net]), [batch_size, -1])

    if not add_lstm:
        for i, dim in enumerate([256, 128, 16]):
            fc_scope = "fc" + str(i + 1)
            dp_scope = "dp" + str(i + 1)
            net = tf_util.fully_connected(net, dim, bn=True,
                                        is_training=is_training,
                                        scope=fc_scope,
                                        bn_decay=bn_decay)
            net = tf_util.dropout(net, keep_prob=0.7,
                                is_training=is_training,
                                scope=dp_scope)
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc4')
    else:
        fc_scope = "fc1"
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope=fc_scope,
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net 
Example #22
Source File: densenet169_pm.py    From DBNet with Apache License 2.0
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, (1, 1), name=conv_name_base+'_x1', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, (3, 3), name=conv_name_base+'_x2', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x 
Example #23
Source File: nvidia_pn.py    From DBNet with Apache License 2.0
def get_model(net, is_training, bn_decay=None, separately=False):
    """ NVIDIA regression model, input is BxWxHx3, output Bx2"""
    batch_size = net[0].get_shape()[0].value
    img_net, pt_net = net[0], net[1]

    for i, dim in enumerate([24, 36, 48, 64, 64]):
        scope = "conv" + str(i + 1)
        img_net = tf_util.conv2d(img_net, dim, [5, 5],
                                 padding='VALID', stride=[1, 1],
                                 bn=True, is_training=is_training,
                                 scope=scope, bn_decay=bn_decay)

    img_net = tf.reshape(img_net, [batch_size, -1])
    img_net = tf_util.fully_connected(img_net, 256, bn=True,
                                      is_training=is_training,
                                      scope='img_fc0',
                                      bn_decay=bn_decay)
    with tf.variable_scope('pointnet'):
        pt_net = pointnet.get_model(pt_net, tf.constant(True))
    net = tf.reshape(tf.stack([img_net, pt_net], axis=2), [batch_size, 512])

    for i, dim in enumerate([256, 128, 16]):
        fc_scope = "fc" + str(i + 1)
        dp_scope = "dp" + str(i + 1)
        net = tf_util.fully_connected(net, dim, bn=True,
                                      is_training=is_training,
                                      scope=fc_scope, 
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope=dp_scope)

    net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc5')

    return net 
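One detail worth flagging in this and the other *_pn examples: pointnet.get_model(pt_net, tf.constant(True)) hard-wires the PointNet branch into training mode, so its internal dropout stays active even at evaluation time. If that behavior is not intended, the surrounding is_training tensor can be threaded through instead (a sketch, assuming pointnet.get_model accepts the same boolean the callers above already hold):

with tf.variable_scope('pointnet'):
    # Let the PointNet branch follow the same train/eval switch as the
    # rest of the network instead of a constant True.
    pt_net = pointnet.get_model(pt_net, is_training)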
Example #24
Source File: densenet169_io.py    From DBNet with Apache License 2.0
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, (1, 1), name=conv_name_base+'_x1', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, (3, 3), name=conv_name_base+'_x2', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x 
Example #25
Source File: densenet169_io.py    From DBNet with Apache License 2.0
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 1x1 Convolution, AveragePooling, with optional compression and dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''

    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution2D(int(nb_filter * compression), (1, 1), name=conv_name_base, use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x 
Example #26
Source File: densenet169_pn.py    From DBNet with Apache License 2.0
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Densenet169 regression model, input is BxWxHx3, output Bx2"""
    batch_size = net[0].get_shape()[0].value
    img_net, pt_net = net[0], net[1]

    img_net = get_densenet(299, 299)(img_net)
    with tf.variable_scope('pointnet'):
        pt_net = pointnet.get_model(pt_net, tf.constant(True))
    net = tf.reshape(tf.stack([img_net, pt_net], axis=2), [batch_size, -1])

    if not add_lstm:
        for i, dim in enumerate([256, 128, 16]):
            fc_scope = "fc" + str(i + 1)
            dp_scope = "dp" + str(i + 1)
            net = tf_util.fully_connected(net, dim, bn=True,
                                        is_training=is_training,
                                        scope=fc_scope,
                                        bn_decay=bn_decay)
            net = tf_util.dropout(net, keep_prob=0.7,
                                is_training=is_training,
                                scope=dp_scope)

        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc4')
    else:
        fc_scope = "fc1"
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope=fc_scope,
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net 
Example #27
Source File: densenet169_pn.py    From DBNet with Apache License 2.0
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, ReLU, bottleneck 1x1 Conv2D, 3x3 Conv2D, and optional dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, (1, 1), name=conv_name_base+'_x1', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, (3, 3), name=conv_name_base+'_x2', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x 
Example #28
Source File: densenet169_pn.py    From DBNet with Apache License 2.0
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, ReLU, 1x1 Convolution, AveragePooling, with optional compression and dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''

    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = BatchNormalization(epsilon=eps, axis=3, name=conv_name_base+'_bn')(x)
    x = Scale(axis=3, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Convolution2D(int(nb_filter * compression), (1, 1), name=conv_name_base, use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x 
Example #29
Source File: inception_v4_pm.py    From DBNet with Apache License 2.0
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ Inception_V4 regression model, input is BxWxHx3, output Bx2"""
    batch_size = net[0].get_shape()[0].value
    img_net, fmap_net = net[0], net[1]

    img_net = get_inception(299, 299)(img_net)
    fmap_net = get_inception(299, 299)(fmap_net)

    net = tf.reshape(tf.stack([img_net, fmap_net]), [batch_size, -1])

    if not add_lstm:
        for i, dim in enumerate([256, 128, 16]):
            fc_scope = "fc" + str(i + 1)
            dp_scope = "dp" + str(i + 1)
            net = tf_util.fully_connected(net, dim, bn=True,
                                        is_training=is_training,
                                        scope=fc_scope,
                                        bn_decay=bn_decay)
            net = tf_util.dropout(net, keep_prob=0.7,
                                is_training=is_training,
                                scope=dp_scope)
        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc4')
    else:
        fc_scope = "fc1"
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope=fc_scope,
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net 
Example #30
Source File: resnet152_pn.py    From DBNet with Apache License 2.0
def get_model(net, is_training, add_lstm=False, bn_decay=None, separately=False):
    """ ResNet152 regression model, input is BxWxHx3, output Bx2"""
    batch_size = net[0].get_shape()[0].value
    img_net, pt_net = net[0], net[1]

    img_net = get_resnet(224, 224)(img_net)
    with tf.variable_scope('pointnet'):
        pt_net = pointnet.get_model(pt_net, tf.constant(True))
    net = tf.reshape(tf.stack([img_net, pt_net], axis=2), [batch_size, -1])

    if not add_lstm:
        for i, dim in enumerate([256, 128, 16]):
            fc_scope = "fc" + str(i + 1)
            dp_scope = "dp" + str(i + 1)
            net = tf_util.fully_connected(net, dim, bn=True,
                                        is_training=is_training,
                                        scope=fc_scope,
                                        bn_decay=bn_decay)
            net = tf_util.dropout(net, keep_prob=0.7,
                                is_training=is_training,
                                scope=dp_scope)

        net = tf_util.fully_connected(net, 2, activation_fn=None, scope='fc4')
    else:
        fc_scope = "fc1"
        net = tf_util.fully_connected(net, 784, bn=True,
                                      is_training=is_training,
                                      scope=fc_scope,
                                      bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.7,
                              is_training=is_training,
                              scope="dp1")
        net = cnn_lstm_block(net)
    return net