Python tensorflow.contrib.slim.dropout() Examples
The following are 29 code examples of tensorflow.contrib.slim.dropout(). Each example is taken from an open-source project; the source file, project name, and license are noted above each example. You may also want to check out all available functions and classes of the module tensorflow.contrib.slim.
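For orientation: slim.dropout wraps the standard TensorFlow dropout op with two conveniences that the examples below rely on. It takes an is_training flag that turns the layer into a pass-through at inference time, and it participates in slim.arg_scope, so that flag (and keep_prob) can be set once for a whole network. Here is a minimal sketch, assuming TensorFlow 1.x (tf.contrib was removed in 2.x); the layer names and sizes are illustrative, not taken from any project below.

import tensorflow as tf
import tensorflow.contrib.slim as slim

def small_head(features, is_training):
    # Illustrative two-layer head; names and sizes are arbitrary.
    net = slim.fully_connected(features, 256, scope='fc1')
    # keep_prob is the probability of KEEPING a unit, not of dropping it.
    # With is_training=False the input passes through unchanged.
    net = slim.dropout(net, keep_prob=0.5, is_training=is_training,
                       scope='dropout1')
    return slim.fully_connected(net, 10, activation_fn=None, scope='logits')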
Example #1
Source File: vgslspecs.py From Gun-Detector with Apache License 2.0
def AddDropout(self, prev_layer, index):
    """Adds a dropout layer.

    Args:
      prev_layer: Input tensor.
      index: Position in model_str to start parsing.

    Returns:
      Output tensor, end index in model_str.
    """
    pattern = re.compile(R'(Do)({\w+})?')
    m = pattern.match(self.model_str, index)
    if m is None:
        return None, None
    name = self._GetLayerName(m.group(0), index, m.group(2))
    layer = slim.dropout(
        prev_layer, 0.5, is_training=self.is_training, scope=name)
    return layer, m.end()
Example #2
Source File: vgslspecs.py From ad-versarial with MIT License
def AddDropout(self, prev_layer, index, reuse=None):
    """Adds a dropout layer.

    Args:
      prev_layer: Input tensor.
      index: Position in model_str to start parsing.

    Returns:
      Output tensor, end index in model_str.
    """
    pattern = re.compile(R'(Do)({\w+})?')
    m = pattern.match(self.model_str, index)
    if m is None:
        return None, None
    name = self._GetLayerName(m.group(0), index, m.group(2))
    layer = slim.dropout(
        prev_layer, 0.5, is_training=self.is_training, scope=name)
    return layer, m.end()
Example #3
Source File: model.py From minimal-entropy-correlation-alignment with MIT License
def E(self, images, is_training=False, reuse=False):
    if images.get_shape()[3] == 3:
        images = tf.image.rgb_to_grayscale(images)
    with tf.variable_scope('encoder', reuse=reuse):
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            with slim.arg_scope([slim.conv2d], activation_fn=tf.nn.relu,
                                padding='VALID'):
                net = slim.conv2d(images, 64, 5, scope='conv1')
                net = slim.max_pool2d(net, 2, stride=2, scope='pool1')
                net = slim.conv2d(net, 128, 5, scope='conv2')
                net = slim.max_pool2d(net, 2, stride=2, scope='pool2')
                net = tf.contrib.layers.flatten(net)
                net = slim.fully_connected(net, 1024,
                                           activation_fn=tf.nn.relu,
                                           scope='fc3')
                net = slim.dropout(net, 0.5, is_training=is_training)
                net = slim.fully_connected(net, self.hidden_repr_size,
                                           activation_fn=tf.tanh, scope='fc4')
                # dropout here or not?
                #~ net = slim.dropout(net, 0.5, is_training=is_training)
                return net
Example #4
Source File: vgslspecs.py From DOTA_models with Apache License 2.0
def AddDropout(self, prev_layer, index):
    """Adds a dropout layer.

    Args:
      prev_layer: Input tensor.
      index: Position in model_str to start parsing.

    Returns:
      Output tensor, end index in model_str.
    """
    pattern = re.compile(R'(Do)({\w+})?')
    m = pattern.match(self.model_str, index)
    if m is None:
        return None, None
    name = self._GetLayerName(m.group(0), index, m.group(2))
    layer = slim.dropout(
        prev_layer, 0.5, is_training=self.is_training, scope=name)
    return layer, m.end()
Example #5
Source File: vgg16.py From Faster-RCNN-TensorFlow-Python3 with MIT License
def build_predictions(self, net, rois, is_training, initializer, initializer_bbox):
    # Crop image ROIs
    pool5 = self._crop_pool_layer(net, rois, "pool5")
    pool5_flat = slim.flatten(pool5, scope='flatten')

    # Fully connected layers
    fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
    if is_training:
        fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,
                           scope='dropout6')
    fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
    if is_training:
        fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,
                           scope='dropout7')

    # Scores and predictions
    cls_score = slim.fully_connected(fc7, self._num_classes,
                                     weights_initializer=initializer,
                                     trainable=is_training,
                                     activation_fn=None, scope='cls_score')
    cls_prob = self._softmax_layer(cls_score, "cls_prob")
    bbox_prediction = slim.fully_connected(fc7, self._num_classes * 4,
                                           weights_initializer=initializer_bbox,
                                           trainable=is_training,
                                           activation_fn=None,
                                           scope='bbox_pred')

    return cls_score, cls_prob, bbox_prediction
Example #6
Source File: iCAN_ResNet50_HICO.py From iCAN with MIT License
def head_to_tail(self, fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, name):
    with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
        fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
        fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])

        Concat_SH = tf.concat([fc7_H, fc7_SH], 1)
        fc8_SH = slim.fully_connected(Concat_SH, self.num_fc, scope='fc8_SH')
        fc8_SH = slim.dropout(fc8_SH, keep_prob=0.5, is_training=is_training,
                              scope='dropout8_SH')
        fc9_SH = slim.fully_connected(fc8_SH, self.num_fc, scope='fc9_SH')
        fc9_SH = slim.dropout(fc9_SH, keep_prob=0.5, is_training=is_training,
                              scope='dropout9_SH')

        Concat_SO = tf.concat([fc7_O, fc7_SO], 1)
        fc8_SO = slim.fully_connected(Concat_SO, self.num_fc, scope='fc8_SO')
        fc8_SO = slim.dropout(fc8_SO, keep_prob=0.5, is_training=is_training,
                              scope='dropout8_SO')
        fc9_SO = slim.fully_connected(fc8_SO, self.num_fc, scope='fc9_SO')
        fc9_SO = slim.dropout(fc9_SO, keep_prob=0.5, is_training=is_training,
                              scope='dropout9_SO')

        Concat_SHsp = tf.concat([fc7_H, sp], 1)
        Concat_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc,
                                           scope='Concat_SHsp')
        Concat_SHsp = slim.dropout(Concat_SHsp, keep_prob=0.5,
                                   is_training=is_training,
                                   scope='dropout6_SHsp')
        fc7_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc,
                                        scope='fc7_SHsp')
        fc7_SHsp = slim.dropout(fc7_SHsp, keep_prob=0.5,
                                is_training=is_training,
                                scope='dropout7_SHsp')

    return fc9_SH, fc9_SO, fc7_SHsp
Example #7
Source File: squeezenet.py From tf_ctpn with MIT License
def _arg_scope(self, is_training, reuse=None):
    weight_decay = 0.0
    keep_probability = 1.0
    batch_norm_params = {
        'is_training': is_training,
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope(self._scope, self._scope, reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training) as sc:
                return sc
Example #8
Source File: iCAN_ResNet50_VCOCO_Early.py From iCAN with MIT License
def head_to_tail(self, fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, name):
    with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
        fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
        fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])

        Concat_SH = tf.concat([fc7_H[:self.H_num, :], fc7_SH[:self.H_num, :]], 1)
        fc8_SH = slim.fully_connected(Concat_SH, self.num_fc, scope='fc8_SH')
        fc8_SH = slim.dropout(fc8_SH, keep_prob=0.5, is_training=is_training,
                              scope='dropout8_SH')
        fc9_SH = slim.fully_connected(fc8_SH, self.num_fc, scope='fc9_SH')
        fc9_SH = slim.dropout(fc9_SH, keep_prob=0.5, is_training=is_training,
                              scope='dropout9_SH')

        Concat_HOS = tf.concat([fc7_H, fc7_O, fc7_SH, fc7_SO, sp], 1)
        fc8_HOS = slim.fully_connected(Concat_HOS, self.num_fc, scope='fc8_HOS')
        fc8_HOS = slim.dropout(fc8_HOS, keep_prob=0.5, is_training=is_training,
                               scope='dropout8_HOS')
        fc9_HOS = slim.fully_connected(fc8_HOS, self.num_fc, scope='fc9_HOS')
        fc9_HOS = slim.dropout(fc9_HOS, keep_prob=0.5, is_training=is_training,
                               scope='dropout9_HOS')

    return fc9_SH, fc9_HOS
Example #9
Source File: tf_modules.py From tensor2robot with Apache License 2.0
def argscope(is_training=None, normalizer_fn=slim.layer_norm):
    """Default TF argscope used for convnet-based grasping models.

    Args:
      is_training: Whether this argscope is for training or inference.
      normalizer_fn: Which conv/fc normalizer to use.

    Returns:
      Dictionary of argument overrides.
    """
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
        with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                activation_fn=tf.nn.relu,
                normalizer_fn=normalizer_fn):
            with slim.arg_scope(
                    [slim.conv2d, slim.max_pool2d],
                    stride=2, padding='VALID') as scope:
                return scope
Example #10
Source File: model.py From yolo_v2 with Apache License 2.0
def conv_tower_fn(self, images, is_training=True, reuse=None):
    """Computes convolutional features using the InceptionV3 model.

    Args:
      images: A tensor of shape [batch_size, height, width, channels].
      is_training: whether is training or not.
      reuse: whether or not the network and its variables should be reused.
        To be able to reuse 'scope' must be given.

    Returns:
      A tensor of shape [batch_size, OH, OW, N], where OWxOH is resolution of
      output feature map and N is number of output features (depends on the
      network architecture).
    """
    mparams = self._mparams['conv_tower_fn']
    logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
    with tf.variable_scope('conv_tower_fn/INCE'):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training):
                net, _ = inception.inception_v3_base(
                    images, final_endpoint=mparams.final_endpoint)
        return net
Example #11
Source File: vgslspecs.py From yolo_v2 with Apache License 2.0
def AddDropout(self, prev_layer, index):
    """Adds a dropout layer.

    Args:
      prev_layer: Input tensor.
      index: Position in model_str to start parsing.

    Returns:
      Output tensor, end index in model_str.
    """
    pattern = re.compile(R'(Do)({\w+})?')
    m = pattern.match(self.model_str, index)
    if m is None:
        return None, None
    name = self._GetLayerName(m.group(0), index, m.group(2))
    layer = slim.dropout(
        prev_layer, 0.5, is_training=self.is_training, scope=name)
    return layer, m.end()
Example #12
Source File: mobilenetv2.py From mobilenetv2 with MIT License
def mobilenet_v2_arg_scope(weight_decay, is_training=True, depth_multiplier=1.0,
                           regularize_depthwise=False, dropout_keep_prob=1.0):
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None

    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training,
                                           'center': True,
                                           'scale': True}):
        with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
            with slim.arg_scope([slim.separable_conv2d],
                                weights_regularizer=depthwise_regularizer,
                                depth_multiplier=depth_multiplier):
                with slim.arg_scope([slim.dropout], is_training=is_training,
                                    keep_prob=dropout_keep_prob) as sc:
                    return sc
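Arg-scope factories like the one above are meant to wrap model construction. A hedged usage sketch follows; the placeholder and layers are illustrative, not part of the original project:

import tensorflow as tf
import tensorflow.contrib.slim as slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(mobilenet_v2_arg_scope(weight_decay=4e-5,
                                           is_training=True,
                                           dropout_keep_prob=0.8)):
    # Layers created here inherit the defaults the scope sets: conv layers
    # get the regularizer and batch norm, and slim.dropout gets keep_prob
    # and is_training without repeating them at each call site.
    net = slim.conv2d(images, 32, [3, 3], stride=2, scope='conv1')
    net = slim.dropout(net, scope='dropout1')  # keep_prob comes from the scope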
Example #13
Source File: model.py From Gun-Detector with Apache License 2.0
def conv_tower_fn(self, images, is_training=True, reuse=None):
    """Computes convolutional features using the InceptionV3 model.

    Args:
      images: A tensor of shape [batch_size, height, width, channels].
      is_training: whether is training or not.
      reuse: whether or not the network and its variables should be reused.
        To be able to reuse 'scope' must be given.

    Returns:
      A tensor of shape [batch_size, OH, OW, N], where OWxOH is resolution of
      output feature map and N is number of output features (depends on the
      network architecture).
    """
    mparams = self._mparams['conv_tower_fn']
    logging.debug('Using final_endpoint=%s', mparams.final_endpoint)
    with tf.variable_scope('conv_tower_fn/INCE'):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=is_training):
                net, _ = inception.inception_v3_base(
                    images, final_endpoint=mparams.final_endpoint)
        return net
Example #14
Source File: iCAN_ResNet50_VCOCO_Early.py From iCAN with MIT License
def attention_pool_layer_H(self, bottom, fc7_H, is_training, name):
    with tf.variable_scope(name) as scope:
        fc1 = slim.fully_connected(fc7_H, 512, scope='fc1_b')
        fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training,
                           scope='dropout1_b')
        fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
        att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
    return att
Example #15
Source File: utils.py From taskonomy with MIT License
def add_fc_with_dropout_layer(net, is_training, num_outputs, dropout=0.8,
                              activation_fn=None, reuse=None, scope=None):
    ''' Sets up a FC layer with dropout using the args passed in '''
    # print(activation_fn)
    net = slim.fully_connected(net, num_outputs, activation_fn=activation_fn,
                               reuse=reuse, scope=scope)
    net = slim.dropout(net, keep_prob=dropout, is_training=is_training)
    tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, net)
    # The original tested the string literal 'scope', which is always truthy;
    # the intended check is on the scope argument itself.
    if scope is not None:
        print('\t\t{scope}'.format(scope=scope), net.get_shape())
    return net
Example #16
Source File: iCAN_ResNet50_VCOCO.py From iCAN with MIT License
def attention_pool_layer_O(self, bottom, fc7_O, is_training, name):
    with tf.variable_scope(name) as scope:
        fc1 = slim.fully_connected(fc7_O, 512, scope='fc1_b')
        fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training,
                           scope='dropout1_b')
        fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
        att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
    return att
Example #17
Source File: models.py From human_dynamics with BSD 2-Clause "Simplified" License
def hmr_ief(phi, omega_start, scope, num_output=85, num_stage=3,
            is_training=True):
    """
    Runs HMR-style IEF.

    Args:
        phi (Bx2048): Image features.
        omega_start (Bx85): Starting Omega as input to first IEF.
        scope (str): Name of scope for reuse.
        num_output (int): Size of output.
        num_stage (int): Number of iterations for IEF.
        is_training (bool): If False, don't apply dropout.

    Returns:
        Final theta (Bx{num_output})
    """
    with tf.variable_scope(scope):
        theta_prev = omega_start
        theta_here = None

        for _ in range(num_stage):
            # ---- Compute outputs
            state = tf.concat([phi, theta_prev], 1)
            delta_theta, _ = encoder_fc3_dropout(
                state,
                is_training=is_training,
                num_output=num_output,
                reuse=tf.AUTO_REUSE
            )
            # Compute new theta
            theta_here = theta_prev + delta_theta
            # Finally update to end iteration.
            theta_prev = theta_here
    return theta_here
Example #18
Source File: iCAN_ResNet50_VCOCO.py From iCAN with MIT License
def attention_pool_layer_H(self, bottom, fc7_H, is_training, name):
    with tf.variable_scope(name) as scope:
        fc1 = slim.fully_connected(fc7_H, 512, scope='fc1_b')
        fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training,
                           scope='dropout1_b')
        fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
        att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
    return att
Example #19
Source File: models.py From motion_reconstruction with BSD 3-Clause "New" or "Revised" License
def Encoder_fc3_dropout(x, num_output=85, is_training=True, reuse=False,
                        name="3D_module"):
    """
    3D inference module. 3 MLP layers (last is the output).
    With dropout on first 2.

    Input:
    - x: N x [|img_feat|, |3D_param|]
    - reuse: bool

    Outputs:
    - 3D params: N x num_output
      if orthogonal: either 85: (3 + 24*3 + 10) or 109 (3 + 24*4 + 10)
        for factored axis-angle representation
      if perspective: 86: (f, tx, ty, tz) + 24*3 + 10, or 110
        for factored axis-angle.
    - variables: tf variables
    """
    if reuse:
        print('Reuse is on!')
    with tf.variable_scope(name, reuse=reuse) as scope:
        net = slim.fully_connected(x, 1024, scope='fc1')
        net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout1')
        net = slim.fully_connected(net, 1024, scope='fc2')
        net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout2')
        small_xavier = variance_scaling_initializer(
            factor=.01, mode='FAN_AVG', uniform=True)
        net = slim.fully_connected(
            net, num_output, activation_fn=None,
            weights_initializer=small_xavier, scope='fc3')
    variables = tf.contrib.framework.get_variables(scope)
    return net, variables
Example #20
Source File: base_network.py From cartpoleplusplus with MIT License
def hidden_layers_starting_at(self, layer, layer_sizes, opts=None):
    # TODO: opts=None => will force exception on old calls....
    if not isinstance(layer_sizes, list):
        # list() so that len() also works under Python 3, where map is lazy.
        layer_sizes = list(map(int, layer_sizes.split(",")))
    assert len(layer_sizes) > 0
    for i, size in enumerate(layer_sizes):
        layer = slim.fully_connected(scope="h%d" % i,
                                     inputs=layer,
                                     num_outputs=size,
                                     weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                                     activation_fn=tf.nn.relu)
        if opts.use_dropout:
            layer = slim.dropout(layer, is_training=IS_TRAINING,
                                 scope="do%d" % i)
    return layer
Example #21
Source File: squeezenet.py From facenet_mtcnn_to_mobile with MIT License
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=slim.xavier_initializer_conv2d(uniform=True),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        with tf.variable_scope('squeezenet', [images], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                net = slim.conv2d(images, 96, [7, 7], stride=2, scope='conv1')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool1')
                net = fire_module(net, 16, 64, scope='fire2')
                net = fire_module(net, 16, 64, scope='fire3')
                net = fire_module(net, 32, 128, scope='fire4')
                net = slim.max_pool2d(net, [2, 2], stride=2, scope='maxpool4')
                net = fire_module(net, 32, 128, scope='fire5')
                net = fire_module(net, 48, 192, scope='fire6')
                net = fire_module(net, 48, 192, scope='fire7')
                net = fire_module(net, 64, 256, scope='fire8')
                net = slim.max_pool2d(net, [3, 3], stride=2, scope='maxpool8')
                net = fire_module(net, 64, 256, scope='fire9')
                net = slim.dropout(net, keep_probability)
                net = slim.conv2d(net, 1000, [1, 1], activation_fn=None,
                                  normalizer_fn=None, scope='conv10')
                net = slim.avg_pool2d(net, net.get_shape()[1:3],
                                      scope='avgpool10')
                net = tf.squeeze(net, [1, 2], name='logits')
                net = slim.fully_connected(net, bottleneck_layer_size,
                                           activation_fn=None,
                                           scope='Bottleneck', reuse=False)
    return net, None
Example #22
Source File: uncertainty_module.py From Probabilistic-Face-Embeddings with MIT License
def inference(inputs, embedding_size, phase_train,
              weight_decay=5e-4, reuse=None, scope='UncertaintyModule'):
    with slim.arg_scope([slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        activation_fn=tf.nn.relu):
        with tf.variable_scope(scope, [inputs], reuse=reuse):
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=phase_train):
                print('UncertaintyModule input shape:',
                      [dim.value for dim in inputs.shape])

                net = slim.flatten(inputs)
                net = slim.fully_connected(net, embedding_size, scope='fc1',
                                           normalizer_fn=slim.batch_norm,
                                           normalizer_params=batch_norm_params,
                                           activation_fn=tf.nn.relu)
                log_sigma_sq = slim.fully_connected(
                    net, embedding_size, scope='fc_log_sigma_sq',
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=batch_norm_params_sigma,
                    activation_fn=None)
                # Share the gamma and beta for all dimensions
                log_sigma_sq = scale_and_shift(log_sigma_sq, 1e-4, -7.0)
                # Add epsilon for sigma_sq for numerical stableness
                log_sigma_sq = tf.log(1e-6 + tf.exp(log_sigma_sq))
    return log_sigma_sq
Example #23
Source File: sphere_net_PFE.py From Probabilistic-Face-Embeddings with MIT License
def inference(images, embedding_size=512, reuse=None, scope='SphereNet'):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(0.0),
                        normalizer_fn=None, normalizer_params=None,
                        activation_fn=parametric_relu):
        with tf.variable_scope('SphereNet', [images], reuse=reuse):
            # Fix the moving mean and std when training PFE
            with slim.arg_scope([slim.batch_norm, slim.dropout],
                                is_training=False):
                print('SphereNet input shape:',
                      [dim.value for dim in images.shape])

                model_version = '64'
                num_layers, num_kernels = model_params[model_version]

                net = conv_module(images, num_layers[0], num_kernels[0],
                                  scope='conv1')
                print('module_1 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, num_layers[1], num_kernels[1],
                                  scope='conv2')
                print('module_2 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, num_layers[2], num_kernels[2],
                                  scope='conv3')
                print('module_3 shape:', [dim.value for dim in net.shape])
                net = conv_module(net, num_layers[3], num_kernels[3],
                                  scope='conv4')
                print('module_4 shape:', [dim.value for dim in net.shape])

                net_ = net
                net = slim.flatten(net)
                mu = slim.fully_connected(net, embedding_size,
                                          scope='Bottleneck',
                                          weights_initializer=slim.xavier_initializer(),
                                          normalizer_fn=slim.batch_norm,
                                          normalizer_params=batch_norm_params_last,
                                          activation_fn=None)
                # Output used for PFE
                mu = tf.nn.l2_normalize(mu, axis=1)
                conv_final = net
    return mu, conv_final
Example #24
Source File: vgg16.py From MSDS-RCNN with MIT License
def _bcn_to_tail(self, pool5, is_training, initializer, suffix='', reuse=False):
    with tf.variable_scope(self._scope + '_bcn', self._scope + '_bcn',
                           reuse=reuse):
        pool5_flat = slim.flatten(pool5, scope='flatten' + suffix)
        fc6 = slim.fully_connected(pool5_flat, 4096,
                                   weights_initializer=initializer,
                                   trainable=is_training,
                                   scope='fc6' + suffix)
        if is_training:
            fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,
                               scope='dropout6' + suffix)
        fc7 = slim.fully_connected(fc6, 4096,
                                   weights_initializer=initializer,
                                   trainable=is_training,
                                   scope='fc7' + suffix)
        if is_training:
            fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,
                               scope='dropout7' + suffix)
    return fc7
Example #25
Source File: vgg16.py From tf-faster-rcnn with MIT License
def _head_to_tail(self, pool5, is_training, reuse=None):
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
        pool5_flat = slim.flatten(pool5, scope='flatten')
        fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
        if is_training:
            fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,
                               scope='dropout6')
        fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
        if is_training:
            fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,
                               scope='dropout7')
    return fc7
Example #26
Source File: model.py From adversarial-feature-augmentation with MIT License
def feature_generator(self, noise, labels, reuse=False):
    '''
    Takes in input noise and labels, and generates f(z|y)
    '''
    try:  # just to make it work on different Tensorflow releases
        inputs = tf.concat(1, [noise, tf.cast(labels, tf.float32)])
    except:
        inputs = tf.concat([noise, tf.cast(labels, tf.float32)], 1)

    with tf.variable_scope('feature_generator', reuse=reuse):
        with slim.arg_scope([slim.fully_connected],
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            biases_initializer=tf.constant_initializer(0.0)):
            with slim.arg_scope([slim.batch_norm], decay=0.95,
                                center=True, scale=True,
                                activation_fn=tf.nn.relu,
                                is_training=(self.mode == 'train_feature_generator')):
                net = slim.fully_connected(inputs, 1024,
                                           activation_fn=tf.nn.relu,
                                           scope='sgen_fc1')
                net = slim.batch_norm(net, scope='sgen_bn1')
                # dropout needs to be always on, do not include "is_training"
                net = slim.dropout(net, 0.5)
                net = slim.fully_connected(net, 1024,
                                           activation_fn=tf.nn.relu,
                                           scope='sgen_fc2')
                net = slim.batch_norm(net, scope='sgen_bn2')
                net = slim.dropout(net, 0.5)
                net = slim.fully_connected(net, self.hidden_repr_size,
                                           activation_fn=tf.tanh,
                                           scope='sgen_feat')
    return net
Example #27
Source File: models.py From phd with BSD 2-Clause "Simplified" License
def hmr_ief(phi, omega_start, scope, num_output=85, num_stage=3,
            is_training=True):
    """
    Runs HMR-style IEF.

    Args:
        phi (Bx2048): Image features.
        omega_start (Bx85): Starting Omega as input to first IEF.
        scope (str): Name of scope for reuse.
        num_output (int): Size of output.
        num_stage (int): Number of iterations for IEF.
        is_training (bool): If False, don't apply dropout.

    Returns:
        Final theta (Bx{num_output})
    """
    with tf.variable_scope(scope):
        theta_prev = omega_start
        theta_here = None

        for _ in range(num_stage):
            # ---- Compute outputs
            state = tf.concat([phi, theta_prev], 1)
            delta_theta, _ = encoder_fc3_dropout(
                state,
                is_training=is_training,
                num_output=num_output,
                reuse=tf.AUTO_REUSE
            )
            # Compute new theta
            theta_here = theta_prev + delta_theta
            # Finally update to end iteration.
            theta_prev = theta_here
    return theta_here
Example #28
Source File: models.py From phd with BSD 2-Clause "Simplified" License
def encoder_fc3_dropout(x, num_output=85, is_training=True, reuse=False,
                        name='3D_module'):
    """
    3D inference module. 3 MLP layers (last is the output).
    With dropout on first 2.

    Input:
    - x: N x [|img_feat|, |3D_param|]
    - reuse: bool

    Outputs:
    - 3D params: N x num_output
      if orthogonal: either 85: (3 + 24*3 + 10) or 109 (3 + 24*4 + 10)
        for factored axis-angle representation
      if perspective: 86: (f, tx, ty, tz) + 24*3 + 10, or 110
        for factored axis-angle.
    - variables: tf variables
    """
    with tf.variable_scope(name, reuse=reuse) as scope:
        net = slim.fully_connected(x, 1024, scope='fc1')
        net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout1')
        net = slim.fully_connected(net, 1024, scope='fc2')
        net = slim.dropout(net, 0.5, is_training=is_training, scope='dropout2')
        small_xavier = variance_scaling_initializer(
            factor=.01, mode='FAN_AVG', uniform=True)
        net = slim.fully_connected(
            net, num_output, activation_fn=None,
            weights_initializer=small_xavier, scope='fc3')
    variables = tf.contrib.framework.get_variables(scope)
    return net, variables

# Functions for f_{movie strip} and f_{AR}.
Example #29
Source File: tf_utils.py From DOTA_models with Apache License 2.0
def fc_network(x, neurons, wt_decay, name, num_pred=None, offset=0,
               batch_norm_param=None, dropout_ratio=0.0, is_training=None):
    if dropout_ratio > 0:
        assert is_training is not None, \
            'is_training needs to be defined when training with dropout.'

    repr = []
    for i, neuron in enumerate(neurons):
        init_var = np.sqrt(2.0 / neuron)
        if batch_norm_param is not None:
            x = slim.fully_connected(x, neuron, activation_fn=None,
                                     weights_initializer=tf.random_normal_initializer(stddev=init_var),
                                     weights_regularizer=slim.l2_regularizer(wt_decay),
                                     normalizer_fn=slim.batch_norm,
                                     normalizer_params=batch_norm_param,
                                     biases_initializer=tf.zeros_initializer(),
                                     scope='{:s}_{:d}'.format(name, offset + i))
        else:
            x = slim.fully_connected(x, neuron, activation_fn=tf.nn.relu,
                                     weights_initializer=tf.random_normal_initializer(stddev=init_var),
                                     weights_regularizer=slim.l2_regularizer(wt_decay),
                                     biases_initializer=tf.zeros_initializer(),
                                     scope='{:s}_{:d}'.format(name, offset + i))
        if dropout_ratio > 0:
            x = slim.dropout(x, keep_prob=1 - dropout_ratio,
                             is_training=is_training,
                             scope='{:s}_{:d}'.format('dropout_' + name,
                                                      offset + i))
        repr.append(x)

    if num_pred is not None:
        init_var = np.sqrt(2.0 / num_pred)
        x = slim.fully_connected(x, num_pred,
                                 weights_regularizer=slim.l2_regularizer(wt_decay),
                                 weights_initializer=tf.random_normal_initializer(stddev=init_var),
                                 biases_initializer=tf.zeros_initializer(),
                                 activation_fn=None,
                                 scope='{:s}_pred'.format(name))
    return x, repr