Python torch.nn.functional.instance_norm() Examples

The following are 9 code examples of torch.nn.functional.instance_norm(), drawn from open-source projects. Each example notes the project and source file it comes from. You may also want to check out the other available functions and classes of the torch.nn.functional module.
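For reference, torch.nn.functional.instance_norm normalizes each sample of a batch independently, per channel, using that sample's own statistics. A minimal usage sketch (tensor sizes are arbitrary, chosen for illustration):

import torch
import torch.nn.functional as F

x = torch.randn(4, 3, 32, 32)             # (N, C, H, W)
y = F.instance_norm(x, eps=1e-5)          # normalize each (sample, channel) map
print(y.mean(dim=(2, 3)).abs().max())     # per-map means are close to 0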
Example #1
Source File: cbin.py    From DMIT with MIT License
def forward(self, input, ConInfor):
        self._check_input_dim(input)
        b, c = input.size(0), input.size(1)
        # Condition-dependent per-channel bias
        if self.num_con > 0:
            tarBias = self.ConBias(ConInfor).view(b, c, 1, 1)
        else:
            tarBias = 0
        # Normalize with the input's own statistics during training
        # (or whenever running stats are not tracked)
        out = F.instance_norm(
            input, self.running_mean, self.running_var, None, None,
            self.training or not self.track_running_stats, self.momentum, self.eps)

        if self.affine:
            # Conditional bias first, then the layer's own affine transform
            bias = self.bias.repeat(b).view(b, c, 1, 1)
            weight = self.weight.repeat(b).view(b, c, 1, 1)
            return (out.view(b, c, *input.size()[2:]) + tarBias) * weight + bias
        else:
            return out.view(b, c, *input.size()[2:]) + tarBias
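The forward pass above instance-normalizes the input and then adds a bias computed from the conditioning vector. A minimal stand-alone sketch of the same idea (the linear layer and tensor names are illustrative assumptions, not the original CBINorm module):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(2, 8, 32, 32)               # (N, C, H, W)
cond = torch.randn(2, 4)                    # hypothetical conditioning vector
to_bias = nn.Linear(4, 8)                   # maps condition -> per-channel bias

out = F.instance_norm(x)                    # normalize each (sample, channel) map
out = out + to_bias(cond).view(2, 8, 1, 1)  # condition-dependent shift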
Example #2
Source File: layers.py    From BigGAN-PyTorch with MIT License
def forward(self, x, y):
    # Calculate class-conditional gains and biases
    gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1)
    bias = self.bias(y).view(y.size(0), -1, 1, 1)
    # If using my batchnorm
    if self.mybn or self.cross_replica:
      return self.bn(x, gain=gain, bias=bias)
    # else:
    else:
      if self.norm_style == 'bn':
        out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None,
                          self.training, 0.1, self.eps)
      elif self.norm_style == 'in':
        out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None,
                          self.training, 0.1, self.eps)
      elif self.norm_style == 'gn':
        out = groupnorm(x, self.norm_style)
      elif self.norm_style == 'nonorm':
        out = x
      return out * gain + bias 
Example #3
Source File: adain.py    From DMIT with MIT License
def forward(self, input, ConInfor):
        self._check_input_dim(input)
        b, c = input.size(0), input.size(1)
        out = F.instance_norm(
            input, self.running_mean, self.running_var, None, None,
            self.training or not self.track_running_stats, self.momentum, self.eps)

        # Condition-dependent scale and shift (AdaIN-style affine parameters)
        if self.num_con > 0:
            weight = self.ConAlpha(ConInfor).view(b, c, 1, 1)
            bias = self.ConBeta(ConInfor).view(b, c, 1, 1)
        else:
            weight = 1
            bias = 0
        return out.view(b, c, *input.size()[2:]) * weight + bias
Example #4
Source File: pytorch_to_caffe.py    From PytorchToCaffe with MIT License
def _instance_norm(raw, input, running_mean=None, running_var=None, weight=None,
                   bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    # TODO: handle the view operations needed when batch size != 1
    print("WARNING: Instance normalization is converted to a Caffe BatchNorm layer, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: support the affine=True or track_running_stats=True case
        raise NotImplementedError("InstanceNorm with affine=True or track_running_stats=True is not implemented")
    x = torch.batch_norm(
        input, weight, bias, running_mean, running_var,
        use_input_stats, momentum, eps, torch.backends.cudnn.enabled)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # No global stats: normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
        running_mean = torch.zeros(input.size()[1])
        running_var = torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x


Example #5
Source File: network.py    From ngransac with BSD 3-Clause "New" or "Revised" License
def forward(self, inputs):
		'''
		Forward pass, return log probabilities over correspondences.

		inputs -- 4D data tensor (BxCxNx1)
		B -> batch size (multiple image pairs)
		C -> 5 values (2D coordinate + 2D coordinate + 1D side information)
		N -> number of correspondences
		1 -> dummy dimension
		
		'''
		batch_size = inputs.size(0)
		data_size = inputs.size(2) # number of correspondences

		x = inputs
		x = F.relu(self.p_in(x))
		
		for r in self.res_blocks:
			res = x
			x = F.relu(r[1](F.instance_norm(r[0](x)))) 
			x = F.relu(r[3](F.instance_norm(r[2](x))))
			x = x + res

		log_probs = F.logsigmoid(self.p_out(x))

		# normalization in log space such that probabilities sum to 1
		log_probs = log_probs.view(batch_size, -1)
		normalizer = torch.logsumexp(log_probs, dim=1)
		normalizer = normalizer.unsqueeze(1).expand(-1, data_size)
		log_probs = log_probs - normalizer
		log_probs = log_probs.view(batch_size, 1, data_size, 1)

		return log_probs 
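The docstring above specifies the expected input layout: a B x 5 x N x 1 tensor holding two 2D coordinates plus one side-information value per correspondence. A minimal sketch of assembling such a tensor (shapes and names are assumptions for illustration):

import torch

B, N = 2, 1000                       # image pairs, correspondences per pair
pts1 = torch.rand(B, 2, N)           # 2D coordinates in image 1
pts2 = torch.rand(B, 2, N)           # 2D coordinates in image 2
side = torch.rand(B, 1, N)           # 1D side information per correspondence

# Stack into the B x C x N x 1 layout expected by the forward pass above
inputs = torch.cat([pts1, pts2, side], dim=1).unsqueeze(3)
print(inputs.shape)                  # torch.Size([2, 5, 1000, 1])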
Example #6
Source File: cbin.py    From SingleGAN with MIT License
def forward(self, input, ConInfor):
        self._check_input_dim(input)
        b, c = input.size(0), input.size(1)
        tarBias = self.ConBias(ConInfor).view(b, c, 1, 1)
        out = F.instance_norm(
            input, self.running_mean, self.running_var, None, None,
            self.training or not self.track_running_stats, self.momentum, self.eps)

        if self.affine:
            bias = self.bias.repeat(b).view(b, c, 1, 1)
            weight = self.weight.repeat(b).view(b, c, 1, 1)
            return (out.view(b, c, *input.size()[2:]) + tarBias) * weight + bias
        else:
            return out.view(b, c, *input.size()[2:]) + tarBias
Example #7
Source File: pytorch_to_caffe.py    From fast-reid with Apache License 2.0
def _instance_norm(raw, input, running_mean=None, running_var=None, weight=None,
                   bias=None, use_input_stats=True, momentum=0.1, eps=1e-5):
    # TODO: handle the view operations needed when batch size != 1
    print("WARNING: Instance normalization is converted to a Caffe BatchNorm layer, so the batch size should be 1")
    if running_var is not None or weight is not None:
        # TODO: support the affine=True or track_running_stats=True case
        raise NotImplementedError("InstanceNorm with affine=True or track_running_stats=True is not implemented")
    x = torch.batch_norm(
        input, weight, bias, running_mean, running_var,
        use_input_stats, momentum, eps, torch.backends.cudnn.enabled)
    bottom_blobs = [log.blobs(input)]
    layer_name1 = log.add_layer(name='instance_norm')
    top_blobs = log.add_blobs([x], name='instance_norm_blob')
    layer1 = caffe_net.Layer_param(name=layer_name1, type='BatchNorm',
                                   bottom=bottom_blobs, top=top_blobs)
    if running_mean is None or running_var is None:
        # No global stats: normalization is performed over the current mini-batch
        layer1.batch_norm_param(use_global_stats=0, eps=eps)
        running_mean = torch.zeros(input.size()[1])
        running_var = torch.ones(input.size()[1])
    else:
        layer1.batch_norm_param(use_global_stats=1, eps=eps)
    running_mean_clone = running_mean.clone()
    running_var_clone = running_var.clone()
    layer1.add_data(running_mean_clone.cpu().numpy(), running_var_clone.cpu().numpy(), np.array([1.0]))
    log.cnet.add_layer(layer1)
    if weight is not None and bias is not None:
        layer_name2 = log.add_layer(name='bn_scale')
        layer2 = caffe_net.Layer_param(name=layer_name2, type='Scale',
                                       bottom=top_blobs, top=top_blobs)
        layer2.param.scale_param.bias_term = True
        layer2.add_data(weight.cpu().data.numpy(), bias.cpu().data.numpy())
        log.cnet.add_layer(layer2)
    return x


Example #8
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_instance_norm(self):
        inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        running_mean = torch.randn(3, device='cuda', dtype=self.dtype)
        running_var = torch.randn(3, device='cuda', dtype=self.dtype)
        output = F.instance_norm(inp, running_mean=running_mean, running_var=running_var,
                                 weight=None, bias=None, use_input_stats=True,
                                 momentum=0.1, eps=1e-05)
Example #9
Source File: trainer_pono.py    From PONO with MIT License
def compute_vgg19_loss(self, vgg, img, target, vgg_type='vgg19'):
        img_feature = self.vgg((img + 1) / 2)
        target_feature = self.vgg((target + 1) / 2).detach()
        if vgg_type == 'vgg19':
            return F.l1_loss(img_feature, target_feature)
        elif vgg_type == 'vgg19_sp':
            sp = SpatialNorm(affine=False)
            return F.l1_loss(sp(img_feature)[0], sp(target_feature)[0])
        elif vgg_type == 'vgg19_sp_mean':
            m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
            return F.l1_loss(m1, m2)
        elif vgg_type == 'vgg19_sp_mean_mix':
            m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
            return 0.5 * F.l1_loss(img_feature, target_feature) + 0.5 * F.l1_loss(m1, m2)
        elif vgg_type == 'vgg19_sp_meanstd':
            m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
            std1, std2 = img_feature.std(dim=1), target_feature.std(dim=1)
            return 0.5 * F.l1_loss(m1, m2) + 0.5 * F.l1_loss(std1, std2)
        elif vgg_type == 'vgg19_sp_meanstd_mix':
            m1, m2 = img_feature.mean(dim=1), target_feature.mean(dim=1)
            std1, std2 = img_feature.std(dim=1), target_feature.std(dim=1)
            return 0.5 * F.l1_loss(img_feature, target_feature) + 0.25 * F.l1_loss(m1, m2) + 0.25 * F.l1_loss(std1, std2)
        elif vgg_type == 'vgg19_in':
            return F.l1_loss(F.instance_norm(img_feature), F.instance_norm(target_feature))
        elif vgg_type == 'vgg19_in_mean':
            img_feature = img_feature.view(*img_feature.shape[:2], -1)
            target_feature = target_feature.view(*target_feature.shape[:2], -1)
            m1, m2 = img_feature.mean(dim=2), target_feature.mean(dim=2)
            return F.l1_loss(m1, m2)
        elif vgg_type == 'vgg19_in_meanstd':
            img_feature = img_feature.view(*img_feature.shape[:2], -1)
            target_feature = target_feature.view(*target_feature.shape[:2], -1)
            m1, m2 = img_feature.mean(dim=2), target_feature.mean(dim=2)
            std1, std2 = img_feature.std(dim=2), target_feature.std(dim=2)
            return F.l1_loss(m1, m2) + F.l1_loss(std1, std2)
        else:
            raise ValueError('vgg_type = {}'.format(vgg_type))
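The 'vgg19_in' branch above compares features after F.instance_norm, which removes each feature map's per-channel mean and scale before the L1 distance is taken. A minimal self-contained sketch of that idea (the feature shapes are arbitrary assumptions):

import torch
import torch.nn.functional as F

feat_a = torch.randn(2, 64, 16, 16)   # hypothetical VGG feature maps
feat_b = torch.randn(2, 64, 16, 16)
# L1 distance between instance-normalized features, insensitive to
# per-channel mean and variance differences between the two inputs
loss = F.l1_loss(F.instance_norm(feat_a), F.instance_norm(feat_b))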