Python torch.nn.functional.conv1d() Examples

The following are 30 code examples of torch.nn.functional.conv1d(), drawn from open-source projects; the project and source file for each example are noted above its code. You may also want to check out all available functions and classes of the torch.nn.functional module.
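
For orientation before the examples: F.conv1d expects an input of shape (N, C_in, L) and a weight of shape (C_out, C_in/groups, kW), and returns an output of shape (N, C_out, L_out) with L_out = floor((L + 2*padding - dilation*(kW - 1) - 1) / stride) + 1. A minimal sketch of the call:

import torch
import torch.nn.functional as F

x = torch.randn(8, 4, 100)   # (batch, in_channels, length)
w = torch.randn(6, 4, 5)     # (out_channels, in_channels, kernel_size)
b = torch.zeros(6)

y = F.conv1d(x, w, b, stride=1, padding=2)
print(y.shape)               # torch.Size([8, 6, 100]); padding=2 preserves length for kW=5
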
Example #1
Source File: test_pyprof_nvtx.py    From apex with BSD 3-Clause "New" or "Revised" License
def test_conv1d(self):
        # Data and weight tensors
        tensor1d_in_conv = torch.randn(32, 3, 224, device='cuda', dtype=self.dtype)
        tensor1d_in_conv_grouped = torch.randn(32, 6, 224, device='cuda', dtype=self.dtype)
        conv1d_filter = torch.randn(16, 3, 3, device='cuda', dtype=self.dtype)
        conv1d_bias = torch.ones(16, device='cuda', dtype=self.dtype)
        # Vanilla conv1d
        conv1d_out_vanilla = F.conv1d(tensor1d_in_conv, conv1d_filter)
        # conv1d with bias
        conv1d_out_with_bias = F.conv1d(tensor1d_in_conv, conv1d_filter, bias=conv1d_bias)
        # conv1d - stride > 1
        conv1d_out_strided = F.conv1d(tensor1d_in_conv, conv1d_filter, stride=2)
        # conv1d - dilation > 1
        conv1d_out_dilated = F.conv1d(tensor1d_in_conv, conv1d_filter, dilation=2)
        # conv1d - groups > 1
        conv1d_out_grouped = F.conv1d(tensor1d_in_conv_grouped, conv1d_filter, groups=2)
        # conv1d - padding with zeros
        conv1d_out_padding_zeros = F.conv1d(tensor1d_in_conv, conv1d_filter, padding=6) 
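
The output lengths above all follow the formula from the PyTorch docs, L_out = floor((L_in + 2*padding - dilation*(kW - 1) - 1) / stride) + 1. A quick sanity check recomputing the sizes of this example (L_in = 224, kW = 3):

def conv1d_out_len(L, k, stride=1, padding=0, dilation=1):
    # Standard conv1d output-length formula.
    return (L + 2 * padding - dilation * (k - 1) - 1) // stride + 1

assert conv1d_out_len(224, 3) == 222              # vanilla / with bias
assert conv1d_out_len(224, 3, stride=2) == 111    # strided
assert conv1d_out_len(224, 3, dilation=2) == 220  # dilated
assert conv1d_out_len(224, 3, padding=6) == 234   # zero-padded
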
Example #2
Source File: BayesianLayers.py    From Tutorial_BayesianCompressionForDL with MIT License
def forward(self, x):
        if self.deterministic:
            assert not self.training, "Flag deterministic is True. This should not be used in training."
            return F.conv1d(x, self.post_weight_mu, self.bias_mu, self.stride, self.padding, self.dilation, self.groups)
        batch_size = x.size()[0]
        # apply local reparametrisation trick see [1] Eq. (6)
        # to the parametrisation given in [3] Eq. (6)
        mu_activations = F.conv1d(x, self.weight_mu, self.bias_mu, self.stride,
                                  self.padding, self.dilation, self.groups)

        var_activations = F.conv1d(x.pow(2), self.weight_logvar.exp(), self.bias_logvar.exp(), self.stride,
                                   self.padding, self.dilation, self.groups)
        # compute z
        # note that we reparametrise according to [2] Eq. (11) (not [1])
        z = reparametrize(self.z_mu.repeat(batch_size, 1, 1), self.z_logvar.repeat(batch_size, 1, 1),
                          sampling=self.training, cuda=self.cuda)
        z = z[:, :, None]

        return reparametrize(mu_activations * z, (var_activations * z.pow(2)).log(), sampling=self.training,
                             cuda=self.cuda) 
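
The reparametrize helper is not shown in this snippet. Based on the standard local reparameterization trick the comments cite, it draws mu + std * eps while training and returns the mean otherwise; a minimal sketch of such a helper (an assumption for illustration, not the project's exact code):

import torch

def reparametrize(mu, logvar, sampling=True, cuda=False):
    # 'cuda' is kept only to match the call signature above.
    if not sampling:
        return mu
    std = logvar.mul(0.5).exp()
    eps = torch.randn_like(std)   # drawn on the same device as std
    return mu + eps * std
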
Example #3
Source File: tcn.py    From savn with Apache License 2.0
def net(self, x, block_num, params=None):
        layer_name = "ll_tc.ll_temporal_block" + str(block_num)
        if params is None:
            x = self.ll_conv1(x)
        else:
            x = F.conv1d(
                x,
                weight=params[layer_name + ".ll_conv1.weight"],
                bias=params[layer_name + ".ll_conv1.bias"],
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
            )

        x = self.chomp1(x)
        x = F.leaky_relu(x)

        return x 
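
Passing the weight and bias explicitly like this is the usual pattern for meta-learning loops (as in SAVN), where the same forward pass must run either with the module's own parameters or with adapted ones supplied in a dict. A toy sketch of the pattern (names here are illustrative, not from the project):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv1d(4, 8, kernel_size=3, padding=1)
x = torch.randn(2, 4, 50)

# The module path and the functional path agree ...
assert torch.allclose(conv(x), F.conv1d(x, conv.weight, conv.bias, padding=1))

# ... but the functional path also accepts externally adapted parameters.
adapted_w = conv.weight - 0.01 * torch.randn_like(conv.weight)
y_adapted = F.conv1d(x, adapted_w, conv.bias, padding=1)
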
Example #4
Source File: glow.py    From fac-via-ppg with Apache License 2.0
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
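
This is WaveGlow's invertible 1x1 convolution: the forward pass multiplies every time step by a square matrix W (contributing batch_size * n_of_groups * logdet(W) to the log-likelihood), and the reverse pass convolves with the cached inverse. A quick check that the two directions cancel (shapes illustrative):

import torch
import torch.nn.functional as F

group_size = 8
W = torch.empty(group_size, group_size)
torch.nn.init.orthogonal_(W)                 # invertible by construction

z = torch.randn(2, group_size, 100)
fwd = F.conv1d(z, W[..., None])              # a 1x1 conv is a per-step matmul by W
rev = F.conv1d(fwd, W.inverse()[..., None])
assert torch.allclose(rev, z, atol=1e-4)
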
Example #5
Source File: simplelayers.py    From MONAI with Apache License 2.0
def __init__(self, spatial_dims: int, sigma, truncated: float = 4.0):
        """
        Args:
            spatial_dims: number of spatial dimensions of the input image.
                must have shape (Batch, channels, H[, W, ...]).
            sigma (float or sequence of floats): standard deviation(s) of the Gaussian kernel.
            truncated: how many standard deviations the kernel spreads over.
        """
        super().__init__()
        self.spatial_dims = int(spatial_dims)
        _sigma = ensure_tuple_rep(sigma, self.spatial_dims)
        self.kernel = [
            torch.nn.Parameter(torch.as_tensor(gaussian_1d(s, truncated), dtype=torch.float), False) for s in _sigma
        ]
        self.padding = [same_padding(k.size()[0]) for k in self.kernel]
        self.conv_n = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1]
        for idx, param in enumerate(self.kernel):
            self.register_parameter(f"kernel_{idx}", param) 
Example #6
Source File: glow.py    From tn2-wg with BSD 3-Clause "New" or "Revised" License
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Example #7
Source File: lightweight_convolution.py    From fairseq with MIT License
def forward(self, input):
        '''
        input size: B x C x T
        output size: B x C x T
        '''
        B, C, T = input.size()
        H = self.num_heads

        weight = self.weight
        if self.weight_softmax:
            weight = F.softmax(weight, dim=-1)

        weight = F.dropout(weight, self.weight_dropout, training=self.training)
        # Merge every C/H entries into the batch dimension (C = self.input_size)
        # B x C x T -> (B * C/H) x H x T
        # Alternatively, one could expand the weight to C x 1 x K (by a factor
        # of C/H) and leave the input unreshaped, but that is slower
        input = input.view(-1, H, T)
        output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
        output = output.view(B, C, T)
        if self.bias is not None:
            output = output + self.bias.view(1, -1, 1)

        return output 
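
The view plus groups=H is what makes the convolution "lightweight": only H distinct kernels of width K are learned, shared across all C channels. A small shape walk-through (a sketch with toy sizes, not fairseq's defaults):

import torch
import torch.nn.functional as F

B, C, T, H, K = 2, 8, 10, 4, 3
x = torch.randn(B, C, T)
weight = torch.softmax(torch.randn(H, 1, K), dim=-1)  # one normalized kernel per head

folded = x.view(-1, H, T)                    # (B*C/H, H, T): fold channel groups into batch
out = F.conv1d(folded, weight, padding=K // 2, groups=H)
print(out.view(B, C, T).shape)               # torch.Size([2, 8, 10])
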
Example #8
Source File: glow.py    From Tacotron2-Mandarin with MIT License
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.float().inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Example #9
Source File: waveunet_utils.py    From Wave-U-Net-Pytorch with MIT License
def forward(self, x):
        # Pad here if not using transposed conv
        input_size = x.shape[2]
        if self.padding != "valid":
            num_pad = (self.kernel_size-1)//2
            out = F.pad(x, (num_pad, num_pad), mode=self.padding)
        else:
            out = x

        # Lowpass filter (+ 0 insertion if transposed)
        if self.transpose:
            expected_steps = ((input_size - 1) * self.stride + 1)
            if self.padding == "valid":
                expected_steps = expected_steps - self.kernel_size + 1

            out = F.conv_transpose1d(out, self.filter, stride=self.stride, padding=0, groups=self.channels)
            diff_steps = out.shape[2] - expected_steps
            if diff_steps > 0:
                assert diff_steps % 2 == 0
                out = out[:,:,diff_steps//2:-diff_steps//2]
        else:
            assert input_size % self.stride == 1
            out = F.conv1d(out, self.filter, stride=self.stride, padding=0, groups=self.channels)

        return out 
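
Used this way, a strided F.conv1d with a fixed low-pass filter and groups=channels performs anti-aliased downsampling, and F.conv_transpose1d with the same filter performs the matching upsampling (zero insertion followed by low-pass filtering); the expected_steps bookkeeping trims the transposed conv's border samples, since its output length is (L_in - 1) * stride - 2*padding + kernel_size. A toy round trip with a plain box filter (not the project's actual filter):

import torch
import torch.nn.functional as F

channels, k, stride = 2, 5, 2
lowpass = torch.ones(channels, 1, k) / k     # box filter, one per channel

x = torch.randn(1, channels, 101)            # length with L % stride == 1
down = F.conv1d(x, lowpass, stride=stride, groups=channels)
up = F.conv_transpose1d(down, lowpass, stride=stride, groups=channels)
print(down.shape, up.shape)                  # (1, 2, 49) and (1, 2, 101)
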
Example #10
Source File: lightweight_convolution.py    From helo_word with Apache License 2.0
def forward(self, input):
        '''
        input size: B x C x T
        output size: B x C x T
        '''
        B, C, T = input.size()
        H = self.num_heads

        weight = self.weight
        if self.weight_softmax:
            weight = F.softmax(weight, dim=-1)

        weight = F.dropout(weight, self.weight_dropout, training=self.training)
        # Merge every C/H entries into the batch dimension (C = self.input_size)
        # B x C x T -> (B * C/H) x H x T
        # Alternatively, one could expand the weight to C x 1 x K (by a factor
        # of C/H) and leave the input unreshaped, but that is slower
        input = input.view(-1, H, T)
        output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
        output = output.view(B, C, T)
        if self.bias is not None:
            output = output + self.bias.view(1, -1, 1)

        return output 
Example #11
Source File: lightweight_convolution.py    From attn2d with MIT License
def forward(self, input):
        '''
        input size: B x C x T
        output size: B x C x T
        '''
        B, C, T = input.size()
        H = self.num_heads

        weight = self.weight
        if self.weight_softmax:
            weight = F.softmax(weight, dim=-1)

        weight = F.dropout(weight, self.weight_dropout, training=self.training)
        # Merge every C/H entries into the batch dimension (C = self.input_size)
        # B x C x T -> (B * C/H) x H x T
        # Alternatively, one could expand the weight to C x 1 x K (by a factor
        # of C/H) and leave the input unreshaped, but that is slower
        input = input.view(-1, H, T)
        output = F.conv1d(input, weight, padding=self.padding, groups=self.num_heads)
        output = output.view(B, C, T)
        if self.bias is not None:
            output = output + self.bias.view(1, -1, 1)

        return output 
Example #12
Source File: quaternion_ops.py    From Quaternion-Recurrent-Neural-Networks with GNU General Public License v3.0
def quaternion_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride, 
                    padding, groups, dilatation):
    """
    Applies a quaternion convolution to the incoming data.
    """

    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=1)
    cat_kernels_4_quaternion   = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)

    if   input.dim() == 3:
        convfunc = F.conv1d
    elif input.dim() == 4:
        convfunc = F.conv2d
    elif input.dim() == 5:
        convfunc = F.conv3d
    else:
        raise Exception("The convolutional input is either 3, 4 or 5 dimensions."
                        " input.dim = " + str(input.dim()))

    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, dilatation, groups) 
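
The four torch.cat calls assemble the real block matrix of the Hamilton product, so a single real-valued convolution applies the full quaternion kernel to the stacked (r, i, j, k) input channels:

    [ r  -i  -j  -k ]
    [ i   r  -k   j ]
    [ j   k   r  -i ]
    [ k  -j   i   r ]
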
Example #13
Source File: glow.py    From FastSpeech with MIT License
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Example #14
Source File: burgerFiniteDifference.py    From ar-pde-cnn with MIT License
def __call__(self, u):
        """
        Args:
            u (torch.Tensor): [B, C, H]
        Returns:
            grad_u: [B, C, H]
        """
        if(self.kernel_size == 2):
            return self.conditionalUpwind(u)

        u_shape = u.shape
        u = u.view(-1, 1, *u_shape[-1:])
        u = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            self.weight, stride=1, padding=0, bias=None) / (self.dx)

        return u.view(u_shape) 
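
self.weight here is a finite-difference stencil, so the convolution evaluates a spatial derivative under periodic boundary conditions (hence the circular padding). A minimal sketch with a second-order central-difference stencil (the stencil and dx are illustrative, not the project's exact setup):

import torch
import torch.nn.functional as F

dx = 0.1
# du/dx ~ (u[i+1] - u[i-1]) / (2*dx); F.conv1d is cross-correlation, so no kernel flip
weight = torch.tensor([[[-0.5, 0.0, 0.5]]])

u = torch.sin(torch.arange(0.0, 6.2832, dx)).view(1, 1, -1)
grad_u = F.conv1d(F.pad(u, (1, 1), mode='circular'), weight) / dx
# grad_u approximates cos(x), up to a small error near the periodic seam
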
Example #15
Source File: burgerFiniteDifference.py    From ar-pde-cnn with MIT License
def conditionalUpwind(self, u):
        """
        Upwind scheme:
        https://en.wikipedia.org/wiki/Upwind_scheme
        Args:
            u (torch.Tensor): [B, C, H]
        Returns:
            grad_u: [B, C, H]
        """
        u_shape = u.shape
        u = u.view(-1, 1, *u_shape[-1:])

        u1 = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            self.weight, stride=1, padding=0, bias=None) / (self.dx)

        u2 = F.conv1d(F.pad(u, self.padding, mode='circular'), 
            -torch.flip(self.weight, dims=[-1]), stride=1, padding=0, bias=None) / (self.dx)

        # Select the upwind difference based on the sign of u
        u = torch.where(u > 0, u1, u2)

        return u.view(u_shape) 
Example #16
Source File: glow.py    From FastSpeech with MIT License
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.float().inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Example #17
Source File: quaternion_ops.py    From Pytorch-Quaternion-Neural-Networks with GNU General Public License v3.0
def quaternion_conv(input, r_weight, i_weight, j_weight, k_weight, bias, stride,
                    padding, groups, dilatation):
    """
    Applies a quaternion convolution to the incoming data.
    """

    cat_kernels_4_r = torch.cat([r_weight, -i_weight, -j_weight, -k_weight], dim=1)
    cat_kernels_4_i = torch.cat([i_weight,  r_weight, -k_weight, j_weight], dim=1)
    cat_kernels_4_j = torch.cat([j_weight,  k_weight, r_weight, -i_weight], dim=1)
    cat_kernels_4_k = torch.cat([k_weight,  -j_weight, i_weight, r_weight], dim=1)

    cat_kernels_4_quaternion   = torch.cat([cat_kernels_4_r, cat_kernels_4_i, cat_kernels_4_j, cat_kernels_4_k], dim=0)

    if   input.dim() == 3:
        convfunc = F.conv1d
    elif input.dim() == 4:
        convfunc = F.conv2d
    elif input.dim() == 5:
        convfunc = F.conv3d
    else:
        raise Exception("The convolutional input is either 3, 4 or 5 dimensions."
                        " input.dim = " + str(input.dim()))

    return convfunc(input, cat_kernels_4_quaternion, bias, stride, padding, dilatation, groups) 
Example #18
Source File: waveglow.py    From NeMo with Apache License 2.0
def forward(self, z, reverse: bool = False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.float().inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.dtype == torch.half:
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W.float())
            z = self.conv(z)
            return (
                z,
                log_det_W,
            ) 
Example #19
Source File: plca.py    From pytorch-NMF with MIT License
def update_params(self, VdivWZH, update_W, update_H, update_Z, W_alpha, H_alpha, Z_alpha):
        # type: (Tensor, bool, bool, bool, float, float, float) -> None

        if update_W or update_Z:
            new_W = F.conv1d(VdivWZH[:, None], self.H[:, None] * self.Z[:, None, None]) * self.W

        if update_H:
            new_H = F.conv1d(VdivWZH[None, ...], torch.transpose(self.W * self.Z[:, None], 0, 1))[0] * self.H
            new_H = normalize(self.fix_neg(new_H + H_alpha - 1), 1)
            self.H[:] = new_H

        if update_W:
            self.W[:] = normalize(self.fix_neg(new_W + W_alpha - 1), (0, 2))

        if update_Z:
            Z = normalize(self.fix_neg(new_W.sum((0, 2)) + Z_alpha - 1))
            self.Z[:] = Z 
Example #20
Source File: kernelFunction.py    From airlab with Apache License 2.0
def bspline_kernel_1d(sigma, order=2, asTensor=False, dtype=th.float32, device='cpu'):

    kernel_ones = th.ones(1, 1, sigma)
    kernel = kernel_ones

    padding = sigma - 1

    for i in range(1, order + 1):
        kernel = F.conv1d(kernel, kernel_ones, padding=padding) / sigma

    if asTensor:
        return kernel[0, 0, ...].to(dtype=dtype, device=device)
    else:
        return kernel[0, 0, ...].numpy() 
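
Repeated convolution of a box kernel with itself is the classic recursive construction of B-splines: each pass raises the spline order by one and widens the kernel by sigma - 1 samples. A usage sketch (assuming the helper above, with th as torch):

import torch as th
import torch.nn.functional as F

k = bspline_kernel_1d(5, order=2, asTensor=True)
print(k.shape)   # torch.Size([13]): 5 + 2*(5 - 1) after two smoothing passes
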
Example #21
Source File: modules.py    From TTS-Cube with Apache License 2.0
def forward(self, input_data):
        num_batches, _, num_samples = input_data.size()

        self.num_samples = num_samples

        forward_transform = F.conv1d(input_data,
                                     self.forward_basis,
                                     stride=self.hop_length,
                                     padding=self.filter_length)
        cutoff = int((self.filter_length / 2) + 1)
        real_part = forward_transform[:, :cutoff, :]
        imag_part = forward_transform[:, cutoff:, :]

        magnitude = torch.sqrt(real_part**2 + imag_part**2)
        phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))
        return magnitude, phase 
Example #22
Source File: trellisnet.py    From trellisnet with MIT License
def forward(self, input, dilation, hid=None):
        k = self.kernel_size
        padding = (k - 1) * dilation    # To maintain causality constraint
        x = F.pad(input, (padding, 0))

        # Input part
        x_1 = x[:, :self.n_inp1]

        # Hidden part
        z_1 = x[:, self.n_inp1:]
        z_1[:, :, :padding] = hid.repeat(1, 1, padding)  # Note: we only pad the hidden part :-)
        device = x_1.get_device()

        # A linear transformation of the input sequence (and pre-computed once)
        if (dilation, device) not in self.dict or self.dict[(dilation, device)] is None:
            self.dict[(dilation, device)] = F.conv1d(x_1, self.weight1, dilation=dilation)

        # Input injection
        return self.dict[(dilation, device)] + F.conv1d(self.drop(z_1), self.weight2, self.bias2, dilation=dilation) 
Example #23
Source File: modules.py    From source_separation with Apache License 2.0
def forward(self, x):
        """
        Implemented complex convolution using combining 'grouped convolution' and 'real / img weight'
        :param x: data (N, C, T) C is concatenated with C/2 real channels and C/2 idea channels
        :return: complex conved result
        """
        # adopt reflect padding
        if self.padding:
            x = F.pad(x, (self.padding, self.padding), 'reflect')

        # forward real
        real_part = F.conv1d(x, self.A, None, stride=self.stride, padding=0,
                             dilation=self.dilation, groups=2)

        # forward imaginary
        spl = self.in_channels // 2
        weight_B = torch.cat([self.B[:spl].data * (-1), self.B[spl:].data])
        idea_part = F.conv1d(x, weight_B, None, stride=self.stride, padding=0,
                             dilation=self.dilation, groups=2)

        return real_part + idea_part 
Example #24
Source File: glow.py    From waveglow with BSD 3-Clause "New" or "Revised" License
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.float().inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Example #25
Source File: glow.py    From LightSpeech with MIT License
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Example #26
Source File: glow.py    From LightSpeech with MIT License
def forward(self, z, reverse=False):
        # shape
        batch_size, group_size, n_of_groups = z.size()

        W = self.conv.weight.squeeze()

        if reverse:
            if not hasattr(self, 'W_inverse'):
                # Reverse computation
                W_inverse = W.float().inverse()
                W_inverse = Variable(W_inverse[..., None])
                if z.type() == 'torch.cuda.HalfTensor':
                    W_inverse = W_inverse.half()
                self.W_inverse = W_inverse
            z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
            return z
        else:
            # Forward computation
            log_det_W = batch_size * n_of_groups * torch.logdet(W)
            z = self.conv(z)
            return z, log_det_W 
Example #27
Source File: mocha.py    From neural_sp with Apache License 2.0
def moving_sum(x, back, forward):
    """Compute the moving sum of x over a chunk_size with the provided bounds.

    Args:
        x (FloatTensor): `[B, H_ma, H_ca, qlen, klen]`
        back (int):
        forward (int):

    Returns:
        x_sum (FloatTensor): `[B, H_ma, H_ca, qlen, klen]`

    """
    bs, n_heads_mono, n_heads_chunk, qlen, klen = x.size()
    x = x.view(-1, klen)
    # Moving sum is computed as a carefully-padded 1D convolution with ones
    x_padded = F.pad(x, pad=[back, forward])  # `[B * H_ma * H_ca * qlen, back + klen + forward]`
    # Add a "channel" dimension
    x_padded = x_padded.unsqueeze(1)
    # Construct filters
    filters = x.new_ones(1, 1, back + forward + 1)
    x_sum = F.conv1d(x_padded, filters)
    x_sum = x_sum.squeeze(1).view(bs, n_heads_mono, n_heads_chunk, qlen, -1)
    return x_sum 
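
Because the filter is all ones, each output element is simply the sum over a window reaching back samples behind and forward samples ahead. A tiny numeric check:

import torch
import torch.nn.functional as F

x = torch.arange(5.0).view(1, 1, 5)          # [0, 1, 2, 3, 4]
back, forward = 1, 1
x_padded = F.pad(x, pad=[back, forward])
filters = x.new_ones(1, 1, back + forward + 1)
print(F.conv1d(x_padded, filters))           # tensor([[[1., 3., 6., 9., 7.]]])
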
Example #28
Source File: burgerFiniteDifference.py    From ar-pde-cnn with MIT License
def calcEdgeFlux(self, flux):

        flux_edge1 = F.conv1d(F.pad(flux, (2,1), mode='circular'), self.weight_flux1, 
                        stride=1, padding=0, bias=None)

        flux_edge2 = F.conv1d(F.pad(flux, (1,2), mode='circular'), self.weight_flux2, 
                        stride=1, padding=0, bias=None)

        beta1 = torch.pow(F.conv1d(F.pad(flux, (2,1), mode='circular'), self.weight_beta1, 
                        stride=1, padding=0, bias=None), 2)
        
        beta2 = torch.pow(F.conv1d(F.pad(flux, (1,2), mode='circular'), self.weight_beta2, 
                        stride=1, padding=0, bias=None), 2)
        
        eps = 1e-6
        w1 = 1./(3*(eps + beta1)**2)
        w2 = 2./(3*(eps + beta2)**2)

        w = torch.stack([w1, w2], dim = 0)

        w = w / torch.sum(w, dim=0)

        edge_flux = w[0]*flux_edge1 + w[1]*flux_edge2

        return edge_flux 
Example #29
Source File: stft.py    From Tacotron2-Mandarin with MIT License
def transform(self, input_data):
        num_batches = input_data.size(0)
        num_samples = input_data.size(1)

        self.num_samples = num_samples

        # similar to librosa, reflect-pad the input
        input_data = input_data.view(num_batches, 1, num_samples)
        input_data = F.pad(
            input_data.unsqueeze(1),
            (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
            mode='reflect')
        input_data = input_data.squeeze(1)

        forward_transform = F.conv1d(
            input_data,
            Variable(self.forward_basis, requires_grad=False),
            stride=self.hop_length,
            padding=0)

        cutoff = int((self.filter_length / 2) + 1)
        real_part = forward_transform[:, :cutoff, :]
        imag_part = forward_transform[:, cutoff:, :]

        magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
        phase = torch.autograd.Variable(
            torch.atan2(imag_part.data, real_part.data))

        return magnitude, phase 
Example #30
Source File: enc_dec.py    From asteroid with MIT License
def batch_1d_conv(self, inp, filters):
        # Here we perform multichannel / multi-source convolution.
        # Output should be (batch, channels, freq, conv_time)
        batched_conv = F.conv1d(inp.view(-1, 1, inp.shape[-1]),
                                filters, stride=self.stride,
                                padding=self.padding)
        output_shape = inp.shape[:-1] + batched_conv.shape[-2:]
        return batched_conv.view(output_shape)