Python torch.linspace() Examples

The following are 29 code examples of torch.linspace(), collected from open-source projects. The project, source file, and license for each example are noted above it.
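As a quick refresher before the examples: torch.linspace(start, end, steps) returns a one-dimensional tensor of steps values spaced evenly from start to end, with both endpoints included. A minimal sketch:

import torch

t = torch.linspace(0, 1, steps=5)
print(t)    # tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])

# dtype and device keyword arguments work as in other factory functions
t64 = torch.linspace(-1.0, 1.0, steps=3, dtype=torch.float64)
print(t64)  # tensor([-1., 0., 1.], dtype=torch.float64)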
Example #1
Source File: vmf_unif.py    From vmf_vae_nlp with MIT License
def _sample_ortho_batch(self, mu, dim):
        """

        :param mu: Variable, [batch size, latent dim]
        :param dim: scala. =latent dim
        :return:
        """
        _batch_sz, _lat_dim = mu.size()
        assert _lat_dim == dim
        squeezed_mu = mu.unsqueeze(1)

        v = GVar(torch.randn(_batch_sz, dim, 1))  # TODO random

        # v = GVar(torch.linspace(-1, 1, steps=dim))
        # v = v.expand(_batch_sz, dim).unsqueeze(2)

        rescale_val = torch.bmm(squeezed_mu, v).squeeze(2)
        proj_mu_v = mu * rescale_val
        ortho = v.squeeze() - proj_mu_v
        ortho_norm = torch.norm(ortho, p=2, dim=1, keepdim=True)
        y = ortho / ortho_norm
        return y 
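The body of this method is one Gram-Schmidt step: the random vector v is projected onto mu (assumed unit-norm, as a vMF mean direction), the projection is subtracted, and the remainder is normalized. A minimal standalone sketch of the same step without the GVar wrapper (all names here are illustrative):

import torch

batch_sz, dim = 4, 8
mu = torch.nn.functional.normalize(torch.randn(batch_sz, dim), dim=1)
v = torch.randn(batch_sz, dim)

proj = (mu * v).sum(dim=1, keepdim=True) * mu   # projection of v onto unit mu
ortho = v - proj
y = ortho / ortho.norm(p=2, dim=1, keepdim=True)

print((mu * y).sum(dim=1))  # ~0 everywhere, up to floating-point error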
Example #2
Source File: transforms.py    From audio with BSD 2-Clause "Simplified" License
def _fade_out(self, waveform_length: int) -> Tensor:
        fade = torch.linspace(0, 1, self.fade_out_len)
        ones = torch.ones(waveform_length - self.fade_out_len)

        if self.fade_shape == "linear":
            fade = - fade + 1

        if self.fade_shape == "exponential":
            fade = torch.pow(2, - fade) * (1 - fade)

        if self.fade_shape == "logarithmic":
            fade = torch.log10(1.1 - fade) + 1

        if self.fade_shape == "quarter_sine":
            fade = torch.sin(fade * math.pi / 2 + math.pi / 2)

        if self.fade_shape == "half_sine":
            fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5

        return torch.cat((ones, fade)).clamp_(0, 1) 
Example #3
Source File: pytorch.py    From iSketchNFill with GNU General Public License v3.0
def uniform_grid(shape):
    '''Uniformly places control points arranged in a grid across normalized image coordinates.
    
    Params
    ------
    shape : tuple
        HxW defining the number of control points in height and width dimension

    Returns
    -------
    points: HxWx2 tensor
        Control points over [0,1] normalized image range.
    '''
    H,W = shape[:2]    
    c = torch.zeros(H, W, 2)
    c[..., 0] = torch.linspace(0, 1, W)
    c[..., 1] = torch.linspace(0, 1, H).unsqueeze(-1)
    return c 
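A quick shape check on this helper: for shape (3, 4), x varies along the width axis and y along the height axis.

c = uniform_grid((3, 4))
print(c.shape)     # torch.Size([3, 4, 2])
print(c[0, :, 0])  # tensor([0.0000, 0.3333, 0.6667, 1.0000]), x along width
print(c[:, 0, 1])  # tensor([0.0000, 0.5000, 1.0000]), y along height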
Example #4
Source File: problems.py    From torchdiffeq with MIT License
def construct_problem(device, npts=10, ode='constant', reverse=False):

    f = PROBLEMS[ode]().to(device)

    t_points = torch.linspace(1, 8, npts).to(device).requires_grad_(True)
    sol = f.y_exact(t_points)

    def _flip(x, dim):
        indices = [slice(None)] * x.dim()
        indices[dim] = torch.arange(x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device)
        return x[tuple(indices)]

    if reverse:
        t_points = _flip(t_points, 0).clone().detach()
        sol = _flip(sol, 0).clone().detach()

    return f, sol[0].detach(), t_points, sol 
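The torch.linspace call above produces the differentiable time grid that the solver integrates over. A hedged usage sketch (assuming the PROBLEMS registry defines 'constant' and that torchdiffeq's public odeint is used):

import torch
from torchdiffeq import odeint

func, y0, t_points, sol_exact = construct_problem(torch.device('cpu'), npts=10)
sol_num = odeint(func, y0, t_points, method='dopri5')
print(torch.allclose(sol_num, sol_exact, atol=1e-4))  # should be True for well-behaved problems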
Example #5
Source File: graph.py    From PPGNet with MIT License
def forward(self, feat, coord_st, coord_ed):
        _, ch, h, w = feat.size()
        num_st, num_ed = coord_st.size(0), coord_ed.size(0)
        assert coord_st.size(1) == 3 and coord_ed.size(1) == 3
        assert (coord_st[:, 0] == coord_st[0, 0]).all() and (coord_ed[:, 0] == coord_st[0, 0]).all()
        bs = coord_st[0, 0].item()
        # construct bounding boxes from junction points
        with torch.no_grad():
            coord_st = coord_st[:, 1:] * self.scale
            coord_ed = coord_ed[:, 1:] * self.scale
            coord_st = coord_st.unsqueeze(1).expand(num_st, num_ed, 2)
            coord_ed = coord_ed.unsqueeze(0).expand(num_st, num_ed, 2)
            arr_st2ed = coord_ed - coord_st
            sample_grid = torch.linspace(0, 1, steps=self.align_size).to(feat).view(1, 1, self.align_size).expand(num_st, num_ed, self.align_size)
            sample_grid = torch.einsum("ijd,ijs->ijsd", (arr_st2ed, sample_grid)) + coord_st.view(num_st, num_ed, 1, 2).expand(num_st, num_ed, self.align_size, 2)
            sample_grid = sample_grid.view(num_st, num_ed, self.align_size, 2)
            sample_grid[..., 0] = sample_grid[..., 0] / (w - 1) * 2 - 1
            sample_grid[..., 1] = sample_grid[..., 1] / (h - 1) * 2 - 1

        output = F.grid_sample(feat[int(bs)].view(1, ch, h, w).expand(num_st, ch, h, w), sample_grid)
        assert output.size() == (num_st, ch, num_ed, self.align_size)
        output = output.permute(0, 2, 1, 3).contiguous()

        return output 
Example #6
Source File: transforms.py    From audio with BSD 2-Clause "Simplified" License
def _fade_in(self, waveform_length: int) -> Tensor:
        fade = torch.linspace(0, 1, self.fade_in_len)
        ones = torch.ones(waveform_length - self.fade_in_len)

        if self.fade_shape == "linear":
            fade = fade  # the ramp from linspace is already linear

        if self.fade_shape == "exponential":
            fade = torch.pow(2, (fade - 1)) * fade

        if self.fade_shape == "logarithmic":
            fade = torch.log10(.1 + fade) + 1

        if self.fade_shape == "quarter_sine":
            fade = torch.sin(fade * math.pi / 2)

        if self.fade_shape == "half_sine":
            fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5

        return torch.cat((fade, ones)).clamp_(0, 1) 
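Both _fade_in and _fade_out above are methods of torchaudio's Fade transform, so a typical end-to-end use looks like this sketch:

import torch
import torchaudio.transforms as T

waveform = torch.randn(1, 16000)  # e.g. one second of audio at 16 kHz
fade = T.Fade(fade_in_len=4000, fade_out_len=4000, fade_shape="linear")
faded = fade(waveform)            # same shape, tapered at both ends
print(faded.shape)                # torch.Size([1, 16000])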
Example #7
Source File: models.py    From end-to-end-SLU with Apache License 2.0
def __init__(self, N_filt,Filt_dim,fs, stride=1, padding=0, is_cuda=False):
		super(SincLayer,self).__init__()

		# Mel Initialization of the filterbanks
		low_freq_mel = 80
		high_freq_mel = (2595 * np.log10(1 + (fs / 2) / 700))  # Convert Hz to Mel
		mel_points = np.linspace(low_freq_mel, high_freq_mel, N_filt)  # Equally spaced in Mel scale
		f_cos = (700 * (10**(mel_points / 2595) - 1)) # Convert Mel to Hz
		b1=np.roll(f_cos,1)
		b2=np.roll(f_cos,-1)
		b1[0]=30
		b2[-1]=(fs/2)-100

		self.freq_scale=fs*1.0
		self.filt_b1 = torch.nn.Parameter(torch.from_numpy(b1/self.freq_scale))
		self.filt_band = torch.nn.Parameter(torch.from_numpy((b2-b1)/self.freq_scale))

		self.N_filt=N_filt
		self.Filt_dim=Filt_dim
		self.fs=fs
		self.stride=stride
		self.padding=padding
		self.is_cuda = is_cuda 
Example #8
Source File: gradient_tests.py    From torchdiffeq with MIT License
def problem(self):

        class Odefunc(torch.nn.Module):

            def __init__(self):
                super(Odefunc, self).__init__()
                self.A = torch.nn.Parameter(torch.tensor([[-0.1, 2.0], [-2.0, -0.1]]))
                self.unused_module = torch.nn.Linear(2, 5)

            def forward(self, t, y):
                return torch.mm(y**3, self.A)

        y0 = torch.tensor([[2., 0.]]).to(TEST_DEVICE).requires_grad_(True)
        t_points = torch.linspace(0., 25., 10).to(TEST_DEVICE).requires_grad_(True)
        func = Odefunc().to(TEST_DEVICE)
        return func, y0, t_points 
Example #9
Source File: warp.py    From conditional-motion-propagation with MIT License
def forward(self, image, flow):
        flow_for_grip = torch.zeros_like(flow)
        flow_for_grip[:,0,:,:] = flow[:,0,:,:] / ((flow.size(3) - 1.0) / 2.0)
        flow_for_grip[:,1,:,:] = flow[:,1,:,:] / ((flow.size(2) - 1.0) / 2.0)

        torchHorizontal = torch.linspace(
            -1.0, 1.0, image.size(3)).view(
            1, 1, 1, image.size(3)).expand(
            image.size(0), 1, image.size(2), image.size(3))
        torchVertical = torch.linspace(
            -1.0, 1.0, image.size(2)).view(
            1, 1, image.size(2), 1).expand(
            image.size(0), 1, image.size(2), image.size(3))
        grid = torch.cat([torchHorizontal, torchVertical], 1).cuda()

        grid = (grid + flow_for_grip).permute(0, 2, 3, 1)
        return torch.nn.functional.grid_sample(image, grid) 
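The two torch.linspace calls build grid_sample's identity grid in its normalized [-1, 1] coordinate convention; adding the normalized flow then performs backward warping. A minimal self-contained check of the identity-grid part (with align_corners=True, zero flow reproduces the input exactly):

import torch
import torch.nn.functional as F

H, W = 4, 5
xs = torch.linspace(-1, 1, W).view(1, 1, 1, W).expand(1, 1, H, W)
ys = torch.linspace(-1, 1, H).view(1, 1, H, 1).expand(1, 1, H, W)
grid = torch.cat([xs, ys], 1).permute(0, 2, 3, 1)  # [1, H, W, 2], (x, y) order

img = torch.arange(H * W, dtype=torch.float32).view(1, 1, H, W)
out = F.grid_sample(img, grid, align_corners=True)
print(torch.allclose(out, img))  # True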
Example #10
Source File: vmf_batch.py    From vmf_vae_nlp with MIT License
def _sample_orthonormal_to(self, mu, dim):
        """Sample point on sphere orthogonal to mu.
        """
        v = GVar(torch.randn(dim))  # TODO random

        # v = GVar(torch.linspace(-1,1,steps=dim))

        rescale_value = mu.dot(v) / mu.norm()
        proj_mu_v = mu * rescale_value.expand(dim)
        ortho = v - proj_mu_v
        ortho_norm = torch.norm(ortho)
        return ortho / ortho_norm.expand_as(ortho)

# vmf = vMF_fast(50, 100, 100)
# batchsz = 100
#
# mu = torch.FloatTensor(np.random.uniform(0, 1, 20 * batchsz))
# mu = mu.view(batchsz, -1)
# mu = mu / torch.norm(mu, p=2, dim=1, keepdim=True)
# vmf.sample_cell(mu, None, 100)
# x = vMF(10,lat_dim=50,kappa=50) 
Example #11
Source File: vmf_batch.py    From vmf_vae_nlp with MIT License
def _sample_ortho_batch(self, mu, dim):
        """

        :param mu: Variable, [batch size, latent dim]
        :param dim: scala. =latent dim
        :return:
        """
        _batch_sz, _lat_dim = mu.size()
        assert _lat_dim == dim
        squeezed_mu = mu.unsqueeze(1)

        v = GVar(torch.randn(_batch_sz, dim, 1))  # TODO random

        # v = GVar(torch.linspace(-1, 1, steps=dim))
        # v = v.expand(_batch_sz, dim).unsqueeze(2)

        rescale_val = torch.bmm(squeezed_mu, v).squeeze(2)
        proj_mu_v = mu * rescale_val
        ortho = v.squeeze() - proj_mu_v
        ortho_norm = torch.norm(ortho, p=2, dim=1, keepdim=True)
        y = ortho / ortho_norm
        return y 
Example #12
Source File: vmf_hypvae.py    From vmf_vae_nlp with MIT License
def _sample_orthonormal_to(self, mu, dim):
        """Sample point on sphere orthogonal to mu.
        """
        v = GVar(torch.randn(dim))  # TODO random

        # v = GVar(torch.linspace(-1,1,steps=dim))

        rescale_value = mu.dot(v) / mu.norm()
        proj_mu_v = mu * rescale_value.expand(dim)
        ortho = v - proj_mu_v
        ortho_norm = torch.norm(ortho)
        return ortho / ortho_norm.expand_as(ortho)


#
# a = torch.tensor(10)
# b = torch.ones(1, dtype=torch.float, requires_grad=True)
#
# y = bessel(a, b)
# loss = 1 - y
# print(y)
# loss.backward()
# print(a) 
Example #13
Source File: run.py    From pytorch-pwc with GNU General Public License v3.0
def backwarp(tenInput, tenFlow):
	if str(tenFlow.size()) not in backwarp_tenGrid:
		tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3]).view(1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
		tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2]).view(1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])

		backwarp_tenGrid[str(tenFlow.size())] = torch.cat([ tenHorizontal, tenVertical ], 1).cuda()
	# end

	if str(tenFlow.size()) not in backwarp_tenPartial:
		backwarp_tenPartial[str(tenFlow.size())] = tenFlow.new_ones([ tenFlow.shape[0], 1, tenFlow.shape[2], tenFlow.shape[3] ])
	# end

	tenFlow = torch.cat([ tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0) ], 1)
	tenInput = torch.cat([ tenInput, backwarp_tenPartial[str(tenFlow.size())] ], 1)

	tenOutput = torch.nn.functional.grid_sample(input=tenInput, grid=(backwarp_tenGrid[str(tenFlow.size())] + tenFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='zeros', align_corners=True)

	tenMask = tenOutput[:, -1:, :, :]; tenMask[tenMask > 0.999] = 1.0; tenMask[tenMask < 1.0] = 0.0

	return tenOutput[:, :-1, :, :] * tenMask
# end

########################################################## 
Example #14
Source File: vmf_hypvae.py    From vmf_vae_nlp with MIT License
def _sample_ortho_batch(self, mu, dim):
        """

        :param mu: Variable, [batch size, latent dim]
        :param dim: scala. =latent dim
        :return:
        """
        _batch_sz, _lat_dim = mu.size()
        assert _lat_dim == dim
        squeezed_mu = mu.unsqueeze(1)

        v = GVar(torch.randn(_batch_sz, dim, 1))  # TODO random

        # v = GVar(torch.linspace(-1, 1, steps=dim))
        # v = v.expand(_batch_sz, dim).unsqueeze(2)

        rescale_val = torch.bmm(squeezed_mu, v).squeeze(2)
        proj_mu_v = mu * rescale_val
        ortho = v.squeeze() - proj_mu_v
        ortho_norm = torch.norm(ortho, p=2, dim=1, keepdim=True)
        y = ortho / ortho_norm
        return y 
Example #15
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_geodesic_segment_length_property(a, b, manifold, dtype):
    extra_dims = len(a.shape)
    segments = 12
    t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
        (segments + 1,) + (1,) * extra_dims
    )
    gamma_ab_t = manifold.geodesic(t, a, b)
    gamma_ab_t0 = gamma_ab_t[:-1]
    gamma_ab_t1 = gamma_ab_t[1:]
    dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
    speed = manifold.dist(a, b, keepdim=True).unsqueeze(0).expand_as(dist_ab_t0mt1)
    # we have exactly 12 line segments
    tolerance = {
        torch.float32: dict(rtol=1e-5, atol=5e-3),
        torch.float64: dict(rtol=1e-5, atol=5e-3),
    }
    length = speed / segments
    np.testing.assert_allclose(
        dist_ab_t0mt1.detach(), length.detach(), **tolerance[dtype]
    )
    (length + dist_ab_t0mt1).sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(b.grad).all()
    assert torch.isfinite(manifold.k.grad).all() 
Example #16
Source File: test_gyrovector_math.py    From geoopt with Apache License 2.0
def test_geodesic_segement_unit_property(a, b, manifold, dtype):
    extra_dims = len(a.shape)
    segments = 12
    t = torch.linspace(0, 1, segments + 1, dtype=dtype).view(
        (segments + 1,) + (1,) * extra_dims
    )
    gamma_ab_t = manifold.geodesic_unit(t, a, b)
    gamma_ab_t0 = gamma_ab_t[:1]
    gamma_ab_t1 = gamma_ab_t
    dist_ab_t0mt1 = manifold.dist(gamma_ab_t0, gamma_ab_t1, keepdim=True)
    true_distance_travelled = t.expand_as(dist_ab_t0mt1)
    # we have exactly 12 line segments
    tolerance = {
        torch.float32: dict(atol=2e-4, rtol=5e-5),
        torch.float64: dict(atol=1e-10),
    }
    np.testing.assert_allclose(
        dist_ab_t0mt1.detach(), true_distance_travelled.detach(), **tolerance[dtype]
    )
    (true_distance_travelled + dist_ab_t0mt1).sum().backward()
    assert torch.isfinite(a.grad).all()
    assert torch.isfinite(b.grad).all()
    assert torch.isfinite(manifold.k.grad).all() 
Example #17
Source File: imwrap.py    From DSMnet with Apache License 2.0
def imwrap_BCHW0(im_src, disp):
    # warp im_src horizontally by the disparity map
    bn, c, h, w = im_src.shape
    row = torch.linspace(-1, 1, w)
    col = torch.linspace(-1, 1, h)
    grid = torch.zeros(bn, h, w, 2)
    for n in range(bn):
        for i in range(h):
            grid[n, i, :, 0] = row
        for i in range(w):
            grid[n, :, i, 1] = col
    # a leaf Variable with requires_grad=True cannot be modified in place below
    grid = Variable(grid).type_as(im_src)
    grid[:, :, :, 0] = grid[:, :, :, 0] - disp.squeeze(1)*2/w
    #print disp[-1, -1, -1], grid[-1, -1, -1, 0]
    im_src = im_src.clamp(min=1e-6)  # clamp is not in-place; keep the result
    im_wrap = F.grid_sample(im_src, grid)
    return im_wrap 
Example #18
Source File: f2m.py    From pytorch-pcen with MIT License
def _init_buffers(self):
        m_min = 0. if self.f_min == 0 else 2595 * np.log10(1. + (self.f_min / 700))
        m_max = 2595 * np.log10(1. + (self.f_max / 700))

        m_pts = torch.linspace(m_min, m_max, self.n_mels + 2)
        f_pts = (700 * (10**(m_pts / 2595) - 1))

        bins = torch.floor(((self.n_fft - 1) * 2) * f_pts / self.sr).long()

        fb = torch.zeros(self.n_fft, self.n_mels)
        for m in range(1, self.n_mels + 1):
            f_m_minus = bins[m - 1].item()
            f_m = bins[m].item()
            f_m_plus = bins[m + 1].item()

            if f_m_minus != f_m:
                fb[f_m_minus:f_m, m - 1] = (torch.arange(f_m_minus, f_m) - f_m_minus) / (f_m - f_m_minus)
            if f_m != f_m_plus:
                fb[f_m:f_m_plus, m - 1] = (f_m_plus - torch.arange(f_m, f_m_plus)) / (f_m_plus - f_m)
        self.register_buffer("fb", fb) 
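Once registered, fb is an [n_fft, n_mels] matrix of triangular mel filters, so applying it to a linear-frequency spectrogram is a single matrix multiplication. A one-line sketch (the spec name and its [..., time, n_fft] layout are assumptions):

mel_spec = torch.matmul(spec, self.fb)  # [..., time, n_fft] -> [..., time, n_mels]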
Example #19
Source File: test_cat_fms.py    From DenseMatchingBenchmark with MIT License
def test_speed(self):
        max_disp = 192
        scale = 4
        start_disp = 0
        dilation = 1
        SH, SW = 540, 960
        B, C, H, W = 1, 32, SH//scale, SW//scale

        reference_fm = torch.rand(B, C, H, W).to(self.device)
        target_fm = torch.rand(B, C, H, W).to(self.device)

        self.timeTemplate(cat_fms, 'CAT_FMS', reference_fm, target_fm, max_disp//scale, start_disp, dilation)

        self.timeTemplate(fast_cat_fms, 'FAST_CAT_FMS', reference_fm, target_fm, max_disp//scale, start_disp, dilation)

        print('Test fast_cat_fms with disparity samples')

        d = (max_disp + dilation - 1) // dilation
        end_disp = start_disp + max_disp - 1

        # generate disparity samples
        disp_samples = torch.linspace(start_disp, end_disp, d).repeat(1, H, W, 1). \
            permute(0, 3, 1, 2).contiguous().to(self.device)

        self.timeTemplate(fast_cat_fms, 'FAST_CAT_FMS', reference_fm, target_fm, max_disp//scale, start_disp, dilation, disp_samples) 
Example #20
Source File: faster_soft_argmin.py    From DenseMatchingBenchmark with MIT License
def __init__(self, max_disp, start_disp=0, dilation=1, alpha=1.0, normalize=True):
        super(FasterSoftArgmin, self).__init__()
        self.max_disp = max_disp
        self.start_disp = start_disp
        self.dilation = dilation
        self.end_disp = start_disp + max_disp - 1
        self.disp_sample_number = (max_disp + dilation - 1) // dilation

        self.alpha = alpha
        self.normalize = normalize

        # compute disparity index: (1 ,1, disp_sample_number, 1, 1)
        disp_sample = torch.linspace(
            self.start_disp, self.end_disp, self.disp_sample_number
        )
        disp_sample = disp_sample.repeat(1, 1, 1, 1, 1).permute(0, 1, 4, 2, 3).contiguous()

        self.disp_regression = nn.Conv3d(1, 1, (self.disp_sample_number, 1, 1), 1, 0, bias=False)

        self.disp_regression.weight.data = disp_sample
        self.disp_regression.weight.requires_grad = False 
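The frozen Conv3d turns soft-argmin disparity regression into an expectation: softmax over the disparity dimension yields a probability volume, and the convolution computes sum_d d * p(d) per pixel. A matching forward pass might look like this sketch (assuming import torch.nn.functional as F; not the repo's exact code):

def forward(self, cost_volume):
    # cost_volume: [B, D, H, W]; alpha acts as a softmax temperature
    prob_volume = F.softmax(cost_volume * self.alpha, dim=1)
    prob_volume = prob_volume.unsqueeze(1)        # [B, 1, D, H, W]
    disp_map = self.disp_regression(prob_volume)  # [B, 1, 1, H, W]
    return disp_map.squeeze(2)                    # [B, 1, H, W]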
Example #21
Source File: pytorch.py    From iSketchNFill with GNU General Public License v3.0
def tps_grid(theta, ctrl, size):
    '''Compute a thin-plate-spline grid from parameters for sampling.
    
    Params
    ------
    theta: Nx(T+3)x2 tensor
        Batch size N, T+3 model parameters for T control points in dx and dy.
    ctrl: NxTx2 tensor, or Tx2 tensor
        T control points in normalized image coordinates [0..1]
    size: tuple
        Output grid size as NxCxHxW. C unused. This defines the output image
        size when sampling.
    
    Returns
    -------
    grid : NxHxWx2 tensor
        Grid suitable for sampling in pytorch containing source image
        locations for each output pixel.
    '''    
    N, _, H, W = size

    grid = theta.new(N, H, W, 3)
    grid[:, :, :, 0] = 1.
    grid[:, :, :, 1] = torch.linspace(0, 1, W)
    grid[:, :, :, 2] = torch.linspace(0, 1, H).unsqueeze(-1)   
    
    z = tps(theta, ctrl, grid)
    return (grid[...,1:] + z)*2-1 # [-1,1] range required by F.grid_sample
Example #22
Source File: utils.py    From EvolutionaryGAN-pytorch with MIT License
def interp(x0, x1, num_midpoints):
  lerp = torch.linspace(0, 1.0, num_midpoints + 2, device='cuda').to(x0.dtype)
  return ((x0 * (1 - lerp.view(1, -1, 1))) + (x1 * lerp.view(1, -1, 1)))


# interp sheet function
# Supports full, class-wise and intra-class interpolation 
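Shape check: with x0 and x1 of shape [N, 1, Z], the result stacks both endpoints plus the midpoints along dim 1. Note that lerp is created directly on CUDA, so this helper assumes a GPU:

x0 = torch.randn(8, 1, 128, device='cuda')
x1 = torch.randn(8, 1, 128, device='cuda')
mids = interp(x0, x1, num_midpoints=6)  # [8, 8, 128]: 2 endpoints + 6 midpoints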
Example #23
Source File: run.py    From pytorch-liteflownet with GNU General Public License v3.0
def backwarp(tenInput, tenFlow):
	if str(tenFlow.size()) not in backwarp_tenGrid:
		tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3]).view(1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
		tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2]).view(1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])

		backwarp_tenGrid[str(tenFlow.size())] = torch.cat([ tenHorizontal, tenVertical ], 1).cuda()
	# end

	tenFlow = torch.cat([ tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0), tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0) ], 1)

	return torch.nn.functional.grid_sample(input=tenInput, grid=(backwarp_tenGrid[str(tenFlow.size())] + tenFlow).permute(0, 2, 3, 1), mode='bilinear', padding_mode='zeros', align_corners=True)
# end

########################################################## 
Example #24
Source File: gfl_head.py    From mmdetection with Apache License 2.0
def __init__(self, reg_max=16):
        super(Integral, self).__init__()
        self.reg_max = reg_max
        self.register_buffer('project',
                             torch.linspace(0, self.reg_max, self.reg_max + 1)) 
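The project buffer [0, 1, ..., reg_max] converts GFL's discretized box-offset distribution into its expectation. The matching forward pass is essentially the following sketch (assuming import torch.nn.functional as F):

def forward(self, x):
    # x: [N, 4 * (reg_max + 1)] logits, one distribution per box side
    x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
    x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
    return x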
Example #25
Source File: disp2prob.py    From DenseMatchingBenchmark with MIT License
def getCost(self):
        # [BatchSize, 1, Height, Width]
        b, c, h, w = self.gtDisp.shape
        assert c == 1

        # if start_disp = 0, dilation = 1, then generate disparity candidates as [0, 1, 2, ... , maxDisp-1]
        if self.disp_sample is None:
            self.disp_sample_number = (self.max_disp + self.dilation - 1) // self.dilation

            # [disp_sample_number]
            self.disp_sample = torch.linspace(
                self.start_disp, self.end_disp, self.disp_sample_number
            ).to(self.gtDisp.device)

            # [BatchSize, disp_sample_number, Height, Width]
            self.disp_sample = self.disp_sample.repeat(b, h, w, 1).permute(0, 3, 1, 2).contiguous()


        # values of gtDisp must lie within (start_disp, end_disp); anything else is masked out
        mask = (self.gtDisp > self.start_disp) & (self.gtDisp < self.end_disp)
        mask = mask.detach().type_as(self.gtDisp)
        self.gtDisp = self.gtDisp * mask

        # [BatchSize, disp_sample_number, Height, Width]
        cost = self.calCost()

        # set the outliers' cost to -inf
        # [BatchSize, disp_sample_number, Height, Width]
        cost = cost * mask - 1e12

        # in case cost is NaN
        if isNaN(cost.min()) or isNaN(cost.max()):
            print('Cost ==> min: {:.4f}, max: {:.4f}'.format(cost.min(), cost.max()))
            print('Disparity Sample ==> min: {:.4f}, max: {:.4f}'.format(self.disp_sample.min(),
                                                                         self.disp_sample.max()))
            print('Disparity Ground Truth after mask out ==> min: {:.4f}, max: {:.4f}'.format(self.gtDisp.min(),
                                                                                      self.gtDisp.max()))
            raise ValueError("cost contains NaN!")

        return cost 
Example #26
Source File: disp2prob.py    From DenseMatchingBenchmark with MIT License
def getProb(self):
        # [BatchSize, 1, Height, Width]
        b, c, h, w = self.gtDisp.shape
        assert c == 1

        # if start_disp = 0, dilation = 1, then generate disparity candidates as [0, 1, 2, ... , maxDisp-1]
        if self.disp_sample is None:
            self.disp_sample_number = (self.max_disp + self.dilation - 1) // self.dilation

            # [disp_sample_number]
            self.disp_sample = torch.linspace(
                self.start_disp, self.end_disp, self.disp_sample_number
            ).to(self.gtDisp.device)

            # [BatchSize, disp_sample_number, Height, Width]
            self.disp_sample = self.disp_sample.repeat(b, h, w, 1).permute(0, 3, 1, 2).contiguous()


        # values of gtDisp must lie within (start_disp, end_disp); anything else is masked out
        mask = (self.gtDisp > self.start_disp) & (self.gtDisp < self.end_disp)
        mask = mask.detach().type_as(self.gtDisp)
        self.gtDisp = self.gtDisp * mask

        # [BatchSize, disp_sample_number, Height, Width]
        probability = self.calProb()

        # set the outliers' probability to 0
        # add a tiny constant to avoid dividing by or taking the log of zero
        # [BatchSize, disp_sample_number, Height, Width]
        probability = probability * mask + self.eps

        # in case probability is NaN
        if isNaN(probability.min()) or isNaN(probability.max()):
            print('Probability ==> min: {:.4f}, max: {:.4f}'.format(probability.min(), probability.max()))
            print('Disparity Sample ==> min: {:.4f}, max: {:.4f}'.format(self.disp_sample.min(),
                                                                         self.disp_sample.max()))
            print('Disparity Ground Truth after mask out ==> min: {:.4f}, max: {:.4f}'.format(self.gtDisp.min(),
                                                                                      self.gtDisp.max()))
            raise ValueError("probability contains NaN!")

        return probability 
Example #27
Source File: AnyNet.py    From DenseMatchingBenchmark with MIT License
def forward(self, stage, left, right, disp=None):
        B, C, H, W = left.shape
        # construct the raw cost volume

        end_disp = self.start_disp[stage] + self.max_disp[stage] - 1

        # disparity sample number
        D = (self.max_disp[stage] + self.dilation[stage] - 1) // self.dilation[stage]

        # generate disparity samples, in [B, D, H, W] layout
        disp_sample = torch.linspace(self.start_disp[stage], end_disp, D)
        disp_sample = disp_sample.view(1, D, 1, 1).expand(B, D, H, W).to(left.device).float()

        # if an initial disparity estimate is given, it is used to re-center the samples
        if disp is not None:
            # up-sample disparity map to the size of left
            H, W = left.shape[-2:]
            scale = W / disp.shape[-1]
            disp = F.interpolate(disp * scale, size=(H, W), mode='bilinear', align_corners=False)
            # shift the disparity sample to be centered at the given disparity map
            disp_sample = disp_sample + disp

        # [B, C, D, H, W]
        raw_cost = fast_dif_fms(left, right, disp_sample=disp_sample)

        # list [[B, D, H, W]]
        cost = self.aggregator[stage](raw_cost)

        return cost 
Example #28
Source File: dif_fms.py    From DenseMatchingBenchmark with MIT License
def fast_dif_fms(reference_fm, target_fm, max_disp=192, start_disp=0, dilation=1, disp_sample=None,
                 normalize=False, p=1.0,):
    device = reference_fm.device
    B, C, H, W = reference_fm.shape

    if disp_sample is None:
        end_disp = start_disp + max_disp - 1

        disp_sample_number = (max_disp + dilation - 1) // dilation
        D = disp_sample_number

        # generate disparity samples, in [B,D, H, W] layout
        disp_sample = torch.linspace(start_disp, end_disp, D)
        disp_sample = disp_sample.view(1, D, 1, 1).expand(B, D, H, W).to(device).float()

    else:  # direct provide disparity samples
        # the number of disparity samples
        D = disp_sample.shape[1]

    # expand D dimension
    dif_reference_fm = reference_fm.unsqueeze(2).expand(B, C, D, H, W)
    dif_target_fm = target_fm.unsqueeze(2).expand(B, C, D, H, W)

    # shift the target feature map according to the disparity samples via grid sampling
    dif_target_fm = inverse_warp_3d(dif_target_fm, -disp_sample, padding_mode='zeros')

    # mask out features in reference
    dif_reference_fm = dif_reference_fm * (dif_target_fm > 0).type_as(dif_reference_fm)

    # [B, C, D, H, W]
    dif_fm = dif_reference_fm - dif_target_fm

    if normalize:
        # [B, D, H, W]
        dif_fm = torch.norm(dif_fm, p=p, dim=1, keepdim=False)

    return dif_fm 
Example #29
Source File: modules.py    From pase with MIT License
def __init__(self, N_filt, Filt_dim, fs, stride=1,
                 padding='VALID', pad_mode='reflect'):
        super(SincConv, self).__init__()

        # Mel Initialization of the filterbanks
        low_freq_mel = 80
        high_freq_mel = (2595 * np.log10(1 + (fs / 2) \
                                         / 700))  # Convert Hz to Mel
        mel_points = np.linspace(low_freq_mel, high_freq_mel, 
                                 N_filt)  # Equally spaced in Mel scale
        f_cos = (700 * (10 ** (mel_points / 2595) - 1)) # Convert Mel to Hz
        b1 = np.roll(f_cos, 1)
        b2 = np.roll(f_cos, -1)
        b1[0] = 30
        b2[-1] = (fs / 2) - 100
                
        self.freq_scale=fs * 1.0
        self.filt_b1 = nn.Parameter(torch.from_numpy(b1/self.freq_scale))
        self.filt_band = nn.Parameter(torch.from_numpy((b2-b1)/self.freq_scale))

        self.N_filt = N_filt
        self.Filt_dim = Filt_dim
        self.fs = fs
        self.padding = padding
        self.stride = stride
        self.pad_mode = pad_mode