Python scipy.signal.fftconvolve() Examples

The following are 30 code examples of scipy.signal.fftconvolve(), collected from open-source projects. The source file, project, and license for each example are noted above its code.
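As a quick orientation before the project examples, here is a minimal, self-contained sketch of the three boundary modes of fftconvolve (not taken from any of the projects below):

import numpy as np
from scipy.signal import fftconvolve

x = np.array([1.0, 2.0, 3.0, 4.0])
h = np.ones(3) / 3.0                       # 3-tap moving-average kernel

full = fftconvolve(x, h, mode='full')      # length len(x) + len(h) - 1 = 6
same = fftconvolve(x, h, mode='same')      # length len(x) = 4, centred on 'full'
valid = fftconvolve(x, h, mode='valid')    # length len(x) - len(h) + 1 = 2

# fftconvolve agrees with the direct convolution up to floating-point error
assert np.allclose(full, np.convolve(x, h, mode='full'))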
Example #1
Source File: audio.py    From ZASR_tensorflow with Apache License 2.0
def convolve(self, impulse_segment, allow_resample=False):
        """Convolve this audio segment with the given impulse segment.

        Note that this is an in-place transformation.

        :param impulse_segment: Impulse response segment.
        :type impulse_segment: AudioSegment
        :param allow_resample: Indicates whether resampling is allowed when
                               the impulse_segment has a different sample
                               rate from this signal.
        :type allow_resample: bool
        :raises ValueError: If the sample rates of the two audio segments do
                            not match and resampling is not allowed.
        """
        if allow_resample and self.sample_rate != impulse_segment.sample_rate:
            impulse_segment.resample(self.sample_rate)
        if self.sample_rate != impulse_segment.sample_rate:
            raise ValueError("Impulse segment's sample rate (%d Hz) is not "
                             "equal to base signal sample rate (%d Hz)." %
                             (impulse_segment.sample_rate, self.sample_rate))
        samples = signal.fftconvolve(self.samples, impulse_segment.samples,
                                     "full")
        self._samples = samples 
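Example #1 wraps this call inside an AudioSegment class; a standalone sketch of the same pattern with a synthetic tone and a toy impulse response (all values below are made up for illustration):

import numpy as np
from scipy.signal import fftconvolve

sample_rate = 16000
t = np.arange(sample_rate) / sample_rate                 # one second of audio
dry = np.sin(2 * np.pi * 440.0 * t)                      # 440 Hz test tone
impulse = np.exp(-np.linspace(0.0, 8.0, 4000))           # toy exponential "room" decay

wet = fftconvolve(dry, impulse, mode="full")             # len(dry) + len(impulse) - 1 samples
wet /= np.max(np.abs(wet))                               # rescale to avoid clipping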
Example #2
Source File: convergence_integrals.py    From lenstronomy with MIT License
def potential_from_kappa_grid_adaptive(kappa_high_res, grid_spacing, low_res_factor, high_res_kernel_size):
    """
    Lensing potential on the convergence grid.
    The computation is performed as a convolution of the Green's function with the convergence map using FFT.

    :param kappa_high_res: 2d grid of convergence values
    :param grid_spacing: pixel size of grid
    :param low_res_factor: lower resolution factor of larger scale kernel.
    :param high_res_kernel_size: int, size of high resolution kernel in units of degraded pixels
    :return: lensing potential in a 2d grid at positions x_grid, y_grid
    """
    kappa_low_res = image_util.re_size(kappa_high_res, factor=low_res_factor)
    num_pix = len(kappa_high_res) * 2
    if num_pix % 2 == 0:
        num_pix += 1
    grid_spacing_low_res = grid_spacing * low_res_factor
    kernel = potential_kernel(num_pix, grid_spacing)
    kernel_low_res, kernel_high_res = kernel_util.split_kernel(kernel, high_res_kernel_size, low_res_factor, normalized=False)

    f_high_res = scp.fftconvolve(kappa_high_res, kernel_high_res, mode='same') / np.pi * grid_spacing ** 2
    f_high_res = image_util.re_size(f_high_res, low_res_factor)
    f_low_res = scp.fftconvolve(kappa_low_res, kernel_low_res, mode='same') / np.pi * grid_spacing_low_res ** 2
    return f_high_res + f_low_res 
Example #3
Source File: convergence_integrals.py    From lenstronomy with MIT License
def deflection_from_kappa_grid(kappa, grid_spacing):
    """
    Deflection angles on the convergence grid.
    The computation is performed as a convolution of the Green's function with the convergence map using FFT.

    :param kappa: convergence values for each pixel (2-d array)
    :param grid_spacing: pixel size of grid
    :return: numerical deflection angles in x- and y- direction
    """
    num_pix = len(kappa) * 2
    if num_pix % 2 == 0:
        num_pix += 1
    kernel_x, kernel_y = deflection_kernel(num_pix, grid_spacing)
    f_x = scp.fftconvolve(kappa, kernel_x, mode='same') / np.pi * grid_spacing ** 2
    f_y = scp.fftconvolve(kappa, kernel_y, mode='same') / np.pi * grid_spacing ** 2
    return f_x, f_y 
Example #4
Source File: metrics.py    From sigsep-mus-eval with MIT License
def _project(reference_sources, C):
    """Project images using pre-computed filters C
    reference_sources are nsrc X nsampl X nchan
    C is nsrc X nchan X filters_len X nchan
    """
    # shapes: ensure that input is 3d (comprising the source index)
    if len(reference_sources.shape) == 2:
        reference_sources = reference_sources[None, ...]
        C = C[None, ...]

    (nsrc, nsampl, nchan) = reference_sources.shape
    filters_len = C.shape[-2]

    # zero pad
    reference_sources = _zeropad(reference_sources, filters_len - 1, axis=1)
    sproj = np.zeros((nchan, nsampl + filters_len - 1))

    for (j, cj, c) in itertools.product(
        list(range(nsrc)), list(range(nchan)), list(range(nchan))
    ):
        sproj[c] += fftconvolve(
            C[j, cj, :, c],
            reference_sources[j, :, cj]
        )[:nsampl + filters_len - 1]
    return sproj.T 
Example #5
Source File: preproc.py    From science_rcn with MIT License
def get_gabor_filters(size=21, filter_scale=4., num_orients=16, weights=False):
    """Get Gabor filter bank. See Preproc for parameters and returns."""
    def _get_sparse_gaussian():
        """Sparse Gaussian."""
        size = 2 * np.ceil(np.sqrt(2.) * filter_scale) + 1
        alt = np.zeros((int(size), int(size)), np.float32)
        alt[int(size // 2), int(size // 2)] = 1
        gaussian = gaussian_filter(alt, filter_scale / np.sqrt(2.), mode='constant')
        gaussian[gaussian < 0.05 * gaussian.max()] = 0
        return gaussian

    gaussian = _get_sparse_gaussian()
    filts = []
    for angle in np.linspace(0., 2 * np.pi, num_orients, endpoint=False):
        acts = np.zeros((size, size), np.float32)
        x, y = np.cos(angle) * filter_scale, np.sin(angle) * filter_scale
        acts[int(size / 2 + y), int(size / 2 + x)] = 1.
        acts[int(size / 2 - y), int(size / 2 - x)] = -1.
        filt = fftconvolve(acts, gaussian, mode='same')
        filt /= np.abs(filt).sum()  # Normalize to ensure the maximum output is 1
        if weights:
            filt = np.abs(filt)
        filts.append(filt)
    return filts 
Example #6
Source File: TFMethods.py    From ASP with GNU General Public License v3.0
def sincinterp(x):
        """
        Sinc interpolation for computation of fractional transformations.
        As appears in :
        -https://github.com/audiolabs/frft/
        ----------
        Args:
            x       : (array) Input array
        Returns:
            xint    : (array) Sinc-interpolated output
        """
        N = len(x)
        y = np.zeros(2 * N - 1, dtype=x.dtype)
        y[:2 * N:2] = x
        xint = fftconvolve( y[:2 * N], np.sinc(np.arange(-(2 * N - 3), (2 * N - 2)).T / 2),)
        return xint[2 * N - 3: -2 * N + 3] 
Example #7
Source File: transitionModels.py    From bayesloop with MIT License
def convolve(self, distribution):
        """
        Convolves distribution with alpha-stable kernel.

        Args:
            distribution(ndarray): Discrete probability distribution to convolve.

        Returns:
            ndarray: convolution
        """
        gs = np.array(self.study.gridSize)
        padded_distribution = np.zeros(3*np.array(gs))
        if len(gs) == 2:
            padded_distribution[gs[0]:2*gs[0], gs[1]:2*gs[1]] = distribution
        elif len(gs) == 1:
            padded_distribution[gs[0]:2*gs[0]] = distribution

        padded_convolution = fftconvolve(padded_distribution, self.kernel, mode='same')
        if len(gs) == 2:
            convolution = padded_convolution[gs[0]:2*gs[0], gs[1]:2*gs[1]]
        elif len(gs) == 1:
            convolution = padded_convolution[gs[0]:2*gs[0]]

        return convolution 
Example #8
Source File: test_signaltools.py    From GraphicDesignPatternByPython with MIT License
def test_valid_mode(self):
        # See gh-5897
        a = array([3, 2, 1])
        b = array([3, 3, 5, 6, 8, 7, 9, 0, 1])
        expected = array([24., 31., 41., 43., 49., 25., 12.])

        out = fftconvolve(a, b, 'valid')
        assert_array_almost_equal(out, expected)

        out = fftconvolve(b, a, 'valid')
        assert_array_almost_equal(out, expected)

        a = array([3 - 1j, 2 + 7j, 1 + 0j])
        b = array([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
        expected = array([45. + 12.j, 30. + 23.j, 48 + 32.j])

        out = fftconvolve(a, b, 'valid')
        assert_array_almost_equal(out, expected)

        out = fftconvolve(b, a, 'valid')
        assert_array_almost_equal(out, expected) 
Example #9
Source File: stressmodels.py    From pastas with MIT License
def simulate(self, p=None, tmin=None, tmax=None, freq=None, dt=1.0):
        """Method to simulate the contribution of recharge to the head.

        Parameters
        ----------
        p: numpy.ndarray, optional
            parameter used for the simulation
        tmin: string, optional
        tmax: string, optional
        freq: string, optional
        dt: float, optional
            Time step to use in the recharge calculation.

        Returns
        -------
        pandas.Series

        """
        if p is None:
            p = self.parameters.initial.values
        b = self.get_block(p[:-self.recharge.nparam], dt, tmin, tmax)
        stress = self.get_stress(p=p, tmin=tmin, tmax=tmax, freq=freq).values
        return Series(data=fftconvolve(stress, b, 'full')[:stress.size],
                      index=self.prec.series.index, name=self.name,
                      fastpath=True) 
Example #10
Source File: tsa.py    From Splunking-Crime with GNU Affero General Public License v3.0
def acovf_fft(x, demean=True):
    '''autocovariance function with call to fftconvolve, biased

    Parameters
    ----------
    x : array_like
        timeseries, signal
    demean : boolean
        If true, then demean time series

    Returns
    -------
    acovf : array
        autocovariance for data, same length as x

    might work for nd in parallel with time along axis 0

    '''
    from scipy import signal
    x = np.asarray(x)

    if demean:
        x = x - x.mean()

    return signal.fftconvolve(x, x[::-1])[len(x) - 1:] / x.shape[0]
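A quick check of this FFT-based autocovariance against a direct dot-product computation, using synthetic data and the first five lags:

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
x = rng.standard_normal(500)
xd = x - x.mean()

acovf = signal.fftconvolve(xd, xd[::-1])[len(xd) - 1:] / xd.shape[0]   # biased estimate
direct = np.array([xd[:len(xd) - k] @ xd[k:] for k in range(5)]) / len(xd)
assert np.allclose(acovf[:5], direct)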
Example #11
Source File: tsa.py    From vnpy_crypto with MIT License
def acovf_fft(x, demean=True):
    '''autocovariance function with call to fftconvolve, biased

    Parameters
    ----------
    x : array_like
        timeseries, signal
    demean : boolean
        If true, then demean time series

    Returns
    -------
    acovf : array
        autocovariance for data, same length as x

    might work for nd in parallel with time along axis 0

    '''
    from scipy import signal
    x = np.asarray(x)

    if demean:
        x = x - x.mean()

    return signal.fftconvolve(x, x[::-1])[len(x) - 1:] / x.shape[0]
Example #12
Source File: stressmodels.py    From pastas with MIT License
def simulate(self, p=None, tmin=None, tmax=None, freq=None, dt=1,
                 istress=None):
        self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
        h = Series(data=0, index=self.stress[0].series.index, name=self.name)
        stresses = self.get_stress(istress=istress)
        distances = self.get_distances(istress=istress)
        for stress, r in zip(stresses, distances):
            npoints = stress.index.size
            p_with_r = np.concatenate([p, np.asarray([r])])
            b = self.get_block(p_with_r, dt, tmin, tmax)
            c = fftconvolve(stress, b, 'full')[:npoints]
            h = h.add(Series(c, index=stress.index, fastpath=True),
                      fill_value=0.0)
        if istress is not None:
            if self.stress[istress].name is not None:
                h.name = self.stress[istress].name
            else:
                h.name = self.name + "_" + str(istress)
        else:
            h.name = self.name
        return h 
Example #13
Source File: test_signaltools.py    From GraphicDesignPatternByPython with MIT License
def test_many_sizes(self):
        np.random.seed(1234)

        def ns():
            for j in range(1, 100):
                yield j
            for j in range(1000, 1500):
                yield j
            for k in range(50):
                yield np.random.randint(1001, 10000)

        for n in ns():
            msg = 'n=%d' % (n,)
            a = np.random.rand(n) + 1j * np.random.rand(n)
            b = np.random.rand(n) + 1j * np.random.rand(n)
            c = fftconvolve(a, b, 'full')
            d = np.convolve(a, b, 'full')
            assert_allclose(c, d, atol=1e-10, err_msg=msg) 
Example #14
Source File: stressmodels.py    From pastas with MIT License
def simulate(self, p, tmin=None, tmax=None, freq=None, dt=1.0):
        """Simulates the head contribution.

        Parameters
        ----------
        p: numpy.ndarray
           Parameters used for simulation.
        tmin: str, optional
        tmax: str, optional
        freq: str, optional
        dt: int, optional

        Returns
        -------
        pandas.Series
            The simulated head contribution.

        """
        self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
        b = self.get_block(p, dt, tmin, tmax)
        stress = self.stress[0].series
        npoints = stress.index.size
        h = Series(data=fftconvolve(stress, b, 'full')[:npoints],
                   index=stress.index, name=self.name, fastpath=True)
        return h 
Example #15
Source File: frequency.py    From wonambi with GNU General Public License v3.0
def _convolve(i_chan, wavelet, dat):
    tf = fftconvolve(dat[i_chan, :], wavelet, 'same')
    return tf 
Example #16
Source File: whitening.py    From alphacsc with BSD 3-Clause "New" or "Revised" License
def apply_ar(ar_model, x, zero_phase=True, mode='same', reverse_ar=False):
    # TODO: speed-up with only one conv
    ar_coef = np.concatenate((np.ones(1), ar_model.AR_))
    if reverse_ar:
        ar_coef = ar_coef[::-1]

    if zero_phase:
        tmp = signal.fftconvolve(x, ar_coef, mode)[::-1]
        return signal.fftconvolve(tmp, ar_coef, mode)[::-1]
    else:
        return signal.fftconvolve(x, ar_coef, mode) 
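The zero_phase branch convolves, reverses, convolves, and reverses again so that the forward and backward phase shifts cancel. A standalone sketch of that trick with a simple asymmetric FIR smoother in place of the AR coefficients (the taps and signal below are illustrative):

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
x = np.sin(np.linspace(0, 10 * np.pi, 2000)) + 0.1 * rng.standard_normal(2000)
taps = np.exp(-np.arange(30) / 5.0)
taps /= taps.sum()                                        # asymmetric (causal-style) smoother

forward_only = signal.fftconvolve(x, taps, mode='same')   # smoothed, but phase-shifted
tmp = signal.fftconvolve(x, taps, mode='same')[::-1]      # forward pass, then reverse
zero_phase = signal.fftconvolve(tmp, taps, mode='same')[::-1]
# the second (reversed) pass cancels the phase shift; the signal is filtered twice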
Example #17
Source File: arma.py    From alphacsc with BSD 3-Clause "New" or "Revised" License
def inverse(self, sigin):
        """Apply the inverse ARMA filter to a signal

        sigin : input signal (ndarray)

        returns the filtered signal (ndarray)

        """
        arpart = np.concatenate((np.ones(1), self.AR_))
        return signal.fftconvolve(sigin, arpart, 'same') 
Example #18
Source File: text_utils.py    From SynthText with Apache License 2.0
def place_text(self, text_arrs, back_arr, bbs):
        areas = [-np.prod(ta.shape) for ta in text_arrs]
        order = np.argsort(areas)

        locs = [None for i in range(len(text_arrs))]
        out_arr = np.zeros_like(back_arr)
        for i in order:            
            ba = np.clip(back_arr.copy().astype(np.float64), 0, 255)
            ta = np.clip(text_arrs[i].copy().astype(np.float64), 0, 255)
            ba[ba > 127] = 1e8
            intersect = ssig.fftconvolve(ba,ta[::-1,::-1],mode='valid')
            safemask = intersect < 1e8

            if not np.any(safemask): # no collision-free position:
                #warn("COLLISION!!!")
                return back_arr,locs[:i],bbs[:i],order[:i]

            minloc = np.transpose(np.nonzero(safemask))
            loc = minloc[np.random.choice(minloc.shape[0]),:]
            locs[i] = loc

            # update the bounding-boxes:
            bbs[i] = move_bb(bbs[i],loc[::-1])

            # blit the text onto the canvas
            w,h = text_arrs[i].shape
            out_arr[loc[0]:loc[0]+w,loc[1]:loc[1]+h] += text_arrs[i]

        return out_arr, locs, bbs, order 
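The collision test works because a 'valid' convolution of the penalised background with the flipped text mask sums the background values under every candidate placement. A toy sketch of that idea on a small occupancy grid (the arrays here are made up):

import numpy as np
from scipy import signal

back = np.zeros((8, 8))
back[2:5, 2:5] = 1e8                       # occupied cells carry a huge penalty
text = np.ones((3, 3))                     # mask of the patch to place

# Flipping the mask turns convolution into correlation: each output cell is the
# total penalty covered when the patch's top-left corner sits at that cell.
overlap = signal.fftconvolve(back, text[::-1, ::-1], mode='valid')
safe = np.transpose(np.nonzero(overlap < 1e7))   # margin guards against FFT round-off
# 'safe' lists every top-left corner where the 3x3 patch avoids the occupied block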
Example #19
Source File: try_var_convolve.py    From Splunking-Crime with GNU Affero General Public License v3.0
def fftconvolve_old(in1, in2, in3=None, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    copied from scipy.signal.signaltools, but here used to try out inverse filter
    doesn't work or I can't get it to work

     2010-10-23:
    looks ok to me for 1d,
    from results below with padded data array (fftp)
    but it doesn't work for multidimensional inverse filter (fftn)
    original signal.fftconvolve also uses fftn

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1+s2-1

    # Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    IN1 = fftn(in1,fsize)
    #IN1 *= fftn(in2,fsize) #JP: this looks like the only change I made
    IN1 /= fftn(in2,fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO  doesn't seem to work for VARMA
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1) 
Example #20
Source File: filtertools.py    From Splunking-Crime with GNU Affero General Public License v3.0
def _pad_nans(x, head=None, tail=None):
    if np.ndim(x) == 1:
        if head is None and tail is None:
            return x
        elif head and tail:
            return np.r_[[np.nan] * head, x, [np.nan] * tail]
        elif tail is None:
            return np.r_[[np.nan] * head, x]
        elif head is None:
            return np.r_[x, [np.nan] * tail]
    elif np.ndim(x) == 2:
        if head is None and tail is None:
            return x
        elif head and tail:
            return np.r_[[[np.nan] * x.shape[1]] * head, x,
                         [[np.nan] * x.shape[1]] * tail]
        elif tail is None:
            return np.r_[[[np.nan] * x.shape[1]] * head, x]
        elif head is None:
            return np.r_[x, [[np.nan] * x.shape[1]] * tail]
    else:
        raise ValueError("Nan-padding for ndim > 2 not implemented")

#original changes and examples in sandbox.tsa.try_var_convolve

# don't do these imports, here just for copied fftconvolve
#get rid of these imports
#from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
#     ifftn, fftfreq
#from numpy import product,array 
Example #21
Source File: lineshape_analysis.py    From quantum-python-lectures with MIT License
def voigt(x,c1,w1,c2,w2):
	""" Voigt function: convolution of Lorentzian and Gaussian.
		Convolution implemented with the FFT convolve function in scipy.
		NOT NORMALISED """
	
	### Create larger array so convolution doesn't screw up at the edges of the arrays
	# this assumes nicely behaved x-array...
	# i.e. x[0] == x.min() and x[-1] == x.max(), monotonically increasing
	dx = (x[-1]-x[0])/len(x)
	xp_min = x[0] - len(x)/3 * dx
	xp_max = x[-1] + len(x)/3 * dx
	xp = linspace(xp_min,xp_max,3*len(x))
	
	L = lorentzian(xp,c1,w1)
	G = gaussian(xp,c2,w2)
	
	#convolve
	V = conv(L,G,mode='same')
	
	#normalise to unity height !!! delete me later !!!
	V /= V.max()
	
	#create interpolation function to convert back to original array size
	fn_out = interp(xp,V)
	
	return fn_out(x) 
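A self-contained variant of the same construction, with the Lorentzian and Gaussian written out explicitly (the lorentzian and gaussian helpers used above are assumed to live in the surrounding module; the widths below are arbitrary):

import numpy as np
from scipy.signal import fftconvolve

x = np.linspace(-10, 10, 2001)
dx = x[1] - x[0]

lorentz = (0.5 / np.pi) / (x ** 2 + 0.5 ** 2)            # Lorentzian, HWHM = 0.5
gauss = np.exp(-x ** 2 / (2.0 * 1.0 ** 2))               # Gaussian, sigma = 1

voigt = fftconvolve(lorentz, gauss, mode='same') * dx    # dx approximates the continuous integral
voigt /= voigt.max()                                     # normalise to unit height, as above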
Example #22
Source File: convolution.py    From lenstronomy with MIT License
def convolution2d(self, image):
        """

        :param image: 2d array (image) to be convolved
        :return: fft convolution
        """
        if self._type == 'fft':
            image_conv = signal.fftconvolve(image, self._kernel, mode='same')
        elif self._type == 'fft_static':
            image_conv = self._static_fft(image, mode='same')
        elif self._type == 'grid':
            image_conv = signal.convolve2d(image, self._kernel, mode='same')
        else:
            raise ValueError('convolution_type %s not supported!' % self._type)
        return image_conv 
Example #23
Source File: perturb.py    From patter with MIT License
def perturb(self, data):
        impulse_record = self._rng.sample(self._manifest.data, 1)[0]
        impulse = AudioSegment.from_file(impulse_record['audio_filepath'], target_sr=data.sample_rate)
        # print("DEBUG: impulse:", impulse_record['audio_filepath'])
        data._samples = signal.fftconvolve(data.samples, impulse.samples, "full") 
Example #24
Source File: convergence_integrals.py    From lenstronomy with MIT License
def potential_from_kappa_grid(kappa, grid_spacing):
    """
    Lensing potential on the convergence grid.
    The computation is performed as a convolution of the Green's function with the convergence map using FFT.

    :param kappa: 2d grid of convergence values
    :param grid_spacing: pixel size of grid
    :return: lensing potential in a 2d grid at positions x_grid, y_grid
    """
    num_pix = len(kappa) * 2
    if num_pix % 2 == 0:
        num_pix += 1
    kernel = potential_kernel(num_pix, grid_spacing)
    f_ = scp.fftconvolve(kappa, kernel, mode='same') / np.pi * grid_spacing ** 2
    return f_ 
Example #25
Source File: render.py    From picasso with MIT License
def _fftconvolve(image, blur_width, blur_height):
    kernel_width = 10 * int(_np.round(blur_width)) + 1
    kernel_height = 10 * int(_np.round(blur_height)) + 1
    kernel_y = _signal.windows.gaussian(kernel_height, blur_height)
    kernel_x = _signal.windows.gaussian(kernel_width, blur_width)
    kernel = _np.outer(kernel_y, kernel_x)
    kernel /= kernel.sum()
    return _signal.fftconvolve(image, kernel, mode="same") 
Example #26
Source File: filters.py    From pysdr with GNU General Public License v3.0
def filter(self, x):
        out = signal.fftconvolve(np.concatenate((self.previous_batch, x)), self.taps, mode='valid')
        self.previous_batch = x[-(len(self.taps) - 1):] # the last portion of the batch gets saved for the next iteration #FIXME if batches become smaller than taps this won't work
        return out


##############
# UNIT TESTS # 
############## 
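The class above keeps the tail of each batch so that block-wise filtering matches filtering the whole stream at once. A standalone sketch of that property without the class (taps and batch size chosen arbitrarily):

import numpy as np
from scipy import signal

taps = signal.firwin(64, 0.2)                            # any FIR taps
x = np.random.randn(4096)
batches = np.split(x, 4)

previous = np.zeros(len(taps) - 1)                       # state carried across batches
out = []
for batch in batches:
    y = signal.fftconvolve(np.concatenate((previous, batch)), taps, mode='valid')
    previous = batch[-(len(taps) - 1):]                  # save the tail for the next batch
    out.append(y)

streamed = np.concatenate(out)
reference = signal.fftconvolve(np.concatenate((np.zeros(len(taps) - 1), x)), taps, mode='valid')
assert np.allclose(streamed, reference)                  # block-wise == whole-stream filtering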
Example #27
Source File: expected.py    From cooltools with MIT License
def lattice_pdist_frequencies(n, points):
    """
    Distribution of pairwise 1D distances among a collection of distinct
    integers ranging from 0 to n-1.

    Parameters
    ----------
    n : int
        Size of the lattice on which the integer points reside.
    points : sequence of int
        Arbitrary integers between 0 and n-1, inclusive, in any order but
        with no duplicates.

    Returns
    -------
    h : 1D array of length n
        h[d] counts the number of integer pairs that are exactly d units apart

    Notes
    -----
    This is done using a convolution via FFT. Thanks to Peter de Rivaz; see
    `<http://stackoverflow.com/questions/42423823/distribution-of-pairwise-distances-between-many-integers>`_.

    """
    if len(np.unique(points)) != len(points):
        raise ValueError("Integers must be distinct.")
    x = np.zeros(n)
    x[points] = 1
    return np.round(fftconvolve(x, x[::-1], mode="full")).astype(int)[-n:] 
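A tiny usage check of the indicator-autocorrelation trick, with three arbitrary points on a lattice of size 10:

import numpy as np
from scipy.signal import fftconvolve

n = 10
points = [0, 3, 7]
x = np.zeros(n)
x[points] = 1
h = np.round(fftconvolve(x, x[::-1], mode="full")).astype(int)[-n:]
# pairwise distances among {0, 3, 7} are 3, 4 and 7; h[0] counts each point with itself
# h == [3, 0, 0, 1, 1, 0, 0, 1, 0, 0]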
Example #28
Source File: stressmodels.py    From pastas with MIT License
def simulate(self, p, tmin=None, tmax=None, freq=None, dt=1, istress=None):
        """Simulates the head contribution.

        Parameters
        ----------
        p: numpy.ndarray
           Parameters used for simulation.
        tmin: str, optional
        tmax: str, optional
        freq: str, optional
        dt: int, optional
        istress: int, optional

        Returns
        -------
        pandas.Series
            The simulated head contribution.

        """
        self.update_stress(tmin=tmin, tmax=tmax, freq=freq)
        b = self.get_block(p[:-1], dt, tmin, tmax)
        stress = self.get_stress(p=p, istress=istress)
        npoints = stress.index.size
        h = Series(data=fftconvolve(stress, b, 'full')[:npoints],
                   index=stress.index, name=self.name, fastpath=True)
        if istress is not None:
            if self.stress[istress].name is not None:
                h.name = h.name + ' (' + self.stress[istress].name + ')'
        # see whether it makes a difference to subtract gain * mean_stress
        # h -= self.rfunc.gain(p) * stress.mean()
        return h 
Example #29
Source File: utils.py    From medSynthesisV1 with MIT License
def ssim(img1, img2, cs_map=False):
    """Return the Structural Similarity Map corresponding to input images img1 
    and img2 (images are assumed to be uint8)
    
    This function attempts to mimic precisely the functionality of ssim.m, a
    MATLAB script provided by the authors of SSIM:
    https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
    """
    img1 = img1.astype(numpy.float64)
    img2 = img2.astype(numpy.float64)
    size = 11
    sigma = 1.5
    window = gauss.fspecial_gauss(size, sigma)
    K1 = 0.01
    K2 = 0.03
    L = 255 #bitdepth of image
    C1 = (K1*L)**2
    C2 = (K2*L)**2
    mu1 = signal.fftconvolve(window, img1, mode='valid')
    mu2 = signal.fftconvolve(window, img2, mode='valid')
    mu1_sq = mu1*mu1
    mu2_sq = mu2*mu2
    mu1_mu2 = mu1*mu2
    sigma1_sq = signal.fftconvolve(window, img1*img1, mode='valid') - mu1_sq
    sigma2_sq = signal.fftconvolve(window, img2*img2, mode='valid') - mu2_sq
    sigma12 = signal.fftconvolve(window, img1*img2, mode='valid') - mu1_mu2
    if cs_map:
        return (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2)), 
                (2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
    else:
        return ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
                    (sigma1_sq + sigma2_sq + C2)) 
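gauss.fspecial_gauss is a project helper; assuming it mirrors MATLAB's fspecial('gaussian'), an equivalent window can be sketched as:

import numpy as np

def fspecial_gauss(size, sigma):
    """2-D Gaussian window normalised to sum to 1 (cf. MATLAB fspecial('gaussian'))."""
    ax = np.arange(size) - (size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    g = np.exp(-(xx ** 2 + yy ** 2) / (2.0 * sigma ** 2))
    return g / g.sum()

window = fspecial_gauss(11, 1.5)   # the 11x11, sigma = 1.5 window used in ssim() above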
Example #30
Source File: stressmodels.py    From pastas with MIT License
def simulate(self, p, tmin=None, tmax=None, freq=None, dt=1):
        tstart = Timestamp.fromordinal(int(p[-1]), freq="D")
        tindex = date_range(tmin, tmax, freq=freq)
        h = Series(0, tindex, name=self.name)
        h.loc[h.index > tstart] = 1

        b = self.get_block(p[:-1], dt, tmin, tmax)
        npoints = h.index.size
        h = Series(data=fftconvolve(h, b, 'full')[:npoints],
                   index=h.index, name=self.name, fastpath=True)
        return h