Python scipy.fftpack Examples

The following are 14 code examples of the scipy.fftpack module, drawn from open-source projects; each example lists the project and source file it comes from. You may also want to check out all available functions and classes of the scipy module.
Example #1
Source File: thinkdsp.py    From Lie_to_me with MIT License
def make_dct(self):
        """Computes the DCT of this wave.
        """
        N = len(self.ys)
        hs = scipy.fftpack.dct(self.ys, type=2)
        fs = (0.5 + np.arange(N)) / 2
        return Dct(hs, fs, self.framerate) 
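A minimal stand-alone sketch of the same scipy.fftpack.dct call on a toy array (the waveform below is illustrative and stands in for thinkdsp's Wave.ys); note that norm='ortho' is an alternative to the manual rescaling that make_wave applies further down in this listing:

import numpy as np
import scipy.fftpack

ys = np.random.randn(256)                               # toy samples, standing in for self.ys
hs = scipy.fftpack.dct(ys, type=2)                      # unnormalized type-2 DCT, as above
fs = (0.5 + np.arange(len(ys))) / 2                     # matching frequency grid (framerate-dependent in thinkdsp)
hs_ortho = scipy.fftpack.dct(ys, type=2, norm='ortho')  # orthonormal variant; no 2*N factor to undo later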
Example #2
Source File: noise_module.py    From NoisePy with MIT License
def optimized_correlate1(fft1_smoothed_abs,fft2,maxlag,dt,Nfft,nwin,method="cross-correlation"):
    '''
    Optimized version of the correlation functions: the smoothed source-spectrum
    amplitude is moved out of the inner for loop.
    It also exploits the linearity of the ifft: stacking in the spectral domain
    first reduces the number of ifft calls, which was the most time-consuming
    step in the previous correlate function.
    '''

    #------convert all 2D arrays into 1D to speed up--------
    corr = np.zeros(nwin*(Nfft//2),dtype=np.complex64)
    corr = fft1_smoothed_abs.reshape(fft1_smoothed_abs.size,) * fft2.reshape(fft2.size,)

    if method == "coherence":
        temp = moving_ave(np.abs(fft2.reshape(fft2.size,)),10)
        # NumPy division by zero does not raise ValueError, so check explicitly
        if not np.all(temp):
            raise ValueError('smoothed spectrum has zero values')
        corr /= temp

    corr  = corr.reshape(nwin,Nfft//2)
    ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
    ncorr[:Nfft//2] = np.mean(corr,axis=0)
    ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
    ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0)))

    tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
    ind   = np.where(np.abs(tcorr) <= maxlag)[0]
    ncorr = ncorr[ind]
    
    return ncorr 
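The negative-frequency half of ncorr is rebuilt from the positive half by Hermitian symmetry before the inverse FFT. A compact stand-alone sketch of that reconstruction (the random half-spectrum is illustrative only):

import numpy as np
import scipy.fftpack

Nfft = 16
half = np.random.randn(Nfft // 2) + 1j * np.random.randn(Nfft // 2)  # stands in for the stacked half spectrum
half[0] = half[0].real                       # keep the DC bin real so the result can be purely real

full = np.zeros(Nfft, dtype=np.complex64)
full[:Nfft // 2] = half
full[-(Nfft // 2) + 1:] = np.flip(np.conj(full[1:Nfft // 2]), axis=0)  # same Hermitian mirror as above
cc = np.fft.ifftshift(scipy.fftpack.ifft(full, Nfft))
print(np.max(np.abs(cc.imag)))               # ~1e-7: real up to float32 round-off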
Example #3
Source File: noise_module.py    From NoisePy with MIT License
def C3_process(S1_data,S2_data,Nfft,win):
    '''
    performs all C3 processing steps, including 1) cutting the time window for the P-N parts;
    2) taking the FFT of the two time series; 3) cross-correlating in the frequency domain;
    4) inverse FFT back to the time domain
    '''
    #-----initialize the spectrum variables----
    ccp1 = np.zeros(Nfft,dtype=np.complex64)
    ccn1 = ccp1
    ccp2 = ccp1
    ccn2 = ccp1
    ccp  = ccp1
    ccn  = ccp1

    #------find the time window for sta1------
    S1_data_N = S1_data[win[0]:win[1]]
    S1_data_N = S1_data_N[::-1]
    S1_data_P = S1_data[win[2]:win[3]]
    S2_data_N = S2_data[win[0]:win[1]]
    S2_data_N = S2_data_N[::-1]
    S2_data_P = S2_data[win[2]:win[3]]

    #---------------do FFT-------------
    ccp1 = scipy.fftpack.fft(S1_data_P, Nfft)
    ccn1 = scipy.fftpack.fft(S1_data_N, Nfft)
    ccp2 = scipy.fftpack.fft(S2_data_P, Nfft)
    ccn2 = scipy.fftpack.fft(S2_data_N, Nfft)

    #------cross correlations--------
    ccp = np.conj(ccp1)*ccp2
    ccn = np.conj(ccn1)*ccn2

    return ccp,ccn 
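The conjugate multiplication above is the standard frequency-domain cross-correlation. A stand-alone sketch with toy signals showing how such spectra are typically taken back to the time domain with scipy.fftpack.ifft (the delay and array sizes are illustrative):

import numpy as np
import scipy.fftpack

rng = np.random.default_rng(0)
Nfft = 128
s1 = rng.standard_normal(100)                # toy window, standing in for S1_data_P
s2 = np.roll(s1, 5)                          # the same signal delayed by 5 samples

cc = np.conj(scipy.fftpack.fft(s1, Nfft)) * scipy.fftpack.fft(s2, Nfft)
cc_t = np.real(scipy.fftpack.ifft(cc, Nfft)) # circular cross-correlation
print(np.argmax(cc_t))                       # 5: the peak sits at the 5-sample delay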
Example #4
Source File: noise_module.py    From NoisePy with MIT License
def check_and_phase_shift(trace):
    # print trace
    taper_length = 20.0
    # if trace.stats.npts < 4 * taper_length*trace.stats.sampling_rate:
    # 	trace.data = np.zeros(trace.stats.npts)
    # 	return trace

    dt = np.mod(trace.stats.starttime.datetime.microsecond*1.0e-6,
                trace.stats.delta)
    if (trace.stats.delta - dt) <= np.finfo(float).eps:
        dt = 0
    if dt != 0:
        if dt <= (trace.stats.delta / 2.):
            dt = -dt
        # direction = "left"
        else:
            dt = (trace.stats.delta - dt)
        # direction = "right"
        trace.detrend(type="demean")
        trace.detrend(type="simple")
        taper_1s = taper_length * float(trace.stats.sampling_rate) / trace.stats.npts
        trace.taper(taper_1s)

        n = int(2**nextpow2(len(trace.data)))
        FFTdata = scipy.fftpack.fft(trace.data, n=n)
        fftfreq = scipy.fftpack.fftfreq(n, d=trace.stats.delta)
        FFTdata = FFTdata * np.exp(1j * 2. * np.pi * fftfreq * dt)
        trace.data = np.real(scipy.fftpack.ifft(FFTdata, n=n)[:len(trace.data)])
        trace.stats.starttime += dt
        return trace
    else:
        return trace 
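The core of the routine is the Fourier shift theorem: multiplying the spectrum by exp(2j*pi*f*dt) shifts the trace by dt seconds without resampling. A stand-alone sketch on a plain NumPy array (no ObsPy Trace involved; the tone and rates below are illustrative):

import numpy as np
import scipy.fftpack

fs = 100.0                                   # sampling rate in Hz
n = 256
t = np.arange(n) / fs
f0 = 8 * fs / n                              # a tone that fits the window exactly
x = np.sin(2 * np.pi * f0 * t)

shift = 0.004                                # 0.4 of a sample, i.e. a sub-sample shift
X = scipy.fftpack.fft(x, n=n)
freqs = scipy.fftpack.fftfreq(n, d=1.0 / fs)
x_shifted = np.real(scipy.fftpack.ifft(X * np.exp(2j * np.pi * freqs * shift), n=n))

# the phase ramp advances the periodic tone by `shift` seconds
assert np.allclose(x_shifted, np.sin(2 * np.pi * f0 * (t + shift)))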
Example #5
Source File: noise_module.py    From NoisePy with MIT License
def optimized_correlate1(fft1_smoothed_abs,fft2,maxlag,dt,Nfft,nwin,method="cross-correlation"):
    '''
    Optimized version of the correlation functions: the smoothed source-spectrum
    amplitude is moved out of the inner for loop.
    It also exploits the linearity of the ifft: stacking in the spectral domain
    first reduces the number of ifft calls, which was the most time-consuming
    step in the previous correlate function.
    '''

    #------convert all 2D arrays into 1D to speed up--------
    corr = np.zeros(nwin*(Nfft//2),dtype=np.complex64)
    corr = fft1_smoothed_abs.reshape(fft1_smoothed_abs.size,) * fft2.reshape(fft2.size,)

    if method == "coherence":
        temp = moving_ave(np.abs(fft2.reshape(fft2.size,)),10)
        corr /= temp

    corr  = corr.reshape(nwin,Nfft//2)
    ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
    ncorr[:Nfft//2] = np.mean(corr,axis=0)
    ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
    ncorr[0]=complex(0,0)
    ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0)))

    tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
    ind   = np.where(np.abs(tcorr) <= maxlag)[0]
    ncorr = ncorr[ind]
    
    return ncorr 
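The only substantive change from Example #2 is the added ncorr[0] = complex(0,0): zeroing the DC bin forces the time-domain correlation to have zero mean. A tiny illustration (toy spectrum only):

import numpy as np
import scipy.fftpack

spec = scipy.fftpack.fft(np.random.randn(64) + 3.0)  # toy spectrum with a strong DC component
spec[0] = 0                                          # zero the DC bin, as ncorr[0] = complex(0,0) does
out = np.real(scipy.fftpack.ifft(spec))
print(np.isclose(out.mean(), 0.0))                   # True: the output is demeaned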
Example #6
Source File: noise_module.py    From NoisePy with MIT License
def noise_processing(fft_para,dataS):
    '''
    this function performs time-domain and frequency-domain normalization if needed. In practice, we prefer to include
    the normalization in the cross-correlation step by selecting the coherency or deconvolution method (Prieto et al., 2008, 2009; Denolle et al., 2013)
    PARAMETERS:
    ------------------------
    fft_para: dictionary containing all useful variables used for fft and cc
    dataS: 2D matrix of all segmented noise data
    OUTPUT VARIABLES:
    source_white: 2D matrix of data spectra
    '''
    # load parameters first
    time_norm   = fft_para['time_norm']
    freq_norm   = fft_para['freq_norm']
    smooth_N    = fft_para['smooth_N']
    N = dataS.shape[0]

    #------to normalize in time or not------
    if time_norm != 'no':

        if time_norm == 'one_bit': 	# sign normalization
            white = np.sign(dataS)
        elif time_norm == 'rma': # running mean: normalization over smoothed absolute average           
            white = np.zeros(shape=dataS.shape,dtype=dataS.dtype)
            for kkk in range(N):
                white[kkk,:] = dataS[kkk,:]/moving_ave(np.abs(dataS[kkk,:]),smooth_N)

    else:	# don't normalize
        white = dataS

    #-----to whiten or not------
    if freq_norm != 'no':
        source_white = whiten(white,fft_para)	# whiten and return FFT
    else:
        Nfft = int(next_fast_len(int(dataS.shape[1])))
        source_white = scipy.fftpack.fft(white, Nfft, axis=1) # return FFT
    
    return source_white 
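A stand-alone sketch of the two time-domain options on a toy matrix; np.convolve stands in for the module's moving_ave helper (an assumption about its behaviour, not the actual implementation), and next_fast_len comes from scipy.fftpack:

import numpy as np
import scipy.fftpack
from scipy.fftpack import next_fast_len

dataS = np.random.randn(4, 1000)             # 4 toy noise segments
smooth_N = 20

white_onebit = np.sign(dataS)                # 'one_bit': keep only the sign of each sample

white_rma = np.empty_like(dataS)             # 'rma': divide by a smoothed absolute amplitude
for k in range(dataS.shape[0]):
    env = np.convolve(np.abs(dataS[k]), np.ones(smooth_N) / smooth_N, mode='same')
    white_rma[k] = dataS[k] / env

Nfft = int(next_fast_len(dataS.shape[1]))
source_white = scipy.fftpack.fft(white_onebit, Nfft, axis=1)   # spectra, as returned above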
Example #7
Source File: poisson_reconstruct.py    From SynthText with Apache License 2.0
def IDST(X):
    """
    Inverse DST: rescales SciPy's output to match Matlab's convention.
    """
    n = X.shape[0]
    x = np.real(scipy.fftpack.idst(X,type=1,axis=0))
    return x/(n+1.0) 
Example #8
Source File: poisson_reconstruct.py    From SynthText with Apache License 2.0
def DST(x):
    """
    Converts Scipy's DST output to Matlab's DST (scaling).
    """
    X = scipy.fftpack.dst(x,type=1,axis=0)
    return X/2.0 
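Taken together, DST and IDST form an exact inverse pair: SciPy's unnormalized type-1 transforms satisfy idst(dst(x, type=1), type=1) == 2*(n+1)*x, which the two scalings cancel. A quick check, assuming the two helpers above are defined in the session:

import numpy as np

x = np.random.randn(33, 2)             # toy 2-D field, transformed along axis 0
assert np.allclose(IDST(DST(x)), x)    # the Matlab-style scaling makes them a true inverse pair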
Example #9
Source File: test_import.py    From GraphicDesignPatternByPython with MIT License
def test_fftpack_import(self):
            base = Path(scipy.__file__).parent
            regexp = r"\s*from.+\.fftpack import .*\n"
            for path in base.rglob("*.py"):
                if base / "fftpack" in path.parents:
                    continue
                # use tokenize to auto-detect encoding on systems where no
                # default encoding is defined (e.g. LANG='C')
                with tokenize.open(str(path)) as file:
                    assert_(all(not re.fullmatch(regexp, line)
                                for line in file),
                            "{0} contains an import from fftpack".format(path)) 
Example #10
Source File: thinkdsp.py    From Lie_to_me with MIT License
def make_wave(self):
        """Transforms to the time domain.

        returns: Wave
        """
        N = len(self.hs)
        ys = scipy.fftpack.idct(self.hs, type=2) / 2 / N
        #NOTE: whatever the start time was, we lose it when
        # we transform back
        #ts = self.start + np.arange(len(ys)) / self.framerate
        return Wave(ys, framerate=self.framerate) 
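Example #1's make_dct and this make_wave are inverses because SciPy's unnormalized transforms satisfy idct(dct(x, type=2), type=2) == 2*N*x, which the /2/N rescaling undoes. A quick stand-alone check (toy samples only):

import numpy as np
import scipy.fftpack

ys = np.random.randn(128)                                 # toy samples, standing in for self.ys
hs = scipy.fftpack.dct(ys, type=2)                        # forward DCT, as in make_dct
ys_back = scipy.fftpack.idct(hs, type=2) / 2 / len(ys)    # inverse with the same rescaling as make_wave
assert np.allclose(ys, ys_back)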
Example #11
Source File: noise_module.py    From NoisePy with MIT License
def adaptive_filter(arr,g):
    '''
    the adaptive covariance filter to enhance coherent signals. Follows the method of
    Nakata et al., 2015 (Appendix B)

    the filtered signal x1 is given by x1 = ifft(P*X1(w)), where X1(w) is the FFT spectrum of the
    trace and P is the filter, constructed from the temporal covariance matrix.

    PARAMETERS:
    ----------------------
    arr: numpy.ndarray contains the 2D traces of daily/hourly cross-correlation functions
    g: a positive number to adjust the filter harshness
    RETURNS:
    ----------------------
    narr: numpy vector contains the stacked cross correlation function
    '''
    if arr.ndim == 1:
        return arr
    N,M = arr.shape
    Nfft = next_fast_len(M)

    # fft the 2D array
    spec = scipy.fftpack.fft(arr,axis=1,n=Nfft)[:,:M]

    # make cross-spectrum matrix
    cspec = np.zeros(shape=(N*N,M),dtype=np.complex64)
    for ii in range(N):
        for jj in range(N):
            kk = ii*N+jj
            cspec[kk] = spec[ii]*np.conjugate(spec[jj])

    S1 = np.zeros(M,dtype=np.complex64)
    S2 = np.zeros(M,dtype=np.complex64)
    # construct the filter P
    for ii in range(N):
        mm = ii*N+ii
        S2 += cspec[mm]
        for jj in range(N):
            kk = ii*N+jj
            S1 += cspec[kk]

    p = np.power((S1-S2)/(S2*(N-1)),g)

    # make ifft
    narr = np.real(scipy.fftpack.ifft(np.multiply(p,spec),Nfft,axis=1)[:,:M])
    return np.mean(narr,axis=0) 
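A hedged usage sketch for the filter above; it assumes adaptive_filter has been pasted into a session where numpy, scipy.fftpack and next_fast_len are already imported, and the toy data are purely illustrative:

import numpy as np
from scipy.fftpack import next_fast_len        # the helper the function calls unqualified

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 500)
wavelet = np.exp(-((t - 0.4) ** 2) / 2e-4)                 # coherent arrival shared by all windows
arr = wavelet + 0.8 * rng.standard_normal((30, t.size))    # 30 noisy daily correlation functions

stack_mean = arr.mean(axis=0)            # conventional linear stack
stack_acf = adaptive_filter(arr, g=1)    # covariance-weighted stack from the function above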
Example #12
Source File: noise_module.py    From NoisePy with MIT License
def correlate(fft1,fft2, maxlag,dt, Nfft, method="cross-correlation"):
    """This function takes ndimensional *data* array, computes the cross-correlation in the frequency domain
    and returns the cross-correlation function between [-*maxlag*:*maxlag*].

    :type fft1: :class:`numpy.ndarray`
    :param fft1: This array contains the fft of each timeseries to be cross-correlated.
    :type maxlag: int
    :param maxlag: This number defines the number of samples (N=2*maxlag + 1) of the CCF that will be returned.

    :rtype: :class:`numpy.ndarray`
    :returns: The cross-correlation function between [-maxlag:maxlag]
    """
    # Speed up FFT by padding to optimal size for FFTPACK
    t0=time.time()
    if fft1.ndim == 1:
        axis = 0
        nwin=1
    elif fft1.ndim == 2:
        axis = 1
        nwin= int(fft1.shape[0])

    corr=np.zeros(shape=(nwin,Nfft),dtype=np.complex64)
    corr[:,:Nfft//2]  = np.conj(fft1) * fft2

    if method == 'deconv':
        ind = np.where(np.abs(fft1)>0)
        corr[ind] /= moving_ave(np.abs(fft1[ind]),10)**2
        #corr[ind] /= running_abs_mean(np.abs(fft1[ind]),10) ** 2
    elif method == 'coherence':
        ind = np.where(np.abs(fft1)>0)
        corr[ind] /= running_abs_mean(np.abs(fft1[ind]),5)
        ind = np.where(np.abs(fft2)>0)
        corr[ind] /= running_abs_mean(np.abs(fft2[ind]),5)
    elif method == 'raw':
        ind = 1

    #--------------------problems: [::-1] only flips along axis=0 direction------------------------
    #corr[:,-(Nfft // 2):] = corr[:,:(Nfft // 2)].conjugate()[::-1] # fill in the complex conjugate
    #----------------------------------------------------------------------------------------------
    corr[:,0] = complex(0,0)
    corr[:,-(Nfft//2)+1:]=np.flip(np.conj(corr[:,1:(Nfft//2)]),axis=axis)
    corr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(corr, Nfft, axis=axis)))

    tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
    ind = np.where(np.abs(tcorr) <= maxlag)[0]
    if axis == 1:
        corr = corr[:,ind]
    else:
        corr = corr[ind]
    tcorr=tcorr[ind]

    t1=time.time()
    print('original takes '+str(t1-t0))
    return corr,tcorr 
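A hedged sketch of how the inputs are typically prepared for this function: each argument holds the positive-frequency half of the windowed spectra, and method='raw' keeps the sketch free of the module's moving_ave / running_abs_mean helpers. It assumes correlate above is defined in a session where numpy, scipy.fftpack and time are imported:

import numpy as np
import scipy.fftpack
from scipy.fftpack import next_fast_len

dt = 0.05                                    # sample spacing in seconds (illustrative)
data1 = np.random.randn(4, 2000)             # 4 toy windows per station
data2 = np.random.randn(4, 2000)

Nfft = int(next_fast_len(2 * data1.shape[1]))
fft1 = scipy.fftpack.fft(data1, Nfft, axis=1)[:, :Nfft // 2]   # keep positive frequencies only
fft2 = scipy.fftpack.fft(data2, Nfft, axis=1)[:, :Nfft // 2]

corr, tcorr = correlate(fft1, fft2, maxlag=20, dt=dt, Nfft=Nfft, method='raw')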
Example #13
Source File: comp_stacking.py    From NoisePy with MIT License
def adaptive_filter(cc_array,g):
    '''
    the adaptive covariance filter to enhance coherent signals. Follows the method of
    Nakata et al., 2015 (Appendix B)

    the filtered signal x1 is given by x1 = ifft(P*X1(w)), where X1(w) is the FFT spectrum of the
    trace and P is the filter, constructed from the temporal covariance matrix.

    PARAMETERS:
    ----------------------
    cc_array: numpy.ndarray contains the 2D traces of daily/hourly cross-correlation functions
    g: a positive number to adjust the filter harshness
    RETURNS:
    ----------------------
    narr: numpy vector contains the stacked cross correlation function

    Written by Chengxin Jiang @Harvard (Oct2019)
    '''
    if cc_array.ndim == 1:
        print('2D matrix is needed for adaptive filtering')
        return cc_array
    N,M = cc_array.shape
    Nfft = next_fast_len(M)

    # fft the 2D array
    spec = scipy.fftpack.fft(cc_array,axis=1,n=Nfft)[:,:M]

    # make cross-spectrum matrix
    cspec = np.zeros(shape=(N*N,M),dtype=np.complex64)
    for ii in range(N):
        for jj in range(N):
            kk = ii*N+jj
            cspec[kk] = spec[ii]*np.conjugate(spec[jj])
        
    S1 = np.zeros(M,dtype=np.complex64)
    S2 = np.zeros(M,dtype=np.complex64)
    # construct the filter P
    for ii in range(N):
        mm = ii*N+ii
        S2 += cspec[mm]
        for jj in range(N):
            kk = ii*N+jj
            S1 += cspec[kk]
    
    p = np.power((S1-S2)/(S2*(N-1)),g)

    # make ifft
    narr = np.real(scipy.fftpack.ifft(np.multiply(p,spec),Nfft,axis=1)[:,:M])
    return np.mean(narr,axis=0) 
Example #14
Source File: noise_module.py    From NoisePy with MIT License
def noise_processing(fft_para,dataS,flag):
    '''
    perform time-domain and frequency-domain normalization according to the user's needs. Note that
    this step is not recommended if the deconv or coherency method is selected for calculating
    cross-correlation functions.

    fft_para: dictionary containing all useful variables used for fft
    dataS: data matrix containing all segmented noise data
    flag: boolean variable controlling whether to print intermediate information
    '''
    # load parameters first
    time_norm   = fft_para['time_norm']
    to_whiten   = fft_para['to_whiten']
    smooth_N    = fft_para['smooth_N']

    N = dataS.shape[0]

    #------to normalize in time or not------
    if time_norm:
        t0=time.time()   

        if time_norm == 'one_bit': 
            white = np.sign(dataS)
        elif time_norm == 'running_mean':
            
            #--------convert to 1D array for smoothing in time-domain---------
            white = np.zeros(shape=dataS.shape,dtype=dataS.dtype)
            for kkk in range(N):
                white[kkk,:] = dataS[kkk,:]/moving_ave(np.abs(dataS[kkk,:]),smooth_N)

        t1=time.time()
        if flag:
            print("temporal normalization takes %f s"%(t1-t0))
    else:
        white = dataS

    #-----to whiten or not------
    if to_whiten:

        t0=time.time()
        source_white = whiten(white,fft_para)
        t1=time.time()
        if flag:
            print("spectral whitening takes %f s"%(t1-t0))
    else:

        Nfft = int(next_fast_len(int(dataS.shape[1])))
        source_white = scipy.fftpack.fft(white, Nfft, axis=1)
    
    return source_white
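A hedged usage sketch; the fft_para keys mirror the ones read above, and the values are assumptions chosen so that the whiten() and moving_ave() helpers are not needed. It assumes noise_processing above is defined in a session where numpy, time, scipy.fftpack and next_fast_len are imported:

import numpy as np

fft_para = {'time_norm': 'one_bit',    # sign normalization; 'running_mean' would also need moving_ave
            'to_whiten': False,        # skip whiten() so the sketch stays self-contained
            'smooth_N': 20}

dataS = np.random.randn(6, 3000)                  # 6 toy noise segments
source_white = noise_processing(fft_para, dataS, flag=True)
print(source_white.shape)                         # (6, 3000): next_fast_len(3000) frequency bins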