Python numpy.convolve() Examples

The following are 30 code examples showing how to use numpy.convolve(). Each example is extracted from an open source project; the project, author, source file, and license are listed above the code.

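Before working through the examples, here is a minimal, self-contained sketch of what the three mode options of numpy.convolve() return (the input arrays are arbitrary illustration values):

import numpy as np

signal = np.array([1.0, 2.0, 3.0, 4.0, 5.0])   # length 5
kernel = np.array([0.25, 0.5, 0.25])           # length 3

# 'full': every point of overlap, length 5 + 3 - 1 = 7
print(np.convolve(signal, kernel, mode="full"))
# 'same': trimmed to the length of the longer input (5), centered on the 'full' output
print(np.convolve(signal, kernel, mode="same"))
# 'valid': only positions of complete overlap, length 5 - 3 + 1 = 3
print(np.convolve(signal, kernel, mode="valid"))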

Example 1
Project: vidgear   Author: abhiTronix   File: stabilizer.py    License: Apache License 2.0
def __box_filter_convolve(self, path, window_size):
        """
        An internal method that applies a *normalized linear box filter* to the path with respect to the averaging window.
        
        Parameters:
        
        * path (numpy.ndarray): a cumulative sum of transformations
        * window_size (int): averaging window size
        """
        # pad path to size of averaging window
        path_padded = np.pad(path, (window_size, window_size), "median")
        # apply linear box filter to path
        path_smoothed = np.convolve(path_padded, self.__box_filter, mode="same")
        # crop the smoothed path back to the original length
        path_smoothed = path_smoothed[window_size:-window_size]
        # sanity-check that cropping restored the original shape
        assert path.shape == path_smoothed.shape
        # return smoothed path
        return path_smoothed 
Example 2
Project: neat-python   Author: CodeReclaimers   File: evolve.py    License: BSD 3-Clause "New" or "Revised" License
def compute_fitness(genome, net, episodes, min_reward, max_reward):
    m = int(round(np.log(0.01) / np.log(genome.discount)))
    discount_function = [genome.discount ** (m - i) for i in range(m + 1)]

    reward_error = []
    for score, data in episodes:
        # Compute normalized discounted reward.
        dr = np.convolve(data[:,-1], discount_function)[m:]
        dr = 2 * (dr - min_reward) / (max_reward - min_reward) - 1.0
        dr = np.clip(dr, -1.0, 1.0)

        for row, dr in zip(data, dr):
            observation = row[:8]
            action = int(row[8])
            output = net.activate(observation)
            reward_error.append(float((output[action] - dr) ** 2))

    return reward_error 
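The np.convolve(..., discount_function)[m:] line above computes a truncated discounted return for every time step in a single call. A minimal standalone check of that identity (the reward values and discount factor below are made up for illustration):

import numpy as np

rewards = np.array([1.0, 0.0, 2.0, -1.0, 0.5, 3.0])
discount = 0.9
m = int(round(np.log(0.01) / np.log(discount)))            # horizon where discount**m drops to ~0.01
discount_function = [discount ** (m - i) for i in range(m + 1)]

# Convolution form: dr[t] = sum_{j=0..m} discount**j * rewards[t+j], truncated at the end of the array
dr = np.convolve(rewards, discount_function)[m:]

# Direct computation of the same quantity for comparison
direct = np.array([
    sum(discount ** j * rewards[t + j] for j in range(min(m + 1, len(rewards) - t)))
    for t in range(len(rewards))
])
assert np.allclose(dr, direct)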
Example 3
Project: pulse2percept   Author: pulse2percept   File: test_convolution.py    License: BSD 3-Clause "New" or "Revised" License
def test_conv(mode, method):
    reload(convolution)
    # time vector for stimulus (long)
    stim_dur = 0.5  # seconds
    tsample = 0.001 / 1000
    t = np.arange(0, stim_dur, tsample)

    # stimulus (10 Hz anodic and cathodic pulse train)
    stim = np.zeros_like(t)
    stim[::1000] = 1
    stim[100::1000] = -1

    # kernel
    _, gg = gamma(1, 0.005, tsample)

    # make sure conv returns the same result as np.convolve for all modes:
    npconv = np.convolve(stim, gg, mode=mode)
    conv = convolution.conv(stim, gg, mode=mode, method=method)
    npt.assert_equal(conv.shape, npconv.shape)
    npt.assert_almost_equal(conv, npconv)

    with pytest.raises(ValueError):
        convolution.conv(gg, stim, mode="invalid")
    with pytest.raises(ValueError):
        convolution.conv(gg, stim, method="invalid") 
Example 4
Project: tensorflow-wavenet   Author: ibab   File: test_causal_conv.py    License: MIT License
def testCausalConv(self):
        """Tests that the op is equivalent to a numpy implementation."""
        x1 = np.arange(1, 21, dtype=np.float32)
        x = np.append(x1, x1)
        x = np.reshape(x, [2, 20, 1])
        f = np.reshape(np.array([1, 1], dtype=np.float32), [2, 1, 1])
        out = causal_conv(x, f, 4)

        with self.test_session() as sess:
            result = sess.run(out)

        # Causal convolution using numpy
        ref = np.convolve(x1, [1, 0, 0, 0, 1], mode='valid')
        ref = np.append(ref, ref)
        ref = np.reshape(ref, [2, 16, 1])

        self.assertAllEqual(result, ref) 
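The reference kernel [1, 0, 0, 0, 1] used above is just the two-tap filter [1, 1] with zeros inserted for a dilation of 4. A small sketch of that construction (the helper name is illustrative, not part of the project):

import numpy as np

def dilate_filter(taps, dilation):
    """Insert (dilation - 1) zeros between consecutive filter taps."""
    taps = np.asarray(taps, dtype=np.float32)
    dense = np.zeros((len(taps) - 1) * dilation + 1, dtype=taps.dtype)
    dense[::dilation] = taps
    return dense

assert np.array_equal(dilate_filter([1, 1], 4), [1, 0, 0, 0, 1])

x1 = np.arange(1, 21, dtype=np.float32)
ref = np.convolve(x1, dilate_filter([1, 1], 4), mode='valid')   # matches the reference used in the test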
Example 5
Project: lambda-packs   Author: ryfeus   File: chebyshev.py    License: MIT License
def _zseries_mul(z1, z2):
    """Multiply two z-series.

    Multiply two z-series to produce a z-series.

    Parameters
    ----------
    z1, z2 : 1-D ndarray
        The arrays must be 1-D but this is not checked.

    Returns
    -------
    product : 1-D ndarray
        The product z-series.

    Notes
    -----
    This is simply convolution. If symmetric/anti-symmetric z-series are
    denoted by S/A then the following rules apply:

    S*S, A*A -> S
    S*A, A*S -> A

    """
    return np.convolve(z1, z2) 
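A quick check of the symmetry rule stated in the Notes: convolving two symmetric (palindromic) z-series yields another symmetric z-series (the coefficient values are arbitrary):

import numpy as np

z1 = np.array([1.0, 2.0, 3.0, 2.0, 1.0])    # symmetric z-series
z2 = np.array([0.5, 4.0, 0.5])              # symmetric z-series

product = np.convolve(z1, z2)               # what _zseries_mul returns
assert np.allclose(product, product[::-1])  # S*S -> S: the product is symmetric as well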
Example 6
Project: ocelot   Author: ocelot-collab   File: physics_proc.py    License: GNU General Public License v3.0
def convolve_beam(self, current, wake):
        """
        Convolve the wake function with the beam current.

        :param current: current[:, 0] - s in [m], current[:, 1] - current in [A]
        :param wake: wake function in form: wake(s, b, t, period)
        :return: wake_kick - (n, 2) array with columns [s, wake]
        """
        s_shift = current[0, 0]
        current[:, 0] -= s_shift
        s = current[:, 0]

        step = (s[-1] - s[0]) / (len(s) - 1)
        q = current[:, 1] / speed_of_light

        w = np.array(
            [wake(si, b=self.b, t=self.t, period=self.period) for si in s]) * 377 * speed_of_light / (
                    4 * np.pi)
        wake = np.convolve(q, w) * step
        s_new = np.cumsum(np.ones(len(wake))) * step
        wake_kick = np.vstack((s_new, wake))
        return wake_kick.T 
Example 7
Project: vnpy_crypto   Author: birforce   File: arma_mle.py    License: MIT License
def forecast3(self, step_ahead=1, start=None): #, end=None):
        '''Another attempt at h-step-ahead forecasting.
        '''

        from .arima_process import arma2ma, ArmaProcess
        p,q = self.nar, self.nma
        k=0
        ar = self.params[k:k+p]
        ma = self.params[k+p:k+p+q]
        marep = arma2ma(ar,ma, start)[step_ahead+1:]  #truncated ma representation
        errors = self.error_estimate
        forecasts = np.convolve(errors, marep)
        return forecasts#[-(errors.shape[0] - start-5):] #get 5 overlapping for testing




    #copied from arima.ARIMA
    #TODO: is this needed as a method at all?
    #JP: not needed in this form, but can be replaced by using the parameters
Example 8
Project: nussl   Author: nussl   File: melodia.py    License: MIT License
def create_harmonic_mask(self, melody_signal):
        """
        Creates a harmonic mask from the melody signal. The mask is smoothed to reduce 
        the effects of discontinuities introduced by the melody synthesizer.
        """
        stft = np.abs(melody_signal.stft())

        # Need to threshold the melody stft since the synthesized
        # F0 sequence overtones are at different weights.
        stft = stft ** self.compression
        stft /= np.maximum(np.max(stft, axis=1, keepdims=True), 1e-7)

        mask = np.empty(self.stft.shape)

        # Smoothing the mask row-wise using a low-pass filter to
        # get rid of discontinuities in the mask.
        kernel = np.full((1, self.smooth_length), 1 / self.smooth_length)
        for ch in range(self.audio_signal.num_channels):
            mask[..., ch] = convolve(stft[..., ch], kernel)
        return mask 
Example 9
Project: deep-learning-note   Author: wdxtub   File: util.py    License: MIT License
def smooth_curve(x):
    """用于使损失函数的图形变圆滑
    参考:http://glowingpython.blogspot.jp/2012/02/convolution-with-numpy.html
    """
    window_len = 11
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    w = np.kaiser(window_len, 2)
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y[5:len(y)-5] 
Example 10
Project: NiBetaSeries   Author: HBClab   File: conftest.py    License: MIT License
def preproc_file(deriv_dir, sub_metadata, deriv_bold_fname=deriv_bold_fname):
    deriv_bold = deriv_dir.ensure(deriv_bold_fname)
    with open(str(sub_metadata), 'r') as md:
        bold_metadata = json.load(md)
    tr = bold_metadata["RepetitionTime"]
    # time_points
    tp = 200
    ix = np.arange(tp)
    # create voxel timeseries
    task_onsets = np.zeros(tp)
    # add activations at every 40 time points
    # waffles
    task_onsets[0::40] = 1
    # fries
    task_onsets[3::40] = 1.5
    # milkshakes
    task_onsets[6::40] = 2
    signal = np.convolve(task_onsets, spm_hrf(tr))[0:len(task_onsets)]
    # csf
    csf = np.cos(2*np.pi*ix*(50/tp)) * 0.1
    # white matter
    wm = np.sin(2*np.pi*ix*(22/tp)) * 0.1
    # voxel time series (signal and noise)
    voxel_ts = signal + csf + wm
    # a 4d matrix with 2 identical timeseries
    img_data = np.array([[[voxel_ts, voxel_ts]]])
    # make a nifti image
    img = nib.Nifti1Image(img_data, np.eye(4))
    # save the nifti image
    img.to_filename(str(deriv_bold))

    return deriv_bold 
Example 11
Project: fullrmc   Author: bachiraoun   File: Collection.py    License: GNU Affero General Public License v3.0
def smooth(data, winLen=11, window='hanning', check=False):
    """
    Smooth 1D data using window function and length.

    :Parameters:
        #. data (numpy.ndarray): the 1D numpy data.
        #. winLen (integer): the smoothing window length.
        #. window (str): The smoothing window type. Can be anything among
           'flat', 'hanning', 'hamming', 'bartlett' and 'blackman'.
        #. check (boolean): whether to check arguments before smoothing data.

    :Returns:
        #. smoothed (numpy.ndarray): the smoothed 1D data array.
    """
    if check:
        assert isinstance(data, np.ndarray), LOGGER.error("data must be numpy.ndarray instance")
        assert len(data.shape)==1, LOGGER.error("data must be of 1 dimensions")
        assert is_integer(winLen), LOGGER.error("winLen must be an integer")
        winLen = int(winLen)
        assert winLen>=3, LOGGER.error("winLen must be >= 3")
        assert data.size > winLen, LOGGER.error("data needs to be bigger than window size.")
        assert window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman'], LOGGER.error("window must be any of ('flat', 'hanning', 'hamming', 'bartlett', 'blackman')")
    # compute smoothed data
    s=np.r_[data[winLen-1:0:-1],data,data[-1:-winLen:-1]]
    if window == 'flat': #moving average
        w=np.ones(winLen,'d')
    else:
        w=eval('np.'+window+'(winLen)')
    S=np.convolve(w/w.sum(),s, mode='valid')
    # get data and return
    f = winLen//2  # integer division so the slice indices are ints
    t = f-winLen+1
    return S[f:t] 
Example 12
Project: lirpg   Author: Hwhitetooth   File: plot.py    License: MIT License
def smooth_reward_curve(x, y):
    halfwidth = int(np.ceil(len(x) / 60))  # Halfwidth of our smoothing convolution
    k = halfwidth
    xsmoo = x
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
        mode='same')
    return xsmoo, ysmoo 
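Dividing by a second convolution of all-ones corrects the edge bias of a plain mode='same' moving average: near the boundaries fewer samples fall inside the window, and the denominator counts exactly how many did. A minimal illustration on a constant signal (values chosen arbitrarily):

import numpy as np

y = np.full(10, 5.0)                     # constant signal
k = 2
win = np.ones(2 * k + 1)

plain = np.convolve(y, win, mode='same')                  # smaller sums near the edges
counts = np.convolve(np.ones_like(y), win, mode='same')   # how many samples each sum actually used
normalized = plain / counts                               # flat again: exactly 5.0 everywhere

assert not np.allclose(plain / win.sum(), y)   # naive averaging is biased at the edges
assert np.allclose(normalized, y)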
Example 13
Project: HardRLWithYoutube   Author: MaxSobolMark   File: plot.py    License: MIT License
def smooth_reward_curve(x, y):
    halfwidth = int(np.ceil(len(x) / 60))  # Halfwidth of our smoothing convolution
    k = halfwidth
    xsmoo = x
    ysmoo = np.convolve(y, np.ones(2 * k + 1), mode='same') / np.convolve(np.ones_like(y), np.ones(2 * k + 1),
        mode='same')
    return xsmoo, ysmoo 
Example 14
Project: OpenTrader   Author: OpenTrading   File: OTPpnAmgc.py    License: GNU Lesser General Public License v3.0
def nSMA(values, window):
    weights = np.repeat(1.0, window)/window
    smas = np.convolve(values, weights, 'valid')
    return smas # as a numpy array

########EMA CALC ADDED############ 
Example 15
Project: OpenTrader   Author: OpenTrading   File: OTPpnAmgc.py    License: GNU Lesser General Public License v3.0
def ExpMovingAverage(values, window):
    weights = np.exp(np.linspace(-1., 0., window))
    weights /= weights.sum()
    a =  np.convolve(values, weights, mode='full')[:len(values)]
    a[:window] = a[window]  # backfill the warm-up region with the first full-window value
    return a 
Example 16
Project: NeuroKit   Author: neuropsychology   File: ecg_findpeaks.py    License: MIT License
def _ecg_findpeaks_promac_convolve(signal, peaks, sampling_rate=1000):
    x = np.zeros(len(signal))
    x[peaks] = 1

    # A typical QRS complex spans roughly 100 ms, hence sd = sampling_rate / 10
    sd = sampling_rate / 10
    shape = scipy.stats.norm.pdf(np.linspace(-sd * 4, sd * 4, num=int(sd * 8)), loc=0, scale=sd)

    return np.convolve(x, shape, "same")  # Return convolved


# =============================================================================
# NeuroKit
# ============================================================================= 
Example 17
Project: NeuroKit   Author: neuropsychology   File: signal_smooth.py    License: MIT License
def _signal_smoothing(signal, kernel="boxcar", size=5):

    # Get window.
    size = int(size)
    window = scipy.signal.get_window(kernel, size)
    w = window / window.sum()

    # Extend signal edges to avoid boundary effects.
    x = np.concatenate((signal[0] * np.ones(size), signal, signal[-1] * np.ones(size)))

    # Compute moving average.
    smoothed = np.convolve(w, x, mode="same")
    smoothed = smoothed[size:-size]
    return smoothed 
Example 18
Project: NeuroKit   Author: neuropsychology   File: eog_simulate.py    License: MIT License
def _eog_simulate_blink(sampling_rate=1000, length=None, method="scr", parameters="default"):
    """Simulate a canonical blink from vertical EOG.

    Recommended parameters:

    - For method 'scr': ``[3.644, 0.422, 0.356, 0.943]``
    - For method 'gamma': ``[2.659, 5.172, 0.317]``

    Examples
    --------
    >>> blink_scr = _eog_simulate_blink(sampling_rate=100,
    ...                                 method='scr',
    ...                                 parameters=[3.644, 0.422, 0.356, 0.943])
    >>> blink_gamma = _eog_simulate_blink(sampling_rate=100,
    ...                                   method='gamma',
    ...                                   parameters=[2.659, 5.172, 0.317])
    >>> nk.signal_plot([blink_scr, blink_gamma], sampling_rate=100)

    """
    if length is None:
        length = int(sampling_rate)

    x = np.linspace(0, 10, num=length)

    if method.lower() == "scr":
        if isinstance(parameters, str):
            parameters = [3.644, 0.422, 0.356, 0.943]
        gt = np.exp(-((x - parameters[0]) ** 2) / (2 * parameters[1] ** 2))
        ht = np.exp(-x / parameters[2]) + np.exp(-x / parameters[3])

        ft = np.convolve(gt, ht)
        ft = ft[0 : len(x)]
        y = ft / np.max(ft)

    else:
        if isinstance(parameters, str):
            parameters = [2.659, 5.172, 0.317]
        gamma = scipy.stats.gamma.pdf(x, a=parameters[1], loc=parameters[0], scale=parameters[2])
        y = gamma / np.max(gamma)
    return y 
Example 19
Project: NeuroKit   Author: neuropsychology   File: script.py    License: MIT License
def fit_scr(x, time_peak, rise, decay1, decay2):
    x = nk.rescale(x, to=[0, 10])
    gt = np.exp(-((x - time_peak) ** 2) / (2 * rise ** 2))
    ht = np.exp(-x / decay1) + np.exp(-x / decay2)

    ft = np.convolve(gt, ht)
    ft = ft[0 : len(x)]
    y = ft / np.max(ft)
    return y

# Starting parameters 
Example 20
Project: Self-Driving-Car-Demo   Author: llSourcell   File: plotting.py    License: MIT License
def movingaverage(y, window_size):
    """
    Moving average function from:
    http://stackoverflow.com/questions/11352047/finding-moving-average-from-data-points-in-python
    """
    window = np.ones(int(window_size))/float(window_size)
    return np.convolve(y, window, 'same') 
Example 21
Project: pydiogment   Author: SuperKogito   File: augf.py    License: BSD 3-Clause "New" or "Revised" License
def convolve(infile, ir_fname, level=0.5):
    """
    Apply convolution to infile using the given impulse response file.

    Args:
        - infile   (str) : input filename/path.
        - ir_fname (str) : name of impulse response file.
        - level  (float) : can be between 0 and 1, default value = 0.5
    """
    # read input file
    fs1, x = read_file(filename=infile)
    x = np.copy(x)

    # change the path below for the sounds folder
    _, ir = read_file(filename=ir_fname)

    # apply convolution
    y = np.convolve(x, ir, 'full')[0:x.shape[0]] * level + x * (1 - level)

    # normalize
    y /= np.mean(np.abs(y))

    # export data to file
    output_file_path = os.path.dirname(infile)
    name_attribute = "_augmented_{0}_convolved_with_level_{1}.wav".format(os.path.basename(ir_fname.split(".")[0]),
                                                                          level)
    write_file(output_file_path=output_file_path,
               input_file_name=infile,
               name_attribute=name_attribute,
               sig=y,
               fs=fs1) 
Example 22
Project: pyqmc   Author: WagnerGroup   File: reblock.py    License: MIT License
def test_reblocking():
    """
        Tests reblocking against known distribution.
    """
    from scipy.stats import sem

    def corr_data(N, L):
        """
            Creates correlated data. Taken from 
            https://pyblock.readthedocs.io/en/latest/tutorial.html.
        """
        return np.convolve(np.random.randn(2 ** N), np.ones(2 ** L) / 10, "same")

    n = 11
    cols = ["test_data1", "test_data2"]
    dat1 = corr_data(n, 4)
    dat2 = corr_data(n, 7)
    test_data = pd.DataFrame(data={cols[0]: dat1, cols[1]: dat2})
    reblocked_data = optimally_reblocked(test_data[cols])
    for c in cols:
        row = reblocked_data.loc[c]
        reblocks = reblocked_data["reblocks"].values[0]
        std_err = sem(reblock_by2(test_data, reblocks, c))
        std_err_err = std_err / np.sqrt(2 * (2 ** (n - reblocks) - 1))

        assert np.isclose(
            row["mean"], np.mean(test_data[c]), 1e-10, 1e-12
        ), "Means are not equal"
        assert np.isclose(
            row["standard error"], std_err, 1e-10, 1e-12
        ), "Standard errors are not equal"
        assert np.isclose(
            row["standard error error"], std_err_err, 1e-10, 1e-12
        ), "Standard error errors are not equal"

    statlist = ["mean", "sem", lambda x: x.sem() / np.sqrt(2 * (len(x) - 1))]
    rb1 = reblock(test_data, len(test_data) // 4).agg(statlist).T
    rb2 = reblock_by2(test_data, 2).agg(statlist).T
    for c in rb1.columns:
        assert np.isclose(rb1[c], rb2[c], 1e-10, 1e-12).all(), (c, rb1[c], rb2[c]) 
Example 23
Project: python-control   Author: python-control   File: input_element_int_test.py    License: BSD 3-Clause "New" or "Revised" License
def test_tf_den_with_numpy_int_element(self):
        num = 1
        den = np.convolve([1, 2, 1], [1, 1, 1])

        sys = ctl.tf(num, den)

        self.assertAlmostEqual(1.0, ctl.dcgain(sys)) 
Example 24
Project: python-control   Author: python-control   File: input_element_int_test.py    License: BSD 3-Clause "New" or "Revised" License
def test_tf_num_with_numpy_int_element(self):
        num = np.convolve([1], [1, 1])
        den = np.convolve([1, 2, 1], [1, 1, 1])

        sys = ctl.tf(num, den)

        self.assertAlmostEqual(1.0, ctl.dcgain(sys))

    # currently these pass 
Example 25
Project: python-control   Author: python-control   File: input_element_int_test.py    License: BSD 3-Clause "New" or "Revised" License
def test_tf_input_with_int_element_works(self):
        num = 1
        den = np.convolve([1.0, 2, 1], [1, 1, 1])

        sys = ctl.tf(num, den)

        self.assertAlmostEqual(1.0, ctl.dcgain(sys)) 
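In these three tests np.convolve is used to multiply polynomial coefficient vectors: convolving [1, 2, 1] (s^2 + 2s + 1) with [1, 1, 1] (s^2 + s + 1) yields the coefficients of the product denominator. A quick cross-check against np.polymul, which performs the same 1-D coefficient multiplication:

import numpy as np

den = np.convolve([1, 2, 1], [1, 1, 1])
# (s**2 + 2*s + 1) * (s**2 + s + 1) = s**4 + 3*s**3 + 4*s**2 + 3*s + 1
assert np.array_equal(den, [1, 3, 4, 3, 1])
assert np.array_equal(den, np.polymul([1, 2, 1], [1, 1, 1]))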
Example 26
Project: pulse2percept   Author: pulse2percept   File: convolution.py    License: BSD 3-Clause "New" or "Revised" License
def _sparseconv(data, kernel, mode):
    """Returns the discrete, linear convolution of two 1D sequences

    This function returns the discrete, linear convolution of two
    one-dimensional sequences, where the length of the output is determined
    by ``mode``.
    Can run faster than ``np.convolve`` if:
    (1) ``data`` is much longer than ``kernel``
    (2) ``data`` is sparse (has lots of zeros)
    """
    # NOTE Numba 0.44 has trouble with jitting nested functions when they
    # raise exceptions, so we don't raise ValueError here.
    kernel_len = kernel.size
    data_len = data.size
    out = np.zeros(data_len + kernel_len - 1)

    pos = np.where(data.ravel() != 0)[0]
    # Add shifted and scaled copies of `kernel` only where `data` is nonzero
    for p in pos:
        out[p:p + kernel_len] = (out[p:p + kernel_len] +
                                 kernel.ravel() * data.ravel()[p])
    if mode.lower() == 'full':
        return out
    elif mode.lower() == 'valid':
        return center_vector(out, data_len - kernel_len + 1)
    elif mode.lower() == 'same':
        return center_vector(out, data_len) 
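A self-contained sketch of the same shifted-and-scaled-copies idea, restricted to 'full' mode so it can be checked directly against np.convolve (the project's center_vector helper and the Numba jitting are left out here):

import numpy as np

def sparse_conv_full(data, kernel):
    """Full-mode convolution that only visits the nonzero entries of data."""
    out = np.zeros(data.size + kernel.size - 1)
    for p in np.flatnonzero(data):
        out[p:p + kernel.size] += kernel * data[p]
    return out

rng = np.random.default_rng(0)
data = np.zeros(1000)
data[rng.integers(0, 1000, size=20)] = rng.standard_normal(20)   # mostly zeros
kernel = np.hanning(25)

assert np.allclose(sparse_conv_full(data, kernel), np.convolve(data, kernel, mode='full'))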
Example 27
Project: sumo-rl   Author: LucasAlegre   File: result_plot.py    License: MIT License
def movingaverage(interval, window_size):
    window= np.ones(int(window_size))/float(window_size)
    return np.convolve(interval, window, 'same') 
Example 28
Project: sumo-rl   Author: LucasAlegre   File: plot.py    License: MIT License
def moving_average(interval, window_size):
    if window_size == 1:
        return interval
    window = np.ones(int(window_size))/float(window_size)
    return np.convolve(interval, window, 'same') 
Example 29
Project: HRV   Author: pickus91   File: panTompkins.py    License: MIT License
def findPeaks(ECG_movavg):
    """finds peaks in Integration Waveform by smoothing, locating zero crossings, and moving average amplitude thresholding"""
    #smoothing
    N = 15
    ECG_movavg_smooth = np.convolve(ECG_movavg, np.ones((N,)) / N, mode = 'same')    
    #signal derivative    
    sigDeriv = np.diff(ECG_movavg_smooth)     
    #find location of zero-crossings
    zeroCross = []
    for i in range(len(sigDeriv) - 1):
        if sigDeriv[i] > 0 and sigDeriv[i + 1] < 0:
            zeroCross.append(i)
    
    return np.array(zeroCross) 
Example 30
Project: recruit   Author: Frank-qlu   File: test_numeric.py    License: Apache License 2.0
def test_object(self):
        d = [1.] * 100
        k = [1.] * 3
        assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))