Python numpy.r_ Examples

The following are 30 code examples showing how to use numpy.r_. Note that numpy.r_ is an indexing object, not a function: it is used with square brackets (np.r_[...]) rather than called with parentheses. The examples are extracted from open source projects; the project, author, file and license are listed above each example.


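Before the examples, a minimal sketch of the basic semantics (illustrative values of my own, not from any project below): np.r_ concatenates its arguments along the first axis, translates slice notation into ranges, and accepts an optional leading directive string.

import numpy as np

# Concatenate scalars and arrays along the first axis.
np.r_[1, 2, np.array([3, 4]), 5]    # array([1, 2, 3, 4, 5])

# Slice notation becomes a range; a complex "step" means number of points.
np.r_[0:5]                          # array([0, 1, 2, 3, 4])
np.r_[0:1:5j]                       # array([0.  , 0.25, 0.5 , 0.75, 1.  ])

# An optional leading string sets the concatenation axis and the minimum
# dimensionality of the entries (see also Examples 14 and 15 below).
np.r_['0,2', [1, 2, 3], [4, 5, 6]]  # array([[1, 2, 3], [4, 5, 6]])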

Example 1
Project: sgd-influence   Author: sato9hara   File: DataModule.py    License: MIT License
def load(self):
        categories = ['comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware']
        newsgroups_train = fetch_20newsgroups(
            subset='train', remove=('headers', 'footers', 'quotes'), categories=categories)
        newsgroups_test = fetch_20newsgroups(
            subset='test', remove=('headers', 'footers', 'quotes'), categories=categories)
        vectorizer = TfidfVectorizer(stop_words='english', min_df=0.001, max_df=0.20)
        vectors = vectorizer.fit_transform(newsgroups_train.data)
        vectors_test = vectorizer.transform(newsgroups_test.data)
        x1 = vectors
        y1 = newsgroups_train.target
        x2 = vectors_test
        y2 = newsgroups_test.target
        x = np.array(np.r_[x1.todense(), x2.todense()])
        y = np.r_[y1, y2]
        return x, y 
Example 2
Project: fine-lm   Author: akzaidi   File: algorithmic.py    License: MIT License
def zipf_distribution(nbr_symbols, alpha):
  """Helper function: Create a Zipf distribution.

  Args:
    nbr_symbols: number of symbols to use in the distribution.
    alpha: float, Zipf's Law distribution parameter. For modelling
      natural text, alpha is usually in the range [1.1, 1.6].

  Returns:
    distr_map: list of float, the cumulative Zipf distribution over
      nbr_symbols (length nbr_symbols + 1, starting at 0.0 and ending at 1.0).

  """
  tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
  zeta = np.r_[0.0, np.cumsum(tmp)]
  return [x / zeta[-1] for x in zeta] 
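Since the returned list is a cumulative distribution, one natural use (a hedged sketch of my own, not part of the project snippet; the names distr_map, draws and ids are illustrative) is inverse-transform sampling with np.searchsorted:

import numpy as np

# Assumes zipf_distribution from Example 2 is in scope.
distr_map = zipf_distribution(nbr_symbols=10, alpha=1.5)
draws = np.random.random_sample(5)       # uniform draws in [0, 1)
ids = np.searchsorted(distr_map, draws)  # symbol ids in [1, nbr_symbols]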
Example 3
Project: transferlearning   Author: jindongwang   File: KMM.py    License: MIT License
def fit(self, Xs, Xt):
        '''
        Fit source and target using KMM (compute the coefficients)
        :param Xs: ns * dim
        :param Xt: nt * dim
        :return: Coefficients (Pt / Ps) value vector (Beta in the paper)
        '''
        ns = Xs.shape[0]
        nt = Xt.shape[0]
        if self.eps is None:
            self.eps = self.B / np.sqrt(ns)
        K = kernel(self.kernel_type, Xs, None, self.gamma)
        kappa = np.sum(kernel(self.kernel_type, Xs, Xt, self.gamma) * float(ns) / float(nt), axis=1)

        K = matrix(K)
        kappa = matrix(kappa)
        G = matrix(np.r_[np.ones((1, ns)), -np.ones((1, ns)), np.eye(ns), -np.eye(ns)])
        h = matrix(np.r_[ns * (1 + self.eps), ns * (self.eps - 1), self.B * np.ones((ns,)), np.zeros((ns,))])

        sol = solvers.qp(K, -kappa, G, h)
        beta = np.array(sol['x'])
        return beta 
Example 4
Project: contextualbandits   Author: david-cortes   File: utils.py    License: BSD 2-Clause "Simplified" License
def get_batch(self, X, y):
        if self.curr == 0:
            self.add_obs(X, y)
            return X, y

        if (self.curr < self.n) and (isinstance(self.X_reserve, list)):
            if not self.has_sparse:
                old_X = np.concatenate(self.X_reserve, axis=0)
            else:
                old_X = sp_vstack(self.X_reserve)
            old_y = np.concatenate(self.y_reserve, axis=0)
        else:
            old_X = self.X_reserve[:self.curr].copy()
            old_y = self.y_reserve[:self.curr].copy()

        if X.shape[0] == 0:
            return old_X, old_y
        else:
            self.add_obs(X, y)

        if not issparse(old_X) and not issparse(X):
            return np.r_[old_X, X], np.r_[old_y, y]
        else:
            return sp_vstack([old_X, X]), np.r_[old_y, y] 
Example 5
Project: hgru4rec   Author: mquad   File: build_dataset.py    License: MIT License
def make_sessions(data, session_th=30 * 60, is_ordered=False, user_key='user_id', item_key='item_id', time_key='ts'):
    """Assigns session ids to the events in data without grouping keys"""
    if not is_ordered:
        # sort data by user and time
        data.sort_values(by=[user_key, time_key], ascending=True, inplace=True)
    # compute the time difference between queries
    tdiff = np.diff(data[time_key].values)
    # check which of them are bigger than session_th
    split_session = tdiff > session_th
    split_session = np.r_[True, split_session]
    # check where the user changes in the data
    new_user = data['user_id'].values[1:] != data['user_id'].values[:-1]
    new_user = np.r_[True, new_user]
    # a new session starts when at least one of the two conditions is verified
    new_session = np.logical_or(new_user, split_session)
    # compute the session ids
    session_ids = np.cumsum(new_session)
    data['session_id'] = session_ids
    return data 
Example 6
Project: Attentive-Filtering-Network   Author: jefflai108   File: kaldi_io.py    License: MIT License
def read_segments_as_bool_vec(segments_file):
  """ [ bool_vec ] = read_segments_as_bool_vec(segments_file)
   using kaldi 'segments' file for 1 wav, format : '<utt> <rec> <t-beg> <t-end>'
   - t-beg, t-end is in seconds,
   - assumed 100 frames/second,
  """
  segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
  # Sanity checks,
  assert(len(segs) > 0) # empty segmentation is an error,
  assert(len(np.unique([rec[1] for rec in segs ])) == 1) # segments with only 1 wav-file,
  # Convert time to frame-indexes,
  start = np.rint([100 * rec[2] for rec in segs]).astype(int)
  end = np.rint([100 * rec[3] for rec in segs]).astype(int)
  # Taken from 'read_lab_to_bool_vec', htk.py,
  frms = np.repeat(np.r_[np.tile([False,True], len(end)), False],
                   np.r_[np.c_[start - np.r_[0, end[:-1]], end-start].flat, 0])
  assert np.sum(end-start) == np.sum(frms)
  return frms 
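The last two lines above are a compact run-length decoding: np.c_ interleaves the gap lengths (each start minus the previous end) with the segment durations, and np.repeat expands the alternating False/True pattern accordingly. A toy illustration of the same trick (my own values, assuming frames are indexed from 0):

import numpy as np

# Two segments covering frames 2-3 and frame 6 (end-exclusive bounds).
start = np.array([2, 6])
end = np.array([4, 7])
values = np.r_[np.tile([False, True], len(end)), False]  # gap/segment pairs + tail
lengths = np.r_[np.c_[start - np.r_[0, end[:-1]], end - start].flat, 0]
frms = np.repeat(values, lengths)
# frms -> [False False True True False False True]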
Example 7
Project: recruit   Author: Frank-qlu   File: test_utils.py    License: Apache License 2.0
def test_float64_pass(self):
        # The number of units of least precision
        # In this case, use a few places above the lowest level (ie nulp=1)
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]

        # Addition
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

        # Subtraction
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp) 
Example 8
Project: recruit   Author: Frank-qlu   File: test_utils.py    License: Apache License 2.0
def test_complex128_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        # The test condition needs to be at least a factor of sqrt(2) smaller
        # because the real and imaginary parts both change
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp) 
Example 9
Project: recruit   Author: Frank-qlu   File: test_utils.py    License: Apache License 2.0
def test_complex64_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j

        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp) 
Example 10
Project: ibllib   Author: int-brain-lab   File: ephys_fpga.py    License: MIT License
def _audio_events_extraction(audio_t, audio_fronts):
    """
    From detected fronts on the audio sync traces, outputs the synchronisation events
    related to the tone-in events (ready tone and error tone).

    :param audio_t: numpy vector containing times of fronts
    :param audio_fronts: numpy vector containing polarity of fronts (1 rise, -1 fall)
    :return: numpy arrays t_ready_tone_in, t_error_tone_in
    """
    # make sure that there are no 2 consecutive fall or consecutive rise events
    assert(np.all(np.abs(np.diff(audio_fronts)) == 2))
    # take only even time differences: ie. from rising to falling fronts
    dt = np.diff(audio_t)[::2]
    # detect ready tone by length below 110 ms
    i_ready_tone_in = np.r_[np.where(dt <= 0.11)[0] * 2]
    t_ready_tone_in = audio_t[i_ready_tone_in]
    # error tones are events lasting between 400 ms and 1.2 s
    i_error_tone_in = np.where(np.logical_and(0.4 < dt, dt < 1.2))[0] * 2
    t_error_tone_in = audio_t[i_error_tone_in]
    return t_ready_tone_in, t_error_tone_in 
Example 11
Project: ibllib   Author: int-brain-lab   File: simplest_raster_plot.py    License: MIT License
def raster_complete(R, times, Clusters):
    '''
    Plot a rasterplot for the complete recording
    (might be slow, restrict R if so),
    ordered by insertion depth
    '''

    plt.imshow(R, aspect='auto', cmap='binary', vmax=T_BIN / 0.001 / 4,
               origin='lower', extent=np.r_[times[[0, -1]], Clusters[[0, -1]]])

    plt.xlabel('Time (s)')
    plt.ylabel('Cluster #; ordered by depth')
    plt.show()

    # plt.savefig('/home/mic/Rasters/%s.svg' %(trial_number))
    # plt.close('all')
    plt.tight_layout() 
Example 12
Project: typhon   Author: atmtools   File: __init__.py    License: MIT License
def trapz_inte_edge(y, x):
    """ Trapezoidal integration including edge grids

    Parameters:
        y: Array of y-axis value
        x: Array of x-axis value

        Returns:
        Area corresponded to each y (or x) value
            For Example,
             Area corresponded to at y_n is
            ..math:
                0.5*y_n ((x_{n} - x_{n-1}) + (x_{n+1} - x_{n}))
             Area corresponded to at y_0 (start point(edge)) is
            ..math:
                0.5*y_0(x_{1} - x_{0})
    """
    weight_x_0 = 0.5 * (x[1] - x[0])
    weight_x_f = 0.5 * (x[-1] - x[-2])
    weight_x_n = 0.5 * (x[1:-1] - x[:-2]) + 0.5 * (x[2:] - x[1:-1])
    weight_x = np.r_[weight_x_0, weight_x_n, weight_x_f]
    return weight_x*y 
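As a quick sanity check (my own sketch, not part of typhon): summing the weighted values reproduces the ordinary trapezoidal rule, since each interior point receives half of its two adjacent intervals.

import numpy as np

x = np.linspace(0.0, np.pi, 50)
y = np.sin(x)
# Assumes trapz_inte_edge from Example 12 is in scope.
assert np.isclose(trapz_inte_edge(y, x).sum(), np.trapz(y, x))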
Example 13
Project: sprocket   Author: k2kobayashi   File: test_f0stats.py    License: MIT License
def test_estimate_F0statistics(self):
        f0stats = F0statistics()
        orgf0s = []
        for i in range(1, 4):
            orgf0s.append(200 * np.r_[np.random.rand(100 * i), np.zeros(100)])
        orgf0stats = f0stats.estimate(orgf0s)

        tarf0s = []
        for i in range(1, 8):
            tarf0s.append(300 * np.r_[np.random.rand(100 * i), np.zeros(100)])
        tarf0stats = f0stats.estimate(tarf0s)

        orgf0 = 200 * np.r_[np.random.rand(100 * i), np.zeros(100)]
        cvf0 = f0stats.convert(orgf0, orgf0stats, tarf0stats)

        assert len(orgf0) == len(cvf0) 
Example 14
Project: buzzard   Author: airware   File: _footprint.py    License: Apache License 2.0
def extent(self):
        """Get the Footprint's extent (`x` then `y`)

        Example
        -------
        >>> minx, maxx, miny, maxy = fp.extent
        >>> plt.imshow(arr, extent=fp.extent)

        fp.extent from fp.bounds using numpy fancy indexing

        >>> minx, maxx, miny, maxy = fp.bounds[[0, 2, 1, 3]]
        """
        points = np.r_["1,0,2", self.coords]
        return np.asarray([
            points[:, 0].min(), points[:, 0].max(),
            points[:, 1].min(), points[:, 1].max(),
        ]) 
Example 15
Project: buzzard   Author: airware   File: _footprint.py    License: Apache License 2.0
def bounds(self):
        """Get the Footprint's bounds (`min` then `max`)

        Example
        -------
        >>> minx, miny, maxx, maxy = fp.bounds

        fp.bounds from fp.extent using numpy fancy indexing

        >>> minx, miny, maxx, maxy = fp.extent[[0, 2, 1, 3]]
        """
        points = np.r_["1,0,2", self.coords]
        return np.asarray([
            points[:, 0].min(), points[:, 1].min(),
            points[:, 0].max(), points[:, 1].max(),
        ]) 
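A note on the "1,0,2" directive used in the two examples above: when the first argument to np.r_ is a string of up to three comma-separated integers, they give the concatenation axis, the minimum number of dimensions to force entries to, and which axis should contain the start of upgraded 1-D arrays. A small sketch of the documented forms (illustrative values, not from buzzard):

import numpy as np

# '0,2': concatenate along axis 0, forcing entries to at least 2-D (rows).
np.r_['0,2', [1, 2, 3], [4, 5, 6]]    # array([[1, 2, 3], [4, 5, 6]])

# '1,2,0': concatenate along axis 1, upgrading 1-D entries to columns.
np.r_['1,2,0', [1, 2, 3], [4, 5, 6]]  # array([[1, 4], [2, 5], [3, 6]])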
Example 16
Project: buzzard   Author: airware   File: _a_gdal_raster.py    License: Apache License 2.0
def get_data(self, fp, channel_ids, dst_nodata, interpolation):
        samplefp = self.build_sampling_footprint(fp, interpolation)
        if samplefp is None:
            return np.full(
                np.r_[fp.shape, len(channel_ids)],
                dst_nodata,
                self.dtype
            )
        with self.acquire_driver_object() as gdal_ds:
            array = self.sample_bands_driver(samplefp, channel_ids, gdal_ds)
        array = self.remap(
            samplefp,
            fp,
            array=array,
            mask=None,
            src_nodata=self.nodata,
            dst_nodata=dst_nodata,
            mask_mode='erode',
            interpolation=interpolation,
        )
        array = array.astype(self.dtype, copy=False)
        return array 
Example 17
Project: buzzard   Author: airware   File: _a_gdal_raster.py    License: Apache License 2.0
def sample_bands_driver(self, fp, channel_ids, gdal_ds):
        rtlx, rtly = self.fp.spatial_to_raster(fp.tl)
        assert rtlx >= 0 and rtlx < self.fp.rsizex, '{} >= 0 and {} < {}'.format(rtlx, rtlx, self.fp.rsizex)
        assert rtly >= 0 and rtly < self.fp.rsizey, '{} >= 0 and {} < {}'.format(rtly, rtly, self.fp.rsizey)

        dstarray = np.empty(np.r_[fp.shape, len(channel_ids)], self.dtype)
        for i, channel_id in enumerate(channel_ids):
            gdal_band = gdal_ds.GetRasterBand(channel_id + 1)
            success, payload = GDALErrorCatcher(gdal_band.ReadAsArray, none_is_error=True)(
                int(rtlx),
                int(rtly),
                int(fp.rsizex),
                int(fp.rsizey),
                buf_obj=dstarray[..., i],
            )
            if not success: # pragma: no cover
                raise ValueError('Could not read array (gdal error: `{}`)'.format(
                    payload[1]
                ))
        return dstarray

    # set_data implementation ******************************************************************* ** 
Example 18
Project: buzzard   Author: airware   File: reader.py    License: Apache License 2.0
def _create_work_job(self, qi, prod_idx, cache_fp, path):
        if prod_idx not in self._sample_array_per_prod_tile[qi]:
            # Allocate sample array
            # If no interpolation or nodata conversion is necessary, this is the array that will be
            # returned in the output queue
            full_sample_fp = qi.prod[prod_idx].sample_fp
            self._sample_array_per_prod_tile[qi][prod_idx] = np.empty(
                np.r_[full_sample_fp.shape, len(qi.unique_channel_ids)],
                self._raster.dtype,
            )
            self._raster.debug_mngr.event(
                'object_allocated',
                self._sample_array_per_prod_tile[qi][prod_idx]
            )
            self._missing_cache_fps_per_prod_tile[qi][prod_idx] = set(qi.prod[prod_idx].cache_fps)

        dst_array = self._sample_array_per_prod_tile[qi][prod_idx]
        return Work(self, qi, prod_idx, cache_fp, path, dst_array) 
Example 19
Project: deep-learning-note   Author: wdxtub   File: util.py    License: MIT License
def smooth_curve(x):
    """用于使损失函数的图形变圆滑
    参考:http://glowingpython.blogspot.jp/2012/02/convolution-with-numpy.html
    """
    window_len = 11
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    w = np.kaiser(window_len, 2)
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y[5:len(y)-5] 
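The np.r_ line above pads the signal with mirrored copies of its ends so that the 'valid' convolution covers the whole input; y[5:len(y)-5] then trims the (window_len - 1) / 2 = 5 extra samples on each side. A minimal sketch of the padding itself (my own toy values):

import numpy as np

x = np.arange(6)  # [0 1 2 3 4 5]
window_len = 4
padded = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
# padded -> [3 2 1 | 0 1 2 3 4 5 | 5 4 3]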
Example 20
Project: fullrmc   Author: bachiraoun   File: Collection.py    License: GNU Affero General Public License v3.0
def smooth(data, winLen=11, window='hanning', check=False):
    """
    Smooth 1D data using window function and length.

    :Parameters:
        #. data (numpy.ndarray): the 1D numpy data.
        #. winLen (integer): the smoothing window length.
        #. window (str): The smoothing window type. Can be anything among
           'flat', 'hanning', 'hamming', 'bartlett' and 'blackman'.
        #. check (boolean): whether to check arguments before smoothing data.

    :Returns:
        #. smoothed (numpy.ndarray): the smoothed 1D data array.
    """
    if check:
        assert isinstance(data, np.ndarray), LOGGER.error("data must be a numpy.ndarray instance")
        assert len(data.shape)==1, LOGGER.error("data must be 1-dimensional")
        assert is_integer(winLen), LOGGER.error("winLen must be an integer")
        winLen = int(winLen)
        assert winLen>=3, LOGGER.error("winLen must be at least 3")
        assert data.size > winLen, LOGGER.error("data needs to be bigger than the window size.")
        assert window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman'], LOGGER.error("window must be any of ('flat', 'hanning', 'hamming', 'bartlett', 'blackman')")
    # compute smoothed data; pad both ends with mirrored copies of the signal
    s=np.r_[data[winLen-1:0:-1],data,data[-1:-winLen:-1]]
    if window == 'flat': # moving average
        w=np.ones(winLen,'d')
    else:
        w=getattr(np, window)(winLen)
    S=np.convolve(w/w.sum(),s, mode='valid')
    # trim the padding and return
    f = winLen//2
    t = f-winLen+1
    return S[f:t] 
Example 21
Project: contextualbandits   Author: david-cortes   File: online.py    License: BSD 2-Clause "Simplified" License
def _drop_ix(self, drop_ix):
        if self.choice_names is None:
            self.choice_names = np.arange(self.nchoices)
        self.nchoices -= 1
        self.choice_names = np.r_[self.choice_names[:drop_ix], self.choice_names[drop_ix + 1:]]
        if isinstance(self, _ActivePolicy):
            if isinstance(self._get_grad_norms, list):
                self._get_grad_norms = self._get_grad_norms[:drop_ix] + self._get_grad_norms[drop_ix + 1:]
            if isinstance(self._rand_grad_norms, list):
                self._rand_grad_norms = self._rand_grad_norms[:drop_ix] + self._rand_grad_norms[drop_ix + 1:]

    ## TODO: maybe add functionality to take an arm from another object of this class 
Example 22
Project: contextualbandits   Author: david-cortes   File: online.py    License: BSD 2-Clause "Simplified" License
def _append_arm(self, arm_name, f_grad_norm, case_one_class):
        if self.choice_names is not None:
            self.choice_names = np.r_[self.choice_names, np.array(arm_name).reshape(-1)]
        if f_grad_norm is not None:
            self._get_grad_norms.append(f_grad_norm)
        if case_one_class is not None:
            self._rand_grad_norms.append(case_one_class)
        self.nchoices += 1 
Example 23
Project: pywr   Author: pywr   File: test_parameters.py    License: GNU General Public License v3.0
def test_uniform_drawdown_profile(self, simple_linear_model):
        """Test the uniform drawn profile over a leap year and non-leap year."""

        m = simple_linear_model
        m.timestepper.start = '2015-04-01'
        m.timestepper.end = '2017-04-01'

        expected_values = np.r_[
            np.linspace(1, 1/366, 366),  # This period covers Apr-2015 to Apr-2016 (i.e. 366 days)
            np.linspace(1, 1/365, 365),  # This period covers Apr-2016 to Apr-2017 (i.e. 365 days)
            np.linspace(1, 1/365, 365),  # This period covers Apr-2017 to Apr-2018 (i.e. 365 days)
        ]

        data = {
            'type': 'uniformdrawdownprofile',
            "reset_day": 1,
            "reset_month": 4
        }

        p = load_parameter(m, data)

        @assert_rec(m, p)
        def expected_func(timestep, scenario_index):
            return expected_values[timestep.index]

        m.run() 
Example 24
Project: deep_sort   Author: nwojke   File: kalman_filter.py    License: GNU General Public License v3.0
def initiate(self, measurement):
        """Create track from unassociated measurement.

        Parameters
        ----------
        measurement : ndarray
            Bounding box coordinates (x, y, a, h) with center position (x, y),
            aspect ratio a, and height h.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector (8 dimensional) and covariance matrix (8x8
            dimensional) of the new track. Unobserved velocities are initialized
            to 0 mean.

        """
        mean_pos = measurement
        mean_vel = np.zeros_like(mean_pos)
        mean = np.r_[mean_pos, mean_vel]

        std = [
            2 * self._std_weight_position * measurement[3],
            2 * self._std_weight_position * measurement[3],
            1e-2,
            2 * self._std_weight_position * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            10 * self._std_weight_velocity * measurement[3],
            1e-5,
            10 * self._std_weight_velocity * measurement[3]]
        covariance = np.diag(np.square(std))
        return mean, covariance 
Example 25
Project: deep_sort   Author: nwojke   File: kalman_filter.py    License: GNU General Public License v3.0
def predict(self, mean, covariance):
        """Run Kalman filter prediction step.

        Parameters
        ----------
        mean : ndarray
            The 8 dimensional mean vector of the object state at the previous
            time step.
        covariance : ndarray
            The 8x8 dimensional covariance matrix of the object state at the
            previous time step.

        Returns
        -------
        (ndarray, ndarray)
            Returns the mean vector and covariance matrix of the predicted
            state. Unobserved velocities are initialized to 0 mean.

        """
        std_pos = [
            self._std_weight_position * mean[3],
            self._std_weight_position * mean[3],
            1e-2,
            self._std_weight_position * mean[3]]
        std_vel = [
            self._std_weight_velocity * mean[3],
            self._std_weight_velocity * mean[3],
            1e-5,
            self._std_weight_velocity * mean[3]]
        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))

        mean = np.dot(self._motion_mat, mean)
        covariance = np.linalg.multi_dot((
            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov

        return mean, covariance 
Example 26
def world_frame(self):
        """ position returns a 3x6 matrix
            where row is [x, y, z] column is m1 m2 m3 m4 origin h
            """
        origin = self.state[0:3]
        quat = Quaternion(self.state[6:10])
        rot = quat.as_rotation_matrix()
        wHb = np.r_[np.c_[rot,origin], np.array([[0, 0, 0, 1]])]
        quadBodyFrame = params.body_frame.T
        quadWorldFrame = wHb.dot(quadBodyFrame)
        world_frame = quadWorldFrame[0:3]
        return world_frame 
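The np.r_[np.c_[...]] line above is a common block-matrix idiom: border a 3x3 rotation with a translation column, then append the homogeneous [0, 0, 0, 1] row. A standalone sketch (illustrative values of my own):

import numpy as np

rot = np.eye(3)                     # placeholder rotation matrix
origin = np.array([1.0, 2.0, 3.0])  # translation
wHb = np.r_[np.c_[rot, origin], np.array([[0, 0, 0, 1]])]
# wHb.shape -> (4, 4), a homogeneous body-to-world transform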
Example 27
def update(self, dt, F, M):
        # limit thrust and Moment
        L = params.arm_length
        r = params.r
        prop_thrusts = params.invA.dot(np.r_[np.array([[F]]), M])
        prop_thrusts_clamped = np.maximum(np.minimum(prop_thrusts, params.maxF/4), params.minF/4)
        F = np.sum(prop_thrusts_clamped)
        M = params.A[1:].dot(prop_thrusts_clamped)
        self.state = integrate.odeint(self.state_dot, self.state, [0,dt], args = (F, M))[1] 
Example 28
Project: NeuroKit   Author: neuropsychology   File: ecg_delineate.py    License: MIT License
def _dwt_compute_multiscales(ecg: np.ndarray, max_degree):
    """Return multiscales wavelet transforms."""

    def _apply_H_filter(signal_i, power=0):
        zeros = np.zeros(2 ** power - 1)
        timedelay = 2 ** power
        banks = np.r_[
            1.0 / 8, zeros, 3.0 / 8, zeros, 3.0 / 8, zeros, 1.0 / 8,
        ]
        signal_f = scipy.signal.convolve(signal_i, banks, mode="full")
        signal_f[:-timedelay] = signal_f[timedelay:]  # timeshift: 2 steps
        return signal_f

    def _apply_G_filter(signal_i, power=0):
        zeros = np.zeros(2 ** power - 1)
        timedelay = 2 ** power
        banks = np.r_[2, zeros, -2]
        signal_f = scipy.signal.convolve(signal_i, banks, mode="full")
        signal_f[:-timedelay] = signal_f[timedelay:]  # timeshift: 1 step
        return signal_f

    dwtmatr = []
    intermediate_ret = np.array(ecg)
    for deg in range(max_degree):
        S_deg = _apply_G_filter(intermediate_ret, power=deg)
        T_deg = _apply_H_filter(intermediate_ret, power=deg)
        dwtmatr.append(S_deg)
        intermediate_ret = np.array(T_deg)
    dwtmatr = [arr[: len(ecg)] for arr in dwtmatr]  # rescale transforms to the same length
    return np.array(dwtmatr)


# =============================================================================
# WAVELET METHOD (CWT)
# ============================================================================= 
Example 29
Project: hgru4rec   Author: mquad   File: hgru4rec.py    License: MIT License
def preprocess_data(self, data):
        # sort by user, session and time key
        data.sort_values([self.user_key, self.session_key, self.time_key], inplace=True)
        data.reset_index(drop=True, inplace=True)
        offset_session = np.r_[0, data.groupby([self.user_key, self.session_key], sort=False).size().cumsum()[:-1]]
        user_indptr = np.r_[0, data.groupby(self.user_key, sort=False)[self.session_key].nunique().cumsum()[:-1]]
        return user_indptr, offset_session 
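Both lines above use the same np.r_[0, sizes.cumsum()[:-1]] idiom: turning per-group sizes into CSR-style start offsets. A standalone sketch (my own toy values):

import numpy as np

sizes = np.array([3, 1, 2])               # e.g. events per session
offsets = np.r_[0, sizes.cumsum()[:-1]]   # -> [0 3 4]
# offsets[k] is the row index where group k begins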
Example 30
Project: connecting_the_dots   Author: autonomousvision   File: metric.py    License: MIT License
def get(self):
    tps = np.array(self.tps).astype(np.float32)
    fps = np.array(self.fps).astype(np.float32)
    fns = np.array(self.fns).astype(np.float32)
    tns = np.array(self.tns).astype(np.float32)
    wp = self.thresholds

    ret = {}

    precisions = np.divide(tps, tps + fps, out=np.zeros_like(tps), where=tps + fps != 0)
    recalls = np.divide(tps, tps + fns, out=np.zeros_like(tps), where=tps + fns != 0) # tprs
    fprs = np.divide(fps, fps + tns, out=np.zeros_like(tps), where=fps + tns != 0)

    precisions = np.r_[0, precisions, 1]
    recalls = np.r_[1, recalls, 0]
    fprs = np.r_[1, fprs, 0]

    ret['auc'] = float(-np.trapz(recalls, fprs))
    ret['prauc'] = float(-np.trapz(precisions, recalls))
    ret['ap'] = float(-(np.diff(recalls) * precisions[:-1]).sum())

    accuracies = np.divide(tps + tns, tps + tns + fps + fns)
    aacc = np.mean(accuracies)
    for t in np.linspace(0,1,num=11)[1:-1]:
      idx = np.argmin(np.abs(t - wp))
      ret[f'acc{wp[idx]:.2f}'] = float(accuracies[idx])

    return ret