Python numpy.fabs() Examples

The following are 30 code examples of numpy.fabs(), drawn from open-source projects. Each example links to its original project and source file. You may also want to check out all available functions/classes of the module numpy, or try the search function.
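As a quick, standalone refresher before the examples (not taken from any of the projects below): numpy.fabs computes the element-wise absolute value and always returns floating-point output, and unlike numpy.abs it does not accept complex input.

import numpy as np

x = np.array([-1.5, 2, -3])
print(np.fabs(x))                  # [1.5 2.  3. ] -- float result, even for integer input
print(np.abs(np.array([3 - 4j])))  # [5.] -- complex magnitudes need np.abs; np.fabs raises TypeError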
Example #1
Source File: shape_fitter.py    From ms_deisotope with Apache License 2.0
def find_right_intersect(vec, target_val, start_index=0):
        nearest_index = start_index
        next_index = start_index

        size = len(vec) - 1
        if next_index == size:
            return size

        next_val = vec[next_index]
        best_distance = np.abs(next_val - target_val)
        while (next_index < size):
            next_index += 1
            next_val = vec[next_index]
            dist = np.fabs(next_val - target_val)  # pylint: disable=assignment-from-no-return
            if dist < best_distance:
                best_distance = dist
                nearest_index = next_index
            if next_index == size or next_val < target_val:
                break
        return nearest_index 
Example #2
Source File: reportableqty.py    From pyGSTi with Apache License 2.0
def absdiff(self, constant_value, separate_re_im=False):
        """
        Returns a ReportableQty that is the (element-wise in the vector case)
        difference between `constant_value` and this one given by:

        `abs(self - constant_value)`.
        """
        if separate_re_im:
            re_v = _np.fabs(_np.real(self.value) - _np.real(constant_value))
            im_v = _np.fabs(_np.imag(self.value) - _np.imag(constant_value))
            if self.has_eb():
                return (ReportableQty(re_v, _np.fabs(_np.real(self.errbar)), self.nonMarkovianEBs),
                        ReportableQty(im_v, _np.fabs(_np.imag(self.errbar)), self.nonMarkovianEBs))
            else:
                return ReportableQty(re_v), ReportableQty(im_v)

        else:
            v = _np.absolute(self.value - constant_value)
            if self.has_eb():
                return ReportableQty(v, _np.absolute(self.errbar), self.nonMarkovianEBs)
            else:
                return ReportableQty(v) 
Example #3
Source File: test_datetimelike.py    From recruit with Apache License 2.0
def test_irreg_hf(self):
        idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        irreg = df.iloc[[0, 1, 3, 4]]
        _, ax = self.plt.subplots()
        irreg.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()

        sec = 1. / 24 / 60 / 60
        assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()

        _, ax = self.plt.subplots()
        df2 = df.copy()
        df2.index = df.index.astype(object)
        df2.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
        assert (np.fabs(diffs[1:] - sec) < 1e-8).all() 
Example #4
Source File: intensity_measures.py    From gmpe-smtk with GNU Affero General Public License v3.0
def rotipp(acceleration_x, time_step_x, acceleration_y, time_step_y, periods,
        percentile, damping=0.05, units="cm/s/s", method="Nigam-Jennings"):
    """
    Returns the rotationally independent spectrum RotIpp as defined by
    Boore (2010)
    """
    if np.fabs(time_step_x - time_step_y) > 1E-10:
        raise ValueError("Record pair must have the same time-step!")
    acceleration_x, acceleration_y = equalise_series(acceleration_x,
                                                     acceleration_y)
    target, rota, rotv, rotd, angles = rotdpp(acceleration_x, time_step_x,
                                              acceleration_y, time_step_y,
                                              periods, percentile, damping,
                                              units, method)
    locn, penalty = _get_gmrotd_penalty(
        np.hstack([target["PGA"],target["Pseudo-Acceleration"]]),
        rota)
    target_theta = np.radians(angles[locn])
    arotpp = acceleration_x * np.cos(target_theta) +\
        acceleration_y * np.sin(target_theta)
    spec = get_response_spectrum(arotpp, time_step_x, periods, damping, units,
        method)[0]
    spec["GMRot{:2.0f}".format(percentile)] = target
    return spec 
Example #5
Source File: sca.py    From NiaPy with MIT License
def nextPos(self, x, x_b, r1, r2, r3, r4, task):
		r"""Move individual to new position in search space.

		Args:
			x (numpy.ndarray): Individual represented with components.
			x_b (numpy.ndarray): Best individual represented with components.
			r1 (float): Number dependent on algorithm iteration/generations.
			r2 (float): Random number in range [0, 2 * pi].
			r3 (float): Random number in range [Rmin, Rmax].
			r4 (float): Random number in range [0, 1].
			task (Task): Optimization task.

		Returns:
			numpy.ndarray: New individual that is moved based on individual ``x``.
		"""
		return task.repair(x + r1 * (sin(r2) if r4 < 0.5 else cos(r2)) * fabs(r3 * x_b - x), self.Rand) 
Example #6
Source File: norms.py    From vnpy_crypto with MIT License
def rho(self, z):
        r"""
        The robust criterion function for Huber's t.

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        rho : array
            rho(z) = .5*z**2            for \|z\| <= t

            rho(z) = \|z\|*t - .5*t**2    for \|z\| > t
        """
        z = np.asarray(z)
        test = self._subset(z)
        return (test * 0.5 * z**2 +
                (1 - test) * (np.fabs(z) * self.t - 0.5 * self.t**2)) 
Example #7
Source File: norms.py    From vnpy_crypto with MIT License
def rho(self, z):
        r"""
        The robust criterion function for Ramsay's Ea.

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        rho : array
            rho(z) = a**-2 * (1 - exp(-a*\|z\|)*(1 + a*\|z\|))
        """
        z = np.asarray(z)
        return (1 - np.exp(-self.a * np.fabs(z)) *
                (1 + self.a * np.fabs(z))) / self.a**2 
Example #8
Source File: norms.py    From vnpy_crypto with MIT License
def psi(self, z):
        """
        The psi function for Ramsay's Ea estimator

        The analytic derivative of rho

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        psi : array
            psi(z) = z*exp(-a*\|z\|)
        """
        z = np.asarray(z)
        return z * np.exp(-self.a * np.fabs(z)) 
Example #9
Source File: norms.py    From vnpy_crypto with MIT License
def weights(self, z):
        """
        Ramsay's Ea weighting function for the IRLS algorithm

        The psi function scaled by z

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        weights : array
            weights(z) = exp(-a*\|z\|)
        """

        z = np.asarray(z)
        return np.exp(-self.a * np.fabs(z)) 
Example #10
Source File: mixed.py    From vnpy_crypto with MIT License
def cont(self, ML=False, rtol=1.0e-05, params_rtol=1e-5, params_atol=1e-4):
        '''convergence check for iterative estimation

        '''

        self.dev, old = self.deviance(ML=ML), self.dev

        #self.history.append(np.hstack((self.dev, self.a)))
        self.history['llf'].append(self.dev)
        self.history['params'].append(self.a.copy())
        self.history['D'].append(self.D.copy())

        if np.fabs((self.dev - old) / self.dev) < rtol:   #why is there times `*`?
            #print np.fabs((self.dev - old)), self.dev, old
            self.termination = 'llf'
            return False

        #break if parameters converged
        #TODO: check termination conditions, OR or AND
        if np.all(np.abs(self.a - self._a_old) < (params_rtol * self.a + params_atol)):
            self.termination = 'params'
            return False

        self._a_old =  self.a.copy()
        return True 
Example #11
Source File: test_datetimelike.py    From vnpy_crypto with MIT License
def test_irreg_hf(self):
        idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        irreg = df.iloc[[0, 1, 3, 4]]
        _, ax = self.plt.subplots()
        irreg.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()

        sec = 1. / 24 / 60 / 60
        assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()

        _, ax = self.plt.subplots()
        df2 = df.copy()
        df2.index = df.index.astype(object)
        df2.plot(ax=ax)
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
        assert (np.fabs(diffs[1:] - sec) < 1e-8).all() 
Example #12
Source File: nanops.py    From vnpy_crypto with MIT License
def _wrap_results(result, dtype):
    """ wrap our results if needed """

    if is_datetime64_dtype(dtype):
        if not isinstance(result, np.ndarray):
            result = tslib.Timestamp(result)
        else:
            result = result.view(dtype)
    elif is_timedelta64_dtype(dtype):
        if not isinstance(result, np.ndarray):

            # raise if we have a timedelta64[ns] which is too large
            if np.fabs(result) > _int64_max:
                raise ValueError("overflow in timedelta operation")

            result = tslib.Timedelta(result, unit='ns')
        else:
            result = result.astype('i8').view(dtype)

    return result 
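The overflow guard above compares np.fabs(result) against the int64 maximum; np.fabs is a natural fit here because it always promotes to floating point, whereas np.abs preserves integer dtype. A minimal standalone illustration of that dtype difference (not part of pandas):

import numpy as np

i = np.int64(-2**62)
print(np.abs(i).dtype)   # int64  -- stays integer
print(np.fabs(i).dtype)  # float64 -- promoted to float, so a comparison with 2**63 - 1 cannot wrap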
Example #13
Source File: kitti_evaluation.py    From Attentional-PointNet with GNU General Public License v3.0
def lidar_to_img(points, img_size):
    # pdb.set_trace()
    lidar_data = np.array(points[:, :2])
    lidar_data *= 9.9999
    lidar_data -= (0.5 * img_size, 0.5 * img_size)
    lidar_data = np.fabs(lidar_data)
    lidar_data = lidar_data.astype(np.int32)
    lidar_data = np.reshape(lidar_data, (-1, 2))
    lidar_img = np.zeros((img_size, img_size))
    lidar_img[tuple(lidar_data.T)] = 255
    return torch.tensor(lidar_img).cuda()


# def lidar_to_img(points, img_size):
#     # pdb.set_trace()
#     lidar_data = points[:, :2]
#     lidar_data *= 9.9999
#     lidar_data -= torch.tensor((0.5 * img_size, 0.5 * img_size)).cuda()
#     lidar_data = torch.abs(lidar_data)
#     lidar_data = torch.floor(lidar_data).long()
#     lidar_data = lidar_data.view(-1, 2)
#     lidar_img = torch.zeros((img_size, img_size)).cuda()
#     lidar_img[lidar_data.permute(1,0)] = 255
#     return lidar_img 
Example #14
Source File: kitti_LidarImg_data_generator.py    From Attentional-PointNet with GNU General Public License v3.0
def lidar_to_heightmap(points, img_size):
    # pdb.set_trace()
    lidar_data = np.array(points[:, :2])
    height_data = np.array(points[:,2])
    height_data *= 255/2
    height_data[height_data < 0] = 0
    height_data[height_data > 255] = 255
    height_data = np.fabs(height_data)
    height_data = height_data.astype(np.int32)


    lidar_data *= 9.9999
    lidar_data -= (0.5 * img_size, 0.5 * img_size)
    lidar_data = np.fabs(lidar_data)
    lidar_data = lidar_data.astype(np.int32)
    lidar_data = np.reshape(lidar_data, (-1, 2))
    lidar_img = np.zeros((img_size, img_size))
    lidar_img[tuple(lidar_data.T)] = height_data # TODO: sort the point wrt height first lex sort
    return lidar_img 
Example #15
Source File: test_case.py    From graphics with Apache License 2.0
def _max_error(arrays1, arrays2):
  """Computes maximum elementwise gap between two lists of ndarrays.

  Computes the maximum elementwise gap between two lists with the same length,
  of arrays with the same shape.

  Args:
    arrays1: a list of np.ndarrays.
    arrays2: a list of np.ndarrays of the same shape as arrays1.

  Returns:
    The maximum elementwise absolute difference between the two lists of arrays.
  """
  error = 0
  for array1, array2 in zip(arrays1, arrays2):
    if array1.size or array2.size:  # Handle zero size ndarrays correctly
      error = np.maximum(error, np.fabs(array1 - array2).max())
  return error 
Example #16
Source File: scale.py    From vnpy_crypto with MIT License
def __call__(self, df_resid, nobs, resid):
        h = (df_resid)/nobs*(self.d**2 + (1-self.d**2)*\
                    Gaussian.cdf(self.d)-.5 - self.d/(np.sqrt(2*np.pi))*\
                    np.exp(-.5*self.d**2))
        s = mad(resid)
        subset = lambda x: np.less(np.fabs(resid/x),self.d)
        chi = lambda s: subset(s)*(resid/s)**2/2+(1-subset(s))*(self.d**2/2)
        scalehist = [np.inf,s]
        niter = 1
        while (np.abs(scalehist[niter-1] - scalehist[niter])>self.tol \
                and niter < self.maxiter):
            nscale = np.sqrt(1/(nobs*h)*np.sum(chi(scalehist[-1]))*\
                    scalehist[-1]**2)
            scalehist.append(nscale)
            niter += 1
            #if niter == self.maxiter:
            #    raise ValueError("Huber's scale failed to converge")
        return scalehist[-1] 
Example #17
Source File: norms.py    From vnpy_crypto with MIT License
def weights(self, z):
        """
        Huber's t weighting function for the IRLS algorithm

        The psi function scaled by z

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        weights : array
            weights(z) = 1          for \|z\| <= t

            weights(z) = t/\|z\|      for \|z\| > t
        """
        z = np.asarray(z)
        test = self._subset(z)
        absz = np.fabs(z)
        absz[test] = 1.0
        return test + (1 - test) * self.t / absz 
Example #18
Source File: test_plotting.py    From Computable with MIT License
def test_irreg_hf(self):
        import matplotlib.pyplot as plt
        fig = plt.gcf()
        plt.clf()
        fig.add_subplot(111)

        idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
        df = DataFrame(np.random.randn(len(idx), 2), idx)

        irreg = df.ix[[0, 1, 3, 4]]
        ax = irreg.plot()
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()

        sec = 1. / 24 / 60 / 60
        self.assert_((np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all())

        plt.clf()
        fig.add_subplot(111)
        df2 = df.copy()
        df2.index = df.index.asobject
        ax = df2.plot()
        diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
        self.assert_((np.fabs(diffs[1:] - sec) < 1e-8).all()) 
Example #19
Source File: norms.py    From vnpy_crypto with MIT License
def weights(self, z):
        """
        Hampel weighting function for the IRLS algorithm

        The psi function scaled by z

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        weights : array
            weights(z) = 1                            for \|z\| <= a

            weights(z) = a/\|z\|                        for a < \|z\| <= b

            weights(z) = a*(c - \|z\|)/(\|z\|*(c-b))      for b < \|z\| <= c

            weights(z) = 0                            for \|z\| > c

        """
        z = np.asarray(z)
        a = self.a; b = self.b; c = self.c
        t1, t2, t3 = self._subset(z)
        v = (t1 +
            t2 * a/np.fabs(z) +
            t3 * a*(c-np.fabs(z))/(np.fabs(z)*(c-b)))
        v[np.where(np.isnan(v))]=1. # for some reason 0 returns a nan?
        return v 
Example #20
Source File: scale.py    From vnpy_crypto with MIT License
def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median):
    # c \approx .6745
    """
    The Median Absolute Deviation along given axis of an array

    Parameters
    ----------
    a : array-like
        Input array.
    c : float, optional
        The normalization constant.  Defined as scipy.stats.norm.ppf(3/4.),
        which is approximately .6745.
    axis : int, optional
        The default is 0. Can also be None.
    center : callable or float
        If a callable is provided, such as the default `np.median` then it
        is expected to be called center(a). The axis argument will be applied
        via np.apply_over_axes. Otherwise, provide a float.

    Returns
    -------
    mad : float
        `mad` = median(abs(`a` - center))/`c`
    """
    a = np.asarray(a)
    if callable(center):
        center = np.apply_over_axes(center, a, axis)
    return np.median((np.fabs(a-center))/c, axis=axis) 
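As a quick sanity check of the scaling constant, the MAD of standard-normal samples divided by c ≈ 0.6745 should come out near 1. A hedged usage sketch, using scipy.stats.norm directly in place of the statsmodels Gaussian alias used above:

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
a = rng.standard_normal(100_000)
c = norm.ppf(0.75)                                  # ~0.6745, same constant as above
print(np.median(np.fabs(a - np.median(a))) / c)     # ~1.0 for standard normal data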
Example #21
Source File: norms.py    From vnpy_crypto with MIT License
def rho(self, z):
        r"""
        The robust criterion function for Hampel's estimator

        Parameters
        ----------
        z : array-like
            1d array

        Returns
        -------
        rho : array
            rho(z) = (1/2.)*z**2                    for \|z\| <= a

            rho(z) = a*\|z\| - 1/2.*a**2              for a < \|z\| <= b

            rho(z) = a*(c*\|z\|-(1/2.)*z**2)/(c-b)    for b < \|z\| <= c

            rho(z) = a*(b + c - a)                  for \|z\| > c
        """

        z = np.fabs(z)
        a = self.a; b = self.b; c = self.c
        t1, t2, t3 = self._subset(z)
        v = (t1 * z**2 * 0.5 +
             t2 * (a * z - a**2 * 0.5) +
             t3 * (a * (c * z - z**2 * 0.5) / (c - b) - 7 * a**2 / 6.) +
             (1 - t1 - t2 - t3) * a * (b + c - a))  # indicator for the |z| > c region
        return v 
Example #22
Source File: norms.py    From vnpy_crypto with MIT License
def _subset(self, z):
        """
        Hampel's function is defined piecewise over the range of z
        """
        z = np.fabs(np.asarray(z))
        t1 = np.less_equal(z, self.a)
        t2 = np.less_equal(z, self.b) * np.greater(z, self.a)
        t3 = np.less_equal(z, self.c) * np.greater(z, self.b)
        return t1, t2, t3 
Example #23
Source File: intensity_measures.py    From gmpe-smtk with GNU Affero General Public License v3.0
def get_peak_measures(time_step, acceleration, get_vel=False, 
    get_disp=False):
    """
    Returns the peak measures from acceleration, velocity and displacement
    time-series
    :param float time_step:
        Time step of acceleration time series in s
    :param numpy.ndarray acceleration:
        Acceleration time series
    :param bool get_vel:
        Choose to return (and therefore calculate) velocity (True) or not
        (False)
    :param bool get_disp:
        Choose to return (and therefore calculate) displacement (True) or not
        (False)
    :returns:
        * pga - Peak Ground Acceleration
        * pgv - Peak Ground Velocity
        * pgd - Peak Ground Displacement
        * velocity - Velocity Time Series
        * displacement - Displacement Time Series
    """
    pga = np.max(np.fabs(acceleration))
    velocity = None
    displacement = None
    # If displacement is not required then do not integrate to get
    # displacement time series
    if get_disp:
        get_vel = True
    if get_vel:
        velocity = time_step * cumtrapz(acceleration, initial=0.)
        pgv = np.max(np.fabs(velocity))
    else:
        pgv = None
    if get_disp:
        displacement = time_step * cumtrapz(velocity, initial=0.)
        pgd = np.max(np.fabs(displacement))
    else:
        pgd = None
    return pga, pgv, pgd, velocity, displacement 
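A small usage sketch of the same idea on a synthetic record (standalone numpy only; np.cumsum stands in for the cumtrapz integration used above, so the numbers are approximate):

import numpy as np

dt = 0.01
t = np.arange(0.0, 10.0, dt)
acc = 100.0 * np.sin(2.0 * np.pi * t)   # synthetic 1 Hz acceleration trace in cm/s/s
pga = np.max(np.fabs(acc))              # peak ground acceleration from |acceleration|
vel = np.cumsum(acc) * dt               # crude cumulative integration to velocity
pgv = np.max(np.fabs(vel))              # peak ground velocity
print(round(pga, 1), round(pgv, 2))     # roughly 100.0 and 31.83 (= 200 / (2 * pi))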
Example #24
Source File: gam.py    From vnpy_crypto with MIT License
def cont(self):
        '''condition to continue iteration loop

        Parameters
        ----------
        tol

        Returns
        -------
        cont : bool
            If true, then iteration should be continued.

        '''
        self.iter += 1 #moved here to always count, not necessary
        if DEBUG:
            print(self.iter, self.results.Y.shape)
            print(self.results.predict(self.exog).shape, self.weights.shape)
        curdev = (((self.results.Y - self.results.predict(self.exog))**2) * self.weights).sum()

        if self.iter > self.maxiter:  # kill it, no max iteration option
            return False
        if np.fabs((self.dev - curdev) / curdev) < self.rtol:
            self.dev = curdev
            return False

        #self.iter += 1
        self.dev = curdev
        return True 
Example #25
Source File: wfg.py    From pymoo with Apache License 2.0
def _transformation_param_deceptive(y, A=0.35, B=0.001, C=0.05):
    tmp1 = np.floor(y - A + B) * (1.0 - C + (A - B) / B) / (A - B)
    tmp2 = np.floor(A + B - y) * (1.0 - C + (1.0 - A - B) / B) / (1.0 - A - B)
    ret = 1.0 + (np.fabs(y - A) - B) * (tmp1 + tmp2 + 1.0 / B)
    return correct_to_01(ret)


# ---------------------------------------------------------------------------------------------------------
# REDUCTION
# --------------------------------------------------------------------------------------------------------- 
Example #26
Source File: test_replace.py    From vnpy_crypto with MIT License
def test_replace2(self):
        N = 100
        ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                        dtype=object)
        ser[:5] = np.nan
        ser[6:10] = 'foo'
        ser[20:30] = 'bar'

        # replace list with a single value
        rs = ser.replace([np.nan, 'foo', 'bar'], -1)

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -1).all()
        assert (rs[20:30] == -1).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values
        rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})

        assert (rs[:5] == -1).all()
        assert (rs[6:10] == -2).all()
        assert (rs[20:30] == -3).all()
        assert (pd.isna(ser[:5])).all()

        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
        tm.assert_series_equal(rs, rs2)

        # replace inplace
        ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
        assert (ser[:5] == -1).all()
        assert (ser[6:10] == -1).all()
        assert (ser[20:30] == -1).all() 
Example #27
Source File: wfg.py    From pymoo with Apache License 2.0
def _transformation_param_dependent(y, y_deg, A=0.98 / 49.98, B=0.02, C=50.0):
    aux = A - (1.0 - 2.0 * y_deg) * np.fabs(np.floor(0.5 - y_deg) + A)
    ret = np.power(y, B + (C - B) * aux)
    return correct_to_01(ret) 
Example #28
Source File: transform.py    From patch_linemod with BSD 2-Clause "Simplified" License
def angle_between_vectors(v0, v1, directed=True, axis=0):
    """Return angle between vectors.

    If directed is False, the input vectors are interpreted as undirected axes,
    i.e. the maximum angle is pi/2.

    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
    >>> numpy.allclose(a, math.pi)
    True
    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
    >>> numpy.allclose(a, 0)
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> a = angle_between_vectors(v0, v1)
    >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> a = angle_between_vectors(v0, v1, axis=1)
    >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
    True

    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
    dot = numpy.sum(v0 * v1, axis=axis)
    dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
    return numpy.arccos(dot if directed else numpy.fabs(dot)) 
Example #29
Source File: wfg.py    From pymoo with Apache License 2.0
def _transformation_shift_multi_modal(y, A, B, C):
    tmp1 = np.fabs(y - C) / (2.0 * (np.floor(C - y) + C))
    tmp2 = (4.0 * A + 2.0) * np.pi * (0.5 - tmp1)
    ret = (1.0 + np.cos(tmp2) + 4.0 * B * np.power(tmp1, 2.0)) / (B + 2.0)
    return correct_to_01(ret) 
Example #30
Source File: transform.py    From sixd_toolkit with MIT License
def angle_between_vectors(v0, v1, directed=True, axis=0):
    """Return angle between vectors.

    If directed is False, the input vectors are interpreted as undirected axes,
    i.e. the maximum angle is pi/2.

    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
    >>> numpy.allclose(a, math.pi)
    True
    >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
    >>> numpy.allclose(a, 0)
    True
    >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
    >>> v1 = [[3], [0], [0]]
    >>> a = angle_between_vectors(v0, v1)
    >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
    True
    >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
    >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
    >>> a = angle_between_vectors(v0, v1, axis=1)
    >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
    True

    """
    v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
    v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
    dot = numpy.sum(v0 * v1, axis=axis)
    dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
    return numpy.arccos(dot if directed else numpy.fabs(dot))