Python numpy.divide() Examples

The following are 30 code examples showing how to use numpy.divide(). They are extracted from open source projects; the project, author, file, and license are listed above each example.

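Before the project examples, here is a minimal, self-contained sketch of the numpy.divide() patterns that recur below: plain elementwise division, in-place division via out=, and guarded division via out=/where= to avoid divide-by-zero results. The array values are arbitrary and only for illustration.

import numpy as np

a = np.array([1.0, 4.0, 9.0])
b = np.array([2.0, 0.0, 3.0])

# Plain elementwise division (same as a / b); a zero denominator produces inf/nan and a warning.
with np.errstate(divide="ignore", invalid="ignore"):
    q = np.divide(a, b)                  # [0.5, inf, 3.0]

# In-place division: write the result into an existing buffer (see the MPI gradient averaging below).
buf = a.copy()
np.divide(buf, 2.0, out=buf)             # [0.5, 2.0, 4.5]

# Guarded division: only divide where the denominator is nonzero, leaving zeros elsewhere
# (see the axis-angle and precision/recall examples below).
safe = np.zeros_like(a)
np.divide(a, b, out=safe, where=b != 0)  # [0.5, 0.0, 3.0]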

Example 1
Project: sparse-subspace-clustering-python   Author: abhinav4192   File: SpectralClustering.py    License: MIT License
def SpectralClustering(CKSym, n):
    # This is a direct port of the JHU vision lab code. Could probably use sklearn's SpectralClustering.
    CKSym = CKSym.astype(float)
    N, _ = CKSym.shape
    MAXiter = 1000  # Maximum number of iterations for KMeans
    REPlic = 20  # Number of replications for KMeans

    DN = np.diag(np.divide(1, np.sqrt(np.sum(CKSym, axis=0) + np.finfo(float).eps)))
    LapN = identity(N).toarray().astype(float) - np.matmul(np.matmul(DN, CKSym), DN)
    _, _, vN = np.linalg.svd(LapN)
    vN = vN.T
    kerN = vN[:, N - n:N]
    normN = np.sqrt(np.sum(np.square(kerN), axis=1))
    kerNS = np.divide(kerN, normN.reshape(len(normN), 1) + np.finfo(float).eps)
    km = KMeans(n_clusters=n, n_init=REPlic, max_iter=MAXiter, n_jobs=-1).fit(kerNS)
    return km.labels_ 
Example 2
Project: HardRLWithYoutube   Author: MaxSobolMark   File: mpi_adam_optimizer.py    License: MIT License
def compute_gradients(self, loss, var_list, **kwargs):
        grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
        grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
        flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
        shapes = [v.shape.as_list() for g, v in grads_and_vars]
        sizes = [int(np.prod(s)) for s in shapes]

        num_tasks = self.comm.Get_size()
        buf = np.zeros(sum(sizes), np.float32)

        def _collect_grads(flat_grad):
            self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
            np.divide(buf, float(num_tasks), out=buf)
            return buf

        avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
        avg_flat_grad.set_shape(flat_grad.shape)
        avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
        avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
                    for g, (_, v) in zip(avg_grads, grads_and_vars)]

        return avg_grads_and_vars 
Example 3
Project: argus-freesound   Author: lRomul   File: tiles.py    License: MIT License
def merge(self, tiles: List[np.ndarray], dtype=np.float32):
        if len(tiles) != len(self.crops):
            raise ValueError

        channels = 1 if len(tiles[0].shape) == 2 else tiles[0].shape[2]
        target_shape = self.image_height + self.margin_bottom + self.margin_top, self.image_width + self.margin_right + self.margin_left, channels

        image = np.zeros(target_shape, dtype=np.float64)
        norm_mask = np.zeros(target_shape, dtype=np.float64)

        w = np.dstack([self.weight] * channels)

        for tile, (x, y, tile_width, tile_height) in zip(tiles, self.crops):
            # print(x, y, tile_width, tile_height, image.shape)
            image[y:y + tile_height, x:x + tile_width] += tile * w
            norm_mask[y:y + tile_height, x:x + tile_width] += w

        # print(norm_mask.min(), norm_mask.max())
        norm_mask = np.clip(norm_mask, a_min=np.finfo(norm_mask.dtype).eps, a_max=None)
        normalized = np.divide(image, norm_mask).astype(dtype)
        crop = self.crop_to_orignal_size(normalized)
        return crop 
Example 4
Project: speech_separation   Author: bill9800   File: utils.py    License: MIT License
def generate_cRM(Y,S):
    '''

    :param Y: mixed/noisy stft
    :param S: clean stft
    :return: structured cRM
    '''
    M = np.zeros(Y.shape)
    epsilon = 1e-8
    # real part
    M_real = np.multiply(Y[:,:,0],S[:,:,0])+np.multiply(Y[:,:,1],S[:,:,1])
    square_real = np.square(Y[:,:,0])+np.square(Y[:,:,1])
    M_real = np.divide(M_real,square_real+epsilon)
    M[:,:,0] = M_real
    # imaginary part
    M_img = np.multiply(Y[:,:,0],S[:,:,1])-np.multiply(Y[:,:,1],S[:,:,0])
    square_img = np.square(Y[:,:,0])+np.square(Y[:,:,1])
    M_img = np.divide(M_img,square_img+epsilon)
    M[:,:,1] = M_img
    return M 
Example 5
Project: speech_separation   Author: bill9800   File: utils.py    License: MIT License
def cRM_tanh_compress(M,K=10,C=0.1):
    '''
    Recall that the IRM takes on values in the range [0, 1]; compress the cRM with the hyperbolic tangent
    :param M: crm (298,257,2)
    :param K: parameter to control the compression
    :param C: parameter to control the compression
    :return crm: compressed crm
    '''

    numerator = 1-np.exp(-C*M)
    numerator[numerator == np.inf] = 1
    numerator[numerator == -np.inf] = -1
    denominator = 1+np.exp(-C*M)
    denominator[denominator == np.inf] = 1
    denominator[denominator == -np.inf] = -1
    crm = K * np.divide(numerator,denominator)

    return crm 
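As a sanity check on the docstring: algebraically, K * (1 - exp(-C*M)) / (1 + exp(-C*M)) equals K * tanh(C*M / 2), so the function is indeed a scaled hyperbolic-tangent compression. A quick numerical check (values chosen arbitrarily):

import numpy as np

M = np.linspace(-5, 5, 11)   # arbitrary cRM values
K, C = 10, 0.1
compressed = K * np.divide(1 - np.exp(-C * M), 1 + np.exp(-C * M))
# tanh(x) = (1 - e**(-2x)) / (1 + e**(-2x)), with x = C*M/2
assert np.allclose(compressed, K * np.tanh(C * M / 2))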
Example 6
Project: connecting_the_dots   Author: autonomousvision   File: geometry.py    License: MIT License
def axisangle_from_rotm(R):
  # logarithm of rotation matrix
  # R = R.reshape(-1,3,3)
  # tr = np.trace(R, axis1=1, axis2=2)
  # phi = np.arccos(np.clip((tr - 1) / 2, -1, 1))
  # scale = np.zeros_like(phi)
  # div = 2 * np.sin(phi)
  # np.divide(phi, div, out=scale, where=np.abs(div) > 1e-6)
  # A = (R - R.transpose(0,2,1)) * scale.reshape(-1,1,1)
  # aa = np.stack((A[:,2,1], A[:,0,2], A[:,1,0]), axis=1)
  # return aa.squeeze()
  R = R.reshape(-1,3,3)
  omega = np.empty((R.shape[0], 3), dtype=R.dtype)
  omega[:,0] = R[:,2,1] - R[:,1,2]
  omega[:,1] = R[:,0,2] - R[:,2,0]
  omega[:,2] = R[:,1,0] - R[:,0,1]
  r = np.linalg.norm(omega, axis=1).reshape(-1,1)
  t = np.trace(R, axis1=1, axis2=2).reshape(-1,1)
  omega = np.arctan2(r, t-1) * omega
  aa = np.zeros_like(omega)
  np.divide(omega, r, out=aa, where=r != 0)
  return aa.squeeze() 
Example 7, Example 8, Example 9
These three entries repeat the compute_gradients implementation already shown in Example 2, so the code is not duplicated here.
Example 10
Project: recruit   Author: Frank-qlu   File: test_ufunc.py    License: Apache License 2.0
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod,
            np.greater, np.greater_equal, np.less, np.less_equal,
            np.equal, np.not_equal]

        a = np.array('1')
        b = 1
        c = np.array([1., 2.])
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
            assert_raises(TypeError, f, c, a) 
Example 11
Project: recruit   Author: Frank-qlu   File: test_numeric.py    License: Apache License 2.0
def test_warnings(self):
        # test warning code path
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            with np.errstate(all="warn"):
                np.divide(1, 0.)
                assert_equal(len(w), 1)
                assert_("divide by zero" in str(w[0].message))
                np.array(1e300) * np.array(1e300)
                assert_equal(len(w), 2)
                assert_("overflow" in str(w[-1].message))
                np.array(np.inf) - np.array(np.inf)
                assert_equal(len(w), 3)
                assert_("invalid value" in str(w[-1].message))
                np.array(1e-300) * np.array(1e-300)
                assert_equal(len(w), 4)
                assert_("underflow" in str(w[-1].message)) 
Example 12
Project: recruit   Author: Frank-qlu   File: test_timedelta64.py    License: Apache License 2.0
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
        # GH#4521
        # divide/multiply by integers
        xbox = get_upcast_box(box_with_array, vector)

        tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
        vector = vector.astype(dtype)

        expected = Series(['1180 Days', '1770 Days', 'NaT'],
                          dtype='timedelta64[ns]')

        tdser = tm.box_expected(tdser, box_with_array)
        expected = tm.box_expected(expected, xbox)

        result = tdser * vector
        tm.assert_equal(result, expected)

        result = vector * tdser
        tm.assert_equal(result, expected) 
Example 13
Project: typhon   Author: atmtools   File: em.py    License: MIT License
def planck(f, T):
    """Calculate black body radiation for given frequency and temperature.

    Parameters:
        f (float or ndarray): Frequencies [Hz].
        T (float or ndarray): Temperature [K].

    Returns:
        float or ndarray: Radiances.

    """
    c = constants.speed_of_light
    h = constants.planck
    k = constants.boltzmann

    return 2 * h * f**3 / (c**2 * (np.exp(np.divide(h * f, (k * T))) - 1)) 
Example 14
Project: typhon   Author: atmtools   File: em.py    License: MIT License
def planck_wavelength(l, T):
    """Calculate black body radiation for given wavelength and temperature.

    Parameters:
        l (float or ndarray): Wavelength [m].
        T (float or ndarray): Temperature [K].

    Returns:
        float or ndarray: Radiances.

    """
    c = constants.speed_of_light
    h = constants.planck
    k = constants.boltzmann

    return 2 * h * c**2 / (l**5 * (np.exp(np.divide(h * c, (l * k * T))) - 1)) 
Example 15
Project: typhon   Author: atmtools   File: em.py    License: MIT License
def planck_wavenumber(n, T):
    """Calculate black body radiation for given wavenumber and temperature.

    Parameters:
        n (float or ndarray): Wavenumber.
        T (float or ndarray): Temperature [K].

    Returns:
        float or ndarray: Radiances.

    """
    c = constants.speed_of_light
    h = constants.planck
    k = constants.boltzmann

    return 2 * h * c**2 * n**3 / (np.exp(np.divide(h * c * n, (k * T))) - 1) 
Example 16
Project: typhon   Author: atmtools   File: em.py    License: MIT License
def rayleighjeans_wavelength(l, T):
    """Calculates the Rayleigh-Jeans approximation of the Planck function.

     Calculates the approximation of the Planck function for given
     wavelength and temperature.

     Parameters:
        l (float or ndarray): Wavelength [m].
        T (float or ndarray): Temperature [K].

     Returns:
        float or ndarray: Radiance [W/(m2*Hz*sr)].

    """
    c = constants.speed_of_light
    k = constants.boltzmann

    return np.divide(2 * c * k * T, l**4) 
Example 17
Project: Financial-NLP   Author: Coldog2333   File: NLP.py    License: Apache License 2.0
def unitvec(vector, ax=1):
    v=vector*vector
    if len(vector.shape)==1:
        sqrtv=np.sqrt(np.sum(v))
    elif len(vector.shape)==2:
        sqrtv=np.sqrt([np.sum(v, axis=ax)])
    else:
        raise Exception('It\'s too large.')
    if ax==1:
        result=np.divide(vector,sqrtv.T)
    elif ax==0:
        result=np.divide(vector,sqrtv)
    return result 
Example 18
Project: DOTA_models   Author: ringringyi   File: object_detection_evaluation_test.py    License: Apache License 2.0
def test_evaluate(self):
    (average_precision_per_class, mean_ap, precisions_per_class,
     recalls_per_class, corloc_per_class,
     mean_corloc) = self.od_eval.evaluate()
    expected_precisions_per_class = [np.array([0, 0.5], dtype=float),
                                     np.array([], dtype=float),
                                     np.array([0], dtype=float)]
    expected_recalls_per_class = [
        np.array([0, 1. / 3.], dtype=float), np.array([], dtype=float),
        np.array([0], dtype=float)
    ]
    expected_average_precision_per_class = np.array([1. / 6., 0, 0],
                                                    dtype=float)
    expected_corloc_per_class = np.array([0, np.divide(0, 0), 0], dtype=float)
    expected_mean_ap = 1. / 18
    expected_mean_corloc = 0.0
    for i in range(self.od_eval.num_class):
      self.assertTrue(np.allclose(expected_precisions_per_class[i],
                                  precisions_per_class[i]))
      self.assertTrue(np.allclose(expected_recalls_per_class[i],
                                  recalls_per_class[i]))
    self.assertTrue(np.allclose(expected_average_precision_per_class,
                                average_precision_per_class))
    self.assertTrue(np.allclose(expected_corloc_per_class, corloc_per_class))
    self.assertAlmostEqual(expected_mean_ap, mean_ap)
    self.assertAlmostEqual(expected_mean_corloc, mean_corloc) 
Example 19
Project: object_detector_app   Author: datitran   File: object_detection_evaluation_test.py    License: MIT License
def test_evaluate(self):
    (average_precision_per_class, mean_ap, precisions_per_class,
     recalls_per_class, corloc_per_class,
     mean_corloc) = self.od_eval.evaluate()
    expected_precisions_per_class = [np.array([0, 0.5], dtype=float),
                                     np.array([], dtype=float),
                                     np.array([0], dtype=float)]
    expected_recalls_per_class = [
        np.array([0, 1. / 3.], dtype=float), np.array([], dtype=float),
        np.array([0], dtype=float)
    ]
    expected_average_precision_per_class = np.array([1. / 6., 0, 0],
                                                    dtype=float)
    expected_corloc_per_class = np.array([0, np.divide(0, 0), 0], dtype=float)
    expected_mean_ap = 1. / 18
    expected_mean_corloc = 0.0
    for i in range(self.od_eval.num_class):
      self.assertTrue(np.allclose(expected_precisions_per_class[i],
                                  precisions_per_class[i]))
      self.assertTrue(np.allclose(expected_recalls_per_class[i],
                                  recalls_per_class[i]))
    self.assertTrue(np.allclose(expected_average_precision_per_class,
                                average_precision_per_class))
    self.assertTrue(np.allclose(expected_corloc_per_class, corloc_per_class))
    self.assertAlmostEqual(expected_mean_ap, mean_ap)
    self.assertAlmostEqual(expected_mean_corloc, mean_corloc) 
Example 20
Project: argus-freesound   Author: lRomul   File: tiles.py    License: MIT License
def compute_pyramid_patch_weight_loss(width, height) -> np.ndarray:
    """Compute a weight matrix that assigns bigger weight on pixels in center and
    less weight to pixels on image boundary.
    This weight matrix then used for merging individual tile predictions and helps dealing
    with prediction artifacts on tile boundaries.

    :param width: Tile width
    :param height: Tile height
    :return: Since-channel image [Width x Height]
    """
    xc = width * 0.5
    yc = height * 0.5
    xl = 0
    xr = width
    yb = 0
    yt = height
    Dc = np.zeros((width, height))
    De = np.zeros((width, height))

    for i in range(width):
        for j in range(height):
            Dc[i, j] = np.sqrt(np.square(i - xc + 0.5) + np.square(j - yc + 0.5))
            De_l = np.sqrt(np.square(i - xl + 0.5) + np.square(j - j + 0.5))
            De_r = np.sqrt(np.square(i - xr + 0.5) + np.square(j - j + 0.5))
            De_b = np.sqrt(np.square(i - i + 0.5) + np.square(j - yb + 0.5))
            De_t = np.sqrt(np.square(i - i + 0.5) + np.square(j - yt + 0.5))
            De[i, j] = np.min([De_l, De_r, De_b, De_t])

    alpha = (width * height) / np.sum(np.divide(De, np.add(Dc, De)))
    W = alpha * np.divide(De, np.add(Dc, De))
    return W, Dc, De 
Example 21
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __truediv__(self, other):
        if np.isscalar(other):
            other = Constant(other, "Constant({})".format(other))
            name = "Divide({},{})".format(self.name, other.name)
            return BinOp(np.divide, name)(self, other)
        assert isinstance(other, Node)
        name = "Divide({},{})".format(self.name, other.name)
        return BinOp(np.divide, name)(self, other) 
Example 22
Project: tensortrade   Author: tensortrade-org   File: node.py    License: Apache License 2.0
def __rtruediv__(self, other):
        if not np.isscalar(other):
            raise Exception("Invalid node operation.")
        other = Constant(other, "Constant({})".format(other))
        name = "Divide({},{})".format(other.name, self.name)
        return BinOp(np.divide, name)(other, self) 
Example 23
Project: ciftify   Author: edickie   File: ciftify_falff.py    License: MIT License
def calculate_falff(timeseries, min_low_freq, max_low_freq, min_total_freq, max_total_freq, calc_alff):
    ''' this will calculate falff from a timeseries'''

    n = len(timeseries)
    time = (np.arange(n))*2

    # Takes fast Fourier transform of timeseries
    fft_timeseries = fft(timeseries)
    # Calculates frequency scale
    freq_scale = np.fft.fftfreq(n, 1/1)

    # Calculates power of fft
    mag = (abs(fft_timeseries))**0.5

    # Finds low frequency range (0.01-0.08) and total frequency range (0.0-0.25)
    low_ind = np.where((float(min_low_freq) <= freq_scale) & (freq_scale <= float(max_low_freq)))
    total_ind = np.where((float(min_total_freq) <= freq_scale) & (freq_scale <= float(max_total_freq)))

    # Indexes power to low frequency index, total frequency range
    low_power = mag[low_ind]
    total_power = mag[total_ind]
    # Calculates sum of lower power and total power
    low_pow_sum = np.sum(low_power)
    total_pow_sum = np.sum(total_power)

    # Calculates alff as the sum of amplitudes within the low frequency range
    if calc_alff:
        calc = low_pow_sum
    # Calculates falff as the sum of power in the low frequency range divided by the sum of power in the total frequency range
    else:
        calc = np.divide(low_pow_sum, total_pow_sum)

    return calc 
Example 24
Project: NeuroKit   Author: neuropsychology   File: utils.py    License: MIT License
def _phi_divide(phi):
    if phi[0] == 0:
        return -np.inf
    division = np.divide(phi[1], phi[0])
    if division == 0:
        return np.inf
    return -np.log(division)


# =============================================================================
# Get Embedded
# ============================================================================= 
Example 25
Project: recordlinkage   Author: J535D165   File: string.py    License: BSD 3-Clause "New" or "Revised" License
def cosine_similarity(s1, s2, include_wb=True, ngram=(2, 2)):

    if len(s1) != len(s2):
        raise ValueError('Arrays or Series have to be same length.')

    if len(s1) == len(s2) == 0:
        return []

    # include word boundaries or not
    analyzer = 'char_wb' if include_wb is True else 'char'

    # The vectorizer
    vectorizer = CountVectorizer(
        analyzer=analyzer, strip_accents='unicode', ngram_range=ngram)

    data = s1.append(s2).fillna('')

    vec_fit = vectorizer.fit_transform(data)

    def _metric_sparse_cosine(u, v):

        a = np.sqrt(u.multiply(u).sum(axis=1))
        b = np.sqrt(v.multiply(v).sum(axis=1))

        ab = v.multiply(u).sum(axis=1)

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            m = np.divide(ab, np.multiply(a, b)).A1

        return m

    return _metric_sparse_cosine(vec_fit[:len(s1)], vec_fit[len(s1):]) 
Example 26
Project: speech_separation   Author: bill9800   File: utils.py    License: MIT License
def cRM_tanh_recover(O,K=10,C=0.1):
    '''

    :param O: predicted compressed crm
    :param K: parameter to control the compression
    :param C: parameter to control the compression
    :return M : uncompressed crm
    '''

    numerator = K-O
    denominator = K+O
    M = -np.multiply((1.0/C),np.log(np.divide(numerator,denominator)))

    return M 
Example 27
Project: speech_separation   Author: bill9800   File: utils.py    License: MIT License
def SNR(true_file,pred_file):
    T_true,_ = librosa.load(true_file,sr=16000)
    F_true = fast_stft(T_true)
    T_pred, _ = librosa.load(pred_file,sr=16000)
    F_pred = fast_stft(T_pred)
    F_inter = F_true - F_pred
    P_true = np.sum(np.square(F_true[:,:,0])+np.square(F_true[:,:,1]))
    P_inter = np.sum(np.square(F_inter[:,:,0])+np.square(F_inter[:,:,1]))
    return 10*np.log10(np.divide(P_true,P_inter)) 
Example 28
Project: connecting_the_dots   Author: autonomousvision   File: metric.py    License: MIT License
def get(self):
    tps = np.array(self.tps).astype(np.float32)
    fps = np.array(self.fps).astype(np.float32)
    fns = np.array(self.fns).astype(np.float32)
    tns = np.array(self.tns).astype(np.float32)
    wp = self.thresholds

    ret = {}

    precisions = np.divide(tps, tps + fps, out=np.zeros_like(tps), where=tps + fps != 0)
    recalls = np.divide(tps, tps + fns, out=np.zeros_like(tps), where=tps + fns != 0) # tprs
    fprs = np.divide(fps, fps + tns, out=np.zeros_like(tps), where=fps + tns != 0)

    precisions = np.r_[0, precisions, 1]
    recalls = np.r_[1, recalls, 0]
    fprs = np.r_[1, fprs, 0]

    ret['auc'] = float(-np.trapz(recalls, fprs))
    ret['prauc'] = float(-np.trapz(precisions, recalls))
    ret['ap'] = float(-(np.diff(recalls) * precisions[:-1]).sum())

    accuracies = np.divide(tps + tns, tps + tns + fps + fns)
    aacc = np.mean(accuracies)
    for t in np.linspace(0,1,num=11)[1:-1]:
      idx = np.argmin(np.abs(t - wp))
      ret[f'acc{wp[idx]:.2f}'] = float(accuracies[idx])

    return ret 
Example 29
Project: connecting_the_dots   Author: autonomousvision   File: geometry.py    License: MIT License
def rotm_from_axisangle(a):
  # exponential
  a = a.reshape(-1,3)
  phi = np.linalg.norm(a, axis=1).reshape(-1,1,1)
  iphi = np.zeros_like(phi)
  np.divide(1, phi, out=iphi, where=phi != 0)
  A = cross_prod_mat(a) * iphi
  R = np.eye(3, dtype=a.dtype) + np.sin(phi) * A + (1 - np.cos(phi)) * A @ A
  return R.squeeze() 
Example 30
Project: connecting_the_dots   Author: autonomousvision   File: geometry.py    License: MIT License
def axisangle_from_quat(q):
  q = q.reshape(-1,4)
  phi = 2 * np.arccos(q[:,0])
  denom = np.zeros_like(q[:,0])
  np.divide(1, np.sqrt(1 - q[:,0]**2), out=denom, where=q[:,0] != 1)
  axis = q[:,1:] * denom.reshape(-1,1)
  denom = np.linalg.norm(axis, axis=1).reshape(-1,1)
  a = np.zeros_like(axis)
  np.divide(phi.reshape(-1,1) * axis, denom, out=a, where=denom != 0)
  aa = a.astype(q.dtype)
  return aa.squeeze()