Python numpy.abs() Examples

The following code examples show how to use numpy.abs(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
Project: b2ac   Author: hbldh   File: fixed_point.py    MIT License 7 votes vote down vote up
def scale_64bit_matrix(v):
    """Right-shift a 64-bit integer matrix so its largest magnitude fits in ~24 bits.

    :param v: Integer matrix (numpy array) to scale.
    :return: Tuple ``(scaled_matrix, scale)`` where ``scale`` is the number
        of bits the matrix was shifted right (0 when no scaling is needed).
    """
    M = np.abs(v).max()
    if np.log2(M) <= 24:
        return v, 0
    # Each additional 8 bits of magnitude costs 8 more bits of shift.
    for bits, scale in ((32, 8), (40, 16), (48, 24), (56, 32)):
        if np.log2(M) <= bits:
            return v >> scale, scale
    return v >> 40, 40
Example 2
Project: b2ac   Author: hbldh   File: fixed_point.py    MIT License 6 votes vote down vote up
def scale_T_matrix(T_no_det, det_S3):
    """Right-shift the T matrix and determinant to keep magnitudes bounded.

    The shift grows by 8 bits for every 8 bits the largest magnitude of
    ``T_no_det`` exceeds 32 bits.

    :param T_no_det: Integer matrix (numpy array); shifted in place.
    :param det_S3: Integer determinant, scaled alongside the matrix.
    :return: Tuple ``(T_no_det >> scale, det_S3 >> scale)``.
    """
    M = np.abs(T_no_det).max()
    if np.log2(M) <= 32:
        scale = 0
    elif np.log2(M) <= 40:
        scale = 8
    elif np.log2(M) <= 48:
        scale = 16
    elif np.log2(M) <= 56:
        scale = 24
    else:
        scale = 32

    det_S3 >>= scale
    T_no_det >>= scale

    return T_no_det, det_S3
Example 3
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License 6 votes vote down vote up
def inv_digamma(y, eps=1e-8, max_iter=100):
    '''Numerical inverse to the digamma function by root finding'''

    # Initial guess: exponential regime for moderate/large y, reciprocal
    # regime for very negative y (standard two-piece approximation).
    if y < -2.22:
        current = -1 / (y - digamma(1))
    else:
        current = np.exp(y) + 0.5

    # Newton-Raphson on f(x) = digamma(x) - y; polygamma(1, x) is f'(x).
    for _ in range(max_iter):
        updated = current - (digamma(current) - y) / polygamma(1, current)
        if np.abs(current - updated) < eps:
            break
        current = updated

    return updated
Example 4
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test.py    MIT License 6 votes vote down vote up
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob
    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float, copy=False)

    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1
        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)

    rois = im_rois * scales[levels]

    return rois, levels 
Example 5
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: test_train.py    MIT License 6 votes vote down vote up
def _project_im_rois(im_rois, scales):
    """Project image RoIs into the image pyramid built by _get_image_blob.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        scales (list): scale factors as returned by _get_image_blob
    Returns:
        rois (ndarray): R x 4 matrix of projected RoI coordinates
        levels (list): image pyramid levels used by each projected RoI
    """
    im_rois = im_rois.astype(np.float, copy=False)

    if len(scales) > 1:
        widths = im_rois[:, 2] - im_rois[:, 0] + 1
        heights = im_rois[:, 3] - im_rois[:, 1] + 1
        areas = widths * heights
        scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
        diff_areas = np.abs(scaled_areas - 224 * 224)
        levels = diff_areas.argmin(axis=1)[:, np.newaxis]
    else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)

    rois = im_rois * scales[levels]

    return rois, levels 
Example 6
Project: kipet   Author: salvadorgarciamunoz   File: data_tools.py    GNU General Public License v3.0 6 votes vote down vote up
def rank(A, eps=1e-10):
    """ obtains the rank of a matrix based on SVD

        Args:
            A (numpy.ndarray, numpy.matrix or pandas.DataFrame): the matrix.
            eps (optional, float): the value of the singular values that
                            corresponds to 0 when smaller than eps.
                            Default = 1e-10

        Returns:
            rank (int): The rank of the matrix

    """
    # Removed leftover debug print(type(A)); both branches shared the same
    # SVD + threshold logic, so they are merged here.
    if isinstance(A, pd.core.frame.DataFrame):
        A = np.asarray(A)
    if isinstance(A, np.ndarray):  # np.matrix is an ndarray subclass
        s = np.linalg.svd(A, compute_uv=False)
        return int(np.count_nonzero(np.abs(s) > eps))
    raise RuntimeError("Must provide A as either numpy matrix or pandas dataframe")
Example 7
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def draw(self, ax, color, line_width=1, fillcolor=None, name=None, arrow=True, alpha=0.2, scale=50):
        """Draw this object's contour on a matplotlib axis, with an optional
        heading arrow or text label.

        Args:
            ax: matplotlib axes to draw into.
            color: edge color of the contour patch.
            line_width: contour line width.
            fillcolor: fill color of the patch (None for matplotlib default).
            name: text label; drawn only when provided or when arrow is False.
            arrow: if True, draw a direction arrow through the contour center.
            alpha: fill transparency of the contour patch.
            scale: nominal arrow size, normalized by the axis extent below.
        """
        ax.add_patch(PolygonPatch(self.contour, alpha=alpha, fc=fillcolor, ec=color, linewidth=line_width))

        # Drops the first coordinate — presumably the exterior ring's
        # duplicated closing vertex; TODO confirm against shapely usage.
        vertices = np.array(self.contour.exterior.coords)[1:]

        if arrow:
            # Arrow through the vertex centroid, aligned with the 1->2 edge.
            arrow_center = np.mean(vertices, axis=0)
            arrow_direction = (vertices[2] - vertices[1]) / 1.5
            arrow_tail = arrow_center - arrow_direction / 2
            arrow_head = arrow_center + arrow_direction / 2
            style = plt_patches.ArrowStyle.Simple(head_length=.4, head_width=.6, tail_width=.1)
            # Normalize arrow size by the current axis area (60 x 60 reference).
            x = np.array(ax.axis())
            scale_factor = np.sqrt(np.prod(np.abs(x[::2] - x[1::2])) / (60 * 60))
            arrow_patch = plt_patches.FancyArrowPatch(posA=arrow_tail, posB=arrow_head, arrowstyle=style,
                                                      color='w', mutation_scale= scale / scale_factor, alpha=0.4)
            ax.add_patch(arrow_patch)
        elif name is None:
            # No arrow and no explicit label: fall back to a default label.
            name = 'front'

        if name is not None:
            # Label at the midpoint between the first and last vertices.
            text_location = np.mean(vertices[[0, -1]], axis=0)
            ax.text(text_location[0], text_location[1], name, ha='center', va='top', color='w')
Example 8
Project: Griffin_lim   Author: candlewill   File: LWS.py    MIT License 6 votes vote down vote up
def main():
    """Reconstruct every .wav in ./data with LWS and write *.lws.gen.wav files."""
    data_folder = "data"
    stems = [os.path.join(data_folder, f[:-4]) for f in os.listdir(data_folder) if f.endswith(".wav")]
    out_paths = [stem + ".lws.gen.wav" for stem in stems]
    signals = [audio.load_wav(stem + ".wav", hparams.sample_rate) for stem in stems]

    lws_processor = lws.lws(512, 128, mode="speech")  # 512: window length; 128: window shift
    for waveform, out_path in zip(signals, out_paths):
        spectrum = lws_processor.stft(waveform)  # single-channel waveform -> complex STFT
        magnitude = np.abs(spectrum)  # magnitude spectrogram
        print('{:6}: {:5.2f} dB'.format('Abs(X)', lws_processor.get_consistency(magnitude)))
        # Reconstruct a consistent complex spectrogram from the magnitude alone.
        reconstructed = lws_processor.run_lws(magnitude)
        print(reconstructed.shape)
        print('{:6}: {:5.2f} dB'.format('LWS', lws_processor.get_consistency(reconstructed)))
        print(reconstructed.shape)
        wav = lws_processor.istft(reconstructed).astype(np.float32)

        audio.save_wav(wav, out_path)
Example 9
Project: models   Author: kipoi   File: dataloader.py    MIT License 6 votes vote down vote up
def _extract(self, intervals, out, **kwargs):
        """Fill `out` with the signed distance from each interval's midpoint
        to its closest landmark position, for every landmark column.

        Args:
            intervals: iterable of interval objects with chrom / start /
                end / strand attributes.
            out: array of shape (len(intervals), len(self.columns));
                written in place and returned.
        """

        def find_closest(ldm, interval, use_strand=True):
            """Signed distance to the closest landmark position for one interval.
            """
            # subset the positions to the appropriate strand
            # and extract the positions
            ldm_positions = ldm.loc[interval.chrom]
            if use_strand and interval.strand != ".":
                ldm_positions = ldm_positions.loc[interval.strand]
            ldm_positions = ldm_positions.position.values

            int_midpoint = (interval.end + interval.start) // 2
            dist = (ldm_positions - 1) - int_midpoint  # -1 for 0, 1 indexed positions
            # Flip the sign on the negative strand so distance is strand-relative.
            if use_strand and interval.strand == "-":
                dist = - dist

            # Keep the sign: return the distance with the smallest magnitude.
            return dist[np.argmin(np.abs(dist))]

        out[:] = np.array([[find_closest(self.landmarks[ldm_name], interval, self.use_strand)
                            for ldm_name in self.columns]
                           for interval in intervals], dtype=float)

        return out
Example 10
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_gpu.py    MIT License 6 votes vote down vote up
def __init__(self, parallel, wave_len=254, wave_dif=64, buffer_size=5, loop_num=5, window=np.hanning(254)):
        """Set up GPU (cupy) buffers for batched Griffin-Lim phase reconstruction.

        Args:
            parallel: number of signals processed in parallel (batch size).
            wave_len: analysis window length in samples.
            wave_dif: hop size between successive windows.
            buffer_size: number of past spectra kept for the iteration.
            loop_num: number of Griffin-Lim iterations per step.
            window: analysis window. NOTE(review): the default is evaluated
                once at import time and shared across instances — confirm
                this is intended.
        """
        self.wave_len = wave_len
        self.wave_dif = wave_dif
        self.buffer_size = buffer_size
        self.loop_num = loop_num
        self.parallel = parallel
        # One copy of the window per parallel lane.
        self.window = cp.array([window for _ in range(parallel)])

        self.wave_buf = cp.zeros((parallel, wave_len+wave_dif), dtype=float)
        self.overwrap_buf = cp.zeros((parallel, wave_dif*buffer_size+(wave_len-wave_dif)), dtype=float)
        self.spectrum_buffer = cp.ones((parallel, self.buffer_size, self.wave_len), dtype=complex)
        self.absolute_buffer = cp.ones((parallel, self.buffer_size, self.wave_len), dtype=complex)

        # Random initial phase, normalized to unit magnitude; exact zeros
        # are mapped to 1 first to avoid division by zero.
        self.phase = cp.zeros((parallel, self.wave_len), dtype=complex)
        self.phase += cp.random.random((parallel, self.wave_len))-0.5 + cp.random.random((parallel, self.wave_len))*1j - 0.5j
        self.phase[self.phase == 0] = 1
        self.phase /= cp.abs(self.phase)
Example 11
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_util.py    MIT License 6 votes vote down vote up
def __init__(self, wave_len=254, wave_dif=64, buffer_size=5, loop_num=5, window=np.hanning(254)):
        """Set up CPU (numpy) buffers for Griffin-Lim phase reconstruction.

        Args:
            wave_len: analysis window length in samples.
            wave_dif: hop size between successive windows.
            buffer_size: number of past spectra kept for the iteration.
            loop_num: number of Griffin-Lim iterations per step.
            window: analysis window. NOTE(review): the default is evaluated
                once at import time and shared across instances — confirm
                this is intended.
        """
        self.wave_len = wave_len
        self.wave_dif = wave_dif
        self.buffer_size = buffer_size
        self.loop_num = loop_num
        self.window = window

        self.wave_buf = np.zeros(wave_len+wave_dif, dtype=float)
        self.overwrap_buf = np.zeros(wave_dif*buffer_size+(wave_len-wave_dif), dtype=float)
        self.spectrum_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)
        self.absolute_buffer = np.ones((self.buffer_size, self.wave_len), dtype=complex)

        # Random initial phase, normalized to unit magnitude; exact zeros
        # are mapped to 1 first to avoid division by zero.
        self.phase = np.zeros(self.wave_len, dtype=complex)
        self.phase += np.random.random(self.wave_len)-0.5 + np.random.random(self.wave_len)*1j - 0.5j
        self.phase[self.phase == 0] = 1
        self.phase /= np.abs(self.phase)
Example 12
Project: Deep_VoiceChanger   Author: pstuvwx   File: dataset.py    MIT License 6 votes vote down vote up
def wave2input_image(wave, window, pos=0, pad=0):
    """Convert a waveform slice into a normalized log-magnitude spectrum image.

    Relies on module-level sride, dif, height, bias and scale constants.
    """
    # Stack overlapping windowed segments into rows, crop to 254 columns.
    rows = []
    for i in range(256 // sride):
        segment = wave[pos + i * sride: pos + (i + pad * 2) * sride + dif]
        rows.append(segment.reshape(height + pad * 2, sride))
    wave_image = np.hstack(rows)[:, :254]
    wave_image *= window

    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:, :128].reshape(1, height + pad * 2, 128), dtype=np.float32)

    # Clip, log-compress and normalize in place.
    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image
Example 13
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    Apache License 2.0 6 votes vote down vote up
def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        labels, preds = check_label_shapes(labels, preds, True)

        for label, pred in zip(labels, preds):
            label_np = label.asnumpy()
            pred_np = pred.asnumpy()

            # Promote 1-D arrays to column vectors so the shapes line up.
            if label_np.ndim == 1:
                label_np = label_np.reshape(label_np.shape[0], 1)
            if pred_np.ndim == 1:
                pred_np = pred_np.reshape(pred_np.shape[0], 1)

            # Accumulate mean absolute error, one instance per (label, pred) pair.
            self.sum_metric += numpy.abs(label_np - pred_np).mean()
            self.num_inst += 1 # numpy.prod(label.shape)
Example 14
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 6 votes vote down vote up
def test_abs():
    """Unit test for mx.sym.abs: checks forward values and input gradients
    against numpy references."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5
    arr_data = mx.nd.array(data_tmp)
    # Pre-fill the gradient buffer with garbage to verify it is overwritten.
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3

    test = mx.sym.abs(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0].asnumpy()
    # Reference forward result: plain numpy abs.
    npout = abs(data_tmp)
    assert_almost_equal(out, npout)

    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2;
    npout_grad = out_grad.asnumpy()
    # d|x|/dx = sign(x): expected input grad is upstream grad * sign(x).
    npout_grad = npout_grad * np.sign(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad.asnumpy(), npout_grad)
Example 15
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_quantization.py    Apache License 2.0 6 votes vote down vote up
def test_quantize_float32_to_int8():
    """Unit test for mx.nd.contrib.quantize: float32 -> int8 with a
    symmetric range derived from the data's min/max."""
    shape = rand_shape_nd(4)
    data = rand_ndarray(shape, 'default', dtype='float32')
    min_range = mx.nd.min(data)
    max_range = mx.nd.max(data)
    qdata, min_val, max_val = mx.nd.contrib.quantize(data, min_range, max_range, out_type='int8')
    data_np = data.asnumpy()
    min_range = min_range.asscalar()
    max_range = max_range.asscalar()
    # Quantization is symmetric around zero over [-real_range, real_range].
    real_range = np.maximum(np.abs(min_range), np.abs(max_range))
    quantized_range = 127.0
    scale = quantized_range / real_range
    assert qdata.dtype == np.int8
    assert min_val.dtype == np.float32
    assert max_val.dtype == np.float32
    assert same(min_val.asscalar(), -real_range)
    assert same(max_val.asscalar(), real_range)
    # Reference: scale, round half away from zero (the +0.5 trick), clamp to 127.
    qdata_np = (np.sign(data_np) * np.minimum(np.abs(data_np) * scale + 0.5, quantized_range)).astype(np.int8)
    assert_almost_equal(qdata.asnumpy(), qdata_np, atol = 1)
Example 16
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 5 votes vote down vote up
def db(audio):
    """Peak level of `audio` in dB (per row for 2-D input, scalar otherwise).

    All-zero input yields np.array([0]) instead of -inf.
    """
    if audio.ndim > 1:
        peaks = np.max(np.abs(audio), axis=1)
        return 20 * np.log10(peaks) if np.any(peaks != 0) else np.array([0])
    peak = np.max(np.abs(audio))
    return 20 * np.log10(peak) if peak != 0 else np.array([0])
Example 17
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License 5 votes vote down vote up
def Givens_rotation_double(a, b):
    """Compute the Givens rotation pair (c, s) for entries `a` and `b`.

    Uses the numerically stable formulation (divide by the entry of larger
    magnitude) instead of the naive sqrt(a**2 + b**2) definition, which
    avoids overflow when squaring large values.
    """
    if b == 0:
        return 1.0, 0.0

    # Divide by the larger-magnitude entry so the ratio stays in [-1, 1].
    if np.abs(b) > np.abs(a):
        ratio = - a / b
        s = 1 / (np.sqrt(1 + ratio ** 2))
        c = s * ratio
    else:
        ratio = - b / a
        c = 1 / (np.sqrt(1 + ratio ** 2))
        s = c * ratio

    return c, s
Example 18
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License 5 votes vote down vote up
def Givens_rotation_int(a, b):
    """Integer Givens rotation: returns the unnormalized pair (a, -b)
    together with their Euclidean norm from the integer sqrt helper.
    """
    abs_a = np.abs(a)
    abs_b = np.abs(b)
    # Squaring anything wider than 32 bits would overflow 64-bit storage.
    if abs_a > 0 and np.log2(abs_a) > 32:
        print("Too large value a = {0} for squaring and rooting!".format(a))
    if abs_b > 0 and np.log2(abs_b) > 32:
        print("Too large value b = {0} for squaring and rooting!".format(b))
    root = sqrt_int64(a ** 2 + b ** 2)
    return a, -b, root
Example 19
Project: b2ac   Author: hbldh   File: fixed_point.py    MIT License 5 votes vote down vote up
def scale_64bit_vector(v):
    """Right-shift an integer vector until its largest magnitude fits in 14 bits.

    :param v: Integer vector (numpy array).
    :return: Tuple ``(v >> scale, scale)`` where ``scale`` is the number of
        bits shifted (0 when the values already fit).
    """
    M = np.abs(v).max()
    # Equivalent to the shift-and-count loop: number of bits by which the
    # magnitude of M exceeds the 14-bit budget.
    scale = max(int(M).bit_length() - 14, 0)
    return v >> scale, scale
Example 20
Project: b2ac   Author: hbldh   File: qr_algorithm.py    MIT License 5 votes vote down vote up
def QR_algorithm_shift_Givens_double(A):
    """The QR algorithm with largest value shift for finding eigenvalues.

    Using Givens rotations for finding RQ.

    :param A: The square matrix to find eigenvalues of.
    :type A: :py:class:`numpy.ndarray`
    :return: The eigenvalues and the list of per-iteration convergence measures.
    :rtype: tuple

    """

    # First, tridiagonalize the input matrix to a Hessenberg matrix.
    T = ma.convert_to_Hessenberg_Givens_double(A.copy())

    m, n = T.shape
    if n != m:
        raise np.linalg.LinAlgError("Array must be square.")

    convergence_measure = []
    eigenvalues = np.zeros((m, ), dtype='float')
    # m now indexes the last row/column of the active (not yet deflated) block.
    m -= 1

    while m > 0:
        # Obtain the shift from the lower right corner of the matrix.
        mu_matrix = np.eye(T.shape[0]) * T[m, m]
        # Perform Givens QR step (which returns RQ) on the shifted
        # matrix and then shift it back.
        T = Givens_QR_step_double(T - mu_matrix) + mu_matrix
        # Add convergence information and extract eigenvalue if close enough.
        # The off-diagonal entry T[m, m-1] measures convergence of T[m, m].
        convergence_measure.append(np.abs(T[m, m - 1]))
        if convergence_measure[-1] < QR_ALGORITHM_TOLERANCE:
            # Converged: record the eigenvalue and deflate the active block.
            eigenvalues[m] = T[m, m]
            T = T[:m, :m]
            m -= 1

    # After full deflation, T is a 1x1 block holding the last eigenvalue.
    eigenvalues[0] = T
    return eigenvalues, convergence_measure
Example 21
Project: b2ac   Author: hbldh   File: qr_algorithm.py    MIT License 5 votes vote down vote up
def QR_algorithm_shift_Givens_int(A):
    """The QR algorithm with largest value shift for finding eigenvalues, in integer precision.

    Using Givens rotations for finding RQ.

    :param A: The square matrix to find eigenvalues of.
    :type A: :py:class:`numpy.ndarray`
    :return: The eigenvalues (rescaled back) and the convergence measures.
    :rtype: tuple

    """

    # Pre-scale so intermediate integer products fit in 64 bits; eigenvalues
    # are shifted back by `scale` at the end.
    A, scale = fp.scale_64bit_matrix(A.copy())

    # First, tridiagonalize the input matrix to a Hessenberg matrix.
    T = ma.convert_to_Hessenberg_Givens_int(A)

    m, n = T.shape
    if n != m:
        raise np.linalg.LinAlgError("Array must be square.")

    convergence_measure = []
    eigenvalues = np.zeros((m, ), dtype='int64')
    # m now indexes the last row/column of the active (not yet deflated) block.
    m -= 1

    while m > 0:
        # Obtain the shift from the lower right corner of the matrix.
        mu_matrix = np.eye(T.shape[0], dtype='int64') * T[m, m]
        # Perform Givens QR step (which returns RQ) on the shifted
        # matrix and then shift it back.
        T = Givens_QR_step_int(T - mu_matrix) + mu_matrix
        # Add convergence information and extract eigenvalue if close enough.
        # In integer arithmetic, an off-diagonal magnitude <= 1 counts as converged.
        convergence_measure.append(np.abs(T[m, m - 1]))
        if convergence_measure[-1] <= 1:
            eigenvalues[m] = T[m, m]
            T = T[:m, :m]
            m -= 1

    # After full deflation, T is a 1x1 block holding the last eigenvalue.
    eigenvalues[0] = T
    # Undo the magnitude pre-scaling applied at entry.
    return eigenvalues << scale, convergence_measure
Example 22
Project: projection-methods   Author: akshayka   File: problems.py    GNU General Public License v3.0 5 votes vote down vote up
def residual(self, uv):
        """Return tuple of scaled residuals (primal, dual, cone, duality gap)

        primal residual := norm(A.dot(p) + s - b) / (1 + norm(self.b))
        dual residual := norm(A.T.dot(y) + c) / (1 + norm(self.c))
        cone residual := norm(uv - self.product_set.project(uv))
        duality gap := |c.dot(p) + b.dot(y)| / (1 + |c.dot(x) + b.dot(y)|

        Args:
            uv (array-like): the point for which to compute the residual

        Returns:
            tuple (float, float, float): (primal residual,
                                          dual residual,
                                          cone residual,
                                          duality gap)
        """
        primal_var = self.p(uv)
        slack = self.s(uv)
        dual_var = self.y(uv)

        # TODO(akshayka): cache the denominators of pr and dr
        primal_res = np.linalg.norm(self.A.dot(primal_var) + slack - self.b, 2)
        primal_res /= 1 + np.linalg.norm(self.b)
        dual_res = np.linalg.norm(self.A.T.dot(dual_var) + self.c)
        dual_res /= 1 + np.linalg.norm(self.c)
        cone_res = np.linalg.norm(uv - self.product_set.project(uv), 2)
        gap = np.abs(self.c.dot(primal_var) + self.b.dot(dual_var))

        return (primal_res, dual_res, cone_res, gap / (1 + gap))

    # utility functions for extracting the individual components of (u, v);
    # TODO(akshayka): utility functions to scale variables by tau / kappa
    #
    # recall that u := (p, y, tau), and p is called 'x' in the SCS paper. 
Example 23
Project: explirefit   Author: codogogo   File: simple_stats.py    Apache License 2.0 5 votes vote down vote up
def sign_mismatches(predicts, gold):
    """Count positions where predictions and gold scores cancel in sign.

    A position mismatches when sign(predict) + sign(gold) == 0, i.e. the
    signs are opposite (or both values are exactly zero).

    Args:
        predicts: array-like of predicted scores.
        gold: array-like of reference scores.

    Returns:
        Tuple ``(count, total)``: the number of mismatching positions and
        the summed absolute value of the mismatching predictions.
    """
    # Vectorized replacement of the original O(n) Python loop; also avoids
    # shadowing the builtin `sum`.
    predicts = np.asarray(predicts)
    mismatch = (np.sign(predicts) + np.sign(gold)) == 0
    count = int(np.count_nonzero(mismatch))
    total = float(np.abs(predicts[mismatch]).sum())
    return (count, total)
Example 24
Project: SOFTX_2019_164   Author: ElsevierSoftwareX   File: spharafilter.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def specification(self, specification):
        """Validate and store the SPHARA filter specification.

        An int selects the first (positive) or last (negative) basis
        functions, with 0 selecting all of them; a list/tuple/ndarray is
        used as the selection vector directly.

        Raises:
            ValueError: if the int magnitude exceeds the number of vertices.
            IndexError: if a vector's length does not match the sample count.
            TypeError: for any other specification type.
        """
        if isinstance(specification, (int)):
            if np.abs(specification) > self._triangsamples.vertlist.shape[0]:
                raise ValueError("""The Number of selected basic functions is
                too large.""")
            else:
                if specification == 0:
                    # 0 means: keep every basis function.
                    self._specification = \
                        np.ones(self._triangsamples.vertlist.shape[0])
                else:
                    self._specification = \
                        np.zeros(self._triangsamples.vertlist.shape[0])
                    if specification > 0:
                        # Positive n: select the first n basis functions.
                        self._specification[:specification] = 1
                    else:
                        # Negative n: select the last |n| basis functions.
                        self._specification[specification:] = 1
        elif isinstance(specification, (list, tuple, np.ndarray)):
            specification = np.asarray(specification)
            # NOTE(review): the int branch checks vertlist.shape[0] but this
            # branch checks shape[1] — confirm which axis holds the samples.
            if specification.shape[0] != self._triangsamples.vertlist.shape[1]:
                raise IndexError("""The length of the specification vector
                does not match the number of spatial sample points. """)
            else:
                self._specification = specification
        else:
            # NOTE(review): "vecor" typo in the user-facing message left
            # untouched here (runtime string).
            raise TypeError("""The parameter specification has to be
            int or a vecor""")
Example 25
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License 5 votes vote down vote up
def maxlk_sigma(m, xold=None, eps=1e-8, max_iter=100):
    '''Maximum likelihood equation to estimate sigma from gamma distributed values'''

    sum_m2 = np.sum(m**2)
    K = m.size
    sum_log_m2 = np.sum(np.log(m**2))

    def f(sigma):
        return digamma(sum_m2/(2*K*sigma**2)) - sum_log_m2/K + np.log(2*sigma**2)

    def fprime(sigma):
        return -sum_m2 * polygamma(1, sum_m2/(2*K*sigma**2)) / (K*sigma**3) + 2/sigma

    if xold is None:
        xold = m.std()

    for _ in range(max_iter):

        xnew = xold - f(xold) / fprime(xold)

        if np.abs(xold - xnew) < eps:
            break

        xold = xnew

    return xnew 
Example 26
Project: aospy   Author: spencerahill   File: model.py    Apache License 2.0 5 votes vote down vote up
def _grid_sfc_area(lon, lat, lon_bounds=None, lat_bounds=None):
    """Calculate surface area of each grid cell in a lon-lat grid.

    Args:
        lon, lat: arrays of grid-cell-center longitudes and latitudes.
        lon_bounds, lat_bounds: optional cell-edge arrays; derived from the
            centers via `_bounds_from_array` when not given.

    Returns:
        Array of per-cell surface areas, with lat/lon coordinates matching
        the input centers.
    """
    # Compute the bounds if not given.
    if lon_bounds is None:
        lon_bounds = _bounds_from_array(
            lon, internal_names.LON_STR, internal_names.LON_BOUNDS_STR)
    if lat_bounds is None:
        lat_bounds = _bounds_from_array(
            lat, internal_names.LAT_STR, internal_names.LAT_BOUNDS_STR)
    # Compute the surface area: per cell, R^2 * d(lon) * d(sin(lat)).
    dlon = _diff_bounds(utils.vertcoord.to_radians(lon_bounds, is_delta=True),
                        lon)
    sinlat_bounds = np.sin(utils.vertcoord.to_radians(lat_bounds,
                                                      is_delta=True))
    dsinlat = np.abs(_diff_bounds(sinlat_bounds, lat))
    sfc_area = dlon*dsinlat*(RADIUS_EARTH**2)
    # Rename the coordinates such that they match the actual lat / lon.
    try:
        sfc_area = sfc_area.rename(
            {internal_names.LAT_BOUNDS_STR: internal_names.LAT_STR,
             internal_names.LON_BOUNDS_STR: internal_names.LON_STR})
    except ValueError:
        # Presumably the coords already carry the target names — TODO confirm.
        pass
    # Clean up: correct names and dimension order.
    sfc_area = sfc_area.rename(internal_names.SFC_AREA_STR)
    sfc_area[internal_names.LAT_STR] = lat
    sfc_area[internal_names.LON_STR] = lon
    return sfc_area.transpose()
Example 27
Project: prediction-constrained-topic-models   Author: dtak   File: util_io_training.py    MIT License 5 votes vote down vote up
def is_lap_in_custom_str(
        cur_lap=None,
        laps_to_save_custom='',
        ):
    ''' Determine if current lap is specified by user custom lap list

    Returns
    -------
    is_custom : boolean

    Examples
    --------
    >>> is_lap_in_custom_str(1.2, '1,1.5,3.00')
    False
    >>> is_lap_in_custom_str(1.5, '1,1.5,3.00')
    True
    >>> is_lap_in_custom_str(3, '1,1.5,3.00')
    True
    >>> is_lap_in_custom_str(3.0001, '1,1.5,3.00')
    False
    >>> is_lap_in_custom_str(1.2, '')
    False
    >>> is_lap_in_custom_str(1.2, 'abc')
    Traceback (most recent call last):
     ...
    ValueError: could not convert string to float: 'abc'
    '''
    # A list is required here: under Python 3 the previous map() call
    # returned an iterator, and len() on it raised TypeError.
    list_of_laps_with_no_empties = [
        float(tok) for tok in str(laps_to_save_custom).split(',') if tok]
    if len(list_of_laps_with_no_empties) > 0:
        # Do in numerically robust way
        is_match_M = np.abs(
            np.asarray(list_of_laps_with_no_empties) - float(cur_lap))
        is_custom = bool(np.min(is_match_M) < 1e-7)
    else:
        is_custom = False
    return is_custom
Example 28
Project: OpenAPS   Author: medicinexlab   File: oldpred.py    MIT License 5 votes vote down vote up
def _find_nearest_index(array, value):
    """Return the index of the entry in `array` nearest to `value`.

    Returns -1 when even the nearest entry differs from `value` by
    ACTUAL_BG_RANGE or more (after truncating the gap to an int).
    """
    deltas = np.abs(array - value)
    nearest_index = deltas.argmin()
    # Truncate the gap to an int before comparing with the allowed range.
    if int(np.abs(array[nearest_index] - value)) < ACTUAL_BG_RANGE:
        return nearest_index
    return -1


#Given the actual_bg_array, actual_bg_time_array, pred_array, pred_time_array, and num_pred_minutes,
#this function finds the nearest actual bg value to compare to the prediction value.
#If there is one, then it adds all the values to the result arrays, which are returned as a namedtuple.
#Returns the arrays such that the predBG corresponds to the actualBG in NUM_PRED_MINUTES in the future 
Example 29
Project: FRIDA   Author: LCAV   File: doa.py    MIT License 5 votes vote down vote up
def polar_distance(x1, x2):
    """
    Given two arrays of numbers x1 and x2, pairs the cells that are the
    closest and provides the pairing matrix index: x1(index(1,:)) should be as
    close as possible to x2(index(2,:)). The function outputs the average of 
    the absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).
    :param x1: vector 1
    :param x2: vector 2
    :return: d: minimum distance between d
             index: the permutation matrix
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    # Circular (angular) distance between every pair of entries.
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        # Greedy matching: repeatedly take the globally closest remaining
        # pair and disable its row and column with +inf.
        for k in range(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float('inf')
            diffmat[:, index1] = float('inf')
        # Average circular distance over the matched pairs.
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            # NOTE(review): the constant 1 here looks like a leftover from
            # 1-based (MATLAB) indexing — confirm whether 0 was intended.
            index = np.array([1, index])
        else:
            index = np.array([index, 1])
    return d, index
Example 30
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def axis_angle_to_rotation(axis, angle):
    """Build a 3x3 rotation matrix from a unit axis and an angle in radians.

    Implements the axis-angle conversion formula from
    https://en.wikipedia.org/wiki/Rotation_matrix#Conversion_from_and_to_axis-angle
    `angle` must lie in [0, 2*pi] and `axis` must be normalized.
    """
    assert isinstance(axis, np.ndarray) and axis.shape == (3,)
    assert np.abs(np.linalg.norm(axis) - 1.) < 1e-6
    assert 0 <= angle <= np.pi * 2
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    outer = np.tensordot(axis, axis, axes=0)
    return cos_a * np.eye(3) + sin_a * cross_product_matrix(axis) + (1 - cos_a) * outer
Example 31
Project: Tacotron   Author: ElwynWang   File: signal_process.py    GNU General Public License v3.0 5 votes vote down vote up
def griffin_lim(spectrogram):
    '''Applies the Griffin-Lim algorithm: iteratively estimates a phase so
    that the given magnitude spectrogram maps back to a time-domain signal.
    '''
    X_best = copy.deepcopy(spectrogram)
    for i in range(Hp.grilimn_iter):
        X_t = invert_spectrogram(X_best)
        est = librosa.stft(X_t, Hp.num_fft, Hp.hop_length, win_length=Hp.win_length)
        # Keep only the phase of the re-analysis (1e-8 avoids divide-by-zero).
        phase = est / np.maximum(1e-8, np.abs(est))
        X_best = spectrogram * phase
    X_t = invert_spectrogram(X_best)
    # Discard any residual imaginary part from the final inversion.
    y = np.real(X_t)

    return y
Example 32
Project: dc_tts   Author: Kyubyong   File: utils.py    Apache License 2.0 5 votes vote down vote up
def griffin_lim(spectrogram):
    '''Applies the Griffin-Lim algorithm: iteratively estimates a phase so
    that the given magnitude spectrogram maps back to a time-domain signal.'''
    X_best = copy.deepcopy(spectrogram)
    for i in range(hp.n_iter):
        X_t = invert_spectrogram(X_best)
        est = librosa.stft(X_t, hp.n_fft, hp.hop_length, win_length=hp.win_length)
        # Keep only the phase of the re-analysis (1e-8 avoids divide-by-zero).
        phase = est / np.maximum(1e-8, np.abs(est))
        X_best = spectrogram * phase
    X_t = invert_spectrogram(X_best)
    # Discard any residual imaginary part from the final inversion.
    y = np.real(X_t)

    return y
Example 33
Project: neural-fingerprinting   Author: StephanZheng   File: attack_model_featadv.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def main(argv):
    """Run the FastFeatureAdversaries attack on random ImageNet-sized inputs.

    Builds random source/guide images, crafts an adversarial image whose
    features at FLAGS.layer approach the guide's, then prints feature-space
    L2 distances and the maximum pixel perturbation.
    """
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    input_shape = [FLAGS.batch_size, 224, 224, 3]
    # Random images in [0, 1] stand in for real source/guide inputs.
    x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))
    print("Input shape:")
    print(input_shape)

    model = make_imagenet_cnn(input_shape)
    attack = FastFeatureAdversaries(model)
    attack_params = {'eps': 0.3, 'clip_min': 0., 'clip_max': 1.,
                     'nb_iter': FLAGS.nb_iter, 'eps_iter': 0.01,
                     'layer': FLAGS.layer}
    x_adv = attack.generate(x_src, x_guide, **attack_params)
    # Features of the adversarial, source and guide images at the attacked layer.
    h_adv = model.fprop(x_adv)[FLAGS.layer]
    h_src = model.fprop(x_src)[FLAGS.layer]
    h_guide = model.fprop(x_guide)[FLAGS.layer]

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        ha, hs, hg, xa, xs, xg = sess.run(
            [h_adv, h_src, h_guide, x_adv, x_src, x_guide])

        print("L2 distance between source and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hs-ha)*(hs-ha)).sum()))
        print("L2 distance between guide and adversarial example `%s`: %.4f" %
              (FLAGS.layer, ((hg-ha)*(hg-ha)).sum()))
        print("L2 distance between source and guide `%s`: %.4f" %
              (FLAGS.layer, ((hg-hs)*(hg-hs)).sum()))
        print("Maximum perturbation: %.4f" % np.abs((xa-xs)).max())
        print("Original features: ")
        print(hs[:10, :10])
        print("Adversarial features: ")
        print(ha[:10, :10])
Example 34
Project: Griffin_lim   Author: candlewill   File: audio.py    MIT License 5 votes vote down vote up
def save_wav(wav, path):
    """Rescale `wav` (in place) to the int16 range and write it to `path`."""
    # The 0.01 floor prevents division blow-up on near-silent signals.
    peak = max(0.01, np.max(np.abs(wav)))
    wav *= 32767 / peak
    wavfile.write(path, hparams.sample_rate, wav.astype(np.int16)) 
Example 35
Project: Griffin_lim   Author: candlewill   File: audio.py    MIT License 5 votes vote down vote up
def spectrogram(y):
    """Normalized log-magnitude (dB) spectrogram of waveform `y`."""
    stft_mag = np.abs(_stft(_preemphasis(y)))
    db_spec = _amp_to_db(stft_mag) - hparams.ref_level_db
    return _normalize(db_spec) 
Example 36
Project: Griffin_lim   Author: candlewill   File: audio.py    MIT License 5 votes vote down vote up
def melspectrogram(y):
    """Normalized log-mel (dB) spectrogram of waveform `y`."""
    stft_mag = np.abs(_stft(_preemphasis(y)))
    mel_db = _amp_to_db(_linear_to_mel(stft_mag))
    return _normalize(mel_db) 
Example 37
Project: Griffin_lim   Author: candlewill   File: audio.py    MIT License 5 votes vote down vote up
def _griffin_lim(S):
    """Reconstruct a waveform from a magnitude spectrogram via Griffin-Lim.

    Starts from random phase and iteratively re-estimates it through
    ISTFT/STFT round trips.

    :param S: magnitude spectrogram (freq x frames)
    :return: time-domain waveform estimate
    """
    angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    # Use builtin `complex` rather than `np.complex`: that alias was
    # deprecated in NumPy 1.20 and removed in 1.24, so the original call
    # raises AttributeError on current NumPy.
    S_complex = np.abs(S).astype(complex)
    for i in range(hparams.griffin_lim_iters):
        if i > 0:
            # Re-use the phase of the latest reconstruction.
            angles = np.exp(1j * np.angle(_stft(y)))
        y = _istft(S_complex * angles)
    return y 
Example 38
Project: nmp_qc   Author: priba   File: LogMetric.py    MIT License 5 votes vote down vote up
def error_ratio(pred, target):
    """Mean relative absolute error between predictions and targets.

    :param pred: array-like of predictions
    :param target: array-like of reference values (zeros in `target`
        produce inf/nan contributions, as in the original behavior)
    :return: float, mean of |pred - target| / |target|
    """
    # np.asarray replaces the manual `type(...) is not np.ndarray` checks:
    # it is a no-op for ndarrays and converts everything else.
    pred = np.asarray(pred)
    target = np.asarray(target)
    return np.mean(np.abs(pred - target) / np.abs(target)) 
Example 39
Project: animal-tracking   Author: colinlaney   File: track.py    Creative Commons Zero v1.0 Universal 5 votes vote down vote up
def angle_cos(p0, p1, p2):
    """Absolute cosine of the angle at vertex `p1` formed with `p0` and `p2`."""
    u = (p0 - p1).astype('float')
    v = (p2 - p1).astype('float')
    cosine = np.dot(u, v) / np.sqrt(np.dot(u, u) * np.dot(v, v))
    return np.abs(cosine) 
Example 40
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes vote down vote up
def sign_log_func(x):
    """Signed log transform: sign(x) * log10(|x| + 1); maps 0 to 0."""
    magnitude = np.log10(np.abs(x) + 1)
    return np.sign(x) * magnitude 
Example 41
Project: models   Author: kipoi   File: dataloader.py    MIT License 5 votes vote down vote up
def sign_log_func_inverse(x):
    """Inverse of the signed log transform: sign(x) * (10**|x| - 1)."""
    magnitude = np.power(10, np.abs(x)) - 1
    return np.sign(x) * magnitude 
Example 42
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_gpu.py    MIT License 5 votes vote down vote up
def inverse(self, spectrum, in_phase=None):
        """Griffin-Lim-style phase recovery for one frame batch on GPU (cupy).

        Pushes the new magnitude `spectrum` into internal rolling buffers,
        runs `self.loop_num` refinement iterations over all buffered
        frames, and returns the oldest frame's complex spectrum as numpy.

        :param spectrum: magnitude spectrum batch
        :param in_phase: optional initial phase; defaults to self.phase
        :return: numpy array, refined complex spectrum of the oldest frame
        """
        if in_phase is None:
            in_phase = self.phase
        else:
            in_phase = cp.array(in_phase)
        spectrum = cp.array(spectrum)
        # Newest frame enters the tail slot of the rolling buffers.
        self.spectrum_buffer[:, -1] = spectrum * in_phase
        self.absolute_buffer[:, -1] = spectrum

        for _ in range(self.loop_num):
            self.overwrap_buf *= 0
            waves = cp.fft.ifft(self.spectrum_buffer, axis=2).real
            last = self.spectrum_buffer

            # Overlap-add all buffered frames, then re-slice windowed frames.
            for i in range(self.buffer_size):
                self.overwrap_buf[:,i*self.wave_dif:i*self.wave_dif+self.wave_len] += waves[:,i]
            waves = cp.stack([self.overwrap_buf[:, i*self.wave_dif:i*self.wave_dif+self.wave_len]*self.window for i in range(self.buffer_size)], axis=1)

            spectrum = cp.fft.fft(waves, axis=2)
            # Keep the stored magnitudes, take only the re-estimated phase
            # (1e-10 guards against division by zero on silent bins).
            self.spectrum_buffer = self.absolute_buffer * spectrum / (cp.abs(spectrum)+1e-10)
            # Extrapolation step: push the update further along its direction
            # (fast Griffin-Lim-like acceleration).
            self.spectrum_buffer += 0.5 * (self.spectrum_buffer - last)

        dst = cp.asnumpy(self.spectrum_buffer[:, 0])
        # Advance the rolling buffers by one frame slot.
        self.absolute_buffer = cp.roll(self.absolute_buffer, -1, axis=1)
        self.spectrum_buffer = cp.roll(self.spectrum_buffer, -1, axis=1)

        return dst 
Example 43
Project: Deep_VoiceChanger   Author: pstuvwx   File: gla_util.py    MIT License 5 votes vote down vote up
def inverse(self, spectrum, in_phase=None):
        """Griffin-Lim-style phase recovery for one frame (CPU/numpy version).

        Pushes the new magnitude `spectrum` into internal rolling buffers,
        runs `self.loop_num` refinement iterations, and returns the next
        `wave_dif` output samples assembled via an overlap-add wave buffer.

        :param spectrum: magnitude spectrum of the newest frame
        :param in_phase: optional initial phase; defaults to self.phase
        :return: numpy array of length `self.wave_dif` (scaled by 0.5)
        """
        spectrum = spectrum.astype(complex)
        if in_phase is None:
            in_phase = self.phase
        # Newest frame enters the tail slot of the rolling buffers.
        self.spectrum_buffer[-1] = spectrum * in_phase
        self.absolute_buffer[-1] = spectrum

        for _ in range(self.loop_num):
            self.overwrap_buf *= 0
            waves = np.fft.ifft(self.spectrum_buffer, axis=1).real
            last = self.spectrum_buffer

            # Overlap-add all buffered frames, then re-slice windowed frames.
            for i in range(self.buffer_size):
                self.overwrap_buf[i*self.wave_dif:i*self.wave_dif+self.wave_len] += waves[i]
            waves = np.vstack([self.overwrap_buf[i*self.wave_dif:i*self.wave_dif+self.wave_len]*self.window for i in range(self.buffer_size)])

            spectrum = np.fft.fft(waves, axis=1)
            # Keep stored magnitudes, take only the re-estimated phase
            # (1e-10 guards against division by zero on silent bins).
            self.spectrum_buffer = self.absolute_buffer * spectrum / (np.abs(spectrum)+1e-10)
            # Extrapolation step (fast Griffin-Lim-like acceleration).
            self.spectrum_buffer += 0.5 * (self.spectrum_buffer - last)

        waves = np.fft.ifft(self.spectrum_buffer[0]).real
        # Advance the rolling buffers by one frame slot.
        self.absolute_buffer = np.roll(self.absolute_buffer, -1, axis=0)
        self.spectrum_buffer = np.roll(self.spectrum_buffer, -1, axis=0)

        # Shift the output wave buffer and mix in the new reconstruction.
        self.wave_buf = np.roll(self.wave_buf, -self.wave_dif)
        self.wave_buf[-self.wave_dif:] = 0
        self.wave_buf[self.wave_dif:] += waves
        return self.wave_buf[:self.wave_dif]*0.5 
Example 44
Project: programsynthesishunting   Author: flexgp   File: math_functions.py    GNU General Public License v3.0 5 votes vote down vote up
def rlog(x):
    """Koza's protected log: elementwise log(|x|), with rlog(0) defined as 1.

    :param x: argument to log, np.array
    :return: np.array of log(abs(x)), or 1 where x is 0
    """
    # Silence the divide-by-zero warning that log(0) would emit; those
    # entries are overwritten by np.where anyway.
    with np.errstate(divide='ignore'):
        raw = np.log(np.abs(x))
    return np.where(x == 0, np.ones_like(x), raw) 
Example 45
Project: programsynthesishunting   Author: flexgp   File: math_functions.py    GNU General Public License v3.0 5 votes vote down vote up
def ppow(x, y):
    """Protected power: |x| ** y, defined even for negative base and
    non-integer exponent.

    :param x: np.array, base
    :param y: np.array, exponent
    :return: np.array, abs(x) ** y
    """
    base = np.abs(x)
    return base ** y 
Example 46
Project: programsynthesishunting   Author: flexgp   File: math_functions.py    GNU General Public License v3.0 5 votes vote down vote up
def ppow2(x, y):
    """Sign-preserving protected power: sign(x) * |x| ** y.

    :param x: np.array, base
    :param y: np.array, exponent
    :return: np.array, protected x ** y with the sign of x
    """
    magnitude = np.abs(x) ** y
    return np.sign(x) * magnitude 
Example 47
Project: programsynthesishunting   Author: flexgp   File: math_functions.py    GNU General Public License v3.0 5 votes vote down vote up
def psqrt(x):
    """Protected square root: sqrt(|x|), so negative inputs are legal.

    :param x: np.array, argument to sqrt
    :return: np.array, sqrt of the absolute value
    """
    magnitude = np.abs(x)
    return np.sqrt(magnitude) 
Example 48
Project: programsynthesishunting   Author: flexgp   File: math_functions.py    GNU General Public License v3.0 5 votes vote down vote up
def psqrt2(x):
    """Sign-preserving protected square root: sign(x) * sqrt(|x|).

    :param x: np.array, argument to sqrt
    :return: np.array, protected sqrt with the sign of x
    """
    root = np.sqrt(np.abs(x))
    return np.sign(x) * root 
Example 49
Project: spleeter   Author: deezer   File: spectrogram.py    MIT License 5 votes vote down vote up
def compute_spectrogram_tf(
        waveform,
        frame_length=2048, frame_step=512,
        spec_exponent=1., window_exponent=1.):
    """ Compute magnitude / power spectrogram from waveform as
    a n_samples x n_channels tensor.

    :param waveform:        Input waveform as (times x number of channels)
                            tensor.
    :param frame_length:    Length of a STFT frame to use.
    :param frame_step:      HOP between successive frames.
    :param spec_exponent:   Exponent of the spectrogram (usually 1 for
                            magnitude spectrogram, or 2 for power spectrogram).
    :param window_exponent: Exponent applied to the Hann windowing function
                            (may be useful for making perfect STFT/iSTFT
                            reconstruction).
    :returns:   Computed magnitude / power spectrogram as a
                (T x F x n_channels) tensor.
    """
    # Transpose in/out so the STFT runs per channel; final perm puts
    # time first, frequency second, channel last.
    stft_tensor = tf.transpose(
        stft(
            tf.transpose(waveform),
            frame_length,
            frame_step,
            window_fn=lambda f, dtype: hann_window(
                f,
                periodic=True,
                dtype=waveform.dtype) ** window_exponent),
        perm=[1, 2, 0])
    # NOTE(review): np.abs on a TF tensor relies on __abs__ dispatch;
    # tf.abs would be the conventional call here — confirm intended.
    return np.abs(stft_tensor) ** spec_exponent 
Example 50
Project: VSE-C   Author: ExplorerFreda   File: saliency_visualization.py    MIT License 5 votes vote down vote up
def normalize_grad(grad, stat=False):
    """Min-max normalize the absolute gradient to [0, 1].

    :param grad: np.array of gradients (any sign)
    :param stat: if True, print min/max of |grad| before normalizing
    :return: float32 np.array in [0, 1]; all zeros when `grad` is constant
    """
    grad = np.abs(grad)
    if stat:
        print('Grad min={}, max={}'.format(grad.min(), grad.max()))
    grad -= grad.min()
    peak = grad.max()
    # Guard: a constant |grad| has peak 0 after min-subtraction; the
    # original unconditional division produced NaNs in that case.
    if peak > 0:
        grad /= peak
    return grad.astype('float32') 
Example 51
Project: VSE-C   Author: ExplorerFreda   File: saliency_visualization.py    MIT License 5 votes vote down vote up
def print_txt_saliency(txt, ind, content, img_embedding_var, caption_embedding_var, backward=False):
    """Print per-token saliency for caption `ind` from the gradient of the
    image-caption similarity with respect to the text embedding.

    :param txt: embedding Variable whose .grad holds token gradients
    :param ind: row index of the caption within the batch
    :param content: whitespace-separated caption string
    :param backward: if True, (re)compute gradients by backpropagating the
        similarity score first
    """
    if backward:
        # Similarity = inner product of the squeezed embeddings.
        dis = (caption_embedding_var.squeeze() * img_embedding_var.squeeze()).sum()
        dis.backward(retain_graph=True)

    content = content.split()
    # Grad has 2 extra positions around the tokens (presumably BOS/EOS
    # markers — confirm against the tokenizer); slice [1:1+len] drops them.
    assert txt.grad.size(1) == 2 + len(content), (txt.grad.size(1), len(content))
    grad_txt = txt.grad[ind,1:1+len(content)].data.cpu().squeeze().abs().numpy()
    # Average over the embedding dimension, then normalize to sum to 1.
    grad_txt = grad_txt.mean(-1)
    grad_txt /= grad_txt.sum()
    print('Text saliency:', ' '.join(['{}({:.3f})'.format(c, float(g)) for c, g in zip(content, grad_txt)])) 
Example 52
Project: spqrel_tools   Author: LCAS   File: iswaving.py    MIT License 5 votes vote down vote up
def pad_img_to_fit_bbox(img, x1, x2, y1, y2):
    """Zero-pad `img` so the bbox (x1:x2, y1:y2) lies fully inside it.

    Negative start coordinates and end coordinates beyond the image edge
    are accommodated by constant (zero) padding; the bbox coordinates are
    shifted into the padded image's frame.

    :param img: 2-D or 3-D (H x W [x C]) numpy image
    :return: (padded_img, x1, x2, y1, y2) with non-negative coordinates
    """
    # Padding before the origin (for negative starts) ...
    y_before = np.abs(np.minimum(0, y1))
    x_before = np.abs(np.minimum(0, x1))
    # ... and past the far edges (for ends beyond the image).
    y_after = np.maximum(y2 - img.shape[0], 0)
    x_after = np.maximum(x2 - img.shape[1], 0)

    if img.ndim == 3:
        pad_spec = ((y_before, y_after), (x_before, x_after), (0, 0))
    else:
        pad_spec = ((y_before, y_after), (x_before, x_after))
    img = np.pad(img, pad_spec, mode="constant")

    # BUG FIX: shift both ends by the ORIGINAL offsets. The previous code
    # updated y1/x1 first and then derived the y2/x2 shifts from the
    # already-clamped values, so y2/x2 were never moved when the start
    # coordinate was negative.
    y1 += y_before
    y2 += y_before
    x1 += x_before
    x2 += x_before
    return img, x1, x2, y1, y2 
Example 53
Project: core   Author: lifemapper   File: permutation_testing.py    GNU General Public License v3.0 5 votes vote down vote up
def compare_absolute_values(obs, rand):
    """Element-wise test of whether |rand| exceeds |obs|.

    Args:
        obs (:obj: `Numpy array`): A numpy array of observed values
        rand (:obj: `Numpy array`): A numpy array of random values

    Returns:
        Boolean numpy array, True where abs(rand) > abs(obs).
    """
    abs_obs = np.abs(obs)
    abs_rand = np.abs(rand)
    return abs_rand > abs_obs

# ............................................................................. 
Example 54
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: profiler_ndarray.py    Apache License 2.0 5 votes vote down vote up
def reldiff(a, b):
    """Maximum element-wise relative difference of `a` and `b`, scaled by |a|."""
    abs_err = np.abs(a - b)
    # Small epsilon keeps the division finite where a is zero.
    scale = np.abs(a) + 1e-7
    return np.max(abs_err / scale) 
Example 55
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: profiler_ndarray.py    Apache License 2.0 5 votes vote down vote up
def test_ndarray_copy():
    """copyto must produce a bitwise-identical array on the target context."""
    src = mx.nd.array(np.random.uniform(-10, 10, (10, 10)))
    dst = src.copyto(mx.Context('cpu', 0))
    # The count of mismatching elements must be exactly zero.
    assert np.sum(np.abs(src.asnumpy() != dst.asnumpy())) == 0.0 
Example 56
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    Apache License 2.0 5 votes vote down vote up
def create(metric, *args, **kwargs):
    """Create an evaluation metric.

    Parameters
    ----------
    metric : str or callable
        What to build the metric from. One of:

        - the name of a registered metric,
        - an instance of `EvalMetric`,
        - a list of any of the above (combined into a composite metric),
        - a custom evaluation function over a batch of labels/predictions.
    *args : list
        Positional arguments forwarded to the metric constructor.
        Only used when `metric` is a str.
    **kwargs : dict
        Keyword arguments forwarded to the metric constructor.
        Only used when `metric` is a str.

    Examples
    --------
    >>> def custom_metric(label, pred):
    ...     return np.mean(np.abs(label - pred))
    ...
    >>> metric1 = mx.metric.create('acc')
    >>> metric2 = mx.metric.create(custom_metric)
    >>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])
    """
    # A bare function is wrapped in a CustomMetric.
    if callable(metric):
        return CustomMetric(metric, *args, **kwargs)
    # A list builds each child recursively and combines them.
    if isinstance(metric, list):
        composite = CompositeEvalMetric()
        for child in metric:
            composite.add(create(child, *args, **kwargs))
        return composite
    # Otherwise defer to the registry lookup (string names, instances).
    return _create(metric, *args, **kwargs) 
Example 57
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py    Apache License 2.0 5 votes vote down vote up
def np(numpy_feval, name=None, allow_extra_outputs=False):
    """Creates a custom evaluation metric that receives its inputs as numpy arrays.

    Parameters
    ----------
    numpy_feval : callable(label, pred)
        Custom evaluation function that receives labels and predictions for a minibatch
        as numpy arrays and returns the corresponding custom metric as a floating point number.
    name : str, optional
        Name of the custom metric.
    allow_extra_outputs : bool, optional
        Whether prediction output is allowed to have extra outputs. This is useful in cases
        like RNN where states are also part of output which can then be fed back to the RNN
        in the next step. By default, extra outputs are not allowed.

    Returns
    -------
    float
        Custom metric corresponding to the provided labels and predictions.

    Example
    -------
    >>> def custom_metric(label, pred):
    ...     return np.mean(np.abs(label-pred))
    ...
    >>> metric = mx.metric.np(custom_metric)
    """
    # NOTE(review): this public function is named `np`, which shadows the
    # conventional numpy alias inside this module's namespace — part of
    # the public API (mx.metric.np), so it cannot be renamed here.
    def feval(label, pred):
        """Internal eval function."""
        return numpy_feval(label, pred)
    # Preserve the user function's name for reporting purposes.
    feval.__name__ = numpy_feval.__name__
    return CustomMetric(feval, name, allow_extra_outputs)
# pylint: enable=invalid-name 
Example 58
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def check_softmax_with_ignore_label(xpu):
    """SoftmaxOutput with use_ignore: labels equal to ignore_label (0)
    must contribute zero gradient.

    Runs the same forward/backward twice — once with random labels, then
    with the first half of the labels overwritten by the ignore value —
    and checks that the ignored half's gradient vanishes while the rest
    matches the baseline.
    """
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)

    shape = (20, 10)
    x = mx.nd.empty(shape, ctx = xpu)
    l = mx.nd.empty((shape[0],), ctx = xpu)
    # Labels drawn from [0, shape[1]-1); 0 will later be the ignore value.
    x_np = np.random.rand(*shape)
    l_np = np.random.randint(0, shape[1]-1, (shape[0],))
    x[:] = x_np
    l[:] = l_np

    grad = mx.nd.empty(shape, ctx = xpu)

    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    exec1.backward()

    # Baseline gradient with no ignored labels.
    grad0 = grad.asnumpy()

    # Mark the first half of the batch as ignored (label 0).
    for i in range(int(shape[0]/2)):
        l_np[i] = 0
    l[:] = l_np

    exec1.forward(is_train=True)
    exec1.backward()
    grad1 = grad.asnumpy()

    # Ignored rows: gradient ~0. Remaining rows: unchanged vs baseline.
    assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
    assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):]) 
Example 59
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_relu():
    """Forward, backward, and numeric-gradient checks for mx.sym.relu."""
    def frelu(x):
        # NumPy reference forward: max(x, 0).
        return np.maximum(x, 0.0)
    def frelu_grad(x):
        # Reference gradient: 1 where x > 0, else 0.
        return 1.0 * (x > 0.0)
    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.relu(x)
    xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
    eps = 1e-4
    # Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
    # Here we replace small problematic inputs with 1.0.  Repro issue with seed 97264195.
    xa[abs(xa) < eps] = 1.0
    ya = frelu(xa)
    ga = frelu_grad(xa)
    check_numeric_gradient(y, [xa], numeric_eps=eps)
    check_symbolic_forward(y, [xa], [ya])
    check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])


# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness. 
Example 60
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_leaky_relu():
    """Forward/backward/numeric-gradient checks for LeakyReLU ('elu' and
    'leaky' variants) across 1-3 dims and three float dtypes."""
    def fleaky_relu(x, act_type, slope=0.25):
        # NumPy reference forward for both activation variants.
        neg_indices = x < 0
        out = x.copy()
        if act_type == 'elu':
            out[neg_indices] = slope * np.expm1(out[neg_indices])
        elif act_type == 'leaky':
            out[neg_indices] = slope * out[neg_indices]
        return out
    def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
        # Reference gradient; the 'elu' branch expresses the derivative
        # via the forward output y (= slope*expm1(x)), i.e. y + slope.
        neg_indices = x < 0
        out = np.ones(x.shape)
        if act_type == 'elu':
            out[neg_indices] = y[neg_indices] + slope
        elif act_type == 'leaky':
            out[neg_indices] = slope
        return out * grad
    for ndim in range(1, 4):
        shape = rand_shape_nd(ndim)
        x = mx.symbol.Variable("x")
        slp = 0.25
        for dtype in [np.float16, np.float32, np.float64]:
            xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
            eps = 1e-4
            rtol = 1e-2
            atol = 1e-3
            # Replace near-zero inputs: the gradient is discontinuous at 0,
            # which breaks the finite-difference check.
            xa[abs(xa) < eps] = 1.0
            for act_type in ['elu', 'leaky']:
                y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
                ya = fleaky_relu(xa, slope=slp, act_type=act_type)
                ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)


# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness. 
Example 61
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_selu():
    """Forward/backward/numeric-gradient checks for LeakyReLU act_type='selu'."""
    # Standard SELU alpha/lambda constants.
    alpha = 1.6732632423543772848170429916717
    lamb = 1.0507009873554804934193349852946
    def fselu(x):
        # NumPy reference forward: lamb * (x for x >= 0, alpha*expm1(x) for x < 0).
        neg_indices = x < 0
        out = x.copy()
        out[neg_indices] = alpha * np.expm1(out[neg_indices])
        return out * lamb
    def fselu_grad(grad, x, y):
        # Reference gradient, expressed via the forward output y on the
        # negative branch: (y + alpha) * lamb.
        neg_indices = x < 0
        out = np.ones(x.shape).astype(x.dtype)
        out[neg_indices] = y[neg_indices] + alpha
        return out * lamb

    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="selu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
        # Looser tolerances (and larger eps) for float16 precision.
        eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
        if dtype is np.float16:
            xa /= 10.0
        # Replace near-zero inputs to dodge the gradient discontinuity at 0.
        xa[abs(xa) < eps] = 0.01
        ya = fselu(xa)
        ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype) 
Example 62
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_softsign():
    """Forward/backward checks for mx.sym.softsign against NumPy references."""
    def fsoftsign(a):
        # softsign(a) = a / (1 + |a|)
        return np.divide(a, (1.0 + np.abs(a)))
    def fsoftsign_grad(a):
        # d/da softsign(a) = 1 / (1 + |a|)^2
        return np.divide(1.0, np.square((1.0 + np.abs(a))))
    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.softsign(x)
    xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
    ya = fsoftsign(xa)
    ya_grad = fsoftsign_grad(xa)
    check_numeric_gradient(y, [xa], numeric_eps=1E-3)
    check_symbolic_forward(y, [xa], [ya])
    check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad]) 
Example 63
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_cbrt_op():
    """Forward and finite-difference gradient checks for mx.sym.cbrt."""
    eps = 2**(-11)
    samples = np.random.rand(3, 4) * 10 - 5
    # cbrt has an infinite derivative at the origin, which breaks the
    # finite-difference check; swap near-zero inputs for 1.0. The factor
    # of 4 was set empirically for this eps (issue exposed by seed
    # 553872106).
    samples[abs(samples) < 4*eps] = 1.0
    data = mx.symbol.Variable('data')
    sym = mx.sym.cbrt(data)

    check_numeric_gradient(sym, [samples], numeric_eps=eps)
    check_symbolic_forward(sym, [samples], [np.cbrt(samples)]) 
Example 64
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def test_rcbrt_op():
    """Forward and finite-difference gradient checks for mx.sym.rcbrt."""
    eps = 2**(-11)
    samples = np.random.rand(3, 4) * 10 - 5
    # rcbrt diverges at the origin (division by zero) and the numeric
    # gradient is inaccurate nearby; swap near-zero inputs for 1.0. The
    # factor of 4 was set empirically for this eps (issue exposed by seed
    # 788174893).
    samples[abs(samples) < 4*eps] = 1.0
    data = mx.symbol.Variable('data')
    sym = mx.sym.rcbrt(data)

    check_numeric_gradient(sym, [samples], numeric_eps = eps)
    check_symbolic_forward(sym, [samples], [1/np.cbrt(samples)]) 
Example 65
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def _syevd_forw_eigvec_sign(v):
    ind = np.argmax(np.abs(v))
    if v[ind] < 0.:
        v[:] = -v 
Example 66
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def np_smooth_l1(x, sigma):
    """NumPy reference for smooth-L1: quadratic below 1/sigma^2, linear above."""
    threshold = 1. / sigma / sigma
    magnitude = np.abs(x)
    quadratic = 0.5 * (x * sigma) ** 2
    linear = magnitude - 0.5 * threshold
    return np.where(magnitude < threshold, quadratic, linear) 
Example 67
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 5 votes vote down vote up
def np_smooth_l1_grad(x, sigma):
    """Derivative of np_smooth_l1: x*sigma^2 in the quadratic zone, sign(x) outside."""
    ssq = sigma * sigma
    in_quadratic_zone = np.abs(x) < 1. / ssq
    return np.where(in_quadratic_zone, x * ssq, np.sign(x))

# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data 
Example 68
Project: Black-Box-Audio   Author: rtaori   File: run_audio_attack.py    MIT License 4 votes vote down vote up
def run(self, log=None):
        """Run the black-box attack loop until the decoded transcription
        matches the target phrase or the iteration budget is exhausted.

        Alternates between a genetic phase (while edit distance > 2) and a
        coordinate-wise gradient-estimation phase for final refinement.

        :param log: optional writable file-like; receives progress lines
        :return: True if the loop ran past the iteration budget
        """
        max_fitness_score = float('-inf')
        dist = float('inf')
        best_text = ''
        itr = 1
        prev_loss = None
        if log is not None:
            log.write('target phrase: ' + self.target_phrase + '\n')
            log.write('itr, corr, lev dist \n')

        while itr <= self.max_iters and best_text != self.target_phrase:
            # Score current population and keep the elite individuals.
            pop_scores, ctc = self.get_fitness_score(self.pop, self.target_phrase, self.input_audio)
            elite_ind = np.argsort(pop_scores)[-self.elite_size:]
            elite_pop, elite_pop_scores, elite_ctc = self.pop[elite_ind], pop_scores[elite_ind], ctc[elite_ind]

            # Adapt mutation probability: bigger loss change -> smaller bump.
            if prev_loss is not None and prev_loss != elite_ctc[-1]:
                self.mutation_p = self.mu * self.mutation_p + self.alpha / np.abs(prev_loss - elite_ctc[-1])

            # Periodic progress report: decode the current best candidate.
            if itr % 10 == 0:
                print('**************************** ITERATION {} ****************************'.format(itr))
                print('Current loss: {}'.format(-elite_ctc[-1]))
                save_wav(elite_pop[-1], self.output_wave_file)
                best_pop = np.tile(np.expand_dims(elite_pop[-1], axis=0), (100, 1))
                _, best_text = self.get_fitness_score(best_pop, self.target_phrase, self.input_audio, classify=True)

                dist = levenshteinDistance(best_text, self.target_phrase)
                corr = "{0:.4f}".format(np.corrcoef([self.input_audio, elite_pop[-1]])[0][1])
                print('Audio similarity to input: {}'.format(corr))
                print('Edit distance to target: {}'.format(dist))
                print('Currently decoded as: {}'.format(best_text))
                if log is not None:
                    log.write(str(itr) + ", " + corr + ", " + str(dist) + "\n")

            if dist > 2:
                # Genetic phase: breed a new population from the elite.
                next_pop = get_new_pop(elite_pop, elite_pop_scores, self.pop_size)
                self.pop = mutate_pop(next_pop, self.mutation_p, self.noise_stdev, elite_pop)
                prev_loss = elite_ctc[-1]

            else:
                # Refinement phase: estimate a gradient by perturbing a
                # random subset of sample positions one at a time.
                perturbed = np.tile(np.expand_dims(elite_pop[-1], axis=0), (self.num_points_estimate, 1))
                indices = np.random.choice(self.pop.shape[1], size=self.num_points_estimate, replace=False)

                perturbed[np.arange(self.num_points_estimate), indices] += self.delta_for_gradient
                perturbed_scores = self.get_fitness_score(perturbed, self.target_phrase, self.input_audio)[0]

                # Finite-difference gradient, normalized to unit max.
                grad = (perturbed_scores - elite_ctc[-1]) / self.delta_for_gradient
                grad /= np.abs(grad).max()
                modified = elite_pop[-1].copy()
                modified[indices] += grad * self.delta_for_perturbation

                self.pop = np.tile(np.expand_dims(modified, axis=0), (self.pop_size, 1))
                # Decay the perturbation magnitude over time.
                self.delta_for_perturbation *= 0.995

            itr += 1

        # NOTE(review): the loop above is bounded by `self.max_iters`, but
        # this reads `self.max_iterations` — confirm that attribute exists
        # or whether this should also be `self.max_iters`.
        return itr > self.max_iterations 
Example 69
Project: projection-methods   Author: akshayka   File: affine_set.py    GNU General Public License v3.0 4 votes vote down vote up
def query(self, x_0, data_hyperplanes=0, policy='random'):
        """As ConvexSet.query, but returns a Hyperplane

        Args:
            x_0 (array-like): query point
            data_hyperplanes: number of data hyperplanes to include per query
                              call
            policy: policy to use when gathering data hyperplanes; one of
                {'random', 'largest_residual'}.
        Returns:
            array-like: the projection of x_0 onto the set
            list of Hyperplane: a hyperplane of the form <a, x> = b
                in which every point x in the affine set must lie
        """
        x_star = self.project(x_0)
        # x_0 already in the set: nothing to separate.
        if np.array_equal(x_star, x_0):
            return x_0, []

        hyperplanes = []
        # a.dot(y - x_star) == 0, for all y in affine set
        # <==> a.dot(y) == a.dot(x_star)
        a = x_0 - x_star
        b = a.dot(x_star)
        if abs(b) < 1e-7:
            # If the affine set is in fact a subspace, this
            # will always be triggered.
            b = np.array([0])
        hyperplanes.append(Hyperplane(x=self._x , a=a, b=b))

        if data_hyperplanes > 0:
            if policy == 'random':
                indices = np.random.permutation(np.prod(self.b.shape))
            elif policy == 'largest_residual':
                # sort the indices of the residuals in decreasing order
                r = np.abs(self.A.dot(x_0) - self.b)
                indices = np.flipud(np.argsort(r))
            else:
                raise ValueError('Unknown policy %s' % policy)
            # Take the first `data_hyperplanes` rows not used before;
            # self.chosen_rows persists across query calls.
            num_chosen = 0
            for idx in indices:
                if num_chosen >= data_hyperplanes:
                    break
                if idx not in self.chosen_rows:
                    self.chosen_rows.add(idx)
                    num_chosen += 1
                    hyperplanes.append(Hyperplane(x=self._x,
                        a=self.A.getrow(idx).T, b=np.array(self.b[idx])))
        self._info.extend(hyperplanes)
        return x_star, hyperplanes 
Example 70
Project: projection-methods   Author: akshayka   File: test_nonneg.py    GNU General Public License v3.0 4 votes vote down vote up
def test_projection(self):
        """Test projection, query methods of the NonNeg oracle.

        Each case compares the oracle's closed-form projection against a
        cvxpy-solved minimum-distance problem.
        """
        x = cvxpy.Variable(1000)
        nonneg = NonNeg(x)
        constr = nonneg._constr

        # Test idempotency
        x_0 = np.abs(np.random.randn(1000))
        x_star = nonneg.project(x_0)
        self.assertTrue(nonneg.contains(x_star))
        self.assertTrue(np.array_equal(x_0, x_star), "projection not "
            "idemptotent")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, nonneg, idempotent=True)
        

        # Test the case in which x_0 <= 0: projection must be the origin.
        x_0 = -1 * np.abs(np.random.randn(1000))
        x_star = nonneg.project(x_0)
        self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
            "projection not zero")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, nonneg, idempotent=False)

        # Test the case in which x_0 == 0 (idempotent as well)
        x_0 = np.zeros(1000)
        x_star = nonneg.project(x_0)
        self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
            "projection not zero")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, nonneg, idempotent=True)

        # Test random projection: only the cvxpy solution is available as
        # a reference here.
        x_0 = np.random.randn(1000)
        x_star = nonneg.project(x_0)
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all()) 
Example 71
Project: projection-methods   Author: akshayka   File: test_zeros.py    GNU General Public License v3.0 4 votes vote down vote up
def test_projection(self):
        """Test projection, query methods of the Zeros oracle.

        Projection onto {0} always yields the zero vector; every case
        cross-checks against a cvxpy-solved minimum-distance problem.
        """
        x = cvxpy.Variable(1000)
        zeros = Zeros(x)
        constr = zeros._constr

        # Test idempotency
        x_0 = np.zeros(1000)
        x_star = zeros.project(x_0)
        self.assertTrue(zeros.contains(x_star))
        self.assertTrue(np.array_equal(x_0, x_star), "projection not "
            "idemptotent")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, zeros, idempotent=True)
        

        # Test the case in which x_0 >= 0
        x_0 = np.abs(np.random.randn(1000))
        x_star = zeros.project(x_0)
        self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
            "projection not  zero")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, zeros, idempotent=False)

        # Test the case in which x_0 <= 0
        x_0 = -1 * np.abs(np.random.randn(1000))
        x_star = zeros.project(x_0)
        self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
            "projection not  zero")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all())
        utils.query_helper(self, x_0, x_star, zeros, idempotent=False)

        # Test random projection
        x_0 = np.random.randn(1000)
        x_star = zeros.project(x_0)
        self.assertTrue(np.array_equal(x_star, np.zeros(1000)),
            "projection not  zero")
        p = cvxpy.Problem(cvxpy.Minimize(cvxpy.pnorm(x - x_0, 2)), constr)
        p.solve()
        self.assertTrue(np.isclose(np.array(x.value).flatten(), x_star,
            atol=1e-3).all()) 
Example 72
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions.py    MIT License 4 votes vote down vote up
def arctanh(x):
	''' Compute the hyperbolic arc tangent of an AutoDiff object and its derivative.

	Dispatches over three input kinds via duck typing (EAFP):
	AutoDiff (has .val/.der/.jacobian) -> Dual (has .Real/.Dual) -> plain
	numeric constant.

	INPUTS
	======
	x: an AutoDiff object, a Dual object, or a numeric constant

	RETURNS
	=======
	A new AutoDiff (or Dual) object with calculated value and derivative,
	or a plain numpy value when x is a constant.

	EXAMPLES
	========
	>>> x = AutoDiff(0.5, 2)
	>>> myAutoDiff = arctanh(x)
	>>> myAutoDiff.val
	0.5493061443340548
	>>> myAutoDiff.der
	2/(1-(0.5)**2)
	>>> myAutoDiff.jacobian
	1/(1-(0.5)**2)

	'''
	try:
		# AutoDiff branch: d/dx arctanh(x) = 1 / (1 - x^2); scale the
		# incoming derivative and jacobian by it (chain rule).
		new_val = np.arctanh(x.val)
		new_der = ((1)/(1-x.val**2))*x.der
		new_jacobian = ((1)/(1-x.val**2))*x.jacobian
		return AutoDiff(new_val, new_der, x.n, 0, new_jacobian)
	except AttributeError:
		# x has no .val/.der/.jacobian -- try the Dual-number interface.
		try:
			if(np.abs(x.Real)==1):
				# arctanh has vertical asymptotes at +/-1; exact float
				# equality appears intended here (only the exact poles
				# are flagged), so both parts are set to infinity.
				real = np.inf
				dual = np.inf
				warnings.warn('Undefined at value', RuntimeWarning)
			else:
				real = np.arctanh(x.Real)
				dual = x.Dual/(1-x.Real**2)
			return Dual(real, dual)
		except AttributeError:
			try:
				# NOTE(review): if x lacked .Real, this raises the same
				# AttributeError again, so this branch looks reachable only
				# for exotic inputs whose .Real exists but for which the
				# branch above raised AttributeError (presumably nested
				# Dual numbers) -- confirm against the Dual class.
				return Dual(arctanh(x.Real), x.Dual/(1-x.Real**2))
			except AttributeError:
				# Plain numeric constant: no derivative information to carry.
				return_val = np.arctanh(x)
				return return_val


#--------------------------EXPONENT FAMILY----------------------------#
# Exponential 
Example 73
Project: Tacotron   Author: ElwynWang   File: signal_process.py    GNU General Public License v3.0 4 votes vote down vote up
def get_spectrograms(fpath):
    '''Returns normalized log(melspectrogram) and log(magnitude) from `sound_file`.
    Args:
      fpath: A string. The full path of a sound file.

    Returns:
      mel: A 2d array of shape (T, n_mels) <- Transposed
      mag: A 2d array of shape (T, 1+n_fft/2) <- Transposed
    '''
    # Load the audio at the configured sample rate and strip leading /
    # trailing silence.
    wav, _ = librosa.load(fpath, sr=Hp.sample_rate)
    wav, _ = librosa.effects.trim(wav)

    # First-order pre-emphasis: y[t] = x[t] - alpha * x[t-1]; the first
    # sample passes through unchanged.
    emphasized = np.append(wav[0], wav[1:] - Hp.preemphasis * wav[:-1])

    # Short-time Fourier transform -> complex spectrogram.
    stft_out = librosa.stft(y=emphasized,
                            n_fft=Hp.num_fft,
                            hop_length=Hp.hop_length,
                            win_length=Hp.win_length)

    # Magnitude spectrogram, shape (1+n_fft//2, T).
    mag = np.abs(stft_out)

    # Project onto the mel filterbank, shape (n_mels, T).
    fbank = librosa.filters.mel(Hp.sample_rate, Hp.num_fft, Hp.num_mels)
    mel = np.dot(fbank, mag)

    # Convert to decibels (amplitudes floored at 1e-5 to avoid log(0)),
    # then normalize into (0, 1].
    mel = 20 * np.log10(np.maximum(1e-5, mel))
    mag = 20 * np.log10(np.maximum(1e-5, mag))
    mel = np.clip((mel - Hp.ref_db + Hp.max_db) / Hp.max_db, 1e-8, 1)
    mag = np.clip((mag - Hp.ref_db + Hp.max_db) / Hp.max_db, 1e-8, 1)

    # Time-major float32 outputs.
    return mel.T.astype(np.float32), mag.T.astype(np.float32)
Example 74
Project: dc_tts   Author: Kyubyong   File: utils.py    Apache License 2.0 4 votes vote down vote up
def get_spectrograms(fpath):
    '''Parse the wave file in `fpath` and
    Returns normalized melspectrogram and linear spectrogram.

    Args:
      fpath: A string. The full path of a sound file.

    Returns:
      mel: A 2d array of shape (T, n_mels) and dtype of float32.
      mag: A 2d array of shape (T, 1+n_fft/2) and dtype of float32.
    '''
    def _normalize_db(spec):
        # 20*log10 amplitude (floored to avoid log(0)), shifted by the
        # reference level and squashed into (0, 1].
        db = 20 * np.log10(np.maximum(1e-5, spec))
        return np.clip((db - hp.ref_db + hp.max_db) / hp.max_db, 1e-8, 1)

    # Load at the configured rate, then trim silent edges.
    audio, _ = librosa.load(fpath, sr=hp.sr)
    audio, _ = librosa.effects.trim(audio)

    # First-order pre-emphasis; the first sample passes through unchanged.
    audio = np.append(audio[0], audio[1:] - hp.preemphasis * audio[:-1])

    # Complex STFT -> linear-magnitude spectrogram, shape (1+n_fft//2, T).
    mag = np.abs(librosa.stft(y=audio,
                              n_fft=hp.n_fft,
                              hop_length=hp.hop_length,
                              win_length=hp.win_length))

    # Mel filterbank projection, shape (n_mels, T).
    mel = np.dot(librosa.filters.mel(hp.sr, hp.n_fft, hp.n_mels), mag)

    # Normalize, transpose to time-major, and cast to float32.
    mel = _normalize_db(mel).T.astype(np.float32)  # (T, n_mels)
    mag = _normalize_db(mag).T.astype(np.float32)  # (T, 1+n_fft//2)

    return mel, mag
Example 75
Project: core   Author: lifemapper   File: mcpa.py    GNU General Public License v3.0 4 votes vote down vote up
def get_p_values(observed_value, test_values, num_permutations=None):
    """Gets an array of P-Values

    Gets an (1 or 2 dimension) array of P values where the P value for an
        array location is determined by finding the number of test values at
        corresponding locations that are greater than or equal to that value
        and then dividing that number by the number of permutations.

    Args:
        observed_value (Matrix): An array of observed values to use as a
            reference.  It is not modified by this function.
        test_values (Matrix): A list of arrays generated from randomizations
            that will be compared to the observed
        num_permutations: (optional) The total number of randomizations
            performed.  Divide the P-values by this if provided.

    Returns:
        Matrix: P-values with a depth header of ['P-values'].

    Todo:
        Deprecate this in favor of new method that is more flexible
    """
    if num_permutations is None:
        num_permutations = 1.0

    # Count, per cell, how many randomized values are at least as extreme
    # (absolute value, rounded to 5 decimals) as the observed value.  Boolean
    # comparison matrices add 1 for True and 0 for False.
    p_vals = np.zeros(observed_value.data.shape, dtype=float)
    # Hoisted out of the loop: this reference term is loop-invariant.
    observed_abs = np.abs(np.round(observed_value.data, 5))
    for test_mtx in test_values:
        if test_mtx.data.ndim == 3:
            # A stack of randomizations: compare each layer separately.
            for i in range(test_mtx.data.shape[2]):
                p_vals += np.abs(np.round(test_mtx.data[:, :, [i]], 5)) >= observed_abs
        else:
            p_vals += np.abs(np.round(test_mtx.data, 5)) >= observed_abs
    # Reshape and add a depth header
    if len(p_vals.shape) == 2:
        p_vals = np.expand_dims(p_vals, axis=2)
    # BUG FIX: copy the headers before adding the depth header; the original
    # wrote 'P-values' into observed_value.headers in place, mutating the
    # caller's matrix.  (Assumes headers is a plain dict -- matches usage.)
    headers = dict(observed_value.headers)
    headers['2'] = ['P-values']
    # Scale and return the p-values matrix
    ret_data_tmp = np.nan_to_num(p_vals / num_permutations)
    return Matrix(np.clip(ret_data_tmp, -1.0, 1.0), headers=headers)

# ............................................................................. 
Example 76
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_prelu():
    """Test LeakyReLU with act_type='prelu' against NumPy reference
    implementations of the forward pass and the analytic gradients, for
    2-D and 4-D inputs, per-channel and broadcast gamma, and three dtypes."""
    def fprelu(x, gamma):
        # Reference forward: y = x where x > 0, gamma * x elsewhere.
        pos_indices = x > 0
        out = x.copy()
        if len(x.shape) == 4:
            # NCHW input: move the channel axis last so the per-channel
            # gamma broadcasts, multiply, then transpose back (the
            # permutation (2,3,0,1) is its own inverse).
            out = out.transpose(2,3,0,1)
            out = np.multiply(out, gamma)
            out = out.transpose(2,3,0,1)
        else:
            out = np.multiply(out, gamma)
        out[pos_indices] = x[pos_indices]
        return out
    def fprelu_grad(x, y, gamma):
        # Reference gradients: dy/dx = 1 where x > 0, gamma elsewhere;
        # dy/dgamma = x on the non-positive region, reduced to gamma's shape.
        pos_indices = x > 0
        if len(x.shape) == 4:
            grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
            grad_x = grad_x.transpose(2,3,0,1)
        else:
            grad_x = np.multiply(np.ones(x.shape), gamma)
        grad_gam = np.zeros(gamma.shape)
        copy_x = x.copy()
        copy_x[pos_indices] = 0.0  # keep only the x <= 0 contributions
        grad_x[pos_indices] = 1.0  # positive side has slope 1
        # Reduce copy_x down to gamma's shape depending on the broadcast case.
        if len(gamma.shape) > 1 and len(x.shape) != 4:
            grad_gam = copy_x
        elif len(gamma.shape) > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(2,3))
        elif gamma.shape[0] == 1:
            grad_gam = np.sum(np.sum(copy_x))
        elif gamma.shape[0] > 1 and len(x.shape) != 4:
            grad_gam = np.sum(copy_x, axis=0)
        elif gamma.shape[0] > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(0,2,3))
        return (grad_x, grad_gam)
    x = mx.symbol.Variable("x")
    gamma = mx.symbol.Variable("gamma")
    for shape in [(3,4), (3,4,4,5)]:
        for dtype in [np.float16, np.float32, np.float64]:
            for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
                gam_full = np.array([gam, gam, gam])
                xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
                rtol = 1e-2
                atol = 1e-3
                eps = 1e-4
                # Push near-zero inputs away from the ReLU kink so the
                # numeric gradient check is well conditioned.
                xa[abs(xa) < eps] = 1.0
                y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
                ya = fprelu(xa, gam)
                ya_full = fprelu(xa, gam_full)
                g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
                g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                    check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
                                        [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
Example 77
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
    """Check an MXNet binary operator's forward pass against a NumPy baseline.

    Args:
        symbol: MXNet symbol with input variables named 'a' and 'b'.
        baseline: callable (a, b) -> expected NumPy result.
        gen_data: callable (i) -> (a, b) pair of NumPy arrays for sample i.
        rtol, atol: relative/absolute tolerances for the comparison.
        mx_nd_func: optional NDArray-level function that must also agree
            with the symbolic output.
    """
    sample_num = 200
    for i in range(sample_num):
        d = gen_data(i)
        y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
        y.forward(is_train=True)
        y = y.outputs[0].asnumpy()
        x = baseline(d[0], d[1]).astype(y.dtype)

        if mx_nd_func is not None:
            # The NDArray API must match the symbolic result as well.
            d0 = mx.nd.array(d[0], dtype=d[0].dtype)
            d1 = mx.nd.array(d[1], dtype=d[1].dtype)
            assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
        # Element-wise mismatch mask, using the same tolerance formula as
        # assert_allclose: |x - y| > atol + rtol * |x|.
        idx = np.abs(x-y) > atol+rtol*np.abs(x)
        if idx.any():
            # Dump the offending elements (decimal and raw hex) before the
            # assertion below fails, to aid debugging precision problems.
            import binascii
            np.set_printoptions(precision=20)
            logging.error('found precision problem:')
            d[0] = np.broadcast_to(d[0], x.shape)
            d[1] = np.broadcast_to(d[1], x.shape)
            logging.error('input a: {}'.format(d[0][idx]))
            logging.error('input b: {}'.format(d[1][idx]))
            logging.error("output x: {} {}".format(x.dtype, x))
            logging.error("output y: {} {}".format(y.dtype, y))
            def ftohex(xs):
                # Render each float's raw IEEE-754 double bytes as hex.
                import struct
                return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
            logging.error('output x in baseline(a, b): {}'.format(x[idx]))
            logging.error('output y in symbol(a, b): {}'.format(y[idx]))
            logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
            logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
            logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            # BUG FIX: this line logged d[1] under the label 'input a hex'.
            logging.error('input b hex: {}'.format(ftohex(d[1][idx])))

            logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
        assert_allclose(y, x, rtol=rtol, atol=atol)
Example 78
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
    """NumPy reference implementation of the Correlation operator's forward pass.

    Args:
        data1, data2: NCHW arrays of identical shape.
        pad_size: zero padding added to every spatial border.
        kernel_size: side length of the square comparison patch.
        stride1: stride over output positions in data1.
        stride2: stride over displacement positions in data2.
        max_displacement: maximum displacement searched in each direction.
        is_multiply: correlate by product if True, by absolute difference
            otherwise.

    Returns:
        (out, tmp1, tmp2): the (N, D*D, H', W') correlation map -- D being
        the displacement-grid width -- plus the two zero-padded inputs.
    """
    num, channels = data1.shape[0], data1.shape[1]

    # Compute output's dimension (mirrors the operator's geometry).
    paddedbottomheight = data1.shape[2] + 2 * pad_size
    paddedbottomwidth = data1.shape[3] + 2 * pad_size
    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    top_width = (paddedbottomwidth - border_size * 2) // stride1
    top_height = (paddedbottomheight - border_size * 2) // stride1
    neighborhood_grid_radius = max_displacement // stride2
    neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
    top_channels = neighborhood_grid_width * neighborhood_grid_width

    out = np.zeros((num, top_channels, top_height, top_width))
    tmp1 = np.zeros((num, channels, paddedbottomheight, paddedbottomwidth))
    tmp2 = np.zeros((num, channels, paddedbottomheight, paddedbottomwidth))

    # Zero-pad both inputs.
    tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:, :, :, :]
    tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:, :, :, :]

    for i in range(top_height):
        for j in range(top_width):
            # Top-left corner of the patch in padded data1 for output (i, j).
            x1 = j * stride1 + max_displacement
            y1 = i * stride1 + max_displacement
            patch1 = tmp1[:, :, y1:y1 + kernel_size, x1:x1 + kernel_size]

            for top_channel in range(top_channels):
                # Displacement (s2o, s2p) encoded by this output channel.
                s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
                s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2

                # Corresponding patch location in padded data2.
                x2 = x1 + s2o
                y2 = y1 + s2p
                patch2 = tmp2[:, :, y2:y2 + kernel_size, x2:x2 + kernel_size]

                # Vectorized over batch, channel and kernel window; replaces
                # the original per-element Python loops (same arithmetic).
                if is_multiply:
                    out[:, top_channel, i, j] = np.sum(patch1 * patch2, axis=(1, 2, 3))
                else:
                    out[:, top_channel, i, j] = np.sum(np.abs(patch1 - patch2), axis=(1, 2, 3))
    # Normalize by patch size: kernel area times channel count.
    out /= float(kernel_size**2 * channels)
    return out, tmp1, tmp2
Example 79
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_operator.py    Apache License 2.0 4 votes vote down vote up
def test_norm():
    """Test mx.symbol.norm (ord=1 and ord=2) against NumPy/SciPy references
    over single axes and axis pairs, checking forward and backward passes."""
    try:
        import scipy
        assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
        from scipy.linalg import norm as sp_norm
    except (AssertionError, ImportError):
        print("Could not import scipy.linalg.norm or scipy is too old. "
              "Falling back to numpy.linalg.norm which is not numerically stable.")
        from numpy.linalg import norm as sp_norm

    def l1norm(input_data, axis=0, keepdims=True):
        # L1 norm: sum of absolute values along `axis`.
        return np.sum(abs(input_data), axis=axis, keepdims=keepdims)

    def l2norm(input_data, axis=0, keepdims=True):
        # L2 norm via scipy when available (numerically stabler).
        return sp_norm(input_data, axis=axis, keepdims=keepdims)

    ctx = default_context()
    data = mx.symbol.Variable('data')
    in_data_dim = random_sample([4,5,6], 1)[0]
    in_shape = rand_shape_nd(in_data_dim)
    epsilon = 1e-3
    for order in [1, 2]:
        for dtype in [np.float16, np.float32, np.float64]:
            in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
            # Keep values away from zero so sign()/division in the backward
            # reference stays well defined.
            in_data[abs(in_data) < epsilon] = 2 * epsilon
            for i in range(in_data_dim):
                norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, keepdims=True)
                # BUG FIX: `order is 1` compared an int by identity, which
                # relies on CPython small-int interning and raises a
                # SyntaxWarning on Python >= 3.8; use == instead.
                npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                check_symbolic_forward(norm_sym, [in_data], [npy_out],
                                        rtol=1e-2 if dtype is np.float16 else 1e-5,
                                        atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
                check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape)],
                                        [npy_out_backward],
                                        rtol=1e-2 if dtype is np.float16 else 1e-5,
                                        atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
                # Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
                # # check gradient
                # if dtype is not np.float16:
                #     check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3)
                if i < in_data_dim-1:
                    # Also exercise reduction over a pair of adjacent axes.
                    norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                    npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                    npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                    check_symbolic_forward(norm_sym, [in_data], [npy_out],
                                           rtol=1e-2 if dtype is np.float16 else 1e-5,
                                           atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
                    check_symbolic_backward(norm_sym, [in_data], [np.ones(npy_out.shape)],
                                            [npy_out_backward],
                                            rtol=1e-2 if dtype is np.float16 else 1e-5,
                                            atol=1e-2 if dtype is np.float16 else 1e-5, ctx=ctx)
                    # # check gradient
                    # if dtype is not np.float16:
                    #     check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon, rtol=1e-1, atol=1e-3)
Example 80
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_quantization.py    Apache License 2.0 4 votes vote down vote up
def test_requantize_int32_to_int8():
    """Test mx.nd.contrib.requantize (int32 -> int8) against a NumPy baseline,
    both with and without explicit calibration ranges."""
    def quantized_int32_to_float(qdata, min_range, max_range):
        # Dequantize: map the symmetric int32 range back to floats using the
        # larger magnitude of the two range endpoints.
        assert qdata.dtype == 'int32'
        quantized_range = np.iinfo('int32').max
        real_range = np.maximum(np.abs(min_range), np.abs(max_range))
        scale = float(real_range) / float(quantized_range)
        return qdata.astype('float32') * scale

    def float_to_quantized_int8(data, min_range, max_range):
        # Quantize floats to int8 symmetrically; adding 0.5 before the
        # truncating int8 cast gives round-half-away-from-zero, clamped
        # to the int8 maximum.
        assert data.dtype == 'float32'
        real_range = np.maximum(np.abs(min_range), np.abs(max_range))
        quantized_range = np.iinfo('int8').max
        scale = float(quantized_range) / float(real_range)
        return (np.sign(data) * np.minimum(np.abs(data) * scale + 0.5, quantized_range)).astype('int8')

    def requantize(qdata, min_data, max_data, real_range):
        # int32 -> float -> int8 using the symmetric range [-real_range, real_range].
        data = quantized_int32_to_float(qdata, min_data, max_data)
        output = float_to_quantized_int8(data, -real_range, real_range)
        return output, -real_range, real_range

    def requantize_baseline(qdata, min_data, max_data, min_calib_range=None, max_calib_range=None):
        # With calibration: use the calibrated range.  Without: derive the
        # range from the actual min/max of the dequantized data.
        if min_calib_range is not None and max_calib_range is not None:
            real_range = np.maximum(np.abs(min_calib_range), np.abs(max_calib_range))
            return requantize(qdata, min_data, max_data, real_range)
        else:
            min_range = quantized_int32_to_float(np.min(qdata), min_data, max_data)
            max_range = quantized_int32_to_float(np.max(qdata), min_data, max_data)
            return requantize(qdata, min_data, max_data, np.maximum(np.abs(min_range), np.abs(max_range)))

    def check_requantize(shape, min_calib_range=None, max_calib_range=None):
        # Compare the operator against the NumPy baseline; atol=1 on the
        # int8 data tolerates off-by-one rounding differences.
        qdata = mx.nd.random.uniform(low=-1000.0, high=1000.0, shape=shape).astype('int32')
        min_range = mx.nd.array([-1010.0])
        max_range = mx.nd.array([1020.0])
        if min_calib_range is None or max_calib_range is None:
            qdata_int8, min_output, max_output = mx.nd.contrib.requantize(qdata, min_range, max_range)
        else:
            qdata_int8, min_output, max_output = mx.nd.contrib.requantize(qdata, min_range, max_range,
                                                                          min_calib_range, max_calib_range)

        qdata_int8_np, min_output_np, max_output_np = requantize_baseline(qdata.asnumpy(), min_range.asscalar(),
                                                                          max_range.asscalar(),
                                                                          min_calib_range=min_calib_range,
                                                                          max_calib_range=max_calib_range)
        assert_almost_equal(qdata_int8.asnumpy(), qdata_int8_np, atol = 1)
        assert_almost_equal(min_output.asnumpy(), np.array([min_output_np]))
        assert_almost_equal(max_output.asnumpy(), np.array([max_output_np]))

    check_requantize((3, 4, 10, 10))
    check_requantize((32, 3, 23, 23))
    check_requantize((3, 4, 10, 10), min_calib_range=-1050.0, max_calib_range=1040.0)
    check_requantize((32, 3, 23, 23), min_calib_range=-134.349, max_calib_range=523.43)