Python numpy.sqrt() Examples

The following are 28 code examples of numpy.sqrt(). You can go to the original project or source file by following the links above each example, or browse the other available functions and classes of the numpy module.
Example #1
Source File: spectrum_painter.py    From spectrum_painter with MIT License
def convert_image(self, filename):
        pic = img.imread(filename)
        # Set the FFT size to double the image width so that the edges of the
        # spectrum stay clear, preventing band-filter artifacts
        self.NFFT = 2*pic.shape[1]

        # Repeat each image line often enough to reach the desired line time
        ffts = (np.flipud(np.repeat(pic[:, :, 0], self.repetitions, axis=0) / 16.)**2.) / 256.

        # Embed image in center bins of the FFT
        fftall = np.zeros((ffts.shape[0], self.NFFT))
        startbin = int(self.NFFT/4)
        fftall[:, startbin:(startbin+pic.shape[1])] = ffts

        # Generate random phase vectors for the FFT bins, this is important to prevent high peaks in the output
        # The phases won't be visible in the spectrum
        phases = 2*np.pi*np.random.rand(*fftall.shape)
        rffts = fftall * np.exp(1j*phases)

        # Perform the FFT per image line, then concatenate them to form the final signal
        timedata = np.fft.ifft(np.fft.ifftshift(rffts, axes=1), axis=1) / np.sqrt(float(self.NFFT))
        linear = timedata.flatten()
        linear = linear / np.max(np.abs(linear))
        return linear 
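The division by np.sqrt(float(self.NFFT)) keeps the per-line signal power independent of the FFT size, and the random phases stop the bins from adding coherently into one large time-domain peak. A minimal standalone sketch of that effect on hypothetical data (not part of spectrum_painter):

import numpy as np

mags = np.zeros(256)
mags[100:156] = 1.0  # a block of occupied bins, like one image line

# Zero phases: all bins add coherently, producing a large time-domain peak
flat = np.fft.ifft(np.fft.ifftshift(mags)) / np.sqrt(256.0)
# Random phases: same magnitude spectrum, but the energy is spread out
rand = np.fft.ifft(np.fft.ifftshift(mags * np.exp(2j * np.pi * np.random.rand(256)))) / np.sqrt(256.0)

print(np.abs(flat).max())  # large peak
print(np.abs(rand).max())  # much smaller peak, same total energy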
Example #2
Source File: von_mises_stress.py    From fenics-topopt with MIT License
def calculate_diff_stress(self, x, u, nu, side=1):
        """
        Calculate the derivative of the von Mises stress given the densities x,
        displacements u, and Young's modulus nu. Optionally, provide the side
        length (default: 1).
        """
        rho = self.penalized_densities(x)
        EB = self.E(nu).dot(self.B(side))
        EBu = sum([EB.dot(u[:, i][self.edofMat]) for i in range(u.shape[1])])
        s11, s22, s12 = numpy.hsplit((EBu * rho / float(u.shape[1])).T, 3)
        drho = self.diff_penalized_densities(x)
        ds11, ds22, ds12 = numpy.hsplit(
            ((1 - rho) * drho * EBu / float(u.shape[1])).T, 3)
        vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
        if abs(vm_stress).sum() > 1e-8:
            dvm_stress = (0.5 * (1. / vm_stress) * (2 * s11 * ds11 -
                ds11 * s22 - s11 * ds22 + 2 * s22 * ds22 + 6 * s12 * ds12))
            return dvm_stress
        return 0 
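The dvm_stress expression is the chain rule d sqrt(f) = f' / (2 * sqrt(f)) applied to f = s11**2 - s11*s22 + s22**2 + 3*s12**2. A standalone finite-difference check with hypothetical scalar values (independent of fenics-topopt):

import numpy as np

s11, s22, s12 = 2.0, 1.0, 0.5      # stresses
ds11, ds22, ds12 = 0.3, -0.2, 0.1  # their derivatives w.r.t. some parameter t

vm = np.sqrt(s11**2 - s11*s22 + s22**2 + 3*s12**2)
dvm = 0.5 / vm * (2*s11*ds11 - ds11*s22 - s11*ds22 + 2*s22*ds22 + 6*s12*ds12)

# compare against a finite difference along the same direction
h = 1e-6
vm_h = np.sqrt((s11 + h*ds11)**2 - (s11 + h*ds11)*(s22 + h*ds22)
               + (s22 + h*ds22)**2 + 3*(s12 + h*ds12)**2)
print(dvm, (vm_h - vm) / h)  # the two values agree to ~1e-6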
Example #3
Source File: initializations.py    From Att-ChemdNER with Apache License 2.0
def get_fans(shape, dim_ordering='th'):
    if len(shape) == 2:
        fan_in = shape[0]
        fan_out = shape[1]
    elif len(shape) == 4 or len(shape) == 5:
        # assuming convolution kernels (2D or 3D).
        # TH kernel shape: (depth, input_depth, ...)
        # TF kernel shape: (..., input_depth, depth)
        if dim_ordering == 'th':
            receptive_field_size = np.prod(shape[2:])
            fan_in = shape[1] * receptive_field_size
            fan_out = shape[0] * receptive_field_size
        elif dim_ordering == 'tf':
            receptive_field_size = np.prod(shape[:2])
            fan_in = shape[-2] * receptive_field_size
            fan_out = shape[-1] * receptive_field_size
        else:
            raise ValueError('Invalid dim_ordering: ' + dim_ordering)
    else:
        # no specific assumptions
        fan_in = np.sqrt(np.prod(shape))
        fan_out = np.sqrt(np.prod(shape))
    return fan_in, fan_out 
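Assuming numpy is imported as np and get_fans is in scope, a quick usage sketch for a dense layer and a hypothetical Theano-ordered convolution kernel:

print(get_fans((784, 256)))            # (784, 256) for a dense layer
print(get_fans((32, 16, 3, 3), 'th'))  # (144, 288): fan_in = 16*3*3, fan_out = 32*3*3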
Example #4
Source File: xrft.py    From xrft with MIT License
def _radial_wvnum(k, l, N, nfactor):
    """ Creates a radial wavenumber based on two horizontal wavenumbers
    along with the appropriate index map
    """

    # compute target wavenumbers
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis,:]**2 + l[:,np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    # compute bin index
    kidx = np.digitize(np.ravel(K), ki)
    # compute number of points for each wavenumber
    area = np.bincount(kidx)
    # compute the average radial wavenumber for each bin
    kr = (np.bincount(kidx, weights=K.ravel())
          / np.ma.masked_where(area==0, area))

    return ki, kr[1:-1] 
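A self-contained sketch of the same binning idea on a hypothetical 16x16 wavenumber grid (independent of xrft): np.digitize assigns each |K| to a radial bin and np.bincount averages within each bin:

import numpy as np

k = np.fft.fftshift(np.fft.fftfreq(16))
K = np.sqrt(k[np.newaxis, :]**2 + k[:, np.newaxis]**2)
ki = np.linspace(0., k.max(), 4)

kidx = np.digitize(K.ravel(), ki)
area = np.bincount(kidx)                      # points per bin
kr = (np.bincount(kidx, weights=K.ravel())
      / np.ma.masked_where(area == 0, area))  # mean radial wavenumber per bin
print(kr)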
Example #5
Source File: point_cloud.py    From FRIDA with MIT License
def classical_mds(self, D):
        ''' 
        Classical multidimensional scaling

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing the squared distances between points)
        '''

        # Apply MDS algorithm for denoising
        n = D.shape[0]
        J = np.eye(n) - np.ones((n,n))/float(n)
        G = -0.5*np.dot(J, np.dot(D, J))

        s, U = np.linalg.eig(G)

        # we need to sort the eigenvalues in decreasing order
        s = np.real(s)
        o = np.argsort(s)
        s = s[o[::-1]]
        U = U[:,o[::-1]]

        S = np.diag(s)[0:self.dim,:]
        self.X = np.dot(np.sqrt(S),U.T) 
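A self-contained sketch of the same recipe on hypothetical points (independent of FRIDA): build a squared Euclidean distance matrix, double-center it, and recover coordinates from the top eigenpairs:

import numpy as np

X = np.random.randn(2, 5)                               # 5 points in 2-D
D = np.sum((X[:, :, None] - X[:, None, :])**2, axis=0)  # squared distances

n = D.shape[0]
J = np.eye(n) - np.ones((n, n)) / n
G = -0.5 * J.dot(D).dot(J)             # Gram matrix of the centered points
s, U = np.linalg.eigh(G)               # eigh is safe here: G is symmetric
s, U = s[::-1], U[:, ::-1]             # sort eigenvalues in decreasing order
Xr = np.sqrt(np.diag(np.maximum(s[:2], 0))).dot(U[:, :2].T)

# Xr matches X up to rotation/reflection/translation, so distances agree
Dr = np.sum((Xr[:, :, None] - Xr[:, None, :])**2, axis=0)
print(np.allclose(D, Dr))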
Example #6
Source File: point_cloud.py    From FRIDA with MIT License
def trilateration(self, D):
        '''
        Find the location of points based on their distance matrix using trilateration

        Parameters
        ----------
        D : square 2D ndarray
            Euclidean Distance Matrix (matrix containing the squared distances between points)
        '''

        dist = np.sqrt(D)

        # Simpler algorithm (no denoising)
        self.X = np.zeros((self.dim, self.m))

        self.X[:,1] = np.array([0, dist[0,1]])
        for i in range(2, self.m):
            self.X[:,i] = self.trilateration_single_point(self.X[1,1],
                    dist[0,i], dist[1,i]) 
Example #7
Source File: tools_fri_doa_plane.py    From FRIDA with MIT License
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    build the matrix that maps the Fourier series to the visibility
    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that contains the microphones' x coordinates
    :param p_mic_y: a vector that contains the microphones' y coordinates
    :return:
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    count_G = 0
    for q in range(num_mic):
        p_x_outer = p_mic_x[q]
        p_y_outer = p_mic_y[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - p_mic_x[qp]
                p_y_qqp = p_y_outer - p_mic_y[qp]
                norm_p_qqp = np.sqrt(p_x_qqp ** 2 + p_y_qqp ** 2)
                phi_qqp = np.arctan2(p_y_qqp, p_x_qqp)
                G[count_G, :] = (-1j) ** ms * sp.special.jv(ms, norm_p_qqp) * \
                                np.exp(1j * ms * phi_qqp)
                count_G += 1
    return G 
Example #8
Source File: dynamic.py    From StructEngPy with MIT License
def solve_modal(model,k:int):
    """
    Solve eigen mode of the MDOF system
    
    params:
        model: FEModel.
        k: number of modes to extract.
    """
    K_,M_=model.K_,model.M_
    if k>model.DOF:
        logger.info('Warning: the number of modes to extract exceeds the system DOFs; only %d modes are available'%model.DOF)
        k=model.DOF
    omega2s,modes = sl.eigsh(K_,k,M_,sigma=0,which='LM')
    delta = modes/np.sum(modes,axis=0)
    model.is_solved=True
    model.mode_=delta
    model.omega_=np.sqrt(omega2s).reshape((k,1)) 
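A minimal sketch of the same generalized eigenproblem K v = omega^2 M v on a hypothetical 3-DOF spring-mass chain (independent of StructEngPy):

import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as sl

K_ = sp.csc_matrix([[ 2., -1.,  0.],
                    [-1.,  2., -1.],
                    [ 0., -1.,  1.]])  # stiffness matrix
M_ = sp.identity(3, format='csc')      # unit masses
omega2s, modes = sl.eigsh(K_, 2, M_, sigma=0, which='LM')
print(np.sqrt(omega2s))                # the two lowest natural frequencies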
Example #9
Source File: picklable_model.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def set_input_shape(self, input_shape):
        batch_size, dim = input_shape
        self.input_shape = [batch_size, dim]
        self.output_shape = [batch_size, self.num_hid]
        if self.init_mode == "norm":
            init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
            init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                       keep_dims=True))
            init = init * self.init_scale
        elif self.init_mode == "uniform_unit_scaling":
            scale = np.sqrt(3. / dim)
            init = tf.random_uniform([dim, self.num_hid], dtype=tf.float32,
                                     minval=-scale, maxval=scale)
        else:
            raise ValueError(self.init_mode)
        self.W = PV(init)
        if self.use_bias:
            self.b = PV((np.zeros((self.num_hid,))
                         + self.init_b).astype('float32')) 
Example #10
Source File: layers.py    From deep-learning-note with MIT License
def __forward(self, x, train_flg):
        if self.running_mean is None:
            N, D = x.shape
            self.running_mean = np.zeros(D)
            self.running_var = np.zeros(D)

        if train_flg:
            mu = x.mean(axis=0)
            xc = x - mu
            var = np.mean(xc ** 2, axis=0)
            std = np.sqrt(var + 10e-7)
            xn = xc / std

            self.batch_size = x.shape[0]
            self.xc = xc
            self.xn = xn
            self.std = std
            self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mu
            self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var
        else:
            xc = x - self.running_mean
            xn = xc / ((np.sqrt(self.running_var + 10e-7)))

        out = self.gamma * xn + self.beta
        return out 
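A standalone sketch of the training-mode path above with gamma = 1 and beta = 0 on a hypothetical batch: after normalization each feature has roughly zero mean and unit variance:

import numpy as np

x = np.random.randn(32, 4) * 3.0 + 5.0  # batch of 32 samples, 4 features
mu = x.mean(axis=0)
var = np.mean((x - mu)**2, axis=0)
xn = (x - mu) / np.sqrt(var + 10e-7)
print(xn.mean(axis=0))  # ~0 per feature
print(xn.var(axis=0))   # ~1 per feature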
Example #11
Source File: optimizer.py    From deep-learning-note with MIT License
def update(self, params, grads):
        if self.m is None:
            self.m, self.v = {}, {}
            for key, val in params.items():
                self.m[key] = np.zeros_like(val)
                self.v[key] = np.zeros_like(val)

        self.iter += 1
        lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) / (1.0 - self.beta1 ** self.iter)

        for key in params.keys():
            # self.m[key] = self.beta1*self.m[key] + (1-self.beta1)*grads[key]
            # self.v[key] = self.beta2*self.v[key] + (1-self.beta2)*(grads[key]**2)
            self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
            self.v[key] += (1 - self.beta2) * (grads[key] ** 2 - self.v[key])

            params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)

            # unbias_m += (1 - self.beta1) * (grads[key] - self.m[key]) # correct bias
            # unbisa_b += (1 - self.beta2) * (grads[key]*grads[key] - self.v[key]) # correct bias
            # params[key] += self.lr * unbias_m / (np.sqrt(unbisa_b) + 1e-7) 
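The in-place updates above are an algebraically equivalent form of the usual Adam moment updates (m = beta1*m + (1-beta1)*grad). A minimal standalone loop with assumed hyperparameters, minimizing f(w) = w**2:

import numpy as np

w, m, v = 5.0, 0.0, 0.0
lr, beta1, beta2 = 0.01, 0.9, 0.999
for t in range(1, 2001):
    g = 2 * w                          # gradient of w**2
    m += (1 - beta1) * (g - m)         # same update form as in the class above
    v += (1 - beta2) * (g**2 - v)
    lr_t = lr * np.sqrt(1.0 - beta2**t) / (1.0 - beta1**t)
    w -= lr_t * m / (np.sqrt(v) + 1e-7)
print(w)  # close to 0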
Example #12
Source File: multi_layer_net_extend.py    From deep-learning-note with MIT License
def __init_weight(self, weight_init_std):
        """设定权重的初始值
        Parameters
        ----------
        weight_init_std : 指定权重的标准差(e.g. 0.01)
            指定'relu'或'he'的情况下设定“He的初始值”
            指定'sigmoid'或'xavier'的情况下设定“Xavier的初始值”
        """
        all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
        for idx in range(1, len(all_size_list)):
            scale = weight_init_std
            if str(weight_init_std).lower() in ('relu', 'he'):
                scale = np.sqrt(2.0 / all_size_list[idx - 1])  # recommended initial value when using ReLU
            elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
                scale = np.sqrt(1.0 / all_size_list[idx - 1])  # recommended initial value when using sigmoid
            self.params['W' + str(idx)] = scale * np.random.randn(all_size_list[idx - 1], all_size_list[idx])
            self.params['b' + str(idx)] = np.zeros(all_size_list[idx]) 
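A quick numeric check with a hypothetical fan-in: the He factor sqrt(2/fan_in) is sqrt(2) times the Xavier factor sqrt(1/fan_in), and the sampled weights end up with the requested standard deviation:

import numpy as np

fan_in = 100
print(np.sqrt(2.0 / fan_in))  # He:     ~0.1414
print(np.sqrt(1.0 / fan_in))  # Xavier:  0.1
W = np.sqrt(2.0 / fan_in) * np.random.randn(fan_in, 50)
print(W.std())                # ~0.1414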
Example #13
Source File: simulate_sin.py    From deep-learning-note with MIT License
def run_eval(sess, test_X, test_y):
    ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))
    ds = ds.batch(1)
    X, y = ds.make_one_shot_iterator().get_next()

    with tf.variable_scope("model", reuse=True):
        prediction, _, _ = lstm_model(X, [0.0], False)
        predictions = []
        labels = []
        for i in range(TESTING_EXAMPLES):
            p, l = sess.run([prediction, y])
            predictions.append(p)
            labels.append(l)

    predictions = np.array(predictions).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((predictions-labels) ** 2).mean(axis=0))
    print("Mean Square Error is: %f" % rmse)

    plt.figure()
    plt.plot(predictions, label='predictions')
    plt.plot(labels, label='real_sin')
    plt.legend()
    plt.show() 
Example #14
Source File: util.py    From neuropythy with GNU Affero General Public License v3.0
def point_on_segment(ac, b, atol=1e-8):
    '''
    point_on_segment((a,c), b) yields True if point b is on segment (a,c) and False otherwise. Note
    that this differs from point_in_segment in that a point equal to a or c is considered 'on' but
    not 'in' the segment.
    The option atol can be given and is used only to test for difference from 0; by default it is
    1e-8.
    '''
    (a,c) = ac
    abc = [np.asarray(u) for u in (a,b,c)]
    if any(len(u.shape) > 1 for u in abc): (a,b,c) = [np.reshape(u,(len(u),-1)) for u in abc]
    else:                                  (a,b,c) = abc
    vab = b - a
    vbc = c - b
    vac = c - a
    dab = np.sqrt(np.sum(vab**2, axis=0))
    dbc = np.sqrt(np.sum(vbc**2, axis=0))
    dac = np.sqrt(np.sum(vac**2, axis=0))
    return np.isclose(dab + dbc - dac, 0, atol=atol) 
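Assuming numpy is imported as np and point_on_segment is defined as above, a usage sketch with hypothetical points:

a = np.array([0., 0.])
c = np.array([2., 0.])
print(point_on_segment((a, c), np.array([1., 0.])))  # True: interior point
print(point_on_segment((a, c), np.array([0., 0.])))  # True: 'on' includes endpoints
print(point_on_segment((a, c), np.array([1., 1.])))  # False: off the segment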
Example #15
Source File: kde.py    From svviz with MIT License
def _compute_covariance(self):
        self.factor = self.scotts_factor()
        # Cache covariance and inverse covariance of the data
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
                                               bias=False))
            self._data_inv_cov = linalg.inv(self._data_covariance)

        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n 
Example #16
Source File: suba.py    From libTLDA with MIT License
def zca_whiten(self, X):
        """
        Perform ZCA whitening (aka Mahalanobis whitening).

        Parameters
        ----------
        X : array (M samples x D features)
            data matrix.

        Returns
        -------
        X : array (M samples x D features)
            whitened data.

        """
        # Covariance matrix
        Sigma = np.cov(X.T)

        # Singular value decomposition
        U, S, V = svd(Sigma)

        # Whitening constant to prevent division by zero
        epsilon = 1e-5

        # ZCA whitening matrix
        W = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + epsilon)), V))

        # Apply whitening matrix
        return np.dot(X, W) 
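A quick check of the same steps on hypothetical data (independent of libTLDA): after whitening, the sample covariance is approximately the identity matrix:

import numpy as np
from scipy.linalg import svd

X = np.random.randn(1000, 3).dot(np.array([[2., 0., 0.],
                                            [1., 1., 0.],
                                            [0., 0., 3.]]))  # correlated data
Sigma = np.cov(X.T)
U, S, V = svd(Sigma)
W = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + 1e-5)), V))
print(np.round(np.cov(np.dot(X, W).T), 2))  # ~identity matrix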
Example #17
Source File: NLP.py    From Financial-NLP with Apache License 2.0
def unitvec(vector, ax=1):
    v=vector*vector
    if len(vector.shape)==1:
        sqrtv=np.sqrt(np.sum(v))
    elif len(vector.shape)==2:
        sqrtv=np.sqrt([np.sum(v, axis=ax)])
    else:
        raise Exception('It\'s too large.')
    if ax==1:
        result=np.divide(vector,sqrtv.T)
    elif ax==0:
        result=np.divide(vector,sqrtv)
    return result 
Example #18
Source File: filter.py    From fenics-topopt with MIT License
def __init__(self, nelx, nely, rmin):
        """
        Filter: Build (and assemble) the index+data vectors for the coo matrix
        format.
        """
        nfilter = int(nelx * nely * ((2 * (np.ceil(rmin) - 1) + 1)**2))
        iH = np.zeros(nfilter)
        jH = np.zeros(nfilter)
        sH = np.zeros(nfilter)
        cc = 0
        for i in range(nelx):
            for j in range(nely):
                row = i * nely + j
                kk1 = int(np.maximum(i - (np.ceil(rmin) - 1), 0))
                kk2 = int(np.minimum(i + np.ceil(rmin), nelx))
                ll1 = int(np.maximum(j - (np.ceil(rmin) - 1), 0))
                ll2 = int(np.minimum(j + np.ceil(rmin), nely))
                for k in range(kk1, kk2):
                    for l in range(ll1, ll2):
                        col = k * nely + l
                        fac = rmin - np.sqrt(
                            ((i - k) * (i - k) + (j - l) * (j - l)))
                        iH[cc] = row
                        jH[cc] = col
                        sH[cc] = np.maximum(0.0, fac)
                        cc = cc + 1
        # Finalize assembly and convert to csc format
        self.H = scipy.sparse.coo_matrix((sH, (iH, jH)),
            shape=(nelx * nely, nelx * nely)).tocsc()
        self.Hs = self.H.sum(1) 
Example #19
Source File: von_mises_stress.py    From fenics-topopt with MIT License
def calculate_stress(self, x, u, nu, side=1):
        """
        Calculate the von Mises stress given the densities x, displacements u,
        and Young's modulus nu.
        """
        s11, s22, s12 = self.calculate_principle_stresses(x, u, nu, side)
        vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
        return vm_stress 
Example #20
Source File: custom_objects.py    From keras_mixnets with MIT License
def __call__(self, shape, dtype=None):
        dtype = dtype or K.floatx()

        init_range = 1.0 / np.sqrt(shape[1])
        return tf.random_uniform(shape, -init_range, init_range, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Example #21
Source File: utils.py    From Att-ChemdNER with Apache License 2.0
def shared(shape, name):
#{{{
    """
    Create a shared object of a numpy array.
    """ 
    init = initializations.get('glorot_uniform')
    if len(shape) == 1:
        value = np.zeros(shape)  # bias are initialized with zeros
        return theano.shared(value=value.astype(theano.config.floatX), name=name)
    else:
        return init(shape=shape, name=name)
#}}} 
Example #22
Source File: initializations.py    From Att-ChemdNER with Apache License 2.0
def lecun_uniform(shape, name=None, dim_ordering='th'):
    ''' Reference: LeCun 98, Efficient Backprop
        http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    '''
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    scale = np.sqrt(3. / fan_in)
    return uniform(shape, scale, name=name) 
Example #23
Source File: initializations.py    From Att-ChemdNER with Apache License 2.0
def glorot_normal(shape, name=None, dim_ordering='th'):
    ''' Reference: Glorot & Bengio, AISTATS 2010
    '''
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    s = np.sqrt(2. / (fan_in + fan_out))
    return normal(shape, s, name=name) 
Example #24
Source File: initializations.py    From Att-ChemdNER with Apache License 2.0
def glorot_uniform(shape, name=None, dim_ordering='th'):
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    s = np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, s, name=name) 
Example #25
Source File: initializations.py    From Att-ChemdNER with Apache License 2.0
def he_uniform(shape, name=None, dim_ordering='th'):
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    s = np.sqrt(6. / fan_in)
    return uniform(shape, s, name=name) 
Example #26
Source File: xrft.py    From xrft with MIT License
def isotropize(ps, fftdim, nfactor=4):
    """
    Isotropize a 2D power spectrum or cross spectrum
    by taking an azimuthal average.

    .. math::
        \text{iso}_{ps} = k_r N^{-1} \sum_{N} |\mathbb{F}(da')|^2

    where :math:`N` is the number of azimuthal bins.

    Parameters
    ----------
    ps : `xarray.DataArray`
        The power spectrum or cross spectrum to be isotropized.
    fftdim : list
        The FFT dimensions over which the isotropization must be performed.
    nfactor : int, optional
        Ratio of the data size to the number of bins used for the azimuthal
        averaging. Default is 4.
    """

    # compute radial wavenumber bins
    k = ps[fftdim[1]]
    l = ps[fftdim[0]]
    N = [k.size, l.size]
    ki, kr = _radial_wvnum(k, l, min(N), nfactor)

    # average azimuthally
    ps = ps.assign_coords(freq_r=np.sqrt(k**2+l**2))
    iso_ps = (ps.groupby_bins('freq_r', bins=ki, labels=kr).mean()
              .rename({'freq_r_bins': 'freq_r'})
             )
    return iso_ps * iso_ps.freq_r 
Example #27
Source File: test_xrft.py    From xrft with MIT License
def test_isotropize(N=512):
    """Test the isotropization of a power spectrum."""

    # generate synthetic 2D spectrum, isotropize and check values
    dL, amp, s = 1., 1e1, -3.
    dims = ['x','y']
    fftdim = ['freq_x', 'freq_y']
    spacing_tol = 1e-3
    nfactor = 4
    def _test_iso(theta):
        ps = xrft.power_spectrum(theta, spacing_tol, dim=dims)
        ps = np.sqrt(ps.freq_x**2+ps.freq_y**2)
        ps_iso = xrft.isotropize(ps, fftdim, nfactor=nfactor)
        assert len(ps_iso.dims)==1
        assert ps_iso.dims[0]=='freq_r'
        npt.assert_allclose(ps_iso, ps_iso.freq_r**2, atol=0.02)
    # np data
    theta = synthetic_field_xr(N, dL, amp, s)
    _test_iso(theta)
    # np with other dim
    theta = synthetic_field_xr(N, dL, amp, s,
                                other_dim_sizes=[10],
                                dim_order=True)
    _test_iso(theta)
    # da chunked, order 1
    theta = synthetic_field_xr(N, dL, amp, s,
                                chunks={'y': None, 'x': None, 'd0': 2},
                                other_dim_sizes=[10],
                                dim_order=True)
    _test_iso(theta)
    # da chunked, order 2
    theta = synthetic_field_xr(N, dL, amp, s,
                                chunks={'y': None, 'x': None, 'd0': 2},
                                other_dim_sizes=[10],
                                dim_order=False)
    _test_iso(theta) 
Example #28
Source File: point_cloud.py    From FRIDA with MIT License
def trilateration_single_point(self, c, Dx, Dy):
        '''
        Given anchor x at the origin (0,0), anchor y at (0,c), and the distances
        Dx and Dy from a point at an unknown location to x and y respectively,
        finds the position of the point.
        '''

        z = (c**2 - (Dy**2 - Dx**2)) / (2*c)
        t = np.sqrt(Dx**2 - z**2)

        return np.array([t,z])
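A worked check of the two formulas with hypothetical numbers: anchors x = (0, 0) and y = (0, 2), and a point at (t, z) = (1, 0.5):

import numpy as np

c = 2.0
Dx = np.sqrt(1.0**2 + 0.5**2)        # distance from the point to x at (0, 0)
Dy = np.sqrt(1.0**2 + (0.5 - c)**2)  # distance from the point to y at (0, c)
z = (c**2 - (Dy**2 - Dx**2)) / (2*c)
t = np.sqrt(Dx**2 - z**2)
print(t, z)  # 1.0 0.5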