Python numpy.sqrt() Examples

The following are code examples for showing how to use numpy.sqrt(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: xrft   Author: xgcm   File: xrft.py    MIT License 7 votes vote down vote up
def _azimuthal_wvnum(k, l, N, nfactor):
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis,:]**2 + l[:,np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    kidx = np.digitize(np.ravel(K), ki)
    area = np.bincount(kidx)

    kr = np.bincount(kidx, weights=K.ravel()) / area

    return kidx, area, kr 
Example 2
Project: FRIDA   Author: LCAV   File: point_cloud.py    MIT License 7 votes vote down vote up
def classical_mds(self, D):
    """Denoise and embed the point set via classical multidimensional scaling.

    Parameters
    ----------
    D : square 2D ndarray
        Euclidean distance matrix (contains *squared* distances between
        points).

    Side effects
    ------------
    Stores the reconstructed ``self.dim``-dimensional coordinates in
    ``self.X`` (one point per column).
    """
    n = D.shape[0]
    # Double-centering operator J = I - (1/n) * 11^T.
    centering = np.eye(n) - np.ones((n, n)) / float(n)
    # Gram matrix of the centered point set: G = -J D J / 2.
    gram = -0.5 * np.dot(centering, np.dot(D, centering))

    eigval, eigvec = np.linalg.eig(gram)

    # Sort the (real parts of the) eigenvalues in decreasing order.
    eigval = np.real(eigval)
    order = np.argsort(eigval)[::-1]
    eigval = eigval[order]
    eigvec = eigvec[:, order]

    # Keep the leading self.dim components: X = sqrt(S) U^T.
    S = np.diag(eigval)[0:self.dim, :]
    self.X = np.dot(np.sqrt(S), eigvec.T)
Example 3
Project: StructEngPy   Author: zhuoju36   File: dynamic.py    MIT License 7 votes vote down vote up
def solve_modal(model, k: int):
    """
    Solve eigen modes of the MDOF system.

    params:
        model: FEModel.
        k: number of modes to extract.
    """
    K_, M_ = model.K_, model.M_
    # Never request more modes than the system has DOFs.
    if k > model.DOF:
        logger.info('Warning: the modal number to extract is larger than the system DOFs, only %d modes are available'%model.DOF)
        k = model.DOF
    # Generalized eigenproblem K x = w^2 M x; shift-invert around sigma=0
    # targets the lowest-frequency modes.
    omega2s, modes = sl.eigsh(K_, k, M_, sigma=0, which='LM')
    # Normalize each mode shape so its components sum to one.
    model.mode_ = modes / np.sum(modes, axis=0)
    model.is_solved = True
    model.omega_ = np.sqrt(omega2s).reshape((k, 1))
Example 4
Project: s2g   Author: caesar0301   File: bonus.py    MIT License 6 votes vote down vote up
def great_circle_dist(p1, p2):
    """Return the great-circle distance (in km) between two points given as
    geographical (lon, lat) pairs in degrees.
    """
    EARTH_R = 6372.8
    lon0, lat0 = (np.radians(float(v)) for v in p1)
    lon1, lat1 = (np.radians(float(v)) for v in p2)
    dlon = lon0 - lon1
    # arctan2 form of the central angle (numerically stable for all
    # separations, unlike the plain arccos formula).
    sin_term = np.sqrt(
        (np.cos(lat1) * np.sin(dlon)) ** 2
        + (np.cos(lat0) * np.sin(lat1)
           - np.sin(lat0) * np.cos(lat1) * np.cos(dlon)) ** 2)
    cos_term = (np.sin(lat0) * np.sin(lat1)
                + np.cos(lat0) * np.cos(lat1) * np.cos(dlon))
    return EARTH_R * np.arctan2(sin_term, cos_term)
Example 5
Project: chainer-openai-transformer-lm   Author: soskek   File: opt.py    MIT License 6 votes vote down vote up
def update_core_cpu(self, param):
    """One Adam step on CPU with decoupled (AdamW-style) weight decay.

    The scheduled rate ``eta * lr`` multiplies BOTH the adaptive gradient
    term and the weight-decay term. The moment buffers ``self.state['m']``
    and ``self.state['v']`` are updated in place.
    """
    grad = param.grad
    if grad is None:
        return
    hp = self.hyperparam
    # Guard: eps must not underflow to zero in the parameter's dtype.
    eps = grad.dtype.type(hp.eps)
    if hp.eps != 0 and eps == 0:
        raise ValueError(
            'eps of Adam optimizer is too small for {} ({})'.format(
                grad.dtype.name, hp.eps))
    m, v = self.state['m'], self.state['v']

    # In-place exponential moving averages of grad and grad^2.
    m += (1 - hp.beta1) * (grad - m)
    v += (1 - hp.beta2) * (grad * grad - v)

    # Scheduled adaptive rate applied to both the Adam term and weight decay.
    step = m / (numpy.sqrt(v) + hp.eps) + hp.weight_decay_rate * param.data
    param.data -= hp.eta * self.lr * step
Example 6
Project: chainer-openai-transformer-lm   Author: soskek   File: opt.py    MIT License 6 votes vote down vote up
def update_core_gpu(self, param):
        """One Adam update step on GPU via a fused CUDA elementwise kernel.

        Mirrors ``update_core_cpu``: the scheduled rate ``eta * lr``
        multiplies both the adaptive gradient term and the weight-decay
        term, and the moment buffers m/v are updated in place on the device.
        """
        grad = param.grad
        if grad is None:
            # Nothing to do for parameters that received no gradient.
            return

        hp = self.hyperparam
        # Guard: eps cast to the gradient dtype must not underflow to zero.
        eps = grad.dtype.type(hp.eps)
        if hp.eps != 0 and eps == 0:
            raise ValueError(
                'eps of Adam optimizer is too small for {} ({})'.format(
                    grad.dtype.name, hp.eps))

        # Single fused kernel: updates m, v and param in place.
        cuda.elementwise(
            'T grad, T lr, T one_minus_beta1, T one_minus_beta2, T eps, \
             T eta, T weight_decay_rate',
            'T param, T m, T v',
            '''m += one_minus_beta1 * (grad - m);
               v += one_minus_beta2 * (grad * grad - v);
               param -= eta * lr * (m / (sqrt(v) + eps) +
                               weight_decay_rate * param);''',
            'adam')(grad, self.lr, 1 - hp.beta1,
                    1 - hp.beta2, hp.eps,
                    hp.eta, hp.weight_decay_rate,
                    param.data, self.state['m'], self.state['v'])
Example 7
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    MIT License 6 votes vote down vote up
def calculate_diff_stress(self, x, u, nu, side=1):
    """
    Calculate the derivative of the Von Mises stress given the densities x,
    displacements u, and young modulus nu. Optionally, provide the side
    length (default: 1).
    """
    n_loads = float(u.shape[1])
    rho = self.penalized_densities(x)
    elast = self.E(nu).dot(self.B(side))
    # Sum the strain-displacement product over all load cases.
    accum = sum(elast.dot(u[:, i][self.edofMat]) for i in range(u.shape[1]))
    s11, s22, s12 = numpy.hsplit((accum * rho / n_loads).T, 3)
    drho = self.diff_penalized_densities(x)
    ds11, ds22, ds12 = numpy.hsplit(
        ((1 - rho) * drho * accum / n_loads).T, 3)
    # Plane-stress Von Mises: sqrt(s11^2 - s11*s22 + s22^2 + 3*s12^2).
    vm_stress = numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2)
    # Chain rule through the square root; skip when the stress field is
    # (numerically) zero everywhere to avoid dividing by zero.
    if abs(vm_stress).sum() > 1e-8:
        dvm_stress = (0.5 * (1. / vm_stress) * (2 * s11 * ds11 -
            ds11 * s22 - s11 * ds22 + 2 * s22 * ds22 + 6 * s12 * ds12))
        return dvm_stress
    return 0
Example 8
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_test.py    MIT License 6 votes vote down vote up
def test_arcsinh_ad_results():
    """arcsinh of an AutoDiff: value, derivative and jacobian at +1, -1, 0."""
    # der carries the seed (2); jacobian is the unseeded derivative.
    for point in (1, -1, 0):
        f = ef.arcsinh(AutoDiff(point, 2))
        assert f.val == np.arcsinh(point)
        assert f.der == np.array([[2 / np.sqrt(point ** 2 + 1)]])
        assert f.jacobian == np.array([[1 / np.sqrt(point ** 2 + 1)]])
Example 9
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_test.py    MIT License 6 votes vote down vote up
def test_arccosh_ad_results():
    """arccosh of an AutoDiff across its domain boundaries."""
    # Value is defined for x >= 1; the derivative only for x > 1.
    f = ef.arccosh(AutoDiff(1.1, 2))
    assert f.val == np.arccosh(1.1)
    assert f.der == np.array([[2 / np.sqrt(1.1 ** 2 - 1)]])
    assert f.jacobian == np.array([[1 / np.sqrt(1.1 ** 2 - 1)]])
    # At x == 1 the value exists but the derivative diverges to infinity.
    with pytest.warns(RuntimeWarning):
        f = ef.arccosh(AutoDiff(1, 2))
        assert np.isinf(f.der)
        assert np.isinf(f.jacobian)
    # Below x == 1 neither the value nor the derivative is defined.
    with pytest.warns(RuntimeWarning):
        f = ef.arccosh(AutoDiff(0, 2))
        assert np.isnan(f.val)
        assert np.isnan(f.der)
        assert np.isnan(f.jacobian)
Example 10
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_test.py    MIT License 6 votes vote down vote up
def test_sqrt_ad_results():
    """sqrt of an AutoDiff: positive input, zero, and negative input."""
    # Positive reals: d/dx sqrt(x) = 0.5 * x**-0.5, scaled by the seed.
    f = ef.sqrt(AutoDiff(0.5, 2.0))
    assert f.val == np.array([[np.sqrt(0.5)]])
    assert f.der == np.array([[0.5 * 0.5 ** (-0.5) * 2.0]])
    assert f.jacobian == np.array([[0.5 * 0.5 ** (-0.5) * 1]])
    # x == 0: value is 0 but the derivative diverges (division by zero).
    with pytest.warns(RuntimeWarning):
        f = ef.sqrt(AutoDiff(0, 2))
        assert f.val == np.array([[0]])
        assert np.isinf(f.der[0][0])
        assert np.isinf(f.jacobian[0][0])
    # x < 0: value and derivative are both undefined (NaN).
    with pytest.warns(RuntimeWarning):
        f = ef.sqrt(AutoDiff(-0.5, 2))
        assert np.isnan(f.val[0][0])
        assert np.isnan(f.der[0][0])
        assert np.isnan(f.jacobian[0][0])
Example 11
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_Dual_test.py    MIT License 6 votes vote down vote up
def test_arcsinh_ad_results():
    """arcsinh of a Dual number: real part and dual (derivative) part."""
    # The dual part carries the seed (2) times d/dx arcsinh = 1/sqrt(x^2+1).
    for point in (1, -1, 0):
        f = ef.arcsinh(Dual(point, 2))
        assert f.Real == np.arcsinh(point)
        assert f.Dual == np.array([[2 / np.sqrt(point ** 2 + 1)]])
Example 12
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_Dual_test.py    MIT License 6 votes vote down vote up
def test_arccosh_ad_results():
    """arccosh of a Dual number across its domain boundaries."""
    # Value defined for x >= 1; derivative only for x > 1.
    f = ef.arccosh(Dual(1.1, 2))
    assert f.Real == np.arccosh(1.1)
    assert f.Dual == np.array([[2 / np.sqrt(1.1 ** 2 - 1)]])
    # At x == 1 the value exists but the derivative diverges.
    with pytest.warns(RuntimeWarning):
        f = ef.arccosh(Dual(1, 2))
        assert np.isinf(f.Dual)
    # Below x == 1 neither part is defined.
    with pytest.warns(RuntimeWarning):
        f = ef.arccosh(Dual(0, 2))
        assert np.isnan(f.Real)
        assert np.isnan(f.Dual)
Example 13
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_Dual_test.py    MIT License 6 votes vote down vote up
def test_sqrt_ad_results():
    """sqrt of a Dual number: positive input, zero, and negative input."""
    f = ef.sqrt(Dual(0.5, 2.0))
    assert f.Real == np.array([[np.sqrt(0.5)]])
    assert f.Dual == np.array([[0.5 * 0.5 ** (-0.5) * 2.0]])
    # x == 0: real part defined, dual part diverges (division by zero).
    with pytest.warns(RuntimeWarning):
        f = ef.sqrt(Dual(0, 2))
        assert f.Real == np.array([[0]])
        assert np.isinf(f.Dual)
    # x < 0: both parts undefined.
    with pytest.warns(RuntimeWarning):
        f = ef.sqrt(Dual(-0.5, 2))
        assert np.isnan(f.Real)
        assert np.isnan(f.Dual)
Example 14
Project: Att-ChemdNER   Author: lingluodlut   File: initializations.py    Apache License 2.0 6 votes vote down vote up
def get_fans(shape, dim_ordering='th'):
    """Return (fan_in, fan_out) for a weight tensor of the given shape.

    shape        -- tuple of tensor dimensions
    dim_ordering -- 'th' (Theano: depth, input_depth, *spatial) or
                    'tf' (TensorFlow: *spatial, input_depth, depth);
                    only consulted for 4D/5D convolution kernels.

    Raises ValueError for an unknown dim_ordering on conv shapes.
    """
    if len(shape) == 2:
        # Dense layer: (input_dim, output_dim).
        return shape[0], shape[1]
    if len(shape) in (4, 5):
        # Convolution kernels (2D or 3D).
        if dim_ordering == 'th':
            receptive_field_size = np.prod(shape[2:])
            fan_in = shape[1] * receptive_field_size
            fan_out = shape[0] * receptive_field_size
        elif dim_ordering == 'tf':
            receptive_field_size = np.prod(shape[:2])
            fan_in = shape[-2] * receptive_field_size
            fan_out = shape[-1] * receptive_field_size
        else:
            raise ValueError('Invalid dim_ordering: ' + dim_ordering)
        return fan_in, fan_out
    # No specific assumptions: split the parameter count evenly.
    fan = np.sqrt(np.prod(shape))
    return fan, fan
Example 15
Project: FRIDA   Author: LCAV   File: point_cloud.py    MIT License 6 votes vote down vote up
def trilateration(self, D):
    '''
    Find the location of points based on their distance matrix using
    trilateration.

    Parameters
    ----------
    D : square 2D ndarray
        Euclidean Distance Matrix (matrix containing squared distances
        between points)

    Side effects
    ------------
    Stores the 2D point coordinates, one per column, in ``self.X``.
    '''
    # D holds squared distances; work with plain distances.
    dist = np.sqrt(D)

    # Simpler algorithm (no denoising): first point at the origin, second
    # point on the second axis at its distance from the first.
    self.X = np.zeros((self.dim, self.m))
    self.X[:, 1] = np.array([0, dist[0, 1]])

    # Fix: the original iterated ``xrange(2, m)`` where ``m`` was an
    # undefined global (NameError) and ``xrange`` is Python-2-only; the
    # point count lives on the instance as ``self.m``.
    for i in range(2, self.m):
        self.X[:, i] = self.trilateration_single_point(self.X[1, 1],
                dist[0, i], dist[1, i])
Example 16
Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py    MIT License 6 votes vote down vote up
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    Build the matrix that maps the Fourier series coefficients to visibilities.

    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that contains the microphones' x coordinates
    :param p_mic_y: a vector that contains the microphones' y coordinates
    :return: complex matrix of shape (num_mic * (num_mic - 1), 2 * M + 1)
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    row = 0
    # One row per ordered microphone pair (q, qp) with q != qp.
    for q in range(num_mic):
        for qp in range(num_mic):
            if q == qp:
                continue
            # Baseline vector between the two microphones, in polar form.
            dx = p_mic_x[q] - p_mic_x[qp]
            dy = p_mic_y[q] - p_mic_y[qp]
            baseline = np.sqrt(dx ** 2 + dy ** 2)
            angle = np.arctan2(dy, dx)
            # Jacobi-Anger expansion term for each Fourier order m.
            G[row, :] = ((-1j) ** ms * sp.special.jv(ms, baseline) *
                         np.exp(1j * ms * angle))
            row += 1
    return G
Example 17
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def draw(self, ax, color, line_width=1, fillcolor=None, name=None, arrow=True, alpha=0.2, scale=50):
        """Draw this object's contour on *ax*, plus a heading arrow or a label.

        ax         -- matplotlib axes to draw on
        color      -- edge color of the polygon patch
        fillcolor  -- face color of the polygon patch
        name       -- text label; defaults to 'front' when no arrow is drawn
        arrow      -- if True, draw a direction arrow through the center
        alpha      -- opacity of the filled polygon
        scale      -- arrow size, normalized by the current axes extent
        """
        # PolygonPatch renders self.contour -- presumably a shapely polygon
        # (it exposes .exterior.coords) -- as a matplotlib patch.
        ax.add_patch(PolygonPatch(self.contour, alpha=alpha, fc=fillcolor, ec=color, linewidth=line_width))

        # Drop the first coordinate: a closed ring repeats its start point.
        vertices = np.array(self.contour.exterior.coords)[1:]

        if arrow:
            arrow_center = np.mean(vertices, axis=0)
            # Direction taken from the edge between vertices 1 and 2; assumes
            # the vertex order encodes the heading -- TODO confirm.
            arrow_direction = (vertices[2] - vertices[1]) / 1.5
            arrow_tail = arrow_center - arrow_direction / 2
            arrow_head = arrow_center + arrow_direction / 2
            style = plt_patches.ArrowStyle.Simple(head_length=.4, head_width=.6, tail_width=.1)
            x = np.array(ax.axis())
            # Normalize the arrow's mutation scale by the visible axes area.
            scale_factor = np.sqrt(np.prod(np.abs(x[::2] - x[1::2])) / (60 * 60))
            arrow_patch = plt_patches.FancyArrowPatch(posA=arrow_tail, posB=arrow_head, arrowstyle=style,
                                                      color='w', mutation_scale= scale / scale_factor, alpha=0.4)
            ax.add_patch(arrow_patch)
        elif name is None:
            # No arrow and no explicit label: fall back to a default label.
            name = 'front'

        if name is not None:
            # Place the label at the midpoint of the rear edge.
            text_location = np.mean(vertices[[0, -1]], axis=0)
            ax.text(text_location[0], text_location[1], name, ha='center', va='top', color='w')
Example 18
Project: good-semi-bad-gan   Author: christiancosgrove   File: good-semi.py    MIT License 6 votes vote down vote up
def plot(samples):
    """Plot up to side*side samples on a square grid and return the figure.

    The grid side is min(12, floor(sqrt(len(samples)))); surplus samples
    are ignored. Samples are assumed normalized to [-1, 1] with channels
    first (C, H, W).
    """
    side = min(12, int(np.sqrt(len(samples))))
    fig = plt.figure(figsize=(side, side))
    grid = gridspec.GridSpec(side, side)
    grid.update(wspace=0.05, hspace=0.05)

    for idx, sample in enumerate(samples):
        if idx >= side * side:
            break
        ax = plt.subplot(grid[idx])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        # Undo the [-1, 1] normalization and move channels last for imshow.
        img = np.transpose(sample * 0.5 + 0.5, (1, 2, 0))
        plt.imshow(img)

    return fig
Example 19
Project: PIC   Author: ameroyer   File: utils.py    MIT License 6 votes vote down vote up
def tile_image(x_gen, tiles=None):
    """Tiled image representations.

    Args:
      x_gen: 4D array of images (n x w x h x 3)
      tiles (int pair, optional): number of rows and columns

    Returns:
      Array of tiled images (1 x W x H x 3)
    """
    n_images = x_gen.shape[0]
    if tiles is not None:
        n_rows, n_cols = tiles
    else:
        # Pick the largest divisor of n_images not exceeding its square
        # root, so the grid is as close to square as possible.
        n_rows = next(i for i in range(int(np.sqrt(n_images)), 0, -1)
                      if n_images % i == 0)
        n_cols = n_images // n_rows
    columns = [np.hstack(x_gen[c * n_rows:(c + 1) * n_rows])
               for c in range(n_cols)]
    return np.expand_dims(np.vstack(columns), 0)
Example 20
Project: b2ac   Author: hbldh   File: matrix_algorithms.py    MIT License 5 votes vote down vote up
def Givens_rotation_double(a, b):
    """Compute the Givens rotation pair (c, s) that zeroes out b.

    Uses the numerically stable formulation from Golub & Van Loan,
    "Matrix Computations": the ratio is formed with the larger of |a|
    and |b| in the denominator, which avoids overflow in the square
    root that the naive c = a/sqrt(a^2+b^2) definition would risk.
    """
    if b == 0:
        # Nothing to rotate: identity rotation.
        return 1.0, 0.0
    if np.abs(b) > np.abs(a):
        ratio = - a / b
        s = 1 / (np.sqrt(1 + ratio ** 2))
        c = s * ratio
    else:
        ratio = - b / a
        c = 1 / (np.sqrt(1 + ratio ** 2))
        s = c * ratio
    return c, s
Example 21
Project: b2ac   Author: hbldh   File: conversion.py    MIT License 5 votes vote down vote up
def conic_to_general_reference(conic_coeffs):
    """Transform from conic section format to general ellipse format.

    :param conic_coeffs: The six coefficients (a, b, c, d, e, f) defining the
        ellipse as a conic shape.
    :type conic_coeffs: :py:class:`numpy.ndarray` or tuple
    :return: centre point ``[x, y]``, axes ``[major, minor]`` and rotation
        angle of the ellipse.
    :rtype: tuple
    """
    a, b, c, d, e, f = conic_coeffs

    # Rotation that removes the cross term b*x*y.
    angle = np.arctan2(b, a - c) / 2
    cos_t = np.cos(angle)
    sin_t = np.sin(angle)

    # Conic coefficients expressed in the rotated frame (no cross term).
    a_r = a * (cos_t ** 2) + (b * cos_t * sin_t) + c * (sin_t ** 2)
    c_r = a * (sin_t ** 2) - (b * cos_t * sin_t) + c * (cos_t ** 2)
    d_r = (d * cos_t) + (e * sin_t)
    e_r = (-(d * sin_t)) + (e * cos_t)
    f_r = f

    # Centre and semi-axes in the rotated frame.
    x_r = (-d_r) / (2 * a_r)
    y_r = (-e_r) / (2 * c_r)
    common = (-4 * f_r * a_r * c_r) + (c_r * (d_r ** 2)) + (a_r * (e_r ** 2))
    major_axis = np.sqrt(common / (4 * a_r * (c_r ** 2)))
    minor_axis = np.sqrt(common / (4 * (a_r ** 2) * c_r))

    # Keep the major axis first by shifting the angle a quarter turn.
    if a_r > c_r:
        angle += np.pi / 2

    # Rotate the centre back into the original frame.
    x = x_r * cos_t - y_r * sin_t
    y = x_r * sin_t + y_r * cos_t

    return [x, y], [major_axis, minor_axis], angle
Example 22
Project: b2ac   Author: hbldh   File: conversion.py    MIT License 5 votes vote down vote up
def conic_to_general_2(conic_coeffs):
    """Transform conic-section coefficients to the general ellipse form.

    :param conic_coeffs: the six coefficients (a, b, c, d, e, f) defining the
        ellipse as a conic shape.
    :type conic_coeffs: :py:class:`numpy.ndarray` or tuple
    :return: ``([x_c, y_c], [a, b], theta)`` such that the ellipse is the set
        of points satisfying
        ``((x-x_c)*cos(t) + (y-y_c)*sin(t))**2 / a**2 +
        ((x-x_c)*sin(t) - (y-y_c)*cos(t))**2 / b**2 == 1``.
    :rtype: tuple
    """
    a, b, c, d, e, f = conic_coeffs

    # Centre of the ellipse (closed form for a central conic).
    denom = 2 * ((b ** 2) - (a * c))
    x = (c * d - b * e) / denom
    y = (a * e - b * d) / denom

    # Normalization so the quadratic form evaluates to 1 on the ellipse.
    mu = 1 / ((a * (x ** 2)) + (2 * b * x * y) + (c * (y ** 2)) - f)

    # Semi-axes from the eigenvalues of the scaled quadratic form.
    discriminant = np.sqrt(((mu * a - mu * c) ** 2) + (4 * ((mu * b) ** 2)))
    min_axis = 1 / np.sqrt((mu * a + mu * c + discriminant) / 2)
    maj_axis = 1 / np.sqrt((mu * a + mu * c - discriminant) / 2)
    angle = np.arctan2(-2 * b, c - a)

    return [x, y], [maj_axis, min_axis], angle
Example 23
Project: b2ac   Author: hbldh   File: inverse_iteration.py    MIT License 5 votes vote down vote up
def inverse_iteration_for_eigenvector_int(A, eigenvalue):
    """Performs a series of inverse iteration steps with a known
    eigenvalue to produce its eigenvector.

    :param A: The 3x3 matrix to which the eigenvalue belongs.
    :type A: :py:class:`numpy.ndarray`
    :param eigenvalue: One approximate eigenvalue of the matrix A.
    :type eigenvalue: int
    :return: The eigenvector of this matrix and eigenvalue combination,
        plus its (truncated integer) Euclidean norm.
    :rtype: :py:class:`numpy.ndarray`

    """
    A = np.array(A, 'int64')

    # Shift the matrix: subtract the eigenvalue from the diagonal entries.
    # Fix: ``range`` replaces the Python-2-only ``xrange``.
    for k in range(A.shape[0]):
        A[k, k] -= eigenvalue
    A, scale = fp.scale_64bit_matrix(A)

    # One inverse-iteration step with an all-ones start vector: the row
    # sums of the adjugate give (A - lambda*I)^{-1} * 1 up to scale.
    adj_A = mo.inverse_3by3_int64(A.flatten(), False)
    eigenvector = adj_A.reshape((3, 3)).sum(1)
    eigenvector, scale = fp.scale_64bit_vector(eigenvector)

    e_norm = int(np.sqrt((eigenvector ** 2).sum()))
    # Canonical sign convention: flip when both end components are negative.
    if (eigenvector[0] < 0) and (eigenvector[2] < 0):
        eigenvector = -eigenvector
    return eigenvector, e_norm
Example 24
Project: chainer-openai-transformer-lm   Author: soskek   File: opt.py    MIT License 5 votes vote down vote up
def _scheduled_learning_rate(schedule, hp, t):
    if t == 0:
        raise RuntimeError(
            'Can\'t determine the learning rate of Adam optimizer '
            'because the update steps have not been started.')
    fix1 = 1. - math.pow(hp.beta1, t)
    fix2 = 1. - math.pow(hp.beta2, t)
    lrt = hp.alpha * math.sqrt(fix2) / fix1
    lrt *= schedule(t / hp.t_total, hp.warmup)
    return lrt 
Example 25
Project: fenics-topopt   Author: zfergus   File: filter.py    MIT License 5 votes vote down vote up
def __init__(self, nelx, nely, rmin):
    """
    Filter: build and assemble the index+data vectors for the COO matrix
    format, then store it as CSC in ``self.H`` with row sums ``self.Hs``.

    nelx, nely -- grid dimensions (number of elements in x and y)
    rmin       -- filter radius; weights decay linearly to zero at rmin
    """
    reach = int(np.ceil(rmin))
    # Upper bound on nonzeros: each element sees a (2*reach - 1)^2 window.
    nfilter = int(nelx * nely * ((2 * (reach - 1) + 1) ** 2))
    rows = np.zeros(nfilter)
    cols = np.zeros(nfilter)
    vals = np.zeros(nfilter)
    idx = 0
    for i in range(nelx):
        for j in range(nely):
            row = i * nely + j
            # Clip the neighbour window to the grid bounds.
            for k in range(max(i - (reach - 1), 0), min(i + reach, nelx)):
                for l in range(max(j - (reach - 1), 0), min(j + reach, nely)):
                    col = k * nely + l
                    # Linear cone weight: rmin minus the center distance.
                    fac = rmin - np.sqrt(
                        ((i - k) * (i - k) + (j - l) * (j - l)))
                    rows[idx] = row
                    cols[idx] = col
                    vals[idx] = np.maximum(0.0, fac)
                    idx += 1
    # Finalize assembly and convert to csc format.
    self.H = scipy.sparse.coo_matrix(
        (vals, (rows, cols)), shape=(nelx * nely, nelx * nely)).tocsc()
    self.Hs = self.H.sum(1)
Example 26
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    MIT License 5 votes vote down vote up
def calculate_stress(self, x, u, nu, side=1):
    """
    Calculate the Von Mises stress given the densities x, displacements u,
    and young modulus nu (optionally the element side length).
    """
    # Principal plane-stress components per element.
    s11, s22, s12 = self.calculate_principle_stresses(x, u, nu, side)
    # Plane-stress Von Mises formula.
    return numpy.sqrt(s11 ** 2 - s11 * s22 + s22 ** 2 + 3 * s12 ** 2)
Example 27
Project: fenics-topopt   Author: zfergus   File: filter.py    MIT License 5 votes vote down vote up
def __init__(self, nelx, nely, rmin):
    """
    Filter: Build (and assemble) the index+data vectors for the coo matrix
    format.

    Stores the assembled filter matrix as CSC in ``self.H`` and its row
    sums in ``self.Hs``.
    """
    ceil_r = np.ceil(rmin)
    # Worst-case nonzero count: a (2*ceil(rmin)-1)^2 window per element.
    n_entries = int(nelx * nely * ((2 * (ceil_r - 1) + 1) ** 2))
    ih = np.zeros(n_entries)
    jh = np.zeros(n_entries)
    sh = np.zeros(n_entries)
    count = 0
    for ex in range(nelx):
        for ey in range(nely):
            center = ex * nely + ey
            # Neighbour window, clipped to the grid.
            x_lo = int(np.maximum(ex - (ceil_r - 1), 0))
            x_hi = int(np.minimum(ex + ceil_r, nelx))
            y_lo = int(np.maximum(ey - (ceil_r - 1), 0))
            y_hi = int(np.minimum(ey + ceil_r, nely))
            for nx in range(x_lo, x_hi):
                for ny in range(y_lo, y_hi):
                    neighbor = nx * nely + ny
                    # Weight decays linearly with distance, floored at 0.
                    weight = rmin - np.sqrt(
                        ((ex - nx) * (ex - nx) + (ey - ny) * (ey - ny)))
                    ih[count] = center
                    jh[count] = neighbor
                    sh[count] = np.maximum(0.0, weight)
                    count += 1
    # Finalize assembly and convert to csc format.
    self.H = scipy.sparse.coo_matrix((sh, (ih, jh)),
        shape=(nelx * nely, nelx * nely)).tocsc()
    self.Hs = self.H.sum(1)
Example 28
Project: fenics-topopt   Author: zfergus   File: von_mises_stress.py    MIT License 5 votes vote down vote up
def calculate_stress(self, x, u, nu, side=1):
    """
    Calculate the Von Mises stress given the densities x, displacements u,
    and young modulus nu.
    """
    stresses = self.calculate_principle_stresses(x, u, nu, side)
    s11, s22, s12 = stresses
    # sigma_vm^2 = s11^2 - s11*s22 + s22^2 + 3*s12^2 (plane stress).
    vm_squared = s11 ** 2 - s11 * s22 + s22 ** 2 + 3 * s12 ** 2
    return numpy.sqrt(vm_squared)
Example 29
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions.py    MIT License 5 votes vote down vote up
def arccos(X):
    '''Compute the arccos of an AutoDiff object, Dual number or constant.

    INPUTS
    ======
    X: an AutoDiff object, Dual number, or constant

    RETURNS
    =======
    A new AutoDiff object, Dual number, or scalar with the calculated
    value and derivative (d/dx arccos(x) = -1 / sqrt(1 - x^2)).
    '''
    try:
        # AutoDiff input: propagate value, seeded derivative and jacobian.
        value = np.arccos(X.val)
        deriv = (-1 / np.sqrt(1 - X.val ** 2)) * X.der
        jacob = (-1 / np.sqrt(1 - X.val ** 2)) * X.jacobian
        return AutoDiff(value, deriv, X.n, 0, jacob)
    except AttributeError:
        try:
            # Dual number with a numpy-compatible real part.
            return Dual(np.arccos(X.Real), -X.Dual / np.sqrt(1 - X.Real ** 2))
        except AttributeError:
            try:
                # Nested Dual: recurse on the real part.
                return Dual(arccos(X.Real), -X.Dual / sqrt(1 - X.Real ** 2))
            except AttributeError:
                # Plain constant.
                return np.arccos(X)

# arc tangent

# arc tangent 
Example 30
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions.py    MIT License 5 votes vote down vote up
def arcsinh(x):
    '''Compute the hyperbolic arc sine of an AutoDiff object, Dual number
    or constant.

    INPUTS
    ======
    x: an AutoDiff object, Dual number, or constant

    RETURNS
    =======
    A new AutoDiff object, Dual number, or scalar with the calculated
    value and derivative (d/dx arcsinh(x) = 1 / sqrt(x^2 + 1)).
    '''
    try:
        # AutoDiff input: propagate value, seeded derivative and jacobian.
        value = np.arcsinh(x.val)
        slope = (1) / np.sqrt(x.val ** 2 + 1)
        return AutoDiff(value, slope * x.der, x.n, 0, slope * x.jacobian)
    except AttributeError:
        try:
            # Dual number with a numpy-compatible real part.
            return Dual(np.arcsinh(x.Real), x.Dual / np.sqrt((x.Real ** 2) + 1))
        except AttributeError:
            try:
                # Nested Dual: recurse on the real part.
                return Dual(arcsinh(x.Real), (x.Dual * (1 + x.Real ** 2) ** -0.5))
            except AttributeError:
                # Plain constant.
                return np.arcsinh(x)

# hyperbolic arc cosine

# hyperbolic arc cosine 
Example 31
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions.py    MIT License 5 votes vote down vote up
def arccosh(x):
    '''Compute the hyperbolic arc cosine of an AutoDiff object, Dual number
    or constant.

    INPUTS
    ======
    x: an AutoDiff object, Dual number, or constant

    RETURNS
    =======
    A new AutoDiff object, Dual number, or scalar with the calculated
    value and derivative (d/dx arccosh(x) = 1 / sqrt(x^2 - 1), which is
    only defined for x > 1).
    '''
    try:
        # AutoDiff input: propagate value, seeded derivative and jacobian.
        value = np.arccosh(x.val)
        # The derivative of arccosh is only defined when x > 1.
        slope = (1) / np.sqrt(x.val ** 2 - 1)
        return AutoDiff(value, slope * x.der, x.n, 0, slope * x.jacobian)
    except AttributeError:
        try:
            # Dual number with a numpy-compatible real part.
            return Dual(np.arccosh(x.Real), x.Dual / np.sqrt((x.Real ** 2) - 1))
        except AttributeError:
            try:
                # Nested Dual: recurse on the real part.
                return Dual(arccosh(x.Real), (x.Dual * ((x.Real ** 2) - 1) ** -0.5))
            except AttributeError:
                # Plain constant.
                return np.arccosh(x)

# hyperbolic arc tangent

# hyperbolic arc tangent 
Example 32
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_test.py    MIT License 5 votes vote down vote up
def test_arccos_ad_results():
    """arccos of an AutoDiff: interior point, out-of-domain, boundary, zero."""
    # Interior point: d/dx arccos(x) = -1/sqrt(1-x^2), times the seed (2).
    f = ef.arccos(AutoDiff(0.5, 2))
    assert f.val == np.array([[np.arccos(0.5)]])
    assert f.der == np.array([[-2 / np.sqrt(1 - 0.5 ** 2)]])
    assert f.jacobian == np.array([[-1 / np.sqrt(1 - 0.5 ** 2)]])
    # |x| > 1: sqrt of a negative number -> everything is NaN.
    with pytest.warns(RuntimeWarning):
        f = ef.arccos(AutoDiff(-2, 2))
        assert np.isnan(f.val[0][0])
        assert np.isnan(f.der[0][0])
        assert np.isnan(f.jacobian[0][0])
    # x == 1: value defined, derivative diverges to -inf (division by 0).
    with pytest.warns(RuntimeWarning):
        f = ef.arccos(AutoDiff(1, 2))
        assert f.val == np.array([[np.arccos(1)]])
        assert np.isneginf(f.der[0][0])
        assert np.isneginf(f.jacobian[0][0])
    # zero
    f = ef.arccos(AutoDiff(0, 2))
    assert f.val == np.array([[np.arccos(0)]])
    assert f.der == np.array([[-2 / np.sqrt(1 - 0 ** 2)]])
    assert f.jacobian == np.array([[-1 / np.sqrt(1 - 0 ** 2)]])
Example 33
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_test.py    MIT License 5 votes vote down vote up
def test_sqrt_constant_results():
    """sqrt of plain constants falls through to numpy's sqrt."""
    assert ef.sqrt(5) == np.sqrt(5)
    assert ef.sqrt(0) == np.sqrt(0)
    # Negative input is outside the real domain -> NaN plus a warning.
    with pytest.warns(RuntimeWarning):
        result = ef.sqrt(-5)
        assert np.isnan(result)
Example 34
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_Dual_test.py    MIT License 5 votes vote down vote up
def test_arcsin_ad_results():
    """arcsin of a Dual number across its domain."""
    # Interior point: d/dx arcsin(x) = 1/sqrt(1-x^2), times the seed (2).
    f = ef.arcsin(Dual(0.5, 2))
    assert f.Real == np.array([[np.arcsin(0.5)]])
    assert f.Dual == np.array([[2 / np.sqrt(1 - 0.5 ** 2)]])
    # |x| > 1: sqrt of a negative number -> NaN.
    with pytest.warns(RuntimeWarning):
        f = ef.arcsin(Dual(-2, 2))
        assert np.isnan(f.Real)
        assert np.isnan(f.Dual)
    # x == 1: value defined, derivative diverges (division by zero).
    with pytest.warns(RuntimeWarning):
        f = ef.arcsin(Dual(1, 2))
        assert f.Real == np.array([[np.arcsin(1)]])
        assert np.isinf(f.Dual)
    # zero
    f = ef.arcsin(Dual(0, 2))
    assert f.Real == np.array([[0.0]])
    assert f.Dual == np.array([[2.0]])
Example 35
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_Dual_test.py    MIT License 5 votes vote down vote up
def test_arccos_ad_results():
    """arccos of a Dual number across its domain."""
    # Interior point: d/dx arccos(x) = -1/sqrt(1-x^2), times the seed (2).
    f = ef.arccos(Dual(0.5, 2))
    assert f.Real == np.array([[np.arccos(0.5)]])
    assert f.Dual == np.array([[-2 / np.sqrt(1 - 0.5 ** 2)]])
    # |x| > 1: sqrt of a negative number -> NaN.
    with pytest.warns(RuntimeWarning):
        f = ef.arccos(Dual(-2, 2))
        assert np.isnan(f.Real)
        assert np.isnan(f.Dual)
    # x == 1: value defined, derivative diverges to -inf (division by 0).
    with pytest.warns(RuntimeWarning):
        f = ef.arccos(Dual(1, 2))
        assert f.Real == np.array([[np.arccos(1)]])
        assert np.isneginf(f.Dual)
    # zero
    f = ef.arccos(Dual(0, 2))
    assert f.Real == np.array([[np.arccos(0)]])
    assert f.Dual == np.array([[-2 / np.sqrt(1 - 0 ** 2)]])
Example 36
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions_Dual_test.py    MIT License 5 votes vote down vote up
def test_sqrt_types():
	"""ef.sqrt must reject non-numeric (string) input with a TypeError."""
	for bad_input in ('x', "1234"):
		with pytest.raises(TypeError):
			ef.sqrt(bad_input)


# ---------------LOGISTIC FUNC----------------# 
Example 37
Project: keras_mixnets   Author: titu1994   File: custom_objects.py    MIT License 5 votes vote down vote up
def __call__(self, shape, dtype=None):
        """Sample a conv kernel from N(0, sqrt(2 / fan_out)) — He-style init.

        `shape` is (kernel_h, kernel_w, in_filters, out_filters); `dtype`
        defaults to the Keras backend float type.
        """
        dtype = dtype or K.floatx()

        k_h, k_w, _, n_out = shape
        # Fan-out of the kernel: spatial size times output channels.
        fan_out = int(k_h * k_w * n_out)
        scale = np.sqrt(2.0 / fan_out)
        return tf.random_normal(shape, mean=0.0, stddev=scale, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Example 38
Project: keras_mixnets   Author: titu1994   File: custom_objects.py    MIT License 5 votes vote down vote up
def __call__(self, shape, dtype=None):
        """Sample dense-layer weights uniformly in [-limit, limit].

        limit = 1 / sqrt(shape[1]); `dtype` defaults to the Keras backend
        float type.
        """
        dtype = dtype or K.floatx()

        limit = 1.0 / np.sqrt(shape[1])
        return tf.random_uniform(shape, -limit, limit, dtype=dtype)


# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py 
Example 39
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    Apache License 2.0 5 votes vote down vote up
def shared(shape, name):
#{{{
    """
    Create a shared object of a numpy array.

    Parameters
    ----------
    shape : tuple of int
        Shape of the array to create. A 1-D shape is treated as a bias.
    name : str
        Name of the resulting variable.

    Returns
    -------
    A theano shared variable (bias case, zero-initialized) or a
    glorot_uniform-initialized variable (weight case).
    """
    if len(shape) == 1:
        value = np.zeros(shape)  # bias are initialized with zeros
        return theano.shared(value=value.astype(theano.config.floatX), name=name)
    else:
        # Weights use Glorot (Xavier) uniform initialization.
        # NOTE(review): the original also computed an unused hand-rolled
        # uniform sample here (drange * np.random.uniform(...)) before
        # returning init(...); that dead code has been removed. This also
        # stops the call from consuming global numpy RNG state.
        init = initializations.get('glorot_uniform')
        return init(shape=shape, name=name)
#}}}
Example 40
Project: Att-ChemdNER   Author: lingluodlut   File: initializations.py    Apache License 2.0 5 votes vote down vote up
def lecun_uniform(shape, name=None, dim_ordering='th'):
    """LeCun uniform initializer: scale = sqrt(3 / fan_in).

    Reference: LeCun 98, Efficient Backprop
    http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    """
    fan_in, _ = get_fans(shape, dim_ordering=dim_ordering)
    limit = np.sqrt(3. / fan_in)
    return uniform(shape, limit, name=name)
Example 41
Project: Att-ChemdNER   Author: lingluodlut   File: initializations.py    Apache License 2.0 5 votes vote down vote up
def glorot_normal(shape, name=None, dim_ordering='th'):
    """Glorot (Xavier) normal initializer: stddev = sqrt(2 / (fan_in + fan_out)).

    Reference: Glorot & Bengio, AISTATS 2010.
    """
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    stddev = np.sqrt(2. / (fan_in + fan_out))
    return normal(shape, stddev, name=name)
Example 42
Project: Att-ChemdNER   Author: lingluodlut   File: initializations.py    Apache License 2.0 5 votes vote down vote up
def glorot_uniform(shape, name=None, dim_ordering='th'):
    """Glorot (Xavier) uniform initializer: scale = sqrt(6 / (fan_in + fan_out))."""
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    limit = np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, limit, name=name)
Example 43
Project: Att-ChemdNER   Author: lingluodlut   File: initializations.py    Apache License 2.0 5 votes vote down vote up
def he_uniform(shape, name=None, dim_ordering='th'):
    """He uniform initializer: scale = sqrt(6 / fan_in)."""
    fan_in, _ = get_fans(shape, dim_ordering=dim_ordering)
    limit = np.sqrt(6. / fan_in)
    return uniform(shape, limit, name=name)
Example 44
Project: prediction-constrained-topic-models   Author: dtak   File: grad_descent_minimizer.py    MIT License 5 votes vote down vote up
def calc_l2_norm_of_vector_per_entry(grad_vec):
    """Return the L2 norm of *grad_vec* divided by its number of entries."""
    return np.linalg.norm(grad_vec) / float(grad_vec.size)
Example 45
Project: FRIDA   Author: LCAV   File: point_cloud.py    MIT License 5 votes vote down vote up
def trilateration_single_point(self, c, Dx, Dy):
        '''
        Locate a point from its distances to two anchors.

        Anchor x sits at the origin (0, 0) and anchor y at (0, c). Dx and Dy
        are the distances from the unknown point to x and y, respectively.
        Returns the point's coordinates as ndarray [t, z].
        '''
        # Coordinate along the x--y axis, from the two circle equations.
        numerator = c**2 - (Dy**2 - Dx**2)
        z = numerator / (2*c)
        # Orthogonal offset via Pythagoras (NaN if the circles don't meet).
        t = np.sqrt(Dx**2 - z**2)
        return np.array([t, z])
Example 46
Project: AutoDL   Author: tanguofu   File: convnet_builder.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def affine(self,
             num_out_channels,
             input_layer=None,
             num_channels_in=None,
             bias=0.0,
             stddev=None,
             activation='relu'):
    """Append a fully-connected (affine) layer to the network being built.

    Args:
      num_out_channels: number of output units.
      input_layer: input tensor; defaults to the builder's current top layer.
      num_channels_in: input width; defaults to the builder's current top size.
      bias: constant used to initialize the bias vector.
      stddev: stddev for the truncated-normal kernel initializer; when None,
        sqrt(init_factor / num_channels_in) is used (init_factor is 2 for
        relu, 1 otherwise).
      activation: 'relu', 'linear', or None.

    Returns:
      The layer's output tensor; also becomes the builder's new top layer.

    Raises:
      KeyError: if `activation` is not one of the supported values.
    """
    if input_layer is None:
      input_layer = self.top_layer
    if num_channels_in is None:
      num_channels_in = self.top_size
    # Unique per-layer name: affine0, affine1, ...
    name = 'affine' + str(self.counts['affine'])
    self.counts['affine'] += 1
    with tf.variable_scope(name):
      # He-style scaling for relu, plain 1/sqrt(fan_in) otherwise.
      init_factor = 2. if activation == 'relu' else 1.
      stddev = stddev or np.sqrt(init_factor / num_channels_in)
      kernel = self.get_variable(
          'weights', [num_channels_in, num_out_channels],
          self.variable_dtype, self.dtype,
          initializer=tf.truncated_normal_initializer(stddev=stddev))
      biases = self.get_variable('biases', [num_out_channels],
                                 self.variable_dtype, self.dtype,
                                 initializer=tf.constant_initializer(bias))
      # logits = input_layer @ kernel + biases
      logits = tf.nn.xw_plus_b(input_layer, kernel, biases)
      if activation == 'relu':
        affine1 = tf.nn.relu(logits, name=name)
      elif activation == 'linear' or activation is None:
        affine1 = logits
      else:
        raise KeyError('Invalid activation type \'%s\'' % activation)
      # Track builder state so the next layer can chain off this one.
      self.top_layer = affine1
      self.top_size = num_out_channels
      return affine1
Example 47
Project: AutoDL   Author: tanguofu   File: convnet_builder.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def affine(self,
             num_out_channels,
             input_layer=None,
             num_channels_in=None,
             bias=0.0,
             stddev=None,
             activation='relu'):
    """Add a dense layer (xw + b, optional relu) and update builder state.

    Args:
      num_out_channels: number of output units.
      input_layer: input tensor; defaults to the current top layer.
      num_channels_in: input width; defaults to the current top size.
      bias: initial value for the bias vector.
      stddev: kernel initializer stddev; defaults to
        sqrt(init_factor / num_channels_in) where init_factor is 2 for relu
        and 1 otherwise.
      activation: 'relu', 'linear', or None.

    Returns:
      The output tensor of the new layer.

    Raises:
      KeyError: for an unsupported `activation` value.
    """
    if input_layer is None:
      input_layer = self.top_layer
    if num_channels_in is None:
      num_channels_in = self.top_size
    # Generate a unique scope/name for this layer instance.
    name = 'affine' + str(self.counts['affine'])
    self.counts['affine'] += 1
    with tf.variable_scope(name):
      init_factor = 2. if activation == 'relu' else 1.
      stddev = stddev or np.sqrt(init_factor / num_channels_in)
      kernel = self.get_variable(
          'weights', [num_channels_in, num_out_channels],
          self.variable_dtype, self.dtype,
          initializer=tf.truncated_normal_initializer(stddev=stddev))
      biases = self.get_variable('biases', [num_out_channels],
                                 self.variable_dtype, self.dtype,
                                 initializer=tf.constant_initializer(bias))
      logits = tf.nn.xw_plus_b(input_layer, kernel, biases)
      if activation == 'relu':
        affine1 = tf.nn.relu(logits, name=name)
      elif activation == 'linear' or activation is None:
        affine1 = logits
      else:
        raise KeyError('Invalid activation type \'%s\'' % activation)
      # Chain builder state forward.
      self.top_layer = affine1
      self.top_size = num_out_channels
      return affine1
Example 48
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: convolution.py    MIT License 5 votes vote down vote up
def batchnorm(self, layer, inp):
        """Apply batch normalization to `inp` using `layer`'s stored weights.

        When self.var is falsy, normalization is computed by hand from the
        stored moving statistics (inference-style, no trainable update);
        otherwise tf.contrib.slim's batch_norm op is built with the stored
        weights as parameter initializers.
        """
        if not self.var:
            # Manual normalization: (x - mean) / (sqrt(var) + eps) * gamma.
            # NOTE(review): epsilon is added to the stddev here, not to the
            # variance inside the sqrt as slim.batch_norm does — confirm
            # this matches the exported weights' convention.
            temp = (inp - layer.w['moving_mean'])
            temp /= (np.sqrt(layer.w['moving_variance']) + 1e-5)
            temp *= layer.w['gamma']
            return temp
        else:
            # Build a slim batch_norm op; scale-only (no beta/center),
            # initialized from the layer's stored parameters.
            args = dict({
                'center' : False, 'scale' : True,
                'epsilon': 1e-5, 'scope' : self.scope,
                'updates_collections' : None,
                'is_training': layer.h['is_training'],
                'param_initializers': layer.w
                })
            return slim.batch_norm(inp, **args)
Example 49
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: matrix_vae.py    MIT License 5 votes vote down vote up
def weight_variable(shape, name):
    """Create a TF weight Variable with Xavier-style normal initialisation.

    The stddev is 1 / sqrt(fan_in / 2) where fan_in is shape[0].
    """
    fan_in = shape[0]
    stddev = 1. / tf.sqrt(fan_in / 2.)
    initial = tf.random_normal(shape=shape, stddev=stddev)
    return tf.Variable(initial, name=name)
Example 50
Project: VAE-MF-TensorFlow   Author: dongwookim-ml   File: matrix_vae.py    MIT License 5 votes vote down vote up
def train_test_validation(self, M, train_idx, test_idx, valid_idx, n_steps=100000, result_path='result/'):
        """Train the model on a train/valid/test split of rating matrix M.

        The given index arrays select entries among M's nonzero cells; three
        dense matrices of M's shape are built holding only the respective
        split's ratings. Trains for n_steps - 1 iterations, tracking the test
        RMSE at the step with the best validation RMSE, writes TF summaries
        and a checkpoint under result_path, and returns that best test RMSE.
        """
        # Coordinates of all observed (nonzero) ratings.
        nonzero_user_idx = M.nonzero()[0]
        nonzero_item_idx = M.nonzero()[1]

        trainM = np.zeros(M.shape)
        trainM[nonzero_user_idx[train_idx], nonzero_item_idx[train_idx]] = M[nonzero_user_idx[train_idx], nonzero_item_idx[train_idx]]

        validM = np.zeros(M.shape)
        validM[nonzero_user_idx[valid_idx], nonzero_item_idx[valid_idx]] = M[nonzero_user_idx[valid_idx], nonzero_item_idx[valid_idx]]

        testM = np.zeros(M.shape)
        testM[nonzero_user_idx[test_idx], nonzero_item_idx[test_idx]] = M[nonzero_user_idx[test_idx], nonzero_item_idx[test_idx]]

        # Users with no training ratings cannot be evaluated: drop their
        # validation/test entries.
        for i in range(self.num_user):
            if np.sum(trainM[i]) == 0:
                testM[i] = 0
                validM[i] = 0

        train_writer = tf.summary.FileWriter(
            result_path + '/train', graph=self.sess.graph)

        best_val_rmse = np.inf
        best_test_rmse = 0

        self.sess.run(tf.global_variables_initializer())
        for step in range(1, n_steps):
            feed_dict = {self.user: trainM, self.valid_rating:validM, self.test_rating:testM}

            _, mse, mae, valid_rmse, test_rmse,  summary_str = self.sess.run(
                [self.train_step, self.MSE, self.MAE, self.valid_RMSE, self.test_RMSE, self.summary_op], feed_dict=feed_dict)
            train_writer.add_summary(summary_str, step)
            print("Iter {0} Train RMSE:{1}, Valid RMSE:{2}, Test RMSE:{3}".format(step, np.sqrt(mse), valid_rmse, test_rmse))

            # Model selection: remember the test RMSE at the best valid RMSE.
            if best_val_rmse > valid_rmse:
                best_val_rmse = valid_rmse
                best_test_rmse = test_rmse

        self.saver.save(self.sess, result_path + "/model.ckpt")
        return best_test_rmse
Example 51
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def align_plane_to_ground(cls, u, d):
        """
        Generates a rotation translation object that rotates such that u coincides with (0, 0, -1) and d=0
        :param u: plane normal vector (flattened to 1-D; assumed to be a unit
            vector with |u[0]| < 1 — TODO confirm with callers)
        :param d: plane offset; becomes the z component of the translation
        :return: RotationTranslationData
        """
        u = u.ravel()
        # c = sqrt(1 - u_x^2); NaN/ZeroDivision if u is not unit-length or
        # u is parallel to the x axis.
        c = np.sqrt(1-u[0]**2)
        cinv = 1 / c

        R = np.array([[c, -u[0]*u[1]*cinv, -u[0]*u[2]*cinv],
                      [0, -u[2] * cinv, u[1] * cinv],
                      [-u[0], -u[1], -u[2]]])
        return RotationTranslationData(rt=(R, np.array([0., 0., d])))
Example 52
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def extract_quaternion(R):
    """Convert a 3x3 rotation matrix into a quaternion [w, x, y, z].

    Branch selection follows the usual numerically-stable scheme: when the
    trace is near its minimum (w close to 0), the dominant diagonal entry of
    R + R^T decides which vector component is recovered first.
    """
    trace = np.sum(np.diagonal(R))
    if trace + 1 < 0.25:
        # Near-degenerate w: recover the largest vector component first.
        sym = R + R.T
        asym = R - R.T
        sym_diag = np.diagonal(sym)
        i_max = np.argmax(sym_diag)
        q = np.empty(4)
        if i_max == 0:
            q[1] = np.sqrt(sym_diag[0] - trace + 1) / 2
            inv = 1 / q[1]
            q[2] = sym[1, 0] / 4 * inv
            q[3] = sym[2, 0] / 4 * inv
            q[0] = asym[2, 1] / 4 * inv
        elif i_max == 1:
            q[2] = np.sqrt(sym_diag[1] - trace + 1) / 2
            inv = 1 / q[2]
            q[1] = sym[1, 0] / 4 * inv
            q[3] = sym[2, 1] / 4 * inv
            q[0] = asym[0, 2] / 4 * inv
        else:
            q[3] = np.sqrt(sym_diag[2] - trace + 1) / 2
            inv = 1 / q[3]
            q[1] = sym[2, 0] / 4 * inv
            q[2] = sym[1, 2] / 4 * inv
            q[0] = asym[1, 0] / 4 * inv
    else:
        # Well-conditioned w: standard closed form.
        r = np.sqrt(1 + trace)
        s = 0.5 / r
        q = np.array([0.5*r, (R[2, 1] - R[1, 2])*s, (R[0, 2] - R[2, 0])*s, (R[1, 0] - R[0, 1])*s])

    return q
Example 53
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def PSNR(gt, rc):
    """Peak signal-to-noise ratio (dB) between ground truth and reconstruction.

    Uses a 16-bit peak of 65536 (NOTE(review): the true 16-bit max is 65535 —
    confirm intent) and a 1e-3 floor inside the sqrt to avoid division by zero.
    """
    mse = MSE(gt, rc)
    pmax = 65536
    rmse = np.sqrt(mse + 1e-3)
    return 20 * np.log10(pmax / rmse)
Example 54
Project: Multi-Modal-Spectral-Image-Super-Resolution   Author: IVRL   File: test.py    MIT License 5 votes vote down vote up
def PSNR(gt, rc):
    """Return the PSNR in dB for a 16-bit image pair (peak value 65536).

    A small constant (1e-3) stabilizes the log when the MSE is zero.
    """
    peak = 65536
    error = MSE(gt, rc)
    return 20 * np.log10(peak / np.sqrt(error + 1e-3))
Example 55
Project: Automated-Social-Annotation   Author: acadTags   File: BiGRU_train.py    MIT License 5 votes vote down vote up
def assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,model,num_run,word2vec_model_path=None):
    """Load word2vec vectors and copy them into the model's embedding matrix.

    Index 0 ('PAD') gets zeros; words found in the word2vec vocabulary get
    their pretrained vector; missing words get a uniform random vector in
    [-bound, bound] with bound = sqrt(6)/sqrt(vocab_size). The assembled
    matrix is assigned to model.Embedding through the running session.

    :param sess: active TensorFlow session used to run the assign op.
    :param vocabulary_index2word: mapping from word index to word string.
    :param vocab_size: vocabulary size (number of embedding rows).
    :param model: model object exposing an `Embedding` variable.
    :param num_run: run counter; progress is printed only when it is 0.
    :param word2vec_model_path: path to the binary word2vec model file.
    """
    if num_run==0:
        print("using pre-trained word emebedding.started.word2vec_model_path:",word2vec_model_path)
    # transform embedding input into a dictionary
    # word2vecc=word2vec.load('word_embedding.txt') #load vocab-vector fiel.word2vecc['w91874']
    word2vec_model = word2vec.load(word2vec_model_path, kind='bin')
    word2vec_dict = {}
    for word, vector in zip(word2vec_model.vocab, word2vec_model.vectors):
        word2vec_dict[word] = vector
    word_embedding_2dlist = [[]] * vocab_size  # create an empty word_embedding list: which is a list of list, i.e. a list of word, where each word is a list of values as an embedding vector.
    word_embedding_2dlist[0] = np.zeros(FLAGS.embed_size)  # assign empty for first word:'PAD'
    bound = np.sqrt(6.0) / np.sqrt(vocab_size)  # bound for random variables.
    count_exist = 0;
    count_not_exist = 0
    for i in range(1, vocab_size):  # loop each word
        word = vocabulary_index2word[i]  # get a word
        embedding = None
        try:
            embedding = word2vec_dict[word]  # try to get vector:it is an array.
        except Exception:
            embedding = None
        if embedding is not None:  # the 'word' exist a embedding
            word_embedding_2dlist[i] = embedding;
            count_exist = count_exist + 1  # assign array to this word.
        else:  # no embedding for this word
            word_embedding_2dlist[i] = np.random.uniform(-bound, bound, FLAGS.embed_size);
            count_not_exist = count_not_exist + 1  # init a random value for the word.
    word_embedding_final = np.array(word_embedding_2dlist)  # covert to 2d array.
    #print(word_embedding_final[0]) # print the original embedding for the first word
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    t_assign_embedding = tf.assign(model.Embedding,word_embedding)  # assign this value to our embedding variables of our model.
    sess.run(t_assign_embedding);
    if num_run==0:
        print("word. exists embedding:", count_exist, " ;word not exist embedding:", count_not_exist)
        print("using pre-trained word emebedding.ended...")

# based on a threshold, run validation on the validation set and report loss and precision (multi-label)
Example 56
Project: Automated-Social-Annotation   Author: acadTags   File: HAN_train.py    MIT License 5 votes vote down vote up
def assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,model,num_run,word2vec_model_path=None):
    if num_run==0:
        print("using pre-trained word emebedding.started.word2vec_model_path:",word2vec_model_path)
    # transform embedding input into a dictionary
    # word2vecc=word2vec.load('word_embedding.txt') #load vocab-vector fiel.word2vecc['w91874']
    word2vec_model = word2vec.load(word2vec_model_path, kind='bin')
    word2vec_dict = {}
    for word, vector in zip(word2vec_model.vocab, word2vec_model.vectors):
        word2vec_dict[word] = vector
    word_embedding_2dlist = [[]] * vocab_size  # create an empty word_embedding list: which is a list of list, i.e. a list of word, where each word is a list of values as an embedding vector.
    word_embedding_2dlist[0] = np.zeros(FLAGS.embed_size)  # assign empty for first word:'PAD'
    bound = np.sqrt(6.0) / np.sqrt(vocab_size)  # bound for random variables.
    count_exist = 0;
    count_not_exist = 0
    for i in range(1, vocab_size):  # loop each word
        word = vocabulary_index2word[i]  # get a word
        embedding = None
        try:
            embedding = word2vec_dict[word]  # try to get vector:it is an array.
        except Exception:
            embedding = None
        if embedding is not None:  # the 'word' exist a embedding
            word_embedding_2dlist[i] = embedding;
            count_exist = count_exist + 1  # assign array to this word.
        else:  # no embedding for this word
            word_embedding_2dlist[i] = np.random.uniform(-bound, bound, FLAGS.embed_size);
            count_not_exist = count_not_exist + 1  # init a random value for the word.
    word_embedding_final = np.array(word_embedding_2dlist)  # covert to 2d array.
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    t_assign_embedding = tf.assign(model.Embedding,word_embedding)  # assign this value to our embedding variables of our model.
    sess.run(t_assign_embedding);
    if num_run==0:
        print("word. exists embedding:", count_exist, " ;word not exist embedding:", count_not_exist)
        print("using pre-trained word emebedding.ended...")

# based on a threshold, run validation on the validation set and report loss and precision (multi-label)
Example 57
Project: Automated-Social-Annotation   Author: acadTags   File: JMAN_train.py    MIT License 5 votes vote down vote up
def assign_pretrained_word_embedding(sess,vocabulary_index2word,vocab_size,model,num_run,word2vec_model_path=None):
    """Fill the model's Embedding variable from a pretrained word2vec model.

    Row 0 ('PAD') is all zeros; words present in word2vec get their vector,
    missing words get uniform noise in [-bound, bound] with
    bound = sqrt(6)/sqrt(vocab_size). The matrix is pushed into
    model.Embedding with a tf.assign run on `sess`.

    :param sess: active TensorFlow session used to run the assign op.
    :param vocabulary_index2word: mapping from word index to word string.
    :param vocab_size: vocabulary size (number of embedding rows).
    :param model: model object exposing an `Embedding` variable.
    :param num_run: run counter; progress is printed only when it is 0.
    :param word2vec_model_path: path to the binary word2vec model file.
    """
    if num_run==0:
        print("using pre-trained word emebedding.started.word2vec_model_path:",word2vec_model_path)
    # transform embedding input into a dictionary
    # word2vecc=word2vec.load('word_embedding.txt') #load vocab-vector fiel.word2vecc['w91874']
    word2vec_model = word2vec.load(word2vec_model_path, kind='bin')
    word2vec_dict = {}
    for word, vector in zip(word2vec_model.vocab, word2vec_model.vectors):
        word2vec_dict[word] = vector
    word_embedding_2dlist = [[]] * vocab_size  # create an empty word_embedding list: which is a list of list, i.e. a list of word, where each word is a list of values as an embedding vector.
    word_embedding_2dlist[0] = np.zeros(FLAGS.embed_size)  # assign empty for first word:'PAD'
    bound = np.sqrt(6.0) / np.sqrt(vocab_size)  # bound for random variables.
    count_exist = 0;
    count_not_exist = 0
    for i in range(1, vocab_size):  # loop each word
        word = vocabulary_index2word[i]  # get a word
        embedding = None
        try:
            embedding = word2vec_dict[word]  # try to get vector:it is an array.
        except Exception:
            embedding = None
        if embedding is not None:  # the 'word' exist a embedding
            word_embedding_2dlist[i] = embedding;
            count_exist = count_exist + 1  # assign array to this word.
        else:  # no embedding for this word
            word_embedding_2dlist[i] = np.random.uniform(-bound, bound, FLAGS.embed_size);
            count_not_exist = count_not_exist + 1  # init a random value for the word.
    word_embedding_final = np.array(word_embedding_2dlist)  # covert to 2d array.
    word_embedding = tf.constant(word_embedding_final, dtype=tf.float32)  # convert to tensor
    t_assign_embedding = tf.assign(model.Embedding,word_embedding)  # assign this value to our embedding variables of our model.
    sess.run(t_assign_embedding);
    if num_run==0:
        print("word. exists embedding:", count_exist, " ;word not exist embedding:", count_not_exist)
        print("using pre-trained word emebedding.ended...")
Example 58
Project: Neural-LP   Author: fanyangxyz   File: model.py    MIT License 5 votes vote down vote up
def _random_uniform_unit(self, r, c):
        """ Initialize random and unit row norm matrix of size (r, c). """
        bound = 6./ np.sqrt(c)
        init_matrix = np.random.uniform(-bound, bound, (r, c))
        init_matrix = np.array(map(lambda row: row / np.linalg.norm(row), init_matrix))
        return init_matrix 
Example 59
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    MIT License 5 votes vote down vote up
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
    """Create a 'weight' variable with He-style scaling std = gain / sqrt(fan_in).

    With use_wscale=True the variable stores unit-variance weights and the
    scaling is applied at runtime through a constant multiplier; otherwise
    the scaling is baked into the initializer.
    """
    if fan_in is None:
        fan_in = np.prod(shape[:-1])
    he_std = gain / np.sqrt(fan_in) # He init
    if use_wscale:
        # Equalized learning rate: scale applied at runtime.
        wscale = tf.constant(np.float32(he_std), name='wscale')
        return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale
    return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, he_std))

#----------------------------------------------------------------------------
# Fully-connected layer. 
Example 60
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    MIT License 5 votes vote down vote up
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
    """Fully-connected layer: flattens trailing dims and applies x @ w."""
    if len(x.shape) > 2:
        # Collapse all dimensions after the batch axis into one.
        flat_dim = np.prod([d.value for d in x.shape[1:]])
        x = tf.reshape(x, [-1, flat_dim])
    w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    return tf.matmul(x, tf.cast(w, x.dtype))

#----------------------------------------------------------------------------
# Convolutional layer. 
Example 61
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    MIT License 5 votes vote down vote up
def conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Same-padded stride-1 convolution in NCHW layout; kernel must be odd."""
    assert kernel >= 1 and kernel % 2 == 1
    weights = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    weights = tf.cast(weights, x.dtype)
    return tf.nn.conv2d(x, weights, strides=[1,1,1,1], padding='SAME', data_format='NCHW')

#----------------------------------------------------------------------------
# Apply bias to the given activation tensor. 
Example 62
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    MIT License 5 votes vote down vote up
def upscale2d_conv2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Fused 2x upscale + convolution via a stride-2 transposed conv (NCHW).

    The kernel is zero-padded by one pixel on each spatial side and the four
    shifted copies are summed — presumably equivalent to nearest-neighbour
    upsampling followed by a regular conv, but in one op (confirm against
    the reference implementation). Requires an odd kernel size.
    """
    assert kernel >= 1 and kernel % 2 == 1
    # Note the transposed-conv weight layout: [k, k, out_channels, in_channels].
    w = get_weight([kernel, kernel, fmaps, x.shape[1].value], gain=gain, use_wscale=use_wscale, fan_in=(kernel**2)*x.shape[1].value)
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    # Sum of the four 1-pixel-shifted copies of the padded kernel.
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]])
    w = tf.cast(w, x.dtype)
    # Output has twice the spatial size of the input.
    os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2]
    return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW')

#----------------------------------------------------------------------------
# Box filter downscaling layer. 
Example 63
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    MIT License 5 votes vote down vote up
def conv2d_downscale2d(x, fmaps, kernel, gain=np.sqrt(2), use_wscale=False):
    """Fused convolution + 2x downscale via a stride-2 conv (NCHW layout).

    The kernel is zero-padded by one pixel per spatial side and the four
    shifted copies are averaged (the 0.25 factor) — combining a box filter
    with the conv so a separate average-pool pass is not needed. Requires an
    odd kernel size.
    """
    assert kernel >= 1 and kernel % 2 == 1
    w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT')
    # Average of the four 1-pixel-shifted copies (box-filter smoothing).
    w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25
    w = tf.cast(w, x.dtype)
    return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW')

#----------------------------------------------------------------------------
# Pixelwise feature vector normalization. 
Example 64
Project: disentangling_conditional_gans   Author: zalandoresearch   File: util_scripts.py    MIT License 5 votes vote down vote up
def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1, duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M', random_seed=1000, minibatch_size=8):
    """Render an MP4 of a smooth random walk through the generator's latent space.

    Loads the network pickle for `run_id`/`snapshot`, draws one latent per
    frame, smooths the latent sequence over time with a Gaussian filter, and
    writes the resulting frames to an MP4 in a fresh result subdirectory.

    NOTE(review): `mp4_codec` (default 'libx265') is accepted but the
    write_videofile call below hard-codes codec='libx264', so the parameter
    is silently ignored — confirm which codec is intended.
    NOTE(review): `grid_size` is a mutable default argument; safe only as
    long as it is never mutated in place.
    """
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:] # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    # Temporal Gaussian smoothing along the frame axis only, wrapping at the
    # ends so the video loops seamlessly.
    all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    # Renormalize so the smoothed latents keep unit RMS magnitude.
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Frame generation func for moviepy.
    def make_frame(t):
        # Map the clip time t (seconds) to the nearest precomputed frame.
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0) # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2) # grayscale => RGB
        return grid

    # Generate video.
    import moviepy.editor # pip install moviepy
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec='libx264', bitrate=mp4_bitrate)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()

#----------------------------------------------------------------------------
# Generate MP4 video of training progress for a previous training run.
# To run, uncomment the appropriate line in config.py and launch train.py. 
Example 65
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0 5 votes vote down vote up
def diff_mean_conf_formula(n1, n2, mean1, mean2, var1, var2, rnd=2):
    """
    Calculates the 95% confidence interval for the difference of two sample means.

    :param n1: Sample size of sample 1
    :param n2: Sample size of sample 2
    :param mean1: Mean of sample 1
    :param mean2: Mean of sample 2
    :param var1: Variance of sample 1
    :param var2: Variance of sample 2
    :param rnd: Number of decimal places the result shall be round to. Default is 2.
    :return: Confidence interval given as a list: [interval start, interval end]
    """
    # Pooled variance estimate, weighted by degrees of freedom.
    pooled_var = ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 - 1 + n2 - 1)

    # Standard error of the difference in means.
    std_err = np.sqrt(pooled_var * (1 / n1 + 1 / n2))

    mean_diff = mean1 - mean2

    # z = 1.96 corresponds to a 95% confidence level.
    z = 1.96  # t.ppf((1+0.95) / 2, len(series1)-1+len(series2)-1)

    lower = mean_diff - z * std_err
    upper = mean_diff + z * std_err

    return [np.round(lower, rnd), np.round(upper, rnd)]
Example 66
Project: DataComp   Author: Cojabi   File: utils.py    Apache License 2.0 5 votes vote down vote up
def diff_prop_conf_formula(count1, n1, count2, n2, rnd=2):
    """
    Calculates the Agresti / Caffo confidence interval for the difference of
    proportions between two samples.

    :param count1: Number of successes in sample 1
    :param n1: Sample size of sample 1
    :param count2: Number of successes in sample 2
    :param n2: Sample size of sample 2
    :param rnd: Number of decimal places the result shall be round to. Default is 2.
    :return: Confidence interval given as a list: [interval start, interval end]
    """
    # Agresti / Caffo adjustment: add one success and one failure per sample.
    adj_n1 = n1 + 2
    adj_n2 = n2 + 2
    p1 = (count1 + 1) / adj_n1
    p2 = (count2 + 1) / adj_n2

    # Combined standard error of the difference in proportions.
    std_err = np.sqrt(p1 * (1 - p1) / adj_n1 + p2 * (1 - p2) / adj_n2)

    prop_diff = p1 - p2

    # z = 1.96 corresponds to a 95% confidence level.
    z = 1.96  # t.ppf((1+0.95) / 2, len(series1)-1+len(series2)-1)

    lower = prop_diff - z * std_err
    upper = prop_diff + z * std_err

    return [np.round(lower, rnd), np.round(upper, rnd)]
Example 67
Project: StructEngPy   Author: zhuoju36   File: element.py    MIT License 5 votes vote down vote up
def angle(node_i,node_j,x):
        """Return the angle (radians) between the member axis i->j and vector x."""
        # Member axis vector from node_i to node_j.
        v = np.array([node_j.X - node_i.X, node_j.Y - node_i.Y, node_j.Z - node_i.Z])
        norm_v = np.sqrt(v.dot(v))
        norm_x = np.sqrt(x.dot(x))
        cos_theta = v.dot(x) / norm_v / norm_x
        return np.arccos(cos_theta)

        #derivation
Example 68
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np(self):
        """Each adversarial perturbation's L2 norm should equal the attack eps."""
        x_val = np.random.rand(100, 1000)
        delta = self.attack.generate_np(x_val) - x_val
        # Per-sample L2 norm of the perturbation.
        norms = np.sqrt(np.sum(delta**2, axis=1))
        self.assertClose(norms, self.attack.eps)
Example 69
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_attack_strength(self):
        """
        This test generates a random source and guide and feeds them in a
        randomly initialized CNN. Checks if an adversarial example can get
        at least 50% closer to the guide compared to the original distance of
        the source and the guide.
        """
        tf.set_random_seed(1234)
        input_shape = self.input_shape
        # Random non-negative source and guide images in [0, 1).
        x_src = tf.abs(tf.random_uniform(input_shape, 0., 1.))
        x_guide = tf.abs(tf.random_uniform(input_shape, 0., 1.))

        # Feature layer at which distances are measured.
        layer = 'fc7'
        attack_params = {'eps': 5./256, 'clip_min': 0., 'clip_max': 1.,
                         'nb_iter': 10, 'eps_iter': 0.005,
                         'layer': layer}
        x_adv = self.attack.generate(x_src, x_guide, **attack_params)
        # Feature activations of adversarial, source, and guide inputs.
        h_adv = self.model.fprop(x_adv)[layer]
        h_src = self.model.fprop(x_src)[layer]
        h_guide = self.model.fprop(x_guide)[layer]

        init = tf.global_variables_initializer()
        self.sess.run(init)

        ha, hs, hg, xa, xs, xg = self.sess.run(
            [h_adv, h_src, h_guide, x_adv, x_src, x_guide])
        # L2 feature-space distances: adv-to-source, adv-to-guide, source-to-guide.
        d_as = np.sqrt(((hs-ha)*(hs-ha)).sum())
        d_ag = np.sqrt(((hg-ha)*(hg-ha)).sum())
        d_sg = np.sqrt(((hg-hs)*(hg-hs)).sum())
        print("L2 distance between source and adversarial example `%s`: %.4f" %
              (layer, d_as))
        print("L2 distance between guide and adversarial example `%s`: %.4f" %
              (layer, d_ag))
        print("L2 distance between source and guide `%s`: %.4f" %
              (layer, d_sg))
        print("d_ag/d_sg*100 `%s`: %.4f" % (layer, d_ag*100/d_sg))
        # The adversarial example must close at least half the feature-space
        # gap between source and guide.
        self.assertTrue(d_ag*100/d_sg < 50.)
Example 70
Project: neural-fingerprinting   Author: StephanZheng   File: madry_cifar10_model.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _conv(name, x, filter_size, in_filters, out_filters, strides):
    """Convolution with a He-normal initialized kernel, SAME padding."""
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # He init: stddev = sqrt(2 / n), n = spatial size * output filters.
        n = filter_size * filter_size * out_filters
        initializer = tf.random_normal_initializer(stddev=np.sqrt(2.0 / n))
        kernel = tf.get_variable(
            'DW', [filter_size, filter_size, in_filters, out_filters],
            tf.float32, initializer=initializer)
        return tf.nn.conv2d(x, kernel, strides, padding='SAME')
Example 71
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License 4 votes vote down vote up
def initialise_train(dates):
    """Load ../data/train.csv and turn it into a purely numeric DataFrame.

    Parameters
    ----------
    dates : bool
        When truthy, parse the 'Dates' column and derive Year/Month/Week/Hour
        columns from it.

    Returns
    -------
    pandas.DataFrame
        Training data with string categories encoded as consecutive ints,
        standardized and rotated X/Y coordinates, a radial-distance feature,
        and every location feature rounded to 2 decimals.
    """
    if not dates:
        data_frame = pd.read_csv('../data/train.csv', header=0)
    else:
        data_frame = pd.read_csv('../data/train.csv', header=0, parse_dates=['Dates'])
        data_frame['Year'] = data_frame['Dates'].map(lambda x: x.year)
        data_frame['Month'] = data_frame['Dates'].map(lambda x: x.month)
        data_frame['Week'] = data_frame['Dates'].map(lambda x: x.week)
        data_frame['Hour'] = data_frame['Dates'].map(lambda x: x.hour)

    # Encode each string column as integers: sorted unique values get codes
    # 0..n-1 (replaces five identical hand-rolled dict-building blocks).
    for col in ['Category', 'Descript', 'DayOfWeek', 'PdDistrict', 'Resolution']:
        mapping = {name: i for i, name in enumerate(sorted(np.unique(data_frame[col])))}
        data_frame[col] = data_frame[col].map(mapping).astype(int)

    # Standardize coordinates, then add rotated copies (45/30/60 degrees)
    # and the radial distance as extra location features.
    xy_scaler = pp.StandardScaler()
    xy_scaler.fit(data_frame[["X", "Y"]])
    data_frame[["X", "Y"]] = xy_scaler.transform(data_frame[["X", "Y"]])
    data_frame["rot45_X"] = .707 * data_frame["Y"] + .707 * data_frame["X"]
    data_frame["rot45_Y"] = .707 * data_frame["Y"] - .707 * data_frame["X"]
    data_frame["rot30_X"] = (1.732 / 2) * data_frame["X"] + (1. / 2) * data_frame["Y"]
    data_frame["rot30_Y"] = (1.732 / 2) * data_frame["Y"] - (1. / 2) * data_frame["X"]
    data_frame["rot60_X"] = (1. / 2) * data_frame["X"] + (1.732 / 2) * data_frame["Y"]
    data_frame["rot60_Y"] = (1. / 2) * data_frame["Y"] - (1.732 / 2) * data_frame["X"]
    data_frame["radial_r"] = np.sqrt(np.power(data_frame["Y"], 2) + np.power(data_frame["X"], 2))

    # Round every location feature to 2 decimal places. The format-then-cast
    # round trip reproduces the original behaviour exactly.
    for col in ["X", "Y", "rot45_X", "rot45_Y", "rot30_X", "rot30_Y",
                "rot60_X", "rot60_Y", "radial_r"]:
        data_frame[col] = data_frame[col].map(lambda v: "%.2f" % round(v, 2)).astype(float)

    return data_frame
Example 72
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License 4 votes vote down vote up
def initialise_test(dates):
    """Load ../data/test.csv and turn it into a purely numeric DataFrame.

    Parameters
    ----------
    dates : bool
        When truthy, parse the 'Dates' column and derive Year/Month/Week/Hour
        columns from it.

    Returns
    -------
    pandas.DataFrame
        Test data with string categories encoded as consecutive ints,
        standardized and rotated X/Y coordinates, a radial-distance feature,
        and every location feature rounded to 2 decimals.
    """
    if not dates:
        data_frame = pd.read_csv('../data/test.csv', header=0)
    else:
        data_frame = pd.read_csv('../data/test.csv', header=0, parse_dates=['Dates'])
        data_frame['Year'] = data_frame['Dates'].map(lambda x: x.year)
        data_frame['Month'] = data_frame['Dates'].map(lambda x: x.month)
        data_frame['Week'] = data_frame['Dates'].map(lambda x: x.week)
        data_frame['Hour'] = data_frame['Dates'].map(lambda x: x.hour)

    # Encode string columns as integers: sorted unique values get codes 0..n-1.
    # (Only the columns present in the test set; Category etc. are train-only.)
    for col in ['PdDistrict', 'DayOfWeek']:
        mapping = {name: i for i, name in enumerate(sorted(np.unique(data_frame[col])))}
        data_frame[col] = data_frame[col].map(mapping).astype(int)

    # Standardize coordinates, then add rotated copies (45/30/60 degrees)
    # and the radial distance as extra location features.
    xy_scaler = pp.StandardScaler()
    xy_scaler.fit(data_frame[["X", "Y"]])
    data_frame[["X", "Y"]] = xy_scaler.transform(data_frame[["X", "Y"]])
    data_frame["rot45_X"] = .707 * data_frame["Y"] + .707 * data_frame["X"]
    data_frame["rot45_Y"] = .707 * data_frame["Y"] - .707 * data_frame["X"]
    data_frame["rot30_X"] = (1.732 / 2) * data_frame["X"] + (1. / 2) * data_frame["Y"]
    data_frame["rot30_Y"] = (1.732 / 2) * data_frame["Y"] - (1. / 2) * data_frame["X"]
    data_frame["rot60_X"] = (1. / 2) * data_frame["X"] + (1.732 / 2) * data_frame["Y"]
    data_frame["rot60_Y"] = (1. / 2) * data_frame["Y"] - (1.732 / 2) * data_frame["X"]
    data_frame["radial_r"] = np.sqrt(np.power(data_frame["Y"], 2) + np.power(data_frame["X"], 2))

    # Round every location feature to 2 decimal places. The format-then-cast
    # round trip reproduces the original behaviour exactly.
    for col in ["X", "Y", "rot45_X", "rot45_Y", "rot30_X", "rot30_Y",
                "rot60_X", "rot60_Y", "radial_r"]:
        data_frame[col] = data_frame[col].map(lambda v: "%.2f" % round(v, 2)).astype(float)

    return data_frame


# TODO: Fill missing values if any
# Compute mean of a column and fill missing values 
Example 73
Project: autodmri   Author: samuelstjean   File: gamma.py    MIT License 4 votes vote down vote up
def get_noise_distribution(data, method='moments'):
    '''Computes sigma and N from an array of gamma distributed data

    input
    -----
    data
        A numpy array of gamma distributed values
    method='moments' or method='maxlk'
        Use either the moments or maximum likelihood equations to estimate
        the parameters.

    output
    ------
    sigma, N
        parameters related to the original Gaussian noise distribution

    Raises
    ------
    ValueError
        If method is neither 'moments' nor 'maxlk'.
    '''
    # Validate once up front instead of repeating the check in both
    # dispatch chains below (the original raised the same error twice).
    if method not in ('moments', 'maxlk'):
        raise ValueError('Invalid method name {}'.format(method))

    data = data[data > 0]

    # If we have no voxel or only the same value
    # it leads to a divide by 0 as an edge case
    if data.size == 0 or np.std(data) == 0:
        return 0, 0

    # First get sigma
    if method == 'moments':
        mdata2 = np.mean(data**2)
        mdata4 = np.mean(data**4)
        # Method-of-moments estimate: sigma^2 = (E[x^4]/E[x^2] - E[x^2]) / 2
        sigma = np.sqrt(mdata4 / mdata2 - mdata2) / np.sqrt(2)
    else:  # 'maxlk'
        sigma = maxlk_sigma(data)

    # Normalized squared values, gamma distributed with shape N.
    t = data**2 / (2 * sigma**2)

    # Now compute N
    if method == 'moments':
        N = np.mean(t)
    else:  # 'maxlk'
        N = inv_digamma(np.mean(np.log(t)))

    return sigma, N
Example 74
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions.py    MIT License 4 votes vote down vote up
def arcsin(X):
	''' Compute the arcsin of an AutoDiff object and its derivative.

		INPUTS
		======
		X: an AutoDiff object or constant

		RETURNS
		=======
		A new AutoDiff object or scalar with calculated value and derivative.

		EXAMPLES
		========
		>>> X = AutoDiff(0.5, 2)
		>>> arcsinAutoDiff = arcsin(X)
		>>> arcsinAutoDiff.val
		0.5235987755982988
		>>> arcsinAutoDiff.der
		2.3094010767585034
		>>> arcsinAutoDiff.jacobian
		1.1547005383792517
		'''

	# Duck-typing cascade: try AutoDiff attributes first, fall back to Dual
	# attributes, and finally treat X as a plain numeric constant.
	try:
		# Is another ADT
		# AutoDiff branch: d/dx arcsin(x) = 1/sqrt(1 - x^2), applied to both
		# the derivative and the jacobian.
		new_val = np.arcsin(X.val) 
		new_der = (1/np.sqrt(1-X.val**2))*X.der 
		new_jacobian = (1/np.sqrt(1-X.val**2))*X.jacobian 
		
		return AutoDiff(new_val, new_der, X.n, 0, new_jacobian)
	except AttributeError:
		try:
			# Dual-number branch, same derivative rule.
			return Dual(np.arcsin(X.Real), X.Dual/np.sqrt(1-X.Real**2))
		except AttributeError:
			try:
				# NOTE(review): this branch accesses the same .Real/.Dual
				# attributes as the line above, so if that line raised
				# AttributeError this one should too -- it looks unreachable.
				# Confirm before removing.
				# return Dual(arcsin(X.Real), X.Dual/sqrt(1-X.Real**2))
				return Dual(arcsin(X.Real), (X.Dual*(1-X.Real**2)**-0.5))
			except AttributeError:
			# Constant
				# Plain numeric input: defer to numpy's arcsin.
				return_val = np.arcsin(X)
				return return_val

# arc cosine 
Example 75
Project: cs207-FinalProject   Author: PYNE-AD   File: elemFunctions.py    MIT License 4 votes vote down vote up
def sqrt(x):
	''' Compute the square root an AutoDiff object and its derivative.

	INPUTS
	======
	x: an AutoDiff object

	RETURNS
	=======
	A new AutoDiff object with calculated value and derivative.

	EXAMPLES
	========
	>>> x = AutoDiff(np.array([[5]]).T, np.array([[1]]), 1, 1)
	>>> myAutoDiff = sqrt(x)
	>>> myAutoDiff.val
	2.2360679775
	>>> myAutoDiff.der
	0.2236068

	'''
	# Duck-typing cascade: AutoDiff first, then Dual, then plain constant.
	try:
		# AutoDiff branch: d/dx sqrt(x) = 0.5 * x**(-0.5)
		root = np.sqrt(x.val)
		deriv = 0.5 * x.val ** (-0.5) * x.der
		jac = 0.5 * x.val ** (-0.5) * x.jacobian
		return AutoDiff(root, deriv, x.n, 0, jac)
	except AttributeError:
		try:
			# Dual branch: guard the domain before differentiating.
			if x.Real < 0.0:
				# sqrt undefined for negative reals.
				warnings.warn('Undefined at value', RuntimeWarning)
				dual_part = np.nan
			elif x.Real == 0:
				# Value exists but the derivative blows up at zero.
				warnings.warn('Undefined at value', RuntimeWarning)
				dual_part = np.inf
			else:
				dual_part = 0.5 * x.Real ** (-0.5) * x.Dual
			real_part = np.sqrt(x.Real)
			return Dual(real_part, dual_part)
		except AttributeError:
			# Plain numeric constant.
			if x < 0.0:
				warnings.warn('Undefined at value', RuntimeWarning)
				return np.nan
			return np.sqrt(x)

# log base 
Example 76
Project: FRIDA   Author: LCAV   File: generators.py    MIT License 4 votes vote down vote up
def gen_speech_at_mic_stft(phi_ks, source_signals, mic_array_coord, noise_power, fs, fft_size=1024):
    """
    generate microphone signals with short time Fourier transform
    :param phi_ks: azimuth of the acoustic sources
    :param source_signals: speech signals for each arrival angle, one per row
    :param mic_array_coord: x and y coordinates of the microphone array
    :param noise_power: the variance of the microphone noise signal
    :param fs: sampling frequency
    :param fft_size: number of FFT bins
    :return: y_hat_stft: received (complex) signal at microphones
             y_hat_stft_noiseless: the noiseless received (complex) signal at microphones
             source_stft: STFT of the clean source signals
    """
    # Frame shift equals the FFT size, i.e. adjacent frames do NOT overlap
    # (the original comment claimed half-block overlap). np.int is deprecated
    # in modern numpy, so use the builtin int.
    frame_shift_step = int(fft_size / 1.)
    K = source_signals.shape[0]  # number of point sources
    num_mic = mic_array_coord.shape[1]  # number of microphones

    # Generate the impulse responses for the array and source directions
    impulse_response = gen_far_field_ir(np.reshape(phi_ks, (1, -1), order='F'),
                                        mic_array_coord, fs)
    # Convolve each source with its per-microphone impulse response and
    # accumulate the contributions at every microphone.
    # (range() works on Python 2 and 3; xrange is Python-2 only.)
    y = np.zeros((num_mic, source_signals.shape[1] + impulse_response.shape[2] - 1), dtype=np.float32)
    for src in range(K):
        for mic in range(num_mic):
            y[mic] += fftconvolve(impulse_response[src, mic], source_signals[src])

    # Now do the short time Fourier transform
    # The resulting signal is M x fft_size/2+1 x number of frames
    y_hat_stft_noiseless = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y]) / np.sqrt(fft_size)

    # Add noise to the signals
    y_noisy = y + np.sqrt(noise_power) * np.array(np.random.randn(*y.shape), dtype=np.float32)
    # compute sources stft
    source_stft = \
        np.array([pra.stft(s_loop, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for s_loop in source_signals]) / np.sqrt(fft_size)

    y_hat_stft = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y_noisy]) / np.sqrt(fft_size)

    return y_hat_stft, y_hat_stft_noiseless, source_stft
Example 77
Project: FRIDA   Author: LCAV   File: generators.py    MIT License 4 votes vote down vote up
def gen_sig_at_mic_stft(phi_ks, alpha_ks, mic_array_coord, SNR, fs, fft_size=1024, Ns=256):
    """
    generate microphone signals with short time Fourier transform
    :param phi_ks: azimuth of the acoustic sources
    :param alpha_ks: power of the sources
    :param mic_array_coord: x and y coordinates of the microphone array
    :param SNR: signal to noise ratio at the microphone
    :param fs: sampling frequency
    :param fft_size: number of FFT bins
    :param Ns: number of time snapshots used to estimate covariance matrix
    :return: y_hat_stft: received (complex) signal at microphones
             y_hat_stft_noiseless: the noiseless received (complex) signal at microphones
    """
    # Frame shift equals the FFT size, i.e. adjacent frames do NOT overlap
    # (the original comment claimed half-block overlap). np.int is deprecated
    # in modern numpy, so use the builtin int.
    frame_shift_step = int(fft_size / 1.)
    K = alpha_ks.shape[0]  # number of point sources
    num_mic = mic_array_coord.shape[1]  # number of microphones

    # Generate the impulse responses for the array and source directions
    impulse_response = gen_far_field_ir(np.reshape(phi_ks, (1, -1), order='F'),
                                        mic_array_coord, fs)

    # White Gaussian sources scaled so each row has power alpha_ks[k].
    # source_signal = np.random.randn(K, Ns * fft_size) * np.sqrt(alpha_ks[:, np.newaxis])
    source_signal = np.random.randn(K, fft_size + (Ns - 1) * frame_shift_step) * \
                    np.sqrt(np.reshape(alpha_ks, (-1, 1), order='F'))

    # Convolve each source with its per-microphone impulse response and
    # accumulate the contributions at every microphone.
    # (range() works on Python 2 and 3; xrange is Python-2 only.)
    y = np.zeros((num_mic, source_signal.shape[1] + impulse_response.shape[2] - 1), dtype=np.float32)
    for src in range(K):
        for mic in range(num_mic):
            y[mic] += fftconvolve(impulse_response[src, mic], source_signal[src])

    # Now do the short time Fourier transform
    # The resulting signal is M x fft_size/2+1 x number of frames
    y_hat_stft_noiseless = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y]) / np.sqrt(fft_size)

    # compute noise variance based on SNR
    signal_energy = linalg.norm(y_hat_stft_noiseless.flatten()) ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / y_hat_stft_noiseless.size

    # Add noise to the signals
    y_noisy = y + np.sqrt(sigma2_noise) * np.array(np.random.randn(*y.shape), dtype=np.float32)

    y_hat_stft = \
        np.array([pra.stft(signal, fft_size, frame_shift_step, transform=mkl_fft.rfft).T
                  for signal in y_noisy]) / np.sqrt(fft_size)

    return y_hat_stft, y_hat_stft_noiseless
Example 78
Project: disentangling_conditional_gans   Author: zalandoresearch   File: legacy.py    MIT License 4 votes vote down vote up
def patch_theano_gan(state):
    """Convert a legacy Theano GAN pickle `state` into the version-2 format.

    Returns `state` unchanged when it already carries a 'version' key or
    when its build function has no entry in `theano_gan_remap`; otherwise
    returns a new versioned state dict.
    """
    if 'version' in state or state['build_func_spec']['func'] not in theano_gan_remap:
        return state

    # Work on a copy so the pops below don't mutate the caller's state.
    spec = dict(state['build_func_spec'])
    func = spec.pop('func')
    resolution = spec.get('resolution', 32)
    resolution_log2 = int(np.log2(resolution))
    use_wscale = spec.get('use_wscale', True)

    # Only configurations matching these fixed settings can be converted;
    # anything else fails loudly here instead of building a wrong network.
    assert spec.pop('label_size',       0)          == 0
    assert spec.pop('use_batchnorm',    False)      == False
    assert spec.pop('tanh_at_end',      None)       is None
    assert spec.pop('mbstat_func',      'Tstdeps')  == 'Tstdeps'
    assert spec.pop('mbstat_avg',       'all')      == 'all'
    assert spec.pop('mbdisc_kernels',   None)       is None
    spec.pop(       'use_gdrop',        True)       # doesn't make a difference
    assert spec.pop('use_layernorm',    False)      == False
    spec[           'fused_scale']                  = False
    spec[           'mbstd_group_size']             = 16

    # Consume the flat list of Theano parameter arrays strictly in order,
    # renaming and rescaling each into (name, value) pairs. The order of
    # next(param_iter) calls below must match the serialized layout.
    vars = []
    param_iter = iter(state['param_values'])
    relu = np.sqrt(2); linear = 1.0  # initializer gains per activation type
    # flatten2: collapse all but the leading axis (dense-layer weights).
    def flatten2(w): return w.reshape(w.shape[0], -1)
    # he_std: He-init std for kernel w; wscale folds the stored per-layer
    # scale parameter back into the weights when use_wscale is set.
    def he_std(gain, w): return gain / np.sqrt(np.prod(w.shape[:-1]))
    def wscale(gain, w): return w * next(param_iter) / he_std(gain, w) if use_wscale else w
    # layer: one weight (possibly rescaled) followed by one bias.
    def layer(name, gain, w): return [(name + '/weight', wscale(gain, w)), (name + '/bias', next(param_iter))]
    
    # Generator parameters. The transpose + [::-1,::-1] presumably converts
    # the Theano kernel layout/flip convention to the new one -- TODO confirm.
    if func.startswith('G'):
        vars += layer('4x4/Dense', relu/4, flatten2(next(param_iter).transpose(1,0,2,3)))
        vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
        for res in range(3, resolution_log2 + 1):
            vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
            vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
        for lod in range(0, resolution_log2 - 1):
            vars += layer('ToRGB_lod%d' % lod, linear, next(param_iter)[np.newaxis, np.newaxis])

    # Discriminator parameters, high resolution down to 4x4.
    if func.startswith('D'):
        vars += layer('FromRGB_lod0', relu, next(param_iter)[np.newaxis, np.newaxis])
        for res in range(resolution_log2, 2, -1):
            vars += layer('%dx%d/Conv0' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
            vars += layer('%dx%d/Conv1' % (2**res, 2**res), relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
            vars += layer('FromRGB_lod%d' % (resolution_log2 - (res - 1)), relu, next(param_iter)[np.newaxis, np.newaxis])
        vars += layer('4x4/Conv', relu, next(param_iter).transpose(2,3,1,0)[::-1,::-1])
        vars += layer('4x4/Dense0', relu, flatten2(next(param_iter)[:,:,::-1,::-1]).transpose())
        vars += layer('4x4/Dense1', linear, next(param_iter))

    # Carry over the current level-of-detail from the old top-level params.
    vars += [('lod', state['toplevel_params']['cur_lod'])]

    return {
        'version':          2,
        'name':             func,
        'build_module_src': inspect.getsource(networks),
        'build_func_name':  theano_gan_remap[func],
        'static_kwargs':    spec,
        'variables':        vars} 
Example 79
Project: StructEngPy   Author: zhuoju36   File: spectrum.py    MIT License 4 votes vote down vote up
def wind_vibration_factor():
    """Compute the along-wind vibration factor beta_z for a fixed H=30 m case.

    All inputs are hard-coded below. The factor is printed (as before) and
    now also returned so callers can use the value programmatically.

    Returns
    -------
    float
        The computed vibration factor beta_z.
    """
    g = 2.5        # peak factor
    I10 = 0.12     # nominal turbulence intensity
    xi1 = 0.01     # first-mode damping ratio
    k_w = 1.28
    f1 = 1 / 1.38  # first natural frequency [Hz] (period 1.38 s)
    w0 = 0.65      # reference wind pressure
    k = 0.944
    a1 = 0.155
    H = 30                # height [m]
    B = max(100, 2 * H)   # effective width [m]
    # Vertical / horizontal coherence factors.
    rho_z = 10 * np.sqrt(H + 60 * np.e**(-H / 60) - 60) / H
    # NOTE(review): rho_x reuses H and the /60 exponent from the rho_z line;
    # the customary form is 10*sqrt(B + 50*e**(-B/50) - 50)/B -- possible
    # copy-paste slip, kept as-is to preserve the original output. Confirm.
    rho_x = 10 * np.sqrt(H + 50 * np.e**(-H / 60) - 50) / B
    mu_z = 1.67    # wind-pressure height variation coefficient
    phi1 = 1       # first mode shape value at the top
    # Background component.
    Bz = k * H**a1 * rho_x * rho_z * phi1 / mu_z
    # Resonant component (x1 capped at 5).
    x1 = min([30 * f1 / np.sqrt(k_w * w0), 5])
    R = np.sqrt(np.pi * x1**2 / (6 * xi1 * (1 + x1**2)**(4 / 3)))
    beta_z = 1 + 2 * g * I10 * Bz * np.sqrt(1 + R**2)
    print(beta_z)
    return beta_z

#
#mass=2.22831e3*0.011*7850+641172+276159.5-12370.6
#print(mass)
##mass=(2.22831e3*0.021+3.4817e3*0.025+1.05e3*0.14+0.739e3*0.012)*7850+37202
#area=3.481e3
#print(mass/1000)
#print(mass/area)
##
#def comfortablity():
#    C=2
#    B=95.7
#    L=43.5
#    w_=12+0.3
#    p0=0.3
#    g=9.8
#    beta=0.02
#    w=w_*C*L*L
#    fn=2.47
#    Fp=p0*np.exp(-0.35*fn)
#    a_p=Fp/(beta*w)*g
##    print(w)
##    print(Fp)
##    print(a_p)
#
#weight=(3.482e3*0.020+2.631e3*0.012+649*0.008+1.25e3*0.012)*7.85
#print('weight(t)')
#print(weight)
#
#w1=274*1.2
#w2=(196+1000)*1.2
#print(w1) 
Example 80
Project: StructEngPy   Author: zhuoju36   File: dynamic.py    MIT License 4 votes vote down vote up
def response_spectrum(model:Model,spec,mdd,n=60,comb='CQC'):
    """
    spec: a {'T':period,'a':acceleration} dictionary of spectrum\n
    mdd: a list of modal damping ratio\n
    comb: combination method, 'CQC' or 'SRSS'
    """
    K=model.K_
    M=model.M_
    DOF=model.DOF
    w,f,T,mode=eigen_mode(model,DOF)
    mode[n:,:]=np.zeros((DOF-n,DOF))#use n modes only.
    mode[:,n:]=np.zeros((DOF,DOF-n))
    M_=np.dot(np.dot(mode.T,M),mode)#generalized mass
    K_=np.dot(np.dot(mode.T,K),mode)#generalized stiffness
    L_=np.dot(np.diag(M),mode)
    px=[]
    Vx=[]
    Xm=[]
    gamma=[]
    mx=np.diag(M)
    for i in range(len(mode)):
        #mass participate factor
        px.append(-np.dot(mode[:,i].T,mx))
        Vx.append(px[-1]**2)
        Xm.append(Vx[-1]/3/m)
        #modal participate factor
        gamma.append(L_[i]/M_[i,i])    
    S=np.zeros((DOF,mode.shape[0]))
    

    for i in range(mode.shape[1]):        
        xi=mdd[i]
        y=np.interp(T[i],spec['T'],spec['a'])
        y/=w[i]**2
        S[:,i]=gamma[i]*y*mode[:,i]

    if comb=='CQC':
        cqc=0    
        rho=np.diag(np.ones(mode.shape[1]))
        for i in range(mode.shape[1]):
            for j in range(mode.shape[1]):
                if i!=j:
                    r=T[i]/T[j]
                    rho[i,j]=8*xi**2*(1+r)*r**1.5/((1-r**2)**2+4*xi**2*r*(1+r)**2)
                cqc+=rho[i,j]*S[:,i]*S[:,j]
        cqc=np.sqrt(cqc)
        print(cqc)
    elif comb=='SRSS':
        srss=0
        for i in range(mode.shape[1]):
            srss+=S[:,i]**2
        srss=np.sqrt(srss)
        print(srss)