Python numpy.exp() Examples

The following are 30 code examples of numpy.exp(), collected from the open-source projects named above each example. You may also want to check out all available functions and classes of the numpy module, or try the search function.
Example #1
Source File: spectrum_painter.py    From spectrum_painter with MIT License
def convert_image(self, filename):
        pic = img.imread(filename)
        # Set the FFT size to double the image width so that the edges of the
        # spectrum stay clear, preventing some band-filter artifacts
        self.NFFT = 2*pic.shape[1]

        # Repeat image lines until each one comes often enough to reach the desired line time
        ffts = (np.flipud(np.repeat(pic[:, :, 0], self.repetitions, axis=0) / 16.)**2.) / 256.

        # Embed image in center bins of the FFT
        fftall = np.zeros((ffts.shape[0], self.NFFT))
        startbin = int(self.NFFT/4)
        fftall[:, startbin:(startbin+pic.shape[1])] = ffts

        # Generate random phase vectors for the FFT bins, this is important to prevent high peaks in the output
        # The phases won't be visible in the spectrum
        phases = 2*np.pi*np.random.rand(*fftall.shape)
        rffts = fftall * np.exp(1j*phases)

        # Perform the FFT per image line, then concatenate them to form the final signal
        timedata = np.fft.ifft(np.fft.ifftshift(rffts, axes=1), axis=1) / np.sqrt(float(self.NFFT))
        linear = timedata.flatten()
        linear = linear / np.max(np.abs(linear))
        return linear 
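The random-phase step is what keeps the time-domain signal from collapsing into a sharp spike. A minimal standalone sketch (not from the project) comparing the peak-to-RMS ratio of a flat spectrum with coherent versus random phases:

import numpy as np

rng = np.random.default_rng(0)
mag = np.ones(256)  # flat magnitude spectrum

coherent = np.fft.ifft(mag)  # all bins in phase: energy piles into one sample
randomized = np.fft.ifft(mag * np.exp(1j * 2 * np.pi * rng.random(256)))

print(np.abs(coherent).max() / np.sqrt(np.mean(np.abs(coherent)**2)))      # 16.0
print(np.abs(randomized).max() / np.sqrt(np.mean(np.abs(randomized)**2)))  # ~3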
Example #2
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin and/or vmax are passed,
      they are used to scale z.

    Note that this function can automatically rescale data into log-space if the colormap is a
    neuropythy log-space colormap such as log_eccentricity. To enable this behaviour use the
    optional argument logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap): cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u) 
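In the logrescale branch, equal ratios in the data map to equal steps along the colormap. A tiny illustration of the core mapping, ignoring the +exp(vmin) offset and the nanlog/zdivide NaN guards used above:

import numpy as np

zs = np.array([1.0, 2.0, 4.0, 8.0])
vmin, vmax = np.log(zs.min()), np.log(zs.max())
u = (np.log(zs) - vmin) / (vmax - vmin)
print(u)  # [0.0, 0.333..., 0.666..., 1.0]: each doubling advances the colormap equally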
Example #3
Source File: model.py    From models with MIT License
def predict_on_batch(self, x):
        # run feature collection pipeline for the batch
        soi = x.astype(str)  # make sure the type is right

        for i in range(len(soi)):
            if len(soi[i]) < 94:
                soi[i] = elongate_intron(soi[i])

        parameters_batch = self._construct_features_array(soi)

        don_cleavage_time = self.don_model.predict(parameters_batch)
        acc_cleavage_time = self.acc_model.predict(parameters_batch)

        cleavage_time = {'acc_cleavage_time': np.exp(acc_cleavage_time), 'don_cleavage_time': np.exp(don_cleavage_time)}

        return cleavage_time 
Example #4
Source File: model.py    From models with MIT License
def predict_on_batch(self, x):
        # run feature collection pipeline for the batch
        soi = x["soi"].astype(str)  # make sure the type is right
        self.bp_indexes = x["bp_index"]

        for i in range(len(soi)):
            if len(soi[i]) < 94:
                soi[i] = elongate_intron(soi[i])

        parameters_batch = self._construct_features_array(soi)

        don_cleavage_time = self.don_model.predict(parameters_batch)
        acc_cleavage_time = self.acc_model.predict(parameters_batch)

        cleavage_time = {'acc_cleavage_time': np.exp(acc_cleavage_time), 'don_cleavage_time': np.exp(don_cleavage_time)}

        return cleavage_time 
Example #5
Source File: utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def pred_test(testing_data, exe, param_list=None, save_path=""):
    ret = numpy.zeros((testing_data.shape[0], 2))
    if param_list is None:
        for i in range(testing_data.shape[0]):
            exe.arg_dict['data'][:] = testing_data[i, 0]
            exe.forward(is_train=False)
            ret[i, 0] = exe.outputs[0].asnumpy()
            ret[i, 1] = numpy.exp(exe.outputs[1].asnumpy())
        numpy.savetxt(save_path, ret)
    else:
        for i in range(testing_data.shape[0]):
            pred = numpy.zeros((len(param_list),))
            for j in range(len(param_list)):
                exe.copy_params_from(param_list[j])
                exe.arg_dict['data'][:] = testing_data[i, 0]
                exe.forward(is_train=False)
                pred[j] = exe.outputs[0].asnumpy()
            ret[i, 0] = pred.mean()
            ret[i, 1] = pred.std()**2
        numpy.savetxt(save_path, ret)
    mse = numpy.square(ret[:, 0] - testing_data[:, 0] ** 3).mean()
    return mse, ret 
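The single-model branch reads the network's second output as a log-variance, and numpy.exp maps it back to a positive variance; parameterizing in log space lets the head emit any real number. A minimal sketch of that convention (the log_var values are made up):

import numpy as np

log_var = np.array([-2.0, 0.0, 1.5])  # unconstrained network output
var = np.exp(log_var)                 # strictly positive, no clipping required
print(var)                            # [0.135..., 1.0, 4.481...]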
Example #6
Source File: tools_fri_doa_plane.py    From FRIDA with MIT License
def mtx_updated_G(phi_recon, M, mtx_amp2visi_ri, mtx_fri2visi_ri):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations.
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is between -M and M
    :param mtx_amp2visi_ri: the linear mapping from Dirac amplitudes to visibilities
        (real-imaginary stacked representation)
    :param mtx_fri2visi_ri: the linear mapping from the FRI sequence to visibilities
        (real-imaginary stacked representation)
    :return: the updated mapping G
    """
    L = 2 * M + 1
    ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    mtx_amp2freq = np.exp(-1j * ms_half * phi_recon)  # size: (M + 1) x K
    mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :]))  # size: (2M + 1) x K
    mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # projection mtx_freq2visi to the null space of mtx_fri2amp
    mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
                                       linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
    G_updated = np.dot(mtx_amp2visi_ri, mtx_fri2amp_ri) + \
                np.dot(mtx_fri2visi_ri, mtx_null_proj)
    return G_updated 
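The second lstsq call builds a projector onto the null space of mtx_fri2amp_ri, so the update only alters G in directions that do not affect the amplitude fit. A small standalone check of that construction on a random matrix (not project data):

import numpy as np
from scipy import linalg

F = np.random.randn(3, 7)  # wide matrix with a nontrivial null space
n = F.shape[1]
proj = np.eye(n) - np.dot(F.T, linalg.lstsq(F.T, np.eye(n))[0])
print(np.allclose(np.dot(F, proj), 0))  # True: proj maps every vector into null(F)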
Example #7
Source File: bdk_demo.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    g1 = numpy.exp(-(X - theta1) ** 2 / (2 * vx))
    g2 = numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
    denominator = g1 + g2
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((g1 * (X - theta1) / vx
                                    + g2 * (X - theta1 - theta2) / vx)
                                   / denominator).sum() + theta1 / v1
    grad_npy[1] = -rescale_grad * ((g2 * (X - theta1 - theta2) / vx)
                                   / denominator).sum() + theta2 / v2
    grad[:] = grad_npy
    return grad 
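The expression is the gradient of the negative log posterior of a two-component Gaussian mixture with zero-mean Gaussian priors on theta. A numpy-only finite-difference check of the same math (with rescale_grad=1; the sigma values are arbitrary):

import numpy as np

def neg_log_posterior(theta, X, sigma1=10.0, sigma2=1.0, sigmax=2.0):
    v1, v2, vx = sigma1**2, sigma2**2, sigmax**2
    lik = (np.exp(-(X - theta[0])**2 / (2 * vx))
           + np.exp(-(X - theta[0] - theta[1])**2 / (2 * vx)))
    return -np.log(lik).sum() + theta[0]**2 / (2 * v1) + theta[1]**2 / (2 * v2)

X = np.random.randn(100)
theta, eps = np.array([0.3, -0.7]), 1e-6
for k in range(2):
    d = np.zeros(2)
    d[k] = eps
    fd = (neg_log_posterior(theta + d, X) - neg_log_posterior(theta - d, X)) / (2 * eps)
    print(fd)  # matches grad_npy[k] evaluated at the same theta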
Example #8
Source File: tools_fri_doa_plane.py    From FRIDA with MIT License
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    build the matrix that maps the Fourier series to the visibility
    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that contains microphones' x-coordinates
    :param p_mic_y: a vector that contains microphones' y-coordinates
    :return:
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    count_G = 0
    for q in range(num_mic):
        p_x_outer = p_mic_x[q]
        p_y_outer = p_mic_y[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - p_mic_x[qp]
                p_y_qqp = p_y_outer - p_mic_y[qp]
                norm_p_qqp = np.sqrt(p_x_qqp ** 2 + p_y_qqp ** 2)
                phi_qqp = np.arctan2(p_y_qqp, p_x_qqp)
                G[count_G, :] = (-1j) ** ms * sp.special.jv(ms, norm_p_qqp) * \
                                np.exp(1j * ms * phi_qqp)
                count_G += 1
    return G 
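Each row of G is a truncated Jacobi–Anger expansion: sum_m (-1j)**m * J_m(z) * exp(1j*m*phi) equals exp(-1j*z*cos(phi)), which is the phase of a plane wave between a microphone pair. A quick numerical check of the identity:

import numpy as np
from scipy.special import jv

M, z, phi = 30, 2.3, 0.7
ms = np.arange(-M, M + 1)
series = np.sum((-1j)**ms * jv(ms, z) * np.exp(1j * ms * phi))
print(abs(series - np.exp(-1j * z * np.cos(phi))))  # ~1e-16, machine precision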
Example #9
Source File: doa.py    From FRIDA with MIT License
def compute_mode(self):
        """
        Pre-compute mode vectors from candidate locations (in spherical 
        coordinates).
        """
        if self.num_loc is None:
            raise ValueError('Lookup table appears to be empty. \
                Run build_lookup().')
        self.mode_vec = np.zeros((self.max_bin, self.M, self.num_loc),
                                 dtype='complex64')
        if (self.nfft % 2 == 1):
            raise ValueError('Signal length must be even.')
        f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
            * 1j * 2 * np.pi
        for i in range(self.num_loc):
            p_s = self.loc[:, i]
            for m in range(self.M):
                p_m = self.L[:, m]
                if (self.mode == 'near'):
                    dist = np.linalg.norm(p_m - p_s, axis=1)
                if (self.mode == 'far'):
                    dist = np.dot(p_s, p_m)
                # tau = np.round(self.fs*dist/self.c) # discrete - jagged
                tau = self.fs * dist / self.c  # "continuous" - smoother
                self.mode_vec[:, m, i] = np.exp(f * tau) 
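Each mode vector is a per-frequency phase shift exp(1j*2*pi*f*tau) for the propagation delay tau between a candidate location and a microphone. A stripped-down far-field version of the same construction, with made-up sampling values:

import numpy as np

fs, c, nfft = 16000.0, 343.0, 512
freqs = np.linspace(0.0, fs / 2, nfft // 2 + 1)  # frequency grid in Hz
tau = 0.25 / c                                   # delay for 0.25 m of extra path, in seconds
mode_vec = np.exp(1j * 2 * np.pi * freqs * tau)  # unit-magnitude phasor per bin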
Example #10
Source File: rnn_cell_demo.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def Perplexity(label, pred):
    """ Calculates prediction perplexity

    Args:
        label (mx.nd.array): labels array
        pred (mx.nd.array): prediction array

    Returns:
        float: calculated perplexity

    """

    # collapse the time, batch dimension
    label = label.reshape((-1,))
    pred = pred.reshape((-1, pred.shape[-1]))

    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size) 
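Perplexity is exp of the mean negative log-likelihood of the true labels. An equivalent vectorized sketch of the loop above, keeping the same 1e-10 floor against log(0):

import numpy as np

def perplexity_np(label, pred, eps=1e-10):
    # label: (N,) integer class ids; pred: (N, V) predicted distributions
    p = np.maximum(pred[np.arange(len(label)), label.astype(int)], eps)
    return np.exp(-np.log(p).mean())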
Example #11
Source File: generators.py    From FRIDA with MIT License
def gen_visibility(alphak, phi_k, pos_mic_x, pos_mic_y):
    """
    generate visibility from the Dirac parameter and microphone array layout
    :param alphak: Diracs' amplitudes
    :param phi_k: azimuths
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :return:
    """
    xk, yk = polar2cart(1, phi_k)
    num_mic = pos_mic_x.size
    visi = np.zeros((num_mic, num_mic), dtype=complex)
    for q in range(num_mic):
        p_x_outer = pos_mic_x[q]
        p_y_outer = pos_mic_y[q]
        for qp in range(num_mic):
            p_x_qqp = p_x_outer - pos_mic_x[qp]  # a scalar
            p_y_qqp = p_y_outer - pos_mic_y[qp]  # a scalar
            visi[qp, q] = np.dot(np.exp(-1j * (xk * p_x_qqp + yk * p_y_qqp)), alphak)
    return visi 
Example #12
Source File: vaegan_mxnet.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def KLDivergenceLoss():
    '''KLDivergenceLoss loss
    '''

    data = mx.sym.Variable('data')
    mu1, lv1 = mx.sym.split(data,  num_outputs=2, axis=0)
    mu2 = mx.sym.zeros_like(mu1)
    lv2 = mx.sym.zeros_like(lv1)

    v1 = mx.sym.exp(lv1)
    v2 = mx.sym.exp(lv2)
    mu_diff_sq = mx.sym.square(mu1 - mu2)
    dimwise_kld = .5 * ((lv2 - lv1) + mx.symbol.broadcast_div(v1, v2)
                        + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.)
    KL = mx.symbol.sum(dimwise_kld, axis=1)

    KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL), name='KLloss')
    return KLloss 
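With both distributions diagonal Gaussians given by a mean and a log-variance, the symbol graph above is the closed-form KL divergence. The same formula in plain numpy, for reference (mu2 and lv2 default to the standard normal, as in the loss):

import numpy as np

def kl_diag_gaussians(mu1, lv1, mu2=0.0, lv2=0.0):
    # KL( N(mu1, e^lv1) || N(mu2, e^lv2) ), summed over the last axis
    v1, v2 = np.exp(lv1), np.exp(lv2)
    return 0.5 * np.sum((lv2 - lv1) + v1 / v2 + (mu1 - mu2)**2 / v2 - 1.0, axis=-1)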
Example #13
Source File: conceptnet_evaluate.py    From comet-commonsense with Apache License 2.0
def compute_final_scores(self, average_loss, nums):
        average_loss["total_macro"] /= nums["total_macro"]
        average_loss["total_micro"] /= nums["total_micro"]

        if nums["negative_micro"]:
            average_loss["negative_macro"] /= nums["negative_macro"]
            average_loss["negative_micro"] /= nums["negative_micro"]
        else:
            average_loss["negative_macro"] = 0
            average_loss["negative_micro"] = 0

        average_loss["macro_diff"] = (average_loss["negative_macro"] -
                                      average_loss["total_macro"])
        average_loss["micro_diff"] = (average_loss["negative_micro"] -
                                      average_loss["total_micro"])

        average_loss["ppl_macro"] = np.exp(average_loss["total_macro"])
        average_loss["ppl_micro"] = np.exp(average_loss["total_micro"])

        return average_loss 
Example #14
Source File: cmag.py    From neuropythy with GNU Affero General Public License v3.0
def __call__(self, x, y=None):
        if y is not None: x = (x,y)
        x = np.asarray(x)
        if len(x.shape) == 1: return self([x])[0]
        x = np.transpose(x) if x.shape[0] == 2 else x
        if not x.flags['WRITEABLE']: x = np.array(x)
        crd = self.coordinates
        sig = self.sigma
        wts = self._weight
        res = np.zeros(x.shape[0])
        for (sh, qd, bi) in zip(self.spatial_hashes, self.bin_query_distances, self.sigma_bins):
            neis = sh.query_ball_point(x, qd)
            res += [
                np.sum(w * np.exp(-0.5 * d2/s**2))
                for (ni,pt) in zip(neis,x)
                for ii in [bi[ni]]
                for (w,s,d2) in [(wts[ii], sig[ii], np.sum((crd[ii] - pt)**2, axis=1))]]
        return res 
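Each term in the sum is a Gaussian radial basis function; the spatial hash only limits which centers are visited for each query point. A naive dense version of the same field, handy for testing on small inputs (the names here are illustrative, not the class's API):

import numpy as np

def gaussian_field_naive(pts, centers, weights, sigmas):
    # pts: (P, 2) query points; centers: (N, 2); weights, sigmas: (N,)
    d2 = np.sum((pts[:, None, :] - centers[None, :, :])**2, axis=-1)  # (P, N)
    return np.sum(weights * np.exp(-0.5 * d2 / sigmas**2), axis=1)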
Example #15
Source File: test_loss.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_bce_loss():
    N = 20
    data = mx.random.uniform(-1, 1, shape=(N, 20))
    label = mx.nd.array(np.random.randint(2, size=(N,)), dtype='float32')
    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label')
    output = get_net(1)
    l = mx.symbol.Variable('label')
    Loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    loss = Loss(output, l)
    loss = mx.sym.make_loss(loss)
    mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 0.01},
            eval_metric=mx.metric.Loss(), optimizer='adam',
            initializer=mx.init.Xavier(magnitude=2))
    assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.01
    # Test against npy
    data = mx.random.uniform(-5, 5, shape=(10,))
    label = mx.random.uniform(0, 1, shape=(10,))
    mx_bce_loss = Loss(data, label).asnumpy()
    prob_npy = 1.0 / (1.0 + np.exp(-data.asnumpy()))
    label_npy = label.asnumpy()
    npy_bce_loss = - label_npy * np.log(prob_npy) - (1 - label_npy) * np.log(1 - prob_npy)
    assert_almost_equal(mx_bce_loss, npy_bce_loss, rtol=1e-4, atol=1e-5) 
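The reference computation above exponentiates and then takes logs, which can overflow or hit log(0) for large logits; the narrow uniform draws keep it safe here. A numerically stable rearrangement of the same loss, for comparison:

import numpy as np

def stable_bce(logits, labels):
    # identical to -y*log(sigmoid(x)) - (1-y)*log(1-sigmoid(x)), without overflow
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))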
Example #16
Source File: Utility.py    From fuku-ml with MIT License
def kernel_matrix_xX(svm_model, original_x, original_X):

        if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
            K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q
        elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
            K = np.exp(-svm_model.gamma * (cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2)).ravel()

        '''
        K = np.zeros((svm_model.data_num, svm_model.data_num))

        for i in range(svm_model.data_num):
            for j in range(svm_model.data_num):
                if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
                    K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j])
                elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
                    K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j])
        '''

        return K 
Example #17
Source File: NeuralNetwork.py    From fuku-ml with MIT License
def tanh_prime(self, s):
        tanh_prime_output = np.zeros(s.shape)
        for i in range(s.shape[0]):
            tanh_prime_output[i] = 4.0 / (np.exp(2 * s[i]) + np.exp(-2 * s[i]) + 2)
        return tanh_prime_output 
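The loop evaluates 4 / (e^(2s) + e^(-2s) + 2), which equals 1/cosh(s)**2 = 1 - tanh(s)**2, i.e. the derivative of tanh. A vectorized equivalent using that identity:

import numpy as np

def tanh_prime_vec(s):
    # 4 / (e^(2s) + e^(-2s) + 2) == 1 / cosh(s)**2 == 1 - tanh(s)**2
    return 1.0 - np.tanh(np.asarray(s))**2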
Example #18
Source File: utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def npy_sigmoid(x):
    return 1/(1 + numpy.exp(-x)) 
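The direct form overflows inside numpy.exp for large negative x (the result still rounds correctly to 0, but with a runtime warning). A warning-free piecewise sketch that only exponentiates negative arguments:

import numpy as np

def sigmoid_stable(x):
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    ex = np.exp(x[~pos])
    out[~pos] = ex / (1.0 + ex)
    return out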
Example #19
Source File: custom_softmax.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def forward(self, is_train, req, in_data, out_data, aux):
        x = in_data[0].asnumpy()
        y = np.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        y /= y.sum(axis=1).reshape((x.shape[0], 1))
        self.assign(out_data[0], req[0], mx.nd.array(y)) 
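Subtracting the row maximum before exponentiating leaves the softmax unchanged, since exp(x - c) / sum(exp(x - c)) == exp(x) / sum(exp(x)), but it keeps np.exp in range. A quick demonstration on logits that would overflow naively:

import numpy as np

x = np.array([[1000.0, 1001.0, 1002.0]])      # naive np.exp(x) overflows float64
y = np.exp(x - x.max(axis=1, keepdims=True))
y /= y.sum(axis=1, keepdims=True)
print(y)  # [[0.0900..., 0.2447..., 0.6652...]]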
Example #20
Source File: mlp.py    From padasip with MIT License
def activation(self, x, f="sigmoid", der=False):
        """
        This function process values of layer outputs with activation function.

        **Args:**

        * `x` : array to process (1-dimensional array) 

        **Kwargs:**

        * `f` : activation function

        * `der` : normal output, or its derivation (bool)

        **Returns:**

        * values processed with activation function (1-dimensional array)
        
        """
        if f == "sigmoid":
            if der:
                return x * (1 - x)
            return 1. / (1 + np.exp(-x))
        elif f == "tanh":
            if der:
                return 1 - x**2 
            return (2. / (1 + np.exp(-2*x))) - 1 
Example #21
Source File: bdk_demo.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
        y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
        # y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        # y /= y.sum(axis=1).reshape((x.shape[0], 1)) 
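The two commented-out lines are the plain softmax; the active code computes the log-softmax directly as x - max(x) - log(sum(exp(x - max(x)))), which is safe to exponentiate back. A standalone check:

import numpy as np

x = np.array([[2.0, 1.0, -3.0]])
y = x - x.max(axis=1, keepdims=True)
y -= np.log(np.exp(y).sum(axis=1, keepdims=True))
print(np.exp(y).sum(axis=1))  # [1.0]: rows of exp(log-softmax) form a distribution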
Example #22
Source File: LogisticRegression.py    From fuku-ml with MIT License
def error_function(self, x, y, W):
        # needs refactoring

        '''
        Error function to calculate error: cross entropy error
        '''

        error = np.log(1 + np.exp((-1) * y * np.inner(x, W)))

        return error 
Example #23
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def gaussian(f=Ellipsis, mu=0, sigma=1, scale=1, invert=False, normalize=False):
    '''
    gaussian() yields a potential function f(x) that calculates a Gaussian function over x; the
      formula used is given below.
    gaussian(g) yields a function h(x) such that, if f(x) is yielded by gaussian(), h(x) = f(g(x)).

    The formula employed by the Gaussian function is as follows, with mu, sigma, and scale all being
    parameters that one can provide via optional arguments:
      scale * exp(-0.5 * ((x - mu) / sigma)**2)

    The following optional arguments may be given:
      * mu (default: 0) specifies the mean of the Gaussian.
      * sigma (default: 1) specifies the standard deviation (sigma) parameter of the Gaussian.
      * scale (default: 1) specifies the scale to use.
      * invert (default: False) specifies whether the Gaussian should be inverted. If inverted, then
        the formula, scale * exp(...), is replaced with scale * (1 - exp(...)).
      * normalize (default: False) specifies whether the result should be multiplied by the inverse
        of the area under the uninverted and unscaled curve; i.e., if normalize is True, the entire
        result is multiplied by 1/sqrt(2*pi*sigma**2).
    '''
    f = to_potential(f)
    F = exp(-0.5 * ((f - mu) / sigma)**2)
    if invert: F = 1 - F
    F = F * scale
    if normalize: F = F / (np.sqrt(2.0*np.pi) * sigma)
    return F 
Example #24
Source File: LogisticRegression.py    From fuku-ml with MIT License
def theta(self, s):

        '''
        Theta sigmoid function
        '''

        # clamp to avoid overflow: np.exp(710) exceeds the float64 range
        s = np.where(s < -709, -709, s)

        return 1 / (1 + np.exp((-1) * s)) 
Example #25
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def jacobian(self, x, into=None):
        x = flattest(x)
        z = ErfPotential.coef * np.exp(-x**2)
        z = sps.diags(z)
        return safe_into(into, z) 
Example #26
Source File: ProbabilisticSVM.py    From fuku-ml with MIT License
def error_function(self, x, y, W):

        svm_process_x = self.svm_score(x)
        svm_process_x = [1] + [svm_process_x]  # prepend the bias term to the SVM score

        error = np.log(1 + np.exp((-1) * y * np.inner(svm_process_x, self.logistic_processor.W)))

        return error 
Example #27
Source File: models.py    From neuropythy with GNU Affero General Public License v3.0
def visual_coordinates(polar_angles, eccentricities):
        '''
        visual_coordinates(polar_angles, eccentricities) yields the (x, y) visual-field
        coordinate matrix corresponding to the given polar angle and eccentricity values.
        '''
        z = eccentricities * np.exp(1j * np.pi/180.0 * (90.0 - polar_angles))
        return pimms.imm_array([z.real, z.imag]) 
Example #28
Source File: models.py    From neuropythy with GNU Affero General Public License v3.0
def angle_to_cortex(self, theta, rho):
        'See help(neuropythy.registration.RetinotopyModel.angle_to_cortex).'
        #TODO: This should be made to work correctly with visual area boundaries: this could be done
        # by, for each area (e.g., V2) looking at its boundaries (with V1 and V3) and flipping the
        # adjacent triangles so that there is complete coverage of each hemifield, guaranteed.
        if not pimms.is_vector(theta): return self.angle_to_cortex([theta], [rho])[0]
        theta = np.asarray(theta)
        rho = np.asarray(rho)
        zs = np.asarray(rho * np.exp(1j * ((90.0 - theta) / 180.0 * np.pi)),
                        dtype=complex)
        coords = np.asarray([zs.real, zs.imag]).T
        if coords.shape[0] == 0: return np.zeros((0, len(self.visual_meshes), 2))
        # we step through each area in the forward model and return the appropriate values
        tx = self.transform
        res = np.transpose(
            [self.visual_meshes[area].interpolate(coords, 'cortical_coordinates', method='linear')
             for area in sorted(self.visual_meshes.keys())],
            (1,0,2))
        if tx is not None:
            res = np.asarray(
                [np.dot(tx, np.vstack((area_xy.T, np.ones(len(area_xy)))))[0:2].T
                 for area_xy in res])
        return res 
Example #29
Source File: Utility.py    From fuku-ml with MIT License
def kernel_matrix(svm_model, original_X):

        if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
            K = (svm_model.zeta + svm_model.gamma * np.dot(original_X, original_X.T)) ** svm_model.Q
        elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
            pairwise_dists = squareform(pdist(original_X, 'euclidean'))
            K = np.exp(-svm_model.gamma * (pairwise_dists ** 2))

        '''
        K = np.zeros((svm_model.data_num, svm_model.data_num))

        for i in range(svm_model.data_num):
            for j in range(svm_model.data_num):
                if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
                    K[i, j] = Kernel.polynomial_kernel(svm_model, original_X[i], original_X[j])
                elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
                    K[i, j] = Kernel.gaussian_kernel(svm_model, original_X[i], original_X[j])
        '''

        return K 
Example #30
Source File: Utility.py    From fuku-ml with MIT License
def gaussian_kernel(svm_model, x1, x2):

        return np.exp(-svm_model.gamma * (np.linalg.norm(x1 - x2) ** 2))
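As a closing sanity check, the vectorized kernel_matrix path and the pairwise gaussian_kernel agree. A minimal sketch with random data, assuming scipy is available:

import numpy as np
from scipy.spatial.distance import pdist, squareform

gamma = 0.5
X = np.random.randn(5, 3)
K_vec = np.exp(-gamma * squareform(pdist(X, 'euclidean'))**2)
K_loop = np.array([[np.exp(-gamma * np.linalg.norm(a - b)**2) for b in X] for a in X])
print(np.allclose(K_vec, K_loop))  # True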