Python numpy.exp() Examples
The following are 30 code examples of numpy.exp(), drawn from open-source projects. The original project and source file for each example are noted above it. You may also want to check out all available functions and classes of the numpy module.
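
Before the project examples, here is a minimal sketch of what numpy.exp computes: the element-wise exponential e**x, for real or complex input (the values shown in the comments are approximate).

import numpy as np

# element-wise exponential of an array
print(np.exp(np.array([0., 1., 2.])))  # [1.  2.71828183  7.3890561]

# a complex argument follows Euler's formula: exp(1j*pi) is (numerically) -1
print(np.exp(1j * np.pi))  # (-1+1.2246467991473532e-16j)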

Example #1
Source File: spectrum_painter.py From spectrum_painter with MIT License
def convert_image(self, filename):
    pic = img.imread(filename)
    # Set FFT size to be double the image size so that the edge of the spectrum stays clear,
    # preventing some bandfilter artifacts
    self.NFFT = 2 * pic.shape[1]
    # Repeat image lines until each one comes often enough to reach the desired line time
    ffts = (np.flipud(np.repeat(pic[:, :, 0], self.repetitions, axis=0) / 16.) ** 2.) / 256.
    # Embed image in center bins of the FFT
    fftall = np.zeros((ffts.shape[0], self.NFFT))
    startbin = int(self.NFFT / 4)
    fftall[:, startbin:(startbin + pic.shape[1])] = ffts
    # Generate random phase vectors for the FFT bins; this is important to prevent
    # high peaks in the output. The phases won't be visible in the spectrum.
    phases = 2 * np.pi * np.random.rand(*fftall.shape)
    rffts = fftall * np.exp(1j * phases)
    # Perform the FFT per image line, then concatenate them to form the final signal
    timedata = np.fft.ifft(np.fft.ifftshift(rffts, axes=1), axis=1) / np.sqrt(float(self.NFFT))
    linear = timedata.flatten()
    linear = linear / np.max(np.abs(linear))
    return linear
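
The role of np.exp here: exp(1j*phase) is a complex number of unit magnitude, so multiplying each FFT bin by a random phase factor scrambles phase without changing the magnitude spectrum. A minimal sketch of that property:

import numpy as np

phases = 2 * np.pi * np.random.rand(4)
z = np.exp(1j * phases)
print(np.abs(z))  # all (numerically) 1.0: random phases leave magnitudes untouched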
Example #2
Source File: conceptnet_evaluate.py From comet-commonsense with Apache License 2.0
def compute_final_scores(self, average_loss, nums):
    average_loss["total_macro"] /= nums["total_macro"]
    average_loss["total_micro"] /= nums["total_micro"]

    if nums["negative_micro"]:
        average_loss["negative_macro"] /= nums["negative_macro"]
        average_loss["negative_micro"] /= nums["negative_micro"]
    else:
        average_loss["negative_macro"] = 0
        average_loss["negative_micro"] = 0

    average_loss["macro_diff"] = (average_loss["negative_macro"] -
                                  average_loss["total_macro"])
    average_loss["micro_diff"] = (average_loss["negative_micro"] -
                                  average_loss["total_micro"])

    average_loss["ppl_macro"] = np.exp(average_loss["total_macro"])
    average_loss["ppl_micro"] = np.exp(average_loss["total_micro"])

    return average_loss
Example #3
Source File: generators.py From FRIDA with MIT License
def gen_visibility(alphak, phi_k, pos_mic_x, pos_mic_y):
    """
    generate visibility from the Dirac parameters and microphone array layout
    :param alphak: Diracs' amplitudes
    :param phi_k: azimuths
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :return:
    """
    xk, yk = polar2cart(1, phi_k)
    num_mic = pos_mic_x.size

    visi = np.zeros((num_mic, num_mic), dtype=complex)
    for q in range(num_mic):  # xrange in the Python 2 original
        p_x_outer = pos_mic_x[q]
        p_y_outer = pos_mic_y[q]
        for qp in range(num_mic):
            p_x_qqp = p_x_outer - pos_mic_x[qp]  # a scalar
            p_y_qqp = p_y_outer - pos_mic_y[qp]  # a scalar
            visi[qp, q] = np.dot(np.exp(-1j * (xk * p_x_qqp + yk * p_y_qqp)), alphak)
    return visi
Example #4
Source File: doa.py From FRIDA with MIT License
def compute_mode(self):
    """
    Pre-compute mode vectors from candidate locations (in spherical
    coordinates).
    """
    if self.num_loc is None:
        raise ValueError('Lookup table appears to be empty. Run build_lookup().')

    self.mode_vec = np.zeros((self.max_bin, self.M, self.num_loc),
                             dtype='complex64')

    if (self.nfft % 2 == 1):
        raise ValueError('Signal length must be even.')

    f = 1.0 / self.nfft * np.linspace(0, self.nfft / 2, self.max_bin) \
        * 1j * 2 * np.pi

    for i in range(self.num_loc):
        p_s = self.loc[:, i]
        for m in range(self.M):
            p_m = self.L[:, m]
            if (self.mode == 'near'):
                dist = np.linalg.norm(p_m - p_s, axis=1)
            if (self.mode == 'far'):
                dist = np.dot(p_s, p_m)
            # tau = np.round(self.fs * dist / self.c)  # discrete - jagged
            tau = self.fs * dist / self.c  # "continuous" - smoother
            self.mode_vec[:, m, i] = np.exp(f * tau)
Example #5
Source File: tools_fri_doa_plane.py From FRIDA with MIT License
def mtx_freq2visi(M, p_mic_x, p_mic_y):
    """
    build the matrix that maps the Fourier series to the visibility
    :param M: the Fourier series expansion is limited from -M to M
    :param p_mic_x: a vector that contains microphones' x coordinates
    :param p_mic_y: a vector that contains microphones' y coordinates
    :return:
    """
    num_mic = p_mic_x.size
    ms = np.reshape(np.arange(-M, M + 1, step=1), (1, -1), order='F')
    G = np.zeros((num_mic * (num_mic - 1), 2 * M + 1), dtype=complex, order='C')
    count_G = 0
    for q in range(num_mic):
        p_x_outer = p_mic_x[q]
        p_y_outer = p_mic_y[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - p_mic_x[qp]
                p_y_qqp = p_y_outer - p_mic_y[qp]
                norm_p_qqp = np.sqrt(p_x_qqp ** 2 + p_y_qqp ** 2)
                phi_qqp = np.arctan2(p_y_qqp, p_x_qqp)
                G[count_G, :] = (-1j) ** ms * sp.special.jv(ms, norm_p_qqp) * \
                                np.exp(1j * ms * phi_qqp)
                count_G += 1
    return G
Example #6
Source File: tools_fri_doa_plane.py From FRIDA with MIT License
def mtx_updated_G(phi_recon, M, mtx_amp2visi_ri, mtx_fri2visi_ri):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations.
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is between -M and M
    :param mtx_amp2visi_ri: the linear mapping from Dirac amplitudes to visibilities
    :param mtx_fri2visi_ri: the linear mapping from the FRI sequence to visibilities
    :return:
    """
    L = 2 * M + 1
    ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    mtx_amp2freq = np.exp(-1j * ms_half * phi_recon)  # size: (M + 1) x K
    mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :]))  # size: (2M + 1) x K
    mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # projection of mtx_freq2visi onto the null space of mtx_fri2amp
    mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
                                       linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
    G_updated = np.dot(mtx_amp2visi_ri, mtx_fri2amp_ri) + \
                np.dot(mtx_fri2visi_ri, mtx_null_proj)
    return G_updated
Example #7
Source File: model.py From models with MIT License
def predict_on_batch(self, x):
    # run feature collection pipeline for the batch
    soi = x.astype(str)  # make sure the type is right
    for i in range(len(soi)):
        if len(soi[i]) < 94:
            soi[i] = elongate_intron(soi[i])
    parameters_batch = self._construct_features_array(soi)

    don_cleavage_time = self.don_model.predict(parameters_batch)
    acc_cleavage_time = self.acc_model.predict(parameters_batch)
    cleavage_time = {'acc_cleavage_time': np.exp(acc_cleavage_time),
                     'don_cleavage_time': np.exp(don_cleavage_time)}
    return cleavage_time
Example #8
Source File: model.py From models with MIT License
def predict_on_batch(self, x):
    # run feature collection pipeline for the batch
    soi = x["soi"].astype(str)  # make sure the type is right
    self.bp_indexes = x["bp_index"]

    for i in range(len(soi)):
        if len(soi[i]) < 94:
            soi[i] = elongate_intron(soi[i])
    parameters_batch = self._construct_features_array(soi)

    don_cleavage_time = self.don_model.predict(parameters_batch)
    acc_cleavage_time = self.acc_model.predict(parameters_batch)
    cleavage_time = {'acc_cleavage_time': np.exp(acc_cleavage_time),
                     'don_cleavage_time': np.exp(don_cleavage_time)}
    return cleavage_time
Example #9
Source File: core.py From neuropythy with GNU Affero General Public License v3.0
def apply_cmap(zs, cmap, vmin=None, vmax=None, unit=None, logrescale=False):
    '''
    apply_cmap(z, cmap) applies the given cmap to the values in z; if vmin
    and/or vmax are passed, they are used to scale z.

    Note that this function can automatically rescale data into log-space
    if the colormap is a neuropythy log-space colormap such as
    log_eccentricity. To enable this behaviour use the optional argument
    logrescale=True.
    '''
    zs = pimms.mag(zs) if unit is None else pimms.mag(zs, unit)
    zs = np.asarray(zs, dtype='float')
    if pimms.is_str(cmap):
        cmap = matplotlib.cm.get_cmap(cmap)
    if logrescale:
        if vmin is None: vmin = np.log(np.nanmin(zs))
        if vmax is None: vmax = np.log(np.nanmax(zs))
        mn = np.exp(vmin)
        u = zdivide(nanlog(zs + mn) - vmin, vmax - vmin, null=np.nan)
    else:
        if vmin is None: vmin = np.nanmin(zs)
        if vmax is None: vmax = np.nanmax(zs)
        u = zdivide(zs - vmin, vmax - vmin, null=np.nan)
    u[np.isnan(u)] = -np.inf
    return cmap(u)
Example #10
Source File: cmag.py From neuropythy with GNU Affero General Public License v3.0
def __call__(self, x, y=None):
    if y is not None: x = (x, y)
    x = np.asarray(x)
    if len(x.shape) == 1: return self([x])[0]
    x = np.transpose(x) if x.shape[0] == 2 else x
    if not x.flags['WRITEABLE']: x = np.array(x)
    crd = self.coordinates
    sig = self.sigma
    wts = self._weight
    res = np.zeros(x.shape[0])
    for (sh, qd, bi) in zip(self.spatial_hashes, self.bin_query_distances, self.sigma_bins):
        neis = sh.query_ball_point(x, qd)
        res += [
            np.sum(w * np.exp(-0.5 * d2 / s**2))
            for (ni, pt) in zip(neis, x)
            for ii in [bi[ni]]
            for (w, s, d2) in [(wts[ii], sig[ii], np.sum((crd[ii] - pt)**2, axis=1))]]
    return res
Example #11
Source File: Utility.py From fuku-ml with MIT License
def kernel_matrix_xX(svm_model, original_x, original_X):
    if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
        K = (svm_model.zeta + svm_model.gamma * np.dot(original_x, original_X.T)) ** svm_model.Q
    elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
        K = np.exp(-svm_model.gamma * (cdist(original_X, np.atleast_2d(original_x), 'euclidean').T ** 2)).ravel()

    '''
    K = np.zeros((svm_model.data_num, svm_model.data_num))

    for i in range(svm_model.data_num):
        for j in range(svm_model.data_num):
            if (svm_model.svm_kernel == 'polynomial_kernel' or svm_model.svm_kernel == 'soft_polynomial_kernel'):
                K[i, j] = Kernel.polynomial_kernel(svm_model, original_x, original_X[j])
            elif (svm_model.svm_kernel == 'gaussian_kernel' or svm_model.svm_kernel == 'soft_gaussian_kernel'):
                K[i, j] = Kernel.gaussian_kernel(svm_model, original_x, original_X[j])
    '''

    return K
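
The Gaussian branch above evaluates the RBF kernel K(x, x') = exp(-gamma * ||x - x'||**2) against every row of original_X at once. A self-contained sketch of the same computation, with hypothetical data and gamma:

import numpy as np
from scipy.spatial.distance import cdist

gamma = 0.5
x = np.array([0., 0.])
X = np.array([[0., 0.], [1., 0.], [0., 2.]])
K = np.exp(-gamma * (cdist(X, np.atleast_2d(x), 'euclidean').T ** 2)).ravel()
print(K)  # [1.0, exp(-0.5) ~ 0.6065, exp(-2.0) ~ 0.1353]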
Example #12
Source File: utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def pred_test(testing_data, exe, param_list=None, save_path=""):
    ret = numpy.zeros((testing_data.shape[0], 2))
    if param_list is None:
        for i in range(testing_data.shape[0]):
            exe.arg_dict['data'][:] = testing_data[i, 0]
            exe.forward(is_train=False)
            ret[i, 0] = exe.outputs[0].asnumpy()
            ret[i, 1] = numpy.exp(exe.outputs[1].asnumpy())
        numpy.savetxt(save_path, ret)
    else:
        for i in range(testing_data.shape[0]):
            pred = numpy.zeros((len(param_list),))
            for j in range(len(param_list)):
                exe.copy_params_from(param_list[j])
                exe.arg_dict['data'][:] = testing_data[i, 0]
                exe.forward(is_train=False)
                pred[j] = exe.outputs[0].asnumpy()
            ret[i, 0] = pred.mean()
            ret[i, 1] = pred.std()**2
        numpy.savetxt(save_path, ret)
    mse = numpy.square(ret[:, 0] - testing_data[:, 0] ** 3).mean()
    return mse, ret
Example #13
Source File: bdk_demo.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
                                    * (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx))
                                    * (X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
Example #14
Source File: rnn_cell_demo.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def Perplexity(label, pred):
    """ Calculates prediction perplexity

    Args:
        label (mx.nd.array): labels array
        pred (mx.nd.array): prediction array

    Returns:
        float: calculated perplexity
    """
    # collapse the time, batch dimension
    label = label.reshape((-1,))
    pred = pred.reshape((-1, pred.shape[-1]))

    loss = 0.
    for i in range(pred.shape[0]):
        loss += -np.log(max(1e-10, pred[i][int(label[i])]))
    return np.exp(loss / label.size)
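
The final np.exp converts the averaged negative log-likelihood into perplexity, i.e. the geometric mean of the inverse per-token probabilities. A toy check with made-up probabilities (not from the example's model):

import numpy as np

p = np.array([0.5, 0.25, 0.125])  # hypothetical probabilities of the correct tokens
perplexity = np.exp(np.mean(-np.log(p)))
print(perplexity)  # 4.0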
Example #15
Source File: vaegan_mxnet.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def KLDivergenceLoss():
    '''KLDivergenceLoss loss
    '''
    data = mx.sym.Variable('data')
    mu1, lv1 = mx.sym.split(data, num_outputs=2, axis=0)
    mu2 = mx.sym.zeros_like(mu1)
    lv2 = mx.sym.zeros_like(lv1)
    v1 = mx.sym.exp(lv1)
    v2 = mx.sym.exp(lv2)
    mu_diff_sq = mx.sym.square(mu1 - mu2)
    dimwise_kld = .5 * (
        (lv2 - lv1) + mx.symbol.broadcast_div(v1, v2)
        + mx.symbol.broadcast_div(mu_diff_sq, v2) - 1.)
    KL = mx.symbol.sum(dimwise_kld, axis=1)
    KLloss = mx.symbol.MakeLoss(mx.symbol.mean(KL), name='KLloss')
    return KLloss
Example #16
Source File: test_loss.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_bce_loss():
    N = 20
    data = mx.random.uniform(-1, 1, shape=(N, 20))
    label = mx.nd.array(np.random.randint(2, size=(N,)), dtype='float32')
    data_iter = mx.io.NDArrayIter(data, label, batch_size=10, label_name='label')
    output = get_net(1)
    l = mx.symbol.Variable('label')
    Loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    loss = Loss(output, l)
    loss = mx.sym.make_loss(loss)
    mod = mx.mod.Module(loss, data_names=('data',), label_names=('label',))
    mod.fit(data_iter, num_epoch=200, optimizer_params={'learning_rate': 0.01},
            eval_metric=mx.metric.Loss(), optimizer='adam',
            initializer=mx.init.Xavier(magnitude=2))
    assert mod.score(data_iter, eval_metric=mx.metric.Loss())[0][1] < 0.01

    # Test against npy
    data = mx.random.uniform(-5, 5, shape=(10,))
    label = mx.random.uniform(0, 1, shape=(10,))
    mx_bce_loss = Loss(data, label).asnumpy()
    prob_npy = 1.0 / (1.0 + np.exp(-data.asnumpy()))
    label_npy = label.asnumpy()
    npy_bce_loss = - label_npy * np.log(prob_npy) - (1 - label_npy) * np.log(1 - prob_npy)
    assert_almost_equal(mx_bce_loss, npy_bce_loss, rtol=1e-4, atol=1e-5)
Example #17
Source File: run_audio_attack.py From Black-Box-Audio with MIT License
def get_new_pop(elite_pop, elite_pop_scores, pop_size):
    # softmax over elite scores (max-subtracted for numerical stability)
    scores_logits = np.exp(elite_pop_scores - elite_pop_scores.max())
    elite_pop_probs = scores_logits / scores_logits.sum()
    cand1 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    cand2 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
    mask = np.random.rand(pop_size, elite_pop.shape[1]) < 0.5
    next_pop = mask * cand1 + (1 - mask) * cand2
    return next_pop
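
Subtracting the maximum score before calling np.exp is the standard softmax stability trick: the shift cancels after normalization but prevents overflow. A minimal sketch with made-up scores:

import numpy as np

scores = np.array([1000., 1001., 1002.])  # naive np.exp(scores) would overflow
shifted = np.exp(scores - scores.max())
probs = shifted / shifted.sum()
print(probs)  # ~[0.0900, 0.2447, 0.6652], the softmax of the raw scores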
Example #18
Source File: kde.py From svviz with MIT License
def evaluate(self, points):
    points = atleast_2d(points)

    d, m = points.shape
    if d != self.d:
        if d == 1 and m == self.d:
            # points was passed in as a row vector
            points = reshape(points, (self.d, 1))
            m = 1
        else:
            msg = "points have dimension %s, dataset has dimension %s" % (d, self.d)
            raise ValueError(msg)

    result = zeros((m,), dtype=float)  # np.float in the original; removed in NumPy 1.24+

    if m >= self.n:
        # there are more points than data, so loop over data
        for i in range(self.n):
            diff = self.dataset[:, i, newaxis] - points
            tdiff = dot(self.inv_cov, diff)
            energy = sum(diff * tdiff, axis=0) / 2.0
            result = result + exp(-energy)
    else:
        # loop over points
        for i in range(m):
            diff = self.dataset - points[:, i, newaxis]
            tdiff = dot(self.inv_cov, diff)
            energy = sum(diff * tdiff, axis=0) / 2.0
            result[i] = sum(exp(-energy), axis=0)

    result = result / self._norm_factor

    return result
Example #19
Source File: tcpr.py From libTLDA with MIT License
def learning_rate_t(self, t):
    """
    Compute current learning rate after decay.

    Parameters
    ----------
    t : int
        current iteration

    Returns
    -------
    alpha : float
        current learning rate
    """
    # Select rate decay
    if self.rate_decay == 'linear':
        # Linear dropoff between t=0 and t=T
        alpha = (self.max_iter - t) / (self.learning_rate * self.max_iter)
    elif self.rate_decay == 'quadratic':
        # Quadratic dropoff between t=0 and t=T
        alpha = ((self.max_iter - t) / (self.learning_rate * self.max_iter))**2
    elif self.rate_decay == 'geometric':
        # Drop rate off inversely to time
        alpha = 1 / (self.learning_rate * t)
    elif self.rate_decay == 'exponential':
        # Exponential dropoff
        alpha = np.exp(-self.learning_rate * t)
    else:
        raise ValueError('Rate decay type unknown.')

    return alpha
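
In the 'exponential' branch the rate shrinks by a constant factor per iteration. A quick standalone evaluation of that schedule, assuming a hypothetical learning_rate of 0.1:

import numpy as np

learning_rate = 0.1
for t in [0, 10, 50]:
    print(t, np.exp(-learning_rate * t))  # 1.0, ~0.3679, ~0.0067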
Example #20
Source File: tcpr.py From libTLDA with MIT License
def predict_proba(self, Z):
    """
    Compute posteriors on new dataset.

    Parameters
    ----------
    Z : array
        new data set (M samples by D features)

    Returns
    -------
    preds : array
        label predictions (M samples by 1)
    """
    # Data shape
    M, D = Z.shape

    # If classifier is trained, check for same dimensionality
    if self.is_trained:
        if not self.train_data_dim == D:
            raise ValueError('''Test data is of different dimensionality than training data.''')

    if self.loss in ['lda', 'qda']:
        # Compute probabilities under each distribution
        nLL = self.neg_log_likelihood(Z, self.parameters)

        # Compute likelihood
        probs = np.exp(-nLL)
    else:
        raise NotImplementedError('Loss function not implemented yet.')

    # Return posterior probabilities
    return probs
Example #21
Source File: rba.py From libTLDA with MIT License
def posterior(self, psi):
    """
    Class-posterior estimation.

    Parameters
    ----------
    psi : array
        weighted data-classifier output (N samples by K classes)

    Returns
    -------
    pyx : array
        class-posterior estimation (N samples by K classes)
    """
    # Data shape
    N, K = psi.shape

    # Preallocate array
    pyx = np.zeros((N, K))

    # Subtract maximum value for numerical stability
    psi = (psi.T - np.max(psi, axis=1).T).T

    # Loop over classes
    for k in range(K):
        # Estimate posterior p^(Y=y | x_i)
        pyx[:, k] = np.exp(psi[:, k]) / np.sum(np.exp(psi), axis=1)

    return pyx
Example #22
Source File: rba.py From libTLDA with MIT License
def learning_rate_t(self, t):
    """
    Compute current learning rate after decay.

    Parameters
    ----------
    t : int
        current iteration

    Returns
    -------
    alpha : float
        current learning rate
    """
    # Select rate decay
    if self.rate_decay == 'linear':
        # Linear dropoff between t=0 and t=T
        alpha = (self.max_iter - t) / (self.learning_rate * self.max_iter)
    elif self.rate_decay == 'quadratic':
        # Quadratic dropoff between t=0 and t=T
        alpha = ((self.max_iter - t) / (self.learning_rate * self.max_iter))**2
    elif self.rate_decay == 'geometric':
        # Drop rate off inversely to time
        alpha = 1 / (self.learning_rate * t)
    elif self.rate_decay == 'exponential':
        # Exponential dropoff
        alpha = np.exp(-self.learning_rate * t)
    else:
        raise ValueError('Rate decay type unknown.')

    return alpha
Example #23
Source File: model.py From osqf2015 with MIT License
def scenario_values(cls, returns, neutral, current_vola):
    scenarios = neutral * np.exp(current_vola * returns)
    return scenarios
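
Multiplying a neutral level by exp(vola * return) applies the devolatilized log-returns as multiplicative (lognormal-style) shocks. A toy sketch with made-up numbers:

import numpy as np

neutral = 100.0  # hypothetical current price level
log_returns = np.array([-0.02, 0.0, 0.02])
print(neutral * np.exp(0.5 * log_returns))  # ~[99.0, 100.0, 101.0] for a vola of 0.5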
Example #24
Source File: model.py From osqf2015 with MIT License
def compute_scenarios(self, d, n_scenarios=750):
    # identify returns
    dates = pd.to_datetime(d, unit='ms')
    max_date = dates[0].date()
    min_date = max_date.replace(year=max_date.year - 3)
    logging.info('Computing returns between ')  # , str(max_date), ' and ', str(min_date))
    # note: DataFrame.ix is deprecated and removed in modern pandas
    self.returns_df = self.df[min_date:max_date].ix[-n_scenarios - 1:]
    neutral, vola = self.returns_df.ix[max_date][['Close', 'Vola']]
    scenarios = neutral * np.exp(vola * self.returns_df.ix[:-1].DevolLogReturns)
    return scenarios, neutral
Example #25
Source File: atomic_evaluate.py From comet-commonsense with Apache License 2.0
def make_evaluator(opt, *args):
    if opt.exp == "generation":
        return AtomicGenerationEvaluator(opt, *args)
    else:
        return AtomicClassificationEvaluator(opt, *args)
Example #26
Source File: atomic_evaluate.py From comet-commonsense with Apache License 2.0
def compute_final_scores(self, average_loss, nums):
    average_loss["total_macro"] /= nums["total_macro"]
    average_loss["total_micro"] /= nums["total_micro"]

    average_loss["ppl_macro"] = np.exp(average_loss["total_macro"])
    average_loss["ppl_micro"] = np.exp(average_loss["total_micro"])

    return average_loss
Example #27
Source File: generators.py From FRIDA with MIT License
def gen_sig_at_mic(sigmak2_k, phi_k, pos_mic_x, pos_mic_y,
                   omega_band, sound_speed, SNR, Ns=256):
    """
    generate complex base-band signal received at microphones
    :param sigmak2_k: the variance of the circulant complex Gaussian signal
                emitted by the K sources
    :param phi_k: source locations (azimuths)
    :param pos_mic_x: a vector that contains microphones' x coordinates
    :param pos_mic_y: a vector that contains microphones' y coordinates
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param SNR: SNR for the received signal at microphones
    :param Ns: number of snapshots used to estimate the covariance matrix
    :return: y_mic: received (complex) signal at microphones
    """
    num_mic = pos_mic_x.size
    xk, yk = polar2cart(1, phi_k)  # source locations in cartesian coordinates
    # reshape to use broadcasting
    xk = np.reshape(xk, (1, -1), order='F')
    yk = np.reshape(yk, (1, -1), order='F')
    pos_mic_x = np.reshape(pos_mic_x, (-1, 1), order='F')
    pos_mic_y = np.reshape(pos_mic_y, (-1, 1), order='F')

    t = np.reshape(np.linspace(0, 10 * np.pi, num=Ns), (1, -1), order='F')
    K = sigmak2_k.size
    sigmak2_k = np.reshape(sigmak2_k, (-1, 1), order='F')

    # x_tilde_k size: K x length_of_t
    # circular complex Gaussian process
    x_tilde_k = np.sqrt(sigmak2_k / 2.) * (np.random.randn(K, Ns) +
                                           1j * np.random.randn(K, Ns))
    y_mic = np.dot(np.exp(-1j * (xk * pos_mic_x + yk * pos_mic_y) / (sound_speed / omega_band)),
                   x_tilde_k * np.exp(1j * omega_band * t))

    signal_energy = linalg.norm(y_mic, 'fro') ** 2
    noise_energy = signal_energy / 10 ** (SNR * 0.1)
    sigma2_noise = noise_energy / (Ns * num_mic)
    noise = np.sqrt(sigma2_noise / 2.) * (np.random.randn(*y_mic.shape) +
                                          1j * np.random.randn(*y_mic.shape))
    y_mic_noisy = y_mic + noise
    return y_mic_noisy, y_mic
Example #28
Source File: generators.py From FRIDA with MIT License
def gen_dirty_img(visi, pos_mic_x, pos_mic_y, omega_band, sound_speed, phi_plt):
    """
    Compute the dirty image associated with the given measurements. Here the
    Fourier transform that is not measured by the microphone array is taken
    as zero.
    :param visi: the measured visibilities
    :param pos_mic_x: a vector that contains microphone array locations (x-coordinates)
    :param pos_mic_y: a vector that contains microphone array locations (y-coordinates)
    :param omega_band: mid-band (ANGULAR) frequency [radian/sec]
    :param sound_speed: speed of sound
    :param phi_plt: plotting grid (azimuth on the circle) to show the dirty image
    :return:
    """
    img = np.zeros(phi_plt.size, dtype=complex)
    x_plt, y_plt = polar2cart(1, phi_plt)
    num_mic = pos_mic_x.size

    pos_mic_x_normalised = pos_mic_x / (sound_speed / omega_band)
    pos_mic_y_normalised = pos_mic_y / (sound_speed / omega_band)

    count_visi = 0
    for q in range(num_mic):  # xrange in the Python 2 original
        p_x_outer = pos_mic_x_normalised[q]
        p_y_outer = pos_mic_y_normalised[q]
        for qp in range(num_mic):
            if not q == qp:
                p_x_qqp = p_x_outer - pos_mic_x_normalised[qp]  # a scalar
                p_y_qqp = p_y_outer - pos_mic_y_normalised[qp]  # a scalar
                # <= the negative sign converts DOA to propagation vector
                img += visi[count_visi] * \
                       np.exp(-1j * (p_x_qqp * x_plt + p_y_qqp * y_plt))
                count_visi += 1
    return img / (num_mic * (num_mic - 1))
Example #29
Source File: tools_fri_doa_plane.py From FRIDA with MIT License
def mtx_updated_G_multiband(phi_recon, M, mtx_amp2visi_ri,
                            mtx_fri2visi_ri, num_bands):
    """
    Update the linear transformation matrix that links the FRI sequence to the
    visibilities by using the reconstructed Dirac locations.
    :param phi_recon: the reconstructed Dirac locations (azimuths)
    :param M: the Fourier series expansion is between -M and M
    :param mtx_amp2visi_ri: the linear mapping from Dirac amplitudes to visibilities
    :param mtx_fri2visi_ri: the linear mapping from the FRI sequence to visibilities
    :param num_bands: number of subbands
    :return:
    """
    L = 2 * M + 1
    ms_half = np.reshape(np.arange(-M, 1, step=1), (-1, 1), order='F')
    phi_recon = np.reshape(phi_recon, (1, -1), order='F')
    mtx_amp2freq = np.exp(-1j * ms_half * phi_recon)  # size: (M + 1) x K
    mtx_amp2freq_ri = np.vstack((mtx_amp2freq.real, mtx_amp2freq.imag[:-1, :]))  # size: (2M + 1) x K
    mtx_fri2amp_ri = linalg.lstsq(mtx_amp2freq_ri, np.eye(L))[0]
    # projection of mtx_freq2visi onto the null space of mtx_fri2amp
    mtx_null_proj = np.eye(L) - np.dot(mtx_fri2amp_ri.T,
                                       linalg.lstsq(mtx_fri2amp_ri.T, np.eye(L))[0])
    G_updated = np.dot(mtx_amp2visi_ri,
                       linalg.block_diag(*([mtx_fri2amp_ri] * num_bands))) + \
                np.dot(mtx_fri2visi_ri,
                       linalg.block_diag(*([mtx_null_proj] * num_bands)))
    return G_updated
Example #30
Source File: predict.py From Traffic_sign_detection_YOLO with MIT License
def expit(x):
    return 1. / (1. + np.exp(-x))
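
This hand-rolled sigmoid is correct, but np.exp(-x) emits an overflow warning for large negative x (the result still saturates to 0). scipy.special.expit computes the same function robustly, assuming SciPy is available:

import numpy as np
from scipy.special import expit  # SciPy's numerically robust sigmoid

x = np.array([-5., 0., 5.])
print(1. / (1. + np.exp(-x)))  # [0.00669285 0.5 0.99330715]
print(expit(x))                # identical values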