Python scipy.empty() Examples
The following are 6 code examples of scipy.empty().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module scipy, or try the search function.
Example #1
Source File: testing.py From Mastering-Elasticsearch-7.0 with MIT License | 5 votes |
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    .. deprecated:: 0.20
        Will be removed in version 0.22

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    # FIX: the original called sp.empty(); scipy's top-level NumPy aliases
    # (scipy.empty etc.) are deprecated and removed in modern SciPy, so use
    # numpy directly. Local import keeps this edit self-contained.
    import numpy as np

    datasets = dict(columns_dict)

    # Transpose all variables (see Notes: mirrors fetch_mldata's layout).
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays; a (1, n) object array round-trips as a cell array.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
Example #2
Source File: testing.py From Splunking-Crime with GNU Affero General Public License v3.0 | 5 votes |
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    # FIX: replaced sp.empty() with numpy.empty(); scipy's top-level NumPy
    # aliases are deprecated/removed in modern SciPy.
    import numpy as np

    datasets = dict(columns_dict)

    # Transpose all variables (see Notes above).
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays; a (1, n) object array round-trips as a cell array.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
Example #3
Source File: plinkfiles.py From ldpred with MIT License | 5 votes |
def parse_plink_snps(genotype_file, snp_indices):
    """Extract a subset of SNPs from a plink .bed file.

    Parameters
    ----------
    genotype_file : path of the plink genotype file, opened via
        ``plinkfile.PlinkFile`` (third-party plinkio reader).
    snp_indices : array of integer line indices (0-based) of the SNPs to
        extract; need not be sorted — rows of the result follow the order
        of ``snp_indices``, not file order.

    Returns
    -------
    (raw_snps, freqs) : int8 array of shape (num_snps, num_individs) and a
        float32 array of per-SNP allele frequencies.
    """
    plinkf = plinkfile.PlinkFile(genotype_file)
    samples = plinkf.get_samples()
    num_individs = len(samples)
    num_snps = len(snp_indices)
    raw_snps = sp.empty((num_snps, num_individs), dtype='int8')
    # If these indices are not in order then we place them in the right place
    # while parsing SNPs: snp_order maps "k-th smallest index" -> output row.
    snp_order = sp.argsort(snp_indices)
    ordered_snp_indices = list(snp_indices[snp_order])
    # Reversed so pop() yields the indices in ascending order.
    ordered_snp_indices.reverse()
    # Iterating over file to load SNPs
    snp_i = 0
    next_i = ordered_snp_indices.pop()
    line_i = 0
    # Largest remaining index: the file scan stops once it is passed.
    # NOTE(review): when snp_indices has exactly one element the list is empty
    # here and this raises IndexError — confirm callers always pass >= 2.
    max_i = ordered_snp_indices[0]
    while line_i <= max_i:
        if line_i < next_i:
            # Not a wanted SNP: advance the file iterator without decoding.
            next(plinkf)
        elif line_i == next_i:
            line = next(plinkf)
            snp = sp.array(line, dtype='int8')
            bin_counts = line.allele_counts()
            # Presumably the last allele-count bin counts missing genotypes
            # (coded 3); impute them with the most common homozygote (0 or 1).
            # TODO(review): confirm against plinkio's allele_counts() docs.
            if bin_counts[-1] > 0:
                mode_v = sp.argmax(bin_counts[:2])
                snp[snp == 3] = mode_v
            # Place the row where the (unsorted) snp_indices ordering expects it.
            s_i = snp_order[snp_i]
            raw_snps[s_i] = snp
            if line_i < max_i:
                next_i = ordered_snp_indices.pop()
            snp_i += 1
        line_i += 1
    plinkf.close()
    assert snp_i == len(raw_snps), 'Parsing SNPs from plink file failed.'
    num_indivs = len(raw_snps[0])
    # Allele frequency: genotype sum over individuals / (2 * N) for diploids.
    freqs = sp.sum(raw_snps, 1, dtype='float32') / (2 * float(num_indivs))
    return raw_snps, freqs
Example #4
Source File: liblinear.py From AVEC2018 with MIT License | 5 votes |
def csr_to_problem(x, prob):
    """Populate the liblinear ``problem`` struct ``prob`` from CSR matrix ``x``.

    Builds a flat ``feature_node`` array (``prob.x_space``) holding every
    stored entry of ``x`` plus, per row, two extra slots (termination node
    and possible bias term), and a row-pointer array (``prob.rowptr``)
    adjusted for those extra slots. The heavy fill is delegated to the
    jit/nojit helpers defined elsewhere in this module.
    """
    # FIX: the original used scipy.empty()/scipy.arange(); scipy's top-level
    # NumPy aliases are deprecated and removed in modern SciPy — use numpy.
    import numpy as np

    # Extra space for termination node and (possibly) bias term
    x_space = prob.x_space = np.empty((x.nnz + x.shape[0] * 2),
                                      dtype=feature_node)
    prob.rowptr = x.indptr.copy()
    # Each row grows by 2 slots, so row k's start shifts by 2*k.
    prob.rowptr[1:] += 2 * np.arange(1, x.shape[0] + 1)
    prob_ind = x_space["index"]
    prob_val = x_space["value"]
    # -1 marks unfilled / terminator positions for liblinear.
    prob_ind[:] = -1
    if jit_enabled:
        csr_to_problem_jit(x.shape[0], x.data, x.indices, x.indptr,
                           prob_val, prob_ind, prob.rowptr)
    else:
        csr_to_problem_nojit(x.shape[0], x.data, x.indices, x.indptr,
                             prob_val, prob_ind, prob.rowptr)
Example #5
Source File: testing.py From twitter-stock-recommendation with MIT License | 5 votes |
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    # FIX: replaced sp.empty() with numpy.empty(); scipy's top-level NumPy
    # aliases are deprecated/removed in modern SciPy.
    import numpy as np

    datasets = dict(columns_dict)

    # Transpose all variables (see Notes above).
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays; a (1, n) object array round-trips as a cell array.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
Example #6
Source File: LDpred_inf.py From ldpred with MIT License | 4 votes |
def ldpred_inf(beta_hats, h2=0.1, n=1000, inf_shrink_matrices=None,
               reference_ld_mats=None, genotypes=None, ld_window_size=100,
               verbose=False):
    """
    Apply the infinitesimal shrink w LD (which requires LD information).

    If reference_ld_mats are supplied, it uses those, otherwise it uses
    the LD in the genotype data.

    If genotypes are supplied, then it assumes that beta_hats and the
    genotypes are synchronized, i.e. rows of ``genotypes`` line up with
    entries of ``beta_hats``.

    Parameters
    ----------
    beta_hats : 1D array of marginal effect estimates (one per SNP).
    h2 : assumed total heritability.
    n : sample size the betas were estimated on.
    inf_shrink_matrices : optional precomputed per-window A^-1 matrices.
    reference_ld_mats : optional per-window LD (D) matrices.
    genotypes : optional (num_snps, num_indivs) genotype matrix.
    ld_window_size : number of SNPs shrunk jointly per window.
    verbose : print progress to stdout.

    Returns
    -------
    1D array of LD-adjusted betas, same length as ``beta_hats``.
    """
    # FIX 1: the original tested `x != None`; for numpy-array arguments that
    # is an elementwise comparison and `if` raises ValueError. Use identity.
    # FIX 2: sp.empty/sp.eye/sp.dot were scipy's deprecated NumPy aliases
    # (removed in modern SciPy) — use numpy directly.
    import numpy as np

    n = float(n)
    if verbose:
        print('Doing LD correction')
    t0 = time.time()
    m = len(beta_hats)

    updated_betas = np.empty(m)
    for i, wi in enumerate(range(0, m, ld_window_size)):
        start_i = wi
        stop_i = min(m, wi + ld_window_size)
        curr_window_size = stop_i - start_i
        if inf_shrink_matrices is not None:
            A_inv = inf_shrink_matrices[i]
        else:
            if reference_ld_mats is not None:
                D = reference_ld_mats[i]
            else:
                if genotypes is not None:
                    X = genotypes[start_i: stop_i]
                    num_indivs = X.shape[1]
                    # Empirical LD matrix for this window.
                    D = np.dot(X, X.T) / num_indivs
                else:
                    raise NotImplementedError
            # Infinitesimal-model shrink matrix: (m/h2) I + n D.
            A = ((m / h2) * np.eye(curr_window_size) + (n / (1.0)) * D)
            # pinv rather than inv: D can be singular within a window.
            A_inv = linalg.pinv(A)
        # Adjust the beta_hats for this window.
        updated_betas[start_i: stop_i] = np.dot(A_inv * n,
                                                beta_hats[start_i: stop_i])
        if verbose:
            sys.stdout.write('\r%0.2f%%' %
                             (100.0 * (min(1, float(wi + ld_window_size) / m))))
            sys.stdout.flush()
    t1 = time.time()
    t = (t1 - t0)
    if verbose:
        print('\nIt took %d minutes and %0.2f seconds to perform the Infinitesimal LD shrink' % (t / 60, t % 60))
    return updated_betas