Python numpy.array() Examples
The following are 30 code examples of numpy.array(), collected from open-source projects. The source file, project, and license are noted above each example.
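Before the project examples, here is a minimal, self-contained illustration of the call itself (not taken from any of the projects below):

import numpy as np

# numpy.array() builds an ndarray from any (nested) Python sequence;
# the dtype is inferred unless given explicitly.
a = np.array([1, 2, 3])                    # shape (3,), integer dtype
b = np.array([[1.0, 2.0], [3.0, 4.0]])     # shape (2, 2), float dtype
c = np.array([1, 2, 3], dtype=np.float32)  # explicit dtype

print(a.shape, b.shape, c.dtype)           # (3,) (2, 2) float32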

Example #1
Source File: display_methods.py From indras_net with GNU General Public License v3.0
def create_lines(self, x, varieties):
    """
    Draw just the data portion.
    """
    lines = pd.DataFrame()
    for i, var in enumerate(varieties):
        self.legend.append(var)
        data = varieties[var]["data"]
        color = get_color(varieties[var], i)
        x_array = np.array(x)
        y_array = np.array(data)
        line = pd.DataFrame({"x": x_array, "y": y_array,
                             "color": color, "var": var})
        lines = lines.append(line, ignore_index=True, sort=False)
    return lines
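A side note on this example: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, so on current pandas the accumulation loop is usually rewritten with pd.concat. A hedged sketch of the same pattern (the sample data here is invented):

import numpy as np
import pandas as pd

# Collect one frame per variety, then concatenate once at the end,
# which is also cheaper than appending row blocks inside the loop.
frames = []
for var, xs, ys in [("wolves", [0, 1], [10, 8]), ("sheep", [0, 1], [50, 55])]:
    frames.append(pd.DataFrame({"x": np.array(xs),
                                "y": np.array(ys),
                                "var": var}))
lines = pd.concat(frames, ignore_index=True)
print(lines)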
Example #2
Source File: display_methods.py From indras_net with GNU General Public License v3.0
def get_arrays(self, varieties, var):
    x_array = np.array(varieties[var][X])
    y_array = np.array(varieties[var][Y])
    return (x_array, y_array)
Example #3
Source File: display_methods.py From indras_net with GNU General Public License v3.0
def get_arrays(self, varieties, var):
    x_array = np.array(varieties[var][X])
    y_array = np.array(varieties[var][Y])
    return (x_array, y_array)
Example #4
Source File: buyer_action_s.py From indras_net with GNU General Public License v3.0
def matrix_reduction(agent):
    matrix, res = agent["strategy"]["data_collection"](agent)
    col = len(matrix[0])
    if col > len(matrix):  # not enough rows for matrix reduction
        return -1
    i = 0
    x = []
    while i < len(matrix) and len(x) == 0:
        A = numpy.array(matrix[i:i + col])
        b = numpy.array(res[i:i + col])
        try:
            x = numpy.linalg.solve(A, b)
        except numpy.linalg.LinAlgError:
            i += 1
    if len(x) == 0:
        return -1
    else:
        for emoji in agent["emoji_experienced"]:
            index = agent["emoji_experienced"][emoji]
            agent["emoji_scores"][emoji] = round(x[index][0], 2)
        agent["predicted_base_line"] = round(x[-1][0], 2)
        return 0
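The numpy core of this example is stacking rows into a square coefficient matrix A and a right-hand side b with numpy.array(), then calling numpy.linalg.solve. A minimal standalone sketch (the system here is invented):

import numpy as np

# Solve the system 3x + y = 9, x + 2y = 8.
A = np.array([[3.0, 1.0], [1.0, 2.0]])
b = np.array([[9.0], [8.0]])  # column vector, matching the x[index][0] indexing above

try:
    x = np.linalg.solve(A, b)
    print(x.ravel())  # [2. 3.]
except np.linalg.LinAlgError:
    print("singular system, no unique solution")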
Example #5
Source File: TripletSampler.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def hard_negative_multilabel(self):
    """Hard Negative Sampling based on multilabel assumption

    Search the negative sample with the largest distance
    (smallest similarity) to the anchor within self._k negative samples.
    """
    # During early iterations of sampling, use random sampling instead
    if self._iteration <= self._n:
        return self.random_multilabel()
    anchor_class_id, negative_class_id = np.random.choice(
        list(self._index.keys()), 2)  # list() needed under Python 3
    anchor_id, positive_id = np.random.choice(
        self._index[anchor_class_id], 2)
    negative_ids = np.random.choice(
        self._index[negative_class_id], self._k)
    # calculate the negative with the smallest similarity to the anchor
    anchor_label = parse_label(self._labels[anchor_id])
    positive_label = parse_label(self._labels[positive_id])
    negative_labels = [parse_label(self._labels[negative_id])
                       for negative_id in negative_ids]
    p_sim = intersect_sim(anchor_label, positive_label)
    n_sims = np.array(
        [intersect_sim(anchor_label, negative_label)
         for negative_label in negative_labels])
    min_sim_id = np.argmin(n_sims)
    negative_id = negative_ids[min_sim_id]
    n_sim = n_sims[min_sim_id]
    margin = p_sim - n_sim
    return (anchor_id, positive_id, negative_id, margin)
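The hard-negative pick itself reduces to wrapping the candidate similarities in numpy.array() and taking np.argmin; a toy illustration (values invented):

import numpy as np

n_sims = np.array([0.8, 0.1, 0.5])  # similarity of each negative to the anchor
hardest = int(np.argmin(n_sims))    # index of the least similar candidate
print(hardest, n_sims[hardest])     # 1 0.1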
Example #6
Source File: estimator_utils.py From EDeN with MIT License
def plot_stats(x=None, y=None, label=None, color='navy'):
    """plot_stats."""
    y = np.array(y)
    y0 = y[0]
    y1 = y[1]
    y2 = y[2]
    y3 = y[3]
    y4 = y[4]
    plt.fill_between(x, y3, y4, color=color, alpha=0.08)
    plt.fill_between(x, y1, y2, color=color, alpha=0.08)
    plt.plot(x, y0, '-', lw=2, color=color, label=label)
    plt.plot(x, y0,
             linestyle='None',
             markerfacecolor='white',
             markeredgecolor=color,
             marker='o',
             markeredgewidth=2,
             markersize=8)
Example #7
Source File: run_audio_attack.py From Black-Box-Audio with MIT License
def __init__(self, input_wave_file, output_wave_file, target_phrase):
    self.pop_size = 100
    self.elite_size = 10
    self.mutation_p = 0.005
    self.noise_stdev = 40
    self.noise_threshold = 1
    self.mu = 0.9
    self.alpha = 0.001
    self.max_iters = 3000
    self.num_points_estimate = 100
    self.delta_for_gradient = 100
    self.delta_for_perturbation = 1e3
    self.input_audio = load_wav(input_wave_file).astype(np.float32)
    self.pop = np.expand_dims(self.input_audio, axis=0)
    self.pop = np.tile(self.pop, (self.pop_size, 1))
    self.output_wave_file = output_wave_file
    self.target_phrase = target_phrase
    self.funcs = self.setup_graph(
        self.pop, np.array([toks.index(x) for x in target_phrase]))
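The population setup above relies on np.expand_dims plus np.tile to replicate one waveform into pop_size identical rows. A small sketch with invented values:

import numpy as np

audio = np.array([0.1, -0.2, 0.3], dtype=np.float32)  # stand-in waveform
pop = np.tile(np.expand_dims(audio, axis=0), (4, 1))  # 4 copies, one per row
print(pop.shape)  # (4, 3)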
Example #8
Source File: DataManager.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def load_all(self):
    """The function to load all data and labels

    Gives:
        data: the list of raw data, needs to be decompressed
            (e.g., raw JPEG strings)
        labels: numpy array, with each element being a string
    """
    start = time.time()
    print("Start Loading Data from BCF {}".format(
        'MEMORY' if self._bcf_mode == 'MEM' else 'FILE'))
    self._labels = np.loadtxt(self._label_fn).astype(str)
    if self._bcf.size() != self._labels.shape[0]:
        raise Exception("Number of samples in data "
                        "and labels are not equal")
    else:
        for idx in range(self._bcf.size()):
            datum_str = self._bcf.get(idx)
            self._data.append(datum_str)
    end = time.time()
    print("Loading {} samples Done: Time cost {} seconds".format(
        len(self._data), end - start))
    return self._data, self._labels
Example #9
Source File: BasePythonDataLayer.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def setup(self, bottom, top):
    layer_params = yaml.load(self.param_str)
    self._layer_params = layer_params
    # default batch_size = 256
    self._batch_size = int(layer_params.get('batch_size', 256))
    self._resize = layer_params.get('resize', -1)
    self._mean_file = layer_params.get('mean_file', None)
    self._source_type = layer_params.get('source_type', 'CSV')
    self._shuffle = layer_params.get('shuffle', False)
    # read image_mean from file and preload all data into memory
    # will read either file or array into self._mean
    self.set_mean()
    self.preload_db()
    self._compressed = self._layer_params.get('compressed', True)
    if not self._compressed:
        self.decompress_data()
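One caveat when reusing this layer today: newer PyYAML releases require an explicit Loader argument for yaml.load, so the parameter parsing is typically written with yaml.safe_load instead. A hedged sketch (the param string here is invented):

import yaml

param_str = "batch_size: 128\nshuffle: true"  # example Caffe param_str
layer_params = yaml.safe_load(param_str)      # no Loader argument needed
print(int(layer_params.get('batch_size', 256)),
      layer_params.get('shuffle', False))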
Example #10
Source File: BasePythonDataLayer.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
    """Generate the next mini-batch

    The return value is a list of numpy arrays: [data, label].
    The reshape function will be called based on the results of
    this function. Needs to be implemented in each subclass.
    """
    pass
Example #11
Source File: TripletDataLayer.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_a_datum(self):
    """Get a datum:

    Sampling -> decode images -> stack numpy array
    """
    sample = self._sampler.sample()
    if self._compressed:
        datum_ = [
            extract_sample(self._data[id], self._mean, self._resize)
            for id in sample[:3]]
    else:
        datum_ = [self._data[id] for id in sample[:3]]
    if len(sample) == 4:
        datum_.append(sample[-1])
    return datum_
Example #12
Source File: TripletDataLayer.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
    if self._prefetch:
        # get mini-batch from prefetcher
        batch = self._conn.recv()
    else:
        # generate using in-thread functions
        data = []
        p_data = []
        n_data = []
        label = []
        for i in range(self._batch_size):
            datum_ = self.get_a_datum()
            data.append(datum_[0])
            p_data.append(datum_[1])
            n_data.append(datum_[2])
            if len(datum_) == 4:
                # datum and label / margin
                label.append(datum_[-1])
        batch = [np.array(data), np.array(p_data), np.array(n_data)]
        if len(label):
            label = np.array(label).reshape(self._batch_size, 1, 1, 1)
            batch.append(label)
    return batch
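The batching idiom in this example: numpy.array() on a list of equally-shaped samples stacks them along a new leading axis, and reshape gives the labels the 4-D blob shape Caffe expects. A standalone toy version (shapes invented):

import numpy as np

samples = [np.zeros((3, 8, 8)) for _ in range(4)]    # four C x H x W "images"
batch = np.array(samples)                            # stacked: shape (4, 3, 8, 8)
labels = np.array([0, 1, 1, 0]).reshape(4, 1, 1, 1)  # Caffe-style label blob
print(batch.shape, labels.shape)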
Example #13
Source File: TripletDataLayer.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
    # generate using in-thread functions
    data = []
    p_data = []
    n_data = []
    label = []
    for i in range(self._batch_size):
        datum_ = self.get_a_datum()
        # print(len(datum_), ":".join([str(x.shape) for x in datum_]))
        data.append(datum_[0])
        p_data.append(datum_[1])
        n_data.append(datum_[2])
        if len(datum_) == 4:
            # datum and label / margin
            label.append(datum_[-1])
    batch = [np.array(data), np.array(p_data), np.array(n_data)]
    if len(label):
        label = np.array(label).reshape(self._batch_size, 1, 1, 1)
        batch.append(label)
    return batch
Example #14
Source File: __init__.py From vergeml with MIT License
def load_predictions(env, nclasses):
    path = os.path.join(env.stats_dir(), "predictions.csv")
    if not os.path.exists(path):
        # the original raised FileExistsError here; FileNotFoundError
        # is the exception that actually fits a missing file
        raise FileNotFoundError(path)
    with open(path, newline='') as csvfile:
        y_score = []
        y_test = []
        csv_reader = csv.reader(csvfile, dialect="excel")
        for row in csv_reader:
            assert len(row) == nclasses * 2
            y_score.append(list(map(float, row[:nclasses])))
            y_test.append(list(map(float, row[nclasses:])))
        y_score = np.array(y_score)
        y_test = np.array(y_test)
        return y_test, y_score
Example #15
Source File: graph_layout.py From EDeN with MIT License
def _scale(self, init_pos):
    _min = -0.5
    _max = 0.5
    pos = dict()
    max_x = max([init_pos[id][0] for id in init_pos])
    min_x = min([init_pos[id][0] for id in init_pos])
    max_y = max([init_pos[id][1] for id in init_pos])
    min_y = min([init_pos[id][1] for id in init_pos])
    for id in init_pos:
        x = init_pos[id][0]
        y = init_pos[id][1]
        # standardize
        x = (x - min_x) / (max_x - min_x)
        y = (y - min_y) / (max_y - min_y)
        # rescale
        x = x * (_max - _min) + _min
        y = y * (_max - _min) + _min
        pos[id] = np.array([x, y])
    return pos
Example #16
Source File: estimator_utils.py From EDeN with MIT License
def make_train_test_sets(pos_graphs, neg_graphs,
                         test_proportion=.3,
                         random_state=2):
    """make_train_test_sets."""
    random.seed(random_state)
    random.shuffle(pos_graphs)
    random.shuffle(neg_graphs)
    pos_dim = len(pos_graphs)
    neg_dim = len(neg_graphs)
    tr_pos_graphs = pos_graphs[:-int(pos_dim * test_proportion)]
    te_pos_graphs = pos_graphs[-int(pos_dim * test_proportion):]
    tr_neg_graphs = neg_graphs[:-int(neg_dim * test_proportion)]
    te_neg_graphs = neg_graphs[-int(neg_dim * test_proportion):]
    tr_graphs = tr_pos_graphs + tr_neg_graphs
    te_graphs = te_pos_graphs + te_neg_graphs
    tr_targets = [1] * len(tr_pos_graphs) + [0] * len(tr_neg_graphs)
    te_targets = [1] * len(te_pos_graphs) + [0] * len(te_neg_graphs)
    tr_graphs, tr_targets = paired_shuffle(tr_graphs, tr_targets)
    te_graphs, te_targets = paired_shuffle(te_graphs, te_targets)
    return (tr_graphs, np.array(tr_targets)), (te_graphs, np.array(te_targets))
Example #17
Source File: ml.py From EDeN with MIT License
def make_data_matrix(positive_data_matrix=None,
                     negative_data_matrix=None,
                     target=None):
    """make_data_matrix."""
    assert (positive_data_matrix is not None), \
        'ERROR: expecting non null positive_data_matrix'
    if negative_data_matrix is None:
        negative_data_matrix = positive_data_matrix.multiply(-1)
    if target is None and negative_data_matrix is not None:
        yp = [1] * positive_data_matrix.shape[0]
        yn = [-1] * negative_data_matrix.shape[0]
        y = np.array(yp + yn)
        data_matrix = vstack(
            [positive_data_matrix, negative_data_matrix], format="csr")
    if target is not None:
        data_matrix = positive_data_matrix
        y = target
    return data_matrix, y
Example #18
Source File: sequence.py From EDeN with MIT License
def _annotate_importance(self, seq, data_matrix):
    # compute distance from hyperplane as proxy of vertex importance
    if self.estimator is None:
        # if we do not provide an estimator then consider default
        # margin of 1 for all vertices
        scores = np.array([1] * data_matrix.shape[0])
    else:
        if hasattr(self.estimator, 'decision_function'):
            scores = self.estimator.decision_function(data_matrix)
        elif hasattr(self.estimator, 'predict_proba'):
            scores = self.estimator.predict_proba(data_matrix)
            scores = scores[:, -1]
    # compute the list of sparse vectors representation
    vec = []
    for i in range(data_matrix.shape[0]):
        vec.append(data_matrix.getrow(i))
    return scores, vec
Example #19
Source File: problem.py From fenics-topopt with MIT License
def lk(E=1.):
    """element stiffness matrix"""
    nu = 0.3
    k = np.array([0.5 - nu / 6., 0.125 + nu / 8., -0.25 - nu / 12.,
                  -0.125 + 0.375 * nu, -0.25 + nu / 12., -0.125 - nu / 8.,
                  nu / 6., 0.125 - 0.375 * nu])
    KE = E / (1 - nu**2) * np.array([
        [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
        [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
        [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
        [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
        [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
        [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
        [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
        [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
    return KE
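Assuming numpy is imported as np (as it is in this module), the returned 8x8 element stiffness matrix can be sanity-checked for shape and symmetry:

KE = lk(E=1.0)
print(KE.shape)               # (8, 8)
print(np.allclose(KE, KE.T))  # True: the stiffness matrix is symmetric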
Example #20
Source File: problem.py From fenics-topopt with MIT License
def lk(E=1.):
    """element stiffness matrix"""
    nu = 0.3
    k = np.array([0.5 - nu / 6., 0.125 + nu / 8., -0.25 - nu / 12.,
                  -0.125 + 0.375 * nu, -0.25 + nu / 12., -0.125 - nu / 8.,
                  nu / 6., 0.125 - 0.375 * nu])
    KE = E / (1 - nu**2) * np.array([
        [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
        [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
        [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
        [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
        [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
        [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
        [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
        [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
    return KE
Example #21
Source File: test_utils_times.py From aospy with Apache License 2.0
def test_add_uniform_time_weights():
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name='a').to_dataset()
    units_str = 'days since 2000-01-01 00:00:00'
    cal_str = 'noleap'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_STR].attrs['calendar'] = cal_str
    with pytest.raises(KeyError):
        ds[TIME_WEIGHTS_STR]
    ds = add_uniform_time_weights(ds)
    time_weights_expected = xr.DataArray(
        [1, 1, 1], coords=ds[TIME_STR].coords, name=TIME_WEIGHTS_STR)
    time_weights_expected.attrs['units'] = 'days'
    assert ds[TIME_WEIGHTS_STR].identical(time_weights_expected)
Example #22
Source File: test_utils_times.py From aospy with Apache License 2.0
def test_ensure_time_as_index_with_change():
    # Time bounds array doesn't index time initially, which gets fixed.
    arr = xr.DataArray([-93], dims=[TIME_STR], coords={TIME_STR: [3]})
    arr[TIME_STR].attrs['units'] = 'days since 2000-01-01 00:00:00'
    arr[TIME_STR].attrs['calendar'] = 'standard'
    ds = arr.to_dataset(name='a')
    ds.coords[TIME_WEIGHTS_STR] = xr.DataArray(
        [1], dims=[TIME_STR], coords={TIME_STR: arr[TIME_STR]})
    ds.coords[TIME_BOUNDS_STR] = xr.DataArray(
        [[3.5, 4.5]], dims=[TIME_STR, BOUNDS_STR],
        coords={TIME_STR: arr[TIME_STR]})
    ds = ds.isel(**{TIME_STR: 0})
    actual = ensure_time_as_index(ds)
    expected = arr.to_dataset(name='a')
    expected.coords[TIME_WEIGHTS_STR] = xr.DataArray(
        [1], dims=[TIME_STR], coords={TIME_STR: arr[TIME_STR]})
    expected.coords[TIME_BOUNDS_STR] = xr.DataArray(
        [[3.5, 4.5]], dims=[TIME_STR, BOUNDS_STR],
        coords={TIME_STR: arr[TIME_STR]})
    xr.testing.assert_identical(actual, expected)
Example #23
Source File: run_audio_attack.py From Black-Box-Audio with MIT License
def db(audio):
    if len(audio.shape) > 1:
        maxx = np.max(np.abs(audio), axis=1)
        return 20 * np.log10(maxx) if np.any(maxx != 0) else np.array([0])
    maxx = np.max(np.abs(audio))
    return 20 * np.log10(maxx) if maxx != 0 else np.array([0])
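With the db helper above and numpy imported as np, the peak level comes back in decibels relative to an amplitude of 1, computed per row for 2-D input:

print(db(np.array([0.0, 0.5, -1.0])))            # 0.0 (peak |amplitude| is 1)
print(db(np.array([[0.5, -0.5], [0.1, 0.05]])))  # approximately [-6.02 -20.0]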
Example #24
Source File: run_audio_attack.py From Black-Box-Audio with MIT License
def save_wav(audio, output_wav_file):
    wav.write(output_wav_file, 16000,
              np.array(np.clip(np.round(audio), -2**15, 2**15 - 1),
                       dtype=np.int16))
    print('output dB', db(audio))
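The conversion idiom here rounds the float samples, clips them to the signed 16-bit range, and casts through np.array(..., dtype=np.int16) so the WAV writer receives valid PCM data. In isolation (sample values invented):

import numpy as np

audio = np.array([0.4, 32800.7, -40000.2])  # floats, some outside int16 range
pcm = np.array(np.clip(np.round(audio), -2**15, 2**15 - 1), dtype=np.int16)
print(pcm)  # [     0  32767 -32768]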
Example #25
Source File: run_audio_attack.py From Black-Box-Audio with MIT License
def setup_graph(self, input_audio_batch, target_phrase):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)),
                          dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size),
                          dtype=np.int32)
    pass_in = np.clip(input_audio_batch, -2**15, 2**15 - 1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
        len_batch = tf.placeholder(tf.float32, name='b')
        arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape,
                                     name='c')
        arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape,
                                    name='d')
        arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape,
                                    name='e')
        len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')

        logits = get_logits(inputs, arg2_logits)
        target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
        ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32),
                                 inputs=logits, sequence_length=len_seq)
        decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits,
                                              merge_repeated=True)

        sess = tf.Session()
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, "models/session_dump")

    func1 = lambda a, b, c, d, e, f: sess.run(
        ctcloss, feed_dict={inputs: a, len_batch: b, arg2_logits: c,
                            arg1_dense: d, arg2_dense: e, len_seq: f})
    func2 = lambda a, b, c, d, e, f: sess.run(
        [ctcloss, decoded], feed_dict={inputs: a, len_batch: b,
                                       arg2_logits: c, arg1_dense: d,
                                       arg2_dense: e, len_seq: f})
    return (func1, func2)
Example #26
Source File: run_audio_attack.py From Black-Box-Audio with MIT License
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)),
                          dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size),
                          dtype=np.int32)
    pass_in = np.clip(input_audio_batch, -2**15, 2**15 - 1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)
    if decode:
        return self.funcs[1](pass_in, batch_size, logits_arg2,
                             dense_arg1, dense_arg2, seq_len)
    else:
        return self.funcs[0](pass_in, batch_size, logits_arg2,
                             dense_arg1, dense_arg2, seq_len)
Example #27
Source File: tf_logits.py From Black-Box-Audio with MIT License
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """
    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1],
                       audio[:, 1:] - 0.97 * audio[:, :-1],
                       np.zeros((batch_size, 1000), dtype=np.float32)), 1)

    # 2. Windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i + 400]
                         for i in range(0, size - 320, 160)], 1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT energy
    energy = tf.reduce_sum(ffted, axis=2) + 1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters] * batch_size,
                                     dtype=np.float32)) + 1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:, :, :26]

    # 6. Amplify high frequencies for some reason
    _, nframes, ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22 / 2.) * np.sin(np.pi * n / 22)
    feat = lift * feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy), (-1, width, 1)),
                      feat[:, :, 1:]), axis=2)

    return feat
Example #28
Source File: vector_space.py From indras_net with GNU General Public License v3.0
def __init__(self, x=0, y=0):
    super().__init__()
    self.vector = np.array([x, y])
Example #29
Source File: vector_space.py From indras_net with GNU General Public License v3.0
def reverse(self):
    """
    Reverse the vector.
    Reflection across line y = x.
    """
    new_vec = np.array(np.flipud(self.vector))
    return from_vector(new_vec)
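np.flipud reverses an array along its first axis, so on a 2-element vector it swaps the components, which is exactly the reflection across y = x that the docstring describes:

import numpy as np

v = np.array([3, 7])
print(np.flipud(v))  # [7 3]: (x, y) becomes (y, x)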
Example #30
Source File: TripletDataLayer.py From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_a_datum(self):
    """Get a datum:

    Sampling -> decode images -> stack numpy array
    """
    sample = self._sampler.sample()
    if self._compressed:
        datum_ = [
            extract_sample(self._data[id], self._mean, self._resize)
            for id in sample[:3]]
    else:
        datum_ = [self._data[id] for id in sample[:3]]
    if len(sample) == 4:
        datum_.append(sample[-1])
    return datum_