Python numpy.argmax() Examples
The following are 30 code examples of numpy.argmax(), each drawn from an open-source project; the source file, project, and license are listed above each example. You may also want to check out the other available functions and classes of the numpy module.
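Before diving into the project snippets, here is a minimal sketch of the two ways numpy.argmax is typically called (the array values are made up for illustration): with no axis argument it indexes into the flattened array, while axis=1 returns one index per row.

import numpy as np

scores = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])

print(np.argmax(scores))          # 1: index into the flattened array
print(np.argmax(scores, axis=1))  # [1 0]: per-row indices of the maxima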

Example #1
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    x = tf.placeholder(tf.float32, x_val.shape)
    x_adv_p = self.attack.generate(x, over_shoot=0.02, max_iter=50,
                                   nb_candidate=2, clip_min=-5, clip_max=5)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #2
Source File: utils_tf.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def model_argmax(sess, x, predictions, samples, feed=None):
    """
    Helper function that computes the current class prediction
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param samples: numpy array with input samples (dims must match x)
    :param feed: An optional dictionary that is appended to the feeding
                 dictionary before the session runs. Can be used to feed
                 the learning phase of a Keras model, for instance.
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    feed_dict = {x: samples}
    if feed is not None:
        feed_dict.update(feed)
    probabilities = sess.run(predictions, feed_dict)

    if samples.shape[0] == 1:
        return np.argmax(probabilities)
    else:
        return np.argmax(probabilities, axis=1)
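The single-sample branch above exists because np.argmax without an axis argument returns a scalar index into the flattened array. A numpy-only sketch of the same pattern, with made-up probability values:

import numpy as np

def argmax_prediction(probabilities):
    # mirrors model_argmax: scalar class for one sample, vector for a batch
    if probabilities.shape[0] == 1:
        return np.argmax(probabilities)
    return np.argmax(probabilities, axis=1)

print(argmax_prediction(np.array([[0.2, 0.8]])))              # 1 (scalar)
print(argmax_prediction(np.array([[0.2, 0.8], [0.9, 0.1]])))  # [1 0]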
Example #3
Source File: doa.py From FRIDA with MIT License

def _peaks1D(self):
    if self.num_src == 1:
        self.src_idx[0] = np.argmax(self.P)
        self.sources[:, 0] = self.loc[:, self.src_idx[0]]
        self.phi_recon = self.theta[self.src_idx[0]]
    else:
        peak_idx = []
        n = self.P.shape[0]
        for i in range(self.num_loc):
            # straightforward peak finding
            if self.P[i] >= self.P[(i-1) % n] and self.P[i] > self.P[(i+1) % n]:
                if len(peak_idx) == 0 or peak_idx[-1] != i-1:
                    if not (i == self.num_loc and self.P[i] == self.P[0]):
                        peak_idx.append(i)

        peaks = self.P[peak_idx]
        max_idx = np.argsort(peaks)[-self.num_src:]
        self.src_idx = [peak_idx[k] for k in max_idx]
        self.sources = self.loc[:, self.src_idx]
        self.phi_recon = self.theta[self.src_idx]
        self.num_src = len(self.src_idx)

# ------------------Miscellaneous Functions---------------------#
Example #4
Source File: predict.py From Traffic_sign_detection_YOLO with MIT License

def process_box(self, b, h, w, threshold):
    max_indx = np.argmax(b.probs)
    max_prob = b.probs[max_indx]
    label = self.meta['labels'][max_indx]
    if max_prob > threshold:
        left = int((b.x - b.w/2.) * w)
        right = int((b.x + b.w/2.) * w)
        top = int((b.y - b.h/2.) * h)
        bot = int((b.y + b.h/2.) * h)
        if left < 0:
            left = 0
        if right > w - 1:
            right = w - 1
        if top < 0:
            top = 0
        if bot > h - 1:
            bot = h - 1
        mess = '{}'.format(label)
        return (left, right, top, bot, mess, max_indx, max_prob)
    return None
Example #5
Source File: experiment.py From Neural-LP with MIT License

def train(self):
    while (self.epoch < self.option.max_epoch and not self.early_stopped):
        self.one_epoch_train()
        self.one_epoch_valid()
        self.one_epoch_test()
        self.epoch += 1

        model_path = self.saver.save(self.sess,
                                     self.option.model_path,
                                     global_step=self.epoch)
        print("Model saved at %s" % model_path)

        if self.early_stop():
            self.early_stopped = True
            print("Early stopped at epoch %d" % (self.epoch))

    all_test_in_top = [np.mean(x[1]) for x in self.test_stats]
    best_test_epoch = np.argmax(all_test_in_top)
    best_test = all_test_in_top[best_test_epoch]

    msg = "Best test in top: %0.4f at epoch %d." % (best_test, best_test_epoch + 1)
    print(msg)
    self.log_file.write(msg + "\n")
    pickle.dump([self.train_stats, self.valid_stats, self.test_stats],
                open(os.path.join(self.option.this_expsdir, "results.pckl"), "w"))
Example #6
Source File: custom_datasets.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def __getitem__(self, index):
    img = self.adv_flat[self.sample_num, :]

    if(self.shuff == False):
        # shuff is true for non-pgd attacks
        img = torch.from_numpy(np.reshape(img, (3, 32, 32)))
    else:
        img = torch.from_numpy(img).type(torch.FloatTensor)
    target = np.argmax(self.adv_dict["adv_labels"], axis=1)[self.sample_num]
    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    self.sample_num = self.sample_num + 1
    return img, target
Example #7
Source File: chainer_alex.py From mlimages with MIT License

def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT,
                      mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break
Example #8
Source File: model.py From models with MIT License

def _get_bp_indexes_labranchor(self, soi):
    """
    Get indexes of branch point regions in given sequences.

    :param soi: batch of sequences of interest for introns (intron-3..intron+6)
    :return: array of predicted bp indexes
    """
    encoded = [onehot(str(seq)[self.acc_i - 70:self.acc_i]) for seq in np.nditer(soi)]
    labr_in = np.stack(encoded, axis=0)
    out = self.labranchor.predict_on_batch(labr_in)
    # for each row, pick the base with max branchpoint probability, and get its index
    max_indexes = np.apply_along_axis(lambda x: self.acc_i - 70 + np.argmax(x), axis=1, arr=out)
    # self.write_bp(max_indexes)
    return max_indexes

# TODO boilerplate
# def write_bp(self, max_indexes):
#     max_indexes = [str(seq) for seq in np.nditer(max_indexes)]
#     with open(''.join([this_dir, "/../customBP/example_files/bp_idx_chr21_labr.txt"]), "a") as bp_idx_file:
#         bp_idx_file.write('\n'.join(max_indexes))
#         bp_idx_file.write('\n')
#         bp_idx_file.close()
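The np.apply_along_axis call above adds a fixed offset to each row's argmax; the same result can also be obtained with a vectorized argmax over axis=1. A quick equivalence check with a toy matrix and an assumed offset standing in for self.acc_i - 70:

import numpy as np

out = np.array([[0.1, 0.9],
                [0.8, 0.2]])
offset = 5  # stands in for self.acc_i - 70
a = np.apply_along_axis(lambda x: offset + np.argmax(x), axis=1, arr=out)
b = offset + np.argmax(out, axis=1)
assert np.array_equal(a, b)  # both give [6 5]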
Example #9
Source File: custom_datasets.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def __getitem__(self, index):
    img = self.adv_flat[self.sample_num, :]

    if(self.shuff == False):
        # shuff is true for non-pgd attacks
        img = torch.from_numpy(np.reshape(img, (3, 32, 32)))
    else:
        img = torch.from_numpy(img).type(torch.FloatTensor)
    target = np.argmax(self.adv_dict["adv_labels"], axis=1)[self.sample_num]
    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    self.sample_num = self.sample_num + 1
    return img, target
Example #10
Source File: custom_datasets.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def __getitem__(self, index):
    img = self.adv_flat[self.sample_num, :]

    if(self.transp == False):
        # shuff is true for non-pgd attacks
        img = torch.from_numpy(np.reshape(img, (28, 28)))
    else:
        img = torch.from_numpy(img).type(torch.FloatTensor)
    target = np.argmax(self.adv_dict["adv_labels"], axis=1)[self.sample_num]
    # doing this so that it is consistent with all other datasets
    # to return a PIL Image
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    self.sample_num = self.sample_num + 1
    return img, target
Example #11
Source File: test_conv.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def exec_mnist(model, train_dataiter, val_dataiter):
    # print logging by default
    logging.basicConfig(level=logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logging.getLogger('').addHandler(console)

    model.fit(X=train_dataiter, eval_data=val_dataiter)
    logging.info('Finish fit...')
    prob = model.predict(val_dataiter)
    logging.info('Finish predict...')
    val_dataiter.reset()
    y = np.concatenate([batch.label[0].asnumpy() for batch in val_dataiter]).astype('int')
    py = np.argmax(prob, axis=1)
    acc1 = float(np.sum(py == y)) / len(y)
    logging.info('final accuracy = %f', acc1)
    assert(acc1 > 0.94)

# run as a script
Example #12
Source File: ctc_metrics.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def accuracy(self, label, pred):
    """ Simple accuracy measure: number of 100% accurate predictions divided by total number """
    hit = 0.
    total = 0.
    batch_size = label.shape[0]
    for i in range(batch_size):
        l = self._remove_blank(label[i])
        p = []
        for k in range(self.seq_len):
            p.append(np.argmax(pred[k * batch_size + i]))
        p = self.ctc_label(p)
        if len(p) == len(l):
            match = True
            for k, _ in enumerate(p):
                if p[k] != int(l[k]):
                    match = False
                    break
            if match:
                hit += 1.0
        total += 1.0
    assert total == batch_size
    return hit / total
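The per-timestep np.argmax above is the greedy step of CTC decoding; self.ctc_label then collapses repeats and removes blanks. A self-contained sketch of that idea, where the blank index of 0 and the toy posterior matrix are assumptions rather than details of the project:

import numpy as np

def greedy_ctc_decode(posteriors, blank=0):
    # posteriors: (seq_len, num_classes); take the argmax per timestep,
    # then collapse consecutive repeats and drop blanks
    best_path = np.argmax(posteriors, axis=1)
    decoded = []
    prev = blank
    for c in best_path:
        if c != blank and c != prev:
            decoded.append(int(c))
        prev = c
    return decoded

posteriors = np.array([[0.10, 0.80, 0.10],   # class 1
                       [0.10, 0.80, 0.10],   # class 1 (repeat, collapsed)
                       [0.90, 0.05, 0.05],   # blank
                       [0.10, 0.10, 0.80]])  # class 2
print(greedy_ctc_decode(posteriors))  # [1, 2]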
Example #13
Source File: lstm_ocr_infer.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("path", help="Path to the CAPTCHA image file")
    parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr')
    parser.add_argument("--epoch", help="Checkpoint epoch [Default 100]", type=int, default=100)
    args = parser.parse_args()

    init_state_names, init_state_arrays = lstm_init_states(batch_size=1)
    img = read_img(args.path)

    sample = SimpleBatch(
        data_names=['data'] + init_state_names,
        data=[mx.nd.array(img)] + init_state_arrays)

    mod = load_module(args.prefix, args.epoch, sample.data_names, sample.provide_data)

    mod.forward(sample)
    prob = mod.get_outputs()[0].asnumpy()

    prediction = CtcMetrics.ctc_label(np.argmax(prob, axis=-1).tolist())
    # Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit)
    prediction = [p - 1 for p in prediction]
    print("Digits:", prediction)
    return
Example #14
Source File: adaptive_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def binary_refinement(sess, Best_X_adv, X_adv, Y, ALPHA, ub, lb, model, dataset='cifar'):
    num_samples = np.shape(X_adv)[0]
    print(dataset)
    if(dataset == "mnist"):
        X_place = tf.placeholder(tf.float32, shape=[1, 1, 28, 28])
    else:
        X_place = tf.placeholder(tf.float32, shape=[1, 3, 32, 32])

    pred = model(X_place)

    for i in range(num_samples):
        logits_op = sess.run(pred, feed_dict={X_place: X_adv[i:i+1, :, :, :]})
        if(not np.argmax(logits_op) == np.argmax(Y[i, :])):
            # Success, increase alpha
            Best_X_adv[i, :, :, :] = X_adv[i, :, :, :]
            lb[i] = ALPHA[i, 0]
        else:
            ub[i] = ALPHA[i, 0]
        ALPHA[i] = 0.5 * (lb[i] + ub[i])
    return ALPHA, Best_X_adv
Example #15
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_attack_strength(self):
    """
    If clipping is not done at each iteration (not passing clip_min and
    clip_max to fgm), this attack fails by
    np.mean(orig_labels == new_labels) == .39.
    """
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)
    x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                    clip_min=0.5, clip_max=0.7,
                                    nb_iter=5)
    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #16
Source File: layers.py From deep-learning-note with MIT License

def forward(self, x):
    N, C, H, W = x.shape
    out_h = int(1 + (H - self.pool_h) / self.stride)
    out_w = int(1 + (W - self.pool_w) / self.stride)

    col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
    col = col.reshape(-1, self.pool_h * self.pool_w)

    arg_max = np.argmax(col, axis=1)
    out = np.max(col, axis=1)
    out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

    self.x = x
    self.arg_max = arg_max

    return out
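Caching arg_max is what allows the pooling layer's backward pass to route gradients only to the winning positions. The relationship between np.max and np.argmax relied on above can be checked directly with a toy col matrix:

import numpy as np

col = np.array([[3., 1., 2.],
                [0., 5., 4.]])
arg_max = np.argmax(col, axis=1)  # [0 1]
out = np.max(col, axis=1)         # [3. 5.]
# indexing each row at its argmax reproduces the row maxima
assert np.array_equal(out, col[np.arange(col.shape[0]), arg_max])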
Example #17
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_generate_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y_target=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example #18
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_generate_np_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                    binary_search_steps=3,
                                    initial_const=1,
                                    clip_min=-5, clip_max=5,
                                    batch_size=100, y_target=feed_labs)

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example #19
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), orig_labs] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #20
Source File: models.py From neuropythy with GNU Affero General Public License v3.0

def cleaned_visual_areas(visual_areas, faces):
    '''
    mdl.cleaned_visual_areas is the same as mdl.visual_areas except that
    vertices with visual area values of 0 (boundary values) are given the
    mode of their neighbors.
    '''
    area_ids = np.array(visual_areas)
    boundaryNeis = {}
    for (b, inside) in [(b, set(inside))
                        for t in faces.T
                        for (bound, inside) in [([i for i in t if area_ids[i] == 0],
                                                 [i for i in t if area_ids[i] != 0])]
                        if len(bound) > 0 and len(inside) > 0
                        for b in bound]:
        if b in boundaryNeis:
            boundaryNeis[b] |= inside
        else:
            boundaryNeis[b] = inside
    for (b, neis) in six.iteritems(boundaryNeis):
        area_ids[b] = np.argmax(np.bincount(area_ids[list(neis)]))
    return pimms.imm_array(np.asarray(area_ids, dtype=np.int))
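The np.argmax(np.bincount(...)) idiom at the end computes the mode of the neighbors' labels. In isolation, with label values chosen purely for illustration:

import numpy as np

neighbor_labels = np.array([2, 3, 3, 1, 3, 2])
mode = np.argmax(np.bincount(neighbor_labels))  # counts per label, then argmax
print(mode)  # 3, the most frequent label among the neighbors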
Example #21
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_generate_np_targeted_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
    x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                    binary_search_steps=3,
                                    initial_const=1,
                                    clip_min=-5, clip_max=5,
                                    batch_size=100, y_target=feed_labs)

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) > 0.9)
Example #22
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_generate_gives_adversarial_example(self):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    feed_labs = np.zeros((100, 2))
    feed_labs[np.arange(100), orig_labs] = 1
    x = tf.placeholder(tf.float32, x_val.shape)
    y = tf.placeholder(tf.float32, feed_labs.shape)

    x_adv_p = self.attack.generate(x, max_iterations=100,
                                   binary_search_steps=3,
                                   initial_const=1,
                                   clip_min=-5, clip_max=5,
                                   batch_size=100, y=y)
    self.assertEqual(x_val.shape, x_adv_p.shape)
    x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #23
Source File: 4_multi_classification.py From deep-learning-note with MIT License

def predict_all(X, all_theta):
    rows = X.shape[0]
    params = X.shape[1]
    num_labels = all_theta.shape[0]

    # same as before, insert ones to match the shape
    X = np.insert(X, 0, values=np.ones(rows), axis=1)

    # convert to matrices
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)

    # compute the class probability for each class on each training instance
    h = sigmoid(X * all_theta.T)

    # create array of the index with the maximum probability
    h_argmax = np.argmax(h, axis=1)

    # because our array was zero-indexed we need to add one for the true label prediction
    h_argmax = h_argmax + 1

    return h_argmax
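A plain-ndarray version of the same one-vs-all selection, where the sigmoid definition and the input values are assumptions made for the sketch:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

h = sigmoid(np.array([[-2.0, 0.5, 1.5],
                      [ 1.0, -1.0, 0.0]]))
# pick the classifier with the highest probability for each row,
# then shift to 1-indexed class labels
predictions = np.argmax(h, axis=1) + 1
print(predictions)  # [3 1]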
Example #24
Source File: test_attacks.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_attack_strength(self):
    """
    If clipping is not done at each iteration (not using clip_min and
    clip_max), this attack fails by
    np.mean(orig_labels == new_labels) == .5
    """
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)
    x_adv = self.attack.generate_np(x_val, eps=1.0, eps_iter=0.05,
                                    clip_min=0.5, clip_max=0.7,
                                    nb_iter=5)
    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example #25
Source File: 19_cnn.py From deep-learning-note with MIT License

def accuracy(self, x, t, batch_size=100):
    if t.ndim != 1:
        t = np.argmax(t, axis=1)

    acc = 0.0

    for i in range(int(x.shape[0] / batch_size)):
        tx = x[i * batch_size:(i + 1) * batch_size]
        tt = t[i * batch_size:(i + 1) * batch_size]
        y = self.predict(tx)
        y = np.argmax(y, axis=1)
        acc += np.sum(y == tt)

    return acc / x.shape[0]
Example #26
Source File: gather.py From models with MIT License

def deduplicate_vars(df):
    diff_cols = df.columns.values[df.columns.astype(str).str.contains("diff")]
    assert len(diff_cols) == 1
    # keep, for each index group, the row with the largest diff value
    return df.groupby(df.index).apply(lambda x: x.iloc[np.argmax(x[diff_cols[0]].values), :])

# Modify here: add the _isna column and average labranchor if needed; also clump the variants together.
Example #27
Source File: 10_two_layer_net.py From deep-learning-note with MIT License

def accuracy(self, x, t):
    y = self.predict(x)
    y = np.argmax(y, axis=1)
    if t.ndim != 1:
        t = np.argmax(t, axis=1)

    accuracy = np.sum(y == t) / float(x.shape[0])
    return accuracy
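The t.ndim check converts one-hot targets to class indices so they compare elementwise with the argmax predictions. The same conversion in isolation, with toy labels:

import numpy as np

t_onehot = np.array([[0, 1, 0],
                     [1, 0, 0]])
y_pred = np.array([1, 2])          # predicted class indices
t = np.argmax(t_onehot, axis=1)    # [1 0]
accuracy = np.sum(y_pred == t) / float(len(t))
print(accuracy)  # 0.5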
Example #28
Source File: gather.py From models with MIT License

def average_labranchor(df, model_name, col_types):
    import numpy as np
    # choose the maximum diff
    diff_cols = df.columns.values[df.columns.astype(str).str.contains("DIFF")]
    model_outputs = [int(el.split("_")[-1]) for el in diff_cols]
    model_outputs_order = np.argsort(model_outputs)
    # select the model output that gives the maximum absolute difference
    max_col_id = df[diff_cols[model_outputs_order]].abs().values.argmax(axis=1)
    # just to be sure it will work:
    assert np.all(df[diff_cols[model_outputs_order]].abs().values[np.arange(len(max_col_id)), max_col_id] ==
                  df[diff_cols].abs().max(axis=1).values)

    averaged = {}
    usable_columns = df.columns.tolist()
    for ct in col_types:
        col_sel = [col for col in usable_columns if ct in col]
        usable_columns = [col for col in usable_columns if col not in col_sel]
        if len(col_sel) == 0:
            continue
        # average
        model_outputs = [int(el.split("_")[-1]) for el in col_sel]
        model_outputs_order = np.argsort(model_outputs)
        # use the column selection from before
        keep_vals = df[np.array(col_sel)[model_outputs_order]].values[np.arange(len(max_col_id)), max_col_id]
        averaged[model_name + ct.lower()] = keep_vals

    return pd.DataFrame(averaged, index=df.index)
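The core trick here is row-wise gathering: argmax(axis=1) yields one column index per row, and values[np.arange(n), max_col_id] pulls exactly that entry out of each row. Reduced to its essentials with an assumed toy array:

import numpy as np

vals = np.array([[-0.2, 0.9, 0.1],
                 [ 0.4, -0.8, 0.3]])
max_col_id = np.abs(vals).argmax(axis=1)              # [1 1]
picked = vals[np.arange(vals.shape[0]), max_col_id]   # signed values at max |value|
print(picked)  # [ 0.9 -0.8]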
Example #29
Source File: model.py From Image-Caption-Generator with MIT License

def generate_caption(model, tokenizer, image, max_length):
    # Seed the generation process
    in_text = 'startseq'
    # Iterate over the whole length of the sequence
    for _ in range(max_length):
        # Integer encode input sequence
        sequence = tokenizer.texts_to_sequences([in_text])[0]
        # Pad input
        sequence = pad_sequences([sequence], maxlen=max_length)
        # Predict next word: the model outputs a probability distribution
        # over all words in the vocabulary
        yhat = model.predict([image, sequence], verbose=0)
        # The output vector represents a probability distribution; take the
        # class with maximum probability and convert to an integer
        yhat = np.argmax(yhat)
        # Map integer back to word
        word = int_to_word(yhat, tokenizer)
        # Stop if we cannot map the word
        if word is None:
            break
        # Append as input for generating the next word
        in_text += ' ' + word
        # Stop if we predict the end of the sequence
        if word == 'endseq':
            break
    return in_text
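Each loop iteration performs one greedy decoding step: np.argmax over the vocabulary distribution picks the single most likely next word. The core of that step, stripped of the model (the toy distribution and vocabulary mapping are assumptions):

import numpy as np

vocab = {0: None, 1: 'startseq', 2: 'dog', 3: 'runs', 4: 'endseq'}
yhat = np.array([0.01, 0.02, 0.70, 0.17, 0.10])  # fake next-word distribution
next_word = vocab[int(np.argmax(yhat))]
print(next_word)  # dog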
Example #30
Source File: test_utils.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License

def test_random_targets_one_hot_single_label(self):
    # Test utils.random_targets with a single one-hot encoded label
    gt = np.asarray([0, 0, 1, 0, 0])
    gt = gt.reshape((1, 5))
    gt_labels = np.argmax(gt, axis=1)

    rt = utils.random_targets(gt, 5)

    # Make sure random_targets returns one-hot encoded labels
    self.assertTrue(len(rt.shape) == 2)
    rt_labels = np.argmax(rt, axis=1)

    # Make sure all labels are different from the correct labels
    self.assertTrue(np.all(rt_labels != gt_labels))