Python numpy.argmax() Examples

The following are 30 code examples showing how to use numpy.argmax(). They are extracted from open source projects; the project, author, file, and license are listed above each example.


You may also want to check out the other available functions and classes of the numpy module.
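As a quick reference before the project examples, here is a minimal, self-contained sketch of the basic behavior of numpy.argmax() (the small arrays are made up purely for illustration):

import numpy as np

a = np.array([3, 7, 2, 7])
print(np.argmax(a))              # 1 -- index of the first occurrence of the maximum

probs = np.array([[0.1, 0.9],
                  [0.8, 0.2]])
print(np.argmax(probs))          # 1 -- without an axis, the flattened array is searched
print(np.argmax(probs, axis=1))  # [1 0] -- per-row index of the maximum

The per-row form, np.argmax(..., axis=1), is the pattern most of the examples below use to turn probability or one-hot label arrays into class indices.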

Example 1
Project: FRIDA   Author: LCAV   File: doa.py    License: MIT License
def _peaks1D(self):
        if self.num_src == 1:
            self.src_idx[0] = np.argmax(self.P)
            self.sources[:, 0] = self.loc[:, self.src_idx[0]]
            self.phi_recon = self.theta[self.src_idx[0]]
        else:
            peak_idx = []
            n = self.P.shape[0]
            for i in range(self.num_loc):
                # straightforward peak finding
                if self.P[i] >= self.P[(i-1)%n] and self.P[i] > self.P[(i+1)%n]:
                    if len(peak_idx) == 0 or peak_idx[-1] != i-1:
                        if not (i == self.num_loc and self.P[i] == self.P[0]):
                            peak_idx.append(i)

            peaks = self.P[peak_idx]
            max_idx = np.argsort(peaks)[-self.num_src:]
            self.src_idx = [peak_idx[k] for k in max_idx]
            self.sources = self.loc[:, self.src_idx]
            self.phi_recon = self.theta[self.src_idx]
            self.num_src = len(self.src_idx)


Example 2
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: predict.py    License: MIT License
def process_box(self, b, h, w, threshold):
	max_indx = np.argmax(b.probs)
	max_prob = b.probs[max_indx]
	label = self.meta['labels'][max_indx]
	if max_prob > threshold:
		left  = int ((b.x - b.w/2.) * w)
		right = int ((b.x + b.w/2.) * w)
		top   = int ((b.y - b.h/2.) * h)
		bot   = int ((b.y + b.h/2.) * h)
		if left  < 0    :  left = 0
		if right > w - 1: right = w - 1
		if top   < 0    :   top = 0
		if bot   > h - 1:   bot = h - 1
		mess = '{}'.format(label)
		return (left, right, top, bot, mess, max_indx, max_prob)
	return None 
Example 3
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    License: MIT License
def train(self):
        while (self.epoch < self.option.max_epoch and not self.early_stopped):
            self.one_epoch_train()
            self.one_epoch_valid()
            self.one_epoch_test()
            self.epoch += 1
            model_path = self.saver.save(self.sess, 
                                         self.option.model_path,
                                         global_step=self.epoch)
            print("Model saved at %s" % model_path)
            
            if self.early_stop():
                self.early_stopped = True
                print("Early stopped at epoch %d" % (self.epoch))
        
        all_test_in_top = [np.mean(x[1]) for x in self.test_stats]
        best_test_epoch = np.argmax(all_test_in_top)
        best_test = all_test_in_top[best_test_epoch]
        
        msg = "Best test in top: %0.4f at epoch %d." % (best_test, best_test_epoch + 1)       
        print(msg)
        self.log_file.write(msg + "\n")
        pickle.dump([self.train_stats, self.valid_stats, self.test_stats],
                    open(os.path.join(self.option.this_expsdir, "results.pckl"), "w")) 
Example 4
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    License: BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):

        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target 
Example 5
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    License: BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):

        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target 
Example 6
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    License: BSD 3-Clause "New" or "Revised" License
def __getitem__(self, index):
        img=self.adv_flat[self.sample_num,:]
        if(self.transp == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(28,28)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        self.sample_num = self.sample_num + 1
        return img, target 
Example 7
Project: neural-fingerprinting   Author: StephanZheng   File: adaptive_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def binary_refinement(sess,Best_X_adv,
                      X_adv, Y, ALPHA, ub, lb, model, dataset='cifar'):
    num_samples = np.shape(X_adv)[0]
    print(dataset)
    if(dataset=="mnist"):
        X_place = tf.placeholder(tf.float32, shape=[1, 1, 28, 28])
    else:
        X_place = tf.placeholder(tf.float32, shape=[1, 3, 32, 32])

    pred = model(X_place)
    for i in range(num_samples):
        logits_op = sess.run(pred,feed_dict={X_place:X_adv[i:i+1,:,:,:]})
        if(not np.argmax(logits_op) == np.argmax(Y[i,:])):
            # Success, increase alpha
            Best_X_adv[i,:,:,:] = X_adv[i,:,:,]
            lb[i] = ALPHA[i,0]
        else:
            ub[i] = ALPHA[i,0]
        ALPHA[i] = 0.5*(lb[i] + ub[i])
    return ALPHA, Best_X_adv 
Example 8
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_attack_strength(self):
        """
        If clipping is not done at each iteration (not passing clip_min and
        clip_max to fgm), this attack fails by
        np.mean(orig_labels == new_labels) == .39.
        """
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                        clip_min=0.5, clip_max=0.7,
                                        nb_iter=5)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_generate_np_targeted_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=100, y_target=feed_labs)

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
                        > 0.9) 
Example 10
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_generate_gives_adversarial_example(self):

        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), orig_labs] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 11
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_generate_np_targeted_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=100, y_target=feed_labs)

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs) >
                        0.9) 
Example 12
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_generate_gives_adversarial_example(self):

        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), orig_labs] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 13
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_generate_gives_adversarial_example(self):

        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        x = tf.placeholder(tf.float32, x_val.shape)

        x_adv_p = self.attack.generate(x, over_shoot=0.02, max_iter=50,
                                       nb_candidate=2, clip_min=-5, clip_max=5)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val})

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 14
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_attack_strength(self):
        """
        If clipping is not done at each iteration (not using clip_min and
        clip_max), this attack fails by
        np.mean(orig_labels == new_labels) == .5
        """
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, eps_iter=0.05,
                                        clip_min=0.5, clip_max=0.7,
                                        nb_iter=5)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1) 
Example 15
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    License: BSD 3-Clause "New" or "Revised" License
def test_generate_targeted_gives_adversarial_example(self):
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y_target=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == new_labs)
                        > 0.9) 
Example 16
Project: neural-fingerprinting   Author: StephanZheng   File: utils_tf.py    License: BSD 3-Clause "New" or "Revised" License
def model_argmax(sess, x, predictions, samples, feed=None):
    """
    Helper function that computes the current class prediction
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param samples: numpy array with input samples (dims must match x)
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    feed_dict = {x: samples}
    if feed is not None:
        feed_dict.update(feed)
    probabilities = sess.run(predictions, feed_dict)

    if samples.shape[0] == 1:
        return np.argmax(probabilities)
    else:
        return np.argmax(probabilities, axis=1) 
Example 17
Project: mlimages   Author: icoxfog417   File: chainer_alex.py    License: MIT License
def predict(limit):
    _limit = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    i = 0
    for arr, im in td.generate():
        x = np.ndarray((1,) + arr.shape, arr.dtype)
        x[0] = arr
        x = chainer.Variable(np.asarray(x), volatile="on")
        y = model.predict(x)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        i += 1
        if i >= _limit:
            break 
Example 18
Project: models   Author: kipoi   File: model.py    License: MIT License
def _get_bp_indexes_labranchor(self, soi):
        """
        Get indexes of branch point regions in given sequences.

        :param soi: batch of sequences of interest for introns (intron-3..intron+6)
        :return: array of predicted bp indexes
        """
        encoded = [onehot(str(seq)[self.acc_i - 70:self.acc_i]) for seq in np.nditer(soi)]
        labr_in = np.stack(encoded, axis=0)
        out = self.labranchor.predict_on_batch(labr_in)
        # for each row, pick the base with max branchpoint probability, and get its index
        max_indexes = np.apply_along_axis(lambda x: self.acc_i - 70 + np.argmax(x), axis=1, arr=out)
        # self.write_bp(max_indexes)
        return max_indexes

# TODO boilerplate
#    def write_bp(self, max_indexes):
#        max_indexes = [str(seq) for seq in np.nditer(max_indexes)]
#        with open(''.join([this_dir, "/../customBP/example_files/bp_idx_chr21_labr.txt"]), "a") as bp_idx_file:
#            bp_idx_file.write('\n'.join(max_indexes))
#            bp_idx_file.write('\n')
#            bp_idx_file.close() 
Example 19
Project: deep-learning-note   Author: wdxtub   File: layers.py    License: MIT License
def forward(self, x):
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        col = col.reshape(-1, self.pool_h * self.pool_w)

        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        self.x = x
        self.arg_max = arg_max

        return out 
Example 20
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    License: MIT License
def predict_all(X, all_theta):
    rows = X.shape[0]
    params = X.shape[1]
    num_labels = all_theta.shape[0]
    
    # same as before, insert ones to match the shape
    X = np.insert(X, 0, values=np.ones(rows), axis=1)
    
    # convert to matrices
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)
    
    # compute the class probability for each class on each training instance
    h = sigmoid(X * all_theta.T)
    
    # create array of the index with the maximum probability
    h_argmax = np.argmax(h, axis=1)
    
    # because our array was zero-indexed we need to add one for the true label prediction
    h_argmax = h_argmax + 1
    
    return h_argmax 
Example 21
Project: neuropythy   Author: noahbenson   File: models.py    License: GNU Affero General Public License v3.0
def cleaned_visual_areas(visual_areas, faces):
        '''
        mdl.cleaned_visual_areas is the same as mdl.visual_areas except that vertices with visual
        area values of 0 (boundary values) are given the mode of their neighbors.
        '''
        area_ids = np.array(visual_areas)
        boundaryNeis = {}
        for (b,inside) in [(b, set(inside))
                           for t in faces.T
                           for (bound, inside) in [([i for i in t if area_ids[i] == 0],
                                                    [i for i in t if area_ids[i] != 0])]
                           if len(bound) > 0 and len(inside) > 0
                           for b in bound]:
            if b in boundaryNeis: boundaryNeis[b] |= inside
            else:                 boundaryNeis[b] =  inside
        for (b,neis) in six.iteritems(boundaryNeis):
            area_ids[b] = np.argmax(np.bincount(area_ids[list(neis)]))
        return pimms.imm_array(np.asarray(area_ids, dtype=np.int)) 
Example 22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("path", help="Path to the CAPTCHA image file")
    parser.add_argument("--prefix", help="Checkpoint prefix [Default 'ocr']", default='ocr')
    parser.add_argument("--epoch", help="Checkpoint epoch [Default 100]", type=int, default=100)
    args = parser.parse_args()

    init_state_names, init_state_arrays = lstm_init_states(batch_size=1)
    img = read_img(args.path)

    sample = SimpleBatch(
        data_names=['data'] + init_state_names,
        data=[mx.nd.array(img)] + init_state_arrays)

    mod = load_module(args.prefix, args.epoch, sample.data_names, sample.provide_data)

    mod.forward(sample)
    prob = mod.get_outputs()[0].asnumpy()

    prediction = CtcMetrics.ctc_label(np.argmax(prob, axis=-1).tolist())
    # Predictions are 1 to 10 for digits 0 to 9 respectively (prediction 0 means no-digit)
    prediction = [p - 1 for p in prediction]
    print("Digits:", prediction)
    return 
Example 23
def accuracy(self, label, pred):
        """ Simple accuracy measure: number of 100% accurate predictions divided by total number """
        hit = 0.
        total = 0.
        batch_size = label.shape[0]
        for i in range(batch_size):
            l = self._remove_blank(label[i])
            p = []
            for k in range(self.seq_len):
                p.append(np.argmax(pred[k * batch_size + i]))
            p = self.ctc_label(p)
            if len(p) == len(l):
                match = True
                for k, _ in enumerate(p):
                    if p[k] != int(l[k]):
                        match = False
                        break
                if match:
                    hit += 1.0
            total += 1.0
        assert total == batch_size
        return hit / total 
Example 24
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_conv.py    License: Apache License 2.0
def exec_mnist(model, train_dataiter, val_dataiter):
    # print logging by default
    logging.basicConfig(level=logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    logging.getLogger('').addHandler(console)

    model.fit(X=train_dataiter,
              eval_data=val_dataiter)
    logging.info('Finish fit...')
    prob = model.predict(val_dataiter)
    logging.info('Finish predict...')
    val_dataiter.reset()
    y = np.concatenate([batch.label[0].asnumpy() for batch in val_dataiter]).astype('int')
    py = np.argmax(prob, axis=1)
    acc1 = float(np.sum(py == y)) / len(y)
    logging.info('final accuracy = %f', acc1)
    assert(acc1 > 0.94)

Example 25
Project: libTLDA   Author: wmkouw   File: tcpr.py    License: MIT License
def predict(self, Z_):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples by D features)

        Returns
        -------
        preds : array
            label predictions (M samples by 1)

        """
        # Data shape
        M, D = Z_.shape

        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == D:
                raise ValueError('''Test data is of different dimensionality
                                 than training data.''')

        if self.loss in ['lda', 'qda']:

            # Compute probabilities under each distribution
            probs = self.neg_log_likelihood(Z_, self.parameters)

            # Take largest probability as predictions
            preds = np.argmax(probs, axis=1)

        # Map predictions back to original labels
        preds = self.classes[preds]

        # Return predictions array
        return preds 
Example 26
Project: libTLDA   Author: wmkouw   File: suba.py    License: MIT License
def predict(self, Z, zscore=False):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples x D features)
        zscore : boolean
            whether to transform the data using z-scoring (def: false)

        Returns
        -------
        preds : array
            label predictions (M samples x 1)

        """
        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == Z.shape[1]:
                raise ValueError("""Test data is of different dimensionality
                                 than training data.""")

        # Call predict_proba() for posterior probabilities
        probs = self.predict_proba(Z, zscore=zscore)

        # Take maximum over classes for indexing class list
        preds = self.K[np.argmax(probs, axis=1)]

        # Return predictions array
        return preds 
Example 27
Project: libTLDA   Author: wmkouw   File: suba.py    License: MIT License
def predict(self, Z, zscore=False):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples x D features)
        zscore : boolean
            whether to transform the data using z-scoring (def: false)

        Returns
        -------
        preds : array
            label predictions (M samples x 1)

        """
        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == Z.shape[1]:
                raise ValueError("""Test data is of different dimensionality
                                 than training data.""")

        # Call predict_proba() for posterior probabilities
        probs = self.predict_proba(Z, zscore=zscore)

        # Take maximum over classes for indexing class list
        preds = self.K[np.argmax(probs, axis=1)]

        # Return predictions array
        return preds 
Example 28
Project: libTLDA   Author: wmkouw   File: flda.py    License: MIT License
def predict(self, Z_):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples by D features)

        Returns
        -------
        preds : array
            label predictions (M samples by 1)

        """
        # Data shape
        M, D = Z_.shape

        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == D:
                raise ValueError('''Test data is of different dimensionality
                                 than training data.''')

        # Predict target labels
        preds = np.argmax(np.dot(Z_, self.theta), axis=1)

        # Map predictions back to labels
        preds = self.classes[preds]

        # Return predictions array
        return preds 
Example 29
Project: libTLDA   Author: wmkouw   File: rba.py    License: MIT License
def predict(self, Z):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples by D features)

        Returns
        -------
        preds : array
            label predictions (M samples by 1)

        """
        # Data shape
        M, D = Z.shape

        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == D:
                raise ValueError('''Test data is of different dimensionality
                                 than training data.''')

        # Compute posteriors
        post = self.predict_proba(Z)

        # Predictions through max-posteriors
        preds = np.argmax(post, axis=1)

        # Map predictions back to original labels
        return self.classes[preds] 
Example 30
Project: vergeml   Author: mme   File: imagenet.py    License: MIT License
def _evaluate_final(self, model, xy_test, batch_size, history):
        res = {}
        pred_test = None

        if 'val_acc' in history.history:
            res['val_acc'] = max(history.history['val_acc'])
            rev_ix = -1 - list(reversed(history.history['val_acc'])).index(res['val_acc'])
            res['val_loss'] = history.history['val_loss'][rev_ix]

        res['acc'] = history.history['acc'][-1]
        res['loss'] = history.history['loss'][-1]

        if len(xy_test[0]):
            from sklearn.metrics import classification_report, roc_auc_score
            # evaluate with test data
            x_test, y_test = xy_test
            pred_test = model.predict(x_test, batch_size=batch_size, verbose=0)
            test_loss, test_acc = model.evaluate(x_test, y_test, batch_size=batch_size, verbose=0)
            res['test_loss'] = test_loss
            res['test_acc'] = test_acc

            report = classification_report(y_true = np.argmax(y_test, axis=1),
                                           y_pred = np.argmax(pred_test, axis=1),
                                           target_names=self.labels,
                                           digits=4,
                                           output_dict=True)

            res['auc'] = roc_auc_score(y_test.astype(np.int), pred_test)

            for label in self.labels:
                stats = report[label]
                res[label+"-precision"] = stats['precision']
                res[label+"-recall"] = stats['recall']
                res[label+"-f1"] = stats['f1-score']

        return pred_test, res