Python numpy.argmax() Examples

The following are code examples for showing how to use numpy.argmax(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet.py    MIT License 10 votes vote down vote up
def test(model, data):
    """Evaluate a trained capsule network and visualise reconstructions.

    Prints the test accuracy, writes a grid of the first 50 test images
    alongside their reconstructions to ./real_and_recon.png, and shows it.
    """
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    separator = '-' * 50
    print(separator)
    # Accuracy = fraction of samples whose argmax prediction matches the label.
    n_correct = np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))
    print('Test acc:', n_correct / y_test.shape[0])

    import matplotlib.pyplot as plt
    from utils import combine_images
    from PIL import Image

    # Stack 50 originals with their 50 reconstructions into one image grid.
    img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print(separator)
    plt.imshow(plt.imread("real_and_recon.png"))
    plt.show()
Example 2
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 8 votes vote down vote up
def log():
    """Evaluate on the training subset and validation set, log the metrics,
    and (when submitting) checkpoint the model on a new best va_acc."""
    global best_score
    print("Logging")
    # Training-side metrics are computed on the first n_valid examples only.
    tr_logits, tr_cost = iter_apply(
        trX[:n_valid], trM[:n_valid], trY[:n_valid])
    va_logits, va_cost = iter_apply(vaX, vaM, vaY)
    tr_cost = tr_cost / len(trY[:n_valid])
    va_cost = va_cost / n_valid
    tr_acc = 100. * accuracy_score(trY[:n_valid], np.argmax(tr_logits, 1))
    va_acc = 100. * accuracy_score(vaY, np.argmax(va_logits, 1))
    logger.log(n_epochs=n_epochs, n_updates=n_updates, tr_cost=tr_cost,
               va_cost=va_cost, tr_acc=tr_acc, va_acc=va_acc)
    print('%d %d %.3f %.3f %.2f %.2f' %
          (n_epochs, n_updates, tr_cost, va_cost, tr_acc, va_acc))
    if submit:
        score = va_acc
        if score > best_score:
            # New best validation accuracy: persist the parameters.
            best_score = score
            path = os.path.join(save_dir, desc, 'best_params')
            chainer.serializers.save_npz(make_path(path), model)
Example 3
Project: building-boundary   Author: Geodan   File: regularize.py    MIT License 6 votes vote down vote up
def find_main_orientation(segments):
    """
    Determine the dominant wall orientation of a building (part).

    Parameters
    ----------
    segments : list of BoundarySegment
        The boundary (wall) segments of the building (part).

    Returns
    -------
    main_orientation : float
        Orientation (in radians) of the segment that is supported by
        the largest number of points.
    """
    support_counts = [len(s.points) for s in segments]
    best_segment = np.argmax(support_counts)
    return segments[best_segment].orientation
Example 4
Project: FRIDA   Author: LCAV   File: doa.py    MIT License 6 votes vote down vote up
def _peaks1D(self):
        """Find source directions as peaks of the 1-D spatial spectrum self.P.

        Populates self.src_idx, self.sources and self.phi_recon; when more
        than one source is requested, self.num_src may shrink to the number
        of peaks actually found.
        """
        if self.num_src == 1:
            # Single source: the global maximum of the spectrum suffices.
            self.src_idx[0] = np.argmax(self.P)
            self.sources[:, 0] = self.loc[:, self.src_idx[0]]
            self.phi_recon = self.theta[self.src_idx[0]]
        else:
            peak_idx = []
            n = self.P.shape[0]
            for i in range(self.num_loc):
                # straightforward peak finding
                # Local maximum on the circular grid (>= left / > right keeps
                # the first sample of a flat plateau).
                if self.P[i] >= self.P[(i-1)%n] and self.P[i] > self.P[(i+1)%n]:
                    # Reject a candidate immediately adjacent to the last peak.
                    if len(peak_idx) == 0 or peak_idx[-1] != i-1:
                        # NOTE(review): i == self.num_loc can never hold inside
                        # range(self.num_loc); possibly num_loc - 1 was meant
                        # (wrap-around duplicate guard) -- confirm upstream.
                        if not (i == self.num_loc and self.P[i] == self.P[0]):
                            peak_idx.append(i)

            peaks = self.P[peak_idx]
            # Keep the num_src highest peaks (ascending argsort, take tail).
            max_idx = np.argsort(peaks)[-self.num_src:]
            self.src_idx = [peak_idx[k] for k in max_idx]
            self.sources = self.loc[:, self.src_idx]
            self.phi_recon = self.theta[self.src_idx]
            self.num_src = len(self.src_idx)


# ------------------Miscellaneous Functions---------------------# 
Example 5
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: predict.py    MIT License 6 votes vote down vote up
def process_box(self, b, h, w, threshold):
    """Convert a raw detection box into clamped pixel coordinates.

    Returns (left, right, top, bot, label, class_index, probability) when
    the best class probability exceeds `threshold`, otherwise None.
    """
    best = np.argmax(b.probs)
    best_prob = b.probs[best]
    label = self.meta['labels'][best]
    if not (best_prob > threshold):
        return None
    # Box centre/size are normalised; scale to pixels and clamp to the frame.
    left = int((b.x - b.w / 2.) * w)
    right = int((b.x + b.w / 2.) * w)
    top = int((b.y - b.h / 2.) * h)
    bot = int((b.y + b.h / 2.) * h)
    left = max(left, 0)
    right = min(right, w - 1)
    top = max(top, 0)
    bot = min(bot, h - 1)
    mess = '{}'.format(label)
    return (left, right, top, bot, mess, best, best_prob)
Example 6
Project: Neural-LP   Author: fanyangxyz   File: experiment.py    MIT License 6 votes vote down vote up
def train(self):
        """Run the training loop with per-epoch checkpointing.

        Trains for at most option.max_epoch epochs (stopping early when
        early_stop() fires), saves the model every epoch, then reports the
        epoch with the best mean test "in top" statistic and pickles all
        collected statistics to results.pckl.
        """
        while (self.epoch < self.option.max_epoch and not self.early_stopped):
            self.one_epoch_train()
            self.one_epoch_valid()
            self.one_epoch_test()
            self.epoch += 1
            model_path = self.saver.save(self.sess, 
                                         self.option.model_path,
                                         global_step=self.epoch)
            print("Model saved at %s" % model_path)
            
            if self.early_stop():
                self.early_stopped = True
                print("Early stopped at epoch %d" % (self.epoch))
        
        # Best epoch = the one with the highest mean "in top" test statistic.
        all_test_in_top = [np.mean(x[1]) for x in self.test_stats]
        best_test_epoch = np.argmax(all_test_in_top)
        best_test = all_test_in_top[best_test_epoch]
        
        msg = "Best test in top: %0.4f at epoch %d." % (best_test, best_test_epoch + 1)       
        print(msg)
        self.log_file.write(msg + "\n")
        # BUG FIX: pickle needs a binary-mode handle (text mode "w" raises
        # TypeError on Python 3), and the original leaked the file object.
        with open(os.path.join(self.option.this_expsdir, "results.pckl"), "wb") as f:
            pickle.dump([self.train_stats, self.valid_stats, self.test_stats], f)
Example 7
Project: Sessile.drop.analysis   Author: mvgorcum   File: edge_detection.py    GNU General Public License v3.0 6 votes vote down vote up
def linear_subpixel_detection(image, thresh):
    """Detect left/right drop edges per image row with linear subpixel
    interpolation.

    Parameters
    ----------
    image : 2-D ndarray
        Row-major intensity image.
    thresh : scalar
        The edge is the first pixel whose intensity drops below this.

    Returns
    -------
    edgeleft, edgeright : 1-D ndarrays (one entry per row)
        Subpixel edge positions; 0 where no edge was found.
    """
    framesize = image.shape
    edgeleft = np.zeros(framesize[0])
    edgeright = np.zeros(framesize[0])
    # NOTE(review): the last row is never processed (range stops one short);
    # kept as-is to preserve the original behaviour.
    for y in range(framesize[0] - 1):
        # Pixel-scale edge: first position whose intensity drops below thresh.
        edgeleft[y] = np.argmax(image[y, 0:framesize[1]] < thresh)
        if edgeleft[y] != 0:
            # FIX: np.int / np.float_ were removed from modern NumPy; use the
            # builtin int/float conversions instead.
            ileft = int(edgeleft[y])
            # Linear interpolation corr = (thresh - I[e-1]) / (I[e] - I[e-1]);
            # shift one pixel left so the value sits on the edge itself.
            subpxcorr = (thresh - float(image[y, ileft - 1])) / (
                float(image[y, ileft]) - float(image[y, ileft - 1]))
            edgeleft[y] = edgeleft[y] + subpxcorr - 1
        # Right edge: scan the flipped row, since np.argmax returns the first
        # instance of the maximum value.
        edgeright[y] = int(framesize[1] - np.argmax(image[y, range(framesize[1] - 1, 0, -1)] < thresh))
        if edgeright[y] != framesize[1]:
            iright = int(edgeright[y])
            subpxcorr = (thresh - float(image[y, iright - 1])) / (
                float(image[y, iright]) - float(image[y, iright - 1]))
            edgeright[y] = edgeright[y] + subpxcorr - 1
        else:
            # No pixel below the threshold on this row.
            edgeright[y] = 0
    return edgeleft, edgeright
Example 8
Project: Sessile.drop.analysis   Author: mvgorcum   File: edge_detection.py    GNU General Public License v3.0 6 votes vote down vote up
def errorfunction_subpixel_detection(image, thresh):
    """Detect left/right drop edges per image row, refining each edge by a
    least-squares fit of an error function around it.

    Parameters
    ----------
    image : 2-D ndarray
        Row-major intensity image.
    thresh : scalar
        The edge is the first pixel whose intensity drops below this.

    Returns
    -------
    edgeleft, edgeright : 1-D ndarrays (one entry per row)
        Subpixel edge positions; 0 where no edge was found.
    """
    # FIX: np.int was removed from modern NumPy; a plain int literal suffices.
    erffitsize = 40  # half-width of the fit window around each edge
    def errorfunction(x, xdata, y):
        # Residuals of a scaled/shifted erf model for the least-squares fit.
        return x[0] * (1 + sp.special.erf(xdata * x[1] + x[2])) + x[3] - y
    framesize = image.shape
    edgeleft = np.zeros(framesize[0])
    edgeright = np.zeros(framesize[0])
    # NOTE(review): the last row is never processed (range stops one short),
    # and the window bound below compares against the row count
    # (framesize[0]) rather than the width -- both kept to preserve behaviour.
    for y in range(framesize[0] - 1):
        # Pixel-scale edge: first position whose intensity drops below thresh.
        edgeleft[y] = np.argmax(image[y, 0:framesize[1]] < thresh)
        if (edgeleft[y] - erffitsize) >= 0 and (edgeleft[y] - erffitsize) <= framesize[0]:
            ileft = int(edgeleft[y])
            # Slice the window around the edge and fit the erf model to it.
            fitparts = np.array(image[y, range(ileft - erffitsize, ileft + erffitsize)])
            guess = (max(fitparts) - min(fitparts)) / 2, -.22, 0, min(fitparts)
            lstsqrsol = sp.optimize.least_squares(
                errorfunction, guess,
                args=(np.array(range(-erffitsize, erffitsize)), fitparts))  # least squares fit
            # Subpixel position = centre of the fitted erf (-x2/x1).
            edgeleft[y] = edgeleft[y] - lstsqrsol.x[2] / lstsqrsol.x[1]
        # Right edge: scan the flipped row, since np.argmax returns the first
        # instance of the maximum value.
        edgeright[y] = int(framesize[1] - np.argmax(image[y, range(framesize[1] - 1, 0, -1)] < thresh))
        if (edgeright[y] - erffitsize) >= 0 and (edgeright[y] - erffitsize) <= framesize[0]:
            iright = int(edgeright[y])
            fitparts = np.array(image[y, range(iright - erffitsize, iright + erffitsize)])
            guess = (max(fitparts) - min(fitparts)) / 2, .22, 0, min(fitparts)
            lstsqrsol = sp.optimize.least_squares(
                errorfunction, guess,
                args=(np.array(range(-erffitsize, erffitsize)), fitparts))
            edgeright[y] = edgeright[y] - lstsqrsol.x[2] / lstsqrsol.x[1]
        elif edgeright[y] == framesize[1]:
            # No pixel below the threshold on this row.
            edgeright[y] = 0

    return edgeleft, edgeright
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __getitem__(self, index):

        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target 
Example 10
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __getitem__(self, index):

        img=self.adv_flat[self.sample_num,:]

        if(self.shuff == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(3,32,32)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        self.sample_num = self.sample_num + 1
        return img, target 
Example 11
Project: neural-fingerprinting   Author: StephanZheng   File: custom_datasets.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __getitem__(self, index):
        img=self.adv_flat[self.sample_num,:]
        if(self.transp == False):
            # shuff is true for non-pgd attacks
            img = torch.from_numpy(np.reshape(img,(28,28)))
        else:
            img = torch.from_numpy(img).type(torch.FloatTensor)
        target = np.argmax(self.adv_dict["adv_labels"],axis=1)[self.sample_num]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image

        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        self.sample_num = self.sample_num + 1
        return img, target 
Example 12
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_attack_strength(self):
        """
        If clipping is not done at each iteration (not passing clip_min and
        clip_max to fgm), this attack fails by
        np.mean(orig_labels == new_labels) == .39.
        """
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                        clip_min=0.5, clip_max=0.7,
                                        nb_iter=5)

        # The attack should change the prediction for >90% of the inputs.
        labels_before = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(labels_before == labels_after) < 0.1)
Example 13
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_np_targeted_gives_adversarial_example(self):
        """generate_np with y_target should reach the targets on >90% of inputs."""
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        # One-hot targets (note: randint(0, 1) always draws class 0).
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=100, y_target=feed_labs)

        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == labels_after)
                        > 0.9)
Example 14
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_gives_adversarial_example(self):
        """The symbolic generate() op should flip >90% of the predictions."""
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        labels_before = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        # One-hot encode the original predictions as the attack's y input.
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), labels_before] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(labels_before == labels_after) < 0.1)
Example 15
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_np_targeted_gives_adversarial_example(self):
        """Targeted generate_np should hit the requested labels >90% of the time."""
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        # Build one-hot targets (randint(0, 1) always yields class 0).
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=100, y_target=feed_labs)

        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == labels_after) >
                        0.9)
Example 16
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_gives_adversarial_example(self):
        """Untargeted symbolic attack: >90% of predictions must change."""
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        labels_before = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        # Feed the original predictions, one-hot encoded, as the attack's y.
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), labels_before] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(labels_before == labels_after) < 0.1)
Example 17
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_gives_adversarial_example(self):
        """DeepFool-style generate() should flip >90% of the predictions."""
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        labels_before = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        x = tf.placeholder(tf.float32, x_val.shape)

        x_adv_p = self.attack.generate(x, over_shoot=0.02, max_iter=50,
                                       nb_candidate=2, clip_min=-5, clip_max=5)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val})

        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(labels_before == labels_after) < 0.1)
Example 18
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_attack_strength(self):
        """
        If clipping is not done at each iteration (not using clip_min and
        clip_max), this attack fails by
        np.mean(orig_labels == new_labels) == .5
        """
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, eps_iter=0.05,
                                        clip_min=0.5, clip_max=0.7,
                                        nb_iter=5)

        # A strong attack changes the predicted class for >90% of inputs.
        labels_before = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(labels_before == labels_after) < 0.1)
Example 19
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_generate_targeted_gives_adversarial_example(self):
        """Targeted symbolic attack should reach the requested labels >90%."""
        x_val = np.array(np.random.rand(100, 2), dtype=np.float32)

        # One-hot targets (randint(0, 1) always picks class 0).
        feed_labs = np.zeros((100, 2))
        feed_labs[np.arange(100), np.random.randint(0, 1, 100)] = 1
        x = tf.placeholder(tf.float32, x_val.shape)
        y = tf.placeholder(tf.float32, feed_labs.shape)

        x_adv_p = self.attack.generate(x, max_iterations=100,
                                       binary_search_steps=3,
                                       initial_const=1,
                                       clip_min=-5, clip_max=5,
                                       batch_size=100, y_target=y)
        self.assertEqual(x_val.shape, x_adv_p.shape)
        x_adv = self.sess.run(x_adv_p, {x: x_val, y: feed_labs})

        labels_after = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        self.assertTrue(np.mean(np.argmax(feed_labs, axis=1) == labels_after)
                        > 0.9)
Example 20
Project: neural-fingerprinting   Author: StephanZheng   File: utils_tf.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def model_argmax(sess, x, predictions, samples, feed=None):
    """
    Helper function that computes the current class prediction.
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param samples: numpy array with input samples (dims must match x)
    :param feed: An optional dictionary that is appended to the feeding
             dictionary before the session runs. Can be used to feed
             the learning phase of a Keras model for instance.
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    feed_dict = {x: samples}
    if feed is not None:
        feed_dict.update(feed)
    probabilities = sess.run(predictions, feed_dict)

    # A single sample yields a scalar class; a batch yields one per row.
    axis = None if samples.shape[0] == 1 else 1
    return np.argmax(probabilities, axis=axis)
Example 21
Project: mlimages   Author: icoxfog417   File: chainer_alex.py    MIT License 6 votes vote down vote up
def predict(limit):
    """Predict labels for up to `limit` images (default 5 when limit <= 0)
    and display each image with its predicted vs. actual label."""
    count_cap = limit if limit > 0 else 5

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, mean_image_file=MEAN_IMAGE_FILE, image_property=IMAGE_PROP)
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    serializers.load_npz(MODEL_FILE, model)

    shown = 0
    for arr, im in td.generate():
        # Wrap the single image in a batch dimension for the model.
        batch = np.ndarray((1,) + arr.shape, arr.dtype)
        batch[0] = arr
        batch = chainer.Variable(np.asarray(batch), volatile="on")
        y = model.predict(batch)
        p = np.argmax(y.data)
        print("predict {0}, actual {1}".format(label_def[p], label_def[im.label]))
        im.image.show()
        shown += 1
        if shown >= count_cap:
            break
Example 22
Project: models   Author: kipoi   File: model.py    MIT License 6 votes vote down vote up
def _get_bp_indexes_labranchor(self, soi):
        """
        Get indexes of branch point regions in given sequences.

        :param soi: batch of sequences of interest for introns (intron-3..intron+6)
        :return: array of predicted bp indexes
        """
        # One-hot encode the 70 bases immediately upstream of the acceptor.
        window = [onehot(str(seq)[self.acc_i - 70:self.acc_i]) for seq in np.nditer(soi)]
        batch = np.stack(window, axis=0)
        probs = self.labranchor.predict_on_batch(batch)
        # Per row: the base with maximal branchpoint probability, converted
        # back to an absolute index within the original sequence.
        return np.apply_along_axis(lambda row: self.acc_i - 70 + np.argmax(row), axis=1, arr=probs)

# TODO boilerplate
#    def write_bp(self, max_indexes):
#        max_indexes = [str(seq) for seq in np.nditer(max_indexes)]
#        with open(''.join([this_dir, "/../customBP/example_files/bp_idx_chr21_labr.txt"]), "a") as bp_idx_file:
#            bp_idx_file.write('\n'.join(max_indexes))
#            bp_idx_file.write('\n')
#            bp_idx_file.close() 
Example 23
Project: oslodatascience-rl   Author: Froskekongen   File: havakv_CartPole-A3C.py    MIT License 6 votes vote down vote up
def act(self, s):
        """Epsilon-greedy action selection: uniform random action with
        probability eps, otherwise an action sampled from the policy."""
        eps = self.getEpsilon()
        global frames
        frames = frames + 1

        if random.random() < eps:
            # Explore: uniformly random action.
            return random.randint(0, NUM_ACTIONS - 1)
        # Exploit: sample from the predicted action distribution (sampling,
        # rather than argmax, keeps some stochasticity in the policy).
        p = brain.predict_p(np.array([s]))[0]
        return np.random.choice(NUM_ACTIONS, p=p)
Example 24
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders3.py    MIT License 6 votes vote down vote up
def choose_action(random_action, probability):
    """Return a uniformly random action id in [0, 5] when random_action is
    set, otherwise the index of the highest-probability action."""
    random_value = randint(0, 5)  # np.random.uniform()

    if random_action:
        return random_value
    return np.argmax(probability)
Example 25
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders2.py    MIT License 6 votes vote down vote up
def choose_action(random_action, probability):
    """Pick an action: a random id in [0, 5] when random_action is truthy,
    else the argmax of the given probability vector."""
    random_value = randint(0, 5)  # np.random.uniform()

    if not random_action:
        return np.argmax(probability)
    return random_value
Example 26
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders.py    MIT License 6 votes vote down vote up
def choose_action(random_action, probability):
    """Pick action 4 or 5: random when random_action is truthy, otherwise
    4 when the second entry of `probability` dominates, else 5."""
    random_value = randint(4, 5)  # np.random.uniform()

    if random_action:
        return random_value
    # Map the two-way argmax onto the game's action ids 4/5.
    return 4 if np.argmax(probability) == 1 else 5
Example 27
Project: deep-learning-note   Author: wdxtub   File: layers.py    MIT License 6 votes vote down vote up
def forward(self, x):
        """Max-pooling forward pass over NCHW input.

        Caches the input and the per-window argmax for the backward pass.
        """
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        # Unroll pooling windows so each row of `cols` is one window.
        cols = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        cols = cols.reshape(-1, self.pool_h * self.pool_w)

        window_argmax = np.argmax(cols, axis=1)
        pooled = np.max(cols, axis=1)
        pooled = pooled.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        # Cache for backward().
        self.x = x
        self.arg_max = window_argmax

        return pooled
Example 28
Project: deep-learning-note   Author: wdxtub   File: 4_multi_classification.py    MIT License 6 votes vote down vote up
def predict_all(X, all_theta):
    """One-vs-all prediction: for each row of X, return the 1-based label
    of the class with the highest sigmoid score."""
    rows = X.shape[0]

    # Prepend the bias column of ones, matching the training setup.
    X = np.insert(X, 0, values=np.ones(rows), axis=1)

    # Matrix types keep the products below in matrix semantics.
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)

    # Class probabilities: one row per instance, one column per class.
    h = sigmoid(X * all_theta.T)

    # Most probable class per instance, shifted to 1-based labels because
    # the training labels are 1-indexed.
    return np.argmax(h, axis=1) + 1
Example 29
Project: chainer-openai-transformer-lm   Author: soskek   File: analysis.py    MIT License 5 votes vote down vote up
def rocstories(data_dir, pred_path, log_path):
    """Report validation (best epoch from the log) and test accuracy for
    the ROCStories cloze task."""
    preds = pd.read_csv(pred_path, delimiter='\t')['prediction'].values.tolist()
    _, _, _, labels = _rocstories(os.path.join(
        data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv'))
    test_accuracy = accuracy_score(labels, preds) * 100.
    # The first log line is header/config; report va_acc of the best epoch.
    logs = [json.loads(line) for line in open(log_path)][1:]
    best = np.argmax([log['va_acc'] for log in logs])
    print('ROCStories Valid Accuracy: %.2f' % (logs[best]['va_acc']))
    print('ROCStories Test Accuracy:  %.2f' % (test_accuracy))
Example 30
Project: chainer-openai-transformer-lm   Author: soskek   File: analysis.py    MIT License 5 votes vote down vote up
def sst(data_dir, pred_path, log_path):
    """Report validation (best epoch from the log) and test accuracy for
    the SST binary sentiment task."""
    preds = pd.read_csv(pred_path, delimiter='\t')['prediction'].values.tolist()
    test_url = 'https://raw.githubusercontent.com/harvardnlp/sent-conv-torch/master/data/stsa.binary.test'
    path = chainer.dataset.cached_download(test_url)
    teX, teY = _sst(path)
    test_accuracy = accuracy_score(teY, preds) * 100.
    # The first log line is header/config; report va_acc of the best epoch.
    logs = [json.loads(line) for line in open(log_path)][1:]
    best = np.argmax([log['va_acc'] for log in logs])
    print('SST Valid Accuracy: %.2f' % (logs[best]['va_acc']))
    print('SST Test Accuracy:  %.2f' % (test_accuracy))
Example 31
Project: chainer-openai-transformer-lm   Author: soskek   File: train.py    MIT License 5 votes vote down vote up
def argmax(x):
    """Row-wise argmax: index of the largest entry along axis 1."""
    return np.argmax(x, axis=1)
Example 32
Project: explirefit   Author: codogogo   File: confusion_matrix.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, labels = [], predictions = [], gold = [], one_hot_encoding = False, class_indices = False):
		# rows are true labels, columns predictions
		self.matrix = np.zeros(shape = (len(labels), len(labels)))
		self.labels = labels

		if len(predictions) != len(gold):
			raise ValueError("Predictions and gold labels do not have the same count.")
		for i in range(len(predictions)):
			index_pred = np.argmax(predictions[i]) if one_hot_encoding else (predictions[i] if class_indices else labels.index(predictions[i]))
			index_gold = np.argmax(gold[i]) if one_hot_encoding else (gold[i] if class_indices else labels.index(gold[i]))
			self.matrix[index_gold][index_pred] += 1

		if len(predictions) > 0: 
			self.compute_all_scores() 
Example 33
Project: explirefit   Author: codogogo   File: standard.py    Apache License 2.0 5 votes vote down vote up
def get_accuracy_seqlab(golds, preds):
	"""Token-level accuracy for sequence labelling.

	golds/preds are parallel lists of sequences of score (e.g. one-hot)
	vectors; positions whose gold vector is all zeros are treated as
	padding and ignored.

	Raises ZeroDivisionError if there is no labelled (non-padding)
	position at all, matching the original behaviour.
	"""
	correct = 0.0
	total = 0.0
	# Idiom fix: iterate the parallel sequences directly instead of
	# indexing with range(len(...)).
	for gold_seq, pred_seq in zip(golds, preds):
		for gold_vec, pred_vec in zip(gold_seq, pred_seq):
			# All-zero gold rows are padding; skip them.
			if np.count_nonzero(gold_vec) == 0:
				continue
			total += 1.0
			if np.argmax(gold_vec) == np.argmax(pred_vec):
				correct += 1.0
	return correct / total
Example 34
Project: convseg   Author: chqiwang   File: tagger.py    MIT License 5 votes vote down vote up
def inference(self, scores, sequence_lengths=None):
        """
        Inference label sequence given scores.
        If transitions is given, then perform viterbi search, else perform
        greedy search.

        Args:
            scores: A numpy array with shape (batch, max_length, num_tags).
            sequence_lengths: A numpy array with shape (batch,).

        Returns:
            A numpy array with shape (batch, max_length).
        """

        if not self.parameters['use_crf']:
            # Greedy decoding: best tag independently at every position.
            return np.argmax(scores, 2)
        else:
            with tf.variable_scope(self.scope, reuse=True):
                transitions = tf.get_variable('transitions').eval(session=self.sess)
            paths = np.zeros(scores.shape[:2], dtype=INT_TYPE)
            # FIX: Python-2 `xrange` does not exist on Python 3; use range.
            for i in range(scores.shape[0]):
                tag_score, length = scores[i], sequence_lengths[i]
                if length == 0:
                    # Empty sequence: leave the zero-initialised row as-is.
                    continue
                path, _ = crf.viterbi_decode(tag_score[:length], transitions)
                paths[i, :length] = path
            return paths
Example 35
Project: prediction-constrained-topic-models   Author: dtak   File: select_best_runs_and_snapshots.py    MIT License 5 votes vote down vote up
def fetch_default_score(ranking_func_name):
    """Sentinel starting score for a ranking function: -inf when the
    ranking maximises (argmax), +inf when it minimises (argmin)."""
    if 'argmax' in ranking_func_name:
        return -np.inf
    if 'argmin' in ranking_func_name:
        return np.inf
    raise ValueError("Unrecognized ranking_func_name %s" % ranking_func_name)
Example 36
Project: prediction-constrained-topic-models   Author: dtak   File: select_best_runs_and_snapshots.py    MIT License 5 votes vote down vote up
def get_score_ranking_function_for_colname(score_name):
    """Choose how to rank a score column: maximise AUC-style metrics,
    minimise everything else (losses included)."""
    return np.argmax if score_name.count("AUC") else np.argmin
Example 37
Project: DataHack2018   Author: InnovizTech   File: math_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def extract_quaternion(R):
    """Convert a 3x3 rotation matrix ``R`` into a quaternion.

    Returns a length-4 numpy array ordered scalar-first, i.e. (w, x, y, z).
    Branch selection follows the standard numerically stable scheme: when
    the trace is small, the largest diagonal entry of the symmetric part
    drives the computation to avoid dividing by a tiny number; otherwise
    the direct trace formula is used.
    """
    d = np.diagonal(R)
    t = np.sum(d)  # trace of R
    if t + 1 < 0.25:
        # Small-trace branch: decompose R into symmetric and antisymmetric
        # parts and build q from the dominant diagonal element.
        symmetric_mat = R + R.T
        asymmetric_mat = R - R.T
        symmetric_diag = np.diagonal(symmetric_mat)
        i_max = np.argmax(symmetric_diag)
        q = np.empty(4)
        if i_max == 0:
            # x-component dominates.
            q[1] = np.sqrt(symmetric_diag[0] - t + 1) / 2
            normalizer = 1 / q[1]
            q[2] = symmetric_mat[1, 0] / 4 * normalizer
            q[3] = symmetric_mat[2, 0] / 4 * normalizer
            q[0] = asymmetric_mat[2, 1] / 4 * normalizer
        elif i_max == 1:
            # y-component dominates.
            q[2] = np.sqrt(symmetric_diag[1] - t + 1) / 2
            normalizer = 1 / q[2]
            q[1] = symmetric_mat[1, 0] / 4 * normalizer
            q[3] = symmetric_mat[2, 1] / 4 * normalizer
            q[0] = asymmetric_mat[0, 2] / 4 * normalizer
        elif i_max == 2:
            # z-component dominates.
            q[3] = np.sqrt(symmetric_diag[2] - t + 1) / 2
            normalizer = 1 / q[3]
            q[1] = symmetric_mat[2, 0] / 4 * normalizer
            q[2] = symmetric_mat[1, 2] / 4 * normalizer
            q[0] = asymmetric_mat[1, 0] / 4 * normalizer
    else:
        # Well-conditioned trace: direct formula.
        r = np.sqrt(1+t)
        s = 0.5 / r
        q = np.array([0.5*r, (R[2, 1] - R[1, 2])*s, (R[0, 2] - R[2, 0])*s, (R[1, 0] - R[0, 1])*s])

    return q
Example 38
Project: fbpconv_tf   Author: panakino   File: unet.py    GNU General Public License v3.0 5 votes vote down vote up
def error_rate(predictions, labels):
    """
    Return the error rate (in percent) between dense predictions and
    1-hot labels, both shaped (batch, rows, cols, classes).
    """
    matches = np.argmax(predictions, 3) == np.argmax(labels, 3)
    n_pixels = predictions.shape[0] * predictions.shape[1] * predictions.shape[2]
    return 100.0 - (100.0 * np.sum(matches) / n_pixels)
Example 39
Project: RandomFourierFeatures   Author: tiskw   File: PyRFF.py    MIT License 5 votes vote down vote up
def predict(self, X, **args):
        """Predict class labels for the samples in X.

        Rebuilds an RFFSVC with this model's random-feature parameters
        (dim, std, W), maps X through its feature transform, applies the
        learned linear layer (coef, icpt), and returns the argmax class
        index per row.
        """
        svc = RFFSVC(self.dim, self.std, self.W, **args)
        return np.argmax(np.dot(svc.conv(X), self.coef.T) + self.icpt.flatten(), axis = 1)
Example 40
Project: Lane-And-Vehicle-Detection   Author: JustinHeaton   File: main.py    MIT License 5 votes vote down vote up
def blind_search(self, x, y, image):
        '''
        Sliding-window lane-line search, applied in the first few frames
        and/or if the lane was not successfully detected in the previous
        frame. It detects peaks in a histogram of the binary thresholded
        image; pixels in close proximity to the detected peaks are
        considered to belong to the lane lines.
        '''
        xvals = []
        yvals = []
        if self.found == False:
            # Vertical window bounds (bottom/top rows), stepped up 90px per pass.
            i = 720
            j = 630
            # Column histogram over the lower half of the image.
            histogram = np.sum(image[image.shape[0]//2:], axis=0)
            # NOTE(review): `Right` looks like a module-level lane-line
            # instance identifying the right-hand line — confirm at caller.
            if self == Right:
                peak = np.argmax(histogram[image.shape[1]//2:]) + image.shape[1]//2
            else:
                peak = np.argmax(histogram[:image.shape[1]//2])
            while j >= 0:
                # Pixels within +/-100px of the peak, inside the row window (j, i).
                x_idx = np.where((((peak - 100) < x)&(x < (peak + 100))&((y > j) & (y < i))))
                x_window, y_window = x[x_idx], y[x_idx]
                if np.sum(x_window) != 0:
                    xvals.extend(x_window)
                    yvals.extend(y_window)
                if np.sum(x_window) > 100:
                    # Re-center the search on the detected pixels.
                    # Use builtin int: np.int was removed in NumPy 1.24.
                    peak = int(np.mean(x_window))
                i -= 90
                j -= 90
        if np.sum(xvals) > 0:
            self.found = True
        else:
            # Nothing found: fall back to the previously stored lane pixels.
            yvals = self.Y
            xvals = self.X
        return xvals, yvals, self.found
Example 41
Project: neural-fingerprinting   Author: StephanZheng   File: cw_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def attack(self, X, Y):
        """
        Perform the L_2 attack on the given images for the given targets.

        :param X: samples to generate advs
        :param Y: the original class labels (one-hot, shape (n, nb_classes))
        If self.targeted is true, then the targets represents the target labels.
        If self.targeted is false, then targets are the original class labels.
        :return: adversarial examples, same shape as X
        """
        nb_classes = Y.shape[1]

        # random select target class for targeted attack
        y_target = np.copy(Y)
        if self.TARGETED:
            for i in range(Y.shape[0]):
                # Replace each one-hot row with a random class different
                # from the true one.
                current = int(np.argmax(Y[i]))
                target = np.random.choice(other_classes(nb_classes, current))
                y_target[i] = np.eye(nb_classes)[target]

        X_adv = np.zeros_like(X)
        # Attack in mini-batches; the last batch is clipped to the data size.
        for i in tqdm(range(0, X.shape[0], self.batch_size)):
            start = i
            end = i + self.batch_size
            end = np.minimum(end, X.shape[0])
            X_adv[start:end] = self.attack_batch(X[start:end], y_target[start:end])

        return X_adv
Example 42
Project: neural-fingerprinting   Author: StephanZheng   File: cw_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def attack(self, X, Y):
        """
        Perform the L_2 attack on the given images for the given targets.

        :param X: samples to generate advs
        :param Y: the original class labels (one-hot, shape (n, nb_classes))
        If self.targeted is true, then the targets represents the target labels.
        If self.targeted is false, then targets are the original class labels.
        :return: adversarial examples, same shape as X
        """
        nb_classes = Y.shape[1]

        # random select target class for targeted attack
        y_target = np.copy(Y)
        if self.TARGETED:
            for i in range(Y.shape[0]):
                # Replace each one-hot row with a random class different
                # from the true one.
                current = int(np.argmax(Y[i]))
                target = np.random.choice(other_classes(nb_classes, current))
                y_target[i] = np.eye(nb_classes)[target]

        X_adv = np.zeros_like(X)
        # Attack in mini-batches; the last batch is clipped to the data size.
        for i in tqdm(range(0, X.shape[0], self.batch_size)):
            start = i
            end = i + self.batch_size
            end = np.minimum(end, X.shape[0])
            X_adv[start:end] = self.attack_batch(X[start:end], y_target[start:end])

        return X_adv
Example 43
Project: neural-fingerprinting   Author: StephanZheng   File: attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def saliency_map_method(sess, model, X, Y, theta, gamma, clip_min=None,
                        clip_max=None):
    """
    Craft adversarial examples with the JSMA (saliency map) attack, one
    sample at a time, each targeting a random class different from the
    true label.

    :param sess: TF session
    :param model: predictions or after-softmax
    :param X: input samples, shape (n, ...)
    :param Y: one-hot labels, shape (n, nb_classes)
    :param theta: perturbation amount per feature (passed to jsma)
    :param gamma: max fraction of features to perturb (passed to jsma)
    :param clip_min: minimum feature value, or None
    :param clip_max: maximum feature value, or None
    :return: adversarial examples, same shape as X
    """
    nb_classes = Y.shape[1]
    # Define TF placeholder for the input
    x = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    # Define model gradients
    grads = jacobian_graph(model(x), x, nb_classes)
    X_adv = np.zeros_like(X)
    for i in tqdm(range(len(X))):
        # Pick a random target class distinct from the true class.
        current_class = int(np.argmax(Y[i]))
        target_class = np.random.choice(other_classes(nb_classes, current_class))
        X_adv[i], _, _ = jsma(
            sess, x, model(x), grads, X[i:(i+1)], target_class, theta=theta,
            gamma=gamma, increase=True, nb_classes=nb_classes,
            clip_min=clip_min, clip_max=clip_max
        )

    return X_adv
Example 44
Project: neural-fingerprinting   Author: StephanZheng   File: adaptive_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def binary_refinement(sess,Best_X_adv,
                      X_adv, Y, ALPHA, ub, lb, model, dataset='cifar'):
    """Binary-search refinement of per-sample perturbation scales ALPHA.

    For each sample, queries the model on the candidate adversarial input:
    if it is misclassified (attack success), the candidate is kept in
    Best_X_adv and the lower bound is raised; otherwise the upper bound is
    lowered. ALPHA is then set to the midpoint for the next round.

    Returns the updated (ALPHA, Best_X_adv).
    """
    num_samples = np.shape(X_adv)[0]
    print(dataset)
    # Input placeholder shape depends on the dataset (NCHW layout).
    if(dataset=="mnist"):
        X_place = tf.placeholder(tf.float32, shape=[1, 1, 28, 28])
    else:
        X_place = tf.placeholder(tf.float32, shape=[1, 3, 32, 32])

    pred = model(X_place)
    for i in range(num_samples):
        logits_op = sess.run(pred,feed_dict={X_place:X_adv[i:i+1,:,:,:]})
        if(not np.argmax(logits_op) == np.argmax(Y[i,:])):
            # Success, increase alpha
            Best_X_adv[i,:,:,:] = X_adv[i,:,:,]
            lb[i] = ALPHA[i,0]
        else:
            # Failure: shrink the perturbation from above.
            ub[i] = ALPHA[i,0]
        ALPHA[i] = 0.5*(lb[i] + ub[i])
    return ALPHA, Best_X_adv
Example 45
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def help_generate_np_gives_adversarial_example(self, ord, eps=.5, **kwargs):
        """Shared check: an attack at the given norm/eps should change the
        predicted label for most inputs while staying within eps."""
        x_val, x_adv, delta = self.generate_adversarial_examples_np(ord, eps,
                                                                    **kwargs)
        # Perturbation magnitude should match the requested eps.
        self.assertClose(delta, eps)
        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        # Most labels must flip for the attack to count as successful.
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.5)
Example 46
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_targeted_generate_np_gives_adversarial_example(self):
        """Targeted attack with random binary target labels should push
        most predictions onto the requested targets at eps=0.5."""
        # np.random.randint(0, 2, ...) replaces the long-deprecated (and
        # since removed) np.random.random_integers(0, 1, ...); both draw
        # uniformly from {0, 1}.
        random_labs = np.random.randint(0, 2, 100)
        random_labs_one_hot = np.zeros((100, 2))
        random_labs_one_hot[np.arange(100), random_labs] = 1

        _, x_adv, delta = self.generate_adversarial_examples_np(
            eps=.5, ord=np.inf, y_target=random_labs_one_hot)

        self.assertClose(delta, 0.5)

        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        # At least 70% of samples should land on their target class.
        self.assertTrue(np.mean(random_labs == new_labs) > 0.7)
Example 47
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_does_not_cache_graph_computation_for_nb_iter(self):
        """Changing nb_iter must rebuild the attack graph rather than
        reuse a cached one; detected by instrumenting tf.gradients."""
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                        clip_min=-5.0, clip_max=5.0,
                                        nb_iter=10)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)

        # Monkey-patch tf.gradients so we can observe whether a new graph
        # is constructed for the second call.
        ok = [False]
        old_grads = tf.gradients

        def fn(*x, **y):
            ok[0] = True
            return old_grads(*x, **y)
        tf.gradients = fn

        # Different nb_iter: should trigger graph re-construction.
        x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                        clip_min=-5.0, clip_max=5.0,
                                        nb_iter=11)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)

        # Restore the original function before asserting.
        tf.gradients = old_grads

        self.assertTrue(ok[0])
Example 48
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_untargeted_gives_adversarial_example(self):
        """Untargeted attack should flip predictions for >90% of inputs."""
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=10)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        # Fewer than 10% of labels may remain unchanged.
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 49
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_high_confidence_untargeted_examples(self):
        """CW-L2 with a confidence margin should flip every label and
        achieve (roughly) the requested logit margin."""
        trivial_model = TrivialModel()

        for CONFIDENCE in [0, 2.3]:
            x_val = np.random.rand(10, 1) - .5
            x_val = np.array(x_val, dtype=np.float32)

            orig_labs = np.argmax(self.sess.run(trivial_model.get_logits(x_val)), axis=1)
            attack = CarliniWagnerL2(trivial_model, sess=self.sess)
            x_adv = attack.generate_np(x_val,
                                       max_iterations=100,
                                       binary_search_steps=2,
                                       learning_rate=1e-2,
                                       initial_const=1,
                                       clip_min=-10, clip_max=10,
                                       confidence=CONFIDENCE,
                                       batch_size=10)

            new_labs = self.sess.run(trivial_model.get_logits(x_adv))

            # Logits of the flipped class vs. the original class (binary task).
            good_labs = new_labs[np.arange(10), 1 - orig_labs]
            bad_labs = new_labs[np.arange(10), orig_labs]

            # Every sample must be misclassified...
            self.assertTrue(np.mean(np.argmax(new_labs, axis=1) == orig_labs)
                            == 0)
            # ...and the margin should be close to CONFIDENCE.
            self.assertTrue(np.isclose(
                0, np.min(good_labs - (bad_labs + CONFIDENCE)), atol=1e-1))
Example 50
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_untargeted_gives_adversarial_example(self):
        """Untargeted attack should flip predictions for >90% of inputs."""
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, max_iterations=100,
                                        binary_search_steps=3,
                                        initial_const=1,
                                        clip_min=-5, clip_max=5,
                                        batch_size=10)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        # Fewer than 10% of labels may remain unchanged.
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 51
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_high_confidence_targeted_examples(self):
        """Targeted CW-L2 with a confidence margin should hit the target
        class for >90% of samples with the requested logit margin."""
        trivial_model = TrivialModel()

        for CONFIDENCE in [0, 2.3]:
            x_val = np.random.rand(10, 1) - .5
            x_val = np.array(x_val, dtype=np.float32)

            # Random one-hot targets over the two classes.
            feed_labs = np.zeros((10, 2))
            feed_labs[np.arange(10), np.random.randint(0, 2, 10)] = 1
            attack = CarliniWagnerL2(trivial_model, sess=self.sess)
            x_adv = attack.generate_np(x_val,
                                       max_iterations=100,
                                       binary_search_steps=2,
                                       learning_rate=1e-2,
                                       initial_const=1,
                                       clip_min=-10, clip_max=10,
                                       confidence=CONFIDENCE,
                                       y_target=feed_labs,
                                       batch_size=10)

            new_labs = self.sess.run(trivial_model.get_logits(x_adv))

            # Logits of the target class vs. the non-target class.
            good_labs = new_labs[np.arange(10), np.argmax(feed_labs, axis=1)]
            bad_labs = new_labs[np.arange(
                10), 1 - np.argmax(feed_labs, axis=1)]

            # Margin should be close to CONFIDENCE...
            self.assertTrue(np.isclose(
                0, np.min(good_labs - (bad_labs + CONFIDENCE)), atol=1e-1))
            # ...and most samples should classify as the target.
            self.assertTrue(np.mean(np.argmax(new_labs, axis=1) ==
                                    np.argmax(feed_labs, axis=1)) > .9)
Example 52
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_targeted_gives_adversarial_example(self):
        """Targeted attack with random one-hot targets should classify
        >90% of adversarial samples as the requested target."""
        x_val = np.random.rand(10, 1000)
        x_val = np.array(x_val, dtype=np.float32)

        # Random one-hot target labels over 10 classes.
        feed_labs = np.zeros((10, 10))
        feed_labs[np.arange(10), np.random.randint(0, 9, 10)] = 1
        x_adv = self.attack.generate_np(x_val,
                                        clip_min=-5., clip_max=5.,
                                        y_target=feed_labs)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        worked = np.mean(np.argmax(feed_labs, axis=1) == new_labs)
        self.assertTrue(worked > .9)
Example 53
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_generate_np_gives_adversarial_example(self):
        """DeepFool-style attack should flip predictions for >90% of inputs."""
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        x_adv = self.attack.generate_np(x_val, over_shoot=0.02, max_iter=50,
                                        nb_candidate=2, clip_min=-5,
                                        clip_max=5)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

        # Fewer than 10% of labels may remain unchanged.
        self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
Example 54
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_multiple_initial_random_step(self):
        """
        This test generates multiple adversarial examples until an adversarial
        example is generated with a different label compared to the original
        label. This is the procedure suggested in Madry et al. (2017).

        This test will fail if an initial random step is not taken (error>0.5).
        """
        x_val = np.random.rand(100, 2)
        x_val = np.array(x_val, dtype=np.float32)

        orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
        new_labs_multi = orig_labs.copy()

        # Generate multiple adversarial examples
        for i in range(10):
            x_adv = self.attack.generate_np(x_val, eps=.5, eps_iter=0.05,
                                            clip_min=0.5, clip_max=0.7,
                                            nb_iter=2)
            new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)

            # Examples for which we have not found adversarial examples
            I = (orig_labs == new_labs_multi)
            # Keep the first successful flip for each sample.
            new_labs_multi[I] = new_labs[I]

        # After 10 restarts, nearly all samples should have flipped.
        self.assertTrue(np.mean(orig_labs == new_labs_multi) < 0.1)
Example 55
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_random_targets_vector(self):
        """utils.random_targets on a 1D label vector returns one-hot rows
        that never equal the ground-truth class."""
        # Test utils.random_targets with a vector of labels as the input
        gt_labels = np.asarray([0, 1, 2, 3])
        rt = utils.random_targets(gt_labels, 5)

        # Make sure random_targets returns a one-hot encoded labels
        self.assertTrue(len(rt.shape) == 2)
        rt_labels = np.argmax(rt, axis=1)

        # Make sure all labels are different from the correct labels
        self.assertTrue(np.all(rt_labels != gt_labels))
Example 56
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_random_targets_one_hot(self):
        """utils.random_targets on one-hot input returns one-hot rows that
        never equal the ground-truth class."""
        # Test utils.random_targets with one-hot encoded labels as the input
        gt = np.asarray([[0, 0, 1, 0, 0],
                         [1, 0, 0, 0, 0],
                         [0, 0, 0, 1, 0],
                         [1, 0, 0, 0, 0]])
        gt_labels = np.argmax(gt, axis=1)
        rt = utils.random_targets(gt, 5)

        # Make sure random_targets returns a one-hot encoded labels
        self.assertTrue(len(rt.shape) == 2)
        rt_labels = np.argmax(rt, axis=1)

        # Make sure all labels are different from the correct labels
        self.assertTrue(np.all(rt_labels != gt_labels))
Example 57
Project: neural-fingerprinting   Author: StephanZheng   File: test_utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_random_targets_one_hot_single_label(self):
        """utils.random_targets on a single one-hot row still returns a 2D
        one-hot result different from the ground truth."""
        # Test utils.random_targets with a single one-hot encoded label
        gt = np.asarray([0, 0, 1, 0, 0])
        gt = gt.reshape((1, 5))
        gt_labels = np.argmax(gt, axis=1)
        rt = utils.random_targets(gt, 5)

        # Make sure random_targets returns a one-hot encoded labels
        self.assertTrue(len(rt.shape) == 2)
        rt_labels = np.argmax(rt, axis=1)

        # Make sure all labels are different from the correct labels
        self.assertTrue(np.all(rt_labels != gt_labels))
Example 58
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def random_targets(gt, nb_classes):
    """
    Take in an array of correct labels and randomly select a different label
    for each label in the array. This is typically used to randomly select a
    target class in targeted adversarial examples attacks (i.e., when the
    search algorithm takes in both a source class and target class to compute
    the adversarial example).
    :param gt: the ground truth (correct) labels. They can be provided as a
               1D vector or 2D array of one-hot encoded labels.
    :param nb_classes: The number of classes for this task. The random class
                       will be chosen between 0 and nb_classes such that it
                       is different from the correct class.
    :return: A numpy array holding the randomly-selected target classes
             encoded as one-hot labels.
    """
    # If the ground truth labels are encoded as one-hot, convert to labels.
    if len(gt.shape) == 2:
        gt = np.argmax(gt, axis=1)

    # This vector will hold the randomly selected labels.
    result = np.zeros(gt.shape, dtype=np.int32)

    # `range` rather than Python-2-only `xrange` for Py3 compatibility.
    for class_ind in range(nb_classes):
        # Compute all indices in that class.
        in_cl = gt == class_ind
        size = np.sum(in_cl)

        # Compute the set of potential targets for this class.
        potential_targets = other_classes(nb_classes, class_ind)

        # Draw with replacement random targets among the potential targets.
        result[in_cl] = np.random.choice(potential_targets, size=size)

    # Encode vector of random labels as one-hot labels.
    result = to_categorical(result, nb_classes)
    result = result.astype(np.int32)

    return result
Example 59
Project: models   Author: kipoi   File: gather.py    MIT License 5 votes vote down vote up
def average_labranchor(df, model_name, col_types):
    """For each row, pick the LaBranchoR model output with the largest
    absolute DIFF value, then collect the corresponding value from every
    column type in col_types into a new DataFrame (one column per type,
    keyed by model_name + lowercased type)."""
    import numpy as np
    # choose the maximum diff
    diff_cols = df.columns.values[df.columns.astype(str).str.contains("DIFF")]
    # Column names end in an integer model-output index; sort by it so
    # column order matches output order.
    model_outputs = [int(el.split("_")[-1]) for el in diff_cols]
    model_outputs_order = np.argsort(model_outputs)
    # select the model output that gives the maximum absolute difference
    max_col_id = df[diff_cols[model_outputs_order]].abs().values.argmax(axis=1)
    #
    # just to be sure it will work:
    assert np.all(df[diff_cols[model_outputs_order]].abs().values[np.arange(len(max_col_id)), max_col_id] == df[diff_cols].abs().max(axis=1).values)
    #
    averaged = {}
    usable_columns = df.columns.tolist()
    for ct in col_types:
        # Columns of this type; remove them from the pool so a column is
        # only ever assigned to one type.
        col_sel = [col for col in usable_columns if ct in col]
        usable_columns = [col for col in usable_columns if col not in col_sel]
        if len(col_sel) == 0:
            continue
        # average
        model_outputs = [int(el.split("_")[-1]) for el in col_sel]
        model_outputs_order = np.argsort(model_outputs)
        # use the column selection from before
        keep_vals = df[np.array(col_sel)[model_outputs_order]].values[np.arange(len(max_col_id)), max_col_id]
        averaged[model_name + ct.lower()] = keep_vals
    #
    return pd.DataFrame(averaged, index=df.index)
Example 60
Project: models   Author: kipoi   File: gather.py    MIT License 5 votes vote down vote up
def deduplicate_vars(df):
    """Collapse duplicate-index rows, keeping for each index group the row
    whose single 'diff' column has the largest value."""
    diff_cols = df.columns.values[df.columns.astype(str).str.contains("diff")]
    assert len(diff_cols) == 1
    pick_best = lambda grp: grp.iloc[np.argmax(grp[diff_cols[0]].values), :]
    return df.groupby(df.index).apply(pick_best)


# Modify here: add the _isna column and average labranchor if needed also clump the variants together. 
Example 61
Project: models   Author: kipoi   File: gather.py    MIT License 5 votes vote down vote up
def average_labranchor(df, model_name, col_types):
    """For each row, pick the LaBranchoR model output with the largest
    absolute DIFF value, then collect the corresponding value from every
    column type in col_types into a new DataFrame (one column per type,
    keyed by model_name + lowercased type)."""
    import numpy as np
    # choose the maximum diff
    diff_cols = df.columns.values[df.columns.astype(str).str.contains("DIFF")]
    # Column names end in an integer model-output index; sort by it so
    # column order matches output order.
    model_outputs = [int(el.split("_")[-1]) for el in diff_cols]
    model_outputs_order = np.argsort(model_outputs)
    # select the model output that gives the maximum absolute difference
    max_col_id = df[diff_cols[model_outputs_order]].abs().values.argmax(axis=1)
    #
    # just to be sure it will work:
    assert np.all(df[diff_cols[model_outputs_order]].abs().values[np.arange(len(max_col_id)), max_col_id] == df[diff_cols].abs().max(axis=1).values)
    #
    averaged = {}
    usable_columns = df.columns.tolist()
    for ct in col_types:
        # Columns of this type; remove them from the pool so a column is
        # only ever assigned to one type.
        col_sel = [col for col in usable_columns if ct in col]
        usable_columns = [col for col in usable_columns if col not in col_sel]
        if len(col_sel) == 0:
            continue
        # average
        model_outputs = [int(el.split("_")[-1]) for el in col_sel]
        model_outputs_order = np.argsort(model_outputs)
        # use the column selection from before
        keep_vals = df[np.array(col_sel)[model_outputs_order]].values[np.arange(len(max_col_id)), max_col_id]
        averaged[model_name + ct.lower()] = keep_vals
    #
    return pd.DataFrame(averaged, index=df.index)
Example 62
Project: models   Author: kipoi   File: gather.py    MIT License 5 votes vote down vote up
def deduplicate_vars(df):
    """Collapse duplicate-index rows, keeping for each index group the row
    whose single 'diff' column has the largest value."""
    diff_cols = df.columns.values[df.columns.astype(str).str.contains("diff")]
    assert len(diff_cols) == 1
    return df.groupby(df.index).apply(lambda x: x.iloc[np.argmax(x[diff_cols[0]].values), :])


# Modify here: add the _isna column and average labranchor if needed also clump the variants together. 
Example 63
Project: oslodatascience-rl   Author: Froskekongen   File: space_invaders4.py    MIT License 5 votes vote down vote up
def choose_action(probability):
    """Greedy policy: return the index of the most probable action."""
    best_action = np.argmax(probability)
    return best_action
Example 64
Project: Image-Caption-Generator   Author: dabasajay   File: model.py    MIT License 5 votes vote down vote up
def generate_caption(model, tokenizer, image, max_length):
	"""Greedily decode a caption for `image`, one word per step, starting
	from 'startseq' and stopping at 'endseq', an unmappable word, or
	max_length words."""
	# Seed the generation process
	in_text = 'startseq'
	# Iterate over the whole length of the sequence
	for _ in range(max_length):
		# Integer encode input sequence
		sequence = tokenizer.texts_to_sequences([in_text])[0]
		# Pad input
		sequence = pad_sequences([sequence], maxlen=max_length)
		# Predict next word
		# The model will output a prediction, which will be a probability distribution over all words in the vocabulary.
		yhat = model.predict([image,sequence], verbose=0)
		# The output vector represents a probability distribution where maximum probability is the predicted word position
		# Take output class with maximum probability and convert to integer
		yhat = np.argmax(yhat)
		# Map integer back to word
		word = int_to_word(yhat, tokenizer)
		# Stop if we cannot map the word
		if word is None:
			break
		# Append as input for generating the next word
		in_text += ' ' + word
		# Stop if we predict the end of the sequence
		if word == 'endseq':
			break
	return in_text
Example 65
Project: deep-learning-note   Author: wdxtub   File: 19_cnn.py    MIT License 5 votes vote down vote up
def accuracy(self, x, t, batch_size=100):
        """Classification accuracy of self.predict over (x, t), evaluated
        in mini-batches of `batch_size`. One-hot targets are collapsed to
        label indices first. Trailing samples beyond the last full batch
        are not evaluated."""
        if t.ndim != 1:
            t = np.argmax(t, axis=1)

        n_correct = 0.0
        n_batches = int(x.shape[0] / batch_size)
        for b in range(n_batches):
            lo, hi = b * batch_size, (b + 1) * batch_size
            preds = np.argmax(self.predict(x[lo:hi]), axis=1)
            n_correct += np.sum(preds == t[lo:hi])

        return n_correct / x.shape[0]
Example 66
Project: deep-learning-note   Author: wdxtub   File: 10_two_layer_net.py    MIT License 5 votes vote down vote up
def accuracy(self, x, t):
        """Fraction of samples in x whose predicted class matches t.

        Targets t may be label indices (1D) or one-hot rows (2D).
        """
        predicted = np.argmax(self.predict(x), axis=1)
        targets = t if t.ndim == 1 else np.argmax(t, axis=1)
        return np.sum(predicted == targets) / float(x.shape[0])
Example 67
Project: deep-learning-note   Author: wdxtub   File: 9_two_layer_net_naive.py    MIT License 5 votes vote down vote up
def accuracy(self, x, t):
        """Accuracy of self.predict(x) against one-hot targets t
        (t is always argmax-decoded, so it must be 2D)."""
        pred_labels = np.argmax(self.predict(x), axis=1)
        true_labels = np.argmax(t, axis=1)
        n_hits = np.sum(pred_labels == true_labels)
        return n_hits / float(x.shape[0])
Example 68
Project: deep-learning-note   Author: wdxtub   File: multi_layer_net_extend.py    MIT License 5 votes vote down vote up
def accuracy(self, X, T):
        """Accuracy of the network on (X, T), with training-only behavior
        (train_flg) switched off for inference. Targets T may be label
        indices (1D) or one-hot rows (2D)."""
        predictions = np.argmax(self.predict(X, train_flg=False), axis=1)
        if T.ndim != 1:
            T = np.argmax(T, axis=1)
        return np.sum(predictions == T) / float(X.shape[0])
Example 69
Project: deep-learning-note   Author: wdxtub   File: multi_layer_net.py    MIT License 5 votes vote down vote up
def accuracy(self, x, t):
        """Mean top-1 accuracy of self.predict on (x, t); accepts one-hot
        (2D) or index-encoded (1D) targets."""
        labels = np.argmax(t, axis=1) if t.ndim != 1 else t
        hits = np.argmax(self.predict(x), axis=1) == labels
        return np.sum(hits) / float(x.shape[0])
Example 70
Project: deep-learning-note   Author: wdxtub   File: 1_generate_text.py    MIT License 5 votes vote down vote up
def sample(preds, temperature=1.0):
    """Sample the index of the next character from a predicted probability
    distribution, after temperature re-scaling (lower temperature makes
    the distribution more peaked, i.e. less random)."""
    scaled = np.log(np.asarray(preds).astype('float64')) / temperature
    weights = np.exp(scaled)
    weights = weights / np.sum(weights)
    draw = np.random.multinomial(1, weights, 1)
    return np.argmax(draw)
Example 71
Project: rl_3d   Author: avdmitry   File: agent_a3c.py    MIT License 5 votes vote down vote up
def GetAction(self, sess, state):
        """Sample an action from the current policy for `state`, updating
        the recurrent state; returns (action_index, state_value)."""
        # Pixel inputs are scaled to [0, 1].
        state = state / 255.0
        a_dist, v, self.rnn_state = sess.run([self.policy, self.value, self.state_out],
                                             feed_dict={self.inputs: [state],
                                                        self.state_in[0]: self.rnn_state[0],
                                                        self.state_in[1]: self.rnn_state[1]})
        # Sample a probability value, then recover its index via the
        # first-match trick (argmax of the boolean equality array).
        a = np.random.choice(a_dist[0], p=a_dist[0])
        a = np.argmax(a_dist == a)
        return a, v[0, 0]
Example 72
Project: b2ac   Author: hbldh   File: overlap_functions.py    MIT License 4 votes vote down vote up
def quickhull(sample):
    """Calculates the convex hull of an arbitrary 2D point cloud.

    Pure-Python QuickHull, based on the ``literateprograms`` version with
    old-style Numeric calls fixed; works with numpy > 1.2.1.

    References:

    * `Literateprograms <http://en.literateprograms.org/Quickhull_(Python,_arrays)>`_
    * `Wikipedia <http://en.wikipedia.org/wiki/QuickHull>`_

    Code adapted from:

    `<http://members.home.nl/wim.h.bakker/python/quickhull2d.py>`_

    :param sample: Points to which the convex hull is desired to be found.
    :type sample: :py:class:`numpy.ndarray`
    :return: The convex hull of the points, ordered clockwise.
    :rtype: :py:class:`numpy.ndarray`
    """

    def _hull(points):
        def join(a, b):
            # Concatenate two chains, dropping b's duplicated first point.
            return np.concatenate((a, b[1:]))

        def segment(a, b):
            return np.concatenate(([a], [b]))

        def dome(points, base):
            h, t = base
            # Signed distance of every point from the directed line h -> t
            # (via a 90-degree rotation of the segment vector).
            dists = np.dot(points - h, np.dot(((0, -1), (1, 0)), (t - h)))
            # Boolean repeat counts act as a filter: keep points left of the line.
            outer = np.repeat(points, dists > 0, axis=0)

            if len(outer):
                pivot = points[np.argmax(dists)]
                return join(dome(outer, segment(h, pivot)),
                            dome(outer, segment(pivot, t)))
            return base

        if len(points) > 2:
            # Split along the extreme-x points and build both half-hulls.
            xs = points[:, 0]
            base = np.take(points, [np.argmin(xs), np.argmax(xs)], axis=0)
            return join(dome(points, base),
                        dome(points, base[::-1]))
        return points

    # QuickHull above yields counter-clockwise order; reverse the rows to
    # return the points ordered clockwise instead.
    return _hull(sample)[::-1, :]
Example 73
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: capsulenet-multi-gpu.py    MIT License 4 votes vote down vote up
def test(model, data):
    """Evaluate the capsule network on held-out data and visualize it.

    Prints the test accuracy, then saves (and displays) a side-by-side
    image of the first 50 test samples and their reconstructions.
    """
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-' * 50)
    n_correct = np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))
    print('Test acc:', n_correct / y_test.shape[0])

    import matplotlib.pyplot as plt
    from PIL import Image
    from utils import combine_images

    # Stack originals over reconstructions and scale [0, 1] -> [0, 255].
    combined = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    scaled = combined * 255
    Image.fromarray(scaled.astype(np.uint8)).save("real_and_recon.png")
    print()
    print('Reconstructed images are saved to ./real_and_recon.png')
    print('-' * 50)
    plt.imshow(plt.imread("real_and_recon.png"))
    plt.show()
Example 74
Project: neural-fingerprinting   Author: StephanZheng   File: attacks.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def basic_iterative_method(sess, model, X, Y, eps, eps_iter, nb_iter=50,
                           clip_min=None, clip_max=None, batch_size=256):
    """
    Run the Basic Iterative Method (iterated FGSM) attack.

    :param sess: TF session.
    :param model: predictions or after-softmax
    :param X: numpy array of original inputs.
    :param Y: numpy array of one-hot true labels.
    :param eps: radius of the L-inf ball the total perturbation stays in.
    :param eps_iter: step size of each FGSM iteration.
    :param nb_iter: number of attack iterations.
    :param clip_min: minimum feature value, or None for no clipping.
    :param clip_max: maximum feature value, or None for no clipping.
    :param batch_size: batch size used when evaluating the graph.
    :return: (its, results) — ``its`` maps sample index to the first
        iteration at which it became misclassified (default: nb_iter - 1),
        ``results`` has shape (nb_iter, n_samples, ...) and holds the
        adversarial inputs after each iteration.
    """
    print("nb_iter", nb_iter)
    # Define TF placeholders for the input and output
    x = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,) + Y.shape[1:])
    # results will hold the adversarial inputs at each iteration of BIM;
    # thus it will have shape (nb_iter, n_samples, n_rows, n_cols, n_channels)
    results = np.zeros((nb_iter, X.shape[0],) + X.shape[1:])
    # Initialize adversarial samples as the original samples, set upper and
    # lower bounds
    X_adv = X
    X_min = X_adv - eps
    X_max = X_adv + eps
    print('Running BIM iterations...')
    # "its" keeps track of the iteration at which each sample becomes
    # misclassified. The default value is (nb_iter - 1), the very last
    # iteration.
    its = defaultdict(lambda: nb_iter - 1)
    # Out keeps track of which samples have already been misclassified
    out = set()
    # Build the one-step FGSM graph ONCE. It only depends on the
    # placeholders (values are supplied via batch_eval's feeds), so the
    # original code's per-iteration call just duplicated ops in the graph
    # nb_iter times.
    adv_x = fgsm(
        x, model(x), eps=eps_iter,
        clip_min=clip_min, clip_max=clip_max, y=y
    )
    for i in tqdm(range(nb_iter)):
        X_adv, = batch_eval(
            sess, [x, y], [adv_x],
            [X_adv, Y], feed={K.learning_phase(): 0},
            args={'batch_size': batch_size}
        )
        # Project back into the eps-ball around the original inputs.
        X_adv = np.maximum(np.minimum(X_adv, X_max), X_min)
        results[i] = X_adv
        # check misclassifieds
        predictions = model.predict_classes(X_adv, batch_size=512, verbose=0)
        misclassifieds = np.where(predictions != Y.argmax(axis=1))[0]
        for elt in misclassifieds:
            if elt not in out:
                its[elt] = i
                out.add(elt)

    return its, results
Example 75
Project: neural-fingerprinting   Author: StephanZheng   File: test_attacks_tf.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def helper_pgd_attack(self,
                          unrolled_optimizer,
                          targeted,
                          nb_iters=20,
                          epsilon=.5,
                          clip_min=-5.,
                          clip_max=5.,
                          assert_threshold=0.5):
        """Shared driver for PGD-attack tests.

        Runs a PGD attack with ``unrolled_optimizer`` against self.model
        on random 2-D inputs, then asserts that the attack either flips
        most predictions (untargeted) or reaches the random target labels
        (targeted), relative to ``assert_threshold``.
        """
        def loss_fn(input_image, label, targeted):
            # Cross-entropy on the model logits, negated in the
            # untargeted case so minimizing pushes away from `label`.
            res = self.model.fprop(input_image)
            logits = res[self.model.O_LOGITS]
            multiplier = 1. if targeted else -1.
            return multiplier * tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=label, logits=logits)

        x_val_ph = tf.placeholder(tf.float32, shape=[100, 2])
        x_val = np.random.randn(100, 2).astype(np.float32)
        init_model_output = self.model.fprop(x_val_ph)
        init_model_logits = init_model_output[self.model.O_LOGITS]
        if targeted:
            # FIX: np.random.random_integers is deprecated and removed in
            # recent NumPy; randint with exclusive upper bound 2 draws the
            # same {0, 1} labels.
            labels = np.random.randint(0, 2, size=(100,))
        else:
            # Untargeted: attack away from the model's own predictions.
            labels = tf.stop_gradient(tf.argmax(init_model_logits, axis=1))

        def _project_perturbation(perturbation, epsilon, input_image):
            # Clip the perturbation into the eps-ball, then clip the
            # perturbed image into [clip_min, clip_max].
            clipped_perturbation = tf.clip_by_value(perturbation, -epsilon,
                                                    epsilon)
            new_image = tf.clip_by_value(input_image + clipped_perturbation,
                                         clip_min, clip_max)
            return new_image - input_image

        x_adv = pgd_attack(
            loss_fn=partial(loss_fn, targeted=targeted),
            input_image=x_val_ph,
            label=labels,
            epsilon=epsilon,
            num_steps=nb_iters,
            optimizer=unrolled_optimizer,
            project_perturbation=_project_perturbation)

        final_model_output = self.model.fprop(x_adv)
        final_model_logits = final_model_output[self.model.O_LOGITS]

        if not targeted:
            # The attack should change most of the model's predictions.
            logits1, logits2 = self.sess.run(
                [init_model_logits, final_model_logits],
                feed_dict={x_val_ph: x_val})
            preds1 = np.argmax(logits1, axis=1)
            preds2 = np.argmax(logits2, axis=1)

            self.assertTrue(
                np.mean(preds1 == preds2) < assert_threshold,
                np.mean(preds1 == preds2))

        else:
            # The attack should hit the random target labels.
            logits_adv = self.sess.run(
                final_model_logits, feed_dict={x_val_ph: x_val})
            preds_adv = np.argmax(logits_adv, axis=1)

            self.assertTrue(np.mean(labels == preds_adv) > assert_threshold)
Example 76
Project: neural-fingerprinting   Author: StephanZheng   File: attacks_tf.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def saliency_map(grads_target, grads_other, search_domain, increase):
    """
    TensorFlow implementation for computing saliency maps.

    Picks the pair of input features whose joint modification best moves
    the model toward the target class (JSMA feature selection). Note that
    ``grads_target`` and ``grads_other`` are modified in place.

    :param grads_target: a matrix containing forward derivatives for the
                         target class
    :param grads_other: a matrix where every element is the sum of forward
                        derivatives over all non-target classes at that index
    :param search_domain: the set of input indices that we are considering
    :param increase: boolean; true if we are increasing pixels, false otherwise
    :return: (i, j, search_domain) the two input indices selected and the
             updated search domain
    """
    # Number of input features.
    nf = len(grads_target)

    # Features already removed from the search domain are forced to the
    # worst possible gradient value so they can never win below.
    invalid = list(set(range(nf)) - search_domain)
    increase_coef = 2 * int(increase) - 1
    grads_target[invalid] = -increase_coef * np.max(np.abs(grads_target))
    grads_other[invalid] = increase_coef * np.max(np.abs(grads_other))

    # Pairwise sums: entry (i, j) is grads[i] + grads[j].
    target_sum = grads_target[None, :] + grads_target[:, None]
    other_sum = grads_other[None, :] + grads_other[:, None]

    # Mask keeping only pairs that satisfy the saliency-map conditions.
    if increase:
        scores_mask = (target_sum > 0) & (other_sum < 0)
    else:
        scores_mask = (target_sum < 0) & (other_sum > 0)

    # Score each admissible pair of candidate features.
    scores = scores_mask * (-target_sum * other_sum)

    # A pixel can only be selected (and changed) once.
    np.fill_diagonal(scores, 0)

    # Pick the best-scoring pair from the flattened score matrix.
    best = np.argmax(scores)
    p2, p1 = divmod(best, nf)

    # The chosen pixels leave the search domain.
    search_domain.discard(p1)
    search_domain.discard(p2)

    return p1, p2, search_domain
Example 77
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
                              min_epsilon=-10, max_epsilon=10,
                              num_points=21):
    """Generate linear extrapolation plot.

    Plots per-class log probabilities against epsilon, drawing the correct
    class with a thick solid line and the others dashed, and saves the
    figure to ``file_name``.

    Args:
        log_prob_adv_array: Numpy array containing log probabilities,
            shape (num_points, num_classes)
        y: One-hot label array (axis 0 indexes classes here)
        file_name: Plot filename
        min_epsilon: Minimum value of epsilon over the interval
        max_epsilon: Maximum value of epsilon over the interval
        num_points: Number of points used to interpolate

    Returns:
        The first matplotlib figure created (window-title holder).
    """
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    figure = plt.figure()
    figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')

    # Index of the correct class: drawn thicker below.
    correct_idx = np.argmax(y, axis=0)
    fig = plt.figure()
    plt.xlabel('Epsilon')
    plt.ylabel('Logits')
    x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
    plt.xlim(min_epsilon - 1, max_epsilon + 1)
    # BUG FIX: `xrange` is Python 2 only and raises NameError on Python 3;
    # `range` is equivalent here.
    for i in range(y.shape[0]):
        if i == correct_idx:
            ls = '-'
            linewidth = 5
        else:
            ls = '--'
            linewidth = 2
        plt.plot(
            x_axis,
            log_prob_adv_array[:, i],
            ls=ls,
            linewidth=linewidth,
            label='{}'.format(i))
    plt.legend(loc='best', fontsize=14)
    plt.show()
    fig.savefig(file_name)
    plt.clf()
    return figure
Example 78
Project: YOLOv1_tensorflow_windows   Author: FatherRen   File: main.py    GNU General Public License v3.0 4 votes vote down vote up
def interpret_output(self, output):
        """Decode one flat YOLOv1 output vector into a list of detections.

        :param output: 1-D network output laid out as
            [class probabilities | box confidences | box coordinates],
            split at self.boundary1 and self.boundary2.
        :return: list of [class_name, x_center, y_center, width, height,
            score] entries with coordinates in image pixels.
        """
        # Per-(cell_y, cell_x, box, class) detection scores, filled below.
        probs = np.zeros((self.cell_size, self.cell_size, self.boxes_per_cell, self.num_class))
        class_probs = np.reshape(output[0:self.boundary1], (self.cell_size, self.cell_size, self.num_class))
        scales = np.reshape(output[self.boundary1:self.boundary2],
                            (self.cell_size, self.cell_size, self.boxes_per_cell))
        boxes = np.reshape(output[self.boundary2:], (self.cell_size, self.cell_size, self.boxes_per_cell, 4))
        # Grid offsets so cell-relative x/y predictions become grid-wide
        # coordinates (offset transposed for the y direction below).
        offset = np.array([np.arange(self.cell_size)] * self.cell_size * self.boxes_per_cell)
        offset = np.transpose(np.reshape(offset, [self.boxes_per_cell, self.cell_size, self.cell_size]), (1, 2, 0))

        # x/y: add grid offsets, then normalize by the grid size;
        # w/h: the network predicts sqrt(w), sqrt(h), so square them back.
        boxes[:, :, :, 0] += offset
        boxes[:, :, :, 1] += np.transpose(offset, (1, 0, 2))
        boxes[:, :, :, :2] = 1.0 * boxes[:, :, :, 0:2] / self.cell_size
        boxes[:, :, :, 2:] = np.square(boxes[:, :, :, 2:])

        # Scale normalized coordinates up to image pixels.
        boxes *= self.image_size

        # Detection score = class-conditional probability * box confidence.
        for i in range(self.boxes_per_cell):
            for j in range(self.num_class):
                probs[:, :, i, j] = np.multiply(class_probs[:, :, j], scales[:, :, i])

        # Keep only (cell, box, class) entries above the score threshold.
        filter_mat_probs = np.array(probs >= self.threshold, dtype='bool')
        filter_mat_boxes = np.nonzero(filter_mat_probs)
        boxes_filtered = boxes[filter_mat_boxes[0],
                               filter_mat_boxes[1], filter_mat_boxes[2]]
        probs_filtered = probs[filter_mat_probs]
        # NOTE(review): argmax over the boolean mask yields the FIRST class
        # above threshold per cell/box, not necessarily the top-scoring
        # one when several pass; this mirrors the original YOLO demo code.
        classes_num_filtered = np.argmax(filter_mat_probs,
                                         axis=3)[filter_mat_boxes[0], filter_mat_boxes[1], filter_mat_boxes[2]]

        # Process detections in descending score order.
        argsort = np.array(np.argsort(probs_filtered))[::-1]
        boxes_filtered = boxes_filtered[argsort]
        probs_filtered = probs_filtered[argsort]
        classes_num_filtered = classes_num_filtered[argsort]

        # Greedy non-maximum suppression: zero the score of any box that
        # overlaps a higher-scoring box by more than iou_threshold.
        for i in range(len(boxes_filtered)):
            if probs_filtered[i] == 0:
                continue
            for j in range(i + 1, len(boxes_filtered)):
                if self.iou(boxes_filtered[i], boxes_filtered[j]) > self.iou_threshold:
                    probs_filtered[j] = 0.0

        # Drop the suppressed boxes.
        filter_iou = np.array(probs_filtered > 0.0, dtype='bool')
        boxes_filtered = boxes_filtered[filter_iou]
        probs_filtered = probs_filtered[filter_iou]
        classes_num_filtered = classes_num_filtered[filter_iou]

        # Assemble [class name, x, y, w, h, score] rows for the survivors.
        result = []
        for i in range(len(boxes_filtered)):
            result.append([self.classes[classes_num_filtered[i]],
                           boxes_filtered[i][0],
                           boxes_filtered[i][1],
                           boxes_filtered[i][2],
                           boxes_filtered[i][3],
                           probs_filtered[i]])
        return result
Example 79
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: dataAugmentation.py    MIT License 4 votes vote down vote up
def eul2quat(ax, ay, az, atol=1e-8):
    '''
    Translate between Euler angle (ZYX) order and quaternion representation of a rotation.
    Args:
        ax: X rotation angle in radians.
        ay: Y rotation angle in radians.
        az: Z rotation angle in radians.
        atol: absolute tolerance used to decide the scalar quaternion
            component qs is zero, triggering the numerically stable
            computation of the vector part.
    Return:
        Numpy array with three entries representing the vectorial component of the quaternion.

    '''
    # Create rotation matrix using ZYX Euler angles and then compute quaternion using entries.
    cx = np.cos(ax)
    cy = np.cos(ay)
    cz = np.cos(az)
    sx = np.sin(ax)
    sy = np.sin(ay)
    sz = np.sin(az)
    r = np.zeros((3, 3))
    r[0, 0] = cz * cy
    r[0, 1] = cz * sy * sx - sz * cx
    r[0, 2] = cz * sy * cx + sz * sx

    r[1, 0] = sz * cy
    r[1, 1] = sz * sy * sx + cz * cx
    r[1, 2] = sz * sy * cx - cz * sx

    r[2, 0] = -sy
    r[2, 1] = cy * sx
    r[2, 2] = cy * cx

    # Compute quaternion scalar component from the matrix trace.
    qs = 0.5 * np.sqrt(r[0, 0] + r[1, 1] + r[2, 2] + 1)
    qv = np.zeros(3)
    # If the scalar component of the quaternion is close to zero, we
    # compute the vector part using a numerically stable approach.
    # BUG FIX: np.isclose's third positional parameter is rtol, so the
    # original np.isclose(qs, 0.0, atol) silently ignored the caller's
    # tolerance (rtol * |0| == 0); pass it by keyword.
    if np.isclose(qs, 0.0, atol=atol):
        # Pivot on the largest diagonal entry to keep the sqrt positive.
        i = np.argmax([r[0, 0], r[1, 1], r[2, 2]])
        j = (i + 1) % 3
        k = (j + 1) % 3
        w = np.sqrt(r[i, i] - r[j, j] - r[k, k] + 1)
        qv[i] = 0.5 * w
        qv[j] = (r[i, j] + r[j, i]) / (2 * w)
        qv[k] = (r[i, k] + r[k, i]) / (2 * w)
    else:
        denom = 4 * qs
        qv[0] = (r[2, 1] - r[1, 2]) / denom
        qv[1] = (r[0, 2] - r[2, 0]) / denom
        qv[2] = (r[1, 0] - r[0, 1]) / denom
    return qv
Example 80
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: gradcam.py    Apache License 2.0 4 votes vote down vote up
def _get_grad(net, image, class_id=None, conv_layer_name=None, image_grad=False):
    """This is an internal helper function that can be used for either of these
    but not both at the same time:
    1. Record the output and gradient of output of an intermediate convolutional layer.
    2. Record the gradients of the image.

    Parameters
    ----------
    net : network (Gluon block) used to produce predictions/gradients.
    image : NDArray
        Image to visualize. This is an NDArray with the preprocessed image.
    class_id : int
        Category ID this image belongs to. If not provided,
        network's prediction will be used.
    conv_layer_name: str
        Name of the convolutional layer whose output and output's gradients need to be captured.
    image_grad: bool
        Whether to capture gradients of the image.

    Returns
    -------
    The image gradient as a numpy array if image_grad is True; otherwise a
    (conv_output, conv_output_gradient) pair of numpy arrays."""

    if image_grad:
        image.attach_grad()
        Conv2D.capture_layer_name = None
        Activation.set_guided_backprop(True)
    else:
        # Tell convviz.Conv2D which layer's output and gradient needs to be recorded
        Conv2D.capture_layer_name = conv_layer_name
        Activation.set_guided_backprop(False)

    # Run the network
    with autograd.record(train_mode=False):
        out = net(image)

    # If user didn't provide a class id, we'll use the class that the network predicted.
    # FIX: identity check (`is None`) instead of `== None`, per PEP 8.
    if class_id is None:
        model_output = out.asnumpy()
        class_id = np.argmax(model_output)

    # Create a one-hot target with class_id and backprop with the created target
    one_hot_target = mx.nd.one_hot(mx.nd.array([class_id]), 1000)
    out.backward(one_hot_target, train_mode=False)

    if image_grad:
        return image.grad[0].asnumpy()
    else:
        # Return the recorded convolution output and gradient
        conv_out = Conv2D.conv_output
        return conv_out[0].asnumpy(), conv_out.grad[0].asnumpy()