Python matplotlib.pyplot.axis() Examples

The following code examples show how to use matplotlib.pyplot.axis(). They are taken from open source Python projects.
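
Before the project examples, here is a minimal standalone sketch of the calling conventions that appear throughout this page: plt.axis() accepts a list of limits, a mode string such as 'off', 'equal', or 'tight', and, when called with no arguments, returns the current limits.

import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [0, 1, 4])
plt.axis([0, 2, 0, 5])    # set the limits as [xmin, xmax, ymin, ymax]
plt.axis('equal')         # equal scaling on both axes
plt.axis('tight')         # fit the limits tightly around the data
print(plt.axis())         # no argument: returns the current (xmin, xmax, ymin, ymax)
plt.axis('off')           # hide the axis lines, ticks and labels
plt.show()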

Example 1
Project: good-semi-bad-gan   Author: christiancosgrove   File: good-semi.py    MIT License
def plot(samples):
    width = min(12,int(np.sqrt(len(samples))))
    fig = plt.figure(figsize=(width, width))
    gs = gridspec.GridSpec(width, width)
    gs.update(wspace=0.05, hspace=0.05)

    for ind, sample in enumerate(samples):
        if ind >= width*width:
            break
        ax = plt.subplot(gs[ind])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        sample = sample * 0.5 + 0.5
        sample = np.transpose(sample, (1, 2, 0))
        plt.imshow(sample)

    return fig 
Example 2
Project: mmdetection   Author: open-mmlab   File: recall.py    Apache License 2.0
def plot_num_recall(recalls, proposal_nums):
    """Plot Proposal_num-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        proposal_nums(ndarray or list): same shape as `recalls`
    """
    if isinstance(proposal_nums, np.ndarray):
        _proposal_nums = proposal_nums.tolist()
    else:
        _proposal_nums = proposal_nums
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot([0] + _proposal_nums, [0] + _recalls)
    plt.xlabel('Proposal num')
    plt.ylabel('Recall')
    plt.axis([0, proposal_nums.max(), 0, 1])
    f.show() 
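
A hypothetical call with synthetic numbers (not from mmdetection), assuming plot_num_recall has been imported from the module above; proposal_nums is passed as an ndarray because the function calls proposal_nums.max() when setting the axis limits.

import numpy as np

proposal_nums = np.array([100, 300, 1000])
recalls = np.array([0.42, 0.58, 0.71])   # made-up recall values for illustration
plot_num_recall(recalls, proposal_nums)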
Example 3
Project: mmdetection   Author: open-mmlab   File: recall.py    Apache License 2.0
def plot_iou_recall(recalls, iou_thrs):
    """Plot IoU-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        iou_thrs(ndarray or list): same shape as `recalls`
    """
    if isinstance(iou_thrs, np.ndarray):
        _iou_thrs = iou_thrs.tolist()
    else:
        _iou_thrs = iou_thrs
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot(_iou_thrs + [1.0], _recalls + [0.])
    plt.xlabel('IoU')
    plt.ylabel('Recall')
    plt.axis([iou_thrs.min(), 1, 0, 1])
    f.show() 
Example 4
Project: DOTA_models   Author: ringringyi   File: plot_lfads.py    Apache License 2.0
def plot_time_series(vals_bxtxn, bidx=None, n_to_plot=np.inf, scale=1.0,
                     color='r', title=None):

  if bidx is None:
    vals_txn = np.mean(vals_bxtxn, axis=0)
  else:
    vals_txn = vals_bxtxn[bidx,:,:]

  T, N = vals_txn.shape
  if n_to_plot > N:
    n_to_plot = N

  plt.plot(vals_txn[:,0:n_to_plot] + scale*np.array(range(n_to_plot)),
           color=color, lw=1.0)
  plt.axis('tight')
  if title:
    plt.title(title) 
Example 5
Project: MLP-Numpy-Implementation-Gradient-Descent-Backpropagation   Author: EsterHlav   File: support.py    MIT License
def normalizeDataSet(data):
    X=data[0]
    m = np.mean(X, axis=1)
    std = np.std(X, axis=1)
    mX = np.repeat(m, X.shape[1]).reshape(X.shape)
    stdX = np.repeat(std, X.shape[1]).reshape(X.shape)
    X = (X-mX)/stdX
    newdata = (X,data[1])
    return newdata

# # example to load and test normalization of data:
# train, valid, test = load_MNIST_vector()
# train = normalizeDataSet(train)
# X = train[0]
# print(np.mean(X[2]))
# print(np.std(X[2])) 
Example 6
Project: MLP-Numpy-Implementation-Gradient-Descent-Backpropagation   Author: EsterHlav   File: support.py    MIT License
def showImages(imgarray, indexes):
    # takes as input a (N*784) set of data and integers (indexes of the images to show)
    # and prints the corresponding images
    # figure out the size of figure
    n = len(indexes)
    w,l = shapeGrid(n)

    imgarrayX, imgarrayY = imgarray

    import matplotlib.pyplot as plt
    plt.figure(figsize=(8, 6))
    plt.subplots_adjust(hspace=1, wspace=0.3)
    for i in range(n):
        plt.subplot(w, l, i+1)
        pixels = np.array(imgarrayX[indexes[i]]*255).reshape((28, 28))
        s = "Label: {}".format(imgarrayY[indexes[i]])
        plt.title(s)
        plt.axis('off')
        plt.imshow(pixels, cmap='gray')
    plt.show() 
Example 7
Project: MLP-Numpy-Implementation-Gradient-Descent-Backpropagation   Author: EsterHlav   File: support.py    MIT License
def showPredictedLabels(imgarray, indexes, labels):
    # takes as input a (N*784) set of data, integers (indexes of the images to show) and predicted labels
    # and prints the corresponding images along with the real and predicted labels

    # figure out the size of figure
    n = len(indexes)
    w,l = shapeGrid(n)

    imgarrayX, imgarrayY = imgarray
    import matplotlib.pyplot as plt
    plt.figure(figsize=(10, 8))
    plt.subplots_adjust(hspace=0.4, wspace=0.3)
    for i in range(n):
        plt.subplot(w, l, i+1)
        pixels = np.array(imgarrayX[indexes[i]]*255).reshape((28, 28))
        s = "True: {}, Pred: {}".format(imgarrayY[indexes[i]], labels[i])
        plt.title(s)
        plt.axis('off')
        plt.imshow(pixels, cmap='gray')
    plt.show()

# example to try
# idx = [2,9,10,387, 2839, 8473, 10, 89, 87, 1, 12, 26, 28]
# pred = [8, 2, 2, 0, 5, 7, 1, 3, 2, 0, 2, 6, 8]
# showPredictedLabels(valid, idx, pred) 
Example 8
Project: StyleGAN   Author: mgmk2   File: image_utils.py    Apache License 2.0
def show_images(images, epoch=None, mode='show'):
    if mode not in ['show', 'pause']:
        raise ValueError('Unknown mode to show images: ' + mode)

    if images.shape[-1] == 1:
        x = images[:, :, :, 0]
        cmap = 'gray'
    else:
        x = images
        cmap = None
    fig = plt.figure(figsize=(4, 4))
    for i in range(16):
        plt.subplot(4, 4, i + 1)
        plt.imshow(x[i].clip(0, 1), cmap=cmap)
        plt.axis('off')
    if epoch is not None:
        fig.suptitle('epoch: {:}'.format(epoch))

    if mode == 'pause':
        plt.pause(.05)
    else:
        plt.show() 
Example 9
Project: Deformable-ConvNets   Author: guanfuchen   File: show_boxes.py    MIT License
def show_boxes(im, dets, classes, scale = 1.0):
    plt.cla()
    plt.axis("off")
    plt.imshow(im)
    for cls_idx, cls_name in enumerate(classes):
        cls_dets = dets[cls_idx]
        for det in cls_dets:
            bbox = det[:4] * scale
            color = (rand(), rand(), rand())
            rect = plt.Rectangle((bbox[0], bbox[1]),
                                  bbox[2] - bbox[0],
                                  bbox[3] - bbox[1], fill=False,
                                  edgecolor=color, linewidth=2.5)
            plt.gca().add_patch(rect)

            if cls_dets.shape[1] == 5:
                score = det[-1]
                plt.gca().text(bbox[0], bbox[1],
                               '{:s} {:.3f}'.format(cls_name, score),
                               bbox=dict(facecolor=color, alpha=0.5), fontsize=9, color='white')
    plt.show()
    return im 
Example 10
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License
def nnls_fit( te, y, t2 ):
    A = exp(- outer( te,  r_[ [1/t2a for t2a in t2], 0]) )

    if False:
        H = 0.0*diag(1*ones((A.shape[1],)))

        #H = diag(1*ones((A.shape[1],)))
        #H = H + diag(-1*ones((A.shape[1],)), k=1)[:-1,:-1]
        yt = zeros(( A.shape[1] ))
        Att = concatenate( (A, H), axis=0 )
        ytt = concatenate( (y, yt), axis=0 )

        x = scipy.optimize.nnls(Att, ytt)[0]
    else:
        x = scipy.optimize.nnls(A, y)[0]

    ## Compute the fitted data
    y_fit = inner(A, x)

    ## Compute the chi2
    chi2 = sqrt( sum( ( y - y_fit)**2 ) )

    return x, y_fit, chi2
#    return x, 0,0 
Example 11
Project: phoneticSimilarity   Author: ronggong   File: baseline1_oracle_GOP.py    GNU Affero General Public License v3.0
def getObsLine(studentPhonemeLists, ii_line, hopsize_t, log_mel_reshaped, model_keras_cnn_0):
    line = studentPhonemeLists[ii_line][0]
    # start and end time
    time_start = line[0]
    time_end = line[1]
    frame_start = int(round(time_start / hopsize_t))
    frame_end = int(round(time_end / hopsize_t))

    # log_mel_reshape line
    log_mel_reshaped_line = log_mel_reshaped[frame_start: frame_end]
    log_mel_reshaped_line = np.expand_dims(log_mel_reshaped_line, axis=1)

    # emission probabilities
    obs_line = model_keras_cnn_0.predict(log_mel_reshaped_line, batch_size=128, verbose=0)
    obs_line = np.log(obs_line+1e-128)
    return obs_line 
Example 12
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def show(img, show_max=False):
    '''show response map'''
    img = img - img.min()
    img = img / img.max()
    plt.imshow(img, cmap='gray')
    shape = img.shape
    idx = np.argmax(img)
    hi = idx // shape[1]
    wi = idx % shape[1]
    print(hi, wi)
    if show_max:
        # plt.hold(True) is no longer needed (it was removed in matplotlib 3);
        # subsequent plot calls draw on the same axes by default
        plt.plot(wi, hi, 'r.', markersize=12)
        plt.axis('off')
        plt.axis('image')
    plt.show() 
Example 13
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def errrate(pre, label):
    '''TODO: recompute the eye distance'''
    shape = pre.shape
    idx = np.argmax(np.reshape(label, (shape[0], shape[1], shape[2]*shape[3])),
                    axis=2)
    hl = idx / shape[3]
    wl = idx % shape[3]
    idx = np.argmax(np.reshape(pre, (shape[0], shape[1], shape[2]*shape[3])),
                    axis=2)
    hp = idx / shape[3]
    wp = idx % shape[3]
    eye = np.sqrt((hl[:, 36] - hl[:, 45])*(hl[:, 36] - hl[:, 45])
                  + (wl[:, 36] - wl[:, 45])*(wl[:, 36] - wl[:, 45]))
    err = np.sqrt((hp - hl) * (hp - hl) + (wp - wl) * (wp - wl)) \
        / np.reshape(eye, (shape[0], 1))
    return np.sum(err) / shape[0] / shape[1] 
Example 14
Project: Stock_Market_Forecast   Author: cuevas1208   File: matplot_graphs.py    MIT License
def plot_histogram(p, t, dates, name='stock', confidence='???', forecast='???'):
    fig, ax = plt.subplots()

    ax.plot(p, color='g', label='prediction')
    ax.plot(t, color='b', label='ground truth')
    ax.legend()

    # window dimensions
    plt.axis([0, len(dates)/2, np.min(p)-1, np.max(t)+1])

    # x labels
    plt.xticks(range(len(dates)), dates)

    mng = plt.get_current_fig_manager()
    mng.window.state('zoomed')  # works fine on Windows!

    plt.xlabel('forecast for ' + str(forecast) + ' days from the above date')
    plt.ylabel('buy(1), hold(0), sell(-1)')
    plt.title(name + ' model confidence ' + str(confidence))

    plt.show() 
Example 15
Project: tripp   Author: mjamesruggiero   File: nlp.py    BSD 3-Clause "New" or "Revised" License
def build_buzzword_ranking():
    data = [
        ("big data", 100, 15), ("hadoop", 95, 25), ("python", 75, 50),
        ("R", 50, 40), ("machine learning", 80, 20), ("statistics", 20, 60),
        ("data science", 60, 70), ("analytics", 90, 3),
        ("team player", 85, 85), ("dynamic", 2, 90), ("synergies", 70, 0),
        ("actionable insights", 40, 30), ("self-starter", 30, 50),
        ("think out of the box", 45, 10), ("customer focus", 65, 15),
        ("thought leadership", 35, 35)
    ]

    for word, job_popularity, resume_popularity in data:
        pyplot.text(job_popularity,
                    resume_popularity,
                    word,
                    ha='center',
                    va='center',
                    size=text_size(job_popularity + resume_popularity))
    pyplot.xlabel("Popularity of jobs postings")
    pyplot.ylabel("Popularity on resumes")
    pyplot.axis([0, 100, 0, 100])
    pyplot.xticks([])
    pyplot.yticks([])
    pyplot.show() 
Example 16
Project: gandlf   Author: codekansas   File: mnist_gan.py    MIT License
def get_mnist_data(binarize=False):
    """Puts the MNIST data in the right format."""

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if binarize:
        X_test = np.where(X_test >= 10, 1, -1)
        X_train = np.where(X_train >= 10, 1, -1)
    else:
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_test = (X_test.astype(np.float32) - 127.5) / 127.5

    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)

    y_train = np.expand_dims(y_train, axis=-1)
    y_test = np.expand_dims(y_test, axis=-1)

    return (X_train, y_train), (X_test, y_test) 
Example 17
Project: pohmm-keystroke   Author: vmonaco   File: plotting.py    BSD 3-Clause "New" or "Revised" License
def plot_cdf(df, fit_fn, col):
    hmm = fit_fn(df)
    model_cdf_fn = hmm.cdf_fn(feature=col)
    empir_cdf_fn = ECDF(df[col].values)

    x = np.sort(np.unique(df[col].values))
    x = x[:int(0.9 * len(x))]

    plt.figure(figsize=(8, 8))

    plt.plot(x, empir_cdf_fn(x), color=sns.xkcd_rgb['denim blue'], label='Empirical')
    plt.plot(x, model_cdf_fn(x), 'k--', label='Predicted')
    plt.axis([x.min(), x.max(), 0, 1])
    plt.xlabel('$ \\tau $')
    plt.ylabel('Cumulative distribution')
    plt.legend(loc='lower right')
    return 
Example 18
Project: Chinese-Character-and-Calligraphic-Image-Processing   Author: MingtaoGuo   File: test.py    MIT License
def test(self):

        list_ = os.listdir("./maps/val/")
        nums_file = list_.__len__()
        saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "generator"))
        saver.restore(self.sess, "./save_para/model.ckpt")
        rand_select = np.random.randint(0, nums_file)
        INPUTS_CONDITION = np.zeros([1, self.img_h, self.img_w, 3])
        INPUTS = np.zeros([1, self.img_h, self.img_w, 3])
        img = np.array(Image.open(self.path + list_[rand_select]))
        img_h, img_w = img.shape[0], img.shape[1]
        INPUTS_CONDITION[0] = misc.imresize(img[:, img_w//2:], [self.img_h, self.img_w]) / 127.5 - 1.0
        INPUTS[0] = misc.imresize(img[:, :img_w//2], [self.img_h, self.img_w]) / 127.5 - 1.0
        [fake_img] = self.sess.run([self.inputs_fake], feed_dict={self.inputs_condition: INPUTS_CONDITION})
        out_img = np.concatenate((INPUTS_CONDITION[0], fake_img[0], INPUTS[0]), axis=1)
        Image.fromarray(np.uint8((out_img + 1.0)*127.5)).save("./results/1.jpg")
        plt.imshow(np.uint8((out_img + 1.0)*127.5))
        plt.grid(False)
        plt.axis("off")
        plt.show() 
Example 19
Project: Chinese-Character-and-Calligraphic-Image-Processing   Author: MingtaoGuo   File: test.py    MIT License
def generator(self, inputs_condition):
        inputs = inputs_condition
        with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
            inputs1 = leaky_relu(conv2d("conv1", inputs, 64, 5, 2))#128x128x128
            inputs2 = leaky_relu(instanceNorm("in1", conv2d("conv2", inputs1, 128, 5, 2)))#64x64x256
            inputs3 = leaky_relu(instanceNorm("in2", conv2d("conv3", inputs2, 256, 5, 2)))#32x32x512
            inputs4 = leaky_relu(instanceNorm("in3", conv2d("conv4", inputs3, 512, 5, 2)))#16x16x512
            inputs5 = leaky_relu(instanceNorm("in4", conv2d("conv5", inputs4, 512, 5, 2)))#8x8x512
            inputs6 = leaky_relu(instanceNorm("in5", conv2d("conv6", inputs5, 512, 5, 2)))#4x4x512
            inputs7 = leaky_relu(instanceNorm("in6", conv2d("conv7", inputs6, 512, 5, 2)))#2x2x512
            inputs8 = leaky_relu(instanceNorm("in7", conv2d("conv8", inputs7, 512, 5, 2)))#1x1x512
            outputs1 = tf.nn.relu(tf.concat([tf.nn.dropout(instanceNorm("in9", deconv2d("dconv1", inputs8, 512, 5, 2)), 0.5), inputs7], axis=3))  # 2x2x512
            outputs2 = tf.nn.relu(tf.concat([tf.nn.dropout(instanceNorm("in10", deconv2d("dconv2", outputs1, 512, 5, 2)), 0.5), inputs6], axis=3))  # 4x4x512
            outputs3 = tf.nn.relu(tf.concat([tf.nn.dropout(instanceNorm("in11", deconv2d("dconv3", outputs2, 512, 5, 2)), 0.5), inputs5], axis=3))#8x8x512
            outputs4 = tf.nn.relu(tf.concat([instanceNorm("in12", deconv2d("dconv4", outputs3, 512, 5, 2)), inputs4], axis=3))#16x16x512
            outputs5 = tf.nn.relu(tf.concat([instanceNorm("in13", deconv2d("dconv5", outputs4, 256, 5, 2)), inputs3], axis=3))#32x32x256
            outputs6 = tf.nn.relu(tf.concat([instanceNorm("in14", deconv2d("dconv6", outputs5, 128, 5, 2)), inputs2], axis=3))#64x64x128
            outputs7 = tf.nn.relu(tf.concat([instanceNorm("in15", deconv2d("dconv7", outputs6, 64, 5, 2)), inputs1], axis=3))#128x128x64
            outputs8 = tf.nn.tanh((deconv2d("dconv8", outputs7, 3, 5, 2)))#256x256x3
            return outputs8 
Example 20
Project: GMRbasedGP   Author: NoemieJaquier   File: gmr.py    MIT License
def multi_variate_normal(x, mu, sigma=None, log=True, inv_sigma=None):
	"""
	Multivariate normal distribution PDF

	:param x:		np.array([nb_samples, nb_dim])
	:param mu: 		np.array([nb_dim])
	:param sigma: 	np.array([nb_dim, nb_dim])
	:param log: 	bool
	:return:
	"""
	dx = x - mu
	if sigma.ndim == 1:
		sigma = sigma[:, None]
		dx = dx[:, None]
		inv_sigma = np.linalg.inv(sigma) if inv_sigma is None else inv_sigma
		log_lik = -0.5 * np.sum(np.dot(dx, inv_sigma) * dx, axis=1) - 0.5 * np.log(np.linalg.det(2 * np.pi * sigma))
	else:
		inv_sigma = np.linalg.inv(sigma) if inv_sigma is None else inv_sigma
		log_lik = -0.5 * np.einsum('...j,...j', dx, np.einsum('...jk,...j->...k', inv_sigma, dx)) - 0.5 * np.log(np.linalg.det(2 * np.pi * sigma))

	return log_lik if log else np.exp(log_lik) 
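
A small sanity check with toy inputs (a sketch, not part of the original project): for a 2-D standard normal the log-density at the mean is -log(2*pi) ≈ -1.838, and one unit of squared distance in each dimension subtracts 1 from that value.

import numpy as np

x = np.array([[0.0, 0.0], [1.0, -1.0]])
mu = np.zeros(2)
sigma = np.eye(2)
print(multi_variate_normal(x, mu, sigma, log=True))   # approx. [-1.8379, -2.8379]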
Example 21
Project: razzy-spinner   Author: rafasashi   File: demo.py    GNU General Public License v3.0
def _demo_plot(learning_curve_output, teststats, trainstats=None, take=None):
   testcurve = [teststats['initialerrors']]
   for rulescore in teststats['rulescores']:
       testcurve.append(testcurve[-1] - rulescore)
   testcurve = [1 - x/teststats['tokencount'] for x in testcurve[:take]]

   traincurve = [trainstats['initialerrors']]
   for rulescore in trainstats['rulescores']:
       traincurve.append(traincurve[-1] - rulescore)
   traincurve = [1 - x/trainstats['tokencount'] for x in traincurve[:take]]

   import matplotlib.pyplot as plt
   r = list(range(len(testcurve)))
   plt.plot(r, testcurve, r, traincurve)
   plt.axis([None, None, None, 1.0])
   plt.savefig(learning_curve_output) 
Example 22
Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection   Author: Sunarker   File: demo.py    MIT License
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw() 
Example 23
Project: FasterRCNN_TF_Py3   Author: upojzsb   File: demo.py    MIT License
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
        )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw() 
Example 24
Project: mmdetection   Author: open-mmlab   File: recall.py    Apache License 2.0
def _recalls(all_ious, proposal_nums, thrs):

    img_num = all_ious.shape[0]
    total_gt_num = sum([ious.shape[0] for ious in all_ious])

    _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
    for k, proposal_num in enumerate(proposal_nums):
        tmp_ious = np.zeros(0)
        for i in range(img_num):
            ious = all_ious[i][:, :proposal_num].copy()
            gt_ious = np.zeros((ious.shape[0]))
            if ious.size == 0:
                tmp_ious = np.hstack((tmp_ious, gt_ious))
                continue
            for j in range(ious.shape[0]):
                gt_max_overlaps = ious.argmax(axis=1)
                max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
                gt_idx = max_ious.argmax()
                gt_ious[j] = max_ious[gt_idx]
                box_idx = gt_max_overlaps[gt_idx]
                ious[gt_idx, :] = -1
                ious[:, box_idx] = -1
            tmp_ious = np.hstack((tmp_ious, gt_ious))
        _ious[k, :] = tmp_ious

    _ious = np.fliplr(np.sort(_ious, axis=1))
    recalls = np.zeros((proposal_nums.size, thrs.size))
    for i, thr in enumerate(thrs):
        recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)

    return recalls 
Example 25
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License
def random_targets(gt, nb_classes):
    """
    Take in an array of correct labels and randomly select a different label
    for each label in the array. This is typically used to randomly select a
    target class in targeted adversarial example attacks (i.e., when the
    search algorithm takes in both a source class and target class to compute
    the adversarial example).
    :param gt: the ground truth (correct) labels. They can be provided as a
               1D vector or 2D array of one-hot encoded labels.
    :param nb_classes: The number of classes for this task. The random class
                       will be chosen between 0 and nb_classes such that it
                       is different from the correct class.
    :return: A numpy array holding the randomly-selected target classes
             encoded as one-hot labels.
    """
    # If the ground truth labels are encoded as one-hot, convert to labels.
    if len(gt.shape) == 2:
        gt = np.argmax(gt, axis=1)

    # This vector will hold the randomly selected labels.
    result = np.zeros(gt.shape, dtype=np.int32)

    for class_ind in xrange(nb_classes):
        # Compute all indices in that class.
        in_cl = gt == class_ind
        size = np.sum(in_cl)

        # Compute the set of potential targets for this class.
        potential_targets = other_classes(nb_classes, class_ind)

        # Draw with replacement random targets among the potential targets.
        result[in_cl] = np.random.choice(potential_targets, size=size)

    # Encode vector of random labels as one-hot labels.
    result = to_categorical(result, nb_classes)
    result = result.astype(np.int32)

    return result 
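
A hypothetical call (not from the original test suite), assuming other_classes() and to_categorical() from the same utils module are available and that the code runs under Python 2 or with xrange aliased to range:

import numpy as np

gt = np.array([0, 1, 2, 1])                  # ground-truth labels
targets = random_targets(gt, nb_classes=3)   # one-hot array of shape (4, 3)
# every sampled target class differs from the corresponding true class
assert not np.any(targets.argmax(axis=1) == gt)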
Example 26
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License
def pair_visual(original, adversarial, figure=None):
    """
    This function displays two images: the original and the adversarial sample
    :param original: the original input
    :param adversarial: the input after perturbations have been applied
    :param figure: if we've already displayed images, use the same plot
    :return: the matplot figure to reuse for future samples
    """
    import matplotlib.pyplot as plt

    # Squeeze the image to remove single-dimensional entries from array shape
    original = np.squeeze(original)
    adversarial = np.squeeze(adversarial)

    # Ensure our inputs are of proper shape
    assert(len(original.shape) == 2 or len(original.shape) == 3)

    # To avoid creating figures per input sample, reuse the sample plot
    if figure is None:
        plt.ion()
        figure = plt.figure()
        figure.canvas.set_window_title('Cleverhans: Pair Visualization')

    # Add the images to the plot
    perterbations = adversarial - original
    for index, image in enumerate((original, perterbations, adversarial)):
        figure.add_subplot(1, 3, index + 1)
        plt.axis('off')

        # If the image is 2D, then we have 1 color channel
        if len(image.shape) == 2:
            plt.imshow(image, cmap='gray')
        else:
            plt.imshow(image)

        # Give the plot some time to update
        plt.pause(0.01)

    # Draw the plot and return
    plt.show()
    return figure 
Example 27
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License
def grid_visual(data):
    """
    This function displays a grid of images to show full misclassification
    :param data: grid data of the form;
        [nb_classes : nb_classes : img_rows : img_cols : nb_channels]
    :return: if necessary, the matplot figure to reuse
    """
    import matplotlib.pyplot as plt

    # Ensure interactive mode is disabled and initialize our graph
    plt.ioff()
    figure = plt.figure()
    figure.canvas.set_window_title('Cleverhans: Grid Visualization')

    # Add the images to the plot
    num_cols = data.shape[0]
    num_rows = data.shape[1]
    num_channels = data.shape[4]
    current_row = 0
    for y in xrange(num_rows):
        for x in xrange(num_cols):
            figure.add_subplot(num_rows, num_cols, (x + 1) + (y * num_cols))
            plt.axis('off')

            if num_channels == 1:
                plt.imshow(data[x, y, :, :, 0], cmap='gray')
            else:
                plt.imshow(data[x, y, :, :, :])

    # Draw the plot and return
    plt.show()
    return figure 
Example 28
Project: Random-Erasing   Author: zhunzhong07   File: visualize.py    Apache License 2.0
def show_mask_single(images, mask, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
    im_size = images.size(2)

    # save for adding mask
    im_data = images.clone()
    for i in range(0, 3):
        im_data[:,i,:,:] = im_data[:,i,:,:] * Std[i] + Mean[i]    # unnormalize

    images = make_image(torchvision.utils.make_grid(images), Mean, Std)
    plt.subplot(2, 1, 1)
    plt.imshow(images)
    plt.axis('off')

    # for b in range(mask.size(0)):
    #     mask[b] = (mask[b] - mask[b].min())/(mask[b].max() - mask[b].min())
    mask_size = mask.size(2)
    # print('Max %f Min %f' % (mask.max(), mask.min()))
    mask = (upsampling(mask, scale_factor=im_size/mask_size))
    # mask = colorize(upsampling(mask, scale_factor=im_size/mask_size))
    # for c in range(3):
    #     mask[:,c,:,:] = (mask[:,c,:,:] - Mean[c])/Std[c]

    # print(mask.size())
    mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask.expand_as(im_data)))
    # mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask), Mean, Std)
    plt.subplot(2, 1, 2)
    plt.imshow(mask)
    plt.axis('off') 
Example 29
Project: Random-Erasing   Author: zhunzhong07   File: visualize.py    Apache License 2.0
def show_mask(images, masklist, Mean=(2, 2, 2), Std=(0.5,0.5,0.5)):
    im_size = images.size(2)

    # save for adding mask
    im_data = images.clone()
    for i in range(0, 3):
        im_data[:,i,:,:] = im_data[:,i,:,:] * Std[i] + Mean[i]    # unnormalize

    images = make_image(torchvision.utils.make_grid(images), Mean, Std)
    plt.subplot(1+len(masklist), 1, 1)
    plt.imshow(images)
    plt.axis('off')

    for i in range(len(masklist)):
        mask = masklist[i].data.cpu()
        # for b in range(mask.size(0)):
        #     mask[b] = (mask[b] - mask[b].min())/(mask[b].max() - mask[b].min())
        mask_size = mask.size(2)
        # print('Max %f Min %f' % (mask.max(), mask.min()))
        mask = (upsampling(mask, scale_factor=im_size/mask_size))
        # mask = colorize(upsampling(mask, scale_factor=im_size/mask_size))
        # for c in range(3):
        #     mask[:,c,:,:] = (mask[:,c,:,:] - Mean[c])/Std[c]

        # print(mask.size())
        mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask.expand_as(im_data)))
        # mask = make_image(torchvision.utils.make_grid(0.3*im_data+0.7*mask), Mean, Std)
        plt.subplot(1+len(masklist), 1, i+2)
        plt.imshow(mask)
        plt.axis('off')



# x = torch.zeros(1, 3, 3)
# out = colorize(x)
# out_im = make_image(out)
# plt.imshow(out_im)
# plt.show() 
Example 30
Project: deep-learning-note   Author: wdxtub   File: 16_basic_kernels.py    MIT License
def show_images(images, rgb=True):
    gs = gridspec.GridSpec(1, len(images))
    for i, image in enumerate(images):
        plt.subplot(gs[0, i])
        if rgb:
            plt.imshow(image)
        else:
            image = image.reshape(image.shape[0], image.shape[1])
            plt.imshow(image, cmap='gray')
        plt.axis('off')
    plt.show() 
Example 31
Project: deep-learning-note   Author: wdxtub   File: 2_serving_and_predict.py    MIT License
def show(idx, title):
  plt.figure()
  plt.imshow(test_images[idx].reshape(28,28))
  plt.axis('off')
  plt.title('\n\n{}'.format(title), fontdict={'size': 16})
  plt.show() 
Example 32
Project: black-widow   Author: BLQ-Software   File: run_interactive.py    MIT License
def do_show(self, line):
        """Shows the network.

        Parameters
        ----------
        line : string
            A string containing command line arguments. Ignored.
        """
        try:
            # Get the pydot object for the network
            d = self.network.dump(self.output)

            # Set the dpi
            d.set_dpi(self.dpi)

            # Get a PNG string with the specified projection
            png_str = d.create_png(prog=self.proj)

            # Write the string to a StringIO object
            sio = StringIO()
            sio.write(png_str)
            sio.seek(0)

            # Show the image
            image = mpimg.imread(sio)
            plt.axis('off')
            plt.imshow(image)
            plt.show()
        except Exception as e:
            print(e)
Example 33
Project: helloworld   Author: pip-uninstaller-python   File: matplotlibTest.py    GNU General Public License v2.0
def main():
    # line
    x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    c, s = np.cos(x), np.sin(x)
    plt.figure(1)
    plt.plot(x, c, color="blue", linewidth=1.0, linestyle="-", label="COS", alpha=0.5)  # independent variable, dependent variable
    plt.plot(x, s, "r.", label="SIN")  # sine; format strings such as "-"/"r-"/"r."
    plt.title("COS & SIN")
    ax = plt.gca()
    ax.spines["right"].set_color("none")
    ax.spines["top"].set_color("none")
    ax.spines["left"].set_position(("data", 0))  # 横轴位置
    ax.spines["bottom"].set_position(("data", 0))  # 纵轴位置
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    plt.xticks([-np.pi, -np.pi / 2.0, 0, np.pi / 2, np.pi],
               [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True))
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(16)
        label.set_bbox(dict(facecolor="white", edgecolor="None", alpha=0.2))
    plt.legend(loc="upper left")  # show the legend in the upper-left corner
    plt.grid()  # grid lines
    # plt.axis([-1, 1, -0.5, 1])  # display range
    plt.fill_between(x, np.abs(x) < 0.5, c, c < 0.5, color="green", alpha=0.25)
    t = 1
    plt.plot([t, t], [0, np.cos(t)], "y", linewidth=3, linestyle="--")
    # annotation
    plt.annotate("cos(1)", xy=(t, np.cos(1)), xycoords="data", xytext=(+10, +30),
                 textcoords="offset points", arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.2"))
    plt.show()


# Scatter --> scatter plot
Example 34
Project: DOTA_models   Author: ringringyi   File: plot_lfads.py    Apache License 2.0
def plot_priors():
  g0s_prior_mean_bxn = train_modelvals['prior_g0_mean']
  g0s_prior_var_bxn = train_modelvals['prior_g0_var']
  g0s_post_mean_bxn = train_modelvals['posterior_g0_mean']
  g0s_post_var_bxn = train_modelvals['posterior_g0_var']

  plt.figure(figsize=(10,4), tight_layout=True);
  plt.subplot(1,2,1)
  plt.hist(g0s_post_mean_bxn.flatten(), bins=20, color='b');
  plt.hist(g0s_prior_mean_bxn.flatten(), bins=20, color='g');

  plt.title('Histogram of Prior/Posterior Mean Values')
  plt.subplot(1,2,2)
  plt.hist((g0s_post_var_bxn.flatten()), bins=20, color='b');
  plt.hist((g0s_prior_var_bxn.flatten()), bins=20, color='g');
  plt.title('Histogram of Prior/Posterior Log Variance Values')

  plt.figure(figsize=(10,10), tight_layout=True)
  plt.subplot(2,2,1)
  plt.imshow(g0s_prior_mean_bxn.T, interpolation='nearest', cmap='jet')
  plt.colorbar(fraction=0.025, pad=0.04)
  plt.title('Prior g0 means')

  plt.subplot(2,2,2)
  plt.imshow(g0s_post_mean_bxn.T, interpolation='nearest', cmap='jet')
  plt.colorbar(fraction=0.025, pad=0.04)
  plt.title('Posterior g0 means');

  plt.subplot(2,2,3)
  plt.imshow(g0s_prior_var_bxn.T, interpolation='nearest', cmap='jet')
  plt.colorbar(fraction=0.025, pad=0.04)
  plt.title('Prior g0 variance Values')

  plt.subplot(2,2,4)
  plt.imshow(g0s_post_var_bxn.T, interpolation='nearest', cmap='jet')
  plt.colorbar(fraction=0.025, pad=0.04)
  plt.title('Posterior g0 variance Values')

  plt.figure(figsize=(10,5))
  plt.stem(np.sort(np.log(g0s_post_mean_bxn.std(axis=0))));
  plt.title('Log standard deviation of h0 means'); 
Example 35
Project: cvpr2018-hnd   Author: kibok90   File: test.py    MIT License
def counters_to_results_super(counters, results, pts, num_models):
    
    num_pts = pts.shape[0]
    
    for m in range(num_models):
        b_known_testable = counters['data'][m] > 0
        b_novel_testable = b_known_testable[-1]
        b_known_testable[-1] = False
        b_known_testable_any = b_known_testable.any()
        
        for p in range(num_pts):
            if b_known_testable_any:
                # results['acc']['known'][p,m] = \
                    # (counters['acc'][p,m][b_known_testable] / counters['data'][m][b_known_testable]).mean()
                results['acc']['known'][p,m] = counters['acc'][p,m][:-1].sum() / counters['data'][m][:-1].sum()
            if b_novel_testable:
                results['acc']['novel'][p,m] = counters['acc'][p,m][-1] / counters['data'][m][-1]
            
            if b_known_testable_any and b_novel_testable:
                results['acc']['harmonic'][p,m] = harmonic_mean(results['acc']['known'][p,m],
                                                                results['acc']['novel'][p,m])
            elif b_known_testable_any:
                results['acc']['harmonic'][p,m] = results['acc']['known'][p,m]
            elif b_novel_testable:
                results['acc']['harmonic'][p,m] = results['acc']['novel'][p,m]
    
    # find optimal points
    i_opt = {'global': 0, 'local': []}
    results['acc_opt'] = {'global': {'known': [], 'novel': [], 'harmonic': []},
                          'local' : {'known': [], 'novel': [], 'harmonic': []}}
    results['ths_opt'] = {'global': 0., 'local': []}
    
    i_opt['global'] = results['acc']['harmonic'].mean(axis=1).argmax(axis=0)
    i_opt['local']  = results['acc']['harmonic'].argmax(axis=0)
    for mtype in ['known', 'novel', 'harmonic']:
        results['acc_opt']['global'][mtype] = results['acc'][mtype][i_opt['global']]
        results['acc_opt']['local'][mtype]  = results['acc'][mtype][i_opt['local'], range(num_models)]
    results['ths_opt']['global'] = pts[i_opt['global']]
    results['ths_opt']['local']  = pts[i_opt['local']] 
Example 36
Project: CAFA_assessment_tool   Author: ashleyzhou972   File: plot.py    GNU General Public License v3.0
def plotMultiple(title,listofResults,smooth):
    '''
    supply a list of result objects, each providing precision, recall and a method name
    '''
    fontP = FontProperties()
    fontP.set_size('small')
    num = len(listofResults)
    pal=sns.color_palette("Paired", num)
    colors=pal.as_hex()
    for j,i in enumerate(listofResults):
        linetype = '-'
        if smooth=='Y':
            ax = plt.subplot()
            precision = curveSmooth(i)[0][1:]
            recall = curveSmooth(i)[1][1:]
            ax.plot(recall,precision,linetype,color=colors[j],label=i.method+':\nF=%s C=%s'%(i.opt,i.coverage)) 
            ax.plot(i.recall[int(i.thres*100)],i.precision[int(i.thres*100)],'o',color=colors[j])
        elif smooth=='N':
            ax = plt.subplot()
            ax.plot(i.recall,i.precision,linetype,color=colors[j],label=i.method+':\nF=%s C=%s'%(i.opt,i.coverage))
            ax.plot(i.recall[int(i.thres*100)],i.precision[int(i.thres*100)],'o',color=colors[j])
    plt.axis([0,1,0,1])
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    plt.yticks(numpy.arange(0,1,0.1))
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.title(title)
    figurename = os.path.join('./plots/',title)       
    plt.savefig(figurename,dpi=200)
    plt.close() 
Example 37
Project: ImageQA   Author: codedecde   File: plotAttention.py    MIT License
def plotAttention (image_file, question, alpha, smooth=True):
    
    ## Parameters
    #
    # image_file : Path to image file.
    # question   : List of question string words (tokenised)
    # alpha      : NP array of size (len(question), 196) or List of len(question) NP vectors of shape (196, )
    # smooth     : Parameter for scaling alpha
    #

    img = LoadImage(image_file)
    n_words = len(question) + 1
    w = np.round(np.sqrt(n_words))
    h = np.ceil(np.float32(n_words) / w)
            
    plt.subplot(w, h, 1)
    plt.imshow(img)
    plt.axis('off')

    for ii in xrange(alpha.shape[0]):
        plt.subplot(w, h, ii+2)
        lab = question[ii]
        plt.text(0, 1, lab, backgroundcolor='white', fontsize=13)
        plt.text(0, 1, lab, color='black', fontsize=13)
        plt.imshow(img)
        if smooth:
            alpha_img = skimage.transform.pyramid_expand(alpha[ii].reshape(14,14), upscale=32)
        else:
            alpha_img = skimage.transform.resize(alpha[ii].reshape(14,14), [img.shape[0], img.shape[1]])
        plt.imshow(alpha_img, alpha=0.8)
        plt.set_cmap(cm.Greys_r)
        plt.axis('off') 
Example 38
Project: nxt-sketcher   Author: simondolle   File: printer.py    MIT License
def get_xy(alpha, beta, structure_settings):

    r = structure_settings.r  # short arm length (attached to the rotative axis)
    a = structure_settings.a  # long arm length
    s = structure_settings.s  # pen distance

    xa = structure_settings.xa #left short arm x
    xb = structure_settings.xb #right short arm x


    # d is the first short arm extremity
    xd = xa - r * math.sin(alpha)
    yd = r * math.cos(alpha)

    # e is the second short arm extremity
    xe = xb - r * math.sin(beta)
    ye = r * math.cos(beta)

    de = compute_distance(xd, yd, xe, ye)

    #theta is the angle formed by de and the left long arm
    cos_theta = de/float(2 * a)
    cos_theta = min(cos_theta, 1.0)
    cos_theta = max(cos_theta, -1.0)
    theta = math.acos(cos_theta)

    # gamma is the angle formed by a horizontal axis and de
    tan_gamma = (ye-yd)/float(xe-xd)
    gamma = math.atan(tan_gamma)

    # lambda is the angle formed by a horizontal axis and the left long arm
    lam = theta + gamma
    xt = xd + a * math.cos(lam) - s * math.sin(lam)
    yt = yd + a * math.sin(lam) + s * math.cos(lam)

    return xt, yt 
Example 39
Project: nxt-sketcher   Author: simondolle   File: printer.py    MIT License
def __init__(self):
        self.r = 3  # short arm length (attached to the rotative axis)
        self.a = 12  # long arm length
        self.s = 1  # pen distance

        self.xa = -5 #left short arm x
        self.xb = 5 #right short arm x

        self.gear_ratio = 3 
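
A minimal usage sketch combining Examples 38 and 39, assuming get_xy and its compute_distance helper are importable from the same printer.py module; the SimpleNamespace is a hypothetical stand-in for the settings object, using the default values above.

import math
from types import SimpleNamespace

settings = SimpleNamespace(r=3, a=12, s=1, xa=-5, xb=5)   # defaults from Example 39
xt, yt = get_xy(math.radians(10), math.radians(-10), settings)
print(round(xt, 2), round(yt, 2))   # pen position for the two short-arm angles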
Example 40
Project: MLP-Numpy-Implementation-Gradient-Descent-Backpropagation   Author: EsterHlav   File: support.py    MIT License
def softmax(x):
    # apply softmax on a vector

    log_c = np.max(x, axis=x.ndim - 1, keepdims=True)
    #for numerical stability
    y = np.sum(np.exp(x - log_c), axis=x.ndim - 1, keepdims=True)
    x = np.exp(x - log_c)/y

    return x 
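
A quick check of the numerical-stability trick (a sketch, not from the original file): subtracting log_c keeps the exponentials finite even for large inputs, and each row of the output still sums to 1.

import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [1000.0, 1001.0, 1002.0]])   # would overflow np.exp without the shift
p = softmax(x)
print(p.sum(axis=1))   # approximately [1. 1.]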
Example 41
Project: Deformable-ConvNets   Author: guanfuchen   File: show_offset.py    MIT License
def show_dpsroi_offset(im, boxes, offset, classes, trans_std=0.1):
    plt.cla()
    for idx, bbox in enumerate(boxes):
        plt.figure(idx+1)
        plt.axis("off")
        plt.imshow(im)

        offset_w = np.squeeze(offset[idx, classes[idx]*2, :, :]) * trans_std
        offset_h = np.squeeze(offset[idx, classes[idx]*2+1, :, :]) * trans_std
        x1 = int(bbox[0])
        y1 = int(bbox[1])
        x2 = int(bbox[2])
        y2 = int(bbox[3])
        roi_width = x2-x1+1
        roi_height = y2-y1+1
        part_size = offset_w.shape[0]
        bin_size_w = roi_width / part_size
        bin_size_h = roi_height / part_size
        show_boxes_simple(bbox, color='b')
        for ih in range(part_size):
            for iw in range(part_size):
                sub_box = np.array([x1+iw*bin_size_w, y1+ih*bin_size_h,
                                    x1+(iw+1)*bin_size_w, y1+(ih+1)*bin_size_h])
                sub_offset = offset_h[ih, iw] * np.array([0, 1, 0, 1]) * roi_height \
                             + offset_w[ih, iw] * np.array([1, 0, 1, 0]) * roi_width
                sub_box = sub_box + sub_offset
                show_boxes_simple(sub_box)
        plt.show() 
Example 42
Project: Deformable-ConvNets   Author: guanfuchen   File: show_offset.py    MIT License
def show_dconv_offset(im, all_offset, step=[2, 2], filter_size=3,
                      dilation=2, pad=2, plot_area=2, plot_level=3):
    vis_attr = {'filter_size': filter_size, 'dilation': dilation, 'pad': pad,
                'plot_area': plot_area, 'plot_level': plot_level}

    map_h = all_offset[0].shape[2]
    map_w = all_offset[0].shape[3]

    step_h = step[0]
    step_w = step[1]
    start_h = int(np.round(step_h / 2))  # int so the values can be used with range() below
    start_w = int(np.round(step_w / 2))

    plt.figure()
    for im_h in range(start_h, map_h, step_h):
        for im_w in range(start_w, map_w, step_w):
            target_point = np.array([im_h, im_w])
            source_y = int(np.round(target_point[0] * im.shape[0] / map_h))  # int: used as array indices below
            source_x = int(np.round(target_point[1] * im.shape[1] / map_w))
            if source_y < plot_area or source_x < plot_area \
                    or source_y >= im.shape[0] - plot_area or source_x >= im.shape[1] - plot_area:
                continue

            cur_im = np.copy(im)
            source_points = get_bottom_position(vis_attr, [target_point], all_offset)
            cur_im = plot_according_to_point(vis_attr, cur_im, source_points, map_h, map_w)
            cur_im[source_y-plot_area:source_y+plot_area+1, source_x-plot_area:source_x+plot_area+1, :] = \
                np.tile(np.reshape([0, 255, 0], (1, 1, 3)), (2*plot_area+1, 2*plot_area+1, 1))


            plt.axis("off")
            plt.imshow(cur_im)
            plt.show(block=False)
            plt.pause(0.01)
            plt.clf() 
Example 43
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License
def vimage(d):
    """ Very slow at this point """

    mimage(d[:,:,1])
    main_ax = gca()
    
    axslice  = axes([0.1, 0.1, 0.8, 0.05])
    axis('off')
    slice_slider = Slider(axslice, 'Slice', 1, d.shape[2], valinit=1)

    def update(val):
        axes(main_ax)
        mimage(d[:,:,int(slice_slider.val)])

    slice_slider.on_changed(update) 
Example 44
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License
def mimage(d, cmap=gray):
    imshow( d )
    axis('image') # needed so that ginput doesn't resize the image
    clim([ prctile(d,1) , prctile(d, 99) ])
    xticks([])
    yticks([])
#    gca().get_axes().set_position([0,0,1,1]) #commented out by ny temporarily 
Example 45
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License
def correctB0( target_freqs, freq, data, minp, percentileThreshold=60 ):

    ##  Calculate the threshold
    thresh = prctile( data[0], percentileThreshold )    

    ##  Create the pos and neg offset information
    output = zeros( concatenate( (array([len(target_freqs)]), data[0].shape), axis=0 ) )

    coords = array( np.nonzero( data[0] > thresh ) ).transpose()

    pbar = ProgressBar(widgets=['Calc Asym', Percentage(), Bar(), ETA()], maxval=coords.shape[0]).start()
    for ii,coord in enumerate(coords):
        ##  Get the data.
        s,r,c = coord

        for jj, tf in enumerate( target_freqs ):

            inds = nonzero( abs( freq - tf ) < 300 )[0]

            # Do the positive side
            mm = data[inds,s,r,c]
            tt = interpolate.splrep( freq[inds], mm, k=1 )

            output[jj,s,r,c] = interpolate.splev( tf + minp[s,r,c], tt )

        pbar.update(ii)

    pbar.finish()

    return output

##  Fix the asymmetry map based on the water offset calculated above 
Example 46
Project: pyCEST   Author: pganssle   File: cjlib.py    MIT License
def meanT2( x, t2v ):
    t2 = [ dot( x[:,s,r,c], r_[t2v,1] )/sum(x[:,s,r,c]) for s in range(x.shape[1]) for r in range(x.shape[2]) for c in range(x.shape[3]) ]
    t2 = array(t2).reshape( x.shape[1:] )
    t2[ nonzero( x[:-1].sum(axis=0) == 0 ) ] = 0
    return t2 
Example 47
Project: phoneticSimilarity   Author: ronggong   File: baseline1_oracle_GOP.py    GNU Affero General Public License v3.0
def figurePlot(obs):
    # plot Error analysis figures
    plt.figure()
    plt.imshow(obs)
    plt.xlabel('phone indices', fontsize=12)
    plt.ylabel('frames', fontsize=12)
    plt.axis('tight')
    plt.show() 
Example 48
Project: phoneticSimilarity   Author: ronggong   File: baseline3_oracle_Embedding_classifier.py    GNU Affero General Public License v3.0
def figurePlot(obs):
    # plot Error analysis figures
    plt.figure()
    plt.imshow(obs)
    plt.xlabel('phone indices', fontsize=12)
    plt.ylabel('frames', fontsize=12)
    plt.axis('tight')
    plt.show() 
Example 49
Project: phoneticSimilarity   Author: ronggong   File: baseline4_oracle_Embedding_frame_level.py    GNU Affero General Public License v3.0
def figurePlot(obs):
    # plot Error analysis figures
    plt.figure()
    plt.imshow(obs)
    plt.xlabel('phone indices', fontsize=12)
    plt.ylabel('frames', fontsize=12)
    plt.axis('tight')
    plt.show() 
Example 50
Project: snn_global_pattern_induction   Author: chrhenning   File: mnist_data.py    Apache License 2.0
def plot_sample(sample, interactive=False, file_name=None):
        """Plot a single MNIST sample.

        This method is thought to be helpful for evaluation and debugging
        purposes.

        Args:
            sample: An instance of the class Sample, that has been generated by
                the MNISTData class (it must contain the raw image data).
            interactive: Turn on interactive mode, so the program keeps running
                in the background while the figure is displayed. The figure
                stays open until another one is displayed, the user closes it,
                or the program terminates. If this option is deactivated, the
                program blocks until the user closes the figure.
            file_name: (optional) If a file name is provided, then the image
                will be written into a file instead of plotted to the screen.

        Returns:
        """
        plt.title('Label of shown sample: %d' % sample.label)
        plt.axis('off')
        if interactive:
            plt.ion()
        plt.imshow(sample.raw)
        if file_name is not None:
            plt.savefig(file_name, bbox_inches='tight')
        else:
            plt.show() 
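
A hypothetical call, assuming plot_sample is exposed as a static method of the MNISTData class mentioned in the docstring; the SimpleNamespace stands in for a Sample instance, since only the label and raw attributes are used here.

from types import SimpleNamespace
import numpy as np

sample = SimpleNamespace(label=7, raw=np.random.rand(28, 28))   # fake 28x28 image
MNISTData.plot_sample(sample, interactive=False)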
Example 51
Project: RingNet   Author: soubhiksanyal   File: demo.py    MIT License
def visualize(img, proc_param, verts, cam, img_name='test_image'):
    """
    Renders the result in original image coordinate frame.
    """
    cam_for_render, vert_shifted = vis_util.get_original(
        proc_param, verts, cam, img_size=img.shape[:2])

    # Render results
    rend_img_overlay = renderer(
        vert_shifted*1.0, cam=cam_for_render, img=img, do_alpha=True)
    rend_img = renderer(
        vert_shifted*1.0, cam=cam_for_render, img_size=img.shape[:2])
    rend_img_vp1 = renderer.rotated(
        vert_shifted, 30, cam=cam_for_render, img_size=img.shape[:2])

    import matplotlib.pyplot as plt
    fig = plt.figure(1)
    plt.clf()
    plt.subplot(221)
    plt.imshow(img)
    plt.title('input')
    plt.axis('off')
    plt.subplot(222)
    plt.imshow(rend_img_overlay)
    plt.title('3D Mesh overlay')
    plt.axis('off')
    plt.subplot(223)
    plt.imshow(rend_img)
    plt.title('3D mesh')
    plt.axis('off')
    plt.subplot(224)
    plt.imshow(rend_img_vp1)
    plt.title('diff vp')
    plt.axis('off')
    plt.draw()
    plt.show(block=False)
    fig.savefig(img_name + '.png')
    # import ipdb
    # ipdb.set_trace() 
Example 52
Project: RingNet   Author: soubhiksanyal   File: demo.py    MIT License
def main(config, template_mesh):
    sess = tf.Session()
    model = RingNet_inference(config, sess=sess)
    input_img, proc_param, img = preprocess_image(config.img_path)
    vertices, flame_parameters = model.predict(np.expand_dims(input_img, axis=0), get_parameters=True)
    cams = flame_parameters[0][:3]
    visualize(img, proc_param, vertices[0], cams, img_name=config.out_folder + '/images/' + config.img_path.split('/')[-1][:-4])

    if config.save_obj_file:
        if not os.path.exists(config.out_folder + '/mesh'):
            os.mkdir(config.out_folder + '/mesh')
        mesh = Mesh(v=vertices[0], f=template_mesh.f)
        mesh.write_obj(config.out_folder + '/mesh/' + config.img_path.split('/')[-1][:-4] + '.obj')

    if config.save_flame_parameters:
        if not os.path.exists(config.out_folder + '/params'):
            os.mkdir(config.out_folder + '/params')
        flame_parameters_ = {'cam':  flame_parameters[0][:3], 'pose': flame_parameters[0][3:3+config.pose_params], 'shape': flame_parameters[0][3+config.pose_params:3+config.pose_params+config.shape_params],
         'expression': flame_parameters[0][3+config.pose_params+config.shape_params:]}
        np.save(config.out_folder + '/params/' + config.img_path.split('/')[-1][:-4] + '.npy', flame_parameters_)

    if config.neutralize_expression:
        from util.using_flame_parameters import make_prdicted_mesh_neutral
        if not os.path.exists(config.out_folder + '/neutral_mesh'):
            os.mkdir(config.out_folder + '/neutral_mesh')
        neutral_mesh = make_prdicted_mesh_neutral(config.out_folder + '/params/' + config.img_path.split('/')[-1][:-4] + '.npy', config.flame_model_path)
        neutral_mesh.write_obj(config.out_folder + '/neutral_mesh/' + config.img_path.split('/')[-1][:-4] + '.obj') 
Example 53
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def errrate_v2(pre, label):
    '''TODO: recompute the eye distance'''
    shape = pre.shape
    wl = label[:, :, 0]
    hl = label[:, :, 1]
    idx = np.argmax(np.reshape(pre, (shape[0], shape[1], shape[2]*shape[3])),
                    axis=2)
    hp = idx / shape[3]
    wp = idx % shape[3]
    eye = np.sqrt((hl[:, 36] - hl[:, 45])*(hl[:, 36] - hl[:, 45])
                  + (wl[:, 36] - wl[:, 45])*(wl[:, 36] - wl[:, 45]))
    err = np.sqrt((hp - hl) * (hp - hl) + (wp - wl) * (wp - wl)) \
        / np.reshape(eye, (shape[0], 1))
    return np.sum(err) / shape[0] / shape[1] 
Example 54
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def errrate_v3(pre, label):
    shape = pre.shape
    (hp, wp) = get_index(pre)
    wl = label[:, :, 0]
    hl = label[:, :, 1]
    eye = np.sqrt(np.square(hl[:, 36:42].mean(axis=1) -
                            hl[:, 42:48].mean(axis=1)) +
                  np.square(wl[:, 36:42].mean(axis=1) -
                            wl[:, 42:48].mean(axis=1)))
    err = np.sqrt((hp - hl) * (hp - hl) + (wp - wl) * (wp - wl)) \
        / np.reshape(eye, (shape[0], 1))
    return np.sum(err) / shape[0] / shape[1] 
Example 55
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def errrate_v4(pre, label):
    shape = pre.shape
    wp = pre[:, :, 0]
    hp = pre[:, :, 1]
    wl = label[:, :, 0]
    hl = label[:, :, 1]
    eye = np.sqrt(np.square(hl[:, 36:42].mean(axis=1) -
                            hl[:, 42:48].mean(axis=1)) +
                  np.square(wl[:, 36:42].mean(axis=1) -
                            wl[:, 42:48].mean(axis=1)))
    err = np.sqrt((hp - hl) * (hp - hl) + (wp - wl) * (wp - wl)) \
        / np.reshape(eye, (shape[0], 1))
    return np.sum(err) / shape[0] / shape[1] 
Example 56
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def get_index_mean(pre):
    shape = pre.shape
    h = np.reshape(np.array(range(shape[2])), (1, 1, shape[2], 1))
    w = np.reshape(np.array(range(shape[3])), (1, 1, 1, shape[3]))
    shape = np.ndarray((shape[0], shape[1], 2))
    shape[:, :, 0] = np.sum(np.sum(pre * w, axis=3), axis=2)
    shape[:, :, 1] = np.sum(np.sum(pre * h, axis=3), axis=2)
    return shape 
Example 57
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def softmax(feat):
    '''
    :param feat: a M*N matrix,
    :return: the softmax of the second axis.
    '''
    feat_max = feat.max(1)
    feat = feat - feat_max.reshape((feat_max.shape[0], 1))
    feat_exp = np.exp(feat)
    feat_sum = feat_exp.sum(1)
    prob = np.divide(feat_exp, feat_sum.reshape((feat_sum.shape[0], 1)))
    return prob 
Example 58
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def normalize(feat):
    '''
    :param feat: a N*N matrix
    :return: normalize of the second axis.
    '''
    feat_sum = feat.sum(1)
    feat = feat / feat_sum.reshape((feat_sum.shape[0], 1))
    return feat 
Example 59
Project: gardenia   Author: xuzhenqi   File: util.py    GNU General Public License v3.0
def show_predict(img, shape, label=None):
    '''
    :param img: H*W*3 array
    :param label: 68*2 point labels
    :return: None
    '''
    if img is not None:
        plt.imshow(img)
        plt.axis('image')
    # plt.hold(True) is no longer needed (it was removed in matplotlib 3);
    # subsequent plot calls draw on the same axes by default
    plt.plot(shape[::2], shape[1::2], 'r.', markersize=12)
    if label is not None:
        plt.plot(label[::2], label[1::2], 'g.', markersize=12)
    plt.axis('off')
    plt.show() 
Example 60
Project: Stock_Market_Forecast   Author: cuevas1208   File: helper_Functions.py    MIT License
def dataSetInfo(X_train, y_train, X_test, y_test, X_valid, y_valid):
    """dataSetInfo
    Outout general information about the deta set, and displays a graph with the data distibution
    Input: X_train, y_train, X_test, y_test, X_valid, y_valid - Images and Labels for each data set
    Returns: None                                                            """
    n_train = len(X_train)
    n_test = len(X_test)
    image_shape = str(format(X_train[0].shape))
    classes, counts = np.unique(y_train, return_counts=True)
    n_classes = len(classes)

    print("Image Shape: {}".format(X_train[0].shape))
    print("Number of training samples =", n_train)
    print("Number of validation samples =", len(X_valid), (len(X_valid) * 100) / n_train, "% of training data")
    print("Number of testing samples =", n_test)
    print("Image data shape =", image_shape)
    print("Number of classes =", n_classes)

    # Visualize data set distribution
    bins = n_classes
    plt.hist(y_test, bins=bins, histtype='stepfilled', color='b', alpha=.5, label='Test')
    plt.hist(y_train, bins=bins, histtype='stepfilled', color='r', alpha=.5, label='Training')
    plt.hist(y_valid, bins=bins, histtype='stepfilled', color='g', alpha=.5, label='Validation')

    plt.title("Labels Histogram")
    plt.xlabel("Lable")
    plt.ylabel("Quantity")
    plt.axis([0, 43, 0, counts[0] + 1000])

    plt.legend()
    plt.show() 
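
A hypothetical usage sketch with synthetic data; the 43-class range matches the hard-coded plt.axis([0, 43, 0, ...]) above, and the array shapes are illustrative only. It assumes the dataSetInfo function and its module-level numpy/pyplot imports are available:

import numpy as np
import matplotlib.pyplot as plt

X_train = np.zeros((500, 32, 32, 3)); y_train = np.random.randint(0, 43, 500)
X_valid = np.zeros((100, 32, 32, 3)); y_valid = np.random.randint(0, 43, 100)
X_test  = np.zeros((200, 32, 32, 3)); y_test  = np.random.randint(0, 43, 200)

dataSetInfo(X_train, y_train, X_test, y_test, X_valid, y_valid)
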
Example 61
Project: tripp   Author: mjamesruggiero   File: clustering.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def decolor(asset, target_asset="/tmp/new-image.png"):
    """De-color PNG assets to a 5-color analog"""
    img = matimage.imread(asset)

    top_row = img[0]
    top_left_pixel = top_row[0]

    logging.info("destructuring the top left pixel")
    red, green, blue = top_left_pixel

    logging.info("about to read pixels")
    pixels = [pixel for row in img for pixel in row]
    clusterer = KMeans(5)

    logging.info("training... might take a while...")
    clusterer.train(pixels)

    def recolor(pixel):
        """map index of closest cluster to its mean"""
        cluster = clusterer.classify(pixel)
        return clusterer.means[cluster]

    new_img = [[recolor(pixel) for pixel in row] for row in img]

    logging.info("displaying image")

    pyplot.imshow(new_img)
    pyplot.axis('off')
    pyplot.savefig(target_asset) 
Example 62
Project: Chinese-Character-and-Calligraphic-Image-Processing   Author: MingtaoGuo   File: test.py    MIT License 5 votes vote down vote up
def discriminator(self, inputs, inputs_condition):
        inputs = tf.concat([inputs, inputs_condition], axis=3)
        inputs = tf.random_crop(inputs, [1, 70, 70, 2])
        with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
            with tf.variable_scope("conv1"):
                inputs = leaky_relu(conv2d("conv1", inputs, 64, 5, 2))
            with tf.variable_scope("conv2"):
                inputs = leaky_relu(instanceNorm("in1", conv2d("conv2", inputs, 128, 5, 2)))
            with tf.variable_scope("conv3"):
                inputs = leaky_relu(instanceNorm("in2", conv2d("conv3", inputs, 256, 5, 2)))
            with tf.variable_scope("conv4"):
                inputs = leaky_relu(instanceNorm("in3", conv2d("conv4", inputs, 512, 5, 2)))
            with tf.variable_scope("outputs"):
                inputs = conv2d("conv5", inputs, 1, 5, 1)
            return inputs 
Example 63
Project: GMRbasedGP   Author: NoemieJaquier   File: gmr.py    MIT License 5 votes vote down vote up
def init_params_kbins(self, data, nb_samples):
		"""
		K_bins GMM initialisation
		:param data: np.array((nb_dim, nb_data))
		:param nb_samples: number of demonstrations
		:return: None
		"""

		# Delimit the cluster bins for first demonstration
		nb_data = int(data.shape[1]/nb_samples)

		self.priors = np.ones(self.nb_states)/self.nb_states
		self.mu = [np.zeros(self.nb_dim) for n in range(self.nb_states)]
		self.sigma = [np.zeros((self.nb_dim, self.nb_dim)) for n in range(self.nb_states)]

		t_sep = list(map(int, np.round(np.linspace(0, nb_data, self.nb_states + 1))))

		for i in range(self.nb_states):
			# Get bins indices for each
			inds = []
			for n in range(nb_samples):
				inds += range(n*nb_data + t_sep[i], n*nb_data + t_sep[i+1])
			data_tmp = data[:, inds]

			self.mu[i] = np.mean(data_tmp, axis=1)
			self.sigma[i] = np.cov(data_tmp) + np.eye(self.nb_dim) * self.reg 
Example 64
Project: wingstructure   Author: akafliegdarmstadt   File: wing.py    MIT License 5 votes vote down vote up
def plot(self):
        import matplotlib.pyplot as plt
        
        # draw centerline 
        plt.axvline(x=0, linestyle='-.')
        
        # draw sections
        x_positions = []
        y_positions = []
        chord_lengths = []
        
        for section in self.sections:
            x = section.pos.x+self.x
            y = section.pos.y
            chord = section.chord
            
            plt.plot((y, y), (-x, -x-chord), 'r')
            x_positions.append(x)
            y_positions.append(y)
            chord_lengths.append(chord)
        
        y_positions = np.array(y_positions)
        
        # draw leading edge
        plt.plot(y_positions, -1*np.array(x_positions), 'b' )
        # draw trailing edge
        plt.plot(y_positions, -1*np.array(x_positions)-np.array(chord_lengths), 'b')
        
        # format 
        plt.axis('equal')
        plt.axis('off')
        plt.xlim(-1, max(y_positions)+1) 
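
The plt.axis('equal') / plt.axis('off') pair is the key bit here: the first forces the same data scale on both axes so the planform is not distorted, the second hides the frame and ticks. A minimal standalone illustration (not from the wingstructure project):

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 2 * np.pi, 200)
plt.plot(np.cos(t), np.sin(t))  # a circle only looks circular with an equal aspect ratio
plt.axis('equal')
plt.axis('off')
plt.show()
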
Example 65
Project: neurips19-graph-protein-design   Author: jingraham   File: protein_features.py    MIT License 5 votes vote down vote up
def _dist(self, X, mask, eps=1E-6):
        """ Pairwise euclidean distances """
        # Convolutional network on NCHW
        mask_2D = torch.unsqueeze(mask,1) * torch.unsqueeze(mask,2)
        dX = torch.unsqueeze(X,1) - torch.unsqueeze(X,2)
        D = mask_2D * torch.sqrt(torch.sum(dX**2, 3) + eps)

        # Identify k nearest neighbors (including self)
        D_max, _ = torch.max(D, -1, keepdim=True)
        D_adjust = D + (1. - mask_2D) * D_max
        D_neighbors, E_idx = torch.topk(D_adjust, self.top_k, dim=-1, largest=False)
        mask_neighbors = gather_edges(mask_2D.unsqueeze(-1), E_idx)

        # Debug plot KNN
        # print(E_idx[:10,:10])
        # D_simple = mask_2D * torch.zeros(D.size()).scatter(-1, E_idx, torch.ones_like(knn_D))
        # print(D_simple)
        # fig = plt.figure(figsize=(4,4))
        # ax = fig.add_subplot(111)
        # D_simple = D.data.numpy()[0,:,:]
        # plt.imshow(D_simple, aspect='equal')
        # plt.axis('off')
        # plt.tight_layout()
        # plt.savefig('D_knn.pdf')
        # exit(0)
        return D_neighbors, E_idx, mask_neighbors 
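
The same pairwise-distance / k-nearest-neighbour selection can be sketched in a few lines of NumPy, ignoring the padding mask used above (which only matters for variable-length batches):

import numpy as np

X = np.random.rand(6, 3)                                     # 6 points in 3D
D = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(-1))  # pairwise Euclidean distances
knn_idx = np.argsort(D, axis=-1)[:, :3]                      # 3 nearest neighbours per point (self included)
print(knn_idx)
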
Example 66
Project: neurips19-graph-protein-design   Author: jingraham   File: protein_features.py    MIT License 5 votes vote down vote up
def _rbf(self, D):
        # Distance radial basis function
        D_min, D_max, D_count = 0., 20., self.num_rbf
        D_mu = torch.linspace(D_min, D_max, D_count)
        D_mu = D_mu.view([1,1,1,-1])
        D_sigma = (D_max - D_min) / D_count
        D_expand = torch.unsqueeze(D, -1)
        RBF = torch.exp(-((D_expand - D_mu) / D_sigma)**2)

        # for i in range(D_count):
        #     fig = plt.figure(figsize=(4,4))
        #     ax = fig.add_subplot(111)
        #     rbf_i = RBF.data.numpy()[0,i,:,:]
        #     # rbf_i = D.data.numpy()[0,0,:,:]
        #     plt.imshow(rbf_i, aspect='equal')
        #     plt.axis('off')
        #     plt.tight_layout()
        #     plt.savefig('rbf{}.pdf'.format(i))
        #     print(np.min(rbf_i), np.max(rbf_i), np.mean(rbf_i))
        # exit(0)
        return RBF 
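
Commented-out plotting aside, _rbf simply lifts each distance into a fixed bank of Gaussian radial basis functions. A NumPy re-expression of the same expansion, with centers and width chosen as above (a sketch, not the project's code):

import numpy as np

def rbf_expand(D, D_min=0.0, D_max=20.0, D_count=16):
    """Expand distances D (any shape) into D_count Gaussian RBF features."""
    D_mu = np.linspace(D_min, D_max, D_count)  # evenly spaced centers
    D_sigma = (D_max - D_min) / D_count        # shared width
    return np.exp(-((D[..., None] - D_mu) / D_sigma) ** 2)

dists = np.array([0.0, 5.0, 10.0])
print(rbf_expand(dists).shape)  # (3, 16)
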
Example 67
Project: yolov3-detector   Author: ccerhan   File: main.py    MIT License 4 votes vote down vote up
def main():
    weights_path = 'data/yolov3.weights'
    config_path = 'data/yolov3.cfg'
    labels_path = 'data/coco.names'

    # Download yolov3.weights if it does not exist
    if not os.path.exists(weights_path):
        download_weights(weights_path)

    # Load the class labels and randomly generated colors
    labels, colors = load_labels(labels_path)

    # Load the sample image as numpy array (RGB)
    image = plt.imread('samples/dog.jpg')

    # Create YOLO detector
    model = yolo.Detector(config_path=config_path,
                          weights_path=weights_path,
                          input_size=(544, 608),
                          conf_thresh=0.5,
                          nms_thresh=0.4)

    if torch.cuda.is_available():
        model.cuda()

    # Perform detection for a single image
    detections = model(image)

    # Draw detected class labels and relevant bounding boxes
    plt.figure()
    fig, ax = plt.subplots(1)
    ax.imshow(image)

    if len(detections) > 0:
        for i, (x1, y1, x2, y2, obj_conf, cls_conf, cls_pred) in enumerate(detections[0]):
            x = round(x1.item())
            y = round(y1.item())
            w = round(x2.item() - x1.item())
            h = round(y2.item() - y1.item())

            label = labels[int(cls_pred)]
            color = colors[int(cls_pred)]

            ax.add_patch(patches.Rectangle((x, y), w, h, linewidth=2, edgecolor=color, facecolor='none'))
            plt.text(x, y, s=label, color='white', verticalalignment='top', bbox={'color': color, 'pad': 0})

            print(i, ':', label, 'x:', x, 'y:', y, 'w:', w, 'h:', h)

    plt.axis('off')
    plt.gca().xaxis.set_major_locator(ticker.NullLocator())
    plt.gca().yaxis.set_major_locator(ticker.NullLocator())

    plt.show()
    # plt.savefig('samples/dogs_.png', bbox_inches='tight', pad_inches=0.0)

    plt.close() 
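
The plt.axis('off') / NullLocator combination at the end is a common recipe for saving an image with no axes, ticks, or surrounding whitespace. A minimal standalone version (the file name is illustrative):

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

img = np.random.rand(64, 64, 3)  # stand-in for the annotated detection image
plt.imshow(img)
plt.axis('off')
plt.gca().xaxis.set_major_locator(ticker.NullLocator())
plt.gca().yaxis.set_major_locator(ticker.NullLocator())
plt.savefig('borderless.png', bbox_inches='tight', pad_inches=0.0)
plt.close()
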
Example 68
Project: FRIDA   Author: LCAV   File: generators.py    MIT License 4 votes vote down vote up
def gen_far_field_ir(doa, R, fs):
    """
    This function generates the impulse responses for all microphones for
    K sources in the far field.

    :param doa: (nd-array) The sources direction of arrivals. This should
                be a (D-1)xK array where D is the dimension (2 or 3) and K
                is the number of sources
    :param R: the locations of the microphones
    :param fs: sampling frequency

    :return ir: (ndarray) A KxMxL array containing all the fractional delay
                filters between each source (axis 0) and microphone (axis 1)
                L is the length of the filter
    """

    # make sure these guys are nd-arrays
    doa = np.array(doa)

    if doa.ndim == 0:
        doa = np.array([[doa]])

    elif doa.ndim == 1:
        doa = np.array([doa])

    # the number of microphones
    M = R.shape[1]
    dim = R.shape[0]

    # the number of sources
    K = doa.shape[1]

    # convert the spherical coordinates to unit propagation vectors
    p_vec = -unit_vec(doa)

    # the delays are the inner product between unit vectors and mic locations
    # set zero delay at earliest microphone
    delays = np.dot(p_vec.T, R) / pra.constants.get('c')
    delays -= delays.min()

    # figure out the maximal length of the impulse responses
    L = pra.constants.get('frac_delay_length')
    t_max = delays.max()
    D = int(L + np.ceil(np.abs(t_max * fs)))

    # the impulse response filter bank
    fb = np.zeros((K, M, D))

    # create all the impulse responses
    for k in xrange(K):
        for m in xrange(M):
            t = delays[k, m]
            delay_s = t * fs
            delay_i = int(np.round(delay_s))
            delay_f = delay_s - delay_i
            fb[k, m, delay_i:delay_i + (L - 1) + 1] += pra.fractional_delay(delay_f)

    return fb 
Example 69
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def get_logits_over_interval(sess, model, x_data, fgsm_params,
                             min_epsilon=-10., max_epsilon=10.,
                             num_points=21):
    """Get logits when the input is perturbed in an interval in adv direction.

    Args:
        sess: Tf session
        model: Model for which we wish to get logits.
        x_data: Numpy array corresponding to a single data
                point of shape [height, width, channels].
        fgsm_params: Parameters for generating adversarial examples.
        min_epsilon: Minimum value of epsilon over the interval.
        max_epsilon: Maximum value of epsilon over the interval.
        num_points: Number of points used to interpolate.

    Returns:
        Numpy array containing logits.

    Raises:
        ValueError if min_epsilon is larger than max_epsilon.
    """
    # Get the height, width and number of channels
    height = x_data.shape[0]
    width = x_data.shape[1]
    channels = x_data.shape[2]
    size = height * width * channels

    x_data = np.expand_dims(x_data, axis=0)
    import tensorflow as tf
    from cleverhans.attacks import FastGradientMethod

    # Define the data placeholder
    x = tf.placeholder(dtype=tf.float32,
                       shape=[1, height,
                              width,
                              channels],
                       name='x')
    # Define adv_x
    fgsm = FastGradientMethod(model, sess=sess)
    adv_x = fgsm.generate(x, **fgsm_params)

    if min_epsilon > max_epsilon:
        raise ValueError('Minimum epsilon is larger than maximum epsilon')

    eta = tf.nn.l2_normalize(adv_x - x, dim=0)
    epsilon = tf.reshape(tf.lin_space(float(min_epsilon),
                                      float(max_epsilon),
                                      num_points),
                         (num_points, 1, 1, 1))
    lin_batch = x + epsilon * eta
    logits = model.get_logits(lin_batch)
    with sess.as_default():
        log_prob_adv_array = sess.run(logits,
                                      feed_dict={x: x_data})
    return log_prob_adv_array 
Example 70
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
                              min_epsilon=-10, max_epsilon=10,
                              num_points=21):
    """Generate linear extrapolation plot.

    Args:
        log_prob_adv_array: Numpy array containing log probabilities
        y: Tf placeholder for the labels
        file_name: Plot filename
        min_epsilon: Minimum value of epsilon over the interval
        max_epsilon: Maximum value of epsilon over the interval
        num_points: Number of points used to interpolate
    """
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt

    figure = plt.figure()
    figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')

    correct_idx = np.argmax(y, axis=0)
    fig = plt.figure()
    plt.xlabel('Epsilon')
    plt.ylabel('Logits')
    x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
    plt.xlim(min_epsilon - 1, max_epsilon + 1)
    for i in xrange(y.shape[0]):
        if i == correct_idx:
            ls = '-'
            linewidth = 5
        else:
            ls = '--'
            linewidth = 2
        plt.plot(
            x_axis,
            log_prob_adv_array[:, i],
            ls=ls,
            linewidth=linewidth,
            label='{}'.format(i))
    plt.legend(loc='best', fontsize=14)
    plt.show()
    fig.savefig(file_name)
    plt.clf()
    return figure 
Example 71
Project: helloworld   Author: pip-uninstaller-python   File: matplotlibTest.py    GNU General Public License v2.0 4 votes vote down vote up
def scatter():
    fig = plt.figure()  # create a figure
    fig.add_subplot(3, 3, 1)
    n = 128
    X = np.random.normal(0, 1, n)
    Y = np.random.normal(0, 1, n)
    T = np.arctan2(Y, X)  # used for coloring
    plt.axes([0.025, 0.025, 0.95, 0.95])  # display area
    plt.scatter(X, Y, s=75, c=T, alpha=.5)  # s: point size, c: color, alpha: transparency
    plt.xlim(-1.5, 1.5), plt.xticks([])  # x range
    plt.ylim(-1.5, 1.5), plt.yticks([])  # y range
    plt.axis()
    plt.title("scatter")
    plt.xlabel("x")
    plt.ylabel("y")
    # plt.show()

    # other
    fig = plt.figure()  # create a figure
    ax = fig.add_subplot(3, 3, 1)
    n = 128
    X = np.random.normal(0, 1, n)
    Y = np.random.normal(0, 1, n)
    T = np.arctan2(Y, X)  # used for coloring
    # plt.axes([0.025, 0.025, 0.95, 0.95])  # display area
    ax.scatter(X, Y, s=75, c=T, alpha=.5)  # s: point size, c: color, alpha: transparency
    plt.xlim(-1.5, 1.5), plt.xticks([])  # x range
    plt.ylim(-1.5, 1.5), plt.yticks([])  # y range
    plt.axis()
    plt.title("scatter")
    plt.xlabel("x")
    plt.ylabel("y")
    # plt.show()

    # bar chart
    # fig = plt.figure()  # create a figure
    fig.add_subplot(332)  # the 3-digit shorthand cannot be used when an index exceeds 9
    n = 10  # 10 points
    X = np.arange(n)  # build the sequence 0-9
    # add some variation to the bar heights
    Y1 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)  # scaled by random numbers in [0.5, 1.0]
    Y2 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)
    # then draw the bars
    plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white')
    plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
    for x, y in zip(X, Y1):  # annotate each bar: position, format string, ha: horizontal alignment, va: vertical alignment
        plt.text(x + 0.4, y + 0.05, '%.2f' % y, ha='center', va='bottom')
    for x, y in zip(X, Y2):
        plt.text(x + 0.4, -y - 0.05, '%.2f' % y, ha='center', va='top')
    plt.show()


# Bar chart 
Example 72
Project: cvpr2018-hnd   Author: kibok90   File: test.py    MIT License 4 votes vote down vote up
def print_results(work_name, results, save_path, hierarchical_measure=False, start_time=time.time()):

    if hierarchical_measure:
        mtypes = ['acc', 'HF']
    else:
        mtypes = ['acc']
    
    print(save_path)
    print('{work_name}; '.format(work_name=work_name), end='')
    print('{time:8.3f} s'.format(time=time.time()-start_time))
    for m, mtype in enumerate(mtypes):
        print('bias: {res:7.4f}; '.format(res=results['acc']['g_bias']), end='')
        print('{mtype:4s}'.format(mtype=mtype), end='')
        print('known: {res:5.2f}; '.format(res=results[mtype]['g_known']*100.), end='')
        print('novel: {res:5.2f}; '.format(res=results[mtype]['g_novel']*100.), end='')
        if mtype == 'acc':
            print('auc  : {res:5.2f}; '.format(res=results[mtype]['auc']*100.))
        else:
            print('hmean: {res:5.2f}; '.format(res=results[mtype]['g_harmonic']*100.))
    
        # plot known vs. novel
        plt.figure(m)
        plt.plot(results[mtype]['known'], results[mtype]['novel'], 'k.-')
        if mtype == 'HE':
            plt.xticks(np.arange(0., 11., 1.))
            plt.yticks(np.arange(0., 11., 1.))
            plt.axis([0., 10., 0., 10.])
        else:
            plt.xticks(np.arange(0., 1.1, .1))
            plt.yticks(np.arange(0., 1.1, .1))
            plt.axis([0., 1., 0., 1.])
        plt.grid()
        plt.xlabel('known class accuracy')
        plt.ylabel('novel class accuracy')
        plt.title('known: {res:5.2f}; '.format(res=results[mtype]['g_known']*100.) + \
                  'novel: {res:5.2f}; '.format(res=results[mtype]['g_novel']*100.) + \
                  'hmean: {res:5.2f}; '.format(res=results[mtype]['g_harmonic']*100.) + \
                  'auc  : {res:5.2f}; '.format(res=results[mtype]['auc']*100.)
                 )
        plt.savefig(save_path + '_' + work_name + '_' + mtype + '.png')
        plt.clf()
        plt.close() 
Example 73
Project: nxt-sketcher   Author: simondolle   File: printer.py    MIT License 4 votes vote down vote up
def display_reachable_area(points_per_lego_unit, angle, structure_settings, plot_errors, plot_actual_points):
    reachable_xs = []
    reachable_ys = []
    grid_to_angle = compute_grid_to_angle_inverse_kinematics(structure_settings, points_per_lego_unit, angle)
    for (x, y), (alpha, beta, _) in grid_to_angle.items():
        if plot_actual_points:
            x, y = get_xy(1./structure_settings.gear_ratio * alpha * degrees_to_radians, 1./structure_settings.gear_ratio * beta * degrees_to_radians, structure_settings)
        x, y = change_referential(x, y, angle)
        reachable_xs.append(x)
        reachable_ys.append(y)

    print_area = find_largest_rectange_quadratic(grid_to_angle, points_per_lego_unit)
    x0, y0, x1, y1 = print_area

    width = x1 - x0
    height = y1 - y0

    xt0, yt0 = change_referential(x0, y0, angle)
    xt1, yt1 = change_referential(x0, y1, angle)
    xt2, yt2 = change_referential(x1, y1, angle)
    xt3, yt3 = change_referential(x1, y0, angle)

    margin = 1

    min_xs = min(reachable_xs) - margin
    max_xs = max(reachable_xs) + margin

    min_ys = min(reachable_ys) - margin
    max_ys = max(reachable_ys) + margin

    if plot_errors:
        xi = np.linspace(min_xs, max_xs, 100)
        yi = np.linspace(min_ys, max_ys, 100)
        X, Y = np.meshgrid(xi, yi)
        errors = np.vectorize(compute_error)(X, Y)

        CS = plt.contourf(X, Y, errors, 15, cmap=plt.cm.rainbow, vmax=abs(errors).max(), vmin=0)
        plt.colorbar(CS)

    plt.scatter(reachable_xs, reachable_ys, marker='o', c='b', s=5)
    plt.plot([xt0, xt1, xt2, xt3, xt0], [yt0, yt1, yt2, yt3, yt0])


    plt.suptitle('width=%s height=%s'%(width, height), fontsize=14)
    plt.axis('equal')
    plt.show() 
Example 74
Project: euclid   Author: njpayne   File: clustering.py    GNU General Public License v2.0 4 votes vote down vote up
def univariate_selection(features, labels, **kwargs):

    #extract parameters
    is_regression = kwargs.get('is_regression', False)
    n_best = kwargs.get('n_best', 2)

    #select scoring function
    #For regression: f_regression
    #For classification: chi2 or f_classif
    if(is_regression):
        scoring_function = feature_selection.f_regression
    else:
        #chi2 requires non negative features
        if(features.min() < 0):
            scoring_function = feature_selection.f_classif
        else:
            scoring_function = feature_selection.chi2

    #establish the selection function
    selector = feature_selection.SelectKBest(scoring_function, k=n_best)

    #train the function
    selector.fit(features, labels.flatten())

    #get the scores
    feature_scores = selector.scores_

    #transform the data
    tranformed_data = selector.transform(features)
    
    #chart the results
    scores = -np.log10(selector.pvalues_)
    scores /= scores.max()

    X_indices = np.arange(features.shape[-1])

    plt.figure(1)
    plt.clf()
    plt.bar(X_indices - .45, scores, width=.2,
            label=r'Univariate score ($-Log(p_{value})$)', color='g')

    plt.title("Comparing feature selection")
    plt.xlabel('Feature number')
    plt.yticks(())
    plt.axis('tight')
    plt.legend(loc='upper right')

    pylab.savefig(os.path.join(results_location, "Univariate Selection %d Features" % n_best))

    return tranformed_data, feature_scores 
Example 75
Project: ddpg-curiosity-and-multi-criteria-her   Author: CDMCH   File: dynamics_loss_mapping.py    MIT License 4 votes vote down vote up
def generate_dynamics_loss_map_from_npy_records(self, file_prefix, delete_records=False):

        file_names = [file_name for file_name in os.listdir(self.working_dir)
                      if file_name.endswith(".npy") and file_name.startswith(file_prefix)]

        losses_locations_records = np.concatenate([np.load(os.path.join(self.working_dir, file_name)) for file_name in file_names],
                                          axis=0)

        max_heatmap_samples = 10000

        losses_locations_records = losses_locations_records[np.random.choice(len(losses_locations_records),
                                                             min(max_heatmap_samples, len(losses_locations_records)),
                                                             replace=False)]

        # print("\n\n\nlosses_locations_records:\n{}\n".format(losses_locations_records))
        # exit(0)

        # Add a minuscule amount of location variation in case agent doesn't move on a certain axis.

        # print("z min: {} max: {}".format(min(z), max(z)))

        losses_locations_records[:, 1:] = losses_locations_records[:, 1:] * 100 + (np.random.randn(*losses_locations_records[:, 1:].shape) / 1000)

        losses_locations_records = losses_locations_records.swapaxes(0, 1)

        z = losses_locations_records[0, :]

        idx = z.argsort()
        x, y, z = losses_locations_records[1, idx], losses_locations_records[2, idx], z[idx]

        plt.figure(figsize=(3, 3))
        plt.scatter(x, y, c=z, s=80, edgecolors='', cmap=plt.cm.jet, alpha=0.5)
        plt.colorbar()
        plt.xlim(0, 100)
        plt.ylim(0, 100)

        im = plt.imread(os.path.join(self.working_dir, 'level.png'))
        plt.imshow(im, extent=[0, 100, 0, 100], aspect='auto')
        plt.axis('equal')
        plt.axis('off')
        plt.margins(0, 0)
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
        plt.title("Dynamics Loss " + file_prefix)

        heatmap_image_path = os.path.join(self.working_dir, "{}_dynamics_loss.png".format(file_prefix))
        plt.savefig(heatmap_image_path, transparent=False, bbox_inches='tight', pad_inches=0)

        plt.close()

        if delete_records:
            for file_name in file_names:
                try:
                    os.remove(os.path.join(self.working_dir, file_name))
                except OSError:
                    pass

        return heatmap_image_path 
Example 76
Project: bbho   Author: DarkElement75   File: output_grapher.py    MIT License 4 votes vote down vote up
def graph_output(plot_2d_results, plot_3d_results, bbf_evaluation_i, bbf_evaluation_n, domain_x, domain_y, detail_n, test_means, bbf_inputs, bbf_evaluations, val1, val2):

  #Set the filename
  fname = "results/%02d" % bbf_evaluation_i

  #Plot our updates
  if plot_2d_results:
      plt.plot(domain_x, test_means)
      #plt.plot(domain_x, test_variances, 'r')
      #plt.plot(bbf_inputs, bbf_evaluations, 'bo')
      plt.scatter(bbf_inputs, bbf_evaluations, marker='o', c='b', s=100.0, label="Function Evaluations")
      plt.plot(domain_x, val1, 'r')
      plt.plot(domain_x, val2, 'r')
      #plt.plot(domain_x, bbf(domain_x), 'y')
      plt.savefig("%s.jpg" % fname, dpi=None, facecolor='w', edgecolor='w',
          orientation='portrait', papertype=None, format=None,
          transparent=False, bbox_inches='tight', pad_inches=0.1,
          frameon=None)
      plt.xlabel("X-Axis")
      plt.ylabel("Y-Axis")

      plt.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0.)
      plt.axis([0, 10, 0, 2])
      #plt.show()
      plt.gcf().clear()

  elif plot_3d_results:
      #So we only render on the last one(just erase this if you want all of them)
      if bbf_evaluation_i == bbf_evaluation_n-1:
          fig = plt.figure()
          ax = fig.add_subplot(111, projection='3d')
          #X & Y have to be matrices of all vertices
          #Z has to be matrix of outputs
          #Convert our vectors to compatible matrix counterparts
          Y = np.array([[i] for i in domain_y])

          X = np.tile(domain_x, (detail_n, 1))
          Y = np.tile(Y, (1, detail_n))

          #This ones easy, just reshape
          Z1 = test_means.reshape(detail_n, detail_n)
          #Z2 = test_variances.reshape(detail_n, detail_n)
          Z3 = (val1).reshape(detail_n, detail_n)
          Z4 = (val2).reshape(detail_n, detail_n)


          ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, cmap=cm.coolwarm)
          #ax.plot_wireframe(X, Y, Z2, rstride=1, cstride=1)
          ax.plot_wireframe(X, Y, Z3, rstride=1, cstride=1)
          ax.plot_wireframe(X, Y, Z4, rstride=1, cstride=1)
          plt.savefig("%s.jpg" % fname, dpi=None, facecolor='w', edgecolor='w',
              orientation='portrait', papertype=None, format=None,
              transparent=False, bbox_inches='tight', pad_inches=0.1,
              frameon=None)

          plt.gcf().clear()
          #plt.show() 
Example 77
Project: GMRbasedGP   Author: NoemieJaquier   File: gmr.py    MIT License 4 votes vote down vote up
def gmm_em(self, data, maxiter=100, minstepsize=1e-5):
		"""
		GMM computation with EM algorithm
		:param data: np.array((nb_dim, nb_data))
		:param maxiter: max number of iterations for EM
		:param minstepsize: maximum increase of log likelihood
		:return: likelihood vector
		"""

		nb_min_steps = 5  # min num iterations
		nb_max_steps = maxiter  # max iterations
		max_diff_ll = minstepsize  # max log-likelihood increase

		nb_data = data.shape[1]

		LL = np.zeros(nb_max_steps)
		for it in range(nb_max_steps):

			# E - step
			L = np.zeros((self.nb_states, nb_data))
			L_log = np.zeros((self.nb_states, nb_data))
			xts = [np.zeros((self.nb_dim, nb_data))] * self.nb_states

			for i in range(self.nb_states):
				L_log[i, :] = np.log(self.priors[i]) + multi_variate_normal(data.T, self.mu[i], self.sigma[i], log=True)

			L = np.exp(L_log)
			GAMMA = L / np.sum(L, axis=0)
			# GAMMA = L / (np.sum(L, axis=0) + 1e-300)
			GAMMA2 = GAMMA / (np.sum(GAMMA, axis=1)[:, np.newaxis])

			# M-step
			for i in range(self.nb_states):
				# Update Mu
				self.mu[i] = np.sum(data*GAMMA2[i], axis=1)

				# Update Sigma
				xtmp = data - self.mu[i][:, None]
				self.sigma[i] = np.dot(xtmp, np.dot(np.diag(GAMMA2[i]), xtmp.T)) + np.eye(self.nb_dim)*self.reg

			# Update priors
			self.priors = np.mean(GAMMA, axis=1)

			LL[it] = np.mean(np.log(np.sum(L, axis=0) + 1e-300))

			# Check for convergence
			if it > nb_min_steps:
				if LL[it] - LL[it - 1] < max_diff_ll:
					print('Converged after %d iterations: %.3e' % (it, LL[it]))
					return LL[it], GAMMA

		print("GMM did not converge before reaching max iteration. Consider augmenting the number of max iterations.")
		return LL[it], GAMMA 
Example 78
Project: neurips19-graph-protein-design   Author: jingraham   File: protein_features.py    MIT License 4 votes vote down vote up
def _quaternions(self, R):
        """ Convert a batch of 3D rotations [R] to quaternions [Q]
            R [...,3,3]
            Q [...,4]
        """
        # Simple Wikipedia version
        # en.wikipedia.org/wiki/Rotation_matrix#Quaternion
        # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix
        diag = torch.diagonal(R, dim1=-2, dim2=-1)
        Rxx, Ryy, Rzz = diag.unbind(-1)
        magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([
              Rxx - Ryy - Rzz, 
            - Rxx + Ryy - Rzz, 
            - Rxx - Ryy + Rzz
        ], -1)))
        _R = lambda i,j: R[:,:,:,i,j]
        signs = torch.sign(torch.stack([
            _R(2,1) - _R(1,2),
            _R(0,2) - _R(2,0),
            _R(1,0) - _R(0,1)
        ], -1))
        xyz = signs * magnitudes
        # The relu enforces a non-negative trace
        w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.
        Q = torch.cat((xyz, w), -1)
        Q = F.normalize(Q, dim=-1)

        # Axis of rotation
        # Replace bad rotation matrices with identity
        # I = torch.eye(3).view((1,1,1,3,3))
        # I = I.expand(*(list(R.shape[:3]) + [-1,-1]))
        # det = (
        #     R[:,:,:,0,0] * (R[:,:,:,1,1] * R[:,:,:,2,2] - R[:,:,:,1,2] * R[:,:,:,2,1])
        #     - R[:,:,:,0,1] * (R[:,:,:,1,0] * R[:,:,:,2,2] - R[:,:,:,1,2] * R[:,:,:,2,0])
        #     + R[:,:,:,0,2] * (R[:,:,:,1,0] * R[:,:,:,2,1] - R[:,:,:,1,1] * R[:,:,:,2,0])
        # )
        # det_mask = torch.abs(det.unsqueeze(-1).unsqueeze(-1))
        # R = det_mask * R + (1 - det_mask) * I

        # DEBUG
        # https://math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix
        # Columns of this are in rotation plane
        # A = R - I
        # v1, v2 = A[:,:,:,:,0], A[:,:,:,:,1]
        # axis = F.normalize(torch.cross(v1, v2), dim=-1)
        return Q 
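
For reference, the same sign/magnitude construction applied to a single well-conditioned rotation matrix reduces to the textbook formula below. This is a standalone NumPy sketch, not the batched implementation above, and it assumes trace(R) > -1 so the scalar part is nonzero; the output order (x, y, z, w) matches the code above:

import numpy as np

def rotmat_to_quat(R):
    """Convert one 3x3 rotation matrix to a unit quaternion (x, y, z, w)."""
    Rxx, Ryy, Rzz = np.diag(R)
    w = 0.5 * np.sqrt(max(1.0 + Rxx + Ryy + Rzz, 0.0))
    x = (R[2, 1] - R[1, 2]) / (4.0 * w)
    y = (R[0, 2] - R[2, 0]) / (4.0 * w)
    z = (R[1, 0] - R[0, 1]) / (4.0 * w)
    q = np.array([x, y, z, w])
    return q / np.linalg.norm(q)

# 90-degree rotation about the z-axis -> quaternion (0, 0, sin 45°, cos 45°).
Rz = np.array([[0., -1., 0.],
               [1.,  0., 0.],
               [0.,  0., 1.]])
print(rotmat_to_quat(Rz))  # approx [0. 0. 0.7071 0.7071]
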
Example 79
Project: neurips19-graph-protein-design   Author: jingraham   File: protein_features.py    MIT License 4 votes vote down vote up
def _hbonds(self, X, E_idx, mask_neighbors, eps=1E-3):
        """ Hydrogen bonds and contact map
        """
        X_atoms = dict(zip(['N', 'CA', 'C', 'O'], torch.unbind(X, 2)))

        # Virtual hydrogens
        X_atoms['C_prev'] = F.pad(X_atoms['C'][:,1:,:], (0,0,0,1), 'constant', 0)
        X_atoms['H'] = X_atoms['N'] + F.normalize(
             F.normalize(X_atoms['N'] - X_atoms['C_prev'], -1)
          +  F.normalize(X_atoms['N'] - X_atoms['CA'], -1)
        , -1)

        def _distance(X_a, X_b):
            return torch.norm(X_a[:,None,:,:] - X_b[:,:,None,:], dim=-1)

        def _inv_distance(X_a, X_b):
            return 1. / (_distance(X_a, X_b) + eps)

        # DSSP vacuum electrostatics model
        U = (0.084 * 332) * (
              _inv_distance(X_atoms['O'], X_atoms['N'])
            + _inv_distance(X_atoms['C'], X_atoms['H'])
            - _inv_distance(X_atoms['O'], X_atoms['H'])
            - _inv_distance(X_atoms['C'], X_atoms['N'])
        )

        HB = (U < -0.5).type(torch.float32)
        neighbor_HB = mask_neighbors * gather_edges(HB.unsqueeze(-1),  E_idx)
        # print(HB)
        # HB = F.sigmoid(U)
        # U_np = U.cpu().data.numpy()
        # # plt.matshow(np.mean(U_np < -0.5, axis=0))
        # plt.matshow(HB[0,:,:])
        # plt.colorbar()
        # plt.show()
        # D_CA = _distance(X_atoms['CA'], X_atoms['CA'])
        # D_CA = D_CA.cpu().data.numpy()
        # plt.matshow(D_CA[0,:,:] < contact_D)
        # # plt.colorbar()
        # plt.show()
        # exit(0)
        return neighbor_HB 
Example 80
Project: neurips19-graph-protein-design   Author: jingraham   File: protein_features.py    MIT License 4 votes vote down vote up
def _dihedrals(self, X, eps=1e-7):
        # First 3 coordinates are N, CA, C
        X = X[:,:,:3,:].reshape(X.shape[0], 3*X.shape[1], 3)

        # Shifted slices of unit vectors
        dX = X[:,1:,:] - X[:,:-1,:]
        U = F.normalize(dX, dim=-1)
        u_2 = U[:,:-2,:]
        u_1 = U[:,1:-1,:]
        u_0 = U[:,2:,:]
        # Backbone normals
        n_2 = F.normalize(torch.cross(u_2, u_1), dim=-1)
        n_1 = F.normalize(torch.cross(u_1, u_0), dim=-1)

        # Angle between normals
        cosD = (n_2 * n_1).sum(-1)
        cosD = torch.clamp(cosD, -1+eps, 1-eps)
        D = torch.sign((u_2 * n_1).sum(-1)) * torch.acos(cosD)

        # This scheme will remove phi[0], psi[-1], omega[-1]
        D = F.pad(D, (1,2), 'constant', 0)
        D = D.view((D.size(0), int(D.size(1)/3), 3))
        phi, psi, omega = torch.unbind(D,-1)

        # print(cosD.cpu().data.numpy().flatten())
        # print(omega.sum().cpu().data.numpy().flatten())

        # Bond angle calculation
        # A = torch.acos(-(u_1 * u_0).sum(-1))

        # DEBUG: Ramachandran plot
        # x = phi.cpu().data.numpy().flatten()
        # y = psi.cpu().data.numpy().flatten()
        # plt.scatter(x * 180 / np.pi, y * 180 / np.pi, s=1, marker='.')
        # plt.xlabel('phi')
        # plt.ylabel('psi')
        # plt.axis('square')
        # plt.grid()
        # plt.axis([-180,180,-180,180])
        # plt.show()

        # Lift angle representations to the circle
        D_features = torch.cat((torch.cos(D), torch.sin(D)), 2)
        return D_features
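
The batched machinery above reduces, for a single dihedral defined by four points, to the cross-product construction below: a standalone NumPy sketch using the same sign convention, not the project's code.

import numpy as np

def dihedral(p0, p1, p2, p3):
    """Signed dihedral angle (radians) defined by four 3D points."""
    u_2, u_1, u_0 = p1 - p0, p2 - p1, p3 - p2              # successive bond vectors
    n_2 = np.cross(u_2, u_1); n_2 /= np.linalg.norm(n_2)   # normal of the first plane
    n_1 = np.cross(u_1, u_0); n_1 /= np.linalg.norm(n_1)   # normal of the second plane
    cosD = np.clip(np.dot(n_2, n_1), -1.0, 1.0)
    return np.sign(np.dot(u_2, n_1)) * np.arccos(cosD)

p0, p1, p2, p3 = map(np.array, [(0., 1., 0.), (0., 0., 0.), (1., 0., 0.), (1., 0., 1.)])
print(np.degrees(dihedral(p0, p1, p2, p3)))  # -> 90.0
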