Python matplotlib.pyplot.show() Examples

The following are 30 code examples of matplotlib.pyplot.show(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module matplotlib.pyplot, or try the search function.
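For reference, plt.show() displays all currently open figures (in an interactive window, or inline when a notebook backend is active) and, in non-interactive mode, blocks until those windows are closed. A minimal, self-contained sketch:

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)   # sample one sine period
plt.plot(x, np.sin(x), label='sin(x)')
plt.legend()
plt.show()   # open the figure window; blocks until it is closed in non-interactive mode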
Example #1
Source File: data_augmentation.py    From Sound-Recognition-Tutorial with Apache License 2.0, 10 votes
def demo_plot():
    audio = './data/esc10/audio/Dog/1-30226-A.ogg'
    y, sr = librosa.load(audio, sr=44100)
    y_ps = librosa.effects.pitch_shift(y, sr, n_steps=6)   # n_steps controls the size of the pitch shift
    y_ts = librosa.effects.time_stretch(y, rate=1.2)   # rate controls the time-stretch factor
    plt.subplot(311)
    plt.plot(y)
    plt.title('Original waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    # plt.axis([88000, 94000, -0.4, 0.4])
    plt.subplot(312)
    plt.plot(y_ts)
    plt.title('Time Stretch transformed waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    plt.subplot(313)
    plt.plot(y_ps)
    plt.title('Pitch Shift transformed waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    # plt.axis([88000, 94000, -0.4, 0.4])
    plt.tight_layout()
    plt.show() 
Example #2
Source File: util.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License, 8 votes
def compute_roc(y_true, y_pred, plot=False):
    """
    TODO
    :param y_true: ground truth
    :param y_pred: predictions
    :param plot:
    :return:
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
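As a hypothetical usage sketch (the label and score arrays below are made up for illustration and are not part of the neural-fingerprinting project):

import numpy as np

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.10, 0.40, 0.80, 0.65, 0.90, 0.30, 0.70, 0.20])
fpr, tpr, auc_score = compute_roc(y_true, y_score, plot=True)   # plot=True also pops up the ROC figure
print("AUC: %.4f" % auc_score)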
Example #3
Source File: dataset.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License, 8 votes
def visualize_2D_trip(self, trip):
        plt.figure(figsize=(30,30))
        rcParams.update({'font.size': 22})

        # Plot cities
        plt.scatter(trip[:,0], trip[:,1], s=200)

        # Plot tour
        tour=np.array(list(range(len(trip))) + [0])
        X = trip[tour, 0]
        Y = trip[tour, 1]
        plt.plot(X, Y,"--", markersize=100)

        # Annotate cities with order
        labels = range(len(trip))
        for i, (x, y) in zip(labels,(zip(X,Y))):
            plt.annotate(i,xy=(x, y))  

        plt.xlim(0,100)
        plt.ylim(0,100)
        plt.show()


    # Heatmap of permutations (x=cities; y=steps) 
Example #4
Source File: dataset.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License, 7 votes
def visualize_2D_trip(self,trip,tw_open,tw_close):
        plt.figure(figsize=(30,30))
        rcParams.update({'font.size': 22})
        # Plot cities
        colors = ['red'] # Depot is first city
        for i in range(len(tw_open)-1):
            colors.append('blue')
        plt.scatter(trip[:,0], trip[:,1], color=colors, s=200)
        # Plot tour
        tour=np.array(list(range(len(trip))) + [0])
        X = trip[tour, 0]
        Y = trip[tour, 1]
        plt.plot(X, Y,"--", markersize=100)
        # Annotate cities with TW
        tw_open = np.rint(tw_open)
        tw_close = np.rint(tw_close)
        time_window = np.concatenate((tw_open,tw_close),axis=1)
        for tw, (x, y) in zip(time_window,(zip(X,Y))):
            plt.annotate(tw,xy=(x, y))  
        plt.xlim(0,60)
        plt.ylim(0,60)
        plt.show()


    # Heatmap of permutations (x=cities; y=steps) 
Example #5
Source File: plotFigures.py    From fullrmc with GNU Affero General Public License v3.0, 7 votes
def plot(PDF, figName, imgpath, show=False, save=True):
    # plot
    output = PDF.get_constraint_value()
    plt.plot(PDF.experimentalDistances,PDF.experimentalPDF, 'ro', label="experimental", markersize=7.5, markevery=1 )
    plt.plot(PDF.shellsCenter, output["pdf"], 'k', linewidth=3.0,  markevery=25, label="total" )

    styleIndex = 0
    for key in output:
        val = output[key]
        if key in ("pdf_total", "pdf"):
            continue
        elif "inter" in key:
            plt.plot(PDF.shellsCenter, val, STYLE[styleIndex], markevery=5, label=key.split('rdf_inter_')[1] )
            styleIndex+=1
    plt.legend(frameon=False, ncol=1)
    # set labels
    plt.title("$\\chi^{2}=%.6f$"%PDF.squaredDeviations, size=20)
    plt.xlabel("$r (\AA)$", size=20)
    plt.ylabel("$g(r)$", size=20)
    # show plot
    if save: plt.savefig(figName)
    if show: plt.show()
    plt.close() 
Example #6
Source File: test.py    From MomentumContrast.pytorch with MIT License, 6 votes
def show(mnist, targets, ret):
    target_ids = range(len(set(targets)))
    
    colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'violet', 'orange', 'purple']
    
    plt.figure(figsize=(12, 10))
    
    ax = plt.subplot(aspect='equal')
    for label in set(targets):
        idx = np.where(np.array(targets) == label)[0]
        plt.scatter(ret[idx, 0], ret[idx, 1], c=colors[label], label=label)
    
    for i in range(0, len(targets), 250):
        img = (mnist[i][0] * 0.3081 + 0.1307).numpy()[0]
        img = OffsetImage(img, cmap=plt.cm.gray_r, zoom=0.5) 
        ax.add_artist(AnnotationBbox(img, ret[i]))
    
    plt.legend()
    plt.show() 
Example #7
Source File: util.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License, 6 votes
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Example #8
Source File: helper.py    From Stock-Price-Prediction with MIT License, 6 votes
def plot_mul(Y_hat, Y, pred_len):
    """
    Plots the predicted data versus the true data

    Input: Predicted data, True Data, Length of prediction
    Output: return plot

    Note: Run from timeSeriesPredict.py
    """
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(Y, label='Y')
    # Plot each prediction segment at its respective offset within the series
    for i, j in enumerate(Y_hat):
        shift = [None for p in range(i * pred_len)]
        plt.plot(shift + j, label='Y_hat')
        plt.legend()
    plt.show() 
Example #9
Source File: utils.py    From deep-learning-note with MIT License, 6 votes
def show(image):
    """
    Render a given numpy.uint8 2D array of pixel data.
    """
    plt.imshow(image, cmap='gray')
    plt.show() 
Example #10
Source File: dataset.py    From neural-combinatorial-optimization-rl-tensorflow with MIT License, 6 votes
def visualize_sampling(self, permutations):
        max_length = len(permutations[0])
        grid = np.zeros([max_length,max_length]) # initialize heatmap grid to 0

        transposed_permutations = np.transpose(permutations)
        for t, cities_t in enumerate(transposed_permutations): # step t, cities chosen at step t
            city_indices, counts = np.unique(cities_t,return_counts=True,axis=0)
            for u,v in zip(city_indices, counts):
                grid[t][u]+=v # update grid with counts from the batch of permutations

        # plot heatmap
        fig = plt.figure()
        rcParams.update({'font.size': 22})
        ax = fig.add_subplot(1,1,1)
        ax.set_aspect('equal')
        plt.imshow(grid, interpolation='nearest', cmap='gray')
        plt.colorbar()
        plt.title('Sampled permutations')
        plt.ylabel('Time t')
        plt.xlabel('City i')
        plt.show() 
Example #11
Source File: asthama_search.py    From pepper-robot-programming with MIT License, 6 votes
def _capture3dImage(self):
        # Depth Image in RGB

        # WARNING: the same subscriber name can only be used six times.
        strName = "capture3dImage_{}".format(random.randint(1,10000000000))


        clientRGB = self.video_service.subscribeCamera(strName, AL_kDepthCamera, AL_kQVGA, 11, 10)
        imageRGB = self.video_service.getImageRemote(clientRGB)

        imageWidth  = imageRGB[0]
        imageHeight = imageRGB[1]
        array       = imageRGB[6]
        image_string = str(bytearray(array))

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)
        # Save the image.
        image_name_3d = "images/img3d-" + str(self.imageNo3d) + ".png"
        im.save(image_name_3d, "PNG") # saved to the images folder in the current working directory; create it if it does not exist
        self.imageNo3d += 1
        im.show()

        return 
Example #12
Source File: asthama_search.py    From pepper-robot-programming with MIT License, 6 votes
def _capture2dImage(self, cameraId):
        # Capture Image in RGB

        # WARNING: the same subscriber name can only be used six times.
        strName = "capture2DImage_{}".format(random.randint(1,10000000000))

        clientRGB = self.video_service.subscribeCamera(strName, cameraId, AL_kVGA, 11, 10)
        imageRGB = self.video_service.getImageRemote(clientRGB)

        imageWidth   = imageRGB[0]
        imageHeight  = imageRGB[1]
        array        = imageRGB[6]
        image_string = str(bytearray(array))

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)

        # Save the image.
        image_name_2d = "images/img2d-" + str(self.imageNo2d) + ".png"
        im.save(image_name_2d, "PNG") # saved to the images folder in the current working directory; create it if it does not exist
        self.imageNo2d += 1
        im.show()

        return 
Example #13
Source File: simulate_sin.py    From deep-learning-note with MIT License, 6 votes
def run_eval(sess, test_X, test_y):
    ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))
    ds = ds.batch(1)
    X, y = ds.make_one_shot_iterator().get_next()

    with tf.variable_scope("model", reuse=True):
        prediction, _, _ = lstm_model(X, [0.0], False)
        predictions = []
        labels = []
        for i in range(TESTING_EXAMPLES):
            p, l = sess.run([prediction, y])
            predictions.append(p)
            labels.append(l)

    predictions = np.array(predictions).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((predictions-labels) ** 2).mean(axis=0))
    print("Mean Square Error is: %f" % rmse)

    plt.figure()
    plt.plot(predictions, label='predictions')
    plt.plot(labels, label='real_sin')
    plt.legend()
    plt.show() 
Example #14
Source File: 3_linear_regression_raw.py    From deep-learning-note with MIT License, 6 votes
def generate_dataset(true_w, true_b):
    num_examples = 1000

    features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
    # ground-truth labels
    labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
    # add noise
    labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
    # visualize the distribution
    plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
    plt.show()
    
    return features, labels


# read the dataset in batches
Example #15
Source File: inference.py    From mmdetection with Apache License 2.0, 6 votes
def show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):
    """Visualize the detection results on the image.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        fig_size (tuple): Figure size of the pyplot figure.
    """
    if hasattr(model, 'module'):
        model = model.module
    img = model.show_result(img, result, score_thr=score_thr, show=False)
    plt.figure(figsize=fig_size)
    plt.imshow(mmcv.bgr2rgb(img))
    plt.show() 
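A hypothetical usage sketch (the config and checkpoint paths are placeholders, not files from the mmdetection repository):

from mmdet.apis import init_detector, inference_detector

# build a detector from a config file and a trained checkpoint, then visualize one image
model = init_detector('path/to/config.py', 'path/to/checkpoint.pth', device='cuda:0')
result = inference_detector(model, 'demo.jpg')
show_result_pyplot(model, 'demo.jpg', result, score_thr=0.3)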
Example #16
Source File: h2o_ecg_pulse_detection.py    From keras-anomaly-detection with MIT License, 6 votes
def plot_bidimensional(model, test, recon_error, layer, title):
    bidimensional_data = model.deepfeatures(test, layer).cbind(recon_error).as_data_frame()

    cmap = cm.get_cmap('Spectral')

    fig, ax = plt.subplots()
    bidimensional_data.plot(kind='scatter',
                            x='DF.L{}.C1'.format(layer + 1),
                            y='DF.L{}.C2'.format(layer + 1),
                            s=500,
                            c='Reconstruction.MSE',
                            title=title,
                            ax=ax,
                            colormap=cmap)
    layer_column = 'DF.L{}.C'.format(layer + 1)
    columns = [layer_column + '1', layer_column + '2']
    for k, v in bidimensional_data[columns].iterrows():
        ax.annotate(k, v, size=20, verticalalignment='bottom', horizontalalignment='left')
    fig.canvas.draw()
    plt.show() 
Example #17
Source File: test_bayestar.py    From dustmaps with GNU General Public License v2.0, 6 votes
def atest_plot_samples(self):
        dm = np.linspace(4., 19., 1001)
        samples = []

        for dm_k in dm:
            d = 10.**(dm_k/5.-2.)
            samples.append(self._interp_ebv(self._test_data[0], d))

        samples = np.array(samples).T
        # print samples

        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(1,1,1)
        for s in samples:
            ax.plot(dm, s, lw=2., alpha=0.5)

        plt.show() 
Example #18
Source File: plot_utils.py    From keras-anomaly-detection with MIT License, 6 votes
def visualize_anomaly(y_true, reconstruction_error, threshold):
    error_df = pd.DataFrame({'reconstruction_error': reconstruction_error,
                             'true_class': y_true})
    print(error_df.describe())

    groups = error_df.groupby('true_class')
    fig, ax = plt.subplots()

    for name, group in groups:
        ax.plot(group.index, group.reconstruction_error, marker='o', ms=3.5, linestyle='',
                label="Fraud" if name == 1 else "Normal")

    ax.hlines(threshold, ax.get_xlim()[0], ax.get_xlim()[1], colors="r", zorder=100, label='Threshold')
    ax.legend()
    plt.title("Reconstruction error for different classes")
    plt.ylabel("Reconstruction error")
    plt.xlabel("Data point index")
    plt.show() 
Example #19
Source File: plot_utils.py    From keras-anomaly-detection with MIT License, 5 votes
def plot_confusion_matrix(y_true, y_pred):
    conf_matrix = confusion_matrix(y_true, y_pred)

    plt.figure(figsize=(12, 12))
    sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d")
    plt.title("Confusion matrix")
    plt.ylabel('True class')
    plt.xlabel('Predicted class')
    plt.show() 
Example #20
Source File: run.py    From fullrmc with GNU Affero General Public License v3.0, 5 votes
def load_and_plot_steps_benchmark(constraint="all", groupSize=13):
    benchmark = np.loadtxt(fname='benchmark_%sSteps_%iGroupSize_time.dat'%(constraint,groupSize) )
    tried     = np.loadtxt(fname='benchmark_%sSteps_%iGroupSize_tried.dat'%(constraint,groupSize) )
    accepted  = np.loadtxt(fname='benchmark_%sSteps_%iGroupSize_accepted.dat'%(constraint,groupSize) )
    # plot benchmark
    plt.plot(benchmark[:,0], benchmark[:,1])
    minY = min(benchmark[:,1])
    maxY = max(benchmark[:,1])
    # annotate tried(accepted)
    for i, txt in enumerate( accepted[:,-1] ):
        T = 100*float(tried[i,-1])/float(benchmark[i,0])
        A = 100*float(accepted[i,-1])/float(benchmark[i,0])
        plt.gca().annotate( "%.2f%% (%.2f%%)"%(T,A),  #str(int(tried[i,-1]))+" ("+str(int(txt))+")",
                            xy = (benchmark[i,0],benchmark[i,-1]),
                            rotation=90,
                            horizontalalignment='center',
                            verticalalignment='bottom')
    # show plot
    plt.legend(frameon=False, loc='upper left')
    plt.xlabel("Number of steps")
    plt.ylabel("Time per step (s)")
    plt.gcf().patch.set_facecolor('white')
    # set fig size
    #figSize = plt.gcf().get_size_inches()
    #figSize[1] = figSize[1]+figSize[1]/2.
    #plt.gcf().set_size_inches(figSize, forward=True)
    plt.ylim((None, maxY+0.3*(maxY-minY)))
    # save
    plt.savefig("benchmark_steps.png")
    # plot
    plt.show()



##########################################################################################
#####################################  RUN BENCHMARKS  ################################### 
Example #21
Source File: h2o_ecg_pulse_detection.py    From keras-anomaly-detection with MIT License, 5 votes
def plot_stacked_time_series(df, title):
    stacked = df.stack()
    stacked = stacked.reset_index()
    total = [data[0].values for name, data in stacked.groupby('level_0')]
    # pd.DataFrame({idx: pos for idx, pos in enumerate(total)}, index=stacked['level_1']).plot(title=title)
    pd.DataFrame({idx: pos for idx, pos in enumerate(total)}).plot(title=title)
    plt.legend(bbox_to_anchor=(1.05, 1))
    plt.show() 
Example #22
Source File: mpl.py    From neural-pipeline with MIT License, 5 votes
def realtime(self, is_realtime: bool) -> 'MPLMonitor':
        """
        Is need to show data updates in realtime

        :param is_realtime: is need realtime
        :return: self object
        """
        self._realtime = is_realtime 
Example #23
Source File: 16_basic_kernels.py    From deep-learning-note with MIT License, 5 votes
def show_images(images, rgb=True):
    gs = gridspec.GridSpec(1, len(images))
    for i, image in enumerate(images):
        plt.subplot(gs[0, i])
        if rgb:
            plt.imshow(image)
        else:
            image = image.reshape(image.shape[0], image.shape[1])
            plt.imshow(image, cmap='gray')
        plt.axis('off')
    plt.show() 
Example #24
Source File: 16_basic_kernels.py    From deep-learning-note with MIT License, 5 votes
def read_one_image(filename):
    ''' This method shows how to read an image from a file into a tensor.
    The output is a tensor object.
    '''
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_image(image_string)
    image = tf.cast(image_decoded, tf.float32) / 256.0
    return image 
Example #25
Source File: 18_basic_tfrecord.py    From deep-learning-note with MIT License, 5 votes
def read_tfrecord(tfrecord_file):
    label, shape, image = read_from_tfrecord([tfrecord_file])

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        label, image, shape = sess.run([label, image, shape])
        coord.request_stop()
        coord.join(threads)

    print(label)
    print(shape)
    plt.imshow(image)
    plt.show() 
Example #26
Source File: 4_linear_regression_torch.py    From deep-learning-note with MIT License, 5 votes
def generate_dataset(true_w, true_b):
    num_examples = 1000

    features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
    # ground-truth labels
    labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
    # add noise
    labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
    # visualize the distribution
    plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
    plt.show()

    return features, labels 
Example #27
Source File: 38_gradient_descent.py    From deep-learning-note with MIT License, 5 votes
def show_trace_2d(f, results):
    plt.plot(*zip(*results), '-o', color='#ff7f0e')
    x1, x2 = np.meshgrid(np.arange(-5.5, 1.0, 0.1), np.arange(-3.0, 1.0, 0.1))
    plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
    plt.xlabel('x1')
    plt.ylabel('x2')
    plt.show() 
Example #28
Source File: 38_gradient_descent.py    From deep-learning-note with MIT License, 5 votes
def show_trace(res):
    n = max(abs(min(res)), abs(max(res)), 10)
    f_line = np.arange(-n, n, 0.1)
    plt.plot(f_line, [x * x for x in f_line])
    plt.plot(res, [x * x for x in res], '-o')
    plt.xlabel('x')
    plt.ylabel('f(x)')
    plt.show() 
Example #29
Source File: utils.py    From deep-learning-note with MIT License, 5 votes
def show_images(imgs, num_rows, num_cols, scale=2):
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    for i in range(num_rows):
        for j in range(num_cols):
            axes[i][j].imshow(imgs[i * num_cols + j])
            axes[i][j].axes.get_xaxis().set_visible(False)
            axes[i][j].axes.get_yaxis().set_visible(False)
    plt.show()
    return axes 
Example #30
Source File: utils.py    From deep-learning-note with MIT License, 5 votes
def train_opt(optimizer_fn, states, hyperparams, features, labels,
              batch_size=10, num_epochs=2):
    # initialize the model
    net, loss = linreg, squared_loss

    w = torch.nn.Parameter(torch.tensor(np.random.normal(0, 0.01, size=(features.shape[1], 1)), dtype=torch.float32),
                           requires_grad=True)
    b = torch.nn.Parameter(torch.zeros(1, dtype=torch.float32), requires_grad=True)

    def eval_loss():
        return loss(net(features, w, b), labels).mean().item()

    ls = [eval_loss()]
    data_iter = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(features, labels), batch_size, shuffle=True)

    for _ in range(num_epochs):
        start = time.time()
        for batch_i, (X, y) in enumerate(data_iter):
            l = loss(net(X, w, b), y).mean()  # use the mean loss

            # zero the gradients
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()

            l.backward()
            optimizer_fn([w, b], states, hyperparams)  # update the model parameters
            if (batch_i + 1) * batch_size % 100 == 0:
                ls.append(eval_loss())  # record the current training loss every 100 examples
    # print the result and plot it
    print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
    plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.show()


# Unlike the original book, the first argument here is the optimizer function itself rather than the optimizer's name
# e.g. optimizer_fn=torch.optim.SGD, optimizer_hyperparams={"lr": 0.05}
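A hypothetical call sketch for the train_opt function above (the sgd helper and the synthetic features/labels are illustrative; linreg and squared_loss are assumed to be defined elsewhere in utils.py, as in the original project):

import torch

def sgd(params, states, hyperparams):
    # plain minibatch SGD: update each parameter in place using its gradient
    for p in params:
        p.data -= hyperparams['lr'] * p.grad.data

features = torch.randn(1000, 5)
labels = features.sum(dim=1) + 0.01 * torch.randn(1000)
train_opt(sgd, None, {'lr': 0.02}, features, labels)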