Python matplotlib.pyplot.grid() Examples

The following code examples show how to use matplotlib.pyplot.grid(). They are taken from open source Python projects; the source project, file, and license are noted above each example.
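
Before the project excerpts, here is a minimal, self-contained sketch of a typical plt.grid() call; the data and the styling values are illustrative only and are not taken from any of the projects below.

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)
plt.plot(x, np.sin(x), label="sin(x)")

# Enable the grid; extra keyword arguments are forwarded to the gridline Line2D objects.
plt.grid(True, color="grey", linestyle="--", linewidth=0.5)

plt.legend(loc="best")
plt.show()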

Example 1
Project: deep-nn-car   Author: scope-lab-vu   File: Controller.py    MIT License
def animateGraph(self):
        global fig
        fig = plt.figure()
        ax1 = fig.add_subplot(1,1,1)
        # animate()
        # Description: Every 200 ms, get speed, steering angle, and displacement estimate and update dynamic graph
        def animate(i):
            lx,ly = self.getLocation()
            try:
                ax1.clear()
                ax1.plot(lx,ly)
                ax1.set_title("2D position estimate")
                ax1.set_ylabel(" Y displacement (m)")
                ax1.set_xlabel(" X displacement (m)")
            except:
                print('s')
        plt.grid(True)
        plt.subplots_adjust(hspace = 1,wspace = 0.6)
        ani = animation.FuncAnimation(fig, animate, interval=200)
        plt.show() 
Example 2
Project: CNN_based_indoor_localization   Author: weimengmeng1999   File: epoch_graph.py    GNU General Public License v3.0
def loss_plot(self, loss_type):
        iters = range(len(self.losses[loss_type]))
        plt.figure()
        # acc
        plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc')
        # loss
        plt.plot(iters, self.losses[loss_type], 'g', label='train loss')
        if loss_type == 'epoch':
            # val_acc
            plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc')
            # val_loss
            plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
        plt.grid(True)
        plt.xlabel(loss_type)
        plt.ylabel('acc-loss')
        plt.legend(loc="upper right")
        plt.show() 
Example 3
Project: Deep_Neural_Networks   Author: sarthak268   File: GAN_.py    BSD 2-Clause "Simplified" License
def show_train_hist(hist, show = False, save = False, path = 'Train_hist.png'):
    x = range(len(hist['D_losses']))

    y1 = hist['D_losses']
    y2 = hist['G_losses']

    plt.plot(x, y1, label='D_loss')
    plt.plot(x, y2, label='G_loss')

    plt.xlabel('Epoch')
    plt.ylabel('Loss')

    plt.legend(loc=4)
    plt.grid(True)
    plt.tight_layout()

    if save:
        plt.savefig(path)

    if show:
        plt.show()
    else:
        plt.close()

# training parameters 
Example 4
Project: Deep_Neural_Networks   Author: sarthak268   File: GAN_cuda.py    BSD 2-Clause "Simplified" License
def show_train_hist(hist, show = False, save = False, path = 'Train_hist.png'):
    x = range(len(hist['D_losses']))

    y1 = hist['D_losses']
    y2 = hist['G_losses']

    plt.plot(x, y1, label='D_loss')
    plt.plot(x, y2, label='G_loss')

    plt.xlabel('Epoch')
    plt.ylabel('Loss')

    plt.legend(loc=4)
    plt.grid(True)
    plt.tight_layout()

    if save:
        plt.savefig(path)

    if show:
        plt.show()
    else:
        plt.close()

# training parameters 
Example 5
Project: pohmm-keystroke   Author: vmonaco   File: plotting.py    BSD 3-Clause "New" or "Revised" License
def plot_stationarity_examples(m, names):
    import matplotlib.cm as cm

    def plot_fn(ax, i):
        plt.grid(False)
        plt.imshow(m[i], origin='lower', interpolation='none', cmap=cm.Greys,
                   extent=[0.5, m[i].shape[0] + 0.5, 0.5, m[i].shape[1] + 0.5])

        ax.set_xticks(np.arange(1, m[i].shape[0] + 1))
        ax.set_yticks(np.arange(1, m[i].shape[1] + 1))

        plt.clim(m[i].values.mean() - 4 * m[i].values.std(), m[i].values.mean() + 4 * m[i].values.std())
        ax.text(0.5, 0.95, names[i], va='top', ha='center', transform=ax.transAxes, color='black', fontsize=15)
        return

    return plot6(plot_fn, xlabel='Train sample', ylabel='Predict sample') 
Example 6
Project: Chinese-Character-and-Calligraphic-Image-Processing   Author: MingtaoGuo   File: test.py    MIT License
def test(self):

        list_ = os.listdir("./maps/val/")
        nums_file = list_.__len__()
        saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "generator"))
        saver.restore(self.sess, "./save_para/model.ckpt")
        rand_select = np.random.randint(0, nums_file)
        INPUTS_CONDITION = np.zeros([1, self.img_h, self.img_w, 3])
        INPUTS = np.zeros([1, self.img_h, self.img_w, 3])
        img = np.array(Image.open(self.path + list_[rand_select]))
        img_h, img_w = img.shape[0], img.shape[1]
        INPUTS_CONDITION[0] = misc.imresize(img[:, img_w//2:], [self.img_h, self.img_w]) / 127.5 - 1.0
        INPUTS[0] = misc.imresize(img[:, :img_w//2], [self.img_h, self.img_w]) / 127.5 - 1.0
        [fake_img] = self.sess.run([self.inputs_fake], feed_dict={self.inputs_condition: INPUTS_CONDITION})
        out_img = np.concatenate((INPUTS_CONDITION[0], fake_img[0], INPUTS[0]), axis=1)
        Image.fromarray(np.uint8((out_img + 1.0)*127.5)).save("./results/1.jpg")
        plt.imshow(np.uint8((out_img + 1.0)*127.5))
        plt.grid("off")
        plt.axis("off")
        plt.show() 
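
A note on the plt.grid("off") call above: grid() expects a boolean (or None) as its first argument, and the string form "on"/"off" was deprecated in matplotlib and is not accepted by newer releases, so plt.grid(False) is the portable way to disable gridlines. A minimal adapted sketch, with a random placeholder array standing in for the generated image:

import numpy as np
import matplotlib.pyplot as plt

out_img = np.random.rand(256, 256, 3)  # placeholder for the generator output

plt.imshow(out_img)
plt.grid(False)   # explicit boolean instead of the legacy "off" string
plt.axis("off")   # also hides ticks, labels and the axes frame
plt.show()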
Example 7
Project: stock_monitor   Author: icemoon1987   File: data_analyser.py    GNU General Public License v2.0
def draw_relevance(self, data1, data2, data1_name, data2_name):

		norm = normalizer()

		data1_norm = norm.linear_normalize(data1)
		data2_norm = norm.linear_normalize(data2)

		plt.figure(1)
		plt.subplot(211)
		plt.plot(data1, 'b-', data2, 'r-')
		plt.legend([data1_name, data2_name], loc="upper left")

		plt.subplot(212)
		plt.plot(data1_norm, 'b-', data2_norm, 'r-')
		plt.legend([data1_name, data2_name], loc="upper left")
		plt.grid(True)
		plt.show()

		return 
Example 8
Project: python-machine-learning   Author: sho-87   File: plot.py    MIT License
def plot_training(history):
    """Plot the training curve.
    
    Parameters:
    history -- numpy array/list of cost values over all training iterations
    
    Returns:
    Plot of the cost for each iteration of training
    
    """
    plt.plot(range(1, len(history)+1), history)
    plt.grid(True)
    plt.xlim(1, len(history))
    plt.ylim(min(history), max(history))
    
    plt.title("Training Curve")
    plt.xlabel("Iteration")
    plt.ylabel("Cost") 
Example 9
Project: Autoenv   Author: intelligent-control-lab   File: terrain.py    MIT License
def generate_hills(width, height, nhills):
    '''
    @param width float, terrain width
    @param height float, terrain height
    @param nhills int, number of hills to generate; the number actually generated is sqrt(nhills)^2
    '''
    # setup coordinate grid
    xmin, xmax = -width/2.0, width/2.0
    ymin, ymax = -height/2.0, height/2.0
    x, y = np.mgrid[xmin:xmax:STEP, ymin:ymax:STEP]
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x; pos[:, :, 1] = y
    
    # generate hilltops
    xm, ym = np.mgrid[xmin:xmax:width/np.sqrt(nhills), ymin:ymax:height/np.sqrt(nhills)]
    mu = np.c_[xm.flat, ym.flat]
    sigma = float(width*height)/(nhills*8)
    for i in range(mu.shape[0]):
        mu[i] = multivariate_normal.rvs(mean=mu[i], cov=sigma)
    
    # generate hills
    sigma = sigma + sigma*np.random.rand(mu.shape[0])
    rvs = [ multivariate_normal(mu[i,:], cov=sigma[i]) for i in range(mu.shape[0]) ]
    hfield = np.max([ rv.pdf(pos) for rv in rvs ], axis=0)
    return x, y, hfield 
Example 10
Project: Autoenv   Author: intelligent-control-lab   File: terrain.py    MIT License
def save_texture(x, y, hfield, fname, path=None):
    '''
    @param path, str (optional). If not provided, DEFAULT_PATH is used. Make sure this matches the <texturedir> of the
        <compiler> element in the env XML
    '''
    path = _checkpath(path)
    plt.figure()
    plt.contourf(x, y, -hfield, 100, cmap=TERRAIN_CMAP)
    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()
    # for some reason plt.grid does not work here, so generate gridlines manually
    for i in np.arange(xmin,xmax,0.5):
        plt.plot([i,i], [ymin,ymax], 'k', linewidth=0.1)
    for i in np.arange(ymin,ymax,0.5):
        plt.plot([xmin,xmax],[i,i], 'k', linewidth=0.1)
    plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.close() 
Example 11
Project: C3D-Action-Recognition   Author: lianggyu   File: train_c3d.py    MIT License
def plot_history(history, result_dir):
    plt.plot(history.history['acc'], marker='.')
    plt.plot(history.history['val_acc'], marker='.')
    plt.title('model accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.grid()
    plt.legend(['acc', 'val_acc'], loc='lower right')
    plt.savefig(os.path.join(result_dir, 'model_accuracy.png'))
    plt.close()

    plt.plot(history.history['loss'], marker='.')
    plt.plot(history.history['val_loss'], marker='.')
    plt.title('model loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.grid()
    plt.legend(['loss', 'val_loss'], loc='upper right')
    plt.savefig(os.path.join(result_dir, 'model_loss.png'))
    plt.close() 
Example 12
Project: sumo-rl   Author: LucasAlegre   File: plot.py    MIT License
def plot_figure(figsize=(12, 9), x_label='', y_label='', title=''):
    ax = plt.subplot()

    # manually change this:
    #plt.xlim([380, 399900])
    #plt.yticks([0]+[x for x in range(1500, 3001, 250)])
    #plt.ylim([1500, 3001])
    #for i in range(0,400000,100000):
    #    plt.axvline(x=i, color='k', linestyle='--')
    #plt.axvline(x=25000, color='k', linestyle='--')
    #plt.axvline(x=50000, color='k', linestyle='--')
    #plt.axvline(x=75000, color='k', linestyle='--')
    plt.grid(axis='y')
    #plt.text(8000,2850,'Context 1')
    #plt.text(28000,2850,'Context 2')
    #plt.text(44500,5000,'Context 1')
    #plt.text(64500,5000,'Context 2')

    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label) 
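
The axis='y' argument used above draws horizontal gridlines only, which reads well behind bar charts and line plots. A minimal, self-contained sketch of the same idea (the data and zorder values here are made up for illustration):

import matplotlib.pyplot as plt

values = [3, 7, 2, 5, 6]
plt.bar(range(len(values)), values, zorder=3)

# Horizontal gridlines only, drawn beneath the bars (zorder 0 < bar zorder 3).
plt.grid(True, axis="y", linestyle="--", alpha=0.5, zorder=0)

plt.xlabel("category")
plt.ylabel("count")
plt.show()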
Example 13
Project: PythonNoiseReduction   Author: ZackWalsh57   File: waveformGrapher.py    MIT License
def graphWaveforms():
    SAMPLES = globalQueue.sampleSettings[2] #Local Vars for easier use/calling
    FREQUENCY = globalQueue.sampleSettings[0]
    DURATION = globalQueue.sampleSettings[1]

    SIN_SAMPLES =  (numpy.sin(2*numpy.pi*numpy.arange(SAMPLES*DURATION)*FREQUENCY/SAMPLES)) #Make our sin samples
    INVERSE_SIN_SAMPLES = -1*SIN_SAMPLES #Inverse sin samples should just be a negated version of the sin samples globally
    #so this method of creation is ok.

    TIME = (numpy.arange(0.0, FREQUENCY/2, 1) / 100) * 2 * 2 #Calculations
    SIN_WAVE = (SIN_SAMPLES[0:FREQUENCY/2]) * FREQUENCY #Calculations
    INVERSE_SIN = (INVERSE_SIN_SAMPLES[0:FREQUENCY/2]) * FREQUENCY #Calculations

    plt.plot(TIME, SIN_WAVE, color='b')
    plt.plot(TIME, INVERSE_SIN, color='r')

    plt.xlabel('TIME (S)')
    plt.ylabel('FREQUENCY CHANGE')
    plt.title('WAVEFORM COMPARISONS')      #Plot settings
    plt.grid(True)
    plt.savefig("/WAVEFORMS.png")
    plt.show() #DRAW IT! 
Example 14
Project: pepper-robot-programming   Author: maverickjoy   File: asthama_search.py    MIT License
def _initialisePlot(self):

        plt.rc('grid', linestyle=":", color='black')
        plt.rcParams['axes.facecolor'] = 'black'
        plt.rcParams['axes.edgecolor'] = 'white'
        plt.rcParams['grid.alpha'] = 1
        plt.rcParams['grid.color'] = "green"
        plt.grid(True)
        plt.xlim(self.PLOTXMIN, self.PLOTXMAX)
        plt.ylim(self.PLOTYMIN, self.PLOTYMAX)
        self.graph, = plt.plot([], [], 'o')

        return 
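
The example above styles its grid through rc settings before enabling it; any plt.grid(True) call made afterwards picks up those defaults. A minimal sketch of the same mechanism, with arbitrary colours:

import matplotlib.pyplot as plt

# rc settings become the defaults for gridlines created later
plt.rc('grid', linestyle=':', color='green', alpha=1.0)

plt.plot([0, 1, 2], [0, 1, 4], 'o')
plt.grid(True)   # dotted green gridlines, per the rc settings above
plt.show()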
Example 15
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License
def plotLearningCurves(train, classifier):
    #P.show()
    X = train.values[:, 1::]
    y = train.values[:, 0]

    train_sizes, train_scores, test_scores = learning_curve(
            classifier, X, y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.figure()
    plt.title("Learning Curves")
    plt.legend(loc="best")
    plt.xlabel("Training samples")
    plt.ylabel("Error Rate")
    plt.ylim((0, 1))
    plt.gca().invert_yaxis()
    plt.grid()

    # Plot the average training and test score lines at each training set size
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Test score")

    # Plot the std deviation as a transparent range at each training set size
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="b")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                     alpha=0.1, color="r")

    # Draw the plot and reset the y-axis
    plt.draw()
    plt.gca().invert_yaxis()

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
    classifier.fit(X_train, y_train)
    plt.show() 
Example 16
Project: RandomFourierFeatures   Author: tiskw   File: sample_rff_regression.py    MIT License
def main():

    ### Fix seed for random Fourier feature calculation
    pyrff.seed(111)

    ### Prepare training data
    Xs_train = np.linspace(0, 3, 21).reshape((21, 1))
    ys_train = np.sin(Xs_train**2)
    Xs_test  = np.linspace(0, 3, 101).reshape((101, 1))
    ys_test  = np.sin(Xs_test**2)

    ### Create classifier instance
    reg = pyrff.RFFRegression(dim_output = 8, std = 0.5)

    ### Train regression with random fourier features
    reg.fit(Xs_train, ys_train)

    ### Conduct prediction for the test data
    predict = reg.predict(Xs_test)

    ### Plot regression results
    mpl.figure(0)
    mpl.title("Regression for function y = sin(x^2) with RFF")
    mpl.xlabel("X")
    mpl.ylabel("Y")
    mpl.plot(Xs_train, ys_train, "o")
    mpl.plot(Xs_test,  ys_test,  ".")
    mpl.plot(Xs_test,  predict,  "-")
    mpl.legend(["Training data", "Test data", "Prediction by RFF regression"])
    mpl.grid()
    mpl.show() 
Example 17
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License
def plot(self, names=None):
        names = self.names if names == None else names
        numbers = self.numbers
        for _, name in enumerate(names):
            x = np.arange(len(numbers[name]))
            plt.plot(x, np.asarray(numbers[name]))
        plt.legend([self.title + '(' + name + ')' for name in names])
        plt.grid(True) 
Example 18
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License
def plot(self, names=None):
        plt.figure()
        plt.plot()
        legend_text = []
        for logger in self.loggers:
            legend_text += plot_overlap(logger, names)
        legend_text = ['WRN-28-10+Ours (error 17.65%)', 'WRN-28-10 (error 18.68%)']
        plt.legend(legend_text, loc=0)
        plt.ylabel('test error (%)')
        plt.xlabel('epoch')
        plt.grid(True) 
Example 19
Project: Random-Erasing   Author: zhunzhong07   File: logger.py    Apache License 2.0
def plot(self, names=None):   
        names = self.names if names == None else names
        numbers = self.numbers
        for _, name in enumerate(names):
            x = np.arange(len(numbers[name]))
            plt.plot(x, np.asarray(numbers[name]))
        plt.legend([self.title + '(' + name + ')' for name in names])
        plt.grid(True) 
Example 20
Project: Random-Erasing   Author: zhunzhong07   File: logger.py    Apache License 2.0
def plot(self, names=None):
        plt.figure()
        plt.plot()
        legend_text = []
        for logger in self.loggers:
            legend_text += plot_overlap(logger, names)
        legend_text = ['WRN-28-10+Ours (error 17.65%)', 'WRN-28-10 (error 18.68%)']
        plt.legend(legend_text, loc=0)
        plt.ylabel('test error (%)')
        plt.xlabel('epoch')
        plt.grid(True) 
Example 21
Project: neural-pipeline   Author: toodef   File: mpl.py    MIT License
def place_plot(self, axis) -> None:
            self._axis = axis

            for n, v in self._prev_values.items():
                self._axis.scatter(v[1], v[0], label=n, c=self._colors[n])

            self._axis.set_ylabel(self._handle)
            self._axis.set_xlabel('epoch')
            self._axis.xaxis.set_major_locator(MaxNLocator(integer=True))
            self._axis.legend()
            plt.grid() 
Example 22
Project: helloworld   Author: pip-uninstaller-python   File: matplotlibTest.py    GNU General Public License v2.0
def main():
    # line
    x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
    c, s = np.cos(x), np.sin(x)
    plt.figure(1)
    plt.plot(x, c, color="blue", linewidth=1.0, linestyle="-", label="COS", alpha=0.5)  # independent variable, dependent variable
    plt.plot(x, s, "r.", label="SIN")  # sine  "-"/"r-"/"r."
    plt.title("COS & SIN")
    ax = plt.gca()
    ax.spines["right"].set_color("none")
    ax.spines["top"].set_color("none")
    ax.spines["left"].set_position(("data", 0))  # position along the x-axis (place at x = 0)
    ax.spines["bottom"].set_position(("data", 0))  # position along the y-axis (place at y = 0)
    ax.xaxis.set_ticks_position("bottom")
    ax.yaxis.set_ticks_position("left")
    plt.xticks([-np.pi, -np.pi / 2.0, 0, np.pi / 2, np.pi],
               [r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
    plt.yticks(np.linspace(-1, 1, 5, endpoint=True))
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontsize(16)
        label.set_bbox(dict(facecolor="white", edgecolor="None", alpha=0.2))
    plt.legend(loc="upper left")  # show the legend in the upper-left corner
    plt.grid()  # gridlines
    # plt.axis([-1, 1, -0.5, 1])  # display range
    plt.fill_between(x, np.abs(x) < 0.5, c, c < 0.5, color="green", alpha=0.25)
    t = 1
    plt.plot([t, t], [0, np.cos(t)], "y", linewidth=3, linestyle="--")
    # annotation
    plt.annotate("cos(1)", xy=(t, np.cos(1)), xycoords="data", xytext=(+10, +30),
                 textcoords="offset points", arrowprops=dict(arrowstyle="->", connectionstyle="arc3, rad=.2"))
    plt.show()


# Scatter --> scatter plot
Example 23
Project: deep-nn-car   Author: scope-lab-vu   File: plotTool.py    MIT License
def animateGraph():
    global fig
    fig = plt.figure()
    ax1 = fig.add_subplot(3,1,1)
    ax2 = fig.add_subplot(3,1,2)
    ax3 = fig.add_subplot(3,1,3)
    # animate()
    # Description: Every 200 ms, get speed, steering angle, and displacement estimate and update dynamic graph
    def animate(i):
        if (truncate): truncateGraph()
        (vx,vy,setSpeeds) = getSpeedInput()
        (sx,sy) = getSteerInput()
        (lx,ly) = getLocationInput()
        try:
            ax1.clear()
            ax1.set_ylim(-0.5,1.5)
            ax1.plot(vx,vy,label = "Current Speed")
            ax1.plot(vx,setSpeeds,label = "Set Speed")
            ax1.set_title("Speed Time Series")
            ax1.set_ylabel("Speed (m/s)")
            ax1.set_xlabel("Time (s)")
            ax1.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
            ax2.clear()
            ax2.set_ylim(-35,35)
            ax2.plot(sx,sy)
            ax2.set_title("Steering Time Series")
            ax2.set_ylabel("Steering Angle (deg)")
            ax2.set_xlabel("Time (s)")
            ax3.clear()
            ax3.plot(lx,ly)
            ax3.set_title("2D position estimate")
            ax3.set_ylabel(" Y displacement (m)")
            ax3.set_xlabel(" X displacement (m)")
        except:
            print('s')
    plt.grid(True)
    plt.subplots_adjust(hspace = 1,wspace = 0.6)
    ani = animation.FuncAnimation(fig, animate, interval=200)
    plt.show() 
Example 24
Project: Parallel.GAMIT   Author: demiangomez   File: pyETM1.py    GNU General Public License v3.0
def plot_hist(self):

        import matplotlib.pyplot as plt
        import matplotlib.mlab as mlab
        from scipy.stats import norm

        L = [self.soln.x, self.soln.y, self.soln.z]

        if self.A is not None:

            residuals = np.sqrt(np.square(L[0]) + np.square(L[1]) + np.square(L[2])) - \
                        np.sqrt(np.square(np.dot(self.A, self.C[0])) + np.square(np.dot(self.A, self.C[1])) +
                                np.square(np.dot(self.A, self.C[2])))

            (mu, sigma) = norm.fit(residuals)

            n, bins, patches = plt.hist(residuals, 200, normed=1, alpha=0.75, facecolor='blue')

            y = mlab.normpdf(bins, mu, sigma)
            plt.plot(bins, y, 'r--', linewidth=2)
            plt.title(r'$\mathrm{Histogram\ of\ residuals (mm):}\ \mu=%.3f,\ \sigma=%.3f$' % (mu*1000, sigma*1000))
            plt.grid(True)

            plt.show() 
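
A compatibility note on the histogram call above: plt.hist(..., normed=1) and matplotlib.mlab.normpdf have been removed in matplotlib 3.x (use density=True and scipy.stats.norm.pdf instead). A rough, adapted sketch with synthetic residuals standing in for the real data:

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

residuals = np.random.normal(0.0, 0.005, size=2000)  # placeholder data
mu, sigma = norm.fit(residuals)

# density=True replaces the removed normed=1 argument
n, bins, patches = plt.hist(residuals, 200, density=True, alpha=0.75, facecolor='blue')

# norm.pdf replaces the removed matplotlib.mlab.normpdf helper
plt.plot(bins, norm.pdf(bins, mu, sigma), 'r--', linewidth=2)
plt.grid(True)
plt.show()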
Example 25
Project: lirpg   Author: Hwhitetooth   File: gail-eval.py    MIT License
def plot(env_name, bc_log, gail_log, stochastic):
    upper_bound = bc_log['upper_bound']
    bc_avg_ret = bc_log['avg_ret']
    gail_avg_ret = gail_log['avg_ret']
    plt.plot(CONFIG['traj_limitation'], upper_bound)
    plt.plot(CONFIG['traj_limitation'], bc_avg_ret)
    plt.plot(CONFIG['traj_limitation'], gail_avg_ret)
    plt.xlabel('Number of expert trajectories')
    plt.ylabel('Accumulated reward')
    plt.title('{} unnormalized scores'.format(env_name))
    plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right')
    plt.grid(b=True, which='major', color='gray', linestyle='--')
    if stochastic:
        title_name = 'result/{}-unnormalized-stochastic-scores.png'.format(env_name)
    else:
        title_name = 'result/{}-unnormalized-deterministic-scores.png'.format(env_name)
    plt.savefig(title_name)
    plt.close()

    bc_normalized_ret = bc_log['normalized_ret']
    gail_normalized_ret = gail_log['normalized_ret']
    plt.plot(CONFIG['traj_limitation'], np.ones(len(CONFIG['traj_limitation'])))
    plt.plot(CONFIG['traj_limitation'], bc_normalized_ret)
    plt.plot(CONFIG['traj_limitation'], gail_normalized_ret)
    plt.xlabel('Number of expert trajectories')
    plt.ylabel('Normalized performance')
    plt.title('{} normalized scores'.format(env_name))
    plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right')
    plt.grid(b=True, which='major', color='gray', linestyle='--')
    if stochastic:
        title_name = 'result/{}-normalized-stochastic-scores.png'.format(env_name)
    else:
        title_name = 'result/{}-normalized-deterministic-scores.png'.format(env_name)
    plt.ylim(0, 1.6)
    plt.savefig(title_name)
    plt.close() 
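
The b=True keyword in plt.grid(b=True, which='major', ...) above was deprecated in matplotlib 3.5 in favour of visible; passing the flag positionally works across versions. A minimal equivalent call on a recent matplotlib:

import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [1, 3, 2])
# positional boolean (or visible=True) instead of the deprecated b= keyword
plt.grid(True, which='major', color='gray', linestyle='--')
plt.show()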
Example 26
Project: euclid   Author: njpayne   File: plot_learning_curve.py    GNU General Public License v2.0
def plot_learning_curve_iter(estimator, title, cv = 10):

    test_scores = []
    test_std =[]
    iteration_count = []

    for i in range(len(estimator.grid_scores_)):
        #get the cross validation results from the estimator
        iteration_count.append(estimator.grid_scores_[i][0]["n_iter"])
        test_scores.append(estimator.grid_scores_[i].mean_validation_score)
        test_std.append(np.std(estimator.grid_scores_[i].cv_validation_scores))

    #convert arrays to numpy
    test_scores = np.array(test_scores)
    test_std = np.array(test_std)
    iteration_count = np.array(iteration_count)

    plt.figure()
    plt.title(title)
    plt.xlabel("Epochs")
    plt.ylabel("Score")
    plt.ylim(0.0, 1.1)
    plt.grid()
    #plt.semilogx(param_range, test_scores_mean, label="Cross-validation score", color="g")
    plt.fill_between(iteration_count, test_scores - test_std ,
                     test_scores + test_std , alpha=0.2, color="g")
    plt.plot(iteration_count, test_scores, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")

    return plt 
Example 27
Project: SLiPy   Author: glentner   File: Plot.py    GNU General Public License v2.0
def grid(self, value):
        """
        Show grid on plot.
        """
        self.gridv = value
        plt.grid(value) 
Example 28
Project: PyTeCK   Author: pr-omethe-us   File: detect_peaks.py    MIT License
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
    """Plot results of the detect_peaks function, see its help.
    """

    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('matplotlib is not available.')
    else:
        if ax is None:
            _, ax = plt.subplots(1, 1, figsize=(8, 4))

        ax.plot(x, 'b', lw=1)
        if ind.size:
            label = 'valley' if valley else 'peak'
            label = label + 's' if ind.size > 1 else label
            ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
                    label='%d %s' % (ind.size, label))
            ax.legend(loc='best', framealpha=.5, numpoints=1)
        ax.set_xlim(-.02*x.size, x.size*1.02-1)
        ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
        yrange = ymax - ymin if ymax > ymin else 1
        ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
        ax.set_xlabel('Data #', fontsize=14)
        ax.set_ylabel('Amplitude', fontsize=14)
        mode = 'Valley detection' if valley else 'Peak detection'
        ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
                     % (mode, str(mph), mpd, str(threshold), edge))
        # plt.grid()
        plt.show() 
Example 29
Project: attack-graphs   Author: cyberImperial   File: benchmarks.py    MIT License
def plot(file_name, all_stats, label, selector):
    for key, raw_data in sorted(all_stats.items()):
        batch_threads, slaves = key
        line = [selector(entry) for entry in raw_data][1:]
        if batch_threads > 1:
            plt.plot(line, label="slaves = {}, batch size = {}".format(slaves, batch_threads))
        else:
            plt.plot(line, label="slaves = {}".format(slaves))
    plt.xlabel('time')
    plt.ylabel(label)
    plt.grid(True)
    plt.legend()

    plt.savefig(os.path.join(ROOT, "simulation", "res", file_name))
    plt.gcf().clear() 
Example 30
Project: HardRLWithYoutube   Author: MaxSobolMark   File: gail-eval.py    MIT License
def plot(env_name, bc_log, gail_log, stochastic):
    upper_bound = bc_log['upper_bound']
    bc_avg_ret = bc_log['avg_ret']
    gail_avg_ret = gail_log['avg_ret']
    plt.plot(CONFIG['traj_limitation'], upper_bound)
    plt.plot(CONFIG['traj_limitation'], bc_avg_ret)
    plt.plot(CONFIG['traj_limitation'], gail_avg_ret)
    plt.xlabel('Number of expert trajectories')
    plt.ylabel('Accumulated reward')
    plt.title('{} unnormalized scores'.format(env_name))
    plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right')
    plt.grid(b=True, which='major', color='gray', linestyle='--')
    if stochastic:
        title_name = 'result/{}-unnormalized-stochastic-scores.png'.format(env_name)
    else:
        title_name = 'result/{}-unnormalized-deterministic-scores.png'.format(env_name)
    plt.savefig(title_name)
    plt.close()

    bc_normalized_ret = bc_log['normalized_ret']
    gail_normalized_ret = gail_log['normalized_ret']
    plt.plot(CONFIG['traj_limitation'], np.ones(len(CONFIG['traj_limitation'])))
    plt.plot(CONFIG['traj_limitation'], bc_normalized_ret)
    plt.plot(CONFIG['traj_limitation'], gail_normalized_ret)
    plt.xlabel('Number of expert trajectories')
    plt.ylabel('Normalized performance')
    plt.title('{} normalized scores'.format(env_name))
    plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right')
    plt.grid(b=True, which='major', color='gray', linestyle='--')
    if stochastic:
        title_name = 'result/{}-normalized-stochastic-scores.png'.format(env_name)
    else:
        title_name = 'result/{}-normalized-deterministic-scores.png'.format(env_name)
    plt.ylim(0, 1.6)
    plt.savefig(title_name)
    plt.close() 
Example 31
Project: neat-python   Author: CodeReclaimers   File: visualize.py    BSD 3-Clause "New" or "Revised" License
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """ Plots the population's average and best fitness. """
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    generation = range(len(statistics.most_fit_genomes))
    best_fitness = [c.fitness for c in statistics.most_fit_genomes]
    avg_fitness = np.array(statistics.get_fitness_mean())
    stdev_fitness = np.array(statistics.get_fitness_stdev())

    plt.plot(generation, avg_fitness, 'b-', label="average")
    #plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    plt.plot(generation, best_fitness, 'r-', label="best")

    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        plt.gca().set_yscale('symlog')

    plt.savefig(filename)
    if view:
        plt.show()

    plt.close() 
Example 32
Project: neat-python   Author: CodeReclaimers   File: visualize.py    BSD 3-Clause "New" or "Revised" License
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """ Plots the population's average and best fitness. """
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    generation = range(len(statistics.most_fit_genomes))
    best_fitness = [c.fitness for c in statistics.most_fit_genomes]
    avg_fitness = np.array(statistics.get_fitness_mean())
    stdev_fitness = np.array(statistics.get_fitness_stdev())

    plt.plot(generation, avg_fitness, 'b-', label="average")
    plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    plt.plot(generation, best_fitness, 'r-', label="best")

    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        plt.gca().set_yscale('symlog')

    plt.savefig(filename)
    if view:
        plt.show()

    plt.close() 
Example 33
Project: neat-python   Author: CodeReclaimers   File: visualize.py    BSD 3-Clause "New" or "Revised" License
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """ Plots the population's average and best fitness. """
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    generation = range(len(statistics.most_fit_genomes))
    best_fitness = [c.fitness for c in statistics.most_fit_genomes]
    avg_fitness = np.array(statistics.get_fitness_mean())
    #stdev_fitness = np.array(statistics.get_fitness_stdev())

    plt.plot(generation, avg_fitness, 'b-', label="average")
    #plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    #plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    plt.plot(generation, best_fitness, 'r-', label="best")

    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        plt.gca().set_yscale('symlog')

    plt.savefig(filename)
    if view:
        plt.show()

    plt.close() 
Example 34
Project: neat-python   Author: CodeReclaimers   File: visualize.py    BSD 3-Clause "New" or "Revised" License
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
    """ Plots the population's average and best fitness. """
    if plt is None:
        warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
        return

    generation = range(len(statistics.most_fit_genomes))
    best_fitness = [c.fitness for c in statistics.most_fit_genomes]
    avg_fitness = np.array(statistics.get_fitness_mean())
    stdev_fitness = np.array(statistics.get_fitness_stdev())

    plt.plot(generation, avg_fitness, 'b-', label="average")
    #plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
    plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
    plt.plot(generation, best_fitness, 'r-', label="best")

    plt.title("Population's average and best fitness")
    plt.xlabel("Generations")
    plt.ylabel("Fitness")
    plt.grid()
    plt.legend(loc="best")
    if ylog:
        plt.gca().set_yscale('symlog')

    plt.savefig(filename)
    if view:
        plt.show()

    plt.close() 
Example 35
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn_all.py    MIT License
def visual_result(hist):
    train_loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    train_acc = hist.history['sparse_accuracy']
    val_acc = hist.history['val_sparse_accuracy']
    xc = range(nb_epoch)

    # Losses
    plt.figure(1, figsize=(7, 5))
    plt.plot(xc, train_loss)
    plt.plot(xc, val_loss)
    plt.xlabel('num of Epochs')
    plt.ylabel('loss')
    plt.title('train_loss vs val_loss')
    plt.grid(True)
    plt.legend(['train', 'val'])
    # use bmh, classic,ggplot for big pictures
    plt.style.available
    plt.style.use(['classic'])

    # Accuracy
    plt.figure(2, figsize=(7, 5))
    plt.plot(xc, train_acc)
    plt.plot(xc, val_acc)
    plt.xlabel('num of Epochs')
    plt.ylabel('accuracy')
    plt.title('train_acc vs val_acc')
    plt.grid(True)
    plt.legend(['train', 'val'], loc=4)
    # use bmh, classic,ggplot for big pictures
    plt.style.available
    plt.style.use(['classic'])


# Softmax cross-entropy loss function for segmentation 
Example 36
Project: Deep-Learning-for-HSI-classification   Author: luozm   File: cnn.py    MIT License
def visual_result(hist):
    train_loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    train_acc = hist.history['acc']
    val_acc = hist.history['val_acc']
    xc = range(nb_epoch)

    # Losses
    plt.figure(1, figsize=(7, 5))
    plt.plot(xc, train_loss)
    plt.plot(xc, val_loss)
    plt.xlabel('num of Epochs')
    plt.ylabel('loss')
    plt.title('train_loss vs val_loss')
    plt.grid(True)
    plt.legend(['train', 'val'])
    # use bmh, classic,ggplot for big pictures
    plt.style.available
    plt.style.use(['classic'])

    # Accuracy
    plt.figure(2, figsize=(7, 5))
    plt.plot(xc, train_acc)
    plt.plot(xc, val_acc)
    plt.xlabel('num of Epochs')
    plt.ylabel('accuracy')
    plt.title('train_acc vs val_acc')
    plt.grid(True)
    plt.legend(['train', 'val'], loc=4)
    # use bmh, classic,ggplot for big pictures
    plt.style.available
    plt.style.use(['classic'])


# Softmax cross-entropy loss function for segmentation 
Example 37
Project: RacingRobot   Author: sergionr2   File: warp_image.py    MIT License
def imshow(im, name=""):  # pragma: no cover
    plt.figure(name)
    # BGR to RGB
    plt.imshow(im[:, :, ::-1])
    plt.grid(True) 
Example 38
Project: python-machine-learning   Author: sho-87   File: network.py    MIT License
def plot_training_curve(self):
        """ Plot training curve """
        plt.plot(self.cost_history["iteration"], self.cost_history["cost"])
        plt.grid(True)
        plt.title("Training Curve")
        plt.xlabel("Iteration #")
        plt.ylabel("Cost")
        plt.show() 
Example 39
Project: python-machine-learning   Author: sho-87   File: network.py    MIT License
def plot_accuracy_curve(self):
        """ Plot accuracy curve """
        plt.plot(self.accuracy_history["validation"]["epoch"],
                 self.accuracy_history["validation"]["score"],
                label="Validation")
        plt.plot(self.accuracy_history["test"]["epoch"],
                 self.accuracy_history["test"]["score"],
                label="Test")
        plt.grid(True)
        plt.title("Accuracy Curve")
        plt.xlabel("Epoch #")
        plt.ylabel("Accuracy")
        plt.legend(loc='best')
        plt.show() 
Example 40
Project: Life   Author: MyNameBeMrRandom   File: imaging.py    GNU Affero General Public License v3.0
def do_bar_chart(title, x_label, y_label, values, names):

    # Clear the plot.
    plt.clf()

    #Create a bar graph with grid lines
    plt.bar(names, values, width=0.5, zorder=3)
    plt.grid(zorder=0)

    # Add labels
    plt.ylabel(y_label)
    plt.xlabel(x_label)
    plt.title(title)

    # Rotate x-labels by 90 degrees
    plt.xticks(rotation=-90)

    # Make the layout of plot conform to the text
    plt.tight_layout()

    # Save the image to a buffer.
    bar_chart = BytesIO()
    plt.savefig(bar_chart)

    # Close the image.
    plt.close()

    # Return image
    bar_chart.seek(0)
    return bar_chart 
Example 41
Project: Life   Author: MyNameBeMrRandom   File: imaging.py    GNU Affero General Public License v3.0
def do_plot(title, x_label, y_label, values, names):

    # Clear the current figure
    plt.clf()

    # Create a plot and add grid lines.
    plt.plot(names, values, "-r", zorder=3)
    plt.grid(zorder=0)

    # Add text labels
    plt.ylabel(y_label)
    plt.xlabel(x_label)
    plt.title(title)

    # Rotate x-labels by 90 degrees
    plt.xticks(rotation=-90)

    # Make the layout of plot conform to the text
    plt.tight_layout()

    # Save the image to a buffer.
    plot = BytesIO()
    plt.savefig(plot)

    # Close the image.
    plt.close()

    # Return image
    plot.seek(0)
    return plot 
Example 42
Project: NAIST_DeepLearning   Author: autodrive   File: plot_activation.py    Apache License 2.0
def main(min_x=-8, max_x=8):
    min_x, max_x = sorted([min_x, max_x])
    x_array = np.linspace(min_x, max_x, 101)

    x_array_step = get_x_step_array(min_x, max_x)

    symbols_to_be_processed_list = [
        {'label': 'sigmoid', 'f': get_sigmoid_function_sympy()},
        {'label': 'integrated sigmoid', 'f': get_integrated_sigmoid_sympy()},
    ]

    for fmt in ('pdf', 'png'):
        plt.clf()

        for d in symbols_to_be_processed_list:
            proc_sympy_function(x_array, d['f'], d['label'])

        y_ReLU = ReLU(x_array)
        plt.plot(x_array, y_ReLU, label='ReLU')

        y_step = step(x_array_step)
        plt.plot(x_array_step, y_step, label='step')

        plt.grid(True)
        plt.legend(loc=0)
        plt.axis('equal')
        plt.savefig('activation.%s' % fmt) 
Example 43
Project: NAIST_DeepLearning   Author: autodrive   File: plot_sigmoid.py    Apache License 2.0
def main(min_x=-8, max_x=8):
    x_array = np.linspace(min_x, max_x, 101)

    process_them = [
        {'label': 'sigmoid', 'f': pa.get_sigmoid_function_sympy()},
    ]

    for fmt in ('pdf', 'png'):
        for d in process_them:
            pa.proc_sympy_function(x_array, d['f'], d['label'])

        plt.grid(True)
        plt.legend(loc=0)
        plt.axis('equal')
        plt.savefig('sigmoid.%s' % fmt) 
Example 44
Project: derplearning   Author: notkarol   File: shapes.py    MIT License
def verify_plot(self, points, x, y, w, h):
        fig = plt.figure(figsize=(w / 10, h / 10))
        plt.plot(x, y, 'k-')
        plt.plot(points[:, 0], points[:, 1], 'ro')
        plt.grid()
        plt.xlim(0, w)
        plt.ylim(0, h)
        plt.gca().invert_yaxis()
        plt.savefig('plt.png', dpi=100, bbox_inches='tight')    
    
# Generate board size and plot 
Example 45
Project: optimization   Author: computeVision   File: solver.py    GNU Lesser General Public License v3.0
def plot_energy(energy):
    plt.clf()
    plt.plot(energy)
    plt.xlabel('Epochs')
    plt.ylabel('Energy')
    plt.grid(True)
    plt.title("Energy Plot")
    plt.pause(0.001) 
Example 46
Project: moodle-mlbackend-python   Author: moodlehq   File: chart.py    GNU General Public License v3.0
def store(self, X, y, figure_id=1):
        """Save the learning curve"""

        plt.figure(figure_id)
        plt.xlabel("Training samples")
        plt.ylabel("Error")

        train_sizes, train_scores, test_scores = learning_curve(
            self.classifier, X, y[:, 0])

        train_error_mean = 1 - np.mean(train_scores, axis=1)
        train_scores_std = np.std(train_scores, axis=1)
        test_error_mean = 1 - np.mean(test_scores, axis=1)
        test_scores_std = np.std(test_scores, axis=1)
        plt.grid()

        plt.fill_between(train_sizes, train_error_mean + train_scores_std,
                         train_error_mean - train_scores_std, alpha=0.1,
                         color="r")
        plt.fill_between(train_sizes, test_error_mean + test_scores_std,
                         test_error_mean - test_scores_std, alpha=0.1, color="g")
        plt.plot(train_sizes, train_error_mean, 'o-', color="r",
                 label="Training error")
        plt.plot(train_sizes, test_error_mean, 'o-', color="g",
                 label="Cross-validation error")
        plt.legend(loc="best")

        filepath = os.path.join(self.dirname, 'learning-curve.png')
        plt.savefig(filepath, format='png')

        if not os.path.isfile(filepath):
            return False

        return filepath 
Example 47
Project: pymovie   Author: bob-anderson-ok   File: ocr.py    MIT License
def locate_timestamp_vertically(img, fig, showplot=False):
    vert_profile = []
    for i in range(img.shape[0]):
        vert_profile.append(np.mean(img[i, :]))

    med_val = np.median(vert_profile)
    max_val = np.max(vert_profile)
    thresh = (max_val - med_val) / 2 + med_val

    top = None
    bottom = None

    for i in range(len(vert_profile)):
        if vert_profile[i] >= thresh:
            top = i
            break

    for i in range(len(vert_profile) - 1, 0, -1):
        if vert_profile[i] >= thresh:
            bottom = i
            break

    if showplot:
        plt.figure(fig, figsize=(10, 6))
        plt.plot(vert_profile, 'bo')
        plt.xlabel('y (row) coordinate within field')
        plt.ylabel('Pixel averages across row')
        plt.title('The red lines show where, on the y axis, the timestamp characters are located')
        plt.vlines([top, bottom], ymin=np.min(vert_profile), ymax=np.max(vert_profile), color='r')
        plt.grid()
        plt.show()

    return top, bottom 
Example 48
Project: Autoenv   Author: intelligent-control-lab   File: cma_es_lib.py    MIT License
def plot_axes_scaling(self, iabscissa=1):
        if not hasattr(self, 'D'):
            self.load()
        dat = self
        self._enter_plotting()
        pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
        pyplot.hold(True)
        pyplot.grid(True)
        ax = array(pyplot.axis())
        # ax[1] = max(minxend, ax[1])
        pyplot.axis(ax)
        pyplot.title('Principle Axes Lengths')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self 
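
pyplot.hold(True) in this example (and the next one) relies on an API that was removed in matplotlib 3.0; since then every plotting call draws onto the current axes by default, so the call can simply be dropped. A small sketch of the same semilogy-plus-grid pattern on a current matplotlib, with made-up data:

import numpy as np
import matplotlib.pyplot as plt

data = np.abs(np.random.randn(50, 6)).cumsum(axis=0) + 1.0  # placeholder axis lengths

plt.semilogy(data, '-b')              # successive calls overlay by default, no hold() needed
plt.semilogy(data.max(axis=1), '-r')
plt.grid(True)
plt.title('Axis scaling (sketch)')
plt.show()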
Example 49
Project: Autoenv   Author: intelligent-control-lab   File: cma_es_lib.py    MIT License
def plot_correlations(self, iabscissa=1):
        """spectrum of correlation matrix and largest correlation"""
        if not hasattr(self, 'corrspec'):
            self.load()
        if len(self.corrspec) < 2:
            return self
        x = self.corrspec[:, iabscissa]
        y = self.corrspec[:, 6:]  # principle axes
        ys = self.corrspec[:, :6]  # "special" values

        from matplotlib.pyplot import semilogy, hold, text, grid, axis, title
        self._enter_plotting()
        semilogy(x, y, '-c')
        hold(True)
        semilogy(x[:], np.max(y, 1) / np.min(y, 1), '-r')
        text(x[-1], np.max(y[-1, :]) / np.min(y[-1, :]), 'axis ratio')
        if ys is not None:
            semilogy(x, 1 + ys[:, 2], '-b')
            text(x[-1], 1 + ys[-1, 2], '1 + min(corr)')
            semilogy(x, 1 - ys[:, 5], '-b')
            text(x[-1], 1 - ys[-1, 5], '1 - max(corr)')
            semilogy(x[:], 1 + ys[:, 3], '-k')
            text(x[-1], 1 + ys[-1, 3], '1 + max(neg corr)')
            semilogy(x[:], 1 - ys[:, 4], '-k')
            text(x[-1], 1 - ys[-1, 4], '1 - min(pos corr)')
        grid(True)
        ax = array(axis())
        # ax[1] = max(minxend, ax[1])
        axis(ax)
        title('Spectrum (roots) of correlation matrix')
        # pyplot.xticks(xticklocs)
        self._xlabel(iabscissa)
        self._finalize_plotting()
        return self 
Example 50
Project: BMSG-GAN   Author: akanimax   File: generate_loss_plots.py    MIT License
def plot_loss(*loss_vals, plot_name="Loss plot",
              fig_size=(17, 7), save_path=None,
              legends=("discriminator", "generator")):
    """
    plot the discriminator loss values and save the plot if required
    :param loss_vals: (Variable Arg) numpy array or Sequence like for plotting values
    :param plot_name: Name of the plot
    :param fig_size: size of the generated figure (column_width, row_width)
    :param save_path: path to save the figure
    :param legends: list containing labels for loss plots' legends
                    len(legends) == len(loss_vals)
    :return:
    """
    assert len(loss_vals) == len(legends), "Not enough labels for legends"

    plt.figure(figsize=fig_size).suptitle(plot_name)
    plt.grid(True, which="both")
    plt.ylabel("loss value")
    plt.xlabel("spaced iterations")

    plt.axhline(y=0, color='k')
    plt.axvline(x=0, color='k')

    # plot all the provided loss values in a single plot
    plts = []
    for loss_val in loss_vals:
        plts.append(plt.plot(loss_val)[0])

    plt.legend(plts, legends, loc="upper right", fontsize=16)

    if save_path is not None:
        plt.savefig(save_path)
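
The which="both" argument in this last example requests gridlines at both major and minor ticks; minor gridlines only appear if minor ticks are actually enabled on the axes, for example via plt.minorticks_on(). A minimal sketch, independent of the project above:

import numpy as np
import matplotlib.pyplot as plt

loss = np.exp(-np.linspace(0, 5, 100)) + 0.05 * np.random.rand(100)

plt.plot(loss, label="loss")
plt.minorticks_on()                   # required for minor gridlines to show
plt.grid(True, which="both")          # gridlines at major and minor ticks
plt.grid(which="minor", alpha=0.3)    # fade the minor lines
plt.xlabel("spaced iterations")
plt.ylabel("loss value")
plt.legend(loc="upper right")
plt.show()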