Python matplotlib.pyplot.ylim() Examples

The following code examples show how to use matplotlib.pyplot.ylim(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
Project: razzy-spinner   Author: rafasashi   File: util.py    GNU General Public License v3.0 9 votes vote down vote up
def _show_plot(x_values, y_values, x_labels=None, y_labels=None):
    """Plot y_values against x_values as red dots, optionally labelling ticks.

    Args:
        x_values: sequence of x coordinates.
        y_values: sequence of y coordinates.
        x_labels: optional tick labels for the x axis (drawn vertically).
        y_labels: optional tick labels for the fixed y ticks [-1, 0, 1].

    Raises:
        ImportError: if matplotlib is not installed.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        # Fix: the two adjacent literals concatenated without a separating
        # space ("...installed.See http..."); add the missing space.
        raise ImportError('The plot function requires matplotlib to be installed. '
                          'See http://matplotlib.org/')

    plt.locator_params(axis='y', nbins=3)
    axes = plt.axes()
    axes.yaxis.grid()
    # 'ro' already selects red circles; the redundant color kwarg is dropped.
    plt.plot(x_values, y_values, 'ro')
    # Positional limits: the ymin/ymax keywords were removed in matplotlib 3.0.
    plt.ylim(-1.2, 1.2)
    plt.tight_layout(pad=5)
    if x_labels:
        plt.xticks(x_values, x_labels, rotation='vertical')
    if y_labels:
        plt.yticks([-1, 0, 1], y_labels, rotation='horizontal')
    # Pad margins so that markers are not clipped by the axes
    plt.margins(0.2)
    plt.show()

#////////////////////////////////////////////////////////////
#{ Parsing and conversion functions
#//////////////////////////////////////////////////////////// 
Example 2
Project: beta3_IRT   Author: yc14600   File: plots.py    MIT License 6 votes vote down vote up
def plot_item_parameters_corr(irt_prob_avg,difficulty,noise,disc=None):
    """Scatter item difficulty against average response, one marker style
    per item category (noise / detected noise / non-noise).

    Args:
        irt_prob_avg: per-item average response, mask-indexable (array-like).
        difficulty: per-item difficulty, aligned with irt_prob_avg.
        noise: per-item noise indicator; entries > 0 mark noise items.
        disc: optional per-item discrimination; items with disc < 0 are
            marked as detected noise.

    Returns:
        The created matplotlib figure.
    """
    sns.set_context('paper')
    palette = sns.color_palette("BuGn_r")
    legend_labels = []

    fig = plt.figure()
    plt.xlim([0., 1.])
    plt.ylim([0., 1.])

    plt.scatter(irt_prob_avg[noise > 0], difficulty[noise > 0], c=palette[3], s=60)
    legend_labels.append('noise item')
    # Idiom fix: "if not disc is None" -> "if disc is not None".
    if disc is not None:
        plt.scatter(irt_prob_avg[disc < 0], difficulty[disc < 0], c=palette[0],
                    marker='+', facecolors='none')
        legend_labels.append('detected noise item')
    plt.scatter(irt_prob_avg[noise == 0], difficulty[noise == 0],
                facecolors='none', edgecolors='k', s=60)
    legend_labels.append('non-noise item')

    plt.title('Correlation between difficulty and response')
    plt.xlabel('Average response', fontsize=14)
    plt.ylabel('Difficulty', fontsize=14)
    legend = plt.legend(legend_labels, frameon=True, fontsize=12)
    legend.get_frame().set_edgecolor('g')
    return fig
Example 3
Project: beta3_IRT   Author: yc14600   File: plots.py    MIT License 6 votes vote down vote up
def vis_performance(gather_prec,gather_recal,path,asd='[email protected]',vtype='nfrac'):
    """Plot mean recall/precision curves with error bars and save as a PDF.

    gather_prec / gather_recal: tables indexed by the varied quantity with one
    column per repetition; vtype selects the labelling ('nfrac' or 'astd').
    """
    fig = plt.figure()

    recall_mean = gather_recal.mean(axis=1)
    prec_mean = gather_prec.mean(axis=1)

    # Mean curves (recall as circles, precision as triangles) ...
    plt.plot(gather_recal.index, recall_mean, marker='o')
    plt.plot(gather_prec.index, prec_mean, marker='^')

    # ... with per-point standard-deviation error bars, no connecting line.
    plt.errorbar(gather_recal.index, recall_mean, gather_recal.std(axis=1), linestyle='None')
    plt.errorbar(gather_prec.index, prec_mean, gather_prec.std(axis=1), linestyle='None')

    out_name = None
    if vtype == 'nfrac':
        plt.title('Precision and recall under different noise fractions')
        plt.xlabel('Noise fraction (percentile)')
        out_name = path + 'gathered_dnoise_performance_nfrac_' + asd + '.pdf'
    elif vtype == 'astd':
        plt.title('Precision and recall under different prior SD')
        plt.xlabel('Prior standard deviation of discrimination')
        plt.xlim(0.5, 3.25)
        out_name = path + 'gathered_dnoise_performance_asd_nfrac20.pdf'

    if out_name is not None:
        plt.ylim(-0.05, 1.1)
        plt.yticks(np.arange(0, 1.2, 0.2))
        plt.legend(['Recall', 'Precision'], loc=0)
        plt.savefig(out_name)
    plt.close(fig)
Example 4
Project: PheKnowLator   Author: callahantiff   File: KGEmbeddingVisualizer.py    Apache License 2.0 6 votes vote down vote up
def plots_embeddings(colors, names, groups, legend_arg, label_size, tsne_size, title, title_size):
    """Scatter pre-grouped 2-D embedding coordinates, one colour per group."""

    # Set up the figure with some breathing room around the data.
    fig, ax = plt.subplots(figsize=(15, 10))
    ax.margins(0.05)

    # One layered series per group; `groups` yields (name, frame-with-x/y).
    for name, group in groups:
        ax.plot(group.x, group.y, marker='o', linestyle='', ms=6,
                label=names[name], color=colors[name], mec='none', alpha=0.8)

    handles, font_size, location, columns = (
        legend_arg[0], legend_arg[1], legend_arg[2], legend_arg[3])
    plt.legend(handles=handles, fontsize=font_size, frameon=False,
               loc=location, ncol=columns)

    ax.tick_params(labelsize=label_size)
    # The y range is shifted down by 5 relative to the symmetric x range.
    plt.ylim(-(tsne_size + 5), tsne_size)
    plt.xlim(-tsne_size, tsne_size)
    plt.title(title, fontsize=title_size)
    plt.show()
    plt.close()
Example 5
Project: Kaggler   Author: jeongyoonlee   File: classification.py    MIT License 6 votes vote down vote up
def plot_pr_curve(y, p):
    """Draw a shaded precision-recall step curve for labels y and scores p."""
    precision, recall, _ = precision_recall_curve(y, p)

    # Post-step curve with a matching translucent fill underneath.
    plt.step(recall, precision, where='post', color='b', alpha=0.2)
    plt.fill_between(recall, precision, step='post', color='b', alpha=0.2)

    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
Example 6
Project: euclid   Author: njpayne   File: plot_learning_curve.py    GNU General Public License v2.0 6 votes vote down vote up
def plot_validation_curve(estimator, X, y, title, param_name, param_range, cv=10):
    """Plot training and cross-validation accuracy over a parameter range.

    Returns the pyplot module so the caller can show or save the figure.
    """
    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name=param_name, param_range=param_range,
        cv=cv, scoring="accuracy", n_jobs=1)

    # Mean +/- std over the CV folds for both curves, keyed by legend label.
    curves = {
        "Training score": ("r", np.mean(train_scores, axis=1), np.std(train_scores, axis=1)),
        "Cross-validation score": ("g", np.mean(test_scores, axis=1), np.std(test_scores, axis=1)),
    }

    plt.figure()
    plt.title(title)
    plt.xlabel(param_name)
    plt.ylabel("Score")
    plt.ylim(0.0, 1.1)

    # Shaded std bands first, then the mean lines on top.
    for color, mean, std in curves.values():
        plt.fill_between(param_range, mean - std, mean + std, alpha=0.2, color=color)
    for label, (color, mean, _) in curves.items():
        plt.plot(param_range, mean, 'o-', color=color, label=label)

    plt.legend(loc="best")
    return plt
Example 7
Project: remixt   Author: amcpherson   File: cn_plot.py    MIT License 6 votes vote down vote up
def gc_plot(gc_table_filename, plot_filename):
    """ Plot the probability distribution of GC content for sampled reads

    Args:
        gc_table_filename (str): table of binned gc values
        plot_filename (str): plot PDF filename

    """
    table = pd.read_csv(gc_table_filename, sep='\t')

    fig = plt.figure(figsize=(4, 4))

    bins = table['gc_bin'].values
    # Raw per-bin densities as small black dots, smoothed curve in red.
    plt.scatter(bins, table['mean'].values, c='k', s=4)
    plt.plot(bins, table['smoothed'].values, c='r')

    plt.xlabel('gc %')
    plt.ylabel('density')
    plt.xlim((-0.5, 100.5))
    # Leave 10% headroom above the tallest point.
    plt.ylim((-0.01, table['mean'].max() * 1.1))

    plt.tight_layout()

    fig.savefig(plot_filename, format='pdf', bbox_inches='tight')
Example 8
Project: ndparse   Author: neurodata   File: assess.py    Apache License 2.0 6 votes vote down vote up
def display_pr_curve(precision, recall):
    """Plot a precision-recall curve, titling it with the best achievable F1.

    Args:
        precision, recall: aligned sequences of operating points, e.g. as
            returned by sklearn.metrics.precision_recall_curve.
    """
    # following examples from sklearn

    import pylab as plt

    # Bug fix: max_f1 was referenced below but never computed (NameError).
    # F1 is the harmonic mean of precision and recall at each operating point.
    max_f1 = max(
        (2.0 * p * r / (p + r)) if (p + r) > 0 else 0.0
        for p, r in zip(precision, recall)
    )

    # Plot Precision-Recall curve
    plt.clf()
    plt.plot(recall, precision, label='Precision-Recall curve')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall example: Max f1={0:0.2f}'.format(max_f1))
    plt.legend(loc="lower left")
    plt.show()
Example 9
Project: scicast   Author: iandriver   File: cluster.py    MIT License 6 votes vote down vote up
def plot_tree(dendr, path_filename, pos=None, save=False):
    """Draw a dendrogram from precomputed scipy dendrogram coordinates.

    Args:
        dendr: dict with 'icoord', 'dcoord' and 'color_list' entries, as
            produced by scipy.cluster.hierarchy.dendrogram.
        path_filename: directory for the saved image when save is True.
        pos: optional index/mask selecting a subset of links to draw.
        save: when True, also write plot_dendrogram.png to path_filename.
    """
    # Fix: scipy.array is a deprecated alias of numpy.array and is removed
    # from modern SciPy releases; use numpy directly.
    import numpy

    icoord = numpy.array(dendr['icoord'])
    dcoord = numpy.array(dendr['dcoord'])
    color_list = numpy.array(dendr['color_list'])
    xmin, xmax = icoord.min(), icoord.max()
    ymin, ymax = dcoord.min(), dcoord.max()
    if pos:
        icoord = icoord[pos]
        dcoord = dcoord[pos]
    for xs, ys, color in zip(icoord, dcoord, color_list):
        plt.plot(xs, ys, color)
    # Pad the right/top edges by 10% of the largest coordinate.
    plt.xlim(xmin - 10, xmax + 0.1 * abs(xmax))
    plt.ylim(ymin, ymax + 0.1 * abs(ymax))
    if save:
        plt.savefig(os.path.join(path_filename, 'plot_dendrogram.png'))
    plt.show()


# Create a nested dictionary from the ClusterNode's returned by SciPy 
Example 10
Project: EarlyWarning   Author: wjlei1990   File: train_eew_linear.py    GNU General Public License v3.0 6 votes vote down vote up
def plot_y(train_y, train_y_pred, test_y, test_y_pred, figname=None):
    """Side-by-side scatter of predicted vs. true values for train and test.

    Shows the figure when figname is None, otherwise saves it to figname.
    """
    plt.figure(figsize=(20, 10))

    # (subplot position, truth, predictions, extra scatter kwargs, label, title)
    panels = [
        (1, train_y, train_y_pred, {}, "train", "Train"),
        (2, test_y, test_y_pred, {"color": "r"}, "test", "Test"),
    ]
    for position, truth, predicted, extra, label, panel_title in panels:
        plt.subplot(1, 2, position)
        plt.scatter(truth, predicted, alpha=0.2, label=label, **extra)
        # Dashed identity line as the perfect-prediction reference.
        plt.plot([2, 8], [2, 8], '--', color="k")
        plt.title(panel_title)
        plt.legend()

    if figname is None:
        plt.show()
    else:
        plt.savefig(figname)
Example 11
Project: QCANet   Author: funalab   File: graph_draw.py    MIT License 6 votes vote down vote up
def graph_draw_volume(self, Time, SumVol, MeanVol, StdVol):
    """Plot mean volume over time with a +/- one-SD band and save as PDF.

    Args:
        Time: sequence of time points (days).
        SumVol, MeanVol, StdVol: per-time-point volume statistics; scaled
            by self.scale before plotting.
    """
    SumVol = np.array(SumVol) * self.scale
    MeanVol = np.array(MeanVol) * self.scale
    StdVol = np.array(StdVol) * self.scale

    # Volume Mean & SD
    plt.figure()
    plt.plot(Time, MeanVol, color='blue')
    # MeanVol/StdVol are already ndarrays; the redundant np.array() wrapping
    # of the original is dropped.
    plt.fill_between(Time, MeanVol - StdVol, MeanVol + StdVol, color='blue', alpha=0.4)
    plt.legend(["Mean", "Std. Dev."], loc=1)
    plt.xlabel('Time [day]', size=12)
    # Fix: raw string so the backslash in \mu is not treated as a (now
    # warning-raising) invalid escape sequence; the text is unchanged.
    plt.ylabel(r'Volume [$\mu m^{3}$]', size=12)
    if Time[-1] != 0:
        plt.xlim([0.0, round(Time[-1], 1)])
    plt.ylim([0.0, np.max(MeanVol + StdVol) + 1000])
    filename = self.opbase + self.psep + 'MeanStdVolume.pdf'
    plt.savefig(filename)
Example 12
Project: QCANet   Author: funalab   File: graph_draw.py    MIT License 6 votes vote down vote up
def graph_draw_surface(self, Time, SumArea, MeanArea, StdArea):
    """Plot mean surface area over time with a +/- one-SD band; save as PDF.

    Args:
        Time: sequence of time points (days).
        SumArea, MeanArea, StdArea: per-time-point area statistics; scaled
            by self.scale before plotting.
    """
    SumArea = np.array(SumArea) * self.scale
    MeanArea = np.array(MeanArea) * self.scale
    StdArea = np.array(StdArea) * self.scale

    # Surface Mean & SD
    plt.figure()
    plt.plot(Time, MeanArea, color='blue')
    # MeanArea/StdArea are already ndarrays; redundant np.array() dropped.
    plt.fill_between(Time, MeanArea - StdArea, MeanArea + StdArea, color='blue', alpha=0.4)
    plt.legend(["Mean", "Std. Dev."], loc=1)
    plt.xlabel('Time [day]', size=12)
    # Fix: raw string avoids the invalid \m escape warning; text unchanged.
    plt.ylabel(r'Surface Area [$\mu m^{2}$]', size=12)
    if Time[-1] != 0:
        plt.xlim([0.0, round(Time[-1], 1)])
    plt.ylim([0.0, np.max(MeanArea + StdArea) + 1000])
    filename = self.opbase + self.psep + 'MeanStdSurface.pdf'
    plt.savefig(filename)
Example 13
Project: Aegis   Author: jlillywh   File: live_plotter.py    GNU General Public License v3.0 6 votes vote down vote up
def live_plotter(x_vec, y1_data, line1, identifier='', pause_time=0.1):
    """Update a dynamically refreshed line plot and return the line artist.

    Pass line1=[] on the first call to create the figure; on later calls
    pass the returned artist so only its y-data is refreshed.
    """
    if line1 == []:
        # First call: switch on interactive mode and build figure/axis/line.
        plt.ion()
        fig = plt.figure(figsize=(8, 4))
        ax = fig.add_subplot(111)
        line1, = ax.plot(x_vec, y1_data, '-o', alpha=0.8)
        plt.xlabel('Time')
        plt.ylabel('Depth [m]')
        plt.title('Reservoir Level'.format(identifier))
        plt.show()

    # Subsequent calls only need the fresh y values.
    line1.set_ydata(y1_data)

    # Widen the y-limits by one standard deviation whenever the new data
    # reaches or exceeds the current bounds.
    low, high = line1.axes.get_ylim()
    data_min, data_max = np.min(y1_data), np.max(y1_data)
    if data_min <= low or data_max >= high:
        spread = np.std(y1_data)
        plt.ylim([data_min - spread, data_max + spread])

    # Give the GUI event loop time to redraw.
    plt.pause(pause_time)

    return line1
Example 14
Project: radiometric_normalization   Author: planetlabs   File: display.py    Apache License 2.0 6 votes vote down vote up
def plot_pixels(file_name, candidate_data_single_band,
                reference_data_single_band, limits=None, fit_line=None):
    """Hexbin scatter of candidate vs. reference DNs with a 1:1 reference
    line and an optional fitted line, saved to file_name."""
    logging.info('Display: Creating pixel plot - {}'.format(file_name))
    fig = plt.figure()
    plt.hexbin(
        candidate_data_single_band, reference_data_single_band, mincnt=1)

    if not limits:
        # Default to a square window from 0 to the larger current axis bound.
        _, ymax = plt.gca().get_ylim()
        _, xmax = plt.gca().get_xlim()
        limits = [0, max([ymax, xmax])]

    # Identity line for visual comparison.
    plt.plot(limits, limits, 'k-')

    if fit_line:
        # Evaluate the fitted line at both window edges.
        endpoints = [bound * fit_line.gain + fit_line.offset for bound in limits]
        plt.plot(limits, endpoints, 'g-')

    plt.xlim(limits)
    plt.ylim(limits)
    plt.xlabel('Candidate DNs')
    plt.ylabel('Reference DNs')
    fig.savefig(file_name, bbox_inches='tight')
    plt.close(fig)
Example 15
Project: radiometric_normalization   Author: planetlabs   File: display.py    Apache License 2.0 6 votes vote down vote up
def plot_histograms(file_name, candidate_data_multiple_bands,
                    reference_data_multiple_bands=None,
                    # Default is for Blue-Green-Red-NIR:
                    colour_order=['b', 'g', 'r', 'y'],
                    x_limits=None, y_limits=None):
    """Plot 256-bin histograms for each candidate band (solid lines) and,
    optionally, each reference band (dashed lines); save to file_name.

    Args:
        file_name: output image path.
        candidate_data_multiple_bands: iterable of per-band pixel arrays.
        reference_data_multiple_bands: optional iterable of per-band arrays.
        colour_order: line colour per band, in band order.
        x_limits, y_limits: optional axis limits.
    """
    logging.info('Display: Creating histogram plot - {}'.format(file_name))
    fig = plt.figure()
    # Fix: plt.hold(True) was deprecated in matplotlib 2.0 and removed in
    # 3.0; hold behaviour is now always on, so the call is simply dropped.
    for colour, c_band in zip(colour_order, candidate_data_multiple_bands):
        c_bh, c_bins = numpy.histogram(c_band, bins=256)
        plt.plot(c_bins[:-1], c_bh, color=colour, linestyle='-', linewidth=2)
    # Explicit None check: truth-testing an array-like argument here is
    # ambiguous (and raises for multi-element numpy arrays).
    if reference_data_multiple_bands is not None:
        for colour, r_band in zip(colour_order, reference_data_multiple_bands):
            r_bh, r_bins = numpy.histogram(r_band, bins=256)
            plt.plot(
                r_bins[:-1], r_bh, color=colour, linestyle='--', linewidth=2)
    plt.xlabel('DN')
    plt.ylabel('Number of pixels')
    if x_limits:
        plt.xlim(x_limits)
    if y_limits:
        plt.ylim(y_limits)
    fig.savefig(file_name, bbox_inches='tight')
    plt.close(fig)
Example 16
Project: carla_py   Author: IamWangYunKai   File: draw_trajectory.py    MIT License 6 votes vote down vote up
def draw_trajectory(points):
    """Plot a 2-D trajectory with marked start and end points, show it, and
    save it to trajectory.pdf."""
    xs = [p.x for p in points]
    ys = [p.y for p in points]

    fig = plt.figure(figsize=(5, 5))
    axis = plt.gca()
    axis.set_title('Trajectory of Dataset')
    axis.set_xlabel('x/m')
    axis.set_ylabel('y/m')
    plt.xlim(-185, 285)
    plt.ylim(-235, 235)

    # Start in red, end in green, the path itself as a thin blue line.
    plt.plot(points[0].x, points[0].y, 'ro', alpha=0.8, label="start")
    plt.plot(points[-1].x, points[-1].y, 'go', alpha=0.8, label="end")
    plt.plot(xs, ys, "b-", linewidth=1, alpha=0.8, label="trajectory")

    plt.legend(loc='best')
    plt.show()
    fig.savefig('trajectory.pdf', bbox_inches='tight')
    plt.close(fig)
Example 17
Project: python-machine-learning   Author: sho-87   File: plot.py    MIT License 6 votes vote down vote up
def plot_training(history):
    """Plot the cost recorded at each training iteration.

    Parameters:
    history -- sequence of cost values, one per training iteration

    """
    iterations = range(1, len(history) + 1)
    plt.plot(iterations, history)
    plt.grid(True)

    # Clamp the axes exactly to the data range.
    plt.xlim(1, len(history))
    plt.ylim(min(history), max(history))

    plt.title("Training Curve")
    plt.xlabel("Iteration")
    plt.ylabel("Cost")
Example 18
Project: pepper-robot-programming   Author: maverickjoy   File: asthama_search.py    MIT License 5 votes vote down vote up
def _updateAxes(self, x, y):
    """Stretch the cached plot bounds so (x, y) stays visible, then push
    the new limits to pyplot."""
    def stretched(value, low, high):
        # Widen whichever bound the value touches by the value itself.
        if value >= high:
            high += value
        if value <= low:
            low += value
        return low, high

    self.PLOTXMIN, self.PLOTXMAX = stretched(x, self.PLOTXMIN, self.PLOTXMAX)
    self.PLOTYMIN, self.PLOTYMAX = stretched(y, self.PLOTYMIN, self.PLOTYMAX)

    plt.xlim(self.PLOTXMIN, self.PLOTXMAX)
    plt.ylim(self.PLOTYMIN, self.PLOTYMAX)

    return
Example 19
Project: pepper-robot-programming   Author: maverickjoy   File: asthama_search.py    MIT License 5 votes vote down vote up
def _initialisePlot(self):
    """Configure a black, green-gridded figure and create the (initially
    empty) point series stored on self.graph."""
    plt.rc('grid', linestyle=":", color='black')

    # Dark theme with a green grid.
    plt.rcParams.update({
        'axes.facecolor': 'black',
        'axes.edgecolor': 'white',
        'grid.alpha': 1,
        'grid.color': "green",
    })

    plt.grid(True)
    plt.xlim(self.PLOTXMIN, self.PLOTXMAX)
    plt.ylim(self.PLOTYMIN, self.PLOTYMAX)
    self.graph, = plt.plot([], [], 'o')

    return
Example 20
Project: sfcc   Author: kv-kunalvyas   File: auxiliary.py    MIT License 5 votes vote down vote up
def plotLearningCurves(train, classifier):
    """Plot learning curves (training vs. test score) for a classifier.

    Args:
        train: DataFrame-like whose first column is the label and remaining
            columns are features (accessed via .values).
        classifier: scikit-learn estimator supporting fit().
    """
    X = train.values[:, 1:]
    y = train.values[:, 0]

    train_sizes, train_scores, test_scores = learning_curve(
            classifier, X, y, cv=10, n_jobs=-1, train_sizes=np.linspace(.1, 1., 10), verbose=0)

    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    plt.figure()
    plt.title("Learning Curves")
    plt.xlabel("Training samples")
    plt.ylabel("Error Rate")
    plt.ylim((0, 1))
    plt.gca().invert_yaxis()
    plt.grid()

    # Plot the average training and test score lines at each training set size
    plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label="Test score")

    # Plot the std deviation as a transparent range at each training set size
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std,
                     alpha=0.1, color="b")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std,
                     alpha=0.1, color="r")

    # Bug fix: legend() must come after the labelled plot calls; in the
    # original it ran before any artists existed, producing an empty legend.
    plt.legend(loc="best")

    # Draw the plot and reset the y-axis
    plt.draw()
    plt.gca().invert_yaxis()

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25)
    classifier.fit(X_train, y_train)
    plt.show()
Example 21
Project: mmdetection   Author: open-mmlab   File: coco_error_analysis.py    Apache License 2.0 5 votes vote down vote up
def makeplot(rs, ps, outDir, class_name, iou_type):
    """Draw stacked precision-recall curves (C75/C50/Loc/Sim/Oth/BG/FN) for
    each COCO area range and save one PNG per area under outDir."""
    # Fill colours: white x2, then one RGB triple per analysis type.
    fill_colors = np.vstack([
        np.ones((2, 3)),
        np.array([.31, .51, .74]),
        np.array([.75, .31, .30]),
        np.array([.36, .90, .38]),
        np.array([.50, .39, .64]),
        np.array([1, .6, 0])
    ])
    area_names = ['allarea', 'small', 'medium', 'large']
    type_names = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']

    for area_idx, area_name in enumerate(area_names):
        area_ps = ps[..., area_idx, 0]
        figure_title = iou_type + '-' + class_name + '-' + area_name
        aps = [ps_.mean() for ps_ in area_ps]
        # Average multi-dim precision over the extra axis; a leading zero
        # curve serves as the base of the first filled band.
        curves = [ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps]
        curves.insert(0, np.zeros(curves[0].shape))

        fig = plt.figure()
        ax = plt.subplot(111)
        for k, type_name in enumerate(type_names):
            ax.plot(rs, curves[k + 1], color=[0, 0, 0], linewidth=0.5)
            ax.fill_between(
                rs,
                curves[k],
                curves[k + 1],
                color=fill_colors[k],
                label=str('[{:.3f}'.format(aps[k]) + ']' + type_name))
        plt.xlabel('recall')
        plt.ylabel('precision')
        plt.xlim(0, 1.)
        plt.ylim(0, 1.)
        plt.title(figure_title)
        plt.legend()
        fig.savefig(outDir + '/{}.png'.format(figure_title))
        plt.close(fig)
Example 22
Project: cplot   Author: sunchaoatmo   File: cstimeserial.py    GNU General Public License v3.0 5 votes vote down vote up
def corplot(data,vname):
  """Plot one time-series curve per case in data.plotlist for *vname* and
  write the figure to <plotname>_<vname>.pdf.

  Args:
      data: object exposing plotname, plotlist and plotdata[case][vname];
          exact structure not visible here -- defined elsewhere in this
          project.
      vname: iterable of name components; joined for the output file name
          and used as the key into data.plotdata[case].
  """
  filename=data.plotname+"_"+"".join(vname)
  # Output format is hard-wired to PDF; the non-PDF branches below keep the
  # alternative page-numbered image output alive.
  outputformat="pdf"
  if outputformat=="pdf":
    pp = PdfPages(filename+'.pdf')
  else:
    page=0
  fig = plt.figure()
  gs0 = gridspec.GridSpec(1,1 )
  ax1 = plt.subplot(gs0[0])
  # NOTE(review): np is imported but unused in this function.
  import numpy as np
  for casenumber,case in enumerate(data.plotlist):
    #units_cur=data.time[case][vname].units
    #calendar_cur=data.time[case][vname].calendar
    # Human-readable legend name, falling back to the raw case key.
    legname = sim_nicename.get(case,case)
    # Even-indexed entries of the tableau20 palette: one colour per case.
    color1=tableau20[2*(casenumber)] 
    plt.plot(data.plotdata[case][vname][:],label=legname,color=color1,lw=0.8)
    # Legend is rebuilt each iteration; only the final call matters.
    leg=ax1.legend(loc=1,borderaxespad=0.,frameon=False, fontsize=6)

  # NOTE(review): y range hard-coded to [0.8, 1.0] -- presumably a
  # correlation-like score; confirm against callers.
  plt.ylim([0.8,1.0])
  #plt.xlim([0.,150])
  
  if outputformat=="pdf":
    pp.savefig()
  else:
    figurename=filename+str(page)+"."+outputformat
    page+=1
    fig.savefig(figurename,format=outputformat,dpi=300) #,dpi=300)
  fig.clf()
  if outputformat=="pdf":
    pp.close()
Example 23
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: plot_history.py    Apache License 2.0 5 votes vote down vote up
def plot(self, labels=None, colors=None, y_lim=(0, 1),
             save_path=None, legend_loc='upper right'):
    """Plot the recorded training history curves.

    Parameters
    ----------
    labels : list of str, optional
        Names of the curves to draw; defaults to self.labels.
    colors : list of str, optional
        One matplotlib colour per label; defaults to 'C0', 'C1', ...
    y_lim : tuple, optional
        Y-axis limits, (0, 1) by default.
    save_path : str, optional
        Where to save the figure; plots to screen when None.
    legend_loc : str, optional
        Legend location, 'upper right' by default.
    """
    import matplotlib.pyplot as plt

    if labels is None:
        labels = self.labels
    n = len(labels)

    # Default to the standard colour cycle, one entry per curve.
    if colors is None:
        colors = ['C' + str(i) for i in range(n)]
    else:
        assert len(colors) == n

    plt.ylim(y_lim)

    lines = []
    for label, color in zip(labels, colors):
        line, = plt.plot(list(range(self.epochs)),
                         self.history[label],
                         color,
                         label=label)
        lines.append(line)
    plt.legend(tuple(lines), labels, loc=legend_loc)

    if save_path is None:
        plt.show()
    else:
        plt.savefig(os.path.expanduser(save_path))
Example 24
Project: DiscEvolution   Author: rbooth200   File: run_model.py    GNU General Public License v3.0 5 votes vote down vote up
def _plot_grid(model):
    """Plot disc quantities on the model's radial grid in a 2x2 layout.

    The dust panels (dust fraction, Stokes number, grain size, dust surface
    density) are drawn only when the disc exposes dust attributes; otherwise
    only the gas surface density is plotted.
    """
    grid = model.disc.grid

    try:
        eps = model.disc.dust_frac.sum(0)
        plt.subplot(222)
        plt.loglog(grid.Rc, eps)
        plt.xlabel('$R$')
        # Raw strings keep the LaTeX backslashes out of Python's escape
        # processing (\e, \, and \S would otherwise raise warnings).
        plt.ylabel(r'$\epsilon$')
        # Fix: the ymin keyword was removed in matplotlib 3.0; bottom= is
        # the supported spelling.
        plt.ylim(bottom=1e-4)
        plt.subplot(223)
        plt.loglog(grid.Rc, model.disc.Stokes()[1])
        plt.xlabel('$R$')
        plt.ylabel('$St$')
        plt.subplot(224)
        plt.loglog(grid.Rc, model.disc.grain_size[1])
        plt.xlabel('$R$')
        plt.ylabel(r'$a\,[\mathrm{cm}]$')

        plt.subplot(221)
        l, = plt.loglog(grid.Rc, model.disc.Sigma_D.sum(0), '--')
        c = l.get_color()
    except AttributeError:
        # Gas-only disc: fall back to the default colour cycle.
        c = None

    plt.loglog(grid.Rc, model.disc.Sigma_G, c=c)
    plt.xlabel('$R$')
    plt.ylabel(r'$\Sigma_\mathrm{G, D}$')
    plt.ylim(bottom=1e-5)
Example 25
Project: DataSciUF-Tutorial-Student   Author: jdamiani27   File: class_vis.py    MIT License 5 votes vote down vote up
def prettyPicture(clf, X_test, y_test):
    """Plot clf's decision boundary over a fixed window with the test points
    overlaid, and save the result to test.png."""
    x_min, x_max = 0.0, 10.5
    y_min, y_max = 0.0, 10.5

    # Plot the decision boundary by classifying every point of a fine mesh
    # covering [x_min, x_max] x [y_min, y_max].
    h = .01  # mesh step size
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.pcolormesh(xx, yy, Z, cmap=pl.cm.seismic)

    def feature_where(col, target):
        # Feature `col` of every test point whose label equals `target`.
        return [X_test[i][col] for i in range(len(X_test)) if y_test[i] == target]

    # Overlay the test points: label 2 = benign, label 4 = malignant.
    plt.scatter(feature_where(0, 2), feature_where(1, 2), color="b", label="benign")
    plt.scatter(feature_where(0, 4), feature_where(1, 4), color="r", label="malignant")
    plt.legend()
    plt.xlabel("uniformity_cell_shape")
    plt.ylabel("bare_nuclei")

    plt.savefig("test.png")
Example 26
Project: Kaggler   Author: jeongyoonlee   File: classification.py    MIT License 5 votes vote down vote up
def plot_roc_curve(y, p):
    """Draw the ROC curve for labels y and scores p, with the chance line."""
    fpr, tpr, _ = roc_curve(y, p)

    plt.plot(fpr, tpr)
    # Dashed diagonal = performance of a random classifier.
    plt.plot([0, 1], [0, 1], linestyle='--', color='navy')

    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
Example 27
Project: lirpg   Author: Hwhitetooth   File: gail-eval.py    MIT License 5 votes vote down vote up
def plot(env_name, bc_log, gail_log, stochastic):
    """Plot BC/GAIL scores against the number of expert trajectories.

    Produces two PNGs under result/: raw (unnormalized) accumulated rewards,
    and expert-normalized performance (fixed y range [0, 1.6]).

    Args:
        env_name: environment name used in titles and file names.
        bc_log: dict with 'upper_bound', 'avg_ret', 'normalized_ret'.
        gail_log: dict with 'avg_ret', 'normalized_ret'.
        stochastic: selects the '-stochastic-' or '-deterministic-' filename.
    """
    def _save_figure(curves, ylabel, title, filename, y_lim=None):
        # One figure: each curve over CONFIG['traj_limitation'], then save.
        # (Refactor: the original duplicated this 14-line body twice.)
        for curve in curves:
            plt.plot(CONFIG['traj_limitation'], curve)
        plt.xlabel('Number of expert trajectories')
        plt.ylabel(ylabel)
        plt.title(title)
        plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right')
        # Fix: grid's `b` keyword was deprecated (matplotlib 3.5) and later
        # removed; the positional form works on all versions.
        plt.grid(True, which='major', color='gray', linestyle='--')
        if y_lim is not None:
            plt.ylim(*y_lim)
        plt.savefig(filename)
        plt.close()

    mode = 'stochastic' if stochastic else 'deterministic'

    _save_figure(
        [bc_log['upper_bound'], bc_log['avg_ret'], gail_log['avg_ret']],
        'Accumulated reward',
        '{} unnormalized scores'.format(env_name),
        'result/{}-unnormalized-{}-scores.png'.format(env_name, mode))

    _save_figure(
        [np.ones(len(CONFIG['traj_limitation'])),
         bc_log['normalized_ret'], gail_log['normalized_ret']],
        'Normalized performance',
        '{} normalized scores'.format(env_name),
        'result/{}-normalized-{}-scores.png'.format(env_name, mode),
        y_lim=(0, 1.6))
Example 28
Project: euclid   Author: njpayne   File: plot_learning_curve.py    GNU General Public License v2.0 5 votes vote down vote up
def plot_learning_curve_iter(estimator, title, cv=10):
    """Plot the cross-validation score curve (with a std band) over the
    training epochs recorded in a fitted grid search.

    Returns the pyplot module so the caller can show or save the figure.
    """
    iteration_count = []
    test_scores = []
    test_std = []

    # Collect the CV results recorded by the grid search.
    for result in estimator.grid_scores_:
        iteration_count.append(result[0]["n_iter"])
        test_scores.append(result.mean_validation_score)
        test_std.append(np.std(result.cv_validation_scores))

    iteration_count = np.array(iteration_count)
    test_scores = np.array(test_scores)
    test_std = np.array(test_std)

    plt.figure()
    plt.title(title)
    plt.xlabel("Epochs")
    plt.ylabel("Score")
    plt.ylim(0.0, 1.1)
    plt.grid()

    # Shaded +/- std band beneath the mean-score line.
    plt.fill_between(iteration_count, test_scores - test_std,
                     test_scores + test_std, alpha=0.2, color="g")
    plt.plot(iteration_count, test_scores, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")

    return plt
Example 29
Project: HardRLWithYoutube   Author: MaxSobolMark   File: gail-eval.py    MIT License 5 votes vote down vote up
def plot(env_name, bc_log, gail_log, stochastic):
    """Plot BC and GAIL scores against the number of expert trajectories.

    Saves two PNGs under result/: unnormalized accumulated rewards, then
    expert-normalized performance.

    Args:
        env_name: environment name used in titles and output file names.
        bc_log: dict with keys 'upper_bound', 'avg_ret', 'normalized_ret'.
        gail_log: dict with keys 'avg_ret', 'normalized_ret'.
        stochastic: selects the '-stochastic-' or '-deterministic-' filename.
    """
    upper_bound = bc_log['upper_bound']
    bc_avg_ret = bc_log['avg_ret']
    gail_avg_ret = gail_log['avg_ret']
    # First figure: raw accumulated rewards for expert, BC and GAIL.
    plt.plot(CONFIG['traj_limitation'], upper_bound)
    plt.plot(CONFIG['traj_limitation'], bc_avg_ret)
    plt.plot(CONFIG['traj_limitation'], gail_avg_ret)
    plt.xlabel('Number of expert trajectories')
    plt.ylabel('Accumulated reward')
    plt.title('{} unnormalized scores'.format(env_name))
    plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right')
    # NOTE(review): grid's `b` keyword was deprecated in matplotlib 3.5 and
    # removed later; grid(True, ...) is the forward-compatible spelling.
    plt.grid(b=True, which='major', color='gray', linestyle='--')
    if stochastic:
        title_name = 'result/{}-unnormalized-stochastic-scores.png'.format(env_name)
    else:
        title_name = 'result/{}-unnormalized-deterministic-scores.png'.format(env_name)
    plt.savefig(title_name)
    plt.close()

    bc_normalized_ret = bc_log['normalized_ret']
    gail_normalized_ret = gail_log['normalized_ret']
    # Second figure: expert-normalized scores; the constant 1.0 line is the
    # expert reference.
    plt.plot(CONFIG['traj_limitation'], np.ones(len(CONFIG['traj_limitation'])))
    plt.plot(CONFIG['traj_limitation'], bc_normalized_ret)
    plt.plot(CONFIG['traj_limitation'], gail_normalized_ret)
    plt.xlabel('Number of expert trajectories')
    plt.ylabel('Normalized performance')
    plt.title('{} normalized scores'.format(env_name))
    plt.legend(['expert', 'bc-imitator', 'gail-imitator'], loc='lower right')
    plt.grid(b=True, which='major', color='gray', linestyle='--')
    if stochastic:
        title_name = 'result/{}-normalized-stochastic-scores.png'.format(env_name)
    else:
        title_name = 'result/{}-normalized-deterministic-scores.png'.format(env_name)
    plt.ylim(0, 1.6)
    plt.savefig(title_name)
    plt.close()
Example 30
Project: ml-eeg   Author: pbrusco   File: visualizations.py    GNU General Public License v3.0 5 votes vote down vote up
def set_plot(plt, y_lim, window, t0, tf, marks, ax):
    """Apply y/x limits via the given pyplot-like object and draw the two
    vertical marker lines on ax.

    t0/tf are accepted for the commented-out tick computation and otherwise
    unused.
    """
    plt.ylim(y_lim)
    plt.xlim(window)
    # plt.xticks([x/1000.0 for x in range(-2000, 101, 100) if (x/1000.0)>=t0 and (x/1000.0)<=tf])
    for mark in marks[:2]:
        ax.axvline(mark, color="black")
Example 31
Project: ml-eeg   Author: pbrusco   File: visualizations.py    GNU General Public License v3.0 5 votes vote down vote up
def lines(features_table, title=""):
    """Draw one horizontal feature-importance line per analysis window.

    Line transparency encodes window size relative to the largest window.
    `title` is accepted for API symmetry with the sibling plots but unused.
    """
    plt.figure()

    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))

    # Hoisted loop invariant: the normalising maximum window size.
    max_window = features_table.window_size.max()
    for _, row in features_table.iterrows():
        plt.hlines(y=row.feature_importances_folds_mean, lw=5,
                   alpha=row.window_size / max_window,
                   xmin=row.starting_time, xmax=row.end_time)

    importance = features_table.feature_importances_folds_mean
    plt.ylim([importance.min(), importance.max()])
    plt.xlim([features_table.starting_time.min(), features_table.end_time.max()])
    plt.draw()
Example 32
Project: ml-eeg   Author: pbrusco   File: visualizations.py    GNU General Public License v3.0 5 votes vote down vote up
def window_bars(features_table, title="", fontsize=20):
    """Draw one grey-scale bar per feature window, shaded by importance.

    Each row of ``features_table`` becomes a horizontal rectangle spanning
    [starting_time, starting_time + window_size]; its grey level encodes
    ``feature_importances_folds_mean``. A matching colorbar is placed to the
    right of the axes.

    NOTE(review): ``sort_values(..., inplace=True)`` mutates the caller's
    DataFrame — confirm callers do not rely on the original row order.
    """
    features_table.sort_values(["window_size", "starting_time"], ascending=False, inplace=True)
    fig = plt.figure()
    ax = fig.add_subplot(111)

    cmap = matplotlib.cm.get_cmap('Greys')
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    # Map the observed importance range onto [0, 1] for colormap lookup.
    vmin, vmax = (features_table.feature_importances_folds_mean.min(), features_table.feature_importances_folds_mean.max())
    norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)

    for idx, (_, row) in enumerate(features_table.iterrows()):
        val = row.feature_importances_folds_mean
        # plt.hlines(y=idx, lw=3, color=cmap(norm(val)), xmin=row.starting_time, xmax=row.end_time)
        p = patches.Rectangle(
            (row.starting_time, idx),  # (x, y)
            row.window_size,  # width
            1,  # height
            facecolor=cmap(norm(val)),
            # edgecolor="blue"
        )
        ax.add_patch(p)

    ax.set_title(title, fontsize=fontsize)
    plt.xlim([features_table.starting_time.min(), features_table.end_time.max()])
    plt.ylim([-1, len(features_table) + 2])

    # Reserve space next to the axes for the colorbar.
    divider = make_axes_locatable(ax)
    ax_colorbar = divider.append_axes('right', size='60%', pad=0.01)

    # Invisible image supplies a mappable covering [vmin, vmax] so the
    # colorbar reflects the same normalization as the rectangles.
    img = plt.imshow(np.array([[vmin, vmax]]), cmap=cmap)
    img.set_visible(False)
    plt.colorbar(img, cax=ax_colorbar, orientation="vertical")

    plt.draw()
Example 33
Project: remixt   Author: amcpherson   File: cn_plot.py    MIT License 5 votes vote down vote up
def plot_cnv_genome_density(fig, transform, cnv):
    """ Plot major/minor copy number across the genome and as a density

    Args:
        fig (matplotlib.figure.Figure): figure to which plots are added
        transform (matplotlib.transform.Transform): transform for locating axes
        cnv (pandas.DataFrame): copy number table

    Returns:
        matplotlib.figure.Figure: the same figure, with both panels added.
    """

    # Left panel (x fraction 0.05-0.65): raw copy number along the genome.
    box = matplotlib.transforms.Bbox([[0.05, 0.05], [0.65, 0.95]])
    ax = fig.add_axes(transform.transform_bbox(box))

    remixt.cn_plot.plot_cnv_genome(ax, cnv, mincopies=-1, maxcopies=6, major_col='major_raw', minor_col='minor_raw')
    ax.set_ylabel('Raw copy number')
    # Reuse the genome panel's y-range so both panels share the same scale.
    ylim = ax.get_ylim()

    # Right panel (x fraction 0.7-0.95): length-weighted density of copy number.
    box = matplotlib.transforms.Bbox([[0.7, 0.05], [0.95, 0.95]])
    ax = fig.add_axes(transform.transform_bbox(box))

    cov = 0.001  # passed to filled_density_weighted; presumably a smoothing/bandwidth parameter — TODO confirm
    # Drop segments with infinite or missing copy number before density estimation.
    data = cnv[['minor_raw', 'major_raw', 'length']].replace(np.inf, np.nan).dropna()
    remixt.utils.filled_density_weighted(
        ax, data['minor_raw'].values, data['length'].values,
        'blue', 0.5, ylim[0], ylim[1], cov, rotate=True)
    remixt.utils.filled_density_weighted(
        ax, data['major_raw'].values, data['length'].values,
        'red', 0.5, ylim[0], ylim[1], cov, rotate=True)
    ax.set_ylim(ylim)
    ax.set_xlabel('Density')
    # Minimal axis chrome: keep only bottom/left spines, add y grid lines.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.xaxis.tick_bottom()
    ax.yaxis.tick_left()
    ax.yaxis.grid(True)

    return fig
Example 34
Project: design_embeddings_jmd_2016   Author: IDEALLab   File: shape_plot.py    MIT License 5 votes vote down vote up
def plot_original_samples(points_per_axis, n_dim, inverse_transform, save_path, name,
                          variables, mirror=True):
    """Scatter original samples in the normalized 2-D semantic space and draw
    each reconstructed shape at its sample location.

    Args:
        points_per_axis: unused here; kept for signature parity with
            plot_original_grid.
        n_dim: dimensionality of the semantic space; only 2 is supported.
        inverse_transform: maps semantic-space coordinates back to shape data.
        save_path: output path prefix for the saved .eps file.
        name: plot title.
        variables: sample coordinates in the semantic space.
        mirror: forwarded to plot_shape.

    NOTE: Python 2 code (print statement syntax).
    """
    print "Plotting original samples ..."

    plt.rc("font", size=font_size)
    
    coords = variables
    coords_norm = preprocessing.MinMaxScaler().fit_transform(coords) # Min-Max normalization
    data_rec = inverse_transform(np.array(coords))
    indices = range(len(coords))

    if n_dim == 2:
        # Create a 2D plot
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for i in indices:
            ax.scatter(coords_norm[i, 0], coords_norm[i, 1], s = 7)
            # Draw the reconstructed shape centered on the sample's coordinates.
            plot_shape(data_rec[i], coords_norm[i,0], coords_norm[i,1], ax, mirror, color='red', alpha=.7)

        ax.set_title(name, fontsize=20)
        # Small margin around the unit square of normalized coordinates.
        plt.xlim(-0.1, 1.1)
        plt.ylim(-0.1, 1.1)
        plt.xlabel('s')
        plt.ylabel('t')
        plt.tight_layout()
        plt.savefig(save_path+'original_samples.eps', dpi=600)
        
        plt.close()

    else:
        print 'Cannot plot original samples for dimensionality other than 2!'
Example 35
Project: design_embeddings_jmd_2016   Author: IDEALLab   File: shape_plot.py    MIT License 5 votes vote down vote up
def plot_original_grid(points_per_axis, n_dim, min_maxes, inverse_transform, save_path, name, mirror=True):
    """Plot reconstructed shapes on a regular grid over the semantic space.

    Args:
        points_per_axis: number of grid points along each dimension.
        n_dim: dimensionality of the semantic space; only 2 is supported.
        min_maxes: per-dimension (min, max) bounds used to build the grid.
        inverse_transform: maps semantic-space coordinates back to shape data.
        save_path: output path prefix for the saved .eps file.
        name: plot title.
        mirror: forwarded to plot_shape.

    NOTE: Python 2 code (print statement syntax).
    """
    print "Plotting original grid ..."

    plt.rc("font", size=font_size)
    lincoords = []
    
    for i in range(0,n_dim):
        lincoords.append(np.linspace(min_maxes[i][0],min_maxes[i][1],points_per_axis))
    coords = list(itertools.product(*lincoords)) # Create a list of coordinates in the semantic space
    coords_norm = preprocessing.MinMaxScaler().fit_transform(coords) # Min-Max normalization
    data_rec = inverse_transform(coords)

    indices = range(len(coords))

    if n_dim == 2:
        # Create a 2D plot
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for i in indices:
            ax.scatter(coords_norm[i, 0], coords_norm[i, 1], s = 7)
            # Draw the reconstructed shape centered on the grid point.
            plot_shape(data_rec[i], coords_norm[i,0], coords_norm[i,1], ax, mirror, linewidth=2)

        ax.set_title(name, fontsize=20)
        # Small margin around the unit square of normalized coordinates.
        plt.xlim(-0.1, 1.1)
        plt.ylim(-0.1, 1.1)
        plt.xlabel('s')
        plt.ylabel('t')
        plt.tight_layout()
        plt.savefig(save_path+'original_grid.eps', dpi=600)
        
        plt.close()
        
    else:
        print 'Cannot plot original grid for dimensionality other than 2!'
Example 36
Project: design_embeddings_jmd_2016   Author: IDEALLab   File: intrinsic_dim.py    MIT License 5 votes vote down vote up
def mide(X, n_neighbors=None, verbose=0):
    ''' Manifold intrinsic dimension estimator 

    Returns both the global intrinsic dimensionality (int) and the local
    intrinsic dimensionality per sample (np.array).

    Args:
        X: (n_samples, n_features) data matrix.
        n_neighbors: neighborhood size; estimated from the data when None.
        verbose: 0 silent, 1 visualizes the neighbor graph, 2 also prints
            the smoothed local dimensions.

    NOTE: Python 2 code (print statement syntax; integer division below).
    '''
    
    # Initial guess
    if n_neighbors is None:
        k_min, k_max = get_k_range(X)
        n_neighbors = (k_min + k_max)/2  # integer midpoint under Python 2 division
    neigh = NearestNeighbors().fit(X)
    dist, nbrs = neigh.kneighbors(n_neighbors=n_neighbors, return_distance=True)
    local_dims = lmse(X, nbrs, verbose=verbose)
    
    if verbose:
        visualize_graph(X, nbrs)
#        plt.figure()
#        plt.plot(local_dims, 'o')
#        plt.title('Local intrinsic dimensions')
#        plt.xlabel('Samples')
#        plt.ylabel('Local ID')
#        plt.ylim(1,4)
#        plt.show()
    
    # Smoothing, this can correct the wrong local dimension estimations:
    # a KDE is fit over [X, local_dim]; each sample is then reassigned the
    # candidate dimension with the highest local density.
    local_dims = np.array(local_dims)
    X_dims = np.concatenate((X, local_dims.reshape(-1,1)), axis=1)
    b = np.mean(dist[:,-1]) * 5  # KDE bandwidth: 5x mean distance to the farthest neighbor
    kde = KernelDensity(kernel='epanechnikov', bandwidth=b).fit(X_dims)
    for i in range(len(local_dims)):
        Xi = np.concatenate((np.repeat(X[i].reshape(1,-1), len(np.unique(local_dims)), axis=0), 
                             np.unique(local_dims).reshape(-1,1)), axis=1)
        kde_scores = kde.score_samples(Xi)
        local_dims[i] = Xi[np.argmax(kde_scores), -1]
        
    if verbose == 2:
        print local_dims
        
    # Global estimate: rounded mean of the smoothed local dimensions.
    intr_dim = int(round(np.mean(local_dims), 0))
    
    return intr_dim, local_dims
Example 37
Project: pohmm-keystroke   Author: vmonaco   File: plotting.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def plot_SMAPE(pred):
    """Plot the cumulative-mean SMAPE of the POHMM against the baseline.

    Parameters:
        pred (pandas.DataFrame): prediction table with columns 'event_idx',
            'SMAPE_tau' and 'SMAPE_baseline_tau'.
    """
    # pd.expanding_mean() was deprecated in pandas 0.18 and later removed;
    # the Series.expanding().mean() form is the exact equivalent.
    plt.plot(pred['event_idx'], pred['SMAPE_tau'].expanding().mean(), label='POHMM')
    plt.plot(pred['event_idx'], pred['SMAPE_baseline_tau'].expanding().mean(), linestyle='--', label='Baseline')
    plt.ylim(0, 1)
    plt.legend()
    return
Example 38
Project: OCDVAE_ContinualLearning   Author: MrtnMndt   File: visualization.py    MIT License 5 votes vote down vote up
def visualize_classification_uncertainty(data_mus, data_sigmas, other_data_dicts, other_data_mu_key,
                                         other_data_sigma_key,
                                         data_name, num_samples, save_path):
    """
    Scatter prediction means against prediction standard deviations for the
    trained dataset and every unseen dataset.

    Parameters:
        data_mus (list or torch.Tensor): Encoded mu values for trained dataset's validation set.
        data_sigmas (list or torch.Tensor): Encoded sigma values for trained dataset's validation set.
        other_data_dicts (dictionary of dictionaries): A dataset with values per dictionary, among them mus and sigmas
        other_data_mu_key (str): Dictionary key for the mus
        other_data_sigma_key (str): Dictionary key for the sigmas
        data_name (str): Original dataset's name.
        num_samples (int): Number of used samples to obtain prediction values.
        save_path (str): Saving path.
    """

    def flat(nested):
        # Concatenate per-batch value lists into one flat list.
        return [value for batch in nested for value in batch]

    plt.figure(figsize=(20, 14))
    plt.scatter(flat(data_mus), flat(data_sigmas), label=data_name, s=75, c=colors[0], alpha=1.0)

    for c, (other_data_name, other_data_dict) in enumerate(other_data_dicts.items()):
        plt.scatter(flat(other_data_dict[other_data_mu_key]),
                    flat(other_data_dict[other_data_sigma_key]),
                    label=other_data_name, s=75, c=colors[c], alpha=0.3,
                    marker='*')

    plt.xlabel("Prediction mean", fontsize=axes_font_size)
    plt.ylabel("Prediction standard deviation", fontsize=axes_font_size)
    plt.xlim(left=-0.05, right=1.05)
    plt.ylim(bottom=-0.05, top=0.55)
    plt.legend(loc=1, fontsize=legend_font_size)
    out_name = data_name + '_vs_' + ",".join(list(other_data_dicts.keys())) + \
        '_classification_uncertainty_' + str(num_samples) + '_samples.pdf'
    plt.savefig(os.path.join(save_path, out_name), bbox_inches='tight')
Example 39
Project: OCDVAE_ContinualLearning   Author: MrtnMndt   File: visualization.py    MIT License 5 votes vote down vote up
def visualize_weibull_outlier_probabilities(data_outlier_probs, other_data_outlier_probs_dict,
                                            data_name, save_path, tailsize):
    """
    Histogram of Weibull CDF outlier probabilities for the trained dataset and
    each unseen dataset, normalized so each histogram shows fractions.

    Parameters:
        data_outlier_probs (np.array): Outlier probabilities for each input of the trained dataset's validation set.
        other_data_outlier_probs_dict (dictionary): Outlier probabilities for each input of an unseen dataset.
        data_name (str): Original trained dataset's name.
        save_path (str): Saving path.
        tailsize (int): Fitted Weibull model's tailsize.
    """

    def fraction_weights(values):
        # Equal weights summing to one, so bin heights are dataset fractions.
        return np.ones_like(values) / float(len(values))

    data_outlier_probs = np.concatenate(data_outlier_probs, axis=0)

    plt.figure(figsize=(20, 20))
    plt.hist(data_outlier_probs, label=data_name, weights=fraction_weights(data_outlier_probs),
             bins=50, color=colors[0], alpha=1.0, edgecolor='white', linewidth=5)

    for c, (other_data_name, other_probs) in enumerate(other_data_outlier_probs_dict.items()):
        other_probs = np.concatenate(other_probs, axis=0)
        plt.hist(other_probs, label=other_data_name, weights=fraction_weights(other_probs),
                 bins=50, color=colors[c], alpha=0.5, edgecolor='white', linewidth=5)

    plt.title("Outlier probabilities: tailsize " + str(tailsize), fontsize=title_font_size)
    plt.xlabel("Outlier probability according to Weibull CDF", fontsize=axes_font_size)
    plt.ylabel("Percentage", fontsize=axes_font_size)
    plt.xlim(left=-0.05, right=1.05)
    plt.ylim(bottom=-0.05, top=1.05)
    plt.legend(loc=0)

    plt.savefig(os.path.join(save_path, data_name + '_' + ",".join(list(other_data_outlier_probs_dict.keys()))
                             + '_weibull_outlier_probabilities_tailsize_'
                             + str(tailsize) + '.png'), bbox_inches='tight')
Example 40
Project: OCDVAE_ContinualLearning   Author: MrtnMndt   File: visualization.py    MIT License 5 votes vote down vote up
def visualize_openset_classification(data, other_data_dicts, dict_key, data_name,
                                     thresholds, save_path, tailsize):
    """
    Visualize the percentage of each dataset rejected as statistical outliers
    across a range of Weibull CDF rejection priors.

    Parameters:
        data (list): Dataset outlier percentages per rejection prior value for the trained dataset's validation set.
        other_data_dicts (dictionary of dictionaries):
            Dataset outlier percentages per rejection prior value for an unseen dataset.
        dict_key (str): Dictionary key of the values to visualize
        data_name (str): Original trained dataset's name.
        thresholds (list): List of rejection prior values.
        save_path (str): Saving path.
        tailsize (int): Weibull model's tailsize.
    """

    line_width = 10
    plt.figure(figsize=(20, 20))
    plt.plot(thresholds, data, label=data_name, color=colors[0], linestyle='solid', linewidth=line_width)

    # One curve per unseen dataset, cycling through the available line styles.
    for c, (other_data_name, other_data_dict) in enumerate(other_data_dicts.items()):
        plt.plot(thresholds, other_data_dict[dict_key], label=other_data_name, color=colors[c],
                 linestyle=linestyles[c % len(linestyles)], linewidth=line_width)

    plt.xlabel(r"Weibull CDF outlier rejection prior $\Omega_t$", fontsize=axes_font_size)
    plt.ylabel("Percentage of dataset outliers", fontsize=axes_font_size)
    plt.xlim(left=-0.05, right=1.05)
    plt.ylim(bottom=-0.05, top=1.05)
    plt.legend(loc=0, fontsize=legend_font_size - 15)
    out_name = data_name + '_' + ",".join(list(other_data_dicts.keys())) + \
        '_outlier_classification' + '_tailsize_' + str(tailsize) + '.pdf'
    plt.savefig(os.path.join(save_path, out_name), bbox_inches='tight')
Example 41
Project: OCDVAE_ContinualLearning   Author: MrtnMndt   File: visualization.py    MIT License 5 votes vote down vote up
def visualize_entropy_classification(data, other_data_dicts, dict_key, data_name,
                                     thresholds, save_path):
    """
    Visualize the percentage of each dataset rejected as statistical outliers
    across a range of predictive-entropy thresholds.

    Parameters:
        data (list): Dataset outlier percentages per threshold for the trained dataset's validation set.
        other_data_dicts (dictionary of dictionaries):
            Dataset outlier percentages per threshold for an unseen dataset.
        dict_key (str): Dictionary key of the values to visualize
        data_name (str): Original trained dataset's name.
        thresholds (list): List of entropy threshold values.
        save_path (str): Saving path.
    """

    line_width = 10
    plt.figure(figsize=(20, 20))
    plt.plot(thresholds, data, label=data_name, color=colors[0], linestyle='solid', linewidth=line_width)

    # One curve per unseen dataset, cycling through the available line styles.
    for c, (other_data_name, other_data_dict) in enumerate(other_data_dicts.items()):
        plt.plot(thresholds, other_data_dict[dict_key], label=other_data_name, color=colors[c],
                 linestyle=linestyles[c % len(linestyles)], linewidth=line_width)

    plt.xlabel(r"Predictive entropy", fontsize=axes_font_size)
    plt.ylabel("Percentage of dataset outliers", fontsize=axes_font_size)
    plt.xlim(left=-0.05, right=thresholds[-1])
    plt.ylim(bottom=-0.05, top=1.05)
    plt.legend(loc=0, fontsize=legend_font_size - 15)
    out_name = data_name + '_' + ",".join(list(other_data_dicts.keys())) + \
        '_entropy_outlier_classification' + '.pdf'
    plt.savefig(os.path.join(save_path, out_name), bbox_inches='tight')
Example 42
Project: OCDVAE_ContinualLearning   Author: MrtnMndt   File: visualization.py    MIT License 5 votes vote down vote up
def visualize_reconstruction_classification(data, other_data_dicts, dict_key, data_name,
                                            thresholds, save_path, autoregression=False):
    """
    Visualize the percentage of each dataset rejected as statistical outliers
    across a range of reconstruction-loss thresholds.

    Parameters:
        data (list): Dataset outlier percentages per threshold for the trained dataset's validation set.
        other_data_dicts (dictionary of dictionaries):
            Dataset outlier percentages per threshold for an unseen dataset.
        dict_key (str): Dictionary key of the values to visualize
        data_name (str): Original trained dataset's name.
        thresholds (list): List of reconstruction-loss threshold values.
        save_path (str): Saving path.
        autoregression (bool): Switches the x-label units to bits per dim.
    """

    line_width = 10
    plt.figure(figsize=(20, 20))
    plt.plot(thresholds, data, label=data_name, color=colors[0], linestyle='solid', linewidth=line_width)

    # One curve per unseen dataset, cycling through the available line styles.
    for c, (other_data_name, other_data_dict) in enumerate(other_data_dicts.items()):
        plt.plot(thresholds, other_data_dict[dict_key], label=other_data_name, color=colors[c],
                 linestyle=linestyles[c % len(linestyles)], linewidth=line_width)

    if autoregression:
        plt.xlabel(r"Dataset reconstruction loss (bits per dim)", fontsize=axes_font_size)
    else:
        plt.xlabel(r"Dataset reconstruction loss (nats)", fontsize=axes_font_size)
    plt.ylabel("Percentage of dataset outliers", fontsize=axes_font_size)
    plt.xlim(left=-0.05, right=thresholds[-1])
    plt.ylim(bottom=-0.05, top=1.05)
    plt.legend(loc=0, fontsize=legend_font_size - 15)
    out_name = data_name + '_' + ",".join(list(other_data_dicts.keys())) + \
        '_reconstruction_loss_outlier_classification' + '.pdf'
    plt.savefig(os.path.join(save_path, out_name), bbox_inches='tight')
Example 43
Project: smart-cab   Author: naokishibuya   File: driver.py    MIT License 5 votes vote down vote up
def drawSuccessRates(self):
        """Plot the per-episode success rate and save it as a numbered PNG."""
        fig = plt.figure()
        n_episodes = len(self.successRates)
        plt.plot(range(n_episodes), self.successRates)
        plt.xlabel('# of episodes')
        plt.ylabel('success rate %')
        plt.xlim(0, n_episodes - 1)
        plt.ylim(0, 101.0)
        fig.savefig('{}/success_{:03d}.png'.format(self.directory, self.experiment), bbox_inches='tight')
Example 44
Project: smart-cab   Author: naokishibuya   File: driver.py    MIT License 5 votes vote down vote up
def drawCoverageRates(self):
        """Plot the per-episode state-coverage rate and save it as a numbered PNG."""
        fig = plt.figure()
        n_episodes = len(self.coverageRates)
        plt.plot(range(n_episodes), self.coverageRates)
        plt.xlabel('# of episodes')
        plt.ylabel('coverage rate %')
        plt.xlim(0, n_episodes - 1)
        plt.ylim(0, 30.0)
        fig.savefig('{}/coverage_{:03d}.png'.format(self.directory, self.experiment), bbox_inches='tight')
Example 45
Project: EarlyWarning   Author: wjlei1990   File: stats.py    GNU General Public License v3.0 5 votes vote down vote up
def plot_y(train_y, train_y_pred, test_y, test_y_pred):
    """Scatter predicted against true values for train and test side by side,
    with a y = x reference line on each panel."""
    diagonal = [2, 8]  # endpoints of the y = x reference line
    plt.figure()

    plt.subplot(1, 2, 1)
    plt.scatter(train_y, train_y_pred, alpha=0.5)
    plt.plot(diagonal, diagonal)
    plt.title("Train")

    plt.subplot(1, 2, 2)
    plt.scatter(test_y, test_y_pred, alpha=0.5)
    plt.plot(diagonal, diagonal)
    plt.title("Test")
    plt.show()
Example 46
Project: Fall-Detection-with-CNN   Author: munnam77   File: temporalnetgeneral.py    MIT License 5 votes vote down vote up
def plot_training_info(case, metrics, save, history):
    """Plot training/validation accuracy and loss curves from a Keras-style
    history dictionary.

    Parameters:
        case (str): Path prefix for the saved figure files.
        metrics: Collection of metric names; recognises 'accuracy' and 'loss'.
        save (bool): Save the figures as PNGs when True, otherwise show them.
        history (dict): Maps 'acc', 'val_acc', 'loss', 'val_loss' to
            per-epoch value sequences.
    """
    plt.ioff()  # keep figures from popping up; they are saved or shown explicitly

    # summarize history for accuracy
    if 'accuracy' in metrics:
        fig = plt.figure()
        plt.plot(history['acc'])
        plt.plot(history['val_acc'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'val'], loc='upper left')
        if save:  # idiomatic truth test instead of '== True'
            plt.savefig(case + 'accuracy.png')
            plt.gcf().clear()
        else:
            plt.show()
        plt.close(fig)

    # summarize history for loss (log scale emphasises late-stage changes)
    if 'loss' in metrics:
        fig = plt.figure()
        plt.plot(history['loss'])
        plt.plot(history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.yscale("log")
        plt.legend(['train', 'val'], loc='upper left')
        if save:
            plt.savefig(case + 'loss.png')
            plt.gcf().clear()
        else:
            plt.show()
        plt.close(fig)
Example 47
Project: QCANet   Author: funalab   File: graph_draw.py    MIT License 5 votes vote down vote up
def graph_draw_centroid_2axis(self, cent_x, cent_y, axis):
        """Scatter centroid trajectories projected onto a 2-D plane and save a PDF.

        Parameters:
            cent_x, cent_y: per-track sequences of coordinates for the two
                plotted axes.
            axis (str): one of 'XY', 'YZ', 'ZX' selecting the projection.
        """
        # Bug fix: the original compared strings with 'is', which tests object
        # identity rather than equality (implementation-dependent and a
        # SyntaxWarning since Python 3.8); '==' is the correct comparison.
        plt.figure()
        if axis == 'XY':
            plt.xlabel('X')
            plt.ylabel('Y')
            plt.xlim([0, self.x])
            plt.ylim([0, self.y])
        elif axis == 'YZ':
            plt.xlabel('Z')
            plt.ylabel('Y')
            plt.xlim([0, 51])  # Z axis uses a fixed limit of 51
            plt.ylim([0, self.y])
        elif axis == 'ZX':
            plt.xlabel('X')
            plt.ylabel('Z')
            plt.xlim([0, self.x])
            plt.ylim([0, 51])
        cmap = plt.get_cmap('jet')
        # One colour per track, evenly spaced along the jet colormap.
        for i in range(len(cent_x)):
            colors = cmap(i / float(len(cent_x)))
            plt.plot(np.array(cent_x[i]), np.array(cent_y[i]), "o", color=colors, alpha=0.6, ms=3, mew=0.5)
        if axis == 'XY':
            filename = self.opbase + self.psep + 'Centroid-XY.pdf'
        elif axis == 'YZ':
            filename = self.opbase + self.psep + 'Centroid-YZ.pdf'
        elif axis == 'ZX':
            filename = self.opbase + self.psep + 'Centroid-ZX.pdf'
        plt.savefig(filename)
Example 48
Project: fim_cancer   Author: Abraxos   File: cgpb_finder.py    GNU General Public License v3.0 5 votes vote down vote up
def __KM_analysis(self,duration_table,expressed_array,unexpressed_array,freq_set):
		"""Kaplan-Meier comparison between the expressed and unexpressed groups.

		Splits the rows of ``duration_table`` (header row skipped) into the
		expressed / unexpressed groups, runs a log-rank test, and plots the
		two Kaplan-Meier curves only when the p-value is below 0.0006.

		Returns the log-rank test p-value.
		"""
		data = {}
		# T: survival durations, C: event-observed flags, per group.
		expressed_T = []
		expressed_C = []
		unexpressed_T = []
		unexpressed_C = []
		# idx > 0 skips the header row; rows with "NA" duration or censor
		# status are dropped.
		for idx,row in enumerate(duration_table):
			if(idx>0):
				if row[0] in unexpressed_array and row[1] !=  "NA" and row[2] !=  "NA":
					unexpressed_T.append(float(row[1]))
					unexpressed_C.append(int(row[2]))
				elif row[0] in expressed_array and row[1] != "NA" and row[2] !=  "NA":
					expressed_T.append(float(row[1]))
					expressed_C.append(int(row[2]))

		results = logrank_test(expressed_T, unexpressed_T, expressed_C, unexpressed_C, alpha=.95 )
		# Only visually compare survival curves for strongly significant sets.
		if(results.p_value < .0006):
			ax = plt.subplot(111)
			kmf = KaplanMeierFitter()
			kmf.fit(expressed_T, event_observed=expressed_C, label="Satisfying")
			kmf.plot(ax=ax, ci_force_lines=False)
			kmf.fit(unexpressed_T, event_observed=unexpressed_C, label="None-Satisfying")
			kmf.plot(ax=ax, ci_force_lines=False)
			plt.ylim(0,1)
			plt.title("Lifespans ("+str(freq_set)+")")
			plt.show()
		return results.p_value

	# public method that wraps the methods above... 
Example 49
Project: RTX   Author: RTXteam   File: LogReg.py    MIT License 5 votes vote down vote up
def plot_cutoff(self, dfs, title_post = ["Random Pairings", "True Negatives", "True Positives"], print_flag=True):
        """
        This plots the treats classification rate for every cutoff of a whole %

        :dfs: A pandas dataframe (or list of dataframes) containing the predictions
        :title_post: A list of strings used as the legend entry for each dataframe
        :print_flag: A Boolean indicating whether to print exact numbers for the last 20% of cutoffs or not
        """
        if not isinstance(dfs, list):  # isinstance over type() comparison
            dfs = [dfs]

        color = ["xkcd:dark magenta","xkcd:dark turquoise","xkcd:azure","xkcd:purple blue","xkcd:scarlet",
            "xkcd:orchid", "xkcd:pumpkin", "xkcd:gold", "xkcd:peach", "xkcd:neon green", "xkcd:grey blue"]

        # Cutoffs are identical for every dataframe; compute them once.
        cutoffs = [x/100 for x in range(101)]

        for c, df in enumerate(dfs):
            # Fraction of rows whose treat probability clears each cutoff.
            cutoff_n = [df["treat_prob"][df["treat_prob"] >= cutoff].count()/len(df) for cutoff in cutoffs]

            plt.plot(cutoffs,cutoff_n,color[c],label=title_post[c])
            if print_flag:
                with pd.option_context('display.max_rows', None, 'display.max_columns', None):
                    print("\n",title_post[c], ":\n")
                    print(pd.DataFrame({"cutoff":cutoffs[80:],"count":cutoff_n[80:]}))
        plt.xlim([0, 1])
        plt.ylim([0, 1])
        plt.xlabel('Cutoff Prob')
        plt.ylabel('Rate of Positive Predictions')  # fixed 'Postitive' typo
        plt.title('Prediction Rates of Treats Class')
        plt.legend(loc="lower left")
        plt.show()
Example 50
Project: carla_py   Author: IamWangYunKai   File: get_data.py    MIT License 5 votes vote down vote up
def get_instruction(waypoints):
    """Render the upcoming waypoint path, rotated into the ego frame, as an
    instruction image saved to OUTPUT_PATH.

    Args:
        waypoints: sequence of carla waypoints; waypoints[0] is taken as the
            origin and the direction from waypoints[0] to waypoints[3]
            defines the path heading.
    """
    global frame
    x = []
    y = []
    # Path heading, estimated from the first and fourth waypoints.
    theta = math.atan2((waypoints[3].transform.location.y - waypoints[0].transform.location.y),
                       (waypoints[3].transform.location.x - waypoints[0].transform.location.x))
    # Up to 50 waypoints are transformed into the local frame.
    for i in range(min(len(waypoints)-1, 50)):
        # Translate so waypoints[0] is the origin.
        _x = waypoints[i].transform.location.x - waypoints[0].transform.location.x
        _y = waypoints[i].transform.location.y - waypoints[0].transform.location.y
        
        new_theta = math.pi/2-theta

        # Rotate by (pi/2 - theta) so the path heading points "up" the image.
        x_ = _x*math.cos(new_theta) - _y*math.sin(new_theta)
        y_ = _y*math.cos(new_theta) + _x*math.sin(new_theta)
        
        # x is mirrored; presumably to match the camera image's left/right
        # convention — TODO confirm against the consumer of these images.
        x.append(-x_)
        y.append(y_)
    
    scale = 20  # half-width of the plotted window (units assumed to be meters — confirm)
    fig = plt.figure(figsize=(IMG_LENGTH/100,IMG_WIDTH/100))
    plt.xlim(-scale, scale)
    plt.ylim(0, scale)
    plt.axis('off')  # pure image output: no axes, ticks or frame
    plt.plot(x,y,"r-",linewidth=50)
    fig.savefig(OUTPUT_PATH + '%06d' % frame + '_ins.png', bbox_inches='tight', dpi=400)
    plt.close(fig)