Python matplotlib.pyplot.xscale() Examples

The following are 30 code examples of matplotlib.pyplot.xscale(), drawn from open-source projects. Each example notes the source file, project, and license of the original code. You may also want to check out all available functions/classes of the matplotlib.pyplot module.
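For orientation before the project examples: plt.xscale(value) sets the scale of the x-axis of the current axes, where value is usually one of 'linear', 'log', 'symlog', or 'logit'. Here is a minimal, self-contained sketch with made-up data (not taken from any of the projects below):

import numpy as np
import matplotlib.pyplot as plt

x = np.logspace(0, 3, 50)  # 50 points from 1 to 1000, evenly spaced in log space
y = x ** 2

plt.plot(x, y)
plt.xscale('log')          # switch the x-axis to a logarithmic scale
plt.xlabel('x (log scale)')
plt.ylabel('y')
plt.show()

Most of the examples below follow this pattern: build the plot, then call plt.xscale('log') (often together with plt.yscale('log')) before saving or showing the figure.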
Example #1
Source File: test_axes.py    From ImageFusion with MIT License
def test_markevery_log_scales():
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0, -1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]

    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)

    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta

    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.xscale('log')
        plt.yscale('log')
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
Example #2
Source File: find_learning_rate.py    From allennlp with Apache License 2.0
def _save_plot(learning_rates: List[float], losses: List[float], save_path: str):

    try:
        import matplotlib

        matplotlib.use("Agg")  # noqa
        import matplotlib.pyplot as plt

    except ModuleNotFoundError as error:

        logger.warning(
            "To use allennlp find-learning-rate, please install matplotlib: pip install matplotlib>=2.2.3."
        )
        raise error

    plt.ylabel("loss")
    plt.xlabel("learning rate (log10 scale)")
    plt.xscale("log")
    plt.plot(learning_rates, losses)
    logger.info(f"Saving learning_rate vs loss plot to {save_path}.")
    plt.savefig(save_path) 
Example #3
Source File: plot_trajectories.py    From Auto-PyTorch with Apache License 2.0
def get_pipeline_config_options(self):
        options = [
            ConfigOption('plot_logs', default=None, type='str', list=True),
            ConfigOption('output_folder', default=None, type='directory'),
            ConfigOption('agglomeration', default='mean', choices=['mean', 'median']),
            ConfigOption('scale_uncertainty', default=1, type=float),
            ConfigOption('font_size', default=12, type=int),
            ConfigOption('prefixes', default=["val"], list=True, choices=["", "train", "val", "test", "ensemble", "ensemble_test"]),
            ConfigOption('label_rename', default=False, type=to_bool),
            ConfigOption('skip_dataset_plots', default=False, type=to_bool),
            ConfigOption('plot_markers', default=False, type=to_bool),
            ConfigOption('plot_individual', default=False, type=to_bool),
            ConfigOption('plot_type', default="values", type=str, choices=["values", "losses"]),
            ConfigOption('xscale', default='log', type=str),
            ConfigOption('yscale', default='linear', type=str),
            ConfigOption('xmin', default=None, type=float),
            ConfigOption('xmax', default=None, type=float),
            ConfigOption('ymin', default=None, type=float),
            ConfigOption('ymax', default=None, type=float),
            ConfigOption('value_multiplier', default=1, type=float)
        ]
        return options 
Example #4
Source File: test_axes.py    From neural-network-animation with MIT License
def test_markevery_log_scales():
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0, -1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]

    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)

    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta

    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.xscale('log')
        plt.yscale('log')
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
Example #5
Source File: gradev-demo.py    From allantools with GNU Lesser General Public License v3.0
def example1():
    """
    Compute the GRADEV of a white phase noise. Compares two different 
    scenarios. 1) The original data and 2) ADEV estimate with gap robust ADEV.
    """
    N = 1000
    f = 1
    y = np.random.randn(1,N)[0,:]
    x = [xx for xx in np.linspace(1,len(y),len(y))]
    x_ax, y_ax, (err_l, err_h), ns = allan.gradev(y,data_type='phase',rate=f,taus=x)
    plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h],label='GRADEV, no gaps')
    
    
    y[int(np.floor(0.4*N)):int(np.floor(0.6*N))] = np.NaN # Simulate missing data
    x_ax, y_ax, (err_l, err_h) , ns = allan.gradev(y,data_type='phase',rate=f,taus=x)
    plt.errorbar(x_ax, y_ax,yerr=[err_l,err_h], label='GRADEV, with gaps')
    plt.xscale('log')
    plt.yscale('log')
    plt.grid()
    plt.legend()
    plt.xlabel('Tau / s')
    plt.ylabel('Overlapping Allan deviation')
    plt.show() 
Example #6
Source File: dataset.py    From TheCannon with MIT License
def diagnostics_SNR(self): 
        """ Plots SNR distributions of ref and test object spectra """
        print("Diagnostic for SNRs of reference and survey objects")
        fig = plt.figure()
        data = self.test_SNR
        plt.hist(data, bins=int(np.sqrt(len(data))), alpha=0.5, facecolor='r', 
                label="Survey Objects")
        data = self.tr_SNR
        plt.hist(data, bins=int(np.sqrt(len(data))), alpha=0.5, color='b',
                label="Ref Objects")
        plt.legend(loc='upper right')
        #plt.xscale('log')
        plt.title("SNR Comparison Between Reference and Survey Objects")
        #plt.xlabel("log(Formal SNR)")
        plt.xlabel("Formal SNR")
        plt.ylabel("Number of Objects")
        return fig 
Example #7
Source File: test_axes.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def test_markevery_log_scales():
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0, -1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]

    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)

    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta

    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.xscale('log')
        plt.yscale('log')
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
Example #8
Source File: blackjack.py    From reinforcement-learning-an-introduction with MIT License
def figure_5_3():
    true_value = -0.27726
    episodes = 10000
    runs = 100
    error_ordinary = np.zeros(episodes)
    error_weighted = np.zeros(episodes)
    for i in tqdm(range(0, runs)):
        ordinary_sampling_, weighted_sampling_ = monte_carlo_off_policy(episodes)
        # get the squared error
        error_ordinary += np.power(ordinary_sampling_ - true_value, 2)
        error_weighted += np.power(weighted_sampling_ - true_value, 2)
    error_ordinary /= runs
    error_weighted /= runs

    plt.plot(error_ordinary, label='Ordinary Importance Sampling')
    plt.plot(error_weighted, label='Weighted Importance Sampling')
    plt.xlabel('Episodes (log scale)')
    plt.ylabel('Mean square error')
    plt.xscale('log')
    plt.legend()

    plt.savefig('../images/figure_5_3.png')
    plt.close() 
Example #9
Source File: infinite_variance.py    From reinforcement-learning-an-introduction with MIT License
def figure_5_4():
    runs = 10
    episodes = 100000
    for run in range(runs):
        rewards = []
        for episode in range(0, episodes):
            reward, trajectory = play()
            if trajectory[-1] == ACTION_END:
                rho = 0
            else:
                rho = 1.0 / pow(0.5, len(trajectory))
            rewards.append(rho * reward)
        rewards = np.add.accumulate(rewards)
        estimations = np.asarray(rewards) / np.arange(1, episodes + 1)
        plt.plot(estimations)
    plt.xlabel('Episodes (log scale)')
    plt.ylabel('Ordinary Importance Sampling')
    plt.xscale('log')

    plt.savefig('../images/figure_5_4.png')
    plt.close() 
Example #10
Source File: __init__.py    From typhon with MIT License
def plot_alt_temp_mole(atmosphere=None, temp=None, alt_ref=None, mole=None):
    """Plot-helping function
    """
    if atmosphere is True:
        alt, pre, temp, mole, alt_ref = swifile(atmosphere)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(mole * 1.e-6, alt_ref, 'b-')  # H2O number density (SSL=60)
    plt.xlabel('Number density [cm$^{-3}$]', fontsize=18, weight='bold')
    plt.xscale('log')
    plt.ylabel('Altitude [km]', fontsize=18, weight='bold')
    ax2 = ax.twiny()
    ax2.plot(temp, alt_ref, 'k-', label='Temperature')
    ax2.set_xlabel("Temperature [K]", fontsize=18, weight='bold')
    ax2.plot([], [], 'b-', label='H$_{2}$O Number density')
    plt.legend()
    fig.tight_layout(pad=0.4)
    return fig 
Example #11
Source File: lr_finder.py    From keras_lr_finder with MIT License
def plot_loss_change(self, sma=1, n_skip_beginning=10, n_skip_end=5, y_lim=(-0.01, 0.01)):
        """
        Plots rate of change of the loss function.
        Parameters:
            sma - number of batches for simple moving average to smooth out the curve.
            n_skip_beginning - number of batches to skip on the left.
            n_skip_end - number of batches to skip on the right.
            y_lim - limits for the y axis.
        """
        derivatives = self.get_derivatives(sma)[n_skip_beginning:-n_skip_end]
        lrs = self.lrs[n_skip_beginning:-n_skip_end]
        plt.ylabel("rate of loss change")
        plt.xlabel("learning rate (log scale)")
        plt.plot(lrs, derivatives)
        plt.xscale('log')
        plt.ylim(y_lim)
        plt.show() 
Example #12
Source File: learning_curve.py    From dota2-predictor with MIT License
def _plot_matplotlib(subset_sizes, data_list, mmr):
    """ Plots learning curve using matplotlib backend.
    Args:
        subset_sizes: list of dataset sizes on which the evaluation was done
        data_list: list of ROC AUC scores corresponding to subset_sizes
        mmr: what MMR the data is taken from
    """
    plt.plot(subset_sizes, data_list[0], lw=2)
    plt.plot(subset_sizes, data_list[1], lw=2)

    plt.legend(['Cross validation error', 'Test error'])
    plt.xscale('log')
    plt.xlabel('Dataset size')
    plt.ylabel('Error')

    if mmr:
        plt.title('Learning curve plot for %d MMR' % mmr)
    else:
        plt.title('Learning curve plot')

    plt.show() 
Example #13
Source File: test_axes.py    From twitter-stock-recommendation with MIT License
def test_markevery_log_scales():
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0, -1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]

    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)

    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta

    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.xscale('log')
        plt.yscale('log')
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
Example #14
Source File: opt_callbacks.py    From lumin with Apache License 2.0
def plot(self, n_skip:int=0, n_max:Optional[int]=None, lim_y:Optional[Tuple[float,float]]=None) -> None:
        r'''
        Plot the loss as a function of the LR.

        Arguments:
            n_skip: Number of initial iterations to skip in plotting
            n_max: Maximum iteration number to plot
            lim_y: y-range for plotting
        '''

        # TODO: Decide on whether to keep this; could just pass to plot_lr_finders

        with sns.axes_style(self.plot_settings.style), sns.color_palette(self.plot_settings.cat_palette):
            plt.figure(figsize=(self.plot_settings.w_mid, self.plot_settings.h_mid))
            plt.plot(self.history['lr'][n_skip:n_max], self.history['loss'][n_skip:n_max], label='Training loss', color='g')
            if np.log10(self.lr_bounds[1])-np.log10(self.lr_bounds[0]) >= 3: plt.xscale('log')
            plt.ylim(lim_y)
            plt.grid(True, which="both")
            plt.legend(loc=self.plot_settings.leg_loc, fontsize=self.plot_settings.leg_sz)
            plt.xticks(fontsize=self.plot_settings.tk_sz, color=self.plot_settings.tk_col)
            plt.yticks(fontsize=self.plot_settings.tk_sz, color=self.plot_settings.tk_col)
            plt.ylabel("Loss", fontsize=self.plot_settings.lbl_sz, color=self.plot_settings.lbl_col)
            plt.xlabel("Learning rate", fontsize=self.plot_settings.lbl_sz, color=self.plot_settings.lbl_col)
            plt.show() 
Example #15
Source File: print_figures.py    From Generative_Continual_Learning with MIT License
def plot_classif_perf(list_overall_best_score_classif, list_overall_best_score_classes_classif, list_num_samples,
                      Dataset):
    for iter, [scores_classif, dataset, method, num_task] in enumerate(list_overall_best_score_classif):

        if dataset == Dataset and num_task == 1 and method == "Baseline":

            scores_mean = scores_classif.mean(0)
            scores_std = scores_classif.std(0)

            # there should be only one curve by dataset
            plt.plot(list_num_samples, scores_mean)
            plt.fill_between(list_num_samples, scores_mean - scores_std, scores_mean + scores_std, alpha=0.4)

    plt.xscale('log')
    plt.xlabel("Number of Samples")
    plt.ylabel("Accuracy")
    plt.ylim([0, 100])
    plt.title('Accuracy as a function of the number of samples used')
    plt.savefig(os.path.join(save_dir, Dataset + "_Accuracy_NbSamples.png"))
    plt.clf() 
Example #16
Source File: test_replayer.py    From pynvme with BSD 3-Clause "New" or "Revised" License
def test_replay_pynvme_trace(nvme0, nvme0n1, accelerator=1.0):
    filename = sg.PopupGetFile('select the trace file to replay', 'pynvme')
    if filename:
        logging.info(filename)

        # format before replay
        nvme0n1.format(512)
        
        response_time = [0] * 1000000
        replay_logfile(filename, nvme0n1, nvme0.mdts, accelerator, response_time)

        import matplotlib.pyplot as plt
        plt.plot(response_time)
        plt.xlabel('useconds')
        plt.ylabel('# IO')
        plt.xlim(1, len(response_time))
        plt.ylim(bottom=1)
        plt.xscale('log')
        plt.yscale('log')
        plt.title(filename)
        plt.tight_layout()

        plt.show() 
Example #17
Source File: test_examples.py    From pynvme with BSD 3-Clause "New" or "Revised" License
def test_ioworker_performance(nvme0n1):
    import matplotlib.pyplot as plt

    output_io_per_second = []
    percentile_latency = dict.fromkeys([90, 99, 99.9, 99.99, 99.999])
    nvme0n1.ioworker(io_size=8,
                     lba_random=True,
                     read_percentage=100,
                     output_io_per_second=output_io_per_second,
                     output_percentile_latency=percentile_latency,
                     time=10).start().close()
    logging.info(output_io_per_second)
    logging.info(percentile_latency)

    X = []
    Y = []
    for k in percentile_latency:
        X.append(k)
        Y.append(percentile_latency[k])

    plt.plot(X, Y)
    plt.xscale('log')
    plt.yscale('log')
    #plt.show() 
Example #18
Source File: distribution.py    From pyprob with BSD 2-Clause "Simplified" License
def plot(self, min_val=-10, max_val=10, step_size=0.1, figsize=(10, 5), xlabel=None, ylabel='Probability', xticks=None, yticks=None, log_xscale=False, log_yscale=False, file_name=None, show=True, fig=None, *args, **kwargs):
        if fig is None:
            if not show:
                mpl.rcParams['axes.unicode_minus'] = False
                plt.switch_backend('agg')
            fig = plt.figure(figsize=figsize)
            fig.tight_layout()
        xvals = np.arange(min_val, max_val, step_size)
        plt.plot(xvals, [torch.exp(self.log_prob(x)) for x in xvals], *args, **kwargs)
        if log_xscale:
            plt.xscale('log')
        if log_yscale:
            plt.yscale('log', nonpositive='clip')  # 'nonposy' was renamed 'nonpositive' in Matplotlib 3.3
        if xticks is not None:
            plt.xticks(xticks)
        if yticks is not None:
            plt.yticks(yticks)
        # if xlabel is None:
        #     xlabel = self.name
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        if file_name is not None:
            plt.savefig(file_name)
        if show:
            plt.show() 
Example #19
Source File: engine_mpl.py    From tellurium with Apache License 2.0
def __init__(self, layout=PlottingLayout(), use_legend=True, xtitle=None, ytitle=None, title=None, 
                 linewidth=None, xlim=None, ylim=None, logx=None, logy=None, xscale=None, yscale=None, 
                 grid=None, ordinates=None, tag=None, labels=None, figsize=(9,6), savefig=None, dpi=None):
        super(MatplotlibFigure, self).__init__(title=title, layout=layout,
                                               xtitle=xtitle, ytitle=ytitle, logx=logx, logy=logy)
        self.use_legend = use_legend
        self.linewidth = linewidth
        self.xscale = xscale
        self.yscale = yscale
        self.grid = grid
        self.ordinates = ordinates
        self.tag = tag
        self.labels = labels
        self.figsize = figsize
        self.savefig = savefig
        self.dpi = dpi 
Example #20
Source File: utilities.py    From EvaluatingDPML with MIT License
def plot_histogram(vector):
    mem = vector[:10000]
    non_mem = vector[10000:]
    data, bins, _ = plt.hist([mem, non_mem], bins=loss_range())
    plt.clf()
    mem_hist = np.array(data[0])
    non_mem_hist = np.array(data[1])
    plt.plot(bins[:-1], mem_hist / len(mem), 'k-', label='Members')
    plt.plot(bins[:-1], non_mem_hist / len(non_mem), 'k--', label='Non Members')
    plt.xscale('log')
    plt.xticks([10**-6, 10**-4, 10**-2, 10**0])
    plt.yticks(np.arange(0, 0.11, step=0.02))
    plt.ylim(0, 0.1)
    plt.xlabel('Per-Instance Loss')
    plt.ylabel('Fraction of Instances')
    plt.legend()
    plt.tight_layout()
    plt.show() 
Example #21
Source File: analyze_dimension_and_radius.py    From megaman with BSD 2-Clause "Simplified" License
def find_dimension_plot(avg_neighbors, radii, fit_range, savefig=False, fname='dimension_plot.png'):
    tickrange = np.append(np.arange(0, len(radii)-1, 10), len(radii)-1)
    try:
        m, b = polyfit(np.log(radii[fit_range]), np.log(avg_neighbors[fit_range]), 1)
    except Exception:
        m = 0
        b = 0
    if MATPLOTLIB_LOADED:
        plt.scatter(radii, avg_neighbors)
        plt.plot(radii, avg_neighbors, color='red')
        plt.plot(radii[fit_range], np.exp(b)*radii[fit_range]**m, color='blue')
        plt.yscale('log')
        plt.xscale('log')
        plt.xlabel('radius')
        plt.ylabel('neighbors')
        plt.title('data dim='+repr(m)[:4] + "\nRadius = [" + str(np.min(radii)) + ", " + str(np.max(radii)) + "]")
        plt.xlim([np.min(radii), np.max(radii)])
        plt.xticks(np.round(radii[tickrange], 1), np.round(radii[tickrange], 1))
        plt.grid(True, which='minor')
        print('dim=', m)
        plt.show()
        if savefig:
            plt.savefig(fname, format='png')
    return m
Example #22
Source File: performance.py    From spotlight with MIT License
def plot(dims, sequence, factorization):

    import matplotlib
    matplotlib.use('Agg')  # NOQA
    import matplotlib.pyplot as plt
    import seaborn as sns

    sns.set_style("darkgrid")

    plt.ylabel("Speed improvement")
    plt.xlabel("Size of embedding layers")
    plt.title("Fitting speed (1.0 = no change)")
    plt.xscale('log')

    plt.plot(dims,
             1.0 / sequence,
             label='Sequence model')
    plt.plot(dims,
             1.0 / factorization,
             label='Factorization model')
    plt.legend(loc='lower right')
    plt.savefig('speed.png')
    plt.close() 
Example #23
Source File: test_axes.py    From coffeegrindsize with MIT License
def test_markevery_log_scales():
    cases = [None,
             8,
             (30, 8),
             [16, 24, 30], [0, -1],
             slice(100, 200, 3),
             0.1, 0.3, 1.5,
             (0.0, 0.1), (0.45, 0.1)]

    cols = 3
    gs = matplotlib.gridspec.GridSpec(len(cases) // cols + 1, cols)

    delta = 0.11
    x = np.linspace(0, 10 - 2 * delta, 200) + delta
    y = np.sin(x) + 1.0 + delta

    for i, case in enumerate(cases):
        row = (i // cols)
        col = i % cols
        plt.subplot(gs[row, col])
        plt.title('markevery=%s' % str(case))
        plt.xscale('log')
        plt.yscale('log')
        plt.plot(x, y, 'o', ls='-', ms=4, markevery=case)
Example #24
Source File: lending_plots.py    From ml-fairness-gym with Apache License 2.0
def plot_cumulative_recall_differences(cumulative_recalls, path):
  """Plot differences in cumulative recall between groups up to time T."""
  plt.figure(figsize=(8, 3))
  style = {'dynamic': '-', 'static': '--'}

  for setting, recalls in cumulative_recalls.items():
    abs_array = np.mean(np.abs(recalls[0::2, :] - recalls[1::2, :]), axis=0)

    plt.plot(abs_array, style[setting], label=setting)

  plt.title(
      'Recall gap for EO agent in dynamic vs static environments', fontsize=16)
  plt.yscale('log')
  plt.xscale('log')
  plt.ylabel('TPR gap', fontsize=16)
  plt.xlabel('# steps', fontsize=16)
  plt.grid(True)
  plt.legend()
  plt.tight_layout()
  _write(path) 
Example #25
Source File: test_high_l_stability.py    From starry with MIT License
def test_high_l_stability(plot=False):
    map = starry.Map(ydeg=20, reflected=False)
    map[1:, :] = 1
    xo = np.logspace(-2, np.log10(2.0), 1000)
    yo = 0
    ro = 0.9
    flux = map.flux(xo=xo, yo=yo, ro=ro)
    bo = np.sqrt(xo ** 2 + yo ** 2)
    ksq = (1 - ro ** 2 - bo ** 2 + 2 * bo * ro) / (4 * bo * ro)

    if plot:
        plt.plot(ksq, flux)
        plt.xscale("log")
        plt.show()

    # Check for gross stability issues here
    assert np.std(flux[ksq > 1]) < 0.1 
Example #26
Source File: callbacks.py    From transformer-word-segmenter with Apache License 2.0
def plot_loss(self):
        '''Helper function to quickly observe the learning rate experiment results.'''
        plt.plot(self.history['lr'], self.history['loss'])
        plt.xscale('log')
        plt.xlabel('Learning rate')
        plt.ylabel('Loss')
        plt.show() 
Example #27
Source File: train_generator.py    From Pix2Pix-Timbre-Transfer with MIT License
def plot_loss_findlr(losses, lrs, output_name, n_skip_beginning=10, n_skip_end=5):
    """
    Plots the loss.
    Parameters:
        n_skip_beginning - number of batches to skip on the left.
        n_skip_end - number of batches to skip on the right.
    """
    plt.figure()
    plt.ylabel("loss")
    plt.xlabel("learning rate (log scale)")
    plt.plot(lrs[n_skip_beginning:-n_skip_end], losses[n_skip_beginning:-n_skip_end])
    plt.xscale('log')
    plt.savefig(output_name) 
Example #28
Source File: train.py    From Holocron with MIT License
def plot_lr_finder(train_batch, model, data_loader, optimizer, criterion, device,
                   start_lr=1e-7, end_lr=1, loss_margin=1e-2):

    lrs, losses = holocron.utils.lr_finder(train_batch, model, data_loader,
                                           optimizer, criterion, device, start_lr=start_lr, end_lr=end_lr,
                                           stop_threshold=10, beta=0.95)
    # Plot Loss vs LR
    plt.plot(lrs[10:-5], losses[10:-5])
    plt.xscale('log')
    plt.xlabel('Learning Rate')
    plt.ylabel('Training loss')
    plt.grid(True, linestyle='--', axis='x')
    plt.show()
    sys.exit() 
Example #29
Source File: train.py    From Holocron with MIT License
def plot_lr_finder(train_batch, model, data_loader, optimizer, criterion, device,
                   start_lr=1e-7, end_lr=1, loss_margin=1e-2):

    lrs, losses = holocron.utils.lr_finder(train_batch, model, data_loader,
                                           optimizer, criterion, device, start_lr=start_lr, end_lr=end_lr,
                                           stop_threshold=10, beta=0.95)
    # Plot Loss vs LR
    plt.plot(lrs[10:-5], losses[10:-5])
    plt.xscale('log')
    plt.xlabel('Learning Rate')
    plt.ylabel('Training loss')
    plt.grid(True, linestyle='--', axis='x')
    plt.show() 
Example #30
Source File: callbacks.py    From transformer-keras with Apache License 2.0
def plot_loss(self):
        '''Helper function to quickly observe the learning rate experiment results.'''
        plt.plot(self.history['lr'], self.history['loss'])
        plt.xscale('log')
        plt.xlabel('Learning rate')
        plt.ylabel('Loss')
        plt.show()