Python pylab.xticks() Examples
The following are 30 code examples of pylab.xticks().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions/classes of the module pylab, or try the search function.
Example #1
Source File: View.py From Deep-Spying with Apache License 2.0 | 9 votes |
def plot_confusion_matrix(self, matrix, labels):
    """Render *matrix* as an annotated confusion-matrix image, with *labels* on both axes."""
    # Nothing to do when the view neither saves nor shows figures.
    if not self.to_save and not self.to_show:
        return
    pylab.figure()
    pylab.imshow(matrix, interpolation='nearest', cmap=pylab.cm.jet)
    pylab.title("Confusion Matrix")
    # Print each cell's value at its grid position (column = x, row = y).
    for row_idx, row in enumerate(matrix):
        for col_idx, cell in enumerate(row):
            pylab.annotate("%.1f" % cell, xy=(col_idx, row_idx),
                           horizontalalignment='center',
                           verticalalignment='center',
                           fontsize=9)
    pylab.colorbar()
    classes = np.arange(len(labels))
    pylab.xticks(classes, labels)
    pylab.yticks(classes, labels)
    pylab.ylabel('Expected label')
    pylab.xlabel('Predicted label')
Example #2
Source File: View.py From Deep-Spying with Apache License 2.0 | 6 votes |
def plot_barchart(self, data, labels, colors, xlabel, ylabel, xticks, legendloc=1):
    """Draw grouped bar charts with error bars; data[i] is a (values, errors) pair per series."""
    self.big_figure()
    index = np.arange(len(data[0][0]))
    bar_width = 0.25
    pylab.grid("on", axis='y')
    pylab.ylim([0.5, 1.0])
    # Shift each series right by one bar width so the groups interleave.
    for series_idx in range(len(data)):
        rects = pylab.bar(bar_width / 2 + index + (series_idx * bar_width),
                          data[series_idx][0], bar_width,
                          alpha=0.5,
                          color=colors[series_idx],
                          yerr=data[series_idx][1],
                          error_kw={'ecolor': '0.3'},
                          label=labels[series_idx])
    pylab.legend(loc=legendloc, prop={'size': 12})
    pylab.xlabel(xlabel)
    pylab.ylabel(ylabel)
    pylab.xticks(bar_width / 2 + index + ((bar_width * (len(data[0]) + 1)) / len(data[0])), xticks)
Example #3
Source File: plot.py From SelfTarget with MIT License | 6 votes |
def plotVerticalHistSummary(all_result_outputs, label='', data_label='', y_label='', plot_label='', hist_width=1000, hist_bins=100, oligo_id_str='Oligo ID', val_str='Cut Rate', total_reads_str='Total Reads'):
    """Plot one horizontal histogram of *val_str* per sample, stacked side by side."""
    datas = [x[0][data_label][0] for x in all_result_outputs]
    sample_names = [shortDirLabel(x[1]) for x in all_result_outputs]
    # Inner-join all sample tables on the oligo id; later duplicates get numeric suffixes.
    merged_data = pd.merge(datas[0], datas[1], how='inner', on=oligo_id_str, suffixes=['', ' 2'])
    for i, data in enumerate(datas[2:]):
        merged_data = pd.merge(merged_data, data, how='inner', on=oligo_id_str, suffixes=['', ' %d' % (i + 3)])
    suffix = lambda i: ' %d' % (i + 1) if i > 0 else ''
    xpos = [x * hist_width for x in range(len(sample_names))]
    PL.figure(figsize=(12, 8))
    for i, label1 in enumerate(sample_names):
        dvs = merged_data[val_str + suffix(i)]
        # Each sample's histogram is offset by hist_width along x so they do not overlap.
        PL.hist(dvs, bins=hist_bins, bottom=i * hist_width, orientation='horizontal')
    PL.xticks(xpos, sample_names, rotation='vertical')
    PL.ylabel(y_label)
    PL.title(label)
    PL.show(block=False)
    PL.savefig(getPlotDir() + '/%s_%s.png' % (plot_label, label.replace(' ', '_')), bbox_inches='tight')
Example #4
Source File: plot_old_new_predictions.py From SelfTarget with MIT License | 6 votes |
def plotKLBoxes(data):
    """Box-plot the KL columns of *data*, excluding class/old/conventional/combined variants."""
    cols = [x for x in data.columns
            if 'KL' in x and 'Class KL' not in x and 'Old' not in x
            and 'Conventional' not in x and 'Combined' not in x]
    cols.reverse()
    cols_label, max_kl = 'KL', 9
    PL.figure(figsize=(4, 5))
    # Debug output: oligos whose two KL comparisons both fall in (0.75, 0.8).
    pt = data.loc[(data['Combined v Predicted KL'] > 0.75) & (data['Combined v Predicted KL'] < 0.8)
                  & (data['Old v New KL'] > 0.75) & (data['Old v New KL'] < 0.8)]
    print(pt['Old Oligo Id'])
    PL.boxplot([data[col] for col in cols], positions=range(len(cols)), patch_artist=True,
               boxprops=dict(facecolor='C2'), medianprops=dict(linewidth=2.5, color='C1'),
               showfliers=False)
    PL.xticks(range(len(cols)), [renameCol(x) for x in cols], rotation='vertical')
    # Annotate each box with its median.
    for i, col in enumerate(cols):
        PL.text(i - 0.15, np.median(data[col]) + 0.02, '%.2f' % np.median(data[col]))
    PL.ylabel(cols_label)
    PL.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.5)
    PL.show(block=False)
    saveFig('kl_compare_old_new_predicted_%s' % cols_label.replace(' ', ''))
Example #5
Source File: compare_overbeek_profiles.py From SelfTarget with MIT License | 6 votes |
def plotInFrame(overbeek_inframes, ours_inframes, oof_sel_overbeek_ids, pred_results_dir):
    """Scatter percent in-frame mutations: between-library synthetic vs synthetic-vs-endogenous."""
    PL.figure(figsize=(4.2, 4.2))
    data = pd.read_csv(pred_results_dir + '/old_new_kl_predicted_summaries.txt', sep='\t').fillna(-1.0)
    label1, label2 = 'New 2x800x In Frame Perc', 'New 1600x In Frame Perc'
    xdata, ydata = data[label1], data[label2]
    PL.plot(xdata, ydata, '.',
            label='Synthetic between library (R=%.2f)' % pearsonr(xdata, ydata)[0],
            color='C0', alpha=0.15)
    PL.plot(overbeek_inframes, ours_inframes, '^',
            label='Synthetic vs Endogenous (R=%.2f)' % pearsonr(overbeek_inframes, ours_inframes)[0],
            color='C1')
    # Label only points disagreeing by more than 25 percentage points.
    for (x, y, id) in zip(overbeek_inframes, ours_inframes, oof_sel_overbeek_ids):
        if abs(x - y) > 25.0:
            PL.text(x, y, id)
    PL.plot([0, 100], [0, 100], 'k--')  # identity line for reference
    PL.ylabel('Percent In-Frame Mutations')
    PL.xlabel('Percent In-Frame Mutations')
    PL.legend()
    PL.xticks([], [])
    PL.yticks([], [])
    PL.show(block=False)
    saveFig('in_frame_full_scatter')
Example #6
Source File: plotting.py From smallrnaseq with GNU General Public License v3.0 | 6 votes |
def heatmap(df, fname=None, cmap='seismic', log=False):
    """Plot a heat map"""
    from matplotlib.colors import LogNorm
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    norm = None
    # Replace zeros so a log normalisation has no undefined entries.
    df = df.replace(0, .1)
    if log == True:
        norm = LogNorm(vmin=df.min().min(), vmax=df.max().max())
    hm = ax.pcolor(df, cmap=cmap, norm=norm)
    plt.colorbar(hm, ax=ax, shrink=0.6, norm=norm)
    # Ticks centred on the cells (offset by 0.5).
    plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
    plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns, rotation=90)
    #ax.axvline(4, color='gray'); ax.axvline(8, color='gray')
    plt.tight_layout()
    if fname != None:
        fig.savefig(fname + '.png')
    return ax
Example #7
Source File: functional_map.py From cmm with GNU General Public License v2.0 | 6 votes |
def plot_functional_map(C, newfig=True):
    """Display functional-map matrix *C* rescaled to [-1, 1] with a min/0/max colorbar."""
    vmax = max(np.abs(C.max()), np.abs(C.min()))
    vmin = -vmax
    # Rescale symmetrically about zero into [-1, 1].
    C = ((C - vmin) / (vmax - vmin)) * 2 - 1
    if newfig:
        pl.figure(figsize=(5, 5))
    else:
        pl.clf()
    ax = pl.gca()
    pl.pcolor(C[::-1], edgecolor=(0.9, 0.9, 0.9, 1), lw=0.5,
              vmin=-1, vmax=1, cmap=nice_mpl_color_map())
    # colorbar with fixed min / 0 / max labels
    tick_locs = [-1., 0.0, 1.0]
    tick_labels = ['min', 0, 'max']
    bar = pl.colorbar()
    bar.locator = matplotlib.ticker.FixedLocator(tick_locs)
    bar.formatter = matplotlib.ticker.FixedFormatter(tick_labels)
    bar.update_ticks()
    ax.set_aspect(1)
    pl.xticks([])
    pl.yticks([])
    if newfig:
        pl.show()
Example #8
Source File: transfer_learning.py From plastering with MIT License | 6 votes |
def plot_confusion_matrix(test_label, pred):
    """Plot a row-normalised confusion matrix of *pred* vs *test_label*, with readable sensor names.

    Each cell shows the row-normalised fraction and, in parentheses, the raw count.
    """
    # Numeric class id -> human-readable sensor-type label.
    mapping = {1:'co2',2:'humidity',3:'pressure',4:'rmt',5:'status',6:'stpt',7:'flow',8:'HW sup',9:'HW ret',10:'CW sup',11:'CW ret',12:'SAT',13:'RAT',17:'MAT',18:'C enter',19:'C leave',21:'occu',30:'pos',31:'power',32:'ctrl',33:'fan spd',34:'timer'}
    cm_ = CM(test_label, pred)
    # FIX: `np.float` was a deprecated alias of the builtin and is removed in NumPy >= 1.24;
    # use the builtin float directly (identical behavior).
    cm = normalize(cm_.astype(float), axis=1, norm='l1')
    fig = pl.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(cm, cmap=Color.YlOrBr)
    fig.colorbar(cax)
    # Annotate every cell with "fraction(count)".
    for x in range(len(cm)):
        for y in range(len(cm)):
            ax.annotate(str("%.3f(%d)" % (cm[x][y], cm_[x][y])), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        fontsize=9)
    # Only classes actually present in labels or predictions appear on the axes.
    cm_cls = np.unique(np.hstack((test_label, pred)))
    cls = []
    for c in cm_cls:
        cls.append(mapping[c])
    pl.yticks(range(len(cls)), cls)
    pl.ylabel('True label')
    pl.xticks(range(len(cls)), cls)
    pl.xlabel('Predicted label')
    pl.title('Confusion Matrix (%.3f)' % (ACC(pred, test_label)))
    pl.show()
Example #9
Source File: plot_mh_analysis.py From SelfTarget with MIT License | 6 votes |
def compareMHK562lines(all_result_outputs, label='', y_axis='Percent Non-Null Reads', data_label='RegrLines'):
    """Plot per-MH-length regression lines averaged across K562 samples."""
    dirnames = [x[1] for x in all_result_outputs]
    clrs = ['silver', 'grey', 'darkgreen', 'green', 'lightgreen', 'royalblue', 'dodgerblue',
            'skyblue', 'mediumpurple', 'orchid', 'red', 'orange', 'salmon']
    fig = PL.figure(figsize=(6, 6))
    leg_handles = []
    mh_lens = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    for mh_len, clr in zip(mh_lens, clrs):
        regr_lines = [x[0][data_label][mh_len] for x in all_result_outputs]
        # Average the (x, y) line data across samples; element [2] is each sample's R value.
        mean_line = np.mean([x[:2] for x in regr_lines], axis=0)
        leg_handles.append(PL.plot(mean_line[0], mean_line[1],
                                   label='MH Len=%d (R=%.1f)' % (mh_len, np.mean([x[2] for x in regr_lines])),
                                   linewidth=2, color=clr)[0])
    PL.xlabel('Distance between nearest ends of\nmicrohomologous sequences', fontsize=16)
    PL.ylabel('Correspondng microhomology-mediated deletion\n as percent of total mutated reads', fontsize=16)
    PL.tick_params(labelsize=16)
    PL.legend(handles=[x for x in reversed(leg_handles)], loc='upper right')
    PL.ylim((0, 80))
    PL.xlim((0, 20))
    PL.xticks(range(0, 21, 5))
    PL.show(block=False)
    saveFig('mh_regr_lines_K562')
Example #10
Source File: plot_i1_summaries.py From SelfTarget with MIT License | 6 votes |
def i1RepeatNucleotides(data, label=''):
    """Bar-chart the mean percent of I1 repeat-left reads, split by PAM-distal nucleotide."""
    merged_data = mergeWithIndelData(data)
    nt_mean_percs, nts = [], ['A', 'T', 'G', 'C']
    for nt in nts:
        nt_data = merged_data.loc[merged_data['Repeat Nucleotide Left'] == nt]
        nt_mean_percs.append((nt_data['I1_Rpt Left Reads - NonAmb'] * 100.0 / nt_data['Total reads']).mean())
    PL.figure(figsize=(3, 3))
    PL.bar(range(4), nt_mean_percs)
    # Write the value just above each bar.
    for i in range(4):
        PL.text(i - 0.25, nt_mean_percs[i] + 0.8, '%.1f' % nt_mean_percs[i])
    PL.xticks(range(4), nts)
    PL.ylim((0, 26))
    PL.xlabel('PAM distal nucleotide\nadjacent to the cut site')
    PL.ylabel('I1 repeated left nucleotide\nas percent of total mutated reads')
    PL.show(block=False)
    saveFig('i1_rtp_nt_%s' % label)
Example #11
Source File: plot_i1_summaries.py From SelfTarget with MIT License | 6 votes |
def plotMergedI1Repeats(all_result_outputs, label=''):
    """Like i1RepeatNucleotides but on read counts summed across merged samples."""
    merged_data = mergeSamples(all_result_outputs,
                               ['I1_Rpt Left Reads - NonAmb', 'Total reads'],
                               data_label='i1IndelData',
                               merge_on=['Oligo Id', 'Repeat Nucleotide Left'])
    nt_mean_percs, nts = [], ['A', 'T', 'G', 'C']
    for nt in nts:
        nt_data = merged_data.loc[merged_data['Repeat Nucleotide Left'] == nt]
        nt_mean_percs.append((nt_data['I1_Rpt Left Reads - NonAmb Sum'] * 100.0 / nt_data['Total reads Sum']).mean())
    PL.figure(figsize=(3, 3))
    PL.bar(range(4), nt_mean_percs)
    # Value labels above the bars.
    for i in range(4):
        PL.text(i - 0.25, nt_mean_percs[i] + 0.8, '%.1f' % nt_mean_percs[i])
    PL.xticks(range(4), nts)
    PL.ylim((0, 26))
    PL.xlabel('PAM distal nucleotide\nadjacent to the cut site')
    PL.ylabel('I1 repeated left nucleotide\nas percent of total mutated reads')
    PL.show(block=False)
    saveFig('i1_rtp_nt')
Example #12
Source File: review_analysis.py From yelp with GNU Lesser General Public License v2.1 | 5 votes |
def simple_lineal_regression(file_path):
    """Fit a linear regression of star rating on review_count and plot it.

    Loads review records from *file_path*, uses the first 80% as the
    training set and the remaining 20% as the test set, prints the fitted
    coefficients, RMSE and variance score, then plots test points and the
    fitted line.
    """
    records = ReviewETL.load_file(file_path)
    data = [[record['review_count']] for record in records]
    ratings = [record['stars'] for record in records]
    # FIX(naming): this is the TRAINING-set size (80% of the data); the old
    # name `num_testing_records` was misleading.
    num_training_records = int(len(ratings) * 0.8)
    training_data = data[:num_training_records]
    testing_data = data[num_training_records:]
    training_ratings = ratings[:num_training_records]
    testing_ratings = ratings[num_training_records:]
    # Create linear regression object
    regr = linear_model.LinearRegression()
    # Train the model using the training sets
    regr.fit(training_data, training_ratings)
    # The coefficients
    print('Coefficients: \n', regr.coef_)
    print('Intercept: \n', regr.intercept_)
    # The root mean square error
    print("RMSE: %.2f" %
          (np.mean((regr.predict(testing_data) - testing_ratings) ** 2)) ** 0.5)
    print('Variance score: %.2f' % regr.score(testing_data, testing_ratings))
    # Plot outputs
    import pylab as pl
    pl.scatter(testing_data, testing_ratings, color='black')
    pl.plot(testing_data, regr.predict(testing_data), color='blue', linewidth=3)
    pl.xticks(())
    pl.yticks(())
    pl.show()
Example #13
Source File: build_diagram.py From NEUCOGAR with GNU General Public License v2.0 | 5 votes |
def spikes_diagram(ts, gids, name, path):
    """
    Function for making spike diagrams
    :param ts: (list) times
    :param gids: (list) global IDs of neurons
    :param name: (str) name of brain part
    :param path: (str) path to save results
    :return: None
    """
    pylab.figure()
    color_marker = "."
    color_bar = "blue"
    color_edge = "black"
    ylabel = "Neuron ID"
    hist_binwidth = 5.0
    # Upper panel: spike raster (time vs neuron id).
    location = pylab.axes([0.1, 0.3, 0.85, 0.6])
    pylab.plot(ts, gids, color_marker)
    pylab.ylabel(ylabel)
    xlim = pylab.xlim()
    pylab.xticks([])
    # Lower panel: population firing-rate histogram.
    pylab.axes([0.1, 0.1, 0.85, 0.17])
    t_bins = numpy.arange(numpy.amin(ts), numpy.amax(ts), hist_binwidth)
    n, bins = pylab.histogram(ts, bins=t_bins)
    num_neurons = len(numpy.unique(gids))
    # Spikes per bin -> rate in Hz (bin width is in ms).
    heights = (1000 * n / (hist_binwidth * num_neurons))
    # FixMe t_bins[:-1] should work without cutting the end value
    pylab.bar(t_bins[:-1], heights, width=hist_binwidth, color=color_bar, edgecolor=color_edge)
    pylab.yticks([int(a) for a in numpy.linspace(0.0, int(max(heights) * 1.1) + 5, 4)])
    pylab.ylabel("Rate (Hz)")
    pylab.xlabel("Time (ms)")
    pylab.grid(True)
    # Back to the raster axes for title and matching x-limits.
    pylab.axes(location)
    pylab.title(name)
    pylab.xlim(xlim)
    pylab.draw()
    pylab.savefig("{0}{1}.png".format(path, name), dpi=dpi_n, format='png')
    pylab.close()
Example #14
Source File: plot_old_new.py From SelfTarget with MIT License | 5 votes |
def runAnalysis():
    """Box-plot within- vs between-library KL divergences from the summary table."""
    data = pd.read_csv(getHighDataDir() + '/old_new_kl_summaries.txt', sep='\t').fillna(-1.0)
    kl_cols = [x for x in data.columns if 'KL' in x and 'Class KL' not in x and 'Old v Old' not in x]
    max_kl = 9
    PL.figure(figsize=(2.5, 4))
    bps = []
    box_types = [('C2', 'Within Library'), ('C0', 'Between Library')]
    for i, (clr, box_type) in enumerate(box_types):
        col_box_data = [data[col] for col in kl_cols if renameCol(col) == box_type]
        # Interleave the two box types: positions 1,3,5 and 2,4,6.
        pos = [2 * x + i + 1 for x in range(len(col_box_data))]
        print('KL', box_type, np.median(col_box_data, axis=1))
        bps.append(PL.boxplot(col_box_data, positions=pos, patch_artist=True,
                              boxprops=dict(facecolor=clr), showfliers=False))
    PL.xticks([1.5, 3.5, 5.5], ['Same\ngRNA', 'Other\ngRNA', 'Other\ngRNA\n(Rpt)'])
    # Vertical separators between the three comparison groups.
    PL.plot([2.5, 2.5], [0, max_kl], '-', color='silver')
    PL.plot([4.5, 4.5], [0, max_kl], '-', color='silver')
    PL.xlim((0.5, 6.5))
    PL.ylim((0, max_kl))
    PL.ylabel('KL')
    PL.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.25)
    PL.legend([bp["boxes"][0] for bp in bps], [x[1] for x in box_types], loc='upper left')
    PL.show(block=False)
    saveFig('kl_compare_old_new_KL')
Example #15
Source File: plot_i1_summaries.py From SelfTarget with MIT License | 5 votes |
def plotDominantBars(all_result_outputs, label=''):
    """Bar-chart percent of gRNAs whose dominant indel is I1, split by guide nucleotide."""
    pie_labels = ['I1_Rpt Left Reads - NonAmb', 'Ambiguous Rpt Reads',
                  'I1_Rpt Right Reads - NonAmb', 'I1_NonRpt Reads']
    mci_merged_data = mergeSamples(all_result_outputs, [], data_label='i1IndelData')
    # "Equal MCI": same most-common indel in all three replicates.
    mci_merged_data['Equal MCI'] = (mci_merged_data['Most Common Indel'] == mci_merged_data['Most Common Indel 2']) \
        & (mci_merged_data['Most Common Indel'] == mci_merged_data['Most Common Indel 3'])
    mci_merged_data['Is Dominant I1'] = (mci_merged_data['Equal MCI'] & (mci_merged_data['MCI Type'] == 'I1'))
    oligo_data = pd.read_csv(getHighDataDir() + '/ST_June_2017/data/self_target_oligos_details_with_pam_details.csv', sep='\t')
    remove_under = lambda x: x.replace('_', '')
    oligo_data['Oligo Id'] = oligo_data['ID'].apply(remove_under)
    merged_mci_data = pd.merge(mci_merged_data, oligo_data[['Oligo Id', 'Guide']], how='inner', on='Oligo Id')
    nt_perc_i1, cnt_labels = [], []
    nts = 'ATGC'
    for nt in nts:
        # Guide position -4 is the nucleotide immediately left of the cut site.
        is_nt = lambda guide: (guide[-4] == nt)
        nt_data = merged_mci_data.loc[merged_mci_data['Guide'].apply(is_nt)]
        nt_perc_i1.append(sum(nt_data['Is Dominant I1']) * 100.0 / len(nt_data))
        cnt_labels.append('%d/%d' % (sum(nt_data['Is Dominant I1']), len(nt_data)))
    PL.figure()
    PL.bar(range(4), nt_perc_i1, width=0.8)
    for i, cnt in enumerate(cnt_labels):
        PL.text(i - 0.3, nt_perc_i1[i] + 5.0, cnt)
    PL.xticks(range(4), [x for x in nts])
    PL.xlabel('Nucleotide on Left of cut-site')
    PL.ylabel('Percent gRNAs with single nucleotide insertion\nas most common indel in all 3 replicates')
    PL.show(block=False)
    saveFig('I1_bar_3_rep')
Example #16
Source File: plot_mh_analysis.py From SelfTarget with MIT License | 5 votes |
def plotGCContent(all_result_outputs, label=''):
    """Box-plot MH-deletion read percentages against the GC content of the microhomology.

    Merges per-sample indel tables, sums read counts across samples, and (for
    MH length 9, distance 0-10) plots percent-of-reads distributions grouped
    by GC content.
    """
    # Merge data across samples
    unique_cols = ['Oligo ID', 'Indel', 'GC Content', 'MH Len', 'MH Dist']
    datas = [x[0]['Data'][unique_cols + ['Indel Reads', 'Non-Null Reads']] for x in all_result_outputs]
    merged_data = datas[0]
    for i, data in enumerate(datas[1:]):
        merged_data = pd.merge(merged_data, data, on=unique_cols, suffixes=('', '%d' % (i + 2)), how='outer')
    suffix = lambda i: '%d' % (i + 1) if i > 0 else ''
    merged_data['Indel Reads Sum'] = merged_data[['Indel Reads' + suffix(i) for i in range(len(datas))]].sum(axis=1)
    merged_data['Non-Null Reads Sum'] = merged_data[['Non-Null Reads' + suffix(i) for i in range(len(datas))]].sum(axis=1)
    # Compute mean regression lines across samples for each MH length
    mean_lines = {}
    for mh_len in range(2, 16):
        if mh_len not in all_result_outputs[0][0]['RegrLines']:
            continue
        regr_lines = [x[0]['RegrLines'][mh_len][:2] for x in all_result_outputs]
        mean_lines[mh_len] = np.mean(regr_lines, axis=0)
    # Restrict to only MH dist in (0,10) and adjust for mh len-dist relationship
    for mh_len in [9]:
        compute_resid = lambda row: row['Perc Reads']  # - getRegrValue(row['MH Len'],row['MH Dist'],mean_lines)
        sel_data = merged_data.loc[(merged_data['MH Len'] == mh_len)
                                   & (merged_data['MH Dist'] >= 0)
                                   & (merged_data['MH Dist'] <= 10)]
        sel_data['Perc Reads'] = sel_data['Indel Reads Sum'] * 100.0 / sel_data['Non-Null Reads Sum']
        sel_data['Perc Reads Residual'] = sel_data.apply(compute_resid, axis=1)
        PL.figure(figsize=(4, 4))
        gcs = sel_data['GC Content'].unique(); gcs.sort()
        boxdata_lk = {gc: sel_data.loc[sel_data['GC Content'] == gc]['Perc Reads Residual'] for gc in gcs}
        gcs = [gc for gc in gcs if len(boxdata_lk[gc]) > 20]  # Limit to GC with at least 20 data points
        boxdata = [boxdata_lk[gc] for gc in gcs]
        print([len(x) for x in boxdata])
        PL.boxplot(boxdata)
        PL.ylabel('Percent total mutated reads of MH-mediated deletion')
        PL.xlabel('GC content of microhomologous sequence')
        # FIX: the title literal was corrupted by a stray line break inside the
        # string ("at max 10 \n distance)"); reconstructed as one clean line.
        PL.title('Microhomology of length %d\n(at max 10 distance)' % mh_len)
        PL.xticks(range(1, len(gcs) + 1), gcs)
        PL.show(block=False)
        saveFig('gc_content_mh%d' % mh_len)
Example #17
Source File: plot_kl_analysis.py From SelfTarget with MIT License | 5 votes |
def plotHeatMap(data, col='KL without null', label=''):
    """Heat-map the median pairwise KL divergence between all sample pairs."""
    # Compute and collate medians
    sel_cols = [x for x in data.columns if col in x]
    cmp_meds = data[sel_cols].median(axis=0)
    samples = sortSampleNames(getUniqueSamples(sel_cols))
    cell_lines = ['CHO', 'E14TG2A', 'BOB', 'RPE1', 'HAP1', 'K562', 'eCAS9', 'TREX2']
    # Order samples by cell-line order above.
    sample_idxs = [(cell_lines.index(parseSampleName(x)[0]), x) for x in getUniqueSamples(sel_cols)]
    sample_idxs.sort()
    samples = [x[1] for x in sample_idxs]
    N = len(samples)
    meds = np.zeros((N, N))
    for colname in sel_cols:
        dir1, dir2 = getDirsFromFilename(colname.split('$')[-1])
        idx1, idx2 = samples.index(dir1), samples.index(dir2)
        # Fill symmetrically — KL comparisons are stored once per pair.
        meds[idx1, idx2] = cmp_meds[colname]
        meds[idx2, idx1] = cmp_meds[colname]
    for i in range(N):
        print(' '.join(['%.2f' % x for x in meds[i, :]]))
    print(np.median(meds[:, :-4], axis=0))
    # Display in Heatmap
    PL.figure(figsize=(5, 5))
    PL.imshow(meds, cmap='hot_r', vmin=0.0, vmax=3.0, interpolation='nearest')
    PL.colorbar()
    PL.xticks(range(N))
    PL.yticks(range(N))
    PL.title("Median KL")  # between %d mutational profiles (for %s with >%d mutated reads)" % (col, len(data), label, MIN_READS))
    ax1 = PL.gca()
    ax1.set_yticklabels([getSimpleName(x) for x in samples], rotation='horizontal')
    ax1.set_xticklabels([getSimpleName(x) for x in samples], rotation='vertical')
    PL.subplots_adjust(left=0.25, right=0.95, top=0.95, bottom=0.25)
    PL.show(block=False)
    saveFig('median_kl_heatmap_cell_lines')
Example #18
Source File: plot.py From SelfTarget with MIT License | 5 votes |
def plotBoxPlotSummary(all_result_outputs, label='', data_label='', y_label='', plot_label='', cl_order=[]):
    """Box-plot one distribution per sample, optionally reordered by cell line."""
    data_values = [x[0][data_label][0].values for x in all_result_outputs]
    #sample_names = [getSimpleName(x[1]) + '\n(Median reads = %d)' % x[0][data_label][1] for x in all_result_outputs]
    sample_names = [getSimpleName(x[1]) for x in all_result_outputs]
    if len(cl_order) > 0:
        # Reorder samples to follow the requested cell-line ordering.
        cell_lines = [' '.join(x.split()[:-2]) for x in sample_names]
        print(cell_lines)
        reordered_data, reordered_sample_names = [], []
        for cell_line in cl_order:
            for i, cline in enumerate(cell_lines):
                if cline == cell_line:
                    reordered_data.append(data_values[i])
                    reordered_sample_names.append(sample_names[i])
        sample_names = reordered_sample_names
        data_values = reordered_data
    PL.figure(figsize=(5, 5))
    for i, dvs in enumerate(data_values):
        print(np.median(dvs))
        PL.boxplot([dvs], positions=[i], showfliers=True, sym='.', widths=0.8)
    PL.xticks(range(len(sample_names)), sample_names, rotation='vertical')
    PL.xlim((-0.5, len(sample_names) - 0.5))
    PL.ylim((0, 5))
    PL.ylabel(y_label)
    PL.title(label)
    PL.subplots_adjust(bottom=0.3)
    PL.show(block=False)
    saveFig('%s_%s' % (plot_label, sanitizeLabel(label)))
Example #19
Source File: iris_recognition.py From GmdhPy with MIT License | 5 votes |
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render confusion matrix *cm* with iris class names on both axes."""
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    # NOTE(review): relies on a module-level `iris` dataset being in scope.
    marks = np.arange(len(iris.target_names))
    plt.xticks(marks, iris.target_names, rotation=45)
    plt.yticks(marks, iris.target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
Example #20
Source File: eigenfaces.py From machine-learning with GNU General Public License v3.0 | 5 votes |
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of portraits"""
    pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for idx in range(n_row * n_col):
        pl.subplot(n_row, n_col, idx + 1)
        # Each flattened image is reshaped back to its (h, w) portrait.
        pl.imshow(images[idx].reshape((h, w)), cmap=pl.cm.gray)
        pl.title(titles[idx], size=12)
        pl.xticks(())
        pl.yticks(())


# plot the result of the prediction on a portion of the test set
Example #21
Source File: View.py From Deep-Spying with Apache License 2.0 | 5 votes |
def plot_sensor_data_and_label(self, title, timestamp, x, y, z, label_timestamp, label=None):
    """Plot x/y/z sensor traces over time with dashed vertical lines at keystroke timestamps."""
    if not self.to_save and not self.to_show:
        return
    self.big_figure()
    pylab.plot(timestamp, x, color='r', label='x')
    pylab.plot(timestamp, y, color='g', label='y')
    pylab.plot(timestamp, z, color='b', label='z')
    for i in range(len(label_timestamp)):
        # Only the first marker carries the legend entry, and only when labels exist.
        if label is not None and i == 0:
            pylab.axvline(label_timestamp[i], color="k", label="keystroke", ls='dashed')
        else:
            pylab.axvline(label_timestamp[i], color="k", ls='dashed')
    pylab.legend()
    pylab.title(title)
    pylab.xlabel('Time')
    pylab.ylabel('Amplitude')
    if label:
        pylab.xticks(label_timestamp, label)
Example #22
Source File: bandstructure.py From pyiron with BSD 3-Clause "New" or "Revised" License | 5 votes |
def plot(self):
    """Plot the band structure, labelling high-symmetry points on the x axis."""
    import pylab as plt
    tick_positions = [self.q_dist[i] for i in self.q_ticks]
    tick_labels = self.q_labels
    # Replace symbols that have a pylab/LaTeX translation (updates self.q_labels in place,
    # as the original did — tick_labels aliases it).
    for i, q in enumerate(tick_labels):
        if q in self.translate_to_pylab:
            tick_labels[i] = self.translate_to_pylab[q]
    plt.plot(self.q_dist, self.ew_list)
    plt.xticks(tick_positions, tick_labels)
    # Vertical guide line at every high-symmetry point.
    for x in tick_positions:
        plt.axvline(x, color="black")
    return plt
Example #23
Source File: spectrogram.py From spectrum with BSD 3-Clause "New" or "Revised" License | 5 votes |
def plot(self, filename=None, vmin=None, vmax=None, cmap='jet_r'):
    """Render the spectrogram as an image; save to *filename* when given."""
    import pylab
    pylab.clf()
    pylab.imshow(-np.log10(self.results[self._start_y:, :]),
                 origin="lower", aspect="auto", cmap=cmap, vmin=vmin, vmax=vmax)
    pylab.colorbar()
    # Fix xticks: convert pixel positions to seconds.
    XMAX = float(self.results.shape[1])  # The max integer on xaxis
    xpos = list(range(0, int(XMAX), int(XMAX / 5)))
    xx = [int(this * 100) / 100 for this in np.array(xpos) / XMAX * self.duration]
    pylab.xticks(xpos, xx, fontsize=16)
    # Fix yticks: convert pixel positions to Hz.
    YMAX = float(self.results.shape[0])  # The max integer on xaxis
    ypos = list(range(0, int(YMAX), int(YMAX / 5)))
    yy = [int(this) for this in np.array(ypos) / YMAX * self.sampling]
    pylab.yticks(ypos, yy, fontsize=16)
    #pylab.yticks([1000,2000,3000,4000], [5500,11000,16500,22000], fontsize=16)
    #pylab.title("%s echoes" % filename.replace(".png", ""), fontsize=25)
    pylab.xlabel("Time (seconds)", fontsize=25)
    pylab.ylabel("Frequence (Hz)", fontsize=25)
    pylab.tight_layout()
    if filename:
        pylab.savefig(filename)
Example #24
Source File: active_learning.py From plastering with MIT License | 5 votes |
def plot_confusion_matrix(self, label_test, fn_test):
    """Predict *fn_test* with the trained classifier and plot a row-normalised confusion matrix.

    Each cell shows the row-normalised fraction and, in parentheses, the raw count;
    the title includes overall accuracy.
    """
    fn_preds = self.clf.predict(fn_test)
    acc = accuracy_score(label_test, fn_preds)
    cm_ = CM(label_test, fn_preds)
    # FIX: `np.float` was a deprecated alias of the builtin and is removed in
    # NumPy >= 1.24; use the builtin float directly (identical behavior).
    cm = normalize(cm_.astype(float), axis=1, norm='l1')
    fig = pl.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(cm)
    fig.colorbar(cax)
    # Annotate every cell with "fraction(count)".
    for x in range(len(cm)):
        for y in range(len(cm)):
            ax.annotate(str("%.3f(%d)" % (cm[x][y], cm_[x][y])), xy=(y, x),
                        horizontalalignment='center',
                        verticalalignment='center',
                        fontsize=10)
    # Only classes present in labels or predictions appear on the axes.
    cm_cls = np.unique(np.hstack((label_test, fn_preds)))
    cls = []
    for c in cm_cls:
        cls.append(mapping[c])
    pl.yticks(range(len(cls)), cls)
    pl.ylabel('True label')
    pl.xticks(range(len(cls)), cls)
    pl.xlabel('Predicted label')
    pl.title('Mn Confusion matrix (%.3f)' % acc)
    pl.show()
Example #25
Source File: wordfreq_app.py From luscan-devel with GNU General Public License v2.0 | 5 votes |
def plot_word_freq_dist(text):
    """Plot the cumulative frequency percentage of the 50 most frequent words in *text*."""
    fd = text.vocab()
    # FIX: slicing `fd.keys()` directly fails on Python 3 (dict views are not
    # subscriptable); wrapping in list() is identical on Python 2 where keys()
    # already returned a list (frequency-ordered for nltk FreqDist).
    samples = list(fd.keys())[:50]
    values = [fd[sample] for sample in samples]
    # Convert counts to cumulative percentages of the total token count.
    values = [sum(values[:i + 1]) * 100.0 / fd.N() for i in range(len(values))]
    pylab.title(text.name)
    pylab.xlabel("Samples")
    pylab.ylabel("Cumulative Percentage")
    pylab.plot(values)
    pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90)
    pylab.show()
Example #26
Source File: dependencygraph.py From luscan-devel with GNU General Public License v2.0 | 5 votes |
def malt_demo(nx=False):
    """
    A demonstration of the result of reading a dependency version of
    the first sentence of the Penn Treebank.
    """
    dg = DependencyGraph("""Pierre NNP 2 NMOD
Vinken NNP 8 SUB
, , 2 P
61 CD 5 NMOD
years NNS 6 AMOD
old JJ 2 NMOD
, , 2 P
will MD 0 ROOT
join VB 8 VC
the DT 11 NMOD
board NN 9 OBJ
as IN 9 VMOD
a DT 15 NMOD
nonexecutive JJ 15 NMOD
director NN 12 PMOD
Nov. NNP 9 VMOD
29 CD 16 NMOD
. . 9 VMOD
""")
    tree = dg.tree()
    print(tree.pprint())
    if nx:
        # currently doesn't work
        import networkx as NX
        import pylab as P
        g = dg.nx_graph()
        g.info()
        pos = NX.spring_layout(g, dim=1)
        NX.draw_networkx_nodes(g, pos, node_size=50)
        #NX.draw_networkx_edges(g, pos, edge_color='k', width=8)
        NX.draw_networkx_labels(g, pos, dg.nx_labels)
        P.xticks([])
        P.yticks([])
        P.savefig('tree.png')
        P.show()
Example #27
Source File: c10_20_6_figures.py From Python-for-Finance-Second-Edition with MIT License | 5 votes |
def graph(text, text2=''):
    """Draw a bare 30x20 axes with a short vertical line and annotation text."""
    # NOTE(review): depends on a module-level `x` for all positions — confirm it is set.
    pl.xticks(())
    pl.yticks(())
    pl.xlim(0, 30)
    pl.ylim(0, 20)
    pl.plot([x, x], [0, 3])
    pl.text(x, -2, "X")
    pl.text(0, x, "X")
    pl.text(x, x * 1.7, text, ha='center', va='center', size=10, alpha=.5)
    pl.text(-5, 10, text2, size=25)
Example #28
Source File: probability.py From luscan-devel with GNU General Public License v2.0 | 4 votes |
def plot(self, *args, **kwargs):
    """
    Plot the given samples from the conditional frequency distribution.
    For a cumulative plot, specify cumulative=True.
    (Requires Matplotlib to be installed.)

    :param samples: The samples to plot
    :type samples: list
    :param title: The title for the graph
    :type title: str
    :param conditions: The conditions to plot (default is all)
    :type conditions: list
    """
    try:
        import pylab
    except ImportError:
        raise ValueError('The plot function requires the matplotlib package (aka pylab).'
                         'See http://matplotlib.sourceforge.net/')

    cumulative = _get_kwarg(kwargs, 'cumulative', False)
    conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
    title = _get_kwarg(kwargs, 'title', '')
    samples = _get_kwarg(kwargs, 'samples',
                         sorted(set(v for c in conditions
                                    for v in self[c])))  # this computation could be wasted
    if not "linewidth" in kwargs:
        kwargs["linewidth"] = 2

    # One line per condition, all over the same sample ordering.
    for condition in conditions:
        if cumulative:
            freqs = list(self[condition]._cumulative_frequencies(samples))
            ylabel = "Cumulative Counts"
            legend_loc = 'lower right'
        else:
            freqs = [self[condition][sample] for sample in samples]
            ylabel = "Counts"
            legend_loc = 'upper right'
        # percents = [f * 100 for f in freqs] only in ConditionalProbDist?
        kwargs['label'] = str(condition)
        pylab.plot(freqs, *args, **kwargs)

    pylab.legend(loc=legend_loc)
    pylab.grid(True, color="silver")
    pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
    if title:
        pylab.title(title)
    pylab.xlabel("Samples")
    pylab.ylabel(ylabel)
    pylab.show()
Example #29
Source File: probability.py From luscan-devel with GNU General Public License v2.0 | 4 votes |
def plot(self, *args, **kwargs):
    """
    Plot samples from the frequency distribution
    displaying the most frequent sample first.  If an integer
    parameter is supplied, stop after this many samples have been
    plotted.  If two integer parameters m, n are supplied, plot a
    subset of the samples, beginning with m and stopping at n-1.
    For a cumulative plot, specify cumulative=True.
    (Requires Matplotlib to be installed.)

    :param title: The title for the graph
    :type title: str
    :param cumulative: A flag to specify whether the plot is cumulative (default = False)
    :type title: bool
    """
    try:
        import pylab
    except ImportError:
        raise ValueError('The plot function requires the matplotlib package (aka pylab). '
                         'See http://matplotlib.sourceforge.net/')

    # With no positional args, plot every sample.
    if len(args) == 0:
        args = [len(self)]
    samples = list(islice(self, *args))

    cumulative = _get_kwarg(kwargs, 'cumulative', False)
    if cumulative:
        freqs = list(self._cumulative_frequencies(samples))
        ylabel = "Cumulative Counts"
    else:
        freqs = [self[sample] for sample in samples]
        ylabel = "Counts"
    # percents = [f * 100 for f in freqs] only in ProbDist?

    pylab.grid(True, color="silver")
    if not "linewidth" in kwargs:
        kwargs["linewidth"] = 2
    if "title" in kwargs:
        pylab.title(kwargs["title"])
        del kwargs["title"]
    pylab.plot(freqs, **kwargs)
    pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
    pylab.xlabel("Samples")
    pylab.ylabel(ylabel)
    pylab.show()
Example #30
Source File: plot_pie_indel_summaries.py From SelfTarget with MIT License | 4 votes |
def plotD1(all_result_outputs, label=''):
    """Pie-chart the repeat composition of dominant single-nucleotide deletions,
    then bar-chart D1 dominance by the nucleotide pair flanking the cut site."""
    mci_merged_data = mergeSamples(all_result_outputs, [], data_label='perOligoMCI')
    # "Equal MCI": same most-common indel across all three replicates.
    mci_merged_data['Equal MCI'] = (mci_merged_data['Most Common Indel'] == mci_merged_data['Most Common Indel 2']) \
        & (mci_merged_data['Most Common Indel'] == mci_merged_data['Most Common Indel 3'])
    mci_common = mci_merged_data.loc[mci_merged_data['Equal MCI']]
    pie_vals, pie_labels = [], []
    dmci_data = mci_common.loc[(mci_common['MCI Type'] == 'D1')]  # Note: type check discards equally most common indels
    spans_cutsite = lambda indel: tokFullIndel(indel)[2]['L'] < -1 and tokFullIndel(indel)[2]['R'] > 0
    # One pie slice per repeated nucleotide removed from an NT|NT context.
    for nt in 'ATGC':
        is_mh = lambda alt_seq: len(alt_seq) >= 2 and alt_seq == (len(alt_seq) * nt)
        num_repeat_nt = len(dmci_data.loc[dmci_data['Altered Sequence'].apply(is_mh)
                                          & dmci_data['Most Common Indel'].apply(spans_cutsite)])
        pie_vals.append(num_repeat_nt * 100.0 / len(dmci_data))
        print(num_repeat_nt)
        pie_labels.append('Removal of %s\nfrom %s|%s' % (nt, nt, nt))
    # Remainder: deletions not in a repeat context or not spanning the cut site.
    is_non_repeat = lambda seq: len(seq) < 2 or seq != (seq[0] * len(seq))
    num_non_repeat = len(dmci_data.loc[dmci_data['Altered Sequence'].apply(is_non_repeat)
                                       | ~dmci_data['Most Common Indel'].apply(spans_cutsite)])
    pie_vals.append(num_non_repeat * 100.0 / len(dmci_data))
    print(num_non_repeat)
    pie_labels.append('Removal from non-repeat')
    PL.figure(figsize=(4, 4))
    PL.pie(pie_vals, labels=pie_labels, autopct='%.1f', labeldistance=1.1,
           counterclock=False, colors=OLD_COLORS)
    PL.title('Size 1 deletions that are\n"most common" for their gRNA in all 3 replicates\n(%d gRNAs from %d total)' % (len(dmci_data), len(mci_merged_data)))
    PL.show(block=False)
    saveFig('pie_chart_D1')

    # Second chart: percent of gRNAs with a dominant D1, by flanking nucleotide pair.
    oligo_data = pd.read_csv(getHighDataDir() + '/ST_June_2017/data/self_target_oligos_details_with_pam_details.csv', sep='\t')
    remove_under = lambda x: x.replace('_', '')
    oligo_data['Oligo Id'] = oligo_data['ID'].apply(remove_under)
    merged_mci_data = pd.merge(mci_merged_data, oligo_data[['Oligo Id', 'Guide']], how='inner', on='Oligo Id')
    print(len(merged_mci_data))
    nt_dbl_perc_d1, cnt_labels = [], []
    is_d1 = lambda indel: (indel.split('_')[0] == 'D1')
    non_dbl_nt = lambda row: row['Guide'][-4] != row['Guide'][-3]
    nts = 'ATGC'
    for nt in nts:
        double_nt = lambda row: row['Guide'][-4:-2] == (nt + nt)
        dbl_data = merged_mci_data.loc[merged_mci_data.apply(double_nt, axis=1)]
        num_dbl_d1 = sum(dbl_data['Most Common Indel'].apply(is_d1)
                         & dbl_data['Equal MCI']
                         & (dbl_data['Oligo Id'] != 'Oligo28137'))  # Oligo28137: Corner case where a guide has CT|T and loses the C
        nt_dbl_perc_d1.append(num_dbl_d1 * 100.0 / len(dbl_data))
        cnt_labels.append('%d/%d' % (num_dbl_d1, len(dbl_data)))
        print(len(dbl_data))
    non_dbl_data = merged_mci_data.loc[merged_mci_data.apply(non_dbl_nt, axis=1)]
    print(len(non_dbl_data))
    num_non_dbl_d1 = sum(non_dbl_data['Most Common Indel'].apply(is_d1) & non_dbl_data['Equal MCI'])
    nt_dbl_perc_d1.append(num_non_dbl_d1 * 100.0 / len(non_dbl_data))
    cnt_labels.append('%d/%d' % (num_non_dbl_d1, len(non_dbl_data)))
    PL.figure()
    PL.bar(range(5), nt_dbl_perc_d1, width=0.8)
    for i, cnt in enumerate(cnt_labels):
        PL.text(i - 0.3, nt_dbl_perc_d1[i] + 5.0, cnt)
    PL.xticks(range(5), ['%s' % x * 2 for x in nts] + ['Other'])
    PL.ylim((0, 40))
    PL.xlabel('Nucleotides on either side of cut site')
    PL.ylabel('Percent gRNAs with single nucleotide deletion\nas most common indel in all 3 replicates')
    PL.show(block=False)
    saveFig('D1_bar_3_rep')