Python past.builtins.xrange() Examples
The following are 30 code examples of past.builtins.xrange(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module past.builtins, or try the search function.
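Before the examples, a minimal usage sketch of what past.builtins.xrange provides: a lazy integer sequence with Python 2 xrange semantics that also runs under Python 3, where the past package maps it to the built-in range. The variable names are illustrative only.

from past.builtins import xrange

# xrange yields values lazily instead of materialising a list,
# so iterating over a large range stays memory-cheap.
total = 0
for i in xrange(1000000):
    total += i
print(total)  # 499999500000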
Example #1
Source File: plot.py From pypath with GNU General Public License v3.0 | 6 votes |
def htp_calculations(self):
    if not len(self.htdata):
        self.refc = collections.Counter(
            common.flat_list((r.pmid for r in e['references'])
                             for e in self.pp.graph.es))

        # percentage of high throughput interactions
        htsrcs_prev = set(self.pp.sources)
        self.prg = progress.Progress(self.upper - self.lower,
                                     'Analysing HTP refs/interactions', 1)

        for htlim in reversed(xrange(self.lower, self.upper + 1)):
            self.prg.step()
            self.get_point(htlim)
            htsrcs_new = self.htdata[htlim]['htsrcs']
            diff = htsrcs_new - htsrcs_prev
            htsrcs_prev = htsrcs_new

            if len(diff):
                sys.stdout.write(
                    '\n\t:: %s: no references with more interaction than %u\n'
                    % (', '.join(list(diff)), htlim - 1))
                sys.stdout.flush()
Example #2
Source File: finder.py From barf-project with BSD 2-Clause "Simplified" License | 6 votes |
def _build_gadgets(self, gadget_tree_root):
    """Return a gadgets list.
    """
    node_list = self._build_gadgets_rec(gadget_tree_root)

    return [RawGadget(n) for n in node_list]

    # TODO: Update x86 gadgets tests before uncommenting the following.
    # (this change breaks x86 gadgets tests.)
    # gadgets = []
    #
    # for node in node_list:
    #     for i in xrange(len(node)):
    #         gadgets.append(RawGadget(node[i:]))
    #
    # return gadgets
Example #3
Source File: afc.py From pymobiledevice with GNU General Public License v3.0 | 6 votes |
def file_write(self, handle, data):
    MAXIMUM_WRITE_SIZE = 1 << 15
    hh = struct.pack("<Q", handle)
    segments = int(len(data) / MAXIMUM_WRITE_SIZE)
    try:
        for i in xrange(segments):
            self.dispatch_packet(
                AFC_OP_WRITE,
                hh + data[i * MAXIMUM_WRITE_SIZE:(i + 1) * MAXIMUM_WRITE_SIZE],
                this_length=48)
            s, d = self.receive_data()
            if s != AFC_E_SUCCESS:
                self.logger.error("file_write error: %d", s)
                break
        if len(data) % MAXIMUM_WRITE_SIZE:
            self.dispatch_packet(
                AFC_OP_WRITE,
                hh + data[segments * MAXIMUM_WRITE_SIZE:],
                this_length=48)
            s, d = self.receive_data()
    except:
        self.lockdown = LockdownClient()
        self.service = self.lockdown.startService(self.serviceName)
        self.file_write(handle, data)
    return s
Example #4
Source File: util.py From PyAthena with MIT License | 6 votes |
def get_chunks(df, chunksize=None):
    rows = len(df)
    if rows == 0:
        return
    if chunksize is None:
        chunksize = rows
    elif chunksize <= 0:
        raise ValueError("Chunk size argument must be greater than zero")
    chunks = int(rows / chunksize) + 1
    for i in xrange(chunks):
        start_i = i * chunksize
        end_i = min((i + 1) * chunksize, rows)
        if start_i >= end_i:
            break
        yield df[start_i:end_i]
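A hypothetical quick check of the generator above (get_chunks only relies on len() and slicing, so a plain list stands in for the DataFrame here):

chunks = list(get_chunks(list(xrange(10)), chunksize=4))
# -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]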
Example #5
Source File: proteomicsdb.py From pypath with GNU General Public License v3.0 | 6 votes |
def get_proteins(self,
                 tissue_id,
                 calculation_method=0,
                 swissprot_only=1,
                 no_isoform=1):
    '''
    '''
    for i in xrange(3):
        self.query(
            'proteinpertissue',
            (tissue_id, calculation_method, swissprot_only, no_isoform,
             self.output_format),
            large=True)
        if hasattr(self.result, 'read'):
            break
Example #6
Source File: handleclient.py From B2HANDLE with Apache License 2.0 | 6 votes |
def __get_python_indices_for_key(self, key, list_of_entries):
    '''
    Finds the indices of all entries that have a specific type.

    Important: This method finds the python indices of the list of entries!
    These are not the Handle System index values!

    :param key: The key (Handle Record type).
    :param list_of_entries: A list of the existing entries in which to
        find the indices.
    :return: A list of integers, the indices of the entries of type "key"
        in the given list.
    '''
    indices = []
    for i in xrange(len(list_of_entries)):
        if list_of_entries[i]['type'] == key:
            indices.append(i)
    return indices
Example #7
Source File: common.py From pypath with GNU General Public License v3.0 | 6 votes |
def gen_session_id(length=5):
    """Generates a random alphanumeric string.

    :arg int length:
        Optional, ``5`` by default. Specifies the length of the random
        string.

    :return:
        (*str*) -- Random alphanumeric string of the specified length.
    """
    abc = '0123456789abcdefghijklmnopqrstuvwxyz'

    return ''.join(random.choice(abc) for i in xrange(length))


# XXX: Are you sure this is the way to compute Simpson's index?
Example #8
Source File: plot.py From pypath with GNU General Public License v3.0 | 6 votes |
def sort(self):
    colcyc = itertools.cycle(list(self.palette))
    palcyc = [next(colcyc) for _ in xrange(len(self.x))]

    if self.order == 'x':
        self.ordr = np.array([self.x[i] for i in self.x.argsort()])
        self.palette = sns.color_palette(
            [palcyc[i] for i in self.x.argsort()])
    elif self.order == 'y':
        self.ordr = np.array([self.x[i] for i in self.y.argsort()])
        self.palette = sns.color_palette(
            [palcyc[i] for i in self.y.argsort()])
    elif len(set(self.order) & set(self.x)) == len(self.x):
        self.ordr = np.array(self.order)
        xl = list(self.x)
        self.palette = sns.color_palette(
            [palcyc[xl.index(i)] for i in self.ordr])
    else:
        self.ordr = self.x

    if self.desc:
        self.ordr = self.ordr[::-1]
        self.palette = sns.color_palette(list(self.palette)[::-1])
Example #9
Source File: test_primitives.py From boofuzz with GNU General Public License v2.0 | 6 votes |
def test_s_mirror(self):
    test_group_values = [b"a", b"bb", b"ccc", b"dddd"]
    s_initialize("test_s_mirror")
    s_size("data", output_format="ascii", fuzzable=False, name="size")
    s_mirror("size", name="size_mirror")
    with s_block("data"):
        s_static("<")
        s_group("group_start", values=test_group_values)
        s_static(">")
        s_static("hello")
        s_static("</")
        s_mirror("group_start", name="group_end")
        s_static(">")
    req = s_get("test_s_mirror")

    for _ in xrange(len(test_group_values)):
        s_mutate()
        group_start_value = req.names["group_start"].render()
        self.assertEqual(
            int(req.names["size"].render()),
            len("<{0}>hello</{0}>".format(group_start_value.decode("utf-8")))
        )
        self.assertEqual(req.names["group_end"].render(), group_start_value)
        self.assertEqual(req.names["size_mirror"].render(),
                         req.names["size"].render())
Example #10
Source File: ida_fuzz_library_extender.py From boofuzz with GNU General Public License v2.0 | 6 votes |
def find_ints(start_address):
    constants = []

    # loop over instruction heads in the segment
    for head in Heads(start_address, SegEnd(start_address)):
        # if it's code, check for cmp instruction
        if isCode(GetFlags(head)):
            mnem = GetMnem(head)
            op1 = int(GetOperandValue(head, 1))

            # if it's a cmp and its immediate value is unique, add it to the list
            if "cmp" in mnem and op1 not in constants:
                constants.append(op1)

    print("Found %d constant values used in compares." % len(constants))
    print("-----------------------------------------------------")
    for i in xrange(0, len(constants), 20):
        print(constants[i : i + 20])

    return constants
Example #11
Source File: utils.py From quantized-mesh-tile with MIT License | 5 votes |
def computeNormals(vertices, faces):
    numVertices = len(vertices)
    numFaces = len(faces)
    normalsPerFace = [None] * numFaces
    areasPerFace = [0.0] * numFaces
    normalsPerVertex = np.zeros(vertices.shape, dtype=vertices.dtype)

    for i in xrange(0, numFaces):
        face = faces[i]
        v0 = vertices[face[0]]
        v1 = vertices[face[1]]
        v2 = vertices[face[2]]
        normal = np.cross(c3d.subtract(v1, v0), c3d.subtract(v2, v0))

        area = triangleArea(v0, v1)
        areasPerFace[i] = area
        normalsPerFace[i] = normal

    for i in xrange(0, numFaces):
        face = faces[i]
        weightedNormal = [c * areasPerFace[i] for c in normalsPerFace[i]]
        for j in face:
            normalsPerVertex[j] = c3d.add(normalsPerVertex[j], weightedNormal)

    for i in xrange(0, numVertices):
        normalsPerVertex[i] = c3d.normalize(normalsPerVertex[i])

    return normalsPerVertex
Example #12
Source File: parameter_supervisor.py From recnet with MIT License | 5 votes |
def pass_structure_dict(self, prm_structure):

    if "net_size" in prm_structure:
        self.struct["net_size"] = prm_structure["net_size"]
        self.struct["hidden_layer"] = prm_structure["net_size"].__len__() - 2
    else:
        raise Warning("No net size")

    if "net_unit_type" in prm_structure:
        self.struct["net_unit_type"] = prm_structure["net_unit_type"]
        if prm_structure["net_unit_type"].__len__() != self.struct["net_size"].__len__():
            raise Warning("Net size and unit type have no equal length")
    else:
        raise Warning("No net unit type")

    if "net_act_type" in prm_structure:
        self.struct["net_act_type"] = prm_structure["net_act_type"]
        if prm_structure["net_act_type"].__len__() != self.struct["net_size"].__len__():
            raise Warning("Net size and act type have no equal length")
    else:
        self.struct["net_act_type"] = ['tanh' for i in xrange(prm_structure["net_size"].__len__())]

    if "net_arch" in prm_structure:
        self.struct["net_arch"] = prm_structure["net_arch"]
        if prm_structure["net_arch"].__len__() != self.struct["net_size"].__len__():
            raise Warning("Net size and net architecture have no equal length")
    else:
        raise Warning("No network architecture 'net_arch' ")

    self.struct["weight_numb"] = 0

    if "identity_func" in prm_structure:  # (currently corrupted)
        self.struct["identity_func"] = prm_structure["identity_func"]
    else:
        self.struct["identity_func"] = False


##### Passes parameters in optimize dictionary ########################################
Example #13
Source File: model.py From MemN2N-tensorflow with MIT License | 5 votes |
def train(self, data):
    N = int(math.ceil(len(data) / self.batch_size))
    cost = 0

    x = np.ndarray([self.batch_size, self.edim], dtype=np.float32)
    time = np.ndarray([self.batch_size, self.mem_size], dtype=np.int32)
    target = np.zeros([self.batch_size, self.nwords])  # one-hot-encoded
    context = np.ndarray([self.batch_size, self.mem_size])

    x.fill(self.init_hid)
    for t in xrange(self.mem_size):
        time[:, t].fill(t)

    if self.show:
        from utils import ProgressBar
        bar = ProgressBar('Train', max=N)

    for idx in xrange(N):
        if self.show:
            bar.next()
        target.fill(0)
        for b in xrange(self.batch_size):
            m = random.randrange(self.mem_size, len(data))
            target[b][data[m]] = 1
            context[b] = data[m - self.mem_size:m]

        _, loss, self.step = self.sess.run(
            [self.optim, self.loss, self.global_step],
            feed_dict={
                self.input: x,
                self.time: time,
                self.target: target,
                self.context: context})
        cost += np.sum(loss)

    if self.show:
        bar.finish()
    return cost / N / self.batch_size
Example #14
Source File: model.py From MemN2N-tensorflow with MIT License | 5 votes |
def test(self, data, label='Test'):
    N = int(math.ceil(len(data) / self.batch_size))
    cost = 0

    x = np.ndarray([self.batch_size, self.edim], dtype=np.float32)
    time = np.ndarray([self.batch_size, self.mem_size], dtype=np.int32)
    target = np.zeros([self.batch_size, self.nwords])  # one-hot-encoded
    context = np.ndarray([self.batch_size, self.mem_size])

    x.fill(self.init_hid)
    for t in xrange(self.mem_size):
        time[:, t].fill(t)

    if self.show:
        from utils import ProgressBar
        bar = ProgressBar(label, max=N)

    m = self.mem_size
    for idx in xrange(N):
        if self.show:
            bar.next()
        target.fill(0)
        for b in xrange(self.batch_size):
            target[b][data[m]] = 1
            context[b] = data[m - self.mem_size:m]
            m += 1
            if m >= len(data):
                m = self.mem_size

        loss = self.sess.run([self.loss],
                             feed_dict={self.input: x,
                                        self.time: time,
                                        self.target: target,
                                        self.context: context})
        cost += np.sum(loss)

    if self.show:
        bar.finish()
    return cost / N / self.batch_size
Example #15
Source File: model.py From MemN2N-tensorflow with MIT License | 5 votes |
def run(self, train_data, test_data):
    if not self.is_test:
        for idx in xrange(self.nepoch):
            train_loss = np.sum(self.train(train_data))
            test_loss = np.sum(self.test(test_data, label='Validation'))

            # Logging
            self.log_loss.append([train_loss, test_loss])
            self.log_perp.append([math.exp(train_loss), math.exp(test_loss)])

            state = {
                'perplexity': math.exp(train_loss),
                'epoch': idx,
                'learning_rate': self.current_lr,
                'valid_perplexity': math.exp(test_loss)
            }
            print(state)

            # Learning rate annealing
            if len(self.log_loss) > 1 and self.log_loss[idx][1] > self.log_loss[idx - 1][1] * 0.9999:
                self.current_lr = self.current_lr / 1.5
                self.lr.assign(self.current_lr).eval()
            if self.current_lr < 1e-5:
                break

            if idx % 10 == 0:
                self.saver.save(self.sess,
                                os.path.join(self.checkpoint_dir, "MemN2N.model"),
                                global_step=self.step.astype(int))
    else:
        self.load()

        valid_loss = np.sum(self.test(train_data, label='Validation'))
        test_loss = np.sum(self.test(test_data, label='Test'))

        state = {
            'valid_perplexity': math.exp(valid_loss),
            'test_perplexity': math.exp(test_loss)
        }
        print(state)
Example #16
Source File: test_primitives.py From boofuzz with GNU General Public License v2.0 | 5 votes |
def test_string(self):
    s_initialize("STRING UNIT TEST 1")
    s_string("foo", size=200, name="sized_string")

    req = s_get("STRING UNIT TEST 1")

    self.assertEqual(len(req.names["sized_string"].render()), 200)

    # check that string padding and truncation are working correctly.
    for i in xrange(0, 50):
        s_mutate()
        self.assertEqual(len(req.names["sized_string"].render()), 200)
Example #17
Source File: scada.py From boofuzz with GNU General Public License v2.0 | 5 votes |
def dnp3(data, control_code=b"\x44", src=b"\x00\x00", dst=b"\x00\x00"):
    num_packets = int(math.ceil(float(len(data)) / 250.0))
    packets = []

    for i in xrange(num_packets):
        packet_slice = data[i * 250 : (i + 1) * 250]

        p = b"\x05\x64"
        p += six.int2byte(len(packet_slice))
        p += control_code
        p += dst
        p += src

        chksum = struct.pack("<H", crc16(p))
        p += chksum

        num_chunks = int(math.ceil(float(len(packet_slice) / 16.0)))

        # insert the fragmentation flags / sequence number.
        # first frag: 0x40, last frag: 0x80
        frag_number = i

        if i == 0:
            frag_number |= 0x40

        if i == num_packets - 1:
            frag_number |= 0x80

        p += six.int2byte(frag_number)

        for x in xrange(num_chunks):
            # append each 16-byte chunk of this packet slice, each
            # preceded by its CRC-16 checksum
            chunk = packet_slice[x * 16 : (x + 1) * 16]
            chksum = struct.pack("<H", crc16(chunk))
            p += chksum + chunk

        packets.append(p)

    return packets
Example #18
Source File: bitmap.py From bitmap with MIT License | 5 votes |
def nonzero(self):
    """
    Get all non-zero bits
    """
    return [i for i in xrange(self.size()) if self.test(i)]
Example #19
Source File: __init__.py From pymobiledevice with GNU General Public License v3.0 | 5 votes |
def hexdump(d):
    for i in xrange(0, len(d), 16):
        data = d[i:i + 16]
        print("%08X | %s | %s" % (i, hex(data).ljust(47), ascii(data)))
Example #20
Source File: utils.py From quantized-mesh-tile with MIT License | 5 votes |
def createCoordsPairs(l):
    coordsPairs = []
    for i in xrange(0, len(l)):
        coordsPairs.append([l[i], l[(i + 2) % len(l)]])
    return coordsPairs
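A small hypothetical example of the wrap-around pairing this performs, with made-up values:

createCoordsPairs(['a', 'b', 'c', 'd'])
# -> [['a', 'c'], ['b', 'd'], ['c', 'a'], ['d', 'b']]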
Example #21
Source File: layer_master.py From recnet with MIT License | 5 votes |
def rec_ortho(self, rng, ndim, ndim_factor):
    W = np.concatenate([self.sqr_ortho(rng, ndim) for i in xrange(ndim_factor)],
                       axis=1)
    return W
Example #22
Source File: afc.py From pymobiledevice with GNU General Public License v3.0 | 5 votes |
def list_to_dict(self, d):
    if PY3:
        d = d.decode('utf-8')
    t = d.split("\x00")
    t = t[:-1]

    assert len(t) % 2 == 0
    res = {}
    for i in xrange(int(len(t) / 2)):
        res[t[i * 2]] = t[i * 2 + 1]
    return res
Example #23
Source File: model.py From mem_absa with MIT License | 5 votes |
def run(self, train_data, test_data):
    print('training...')
    self.sess.run(self.A.assign(self.pre_trained_context_wt))
    self.sess.run(self.B.assign(self.pre_trained_context_wt))
    self.sess.run(self.ASP.assign(self.pre_trained_target_wt))
    for idx in xrange(self.nepoch):
        print('epoch ' + str(idx) + '...')
        train_loss, train_acc = self.train(train_data)
        test_loss, test_acc = self.test(test_data)
        print('train-loss=%.2f;train-acc=%.2f;test-acc=%.2f;'
              % (train_loss, train_acc, test_acc))
Example #24
Source File: gpcharts.py From GooPyCharts with Apache License 2.0 | 5 votes |
def combineData(xdata, ydata, xlabel):
    # if ydata is a simple vector, encapsulate it into a 2D list
    if type(ydata[1]) is not list:
        ydata = [[val] for val in ydata]

    # if xdata is time data, add HH:MM:SS if it is missing (just 00:00:00)
    if type(xdata[1]) is str:
        # check if first 4 characters of xdata is a valid year
        if len(xdata[1]) == 10 and int(xdata[1][:4]) > 0 and int(xdata[1][:4]) < 3000:
            xdata[1:] = [val + ' 00:00:00' for val in xdata[1:]]

    # figure out independent variable headers
    # if there is a title row, use that title
    if type(ydata[0][0]) is str:
        data = [[xdata[0]] + ydata[0]]
        for i in xrange(1, len(xdata)):
            data.append([xdata[i]] + ydata[i])
    # otherwise, use a default labeling
    else:
        header = [xlabel]
        for i in xrange(len(ydata[0])):
            header.append('data' + str(i + 1))

        data = [header]
        for i in xrange(len(xdata)):
            data.append([xdata[i]] + ydata[i])

    return data

# helper function, returns title as a valid JS identifier, prefixed by '_'.
Example #25
Source File: common.py From pypath with GNU General Public License v3.0 | 5 votes |
def read_xls(xls_file, sheet = '', csv_file = None, return_table = True):
    """
    Generic function to read MS Excel XLS file, and convert one sheet
    to CSV, or return as a list of lists
    """
    try:
        if hasattr(xls_file, 'read'):
            book = xlrd.open_workbook(
                file_contents = xls_file.read(),
                on_demand = True,
            )
        else:
            book = xlrd.open_workbook(xls_file, on_demand = True)

        try:
            sheet = book.sheet_by_name(sheet)
        except xlrd.biffh.XLRDError:
            sheet = book.sheet_by_index(0)

        table = [
            [unicode(c.value) for c in sheet.row(i)]
            for i in xrange(sheet.nrows)
        ]

        if csv_file:
            with open(csv_file, 'w') as csv:
                csv.write('\n'.join(['\t'.join(r) for r in table]))

        if not return_table:
            table = None

        book.release_resources()

        return table

    except IOError:
        sys.stdout.write('No such file: %s\n' % xls_file)
        sys.stdout.flush()
Example #26
Source File: cellphonedb.py From pypath with GNU General Public License v3.0 | 5 votes |
def cellphonedb_complex_annotations():

    def get_uniprots(rec):
        return tuple(
            uniprot
            for uniprot in
            (rec['uniprot_%u' % i] for i in xrange(1, 5))
            if uniprot
        )

    def get_stoichiometry(rec):
        if not rec['stoichiometry']:
            return get_uniprots(rec)

        return tuple(
            mapping.map_name0(genesymbol, 'genesymbol', 'uniprot')
            for genesymbol in rec['stoichiometry'].split(';')
        )

    def name_method(rec):
        comp = get_stoichiometry(rec)

        cplex = intera.Complex(
            name = rec['complex_name'],
            components = comp,
            sources = 'CellPhoneDB',
            ids = rec['complex_name'],
        )

        return cplex

    return _cellphonedb_annotations(
        url = urls.urls['cellphonedb_git']['complexes'],
        name_method = name_method,
    )
Example #27
Source File: go.py From pypath with GNU General Public License v3.0 | 5 votes |
def annotate(graph, organism = 9606, aspects = ('C', 'F', 'P')):
    """
    Adds Gene Ontology annotations to the nodes of a graph.

    :param igraph.Graph graph:
        Any ``igraph.Graph`` object with uniprot IDs in its ``name``
        vertex attribute.
    """
    aspects = aspects if type(aspects) in {list, tuple} else (aspects, )

    graph.vs['go'] = [
        {'C': set(), 'F': set(), 'P': set()}
        for _ in xrange(graph.vcount())
    ]

    terms, annot = dataio.go_annotations_goa(organism = organism)

    prg = progress.Progress(graph.vcount(), 'Loading GO annotations', 9)

    for v in graph.vs:
        prg.step()

        for asp in aspects:
            if v['name'] in annot[asp]:
                v['go'][asp] = annot[asp][v['name']]

    prg.terminate()


# old name as synonym
Example #28
Source File: pyreact.py From pypath with GNU General Public License v3.0 | 5 votes |
def cleanup_hook(self):
    """
    Removes the used elements to free up memory.
    This method should not be called directly,
    ``BioPaxReader.iterate()`` calls it.
    """
    if len(self.used_elements) > self.cleanup_period:
        for _ in xrange(int(self.cleanup_period / 2)):
            e = self.used_elements.pop()
            e.clear()
Example #29
Source File: common.py From pypath with GNU General Public License v3.0 | 5 votes |
def paginate(lst, size = 10):
    """
    Yields sections of length ``size`` from list ``lst``.
    The last section might be shorter than ``size``.
    Following https://stackoverflow.com/a/3744502/854988.
    """
    for i in xrange((len(lst) // size) + 1):
        yield lst[size * i:size * (i + 1)]
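A hypothetical quick check of paginate's behaviour, with made-up values:

list(paginate(list(xrange(7)), size = 3))
# -> [[0, 1, 2], [3, 4, 5], [6]]
# when len(lst) is an exact multiple of size, the final iteration
# yields an empty list, because the loop runs (len(lst) // size) + 1 times.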
Example #30
Source File: plot.py From pypath with GNU General Public License v3.0 | 5 votes |
def set_grid(self):
    """
    Sets up a grid according to the number of subplots,
    with one additional column of zero width on the left
    to have aligned y axis labels.
    """
    self.gs = mpl.gridspec.GridSpec(
        self.nrows,
        self.ncols + 1,
        height_ratios=[1.0] * self.nrows,
        width_ratios=[0.0] + [1.0] * self.ncols)
    self.axes = list(
        map(lambda _: [None] * (self.ncols + 1), xrange(self.nrows)))