Python struct.unpack_from() Examples
The following are 30 code examples of struct.unpack_from(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the struct module, or try the search function.
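Before the project examples, here is a minimal sketch (not taken from any project below) of the call itself. Unlike struct.unpack(), struct.unpack_from() accepts an optional offset and only requires the buffer to be at least as long as the format, which is why it suits pulling headers and fields out of larger byte strings.

import struct

buf = b'\x01\x02\x03\x04\x05\x06 plus trailing bytes'

# struct.unpack('>BHH', buf) would raise struct.error here, because the buffer
# is longer than the 5 bytes the format describes; unpack_from() does not mind.
first, second, third = struct.unpack_from('>BHH', buf)   # read 5 bytes from offset 0
tail, = struct.unpack_from('>H', buf, 4)                 # read 2 bytes starting at offset 4
print(first, second, third, tail)                        # 1 515 1029 1286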

Example #1
Source File: systemconfig.py From macops with Apache License 2.0 | 7 votes |
def _GetMACFromData(data):
  """Unpacks and formats MAC address data.

  Args:
    data: buffer, usually an NSCFData object
  Returns:
    string containing the MAC address
  Raises:
    InterfaceError: if data can't be unpacked
  """
  try:
    unpacked = struct.unpack_from('BBBBBB', data)
  except struct.error as e:
    logging.error('Could not unpack MAC address data: %s', e)
    raise InterfaceError(e)

  return ':'.join(['{:02x}'.format(i) for i in unpacked])
Example #2
Source File: parser.py From cronosparser with MIT License | 6 votes |
def parse_columns(text, base, count):
    # Parse the columns from the table definition. Columns start with
    # a short record length indicator, followed by type and sequence
    # information (each a short), and the name (prefixed by the length).
    columns = []
    for i in range(count):
        if len(text[base:]) < 8:
            break
        col_len, = struct.unpack_from('H', text, base)
        base = base + 2
        if len(text[base:]) < col_len:
            break
        col_data = text[base - 1:base - 1 + col_len]
        type_, col_id = struct.unpack_from('>HH', col_data, 0)
        text_len, = struct.unpack_from('>I', col_data, 4)
        col_name = decode_text(col_data[8:8 + text_len])
        if col_name is None:
            continue
        columns.append({
            'id': col_id,
            'name': col_name,
            'type': type_
        })
        base = base + col_len
    return columns
Example #3
Source File: parser.py From cronosparser with MIT License | 6 votes |
def parse_record(meta, dat_fh):
    # Each data record is stored as a linked list of data fragments. The
    # metadata record holds the first and second offset, while all further
    # chunks are prefixed with the next offset.
    offset, length, next_offset, next_length = struct.unpack('<IHIH', meta)

    dat_fh.seek(offset)
    if length == 0:
        if next_length == 0 or next_length == 0xffff:
            return
    data = dat_fh.read(length)

    while next_length != 0 and next_length != 0xffff:
        dat_fh.seek(next_offset)
        next_data = dat_fh.read(min(252, next_length))
        if len(next_data) < 4:
            break
        next_offset, = struct.unpack_from('<I', next_data)
        data += next_data[4:]
        if next_length > 252:
            next_length -= 252
        else:
            next_length = 0
    return data
Example #4
Source File: buddy.py From ds_store with MIT License | 6 votes |
def read(self, size_or_format):
    if isinstance(size_or_format, (str, unicode, bytes)):
        size = struct.calcsize(size_or_format)
        fmt = size_or_format
    else:
        size = size_or_format
        fmt = None

    if self._size - self._pos < size:
        raise BuddyError('Unable to read %lu bytes in block' % size)

    data = self._value[self._pos:self._pos + size]
    self._pos += size

    if fmt is not None:
        if isinstance(data, bytearray):
            return struct.unpack_from(fmt, bytes(data))
        else:
            return struct.unpack(fmt, data)
    else:
        return data
Example #5
Source File: USBConfiguration.py From Facedancer with BSD 3-Clause "New" or "Revised" License | 6 votes |
def from_binary_descriptor(cls, data):
    """
    Generates a new USBConfiguration object from a configuration descriptor,
    handling any attached subordiate descriptors.

    data: The raw bytes for the descriptor to be parsed.
    """

    length = data[0]

    # Unpack the main colleciton of data into the descriptor itself.
    descriptor_type, total_length, num_interfaces, index, string_index, \
        attributes, max_power = struct.unpack_from('<xBHBBBBB', data[0:length])

    # Extract the subordinate descriptors, and parse them.
    interfaces = cls._parse_subordinate_descriptors(data[length:total_length])

    return cls(index, string_index, interfaces, attributes, max_power, total_length)
Example #6
Source File: USBEndpoint.py From Facedancer with BSD 3-Clause "New" or "Revised" License | 6 votes |
def from_binary_descriptor(cls, data):
    """
    Creates an endpoint object from a description of that endpoint.
    """

    # Parse the core descriptor into its components...
    address, attributes, max_packet_size, interval = struct.unpack_from("xxBBHB", data)

    # ... and break down the packed fields.
    number = address & 0x7F
    direction = address >> 7

    transfer_type = attributes & 0b11
    sync_type = attributes >> 2 & 0b1111
    usage_type = attributes >> 4 & 0b11

    return cls(number, direction, transfer_type, sync_type, usage_type, max_packet_size, interval)
Example #7
Source File: program.py From py-uio with MIT License | 6 votes |
def _load_elf( self, exe ):
    # parse file header
    if exe[:7] != b'\x7fELF\x01\x01\x01':
        raise RuntimeError( "Invalid ELF32 header" )
    if unpack( 'HH', exe, 0x10 ) != ( 2, 144 ):
        raise RuntimeError( "Not a TI-PRU executable" )
    ( entry, phoff, phsz, nph ) = unpack( 'II10xHH', exe, 0x18 )

    if self.entrypoint is None:
        if entry & 3:
            raise RuntimeError( "Entrypoint not word-aligned: 0x%x" % entry )
        self.entrypoint = entry >> 2
    elif entry != self.entrypoint << 2:
        warn( "Overriding entrypoint of ELF executable" )

    for i in range( nph ):
        ( pt, *args ) = unpack( '8I', exe, phoff )
        phoff += phsz
        if pt == 1:
            self._load_elf_segment( exe, *args )
        elif pt == 0x70000000:
            pass  # segment attributes
        else:
            warn( "Unknown program header type: 0x%x" % pt )
Example #8
Source File: winpmem.py From rekall with GNU General Public License v2.0 | 6 votes |
def ParseMemoryRuns(self):
    self.runs = []

    result = win32file.DeviceIoControl(
        self.fd, INFO_IOCTRL, "", 102400, None)

    fmt_string = "Q" * len(self.FIELDS)
    self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
        fmt_string, result)))

    self.dtb = self.memory_parameters["CR3"]
    self.kdbg = self.memory_parameters["KDBG"]

    offset = struct.calcsize(fmt_string)

    for x in range(self.memory_parameters["NumberOfRuns"]):
        start, length = struct.unpack_from("QQ", result, x * 16 + offset)
        self.runs.append((start, length))
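Example #8 (and Example #9 below) use a common pattern: compute the size of a fixed header with struct.calcsize(), then walk fixed-size records by passing an increasing offset to unpack_from(). The sketch below is illustrative only; the three-field header and the (start, length) records are made up, not the winpmem driver output.

import struct

header_fmt = 'QQQ'                                   # hypothetical three-field header
runs_in = [(0x1000, 0x200), (0x3000, 0x400)]
blob = struct.pack(header_fmt, 0, 0, len(runs_in))   # last header field: run count
for start, length in runs_in:
    blob += struct.pack('QQ', start, length)

offset = struct.calcsize(header_fmt)                 # skip past the fixed header
number_of_runs = struct.unpack_from(header_fmt, blob)[2]
runs = [struct.unpack_from('QQ', blob, offset + i * 16)   # 16 == struct.calcsize('QQ')
        for i in range(number_of_runs)]
print(runs)                                          # [(4096, 512), (12288, 1024)]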
Example #9
Source File: win32.py From rekall with GNU General Public License v2.0 | 6 votes |
def ParseMemoryRuns(self, fhandle):
    # Set acquisition mode. If the driver does not support this mode it will
    # just fall back to the default.
    win32file.DeviceIoControl(
        fhandle, CTRL_IOCTRL, struct.pack("I", PMEM_MODE_PTE), 4, None)

    result = win32file.DeviceIoControl(
        fhandle, INFO_IOCTRL, b"", 102400, None)

    fmt_string = "Q" * len(self.FIELDS)
    self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
        fmt_string, result)))

    offset = struct.calcsize(fmt_string)

    for x in range(self.memory_parameters["NumberOfRuns"]):
        start, length = struct.unpack_from("QQ", result, x * 16 + offset)
        self.add_run(start, start, length, self.fhandle_as)
Example #10
Source File: kindleunpack.py From Lector with GNU General Public License v3.0 | 6 votes |
def processCONT(i, files, rscnames, sect, data):
    global DUMP
    # process a container header, most of this is unknown
    # right now only extract its EXTH
    dt = data[0:12]
    if dt == b"CONTBOUNDARY":
        rscnames.append(None)
        sect.setsectiondescription(i,"CONTAINER BOUNDARY")
    else:
        sect.setsectiondescription(i,"CONT Header")
        rscnames.append(None)
        if DUMP:
            cpage, = struct.unpack_from(b'>L', data, 12)
            contexth = data[48:]
            print("\n\nContainer EXTH Dump")
            dump_contexth(cpage, contexth)
            fname = "CONT_Header%05d.dat" % i
            outname= os.path.join(files.outdir, fname)
            with open(pathof(outname), 'wb') as f:
                f.write(data)
    return rscnames
Example #11
Source File: mobi_split.py From Lector with GNU General Public License v3.0 | 6 votes |
def nullsection(datain,secno):  # make it zero-length without deleting it
    datalst = []
    nsec = getint(datain,number_of_pdb_records,b'H')
    secstart, secend = getsecaddr(datain,secno)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    dif = secend-secstart
    datalst.append(datain[:first_pdb_record])
    for i in range(0,secno+1):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    for i in range(secno+1, nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs - dif
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    lpad = zerosecstart - (first_pdb_record + 8*nsec)
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart: secstart])
    datalst.append(datain[secend:])
    dataout = b''.join(datalst)
    return dataout
Example #12
Source File: mobi_index.py From Lector with GNU General Public License v3.0 | 6 votes |
def readTagSection(start, data):
    '''
    Read tag section from given data.

    @param start: The start position in the data.
    @param data: The data to process.
    @return: Tuple of control byte count and list of tag tuples.
    '''
    controlByteCount = 0
    tags = []
    if data[start:start+4] == b"TAGX":
        firstEntryOffset, = struct.unpack_from(b'>L', data, start + 0x04)
        controlByteCount, = struct.unpack_from(b'>L', data, start + 0x08)

        # Skip the first 12 bytes already read above.
        for i in range(12, firstEntryOffset, 4):
            pos = start + i
            tags.append((ord(data[pos:pos+1]), ord(data[pos+1:pos+2]), ord(data[pos+2:pos+3]), ord(data[pos+3:pos+4])))
    return controlByteCount, tags
Example #13
Source File: mobi_uncompress.py From Lector with GNU General Public License v3.0 | 6 votes |
def loadHuff(self, huff):
    if huff[0:8] != b'HUFF\x00\x00\x00\x18':
        raise unpackException('invalid huff header')
    off1, off2 = struct.unpack_from(b'>LL', huff, 8)

    def dict1_unpack(v):
        codelen, term, maxcode = v&0x1f, v&0x80, v>>8
        assert codelen != 0
        if codelen <= 8:
            assert term
        maxcode = ((maxcode + 1) << (32 - codelen)) - 1
        return (codelen, term, maxcode)
    self.dict1 = lmap(dict1_unpack, struct.unpack_from(b'>256L', huff, off1))

    dict2 = struct.unpack_from(b'>64L', huff, off2)
    self.mincode, self.maxcode = (), ()
    for codelen, mincode in enumerate((0,) + dict2[0::2]):
        self.mincode += (mincode << (32 - codelen), )
    for codelen, maxcode in enumerate((0,) + dict2[1::2]):
        self.maxcode += (((maxcode + 1) << (32 - codelen)) - 1, )

    self.dictionary = []
Example #14
Source File: cxxCompressor.py From compreffor with Apache License 2.0 | 6 votes |
def get_encoding(data_buffer, subrs):
    """Read a charstring's encoding stream out of a string buffer response
    from cffCompressor.cc"""

    pos = 0
    num_calls = data_buffer[pos]
    pos += 1
    enc = []
    for j in range(num_calls):
        insertion_pos = struct.unpack_from('<I', data_buffer[pos:pos+4])[0]
        pos += 4
        subr_index = struct.unpack_from('<I', data_buffer[pos:pos+4])[0]
        pos += 4
        subrs[subr_index].freq += 1
        enc.append((insertion_pos, subrs[subr_index]))
    return enc, pos
Example #15
Source File: info.py From aerospike-admin with Apache License 2.0 | 6 votes |
def _parse_session_info(data, field_count):
    i = 0
    offset = 0
    session_token = None
    session_ttl = None
    while i < field_count:
        field_len, field_id = struct.unpack_from("! I B", data, offset)
        field_len -= 1
        offset += 5

        if field_id == _SESSION_TOKEN_FIELD_ID:
            fmt_str = "%ds" % field_len
            session_token = struct.unpack_from(fmt_str, data, offset)[0]

        elif field_id == _SESSION_TTL_FIELD_ID:
            fmt_str = ">I"
            session_ttl = struct.unpack_from(fmt_str, data, offset)[0]

        offset += field_len
        i += 1

    return session_token, session_ttl
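A recurring trick in Example #15 is building the format string at runtime ("%ds" % field_len) so a variable-length field can be unpacked after its length is read. The standalone illustration below uses an invented field layout (a 2-byte big-endian length prefix followed by that many bytes), not the Aerospike wire format.

import struct

payload = struct.pack('!H5s', 5, b'hello')            # length prefix, then the bytes
field_len, = struct.unpack_from('!H', payload, 0)     # read the length first
value, = struct.unpack_from('%ds' % field_len, payload, 2)
print(value)                                          # b'hello'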
Example #16
Source File: info.py From aerospike-admin with Apache License 2.0 | 6 votes |
def _info_request(sock, buf):
    # request over TCP
    try:
        sock.send(buf)

        # get response
        rsp_hdr = sock.recv(8)
        q = struct.unpack_from(proto_header_fmt, rsp_hdr, 0)
        sz = q[0] & 0xFFFFFFFFFFFF
        if sz > 0:
            rsp_data = _receivedata(sock, sz)
    except Exception as ex:
        raise IOError("Error: %s" % str(ex))

    # parse out responses
    if sz == 0:
        return None

    return(rsp_data)
Example #17
Source File: logstashserver.py From sniffer with Apache License 2.0 | 6 votes |
def resolve_data_header(self):
    version, data_type, sequence = struct.unpack_from("!ccI", self.buffer, self.current_offset)
    if version != '2':
        raise RuntimeError('only support version 2')
    if data_type == 'J':  # ordinary data
        self.sequence = sequence
        return LogStashSession.DATA_HEADER_READY
    elif data_type == 'C':  # compress data
        self.sequence = 0
        self.compress_data_length = sequence
        return LogStashSession.COMPRESS_HEADER_READY
    else:
        raise RuntimeError('invalid data type')
Example #18
Source File: pea.py From poc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def parse_dsi(payload, expected_req_id):
    (flags, command, req_id, error_code, length, reserved) = struct.unpack_from('>BBHIII', payload)
    if command != 8:
        if flags != 1 or command != 2 or req_id != expected_req_id:
            print '[-] Bad DSI Header: %u %u %u' % (flags, command, req_id)
            sys.exit(0)
        if error_code != 0 and error_code != 4294962287:
            print '[-] The server responded to with an error code: ' + str(error_code)
            sys.exit(0)

    afp_data = payload[16:]
    if len(afp_data) != length:
        if command != 8:
            print '[-] Invalid length in DSI header: ' + str(length) + ' vs. ' + str(len(payload))
            sys.exit(0)
        else:
            afp_data = afp_data[length:]
            afp_data = parse_dsi(afp_data, expected_req_id)

    return afp_data

##
# List all the volumes on the remote server
##
Example #19
Source File: pea.py From poc with BSD 3-Clause "New" or "Revised" License | 6 votes |
def list_volumes(sock):
    print "[+] Listing volumes"
    send_request(sock, "\x00\x01", afp_getsrvrparms, "")
    resp = sock.recv(1024)
    afp_data = parse_dsi(resp, 1)
    (server_time, volumes) = struct.unpack_from('>IB', afp_data)
    print "[+] " + str(volumes) + " volumes are available:"
    afp_data = afp_data[5:]
    for i in range(volumes):
        string_length = struct.unpack_from('>h', afp_data)
        name = afp_data[2 : 2 + string_length[0]]
        print "\t-> " + name
        afp_data = afp_data[2 + string_length[0]:]
    return

##
# Open a volume on the remote server
##
Example #20
Source File: tarfile.py From jawfish with MIT License | 5 votes |
def calc_chksums(buf):
    """Calculate the checksum for a member's header by summing up all
       characters except for the chksum field which is treated as if
       it was filled with spaces. According to the GNU tar sources,
       some tars (Sun and NeXT) calculate chksum with signed char,
       which will be different if there are chars in the buffer with
       the high bit set. So we calculate two checksums, unsigned and
       signed.
    """
    unsigned_chksum = 256 + sum(struct.unpack_from("148B8x356B", buf))
    signed_chksum = 256 + sum(struct.unpack_from("148b8x356b", buf))
    return unsigned_chksum, signed_chksum
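The format strings here lean on the pad-byte code 'x': "148B8x356B" covers a full 512-byte tar header block while skipping the 8-byte chksum field at offset 148, which the checksum instead counts as eight ASCII spaces (hence the constant 256 == 8 * 32). A quick illustrative check of that arithmetic:

import struct

# The pad bytes keep the format aligned with the 512-byte header block while
# excluding the chksum field itself from the sum.
assert struct.calcsize("148B8x356B") == 512
assert 8 * ord(' ') == 256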
Example #21
Source File: pyT4.py From Sinopac-Order-API with MIT License | 5 votes |
def convert_stock_bytes_to_dict(stock_order_res_bytes):
    """The order report comes back as bytes, so first convert it into a structured NamedTuple;
    each item then has to be converted from bytes to utf8."""
    stock_record_field = 'trade_type,account,code_id,ord_price,ord_qty,ord_seq,ord_date,effective_date,' \
                         'ord_time,ord_no,ord_soruce,org_ord_seq,ord_bs,ord_type,market_id,price_type,ord_status,Msg'
    StockOrderRecord = namedtuple('StockOrderRecord', stock_record_field)
    stock_order_res_format = '2s15s6s6s3s6s8s8s6s5s3s6s1s2s1s1s2s60s'

    if len(stock_order_res_bytes) != struct.calcsize(stock_order_res_format):
        return stock_order_res_bytes

    stock_order_res = StockOrderRecord._make(struct.unpack_from(stock_order_res_format, stock_order_res_bytes))
    stock_order_res_lst = [str(item, 'cp950') for item in stock_order_res]

    return StockOrderRecord(*stock_order_res_lst)._asdict()
Example #22
Source File: pyT4.py From Sinopac-Order-API with MIT License | 5 votes |
def convert_future_bytes_to_dict(future_order_res_bytes):
    future_record_field = 'trade_type,account,market_id,code_id,f_callput,ord_bs,ord_price,price_type,ord_qty,' \
                          'ord_no,ord_seq,ord_type,oct_type,f_mttype,f_composit,c_futopt,c_code,c_callput,' \
                          'c_buysell,c_price,c_quantity,ord_date,preord_date,ord_time,type,err_code,msg'
    FutureOrderRecord = namedtuple('FutureOrderRecord', future_record_field)
    future_order_res_format = '2s15s1s10s1s1s12s3s4s6s6s3s1s1s2s1s10s1s1s12s4s8s8s6s1s4s60s'

    if len(future_order_res_bytes) != struct.calcsize(future_order_res_format):
        return future_order_res_bytes

    future_order_res = FutureOrderRecord._make(struct.unpack_from(future_order_res_format, future_order_res_bytes))
    future_order_res_lst = [str(item, 'cp950') for item in future_order_res]

    return FutureOrderRecord(*future_order_res_lst)._asdict()
Example #23
Source File: base.py From PyVESC with Creative Commons Attribution 4.0 International | 5 votes |
def unpack(msg_bytes):
    msg_id = struct.unpack_from(VESCMessage._endian_fmt + VESCMessage._id_fmt, msg_bytes, 0)
    msg_type = VESCMessage.msg_type(*msg_id)
    data = None
    if not (msg_type._string_field is None):
        # string field
        fmt_wo_string = msg_type._fmt_fields.replace('%u', '')
        fmt_wo_string = fmt_wo_string.replace('s', '')
        len_string = len(msg_bytes) - struct.calcsize(VESCMessage._endian_fmt + fmt_wo_string) - 1
        fmt_w_string = msg_type._fmt_fields % (len_string)
        data = struct.unpack_from(VESCMessage._endian_fmt + fmt_w_string, msg_bytes, 1)
    else:
        data = list(struct.unpack_from(VESCMessage._endian_fmt + msg_type._fmt_fields, msg_bytes, 1))
        for k, field in enumerate(data):
            try:
                if msg_type._field_scalars[k] != 0:
                    data[k] = data[k]/msg_type._field_scalars[k]
            except (TypeError, IndexError) as e:
                print("Error ecountered on field " + msg_type.fields[k][0])
                print(e)
    msg = msg_type(*data)
    if not (msg_type._string_field is None):
        string_field_name = msg_type._field_names[msg_type._string_field]
        setattr(msg, string_field_name, getattr(msg, string_field_name).decode('ascii'))
    return msg
Example #24
Source File: structure.py From PyVESC with Creative Commons Attribution 4.0 International | 5 votes |
def parse(buffer):
    """
    Creates a Header by parsing the given buffer.
    :param buffer: buffer object.
    :return: Header object.
    """
    return Header._make(struct.unpack_from(Header.fmt(buffer[0]), buffer, 0))
Example #25
Source File: structure.py From PyVESC with Creative Commons Attribution 4.0 International | 5 votes |
def parse(buffer, header):
    return Footer._make(struct.unpack_from(Footer.fmt(), buffer, header.payload_index + header.payload_length))
Example #26
Source File: parser.py From cronosparser with MIT License | 5 votes |
def vword(data):
    # A vodka word is a russian data unit, encompassing three bytes on good
    # days, with a flag in the fourth.
    word, = struct.unpack_from('<I', data)
    num = word & 0x00ffffff
    flags = (word & 0xff000000) >> 24
    return num, flags
Example #27
Source File: parser.py From cronosparser with MIT License | 5 votes |
def parse_table(text, next_byte):
    # Once we've guessed a table definition location, we can start
    # parsing the name; followed by the two-letter table abbreviation
    # and the count of columns.
    next_len = ord(text[next_byte])
    next_byte = next_byte + 1

    if len(text) < next_byte + next_len + 10:
        return

    if ord(text[next_byte + next_len]) != 2:
        return

    # Get the table name.
    table_name = decode_text(text[next_byte:next_byte + next_len])
    if table_name is None:
        return
    next_byte = next_byte + next_len + 1

    # Get the table abbreviation.
    table_abbr = decode_text(text[next_byte:next_byte + 2])
    if table_abbr is None:
        return
    next_byte = next_byte + 2

    if ord(text[next_byte]) != 1:
        # raise CronosException('Table ID not ended by 0x01!')
        return
    next_byte = next_byte + 4

    # Get the number of columns for the table.
    col_count, = struct.unpack_from('I', text, next_byte)

    return {
        'name': table_name,
        'abbr': table_abbr,
        'columns': parse_columns(text, next_byte + 4, col_count),
        'column_count': col_count
    }
Example #28
Source File: gls.py From PiRogue with GNU Affero General Public License v3.0 | 5 votes |
def toU32(bits):
    import struct
    return struct.unpack_from(">I", bits)[0]
Example #29
Source File: gls.py From PiRogue with GNU Affero General Public License v3.0 | 5 votes |
def toS32(bits):
    import struct
    return struct.unpack_from(">i", bits)[0]
Example #30
Source File: buddy.py From ds_store with MIT License | 5 votes |
def read(self, offset, size_or_format):
    """Read data at `offset', or raise an exception.  `size_or_format'
    may either be a byte count, in which case we return raw data, or
    a format string for `struct.unpack', in which case we work out the
    size and unpack the data before returning it."""
    # N.B. There is a fixed offset of four bytes(!)
    self._file.seek(offset + 4, os.SEEK_SET)

    if isinstance(size_or_format, (str, unicode)):
        size = struct.calcsize(size_or_format)
        fmt = size_or_format
    else:
        size = size_or_format
        fmt = None

    ret = self._file.read(size)
    if len(ret) < size:
        ret += b'\0' * (size - len(ret))

    if fmt is not None:
        if isinstance(ret, bytearray):
            ret = struct.unpack_from(fmt, bytes(ret))
        else:
            ret = struct.unpack(fmt, ret)

    return ret