Python struct.unpack_from() Examples

The following are 30 code examples of struct.unpack_from(), collected from open-source projects. The project, source file, and license for each example are noted above the code. You may also want to check out the other functions and classes available in the struct module.
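Before diving into the examples, a minimal sketch of the call itself may help. struct.unpack_from(format, buffer, offset=0) reads packed values out of a buffer at a given offset without consuming it, which is why it is convenient for stepping through binary headers; the buffer contents below are made up for illustration.

import struct

buf = struct.pack('<IHH', 0xDEADBEEF, 7, 42)        # sample buffer (made-up values)
first, = struct.unpack_from('<I', buf)              # 4 bytes at offset 0
second, third = struct.unpack_from('<HH', buf, 4)   # two shorts at offset 4
print(hex(first), second, third)                    # 0xdeadbeef 7 42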
Example #1
Source File: systemconfig.py    From macops with Apache License 2.0
def _GetMACFromData(data):
  """Unpacks and formats MAC address data.

  Args:
    data: buffer, usually an NSCFData object
  Returns:
    string containing the MAC address
  Raises:
    InterfaceError: if data can't be unpacked
  """
  try:
    unpacked = struct.unpack_from('BBBBBB', data)
  except struct.error as e:
    logging.error('Could not unpack MAC address data: %s', e)
    raise InterfaceError(e)
  return ':'.join(['{:02x}'.format(i) for i in unpacked]) 
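As a quick illustration of the 'BBBBBB' format used above, six unsigned bytes unpack into a tuple of integers that joins into the usual colon-separated notation; the address bytes below are made up.

import struct

sample = bytes([0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e])    # hypothetical MAC bytes
unpacked = struct.unpack_from('BBBBBB', sample)
print(':'.join('{:02x}'.format(i) for i in unpacked))   # 00:1a:2b:3c:4d:5e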
Example #2
Source File: win32.py    From rekall with GNU General Public License v2.0
def ParseMemoryRuns(self, fhandle):
        # Set acquisition mode. If the driver does not support this mode it will
        # just fall back to the default.
        win32file.DeviceIoControl(
            fhandle, CTRL_IOCTRL,
            struct.pack("I", PMEM_MODE_PTE), 4, None)

        result = win32file.DeviceIoControl(
            fhandle, INFO_IOCTRL, b"", 102400, None)

        fmt_string = "Q" * len(self.FIELDS)
        self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
            fmt_string, result)))

        offset = struct.calcsize(fmt_string)
        for x in range(self.memory_parameters["NumberOfRuns"]):
            start, length = struct.unpack_from("QQ", result, x * 16 + offset)
            self.add_run(start, start, length, self.fhandle_as) 
Example #3
Source File: USBConfiguration.py    From Facedancer with BSD 3-Clause "New" or "Revised" License
def from_binary_descriptor(cls, data):
        """
        Generates a new USBConfiguration object from a configuration descriptor,
        handling any attached subordinate descriptors.

        data: The raw bytes for the descriptor to be parsed.
        """

        length = data[0]

        # Unpack the main collection of data into the descriptor itself.
        descriptor_type, total_length, num_interfaces, index, string_index, \
            attributes, max_power = struct.unpack_from('<xBHBBBBB', data[0:length])

        # Extract the subordinate descriptors, and parse them.
        interfaces = cls._parse_subordinate_descriptors(data[length:total_length])
        return cls(index, string_index, interfaces, attributes, max_power, total_length) 
Example #4
Source File: USBEndpoint.py    From Facedancer with BSD 3-Clause "New" or "Revised" License
def from_binary_descriptor(cls, data):
        """
        Creates an endpoint object from a description of that endpoint.
        """

        # Parse the core descriptor into its components...
        address, attributes, max_packet_size, interval = struct.unpack_from("xxBBHB", data)

        # ... and break down the packed fields.
        number        = address & 0x7F
        direction     = address >> 7
        transfer_type = attributes & 0b11
        sync_type     = attributes >> 2 & 0b1111
        usage_type    = attributes >> 4 & 0b11

        return cls(number, direction, transfer_type, sync_type, usage_type,
                   max_packet_size, interval) 
Example #5
Source File: cxxCompressor.py    From compreffor with Apache License 2.0
def get_encoding(data_buffer, subrs):
    """Read a charstring's encoding stream out of a string buffer response
    from cffCompressor.cc"""

    pos = 0
    num_calls = data_buffer[pos]
    pos += 1
    enc = []
    for j in range(num_calls):
        insertion_pos = struct.unpack_from('<I', data_buffer[pos:pos+4])[0]
        pos += 4
        subr_index = struct.unpack_from('<I', data_buffer[pos:pos+4])[0]
        pos += 4
        subrs[subr_index].freq += 1
        enc.append((insertion_pos, subrs[subr_index]))
    return enc, pos 
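As an aside, the two sliced reads in the loop could also be expressed as a single offset-based call, since unpack_from accepts the position directly; a minimal sketch with made-up buffer contents:

import struct

buf = bytes([1]) + struct.pack('<II', 17, 3)    # num_calls = 1, then one (pos, index) pair
insertion_pos, subr_index = struct.unpack_from('<II', buf, 1)
print(insertion_pos, subr_index)                # 17 3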
Example #6
Source File: info.py    From aerospike-admin with Apache License 2.0
def _parse_session_info(data, field_count):
    i = 0
    offset = 0
    session_token = None
    session_ttl = None
    while i < field_count:
        field_len, field_id = struct.unpack_from("! I B", data, offset)
        field_len -= 1
        offset += 5

        if field_id == _SESSION_TOKEN_FIELD_ID:
            fmt_str = "%ds" % field_len
            session_token = struct.unpack_from(fmt_str, data, offset)[0]

        elif field_id == _SESSION_TTL_FIELD_ID:
            fmt_str = ">I"
            session_ttl = struct.unpack_from(fmt_str, data, offset)[0]

        offset += field_len
        i += 1

    return session_token, session_ttl 
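A minimal sketch of one such field, with made-up values, shows how the two unpack_from calls above line up: the '! I B' header occupies five bytes (a network-order length plus a one-byte field id), and the payload follows it.

import struct

payload = b'example-session-token'                      # made-up payload
field = struct.pack('! I B', len(payload) + 1, 1) + payload

field_len, field_id = struct.unpack_from('! I B', field, 0)
token = struct.unpack_from('%ds' % (field_len - 1), field, 5)[0]
print(field_id, token)                                  # 1 b'example-session-token'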
Example #7
Source File: info.py    From aerospike-admin with Apache License 2.0
def _info_request(sock, buf):

    # request over TCP
    try:
        sock.send(buf)
        # get response
        rsp_hdr = sock.recv(8)
        q = struct.unpack_from(proto_header_fmt, rsp_hdr, 0)
        sz = q[0] & 0xFFFFFFFFFFFF
        if sz > 0:
            rsp_data = _receivedata(sock, sz)
    except Exception as ex:
        raise IOError("Error: %s" % str(ex))

    # parse out responses
    if sz == 0:
        return None

    return(rsp_data) 
Example #8
Source File: program.py    From py-uio with MIT License
def _load_elf( self, exe ):
        # parse file header
        if exe[:7] != b'\x7fELF\x01\x01\x01':
            raise RuntimeError( "Invalid ELF32 header" )
        if unpack( 'HH', exe, 0x10 ) != ( 2, 144 ):
            raise RuntimeError( "Not a TI-PRU executable" )
        ( entry, phoff, phsz, nph ) = unpack( 'II10xHH', exe, 0x18 )

        if self.entrypoint is None:
            if entry & 3:
                raise RuntimeError( "Entrypoint not word-aligned: 0x%x" % entry )
            self.entrypoint = entry >> 2
        elif entry != self.entrypoint << 2:
            warn( "Overriding entrypoint of ELF executable" )

        for i in range( nph ):
            ( pt, *args ) = unpack( '8I', exe, phoff )
            phoff += phsz

            if pt == 1:
                self._load_elf_segment( exe, *args )
            elif pt == 0x70000000:
                pass  # segment attributes
            else:
                warn( "Unknown program header type: 0x%x" % pt ) 
Example #9
Source File: mobi_split.py    From Lector with GNU General Public License v3.0
def nullsection(datain,secno):  # make it zero-length without deleting it
    datalst = []
    nsec = getint(datain,number_of_pdb_records,b'H')
    secstart, secend = getsecaddr(datain,secno)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    dif =  secend-secstart
    datalst.append(datain[:first_pdb_record])
    for i in range(0,secno+1):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    for i in range(secno+1, nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs - dif
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    lpad = zerosecstart - (first_pdb_record + 8*nsec)
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart: secstart])
    datalst.append(datain[secend:])
    dataout = b''.join(datalst)
    return dataout 
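The recurring pattern in this and the other mobi_split helpers below is reading 8-byte PDB record entries, each a big-endian offset followed by a flags/id value; a minimal sketch with made-up numbers:

import struct

entry = struct.pack(b'>2L', 4096, 2)               # made-up (offset, flags/id) pair
ofs, flgval = struct.unpack_from(b'>2L', entry, 0)
print(ofs, flgval)                                 # 4096 2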
Example #10
Source File: winpmem.py    From rekall with GNU General Public License v2.0
def ParseMemoryRuns(self):
        self.runs = []

        result = win32file.DeviceIoControl(
            self.fd, INFO_IOCTRL, "", 102400, None)

        fmt_string = "Q" * len(self.FIELDS)
        self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
                    fmt_string, result)))

        self.dtb = self.memory_parameters["CR3"]
        self.kdbg = self.memory_parameters["KDBG"]

        offset = struct.calcsize(fmt_string)

        for x in range(self.memory_parameters["NumberOfRuns"]):
            start, length = struct.unpack_from("QQ", result, x * 16 + offset)
            self.runs.append((start, length)) 
Example #11
Source File: buddy.py    From ds_store with MIT License
def read(self, size_or_format):
        if isinstance(size_or_format, (str, unicode, bytes)):
            size = struct.calcsize(size_or_format)
            fmt = size_or_format
        else:
            size = size_or_format
            fmt = None

        if self._size - self._pos < size:
            raise BuddyError('Unable to read %lu bytes in block' % size)

        data = self._value[self._pos:self._pos + size]
        self._pos += size
        
        if fmt is not None:
            if isinstance(data, bytearray):
                return struct.unpack_from(fmt, bytes(data))
            else:
                return struct.unpack(fmt, data)
        else:
            return data 
Example #12
Source File: mobi_index.py    From Lector with GNU General Public License v3.0
def readTagSection(start, data):
    '''
    Read tag section from given data.

    @param start: The start position in the data.
    @param data: The data to process.
    @return: Tuple of control byte count and list of tag tuples.
    '''
    controlByteCount = 0
    tags = []
    if data[start:start+4] == b"TAGX":
        firstEntryOffset, = struct.unpack_from(b'>L', data, start + 0x04)
        controlByteCount, = struct.unpack_from(b'>L', data, start + 0x08)

        # Skip the first 12 bytes already read above.
        for i in range(12, firstEntryOffset, 4):
            pos = start + i
            tags.append((ord(data[pos:pos+1]), ord(data[pos+1:pos+2]), ord(data[pos+2:pos+3]), ord(data[pos+3:pos+4])))
    return controlByteCount, tags 
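A hand-built TAGX block (values made up) shows the layout this function walks: the magic, the offset of the first index entry, the control byte count, and then 4-byte tag tuples up to that offset.

import struct

tagx = b'TAGX' + struct.pack('>LL', 16, 1) + bytes([1, 1, 1, 0])   # one tag tuple
first_entry_offset, = struct.unpack_from(b'>L', tagx, 0x04)
control_byte_count, = struct.unpack_from(b'>L', tagx, 0x08)
print(first_entry_offset, control_byte_count)                      # 16 1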
Example #13
Source File: parser.py    From cronosparser with MIT License
def parse_record(meta, dat_fh):
    # Each data record is stored as a linked list of data fragments. The
    # metadata record holds the first and second offset, while all further
    # chunks are prefixed with the next offset.
    offset, length, next_offset, next_length = struct.unpack('<IHIH', meta)
    dat_fh.seek(offset)
    if length == 0:
        if next_length == 0 or next_length == 0xffff:
            return
    data = dat_fh.read(length)
    while next_length != 0 and next_length != 0xffff:
        dat_fh.seek(next_offset)
        next_data = dat_fh.read(min(252, next_length))
        if len(next_data) < 4:
            break
        next_offset, = struct.unpack_from('<I', next_data)
        data += next_data[4:]
        if next_length > 252:
            next_length -= 252
        else:
            next_length = 0
    return data 
Example #14
Source File: mobi_uncompress.py    From Lector with GNU General Public License v3.0
def loadHuff(self, huff):
        if huff[0:8] != b'HUFF\x00\x00\x00\x18':
            raise unpackException('invalid huff header')
        off1, off2 = struct.unpack_from(b'>LL', huff, 8)

        def dict1_unpack(v):
            codelen, term, maxcode = v&0x1f, v&0x80, v>>8
            assert codelen != 0
            if codelen <= 8:
                assert term
            maxcode = ((maxcode + 1) << (32 - codelen)) - 1
            return (codelen, term, maxcode)
        self.dict1 = lmap(dict1_unpack, struct.unpack_from(b'>256L', huff, off1))

        dict2 = struct.unpack_from(b'>64L', huff, off2)
        self.mincode, self.maxcode = (), ()
        for codelen, mincode in enumerate((0,) + dict2[0::2]):
            self.mincode += (mincode << (32 - codelen), )
        for codelen, maxcode in enumerate((0,) + dict2[1::2]):
            self.maxcode += (((maxcode + 1) << (32 - codelen)) - 1, )

        self.dictionary = [] 
Example #15
Source File: parser.py    From cronosparser with MIT License
def parse_columns(text, base, count):
    # Parse the columns from the table definition. Columns start with
    # a short record length indicator, followed by type and sequence
    # information (each a short), and the name (prefixed by the length).
    columns = []
    for i in range(count):
        if len(text[base:]) < 8:
            break
        col_len, = struct.unpack_from('H', text, base)
        base = base + 2
        if len(text[base:]) < col_len:
            break
        col_data = text[base - 1:base - 1 + col_len]
        type_, col_id = struct.unpack_from('>HH', col_data, 0)
        text_len, = struct.unpack_from('>I', col_data, 4)
        col_name = decode_text(col_data[8:8 + text_len])
        if col_name is None:
            continue
        columns.append({
            'id': col_id,
            'name': col_name,
            'type': type_
        })
        base = base + col_len
    return columns 
Example #16
Source File: kindleunpack.py    From Lector with GNU General Public License v3.0
def processCONT(i, files, rscnames, sect, data):
    global DUMP
    # process a container header, most of this is unknown
    # right now only extract its EXTH
    dt = data[0:12]
    if dt == b"CONTBOUNDARY":
        rscnames.append(None)
        sect.setsectiondescription(i,"CONTAINER BOUNDARY")
    else:
        sect.setsectiondescription(i,"CONT Header")
        rscnames.append(None)
        if DUMP:
            cpage, = struct.unpack_from(b'>L', data, 12)
            contexth = data[48:]
            print("\n\nContainer EXTH Dump")
            dump_contexth(cpage, contexth)
            fname = "CONT_Header%05d.dat" % i
            outname= os.path.join(files.outdir, fname)
            with open(pathof(outname), 'wb') as f:
                f.write(data)
    return rscnames 
Example #17
Source File: logstashserver.py    From sniffer with Apache License 2.0
def resolve_data_header(self):

        version, data_type, sequence = struct.unpack_from("!ccI", self.buffer, self.current_offset)
        if version != '2':
            raise RuntimeError('only support version 2')
        if data_type == 'J':
            # ordinary data
            self.sequence = sequence
            return LogStashSession.DATA_HEADER_READY
        elif data_type == 'C':
            # compress data
            self.sequence = 0
            self.compress_data_length = sequence
            return LogStashSession.COMPRESS_HEADER_READY
        else:
            raise RuntimeError('invalid data type') 
Example #18
Source File: pea.py    From poc with BSD 3-Clause "New" or "Revised" License
def parse_dsi(payload, expected_req_id):
	(flags, command, req_id, error_code, length, reserved) = struct.unpack_from('>BBHIII', payload)
	if command != 8:
		if flags != 1 or command != 2 or req_id != expected_req_id:
			print '[-] Bad DSI Header: %u %u %u' % (flags, command, req_id)
			sys.exit(0)

		if error_code != 0 and error_code != 4294962287:
			print '[-] The server responded to with an error code: ' + str(error_code)
			sys.exit(0)

	afp_data = payload[16:]
	if len(afp_data) != length:
		if command != 8:
			print '[-] Invalid length in DSI header: ' + str(length) + ' vs. ' + str(len(payload))
			sys.exit(0)
		else:
			afp_data = afp_data[length:]
			afp_data = parse_dsi(afp_data, expected_req_id)

	return afp_data

##
# List all the volumes on the remote server
## 
Example #19
Source File: pea.py    From poc with BSD 3-Clause "New" or "Revised" License
def list_volumes(sock):
	print "[+] Listing volumes"
	send_request(sock, "\x00\x01", afp_getsrvrparms, "")
	resp = sock.recv(1024)

	afp_data = parse_dsi(resp, 1)
	(server_time, volumes) = struct.unpack_from('>IB', afp_data)
	print "[+] " + str(volumes) + " volumes are available:"

	afp_data = afp_data[5:]
	for i in range(volumes):
		string_length = struct.unpack_from('>h', afp_data)
		name = afp_data[2 : 2 + string_length[0]]
		print "\t-> " + name
		afp_data = afp_data[2 + string_length[0]:]

	return

##
# Open a volume on the remote server
## 
Example #20
Source File: compat.py    From Python24 with MIT License
def get_terminal_size():
        """
        Returns a tuple (x, y) representing the width(x) and the height(y)
        in characters of the terminal window.
        """
        def ioctl_GWINSZ(fd):
            try:
                import fcntl
                import termios
                import struct
                cr = struct.unpack_from(
                    'hh',
                    fcntl.ioctl(fd, termios.TIOCGWINSZ, '12345678')
                )
            except:
                return None
            if cr == (0, 0):
                return None
            return cr
        cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
        if not cr:
            try:
                fd = os.open(os.ctermid(), os.O_RDONLY)
                cr = ioctl_GWINSZ(fd)
                os.close(fd)
            except:
                pass
        if not cr:
            cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
        return int(cr[1]), int(cr[0]) 
Example #21
Source File: perf_test_ringbuffer.py    From ringbuffer with Apache License 2.0
def verify_data(data):
    random_size = len(data) - 4
    random_data = data[:random_size]
    found_crc = get_crc32(random_data)
    (expected_crc,) = struct.unpack_from('>I', data, random_size)

    assert expected_crc == found_crc, 'Expected crc %r, found crc %r' % (
        expected_crc, found_crc) 
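A minimal sketch of the buffer layout this check assumes (an arbitrary payload followed by its big-endian CRC32), built here with zlib.crc32, which get_crc32 in the original presumably wraps:

import struct
import zlib

payload = b'arbitrary test payload'                        # made-up data
data = payload + struct.pack('>I', zlib.crc32(payload) & 0xffffffff)

(expected_crc,) = struct.unpack_from('>I', data, len(data) - 4)
assert expected_crc == zlib.crc32(payload) & 0xffffffff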
Example #22
Source File: mobi_index.py    From Lector with GNU General Public License v3.0
def parseINDXHeader(self, data):
        "read INDX header"
        if not data[:4] == b'INDX':
            print("Warning: index section is not INDX")
            return False
        words = (
                'len', 'nul1', 'type', 'gen', 'start', 'count', 'code',
                'lng', 'total', 'ordt', 'ligt', 'nligt', 'nctoc'
        )
        num = len(words)
        values = struct.unpack(bstr('>%dL' % num), data[4:4*(num+1)])
        header = {}
        for n in range(num):
            header[words[n]] = values[n]

        ordt1 = None
        ordt2 = None

        ocnt, oentries, op1, op2, otagx  = struct.unpack_from(b'>LLLLL',data, 0xa4)
        if header['code'] == 0xfdea or ocnt != 0 or oentries > 0:
            # horribly hacked up ESP (sample) mobi books use two ORDT sections but never specify
            # them in the proper place in the header.  They seem to be codepage 65002 which seems
            # to be some sort of strange EBCDIC utf-8 or 16 encoded strings

            # so we need to look for them and store them away to process leading text
            # ORDT1 has 1 byte long entries, ORDT2 has 2 byte long entries
            # we only ever seem to use the second but ...
            assert(ocnt == 1)
            assert(data[op1:op1+4] == b'ORDT')
            assert(data[op2:op2+4] == b'ORDT')
            ordt1 = struct.unpack_from(bstr('>%dB' % oentries), data, op1+4)
            ordt2 = struct.unpack_from(bstr('>%dH' % oentries), data, op2+4)

        if self.DEBUG:
            print("parsed INDX header:")
            for n in words:
                print(n, "%X" % header[n],)
            print("")
        return header, ordt1, ordt2 
Example #23
Source File: blorb.py    From xyppy with MIT License
def from_chunk(cls, chunk):
        obj = cls()
        obj.name, obj.size, obj.data = chunk.name, chunk.size, chunk.data
        num_resources = struct.unpack_from('>I', chunk.data)[0]
        obj.resources = []
        for i in range(num_resources):
            usage, number, start = struct.unpack_from('>4sII', chunk.data[4+i*12:])
            obj.resources.append(Resource(usage, number, start))
        return obj 
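A hand-built resource index payload (values made up) shows the two unpack_from calls above in isolation: a count, then 12-byte entries of usage, number, and start offset.

import struct

chunk_data = struct.pack('>I', 1) + struct.pack('>4sII', b'Exec', 0, 48)
num_resources = struct.unpack_from('>I', chunk_data)[0]
usage, number, start = struct.unpack_from('>4sII', chunk_data[4:])
print(num_resources, usage, number, start)    # 1 b'Exec' 0 48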
Example #24
Source File: mobi_pagemap.py    From Lector with GNU General Public License v3.0
def __init__(self, mh, data):
        self.mh = mh
        self.data = data
        self.pagenames = []
        self.pageoffsets = []
        self.pageMap = ''
        self.pm_len = 0
        self.pm_nn = 0
        self.pn_bits = 0
        self.pmoff = None
        self.pmstr = ''
        print("Extracting Page Map Information")
        rev_len, = struct.unpack_from(b'>L', self.data, 0x10)
        # skip over header, revision string length data, and revision string
        ptr = 0x14 + rev_len
        pm_1, self.pm_len, self.pm_nn, self.pm_bits  = struct.unpack_from(b'>4H', self.data, ptr)
        # print(pm_1, self.pm_len, self.pm_nn, self.pm_bits)
        self.pmstr = self.data[ptr+8:ptr+8+self.pm_len]
        self.pmoff = self.data[ptr+8+self.pm_len:]
        offsize = b">L"
        offwidth = 4
        if self.pm_bits == 16:
            offsize = b">H"
            offwidth = 2
        ptr = 0
        for i in range(self.pm_nn):
            od, = struct.unpack_from(offsize, self.pmoff, ptr)
            ptr += offwidth
            self.pageoffsets.append(od)
        self.pagenames, self.pageMap = _parseNames(self.pm_nn, self.pmstr) 
Example #25
Source File: mobi_split.py    From Lector with GNU General Public License v3.0
def insertsection(datain,secno,secdata):  # insert a new section
    datalst = []
    nsec = getint(datain,number_of_pdb_records,b'H')
    # print("inserting secno" , secno,  "into" ,nsec, "sections")
    secstart,secend = getsecaddr(datain,secno)
    zerosecstart,zerosecend = getsecaddr(datain,0)
    dif = len(secdata)
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L',2*(nsec+1)+1))
    datalst.append(datain[unique_id_seed+4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H',nsec+1))
    newstart = zerosecstart + 8
    for i in range(0,secno):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs += 8
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    datalst.append(struct.pack(b'>L', secstart + 8) + struct.pack(b'>L', (2*secno)))
    for i in range(secno,nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs + dif + 8
        flgval = 2*(i+1)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    lpad = newstart - (first_pdb_record + 8*(nsec + 1))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:secstart])
    datalst.append(secdata)
    datalst.append(datain[secstart:])
    dataout = b''.join(datalst)
    return dataout 
Example #26
Source File: mobi_split.py    From Lector with GNU General Public License v3.0
def deletesectionrange(datain,firstsec,lastsec):  # delete a range of sections
    datalst = []
    firstsecstart,firstsecend = getsecaddr(datain,firstsec)
    lastsecstart,lastsecend = getsecaddr(datain,lastsec)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    dif = lastsecend - firstsecstart + 8*(lastsec-firstsec+1)
    nsec = getint(datain,number_of_pdb_records,b'H')
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L',2*(nsec-(lastsec-firstsec+1))+1))
    datalst.append(datain[unique_id_seed+4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H',nsec-(lastsec-firstsec+1)))
    newstart = zerosecstart - 8*(lastsec-firstsec+1)
    for i in range(0,firstsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs-8*(lastsec-firstsec+1)
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L', flgval))
    for i in range(lastsec+1,nsec):
        ofs, flgval = struct.unpack_from(b'>2L',datain,first_pdb_record+i*8)
        ofs = ofs - dif
        flgval = 2*(i-(lastsec-firstsec+1))
        datalst.append(struct.pack(b'>L',ofs) + struct.pack(b'>L',flgval))
    lpad = newstart - (first_pdb_record + 8*(nsec - (lastsec - firstsec + 1)))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:firstsecstart])
    datalst.append(datain[lastsecend:])
    dataout = b''.join(datalst)
    return dataout 
Example #27
Source File: mobi_split.py    From Lector with GNU General Public License v3.0
def getint(datain,ofs,sz=b'L'):
    i, = struct.unpack_from(b'>'+sz,datain,ofs)
    return i 
Example #28
Source File: kindleunpack.py    From Lector with GNU General Public License v3.0
def processPrintReplica(metadata, files, rscnames, mh):
    global DUMP
    global WRITE_RAW_DATA
    rawML = mh.getRawML()
    if DUMP or WRITE_RAW_DATA:
        outraw = os.path.join(files.outdir,files.getInputFileBasename() + '.rawpr')
        with open(pathof(outraw),'wb') as f:
            f.write(rawML)

    fileinfo = []
    print("Print Replica ebook detected")
    try:
        numTables, = struct.unpack_from(b'>L', rawML, 0x04)
        tableIndexOffset = 8 + 4*numTables
        # for each table, read in count of sections, assume first section is a PDF
        # and output other sections as binary files
        for i in range(numTables):
            sectionCount, = struct.unpack_from(b'>L', rawML, 0x08 + 4*i)
            for j in range(sectionCount):
                sectionOffset, sectionLength, = struct.unpack_from(b'>LL', rawML, tableIndexOffset)
                tableIndexOffset += 8
                if j == 0:
                    entryName = os.path.join(files.outdir, files.getInputFileBasename() + ('.%03d.pdf' % (i+1)))
                else:
                    entryName = os.path.join(files.outdir, files.getInputFileBasename() + ('.%03d.%03d.data' % ((i+1),j)))
                with open(pathof(entryName), 'wb') as f:
                    f.write(rawML[sectionOffset:(sectionOffset+sectionLength)])
    except Exception as e:
        print('Error processing Print Replica: ' + str(e))

    fileinfo.append([None,'', files.getInputFileBasename() + '.pdf'])
    usedmap = {}
    for name in rscnames:
        if name is not None:
            usedmap[name] = 'used'
    opf = OPFProcessor(files, metadata, fileinfo, rscnames, False, mh, usedmap)
    opf.writeOPF() 
Example #29
Source File: mobi_dict.py    From Lector with GNU General Public License v3.0
def offsets(self, value):
        rvalue, start, count, data = self.lookup(value)
        offset, = struct.unpack_from(b'>H', data, start + 4 + (2 * rvalue))
        if rvalue + 1 < count:
            nextOffset, = struct.unpack_from(b'>H',data, start + 4 + (2 * (rvalue + 1)))
        else:
            nextOffset = None
        return offset, nextOffset, data 
Example #30
Source File: mobi_dict.py    From Lector with GNU General Public License v3.0
def __init__(self, infldatas):
        self.infldatas = infldatas
        self.starts = []
        self.counts = []
        for idata in self.infldatas:
            start, = struct.unpack_from(b'>L', idata, 0x14)
            count, = struct.unpack_from(b'>L', idata, 0x18)
            self.starts.append(start)
            self.counts.append(count)