Python struct.calcsize() Examples

The following are 30 code examples of struct.calcsize(), drawn from open-source projects. Each example lists its original project and source file. You may also want to check out all available functions and classes of the struct module, or try the search function.
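Before the project code, here is a minimal, self-contained sketch of how struct.calcsize() is typically used; the format strings and values below are illustrative only and are not taken from any of the projects listed.

import struct

# calcsize() returns the number of bytes occupied by the packed form of a format string.
print(struct.calcsize('<QQ'))    # 16: two little-endian unsigned 64-bit integers
print(struct.calcsize('P') * 8)  # pointer size in bits; a common 32-bit vs 64-bit check

# It is usually paired with pack()/unpack(): read exactly calcsize(fmt) bytes, then decode them.
fmt = '<IH'
buf = struct.pack(fmt, 1, 2)
assert len(buf) == struct.calcsize(fmt)
print(struct.unpack(fmt, buf))   # (1, 2)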
Example #1
Source File: vachat.py    From The-chat-room with MIT License
def run(self):
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Example #2
Source File: arm_chromeos.py    From script.module.inputstreamhelper with MIT License
def chromeos_offset(self):
        """Calculate the Chrome OS losetup start offset"""
        part_format = '<16s16sQQQ72s'
        entries_start, entries_num, entry_size = self.gpt_header()  # assuming partition table is GPT
        lba_size = config.CHROMEOS_BLOCK_SIZE  # assuming LBA size
        self.seek_stream(entries_start * lba_size)

        if not calcsize(part_format) == entry_size:
            log(4, 'Partition table entries are not 128 bytes long')
            return 0

        for index in range(1, entries_num + 1):  # pylint: disable=unused-variable
            # Entry: type_guid, unique_guid, first_lba, last_lba, attr_flags, part_name
            _, _, first_lba, _, _, part_name = unpack(part_format, self.read_stream(entry_size))
            part_name = part_name.decode('utf-16').strip('\x00')
            if part_name == 'ROOT-A':  # assuming partition name is ROOT-A
                offset = first_lba * lba_size
                break

        if not offset:
            log(4, 'Failed to calculate losetup offset.')
            return 0

        return offset 
Example #3
Source File: buddy.py    From ds_store with MIT License
def read(self, size_or_format):
        if isinstance(size_or_format, (str, unicode, bytes)):
            size = struct.calcsize(size_or_format)
            fmt = size_or_format
        else:
            size = size_or_format
            fmt = None

        if self._size - self._pos < size:
            raise BuddyError('Unable to read %lu bytes in block' % size)

        data = self._value[self._pos:self._pos + size]
        self._pos += size
        
        if fmt is not None:
            if isinstance(data, bytearray):
                return struct.unpack_from(fmt, bytes(data))
            else:
                return struct.unpack(fmt, data)
        else:
            return data 
Example #4
Source File: _definitions.py    From imageio-ffmpeg with BSD 2-Clause "Simplified" License
def get_platform():
    bits = struct.calcsize("P") * 8
    if sys.platform.startswith("linux"):
        return "linux{}".format(bits)
    elif sys.platform.startswith("win"):
        return "win{}".format(bits)
    elif sys.platform.startswith("cygwin"):
        return "win{}".format(bits)
    elif sys.platform.startswith("darwin"):
        return "osx{}".format(bits)
    else:  # pragma: no cover
        return None


# The Linux static builds (https://johnvansickle.com/ffmpeg/) are built
# for Linux kernels 2.6.32 and up (at the time of writing, ffmpeg v4.1).
# This corresponds to CentOS 6. This means we should use manylinux2010 and not
# manylinux1.
# manylinux1: https://www.python.org/dev/peps/pep-0513
# manylinux2010: https://www.python.org/dev/peps/pep-0571


# Platform string -> ffmpeg filename 
Example #5
Source File: aes-file-decrypt.py    From Effective-Python-Penetration-Testing with MIT License
def decrypt_file(key, filename, chunk_size=24*1024):
        
    output_filename = os.path.splitext(filename)[0]

    with open(filename, 'rb') as infile:
        origsize = struct.unpack('<Q', infile.read(struct.calcsize('Q')))[0]
        iv = infile.read(16)
        decryptor = AES.new(key, AES.MODE_CBC, iv)

        with open(output_filename, 'wb') as outfile:
            while True:
                chunk = infile.read(chunk_size)
                if len(chunk) == 0:
                    break
                outfile.write(decryptor.decrypt(chunk))

            outfile.truncate(origsize) 
Example #6
Source File: winpmem.py    From rekall with GNU General Public License v2.0
def ParseMemoryRuns(self):
        self.runs = []

        result = win32file.DeviceIoControl(
            self.fd, INFO_IOCTRL, "", 102400, None)

        fmt_string = "Q" * len(self.FIELDS)
        self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
                    fmt_string, result)))

        self.dtb = self.memory_parameters["CR3"]
        self.kdbg = self.memory_parameters["KDBG"]

        offset = struct.calcsize(fmt_string)

        for x in range(self.memory_parameters["NumberOfRuns"]):
            start, length = struct.unpack_from("QQ", result, x * 16 + offset)
            self.runs.append((start, length)) 
Example #7
Source File: registry.py    From rekall with GNU General Public License v2.0
def _decode_data(self, data):
        """Decode the data according to our type."""
        valtype = str(self.Type)

        if valtype in ["REG_DWORD", "REG_DWORD_BIG_ENDIAN", "REG_QWORD"]:
            if len(data) != struct.calcsize(self.value_formats[valtype]):
                return obj.NoneObject(
                    "Value data did not match the expected data "
                    "size for a {0}".format(valtype))

        if valtype in ["REG_SZ", "REG_EXPAND_SZ", "REG_LINK"]:
            data = data.decode('utf-16-le', "ignore")

        elif valtype == "REG_MULTI_SZ":
            data = data.decode('utf-16-le', "ignore").split('\0')

        elif valtype in ["REG_DWORD", "REG_DWORD_BIG_ENDIAN", "REG_QWORD"]:
            data = struct.unpack(self.value_formats[valtype], data)[0]

        return data 
Example #8
Source File: win32.py    From rekall with GNU General Public License v2.0
def ParseMemoryRuns(self, fhandle):
        # Set acquisition mode. If the driver does not support this mode it will
        # just fall back to the default.
        win32file.DeviceIoControl(
            fhandle, CTRL_IOCTRL,
            struct.pack("I", PMEM_MODE_PTE), 4, None)

        result = win32file.DeviceIoControl(
            fhandle, INFO_IOCTRL, b"", 102400, None)

        fmt_string = "Q" * len(self.FIELDS)
        self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
            fmt_string, result)))

        offset = struct.calcsize(fmt_string)
        for x in range(self.memory_parameters["NumberOfRuns"]):
            start, length = struct.unpack_from("QQ", result, x * 16 + offset)
            self.add_run(start, start, length, self.fhandle_as) 
Example #9
Source File: aff4_map.py    From pyaff4 with Apache License 2.0
def LoadFromURN(self):
        map_urn = self.urn.Append("map")
        map_idx_urn = self.urn.Append("idx")

        # Parse the map out of the map stream. If the stream does not exist yet
        # we just start with an empty map.
        try:
            with self.resolver.AFF4FactoryOpen(map_idx_urn) as map_idx:
                self.targets = [rdfvalue.URN(utils.SmartUnicode(x))
                                for x in map_idx.Read(map_idx.Size()).splitlines()]

            with self.resolver.AFF4FactoryOpen(map_urn) as map_stream:
                read_length = struct.calcsize(Range.format_str)
                while 1:
                    data = map_stream.Read(read_length)
                    if not data:
                        break
                    range = self.deserializeMapPoint(data)
                    if range.length > 0:
                        self.tree.addi(range.map_offset, range.map_end, range)


        except IOError:
            traceback.print_exc()
            pass 
Example #10
Source File: p0f.py    From dionaea with GNU General Public License v2.0
def handle_io_in(self, data):
        fmt = "IIB20s40sB30s30sBBBhHi"
        if len(data) != calcsize(fmt):
            return 0
        values = unpack(fmt, data)
        names=["magic","id","type","genre","detail","dist","link",
               "tos","fw","nat","real","score","mflags","uptime"]
        icd = incident(origin='dionaea.modules.python.p0f')
        for i in range(len(values)):
            s = values[i]
            if type(s) == bytes:
                if s.find(b'\x00') != -1:
                    s = s[:s.find(b'\x00')]
                try:
                    s = s.decode("ascii")
                except UnicodeDecodeError:
                    logger.warning("Unable to decode p0f information %s=%r", i, s, exc_info=True)
                icd.set(names[i], s)
            elif type(s) == int:
                icd.set(names[i], str(s))
        icd.set('con',self.con)
        icd.report()
        self.close()
        return len(data) 
Example #11
Source File: win32.py    From multibootusb with GNU General Public License v2.0
def findVolumeGuids():
    DiskExtent = collections.namedtuple(
        'DiskExtent', ['DiskNumber', 'StartingOffset', 'ExtentLength'])
    Volume = collections.namedtuple(
        'Volume', ['Guid', 'MediaType', 'DosDevice', 'Extents'])
    found = []
    h, guid = FindFirstVolume()
    while h and guid:
        #print (guid)
        #print (guid, win32file.GetDriveType(guid),
        #       win32file.QueryDosDevice(guid[4:-1]))
        hVolume = win32file.CreateFile(
            guid[:-1], win32con.GENERIC_READ,
            win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
            None, win32con.OPEN_EXISTING, win32con.FILE_ATTRIBUTE_NORMAL,  None)
        extents = []
        driveType = win32file.GetDriveType(guid)
        if driveType in [win32con.DRIVE_REMOVABLE, win32con.DRIVE_FIXED]:
            x = win32file.DeviceIoControl(
                hVolume, winioctlcon.IOCTL_VOLUME_GET_VOLUME_DISK_EXTENTS,
                None, 512, None)
            instream = io.BytesIO(x)
            numRecords = struct.unpack('<q', instream.read(8))[0]
            fmt = '<qqq'
            sz = struct.calcsize(fmt)
            while 1:
                b = instream.read(sz)
                if len(b) < sz:
                    break
                rec = struct.unpack(fmt, b)
                extents.append( DiskExtent(*rec) )
        vinfo = Volume(guid, driveType, win32file.QueryDosDevice(guid[4:-1]),
                       extents)
        found.append(vinfo)
        guid = FindNextVolume(h)
    return found 
Example #12
Source File: vachat.py    From The-chat-room with MIT License
def run(self):
        global TERMINATE
        print("AUDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote AUDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        self.stream = self.p.open(format=FORMAT,
                                  channels=CHANNELS,
                                  rate=RATE,
                                  output=True,
                                  frames_per_buffer=CHUNK
                                  )
        while True:
            if TERMINATE:
                self.sock.close()
                break
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            frame_data = data[:msg_size]
            data = data[msg_size:]
            frames = pickle.loads(frame_data)
            for frame in frames:
                self.stream.write(frame, CHUNK) 
Example #13
Source File: kuka_grasp_block_playback.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def readLogFile(filename, verbose = True):
  f = open(filename, 'rb')
  
  print('Opened'),
  print(filename)

  keys = f.readline().decode('utf8').rstrip('\n').split(',')
  fmt = f.readline().decode('utf8').rstrip('\n')
  
  # The byte number of one record
  sz = struct.calcsize(fmt)
  # The type number of one record
  ncols = len(fmt)

  if verbose:
    print('Keys:'),
    print(keys)
    print('Format:'),
    print(fmt)
    print('Size:'),
    print(sz)
    print('Columns:'),
    print(ncols)

  # Read data
  wholeFile = f.read()
  # split by alignment word
  chunks = wholeFile.split(b'\xaa\xbb')
  log = list()
  for chunk in chunks:
    if len(chunk) == sz:
      values = struct.unpack(fmt, chunk)
      record = list()
      for i in range(ncols):
        record.append(values[i])
      log.append(record)

  return log

#clid = p.connect(p.SHARED_MEMORY) 
Example #14
Source File: quadruped_playback.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def readLogFile(filename, verbose = True):
  f = open(filename, 'rb')
  
  print('Opened'),
  print(filename)

  keys = f.readline().decode('utf8').rstrip('\n').split(',')
  fmt = f.readline().decode('utf8').rstrip('\n')
  
  # The byte number of one record
  sz = struct.calcsize(fmt)
  # The type number of one record
  ncols = len(fmt)

  if verbose:
    print('Keys:'),
    print(keys)
    print('Format:'),
    print(fmt)
    print('Size:'),
    print(sz)
    print('Columns:'),
    print(ncols)

  # Read data
  wholeFile = f.read()
  # split by alignment word
  chunks = wholeFile.split(b'\xaa\xbb')
  print ("num chunks")
  print (len(chunks))
  
  log = list()
  for chunk in chunks:
    if len(chunk) == sz:
      values = struct.unpack(fmt, chunk)
      record = list()
      for i in range(ncols):
        record.append(values[i])
      log.append(record)

  return log 
Example #15
Source File: kuka_with_cube_playback.py    From soccer-matlab with BSD 2-Clause "Simplified" License
def readLogFile(filename, verbose = True):
  f = open(filename, 'rb')
  
  print('Opened'),
  print(filename)

  keys = f.readline().decode('utf8').rstrip('\n').split(',')
  fmt = f.readline().decode('utf8').rstrip('\n')
  
  # The byte number of one record
  sz = struct.calcsize(fmt)
  # The type number of one record
  ncols = len(fmt)

  if verbose:
    print('Keys:'),
    print(keys)
    print('Format:'),
    print(fmt)
    print('Size:'),
    print(sz)
    print('Columns:'),
    print(ncols)

  # Read data
  wholeFile = f.read()
  # split by alignment word
  chunks = wholeFile.split(b'\xaa\xbb')
  log = list()
  for chunk in chunks:
    if len(chunk) == sz:
      values = struct.unpack(fmt, chunk)
      record = list()
      for i in range(ncols):
        record.append(values[i])
      log.append(record)

  return log

#clid = p.connect(p.SHARED_MEMORY) 
Example #16
Source File: USBIP.py    From PythonUSBIP with The Unlicense
def size(self):
        return struct.calcsize(self.format()) 
Example #17
Source File: __init__.py    From script.module.inputstreamhelper with MIT License
def _supports_widevine():
        """Checks if Widevine is supported on the architecture/operating system/Kodi version."""
        if arch() not in config.WIDEVINE_SUPPORTED_ARCHS:
            log(4, 'Unsupported Widevine architecture found: {arch}', arch=arch())
            ok_dialog(localize(30004), localize(30007, arch=arch()))  # Widevine not available on this architecture
            return False

        if arch() == 'arm64' and system_os() != 'Android':
            import struct
            if struct.calcsize('P') * 8 == 64:
                log(4, 'Unsupported 64-bit userspace found. User needs 32-bit userspace on {arch}', arch=arch())
                ok_dialog(localize(30004), localize(30039))  # Widevine not available on ARM64
                return False

        if system_os() not in config.WIDEVINE_SUPPORTED_OS:
            log(4, 'Unsupported Widevine OS found: {os}', os=system_os())
            ok_dialog(localize(30004), localize(30011, os=system_os()))  # Operating system not supported by Widevine
            return False

        from distutils.version import LooseVersion  # pylint: disable=import-error,no-name-in-module,useless-suppression
        if LooseVersion(config.WIDEVINE_MINIMUM_KODI_VERSION[system_os()]) > LooseVersion(kodi_version()):
            log(4, 'Unsupported Kodi version for Widevine: {version}', version=kodi_version())
            ok_dialog(localize(30004), localize(30010, version=config.WIDEVINE_MINIMUM_KODI_VERSION[system_os()]))  # Kodi too old
            return False

        if 'WindowsApps' in translate_path('special://xbmcbin/'):  # uwp is not supported
            log(4, 'Unsupported UWP Kodi version detected.')
            ok_dialog(localize(30004), localize(30012))  # Windows Store Kodi falls short
            return False

        return True 
Example #18
Source File: arm_chromeos.py    From script.module.inputstreamhelper with MIT License
def gpt_header(self):
        """Returns the needed parts of the GPT header, can be easily expanded if necessary"""
        header_fmt = '<8s4sII4x4Q16sQ3I'
        header_size = calcsize(header_fmt)
        lba_size = config.CHROMEOS_BLOCK_SIZE  # assuming LBA size
        self.seek_stream(lba_size)

        # GPT Header entries: signature, revision, header_size, header_crc32, (reserved 4x skipped,) current_lba, backup_lba,
        #                     first_usable_lba, last_usable_lba, disk_guid, start_lba_part_entries, num_part_entries,
        #                     size_part_entry, crc32_part_entries
        _, _, _, _, _, _, _, _, _, start_lba_part_entries, num_part_entries, size_part_entry, _ = unpack(header_fmt, self.read_stream(header_size))

        return (start_lba_part_entries, num_part_entries, size_part_entry) 
Example #19
Source File: arm_chromeos.py    From script.module.inputstreamhelper with MIT License
def superblock(self):
        """Get relevant info from the superblock, assert it's an ext2 fs"""
        names = ('s_inodes_count', 's_blocks_count', 's_r_blocks_count', 's_free_blocks_count', 's_free_inodes_count', 's_first_data_block',
                 's_log_block_size', 's_log_frag_size', 's_blocks_per_group', 's_frags_per_group', 's_inodes_per_group', 's_mtime', 's_wtime',
                 's_mnt_count', 's_max_mnt_count', 's_magic', 's_state', 's_errors', 's_minor_rev_level', 's_lastcheck', 's_checkinterval',
                 's_creator_os', 's_rev_level', 's_def_resuid', 's_def_resgid', 's_first_ino', 's_inode_size', 's_block_group_nr',
                 's_feature_compat', 's_feature_incompat', 's_feature_ro_compat', 's_uuid', 's_volume_name', 's_last_mounted',
                 's_algorithm_usage_bitmap', 's_prealloc_block', 's_prealloc_dir_blocks')
        fmt = '<13I6H4I2HI2H3I16s16s64sI2B818x'
        fmt_len = calcsize(fmt)

        self.seek_stream(self.part_offset + 1024)  # superblock starts after 1024 byte
        pack = self.read_stream(fmt_len)
        sb_dict = dict(list(zip(names, unpack(fmt, pack))))

        sb_dict['s_magic'] = hex(sb_dict['s_magic'])
        assert sb_dict['s_magic'] == '0xef53'  # assuming/checking this is an ext2 fs

        block_groups_count1 = sb_dict['s_blocks_count'] / sb_dict['s_blocks_per_group']
        block_groups_count1 = int(block_groups_count1) if float(int(block_groups_count1)) == block_groups_count1 else int(block_groups_count1) + 1
        block_groups_count2 = sb_dict['s_inodes_count'] / sb_dict['s_inodes_per_group']
        block_groups_count2 = int(block_groups_count2) if float(int(block_groups_count2)) == block_groups_count2 else int(block_groups_count2) + 1
        assert block_groups_count1 == block_groups_count2
        sb_dict['block_groups_count'] = block_groups_count1

        self.blocksize = 1024 << sb_dict['s_log_block_size']

        return sb_dict 
Example #20
Source File: arm_chromeos.py    From script.module.inputstreamhelper with MIT License
def block_group(self):
        """Get info about a block group"""
        names = ('bg_block_bitmap', 'bg_inode_bitmap', 'bg_inode_table', 'bg_free_blocks_count', 'bg_free_inodes_count', 'bg_used_dirs_count', 'bg_pad')
        fmt = '<3I4H12x'
        fmt_len = calcsize(fmt)

        pack = self.read_stream(fmt_len)
        blk = unpack(fmt, pack)

        blk_dict = dict(list(zip(names, blk)))

        return blk_dict 
Example #21
Source File: packetcodec.py    From lifx-python with GNU Affero General Public License v3.0
def decode(self, bs):
        if len(bs) != calcsize(self.pack_str):
            print( 'could not decode %s' % (self.name, ) )
            print( tohex(bs))
            return
        data = unpack(self.pack_str, bs)
        self.data = dict( zip(self.pack_struct, data) ) 
Example #22
Source File: support.py    From jawfish with MIT License
def calcobjsize(fmt):
    return struct.calcsize(_header + fmt + _align) 
Example #23
Source File: support.py    From jawfish with MIT License
def calcvobjsize(fmt):
    return struct.calcsize(_vheader + fmt + _align) 
Example #24
Source File: pyT4.py    From Sinopac-Order-API with MIT License
def convert_stock_bytes_to_dict(stock_order_res_bytes):
    """委託回報為bytes。所以先轉為有結構的NameTuple,但每個item得從bytes to utf8"""
    stock_record_field = 'trade_type,account,code_id,ord_price,ord_qty,ord_seq,ord_date,effective_date,' \
                         'ord_time,ord_no,ord_soruce,org_ord_seq,ord_bs,ord_type,market_id,price_type,ord_status,Msg'
    StockOrderRecord = namedtuple('StockOrderRecord', stock_record_field)
    stock_order_res_format = '2s15s6s6s3s6s8s8s6s5s3s6s1s2s1s1s2s60s'
    if len(stock_order_res_bytes) != struct.calcsize(stock_order_res_format):
        return stock_order_res_bytes
    stock_order_res = StockOrderRecord._make(struct.unpack_from(stock_order_res_format, stock_order_res_bytes))
    stock_order_res_lst = [str(item, 'cp950') for item in stock_order_res]
    return StockOrderRecord(*stock_order_res_lst)._asdict() 
Example #25
Source File: pyT4.py    From Sinopac-Order-API with MIT License
def convert_future_bytes_to_dict(future_order_res_bytes):
    future_record_field = 'trade_type,account,market_id,code_id,f_callput,ord_bs,ord_price,price_type,ord_qty,' \
                          'ord_no,ord_seq,ord_type,oct_type,f_mttype,f_composit,c_futopt,c_code,c_callput,' \
                          'c_buysell,c_price,c_quantity,ord_date,preord_date,ord_time,type,err_code,msg'
    FutureOrderRecord = namedtuple('FutureOrderRecord', future_record_field)
    future_order_res_format = '2s15s1s10s1s1s12s3s4s6s6s3s1s1s2s1s10s1s1s12s4s8s8s6s1s4s60s'
    if len(future_order_res_bytes) != struct.calcsize(future_order_res_format):
        return future_order_res_bytes
    future_order_res = FutureOrderRecord._make(struct.unpack_from(future_order_res_format, future_order_res_bytes))
    future_order_res_lst = [str(item, 'cp950') for item in future_order_res]
    return FutureOrderRecord(*future_order_res_lst)._asdict() 
Example #26
Source File: types.py    From pyspark-cassandra with Apache License 2.0
def _unpack(fmt, cvalue):
    stride = struct.calcsize(fmt)
    if len(cvalue) % stride != 0:
        raise ValueError('number of bytes must be a multiple of %s for format %s' % (stride, fmt))

    return [struct.unpack(fmt, cvalue[o:o + stride]) for o in range(0, len(cvalue), stride)]
Example #27
Source File: pyogginfo.py    From pybass with Apache License 2.0
def unpack(self, format, size = None):
		if size is None:
			size = struct.calcsize(format)
		return struct.unpack(format, self.read(size)) 
Example #28
Source File: packing.py    From pwnypack with MIT License
def pack_size(fmt, endian=None, target=None):
    endian = endian if endian is not None else target.endian if target is not None else pwnypack.target.target.endian
    if fmt and fmt[0] not in '@=<>!':
        if endian is pwnypack.target.Target.Endian.little:
            fmt = '<' + fmt
        elif endian is pwnypack.target.Target.Endian.big:
            fmt = '>' + fmt
        else:
            raise NotImplementedError('Unsupported endianness: %s' % endian)
    return struct.calcsize(fmt) 
Example #29
Source File: base.py    From PyVESC with Creative Commons Attribution 4.0 International
def __init__(cls, name, bases, clsdict):
        cls.can_id = None
        msg_id = clsdict['id']
        # make sure that message classes are final
        for klass in bases:
            if isinstance(klass, VESCMessage):
                raise TypeError("VESC messages cannot be inherited.")
        # check for duplicate id
        if msg_id in VESCMessage._msg_registry:
            raise TypeError("ID conflict with %s" % str(VESCMessage._msg_registry[msg_id]))
        else:
            VESCMessage._msg_registry[msg_id] = cls
        # initialize cls static variables
        cls._string_field = None
        cls._fmt_fields = ''
        cls._field_names = []
        cls._field_scalars = []
        for field, idx in zip(cls.fields, range(0, len(cls.fields))):
            cls._field_names.append(field[0])
            if len(field) >= 3:
                cls._field_scalars.append(field[2])
            if field[1] == 's':
                # string field, add % so we can vary the length
                cls._fmt_fields += '%u'
                cls._string_field = idx
            cls._fmt_fields += field[1]
        cls._full_msg_size = struct.calcsize(cls._fmt_fields)
        # check that at most 1 field is a string
        string_field_count = cls._fmt_fields.count('s')
        if string_field_count > 1:
            raise TypeError("Max number of string fields is 1.")
        if 'p' in cls._fmt_fields:
            raise TypeError("Field with format character 'p' detected. For string field use 's'.")
        super(VESCMessage, cls).__init__(name, bases, clsdict) 
Example #30
Source File: base.py    From PyVESC with Creative Commons Attribution 4.0 International
def unpack(msg_bytes):
        msg_id = struct.unpack_from(VESCMessage._endian_fmt + VESCMessage._id_fmt, msg_bytes, 0)
        msg_type = VESCMessage.msg_type(*msg_id)
        data = None
        if not (msg_type._string_field is None):
            # string field
            fmt_wo_string = msg_type._fmt_fields.replace('%u', '')
            fmt_wo_string = fmt_wo_string.replace('s', '')
            len_string = len(msg_bytes) - struct.calcsize(VESCMessage._endian_fmt + fmt_wo_string) - 1
            fmt_w_string = msg_type._fmt_fields % (len_string)
            data = struct.unpack_from(VESCMessage._endian_fmt + fmt_w_string, msg_bytes, 1)
        else:
            data = list(struct.unpack_from(VESCMessage._endian_fmt + msg_type._fmt_fields, msg_bytes, 1))
            for k, field in enumerate(data):
                try:
                    if msg_type._field_scalars[k] != 0:
                        data[k] = data[k]/msg_type._field_scalars[k]
                except (TypeError, IndexError) as e:
                    print("Error ecountered on field " + msg_type.fields[k][0])
                    print(e)
        msg = msg_type(*data)
        if not (msg_type._string_field is None):
            string_field_name = msg_type._field_names[msg_type._string_field]
            setattr(msg,
                    string_field_name,
                    getattr(msg, string_field_name).decode('ascii'))
        return msg