Python numpy.little_endian Examples

The following are 10 code examples showing how to use numpy.little_endian. Note that numpy.little_endian is a module-level boolean attribute (True on little-endian hosts), not a callable function; the snippets below reference it under the alias LITTLE_ENDIAN. All examples are extracted from open source projects, and the project, author, file, and license are listed above each one so you can trace it back to the original source.


You may also want to check out the other available functions and classes of the numpy module.
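
As a quick orientation before the examples, here is a minimal sketch (not taken from any of the projects below) of the pattern they all share: numpy.little_endian is consulted together with dtype.byteorder to decide whether a native-order array must be byteswapped before its raw bytes are written out as big-endian data. The names value, needs_swap, and payload are illustrative only.

import numpy as np

# numpy.little_endian is True when the host CPU stores values least
# significant byte first, False otherwise.
print(np.little_endian)

# Decide whether a native-byte-order ('=') array needs a byteswap before its
# raw bytes can be written as big-endian data, mirroring the checks below.
value = np.array(42, dtype='=i4')
needs_swap = (value.dtype.byteorder == '<' or
              (value.dtype.byteorder == '=' and np.little_endian))
payload = value.byteswap().tobytes() if needs_swap else value.tobytes()
print(payload)  # b'\x00\x00\x00*' -- big-endian regardless of host byte order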

Example 1
Project: me-ica   Author: ME-ICA   File: netcdf.py   License: GNU Lesser General Public License v2.1
def _write_var_data(self, name):
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tostring())
            count = var.data.size * var.data.itemsize
            self.fp.write(asbytes('0') * (var._vsize - count))
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                var.data.resize(shape)

            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                        (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tostring())
                # Padding
                count = rec.size * rec.itemsize
                self.fp.write(asbytes('0') * (var._vsize - count))
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize) 
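
The seek/tell dance at the top of _write_var_data (remember where the data will start, jump back to the header slot reserved at var._begin, write that offset, then return) can be hard to follow out of context. Below is a self-contained toy version using io.BytesIO; header_slot, the 4-byte placeholder, and the fixed 32-bit '>i' offset are assumptions for illustration, not the library's actual _pack_begin logic.

import io
import struct

fp = io.BytesIO()
fp.write(b'\x00' * 4)                    # placeholder header slot for the offset
header_slot = 0                          # stand-in for var._begin

data_begin = fp.tell()                   # like `the_beguine = self.fp.tell()`
fp.seek(header_slot)
fp.write(struct.pack('>i', data_begin))  # like self._pack_begin(the_beguine)
fp.seek(data_begin)
fp.write(b'variable data goes here')     # the actual payload follows the header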
Example 2
Project: me-ica   Author: ME-ICA   File: netcdf.py   License: GNU Lesser General Public License v2.1
def _write_values(self, values):
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [
                    (int, NC_INT),
                    (long, NC_INT),
                    (float, NC_FLOAT),
                    (basestring, NC_CHAR),
                    ]
            try:
                sample = values[0]
            except TypeError:
                sample = values
            for class_, nc_type in types:
                if isinstance(sample, class_): break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode

        values = asarray(values, dtype=dtype_)

        self.fp.write(asbytes(nc_type))

        if values.dtype.char == 'S':
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        if not values.shape and (values.dtype.byteorder == '<' or
                (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tostring())
        count = values.size * values.itemsize
        self.fp.write(asbytes('0') * (-count % 4))  # pad 
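
The padding expression at the end, (-count % 4), pads the written values out to the 4-byte alignment the NetCDF classic format expects. A quick sanity check of that arithmetic:

# (-count % 4) is the number of bytes needed to round count up to a multiple
# of 4; when count is already a multiple of 4, no padding is added.
for count in range(1, 9):
    pad = -count % 4
    print(count, pad, count + pad)
# 1 3 4
# 2 2 4
# 3 1 4
# 4 0 4
# 5 3 8
# ...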
Example 3
Project: lambda-packs   Author: ryfeus   File: netcdf.py   License: MIT License
def _write_var_data(self, name):
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tostring())
            count = var.data.size * var.data.itemsize
            self.fp.write(b'0' * (var._vsize - count))
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    var.data.resize(shape)
                except ValueError:
                    var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)

            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                        (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tostring())
                # Padding
                count = rec.size * rec.itemsize
                self.fp.write(b'0' * (var._vsize - count))
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize) 
Example 4
Project: Computable   Author: ktraunmueller   File: netcdf.py   License: MIT License
def _write_var_data(self, name):
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tostring())
            count = var.data.size * var.data.itemsize
            self.fp.write(b'0' * (var._vsize - count))
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                var.data.resize(shape)

            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                        (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tostring())
                # Padding
                count = rec.size * rec.itemsize
                self.fp.write(b'0' * (var._vsize - count))
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize) 
Example 5
Project: Computable   Author: ktraunmueller   File: netcdf.py   License: MIT License
def _write_values(self, values):
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [(t, NC_INT) for t in integer_types]
            types += [
                    (float, NC_FLOAT),
                    (str, NC_CHAR),
                    ]
            try:
                sample = values[0]
            except TypeError:
                sample = values
            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode

        values = asarray(values, dtype=dtype_)

        self.fp.write(asbytes(nc_type))

        if values.dtype.char == 'S':
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        if not values.shape and (values.dtype.byteorder == '<' or
                (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tostring())
        count = values.size * values.itemsize
        self.fp.write(b'0' * (-count % 4))  # pad 
Example 6
Project: GraphicDesignPatternByPython   Author: Relph1119   File: netcdf.py   License: MIT License
def _write_var_data(self, name):
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tostring())
            count = var.data.size * var.data.itemsize
            self._write_var_padding(var, var._vsize - count)
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    var.data.resize(shape)
                except ValueError:
                    var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)

            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                        (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tostring())
                # Padding
                count = rec.size * rec.itemsize
                self._write_var_padding(var, var._vsize - count)
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize) 
Example 7
Project: Splunking-Crime   Author: nccgroup   File: netcdf.py   License: GNU Affero General Public License v3.0
def _write_var_data(self, name):
        var = self.variables[name]

        # Set begin in file header.
        the_beguine = self.fp.tell()
        self.fp.seek(var._begin)
        self._pack_begin(the_beguine)
        self.fp.seek(the_beguine)

        # Write data.
        if not var.isrec:
            self.fp.write(var.data.tostring())
            count = var.data.size * var.data.itemsize
            self.fp.write(b'0' * (var._vsize - count))
        else:  # record variable
            # Handle rec vars with shape[0] < nrecs.
            if self._recs > len(var.data):
                shape = (self._recs,) + var.data.shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    var.data.resize(shape)
                except ValueError:
                    var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)

            pos0 = pos = self.fp.tell()
            for rec in var.data:
                # Apparently scalars cannot be converted to big endian. If we
                # try to convert a ``=i4`` scalar to, say, '>i4' the dtype
                # will remain as ``=i4``.
                if not rec.shape and (rec.dtype.byteorder == '<' or
                        (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
                    rec = rec.byteswap()
                self.fp.write(rec.tostring())
                # Padding
                count = rec.size * rec.itemsize
                self.fp.write(b'0' * (var._vsize - count))
                pos += self._recsize
                self.fp.seek(pos)
            self.fp.seek(pos0 + var._vsize) 
Example 8
Project: lambda-packs   Author: ryfeus   File: netcdf.py   License: MIT License
def _write_values(self, values):
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [(t, NC_INT) for t in integer_types]
            types += [
                    (float, NC_FLOAT),
                    (str, NC_CHAR)
                    ]
            # bytes index into scalars in py3k.  Check for "string" types
            if isinstance(values, text_type) or isinstance(values, binary_type):
                sample = values
            else:
                try:
                    sample = values[0]  # subscriptable?
                except TypeError:
                    sample = values     # scalar

            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        # asarray() dies with bytes and '>c' in py3k.  Change to 'S'
        dtype_ = 'S' if dtype_ == '>c' else dtype_

        values = asarray(values, dtype=dtype_)

        self.fp.write(asbytes(nc_type))

        if values.dtype.char == 'S':
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        if not values.shape and (values.dtype.byteorder == '<' or
                (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tostring())
        count = values.size * values.itemsize
        self.fp.write(b'0' * (-count % 4))  # pad 
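
One detail worth isolating from the branch above: when a single Python string is written, asarray(..., dtype='S') produces a 0-d 'S'-typed array, so values.size is 1 and the character count lives in values.itemsize instead, which is why nelems switches to itemsize when dtype.char is 'S'. A small standalone illustration:

import numpy as np

values = np.asarray('hello', dtype='S')                 # 0-d array of dtype 'S5'
print(values.dtype.char, values.size, values.itemsize)  # S 1 5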
Example 9
Project: GraphicDesignPatternByPython   Author: Relph1119   File: netcdf.py   License: MIT License
def _write_att_values(self, values):
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [(t, NC_INT) for t in integer_types]
            types += [
                    (float, NC_FLOAT),
                    (str, NC_CHAR)
                    ]
            # bytes index into scalars in py3k.  Check for "string" types
            if isinstance(values, text_type) or isinstance(values, binary_type):
                sample = values
            else:
                try:
                    sample = values[0]  # subscriptable?
                except TypeError:
                    sample = values     # scalar

            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        # asarray() dies with bytes and '>c' in py3k.  Change to 'S'
        dtype_ = 'S' if dtype_ == '>c' else dtype_

        values = asarray(values, dtype=dtype_)

        self.fp.write(asbytes(nc_type))

        if values.dtype.char == 'S':
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        if not values.shape and (values.dtype.byteorder == '<' or
                (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tostring())
        count = values.size * values.itemsize
        self.fp.write(b'\x00' * (-count % 4))  # pad 
Example 10
Project: Splunking-Crime   Author: nccgroup   File: netcdf.py   License: GNU Affero General Public License v3.0
def _write_values(self, values):
        if hasattr(values, 'dtype'):
            nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
        else:
            types = [(t, NC_INT) for t in integer_types]
            types += [
                    (float, NC_FLOAT),
                    (str, NC_CHAR)
                    ]
            # bytes index into scalars in py3k.  Check for "string" types
            if isinstance(values, text_type) or isinstance(values, binary_type):
                sample = values
            else:
                try:
                    sample = values[0]  # subscriptable?
                except TypeError:
                    sample = values     # scalar

            for class_, nc_type in types:
                if isinstance(sample, class_):
                    break

        typecode, size = TYPEMAP[nc_type]
        dtype_ = '>%s' % typecode
        # asarray() dies with bytes and '>c' in py3k.  Change to 'S'
        dtype_ = 'S' if dtype_ == '>c' else dtype_

        values = asarray(values, dtype=dtype_)

        self.fp.write(asbytes(nc_type))

        if values.dtype.char == 'S':
            nelems = values.itemsize
        else:
            nelems = values.size
        self._pack_int(nelems)

        if not values.shape and (values.dtype.byteorder == '<' or
                (values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
            values = values.byteswap()
        self.fp.write(values.tostring())
        count = values.size * values.itemsize
        self.fp.write(b'0' * (-count % 4))  # pad