Python numpy.index_exp() Examples

The following are 30 code examples of numpy.index_exp, drawn from open-source projects. The originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the numpy module.
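np.index_exp is not called like a regular function: it is an object whose bracket operator turns whatever is written between the brackets into a tuple of slices and indices that can be stored, combined, and applied to arrays later (np.s_ is the same object except that single items are not wrapped in a tuple). A minimal sketch:

import numpy as np

np.index_exp[2:5]        # (slice(2, 5, None),)
np.index_exp[:3, ::2]    # (slice(None, 3, None), slice(None, None, 2))

a = np.arange(12).reshape(3, 4)
idx = np.index_exp[1:, :2]
assert (a[idx] == a[1:, :2]).all()   # a stored index expression is reusable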
Example #1
Source File: arrayprint.py    From recruit with Apache License 2.0
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
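A short sketch of what the recursion above keeps, assuming the function is defined as shown and numpy's concatenate is imported (the array here is hypothetical):

import numpy as np
from numpy import concatenate

a = np.arange(100).reshape(10, 10)
corners = _leading_trailing(a, edgeitems=2)
# corners.shape == (4, 4): the 2 leading/trailing rows combined with the
# 2 leading/trailing columns, assembled by concatenating index_exp tuples.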
Example #2
Source File: types.py    From nata with MIT License
def is_basic_indexing(key: Any):
    indexing = np.index_exp[key]
    passes = []
    for ind in indexing:
        if isinstance(ind, (int, slice)):
            passes.append(True)
        elif ind is Ellipsis:
            passes.append(True)
        elif ind is np.newaxis:
            passes.append(True)
        else:
            passes.append(False)

    if all(passes):
        return True
    return False 
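A few illustrative calls, assuming the helper above is defined. np.index_exp[key] first normalizes any key into a tuple, so integers, slices, Ellipsis, and np.newaxis all count as basic indexing, while list or array keys (fancy indexing) do not:

import numpy as np

is_basic_indexing(3)                        # True: normalized to (3,)
is_basic_indexing(np.index_exp[1:5, ...])   # True: slices and Ellipsis
is_basic_indexing(np.newaxis)               # True
is_basic_indexing([0, 2, 4])                # False: a list means fancy indexing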
Example #3
Source File: axes.py    From nata with MIT License
def __getitem__(
        self, key: Union[int, slice, Tuple[Union[int, slice]]]
    ) -> "GridAxis":
        if not is_basic_indexing(key):
            raise IndexError("Only basic indexing is supported!")

        key = np.index_exp[key]
        requires_new_axis = False

        # first index corresponds to temporal slicing if ndim == axis_dim + 1
        # or alternatively -> check len of the axis -> number of temporal slices
        if len(self) != 1:
            # revert dimensionality reduction
            if isinstance(key[0], int):
                requires_new_axis = True
        else:
            requires_new_axis = True

        return self.__class__(
            self.data[key][np.newaxis] if requires_new_axis else self.data[key],
            name=self.name,
            label=self.label,
            unit=self.unit,
            axis_type=self.axis_type,
        ) 
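The np.index_exp[key] line above is what lets the method treat every basic key as a tuple, so key[0] can be inspected no matter how the axis was indexed:

import numpy as np

np.index_exp[3]        # (3,)
np.index_exp[1:4]      # (slice(1, 4, None),)
np.index_exp[2, 0:5]   # (2, slice(0, 5, None))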
Example #4
Source File: arrayprint.py    From coffeegrindsize with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #5
Source File: arrayprint.py    From GraphicDesignPatternByPython with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #6
Source File: arrayprint.py    From Mastering-Elasticsearch-7.0 with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #7
Source File: arrayprint.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #8
Source File: arrayprint.py    From vnpy_crypto with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #9
Source File: arrayprint.py    From lambda-packs with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #10
Source File: arrayprint.py    From Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #11
Source File: h5io.py    From westpa with MIT License
def from_string(cls, dsspec_string, default_h5file):
        alias = None
        
        h5file = default_h5file
        fields = dsspec_string.split(',')
        dsname = fields[0]
        slice = None
        
        for field in (field.strip() for field in fields[1:]):
            k,v = field.split('=')
            k = k.lower()
            if k == 'alias':
                alias = v
            elif k == 'slice':
                try:
                    slice = eval('numpy.index_exp' + v)
                except SyntaxError:
                    raise SyntaxError('invalid index expression {!r}'.format(v))
            elif k == 'file':
                h5file = v
            else:
                raise ValueError('invalid dataset option {!r}'.format(k))
            
        return cls(h5file, dsname, alias, slice) 
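The slice option is handled by appending the user-supplied bracket expression to the string 'numpy.index_exp' and evaluating it, which yields an ordinary index tuple. A sketch with a hypothetical option value:

import numpy

v = '[0:100:10]'
eval('numpy.index_exp' + v)   # (slice(0, 100, 10),)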
Example #12
Source File: h5io.py    From westpa with MIT License
def get_iteration_slice(h5object, iter_start, iter_stop=None, iter_stride=None):
    '''Create a slice for data corresponding to iterations [iter_start,iter_stop),
    with stride iter_step, in the given ``h5object``.'''
    obj_iter_start, obj_iter_stop = get_iter_range(h5object)
    
    if iter_stop is None: iter_stop = iter_start+1
    if iter_stride is None: iter_stride = 1
    
    if iter_start < obj_iter_start:
        raise IndexError('data for iteration {} not available in dataset {!r}'.format(iter_start, h5object))
    elif iter_start > obj_iter_stop:
        raise IndexError('data for iteration {} not available in dataset {!r}'.format(iter_stop, h5object))
    
    start_index = iter_start - obj_iter_start
    stop_index = iter_stop - obj_iter_start
    return numpy.index_exp[start_index:stop_index:iter_stride]
Example #13
Source File: arrayprint.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #14
Source File: arrayprint.py    From twitter-stock-recommendation with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #15
Source File: rc_dataset.py    From RC-experiments with GNU General Public License v3.0
def get_next_batch(self, mode, idx):
        """
        return next batch of data samples
        """
        batch_size = self.args.batch_size
        if mode == "train":
            dataset = self.train_data
            sample_num = self.train_sample_num
        elif mode == "valid":
            dataset = self.valid_data
            sample_num = self.valid_sample_num
        else:
            dataset = self.test_data
            sample_num = self.test_sample_num
        if mode == "train":
            start = self.train_idx[idx] * batch_size
            stop = (self.train_idx[idx] + 1) * batch_size
        else:
            start = idx * batch_size
            stop = (idx + 1) * batch_size if start < sample_num and (idx + 1) * batch_size < sample_num else -1
        samples = batch_size if stop != -1 else len(dataset[0]) - start
        _slice = np.index_exp[start:stop]
        return self.next_batch_feed_dict_by_dataset(dataset, _slice, samples) 
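The stored index expression can then be applied uniformly to each array in the dataset; a minimal sketch with hypothetical parallel arrays:

import numpy as np

_slice = np.index_exp[32:64]
questions, answers = np.arange(1000), np.arange(1000) * 2
batch_q, batch_a = questions[_slice], answers[_slice]   # both length-32 batches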
Example #16
Source File: arrayprint.py    From pySINDy with MIT License
def _leading_trailing(a, edgeitems, index=()):
    """
    Keep only the N-D corners (leading and trailing edges) of an array.

    Should be passed a base-class ndarray, since it makes no guarantees about
    preserving subclasses.
    """
    axis = len(index)
    if axis == a.ndim:
        return a[index]

    if a.shape[axis] > 2*edgeitems:
        return concatenate((
            _leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
            _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
        ), axis=axis)
    else:
        return _leading_trailing(a, edgeitems, index + np.index_exp[:]) 
Example #17
Source File: canvas.py    From plat with MIT License
def additive_composite(src, src_mask, dst):
    '''
    Return the additive composite of src and dst.
    '''
    out = np.empty(dst.shape, dtype = 'float')
    alpha = np.index_exp[3:, :, :]
    rgb = np.index_exp[:3, :, :]
    if src_mask is not None:
        out[alpha] = np.maximum(src_mask,dst[alpha])
    else:
        out[alpha] = 1.0
    out[rgb] = np.maximum(src[rgb],dst[rgb])
    np.clip(out, 0, 1.0, out=out)  # clip in place; np.clip otherwise returns a new array that was being discarded
    return out
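The two selectors above are just index tuples for a channels-first RGBA image of shape (4, H, W); a quick check with a hypothetical array:

import numpy as np

alpha = np.index_exp[3:, :, :]   # (slice(3, None), slice(None), slice(None))
rgb = np.index_exp[:3, :, :]
img = np.random.rand(4, 8, 8)
img[alpha].shape, img[rgb].shape   # ((1, 8, 8), (3, 8, 8))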

Example #18
Source File: w_pdist.py    From westpa with MIT License
def process_args(self, args):
        self.progress.process_args(args)
        self.data_reader.process_args(args)
        self.input_dssynth.h5filename = self.data_reader.we_h5filename
        self.input_dssynth.process_args(args)
        self.dsspec = self.input_dssynth.dsspec
        
        # Carrying an open HDF5 file across a fork() seems to corrupt the entire HDF5 library
        # Open the WEST HDF5 file just long enough to process our iteration range, then close
        # and reopen in go() [which executes after the fork]
        with self.data_reader:
            self.iter_range.process_args(args)
        
        self.wt_dsspec = SingleIterDSSpec(self.data_reader.we_h5filename, 'seg_index', slice=numpy.index_exp['weight'])
        
        self.binspec = args.bins
        self.output_filename = args.output
        self.ignore_out_of_range = bool(args.ignore_out_of_range)
        self.compress_output = args.compress or False 
Example #19
Source File: grad.py    From ProxImaL with MIT License
def forward(self, inputs, outputs):
        """The forward operator for n-d gradients.

        Reads from inputs and writes to outputs.
        """

        if self.implementation == Impl['halide'] and \
                (len(self.shape) == 3 or len(self.shape) == 4) and self.dims == 2:
            # Halide implementation
            if len(self.shape) == 3:
                tmpin = np.asfortranarray((inputs[0][..., np.newaxis]).astype(np.float32))
            else:
                tmpin = np.asfortranarray((inputs[0]).astype(np.float32))

            Halide('A_grad.cpp').A_grad(tmpin, self.tmpfwd)  # Call
            np.copyto(outputs[0], np.reshape(self.tmpfwd, self.shape))

        else:

            # Input
            f = inputs[0]

            # Build up index for shifted array
            ss = f.shape
            stack_arr = ()
            for j in range(self.dims):

                # Add grad for this dimension (same as index)
                il = ()
                for i in range(len(ss)):
                    if i == j:
                        il += np.index_exp[np.r_[1:ss[j], ss[j] - 1]]
                    else:
                        il += np.index_exp[:]

                fgrad_j = f[il] - f
                stack_arr += (fgrad_j,)

            # Stack all grads as new dimension
            np.copyto(outputs[0], np.stack(stack_arr, axis=-1)) 
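The index built with np.r_ above shifts each axis by one element while repeating the last one; a one-dimensional sketch of the same construction (hypothetical input):

import numpy as np

f = np.arange(5.0)
il = np.index_exp[np.r_[1:5, 4]]   # indices [1, 2, 3, 4, 4]
f[il] - f                          # forward difference with replicated boundary: [1, 1, 1, 1, 0]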
Example #20
Source File: utils.py    From ProxImaL with MIT License
def psnr(x, ref, pad=None, maxval=1.0):

    # Check size
    if ref.shape != x.shape:
        raise Exception("Wrong size in PSNR evaluation.")

    # Remove padding if necessary
    if pad is not None:

        ss = x.shape
        il = ()
        for j in range(len(ss)):
            if len(pad) >= j + 1 and pad[j] > 0:
                currpad = pad[j]
                il += np.index_exp[currpad:-currpad]
            else:
                il += np.index_exp[:]

        mse = np.mean((x[il] - ref[il])**2)
    else:
        mse = np.mean((x - ref)**2)

    # MSE
    if mse > np.finfo(float).eps:
        return 10.0 * np.log10(maxval**2 / mse)
    else:
        return np.inf
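A sketch of calling psnr with per-dimension padding, assuming the function above (the arrays here are hypothetical):

import numpy as np

x = np.random.rand(64, 64)
ref = x + 0.01 * np.random.randn(64, 64)
psnr(x, ref, pad=(8, 8))   # compares only the central (48, 48) region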


Example #21
Source File: transformationOperations.py    From graphAttack with MIT License
def __init__(self, inputA=None, indexExp=None):

        if indexExp is None:
            raise ValueError("Must provide index Expression as numpy.index_exp!")
        self.indexExp = indexExp
        super().__init__(inputA) 
Example #22
Source File: canvas.py    From plat with MIT License
def __init__(self, width, height, xmin, xmax, ymin, ymax, mask_name, image_size, do_check_bounds, init_black=False):
        self.pixels = np.zeros((channels, height, width))
        if init_black:
            alpha_channel = np.index_exp[3:, :, :]
            self.pixels[alpha_channel] = 1.0
        self.canvas_xmin = 0
        self.canvas_xmax = width
        self.canvas_ymin = 0
        self.canvas_ymax = height
        self.xmin = xmin
        self.xmax = xmax
        self.ymin = ymin
        self.ymax = ymax

        self.do_check_bounds = do_check_bounds

        self.canvas_xspread = self.canvas_xmax - self.canvas_xmin
        self.canvas_yspread = self.canvas_ymax - self.canvas_ymin
        self.xspread = self.xmax - self.xmin
        self.yspread = self.ymax - self.ymin
        self.xspread_ratio = float(self.canvas_xspread) / self.xspread
        self.yspread_ratio = float(self.canvas_yspread) / self.yspread

        self.gsize = image_size
        self.gsize2 = image_size/2
        self.gsize4 = image_size/4

        if mask_name is not None:
            _, _, mask_images = anchors_from_image("mask/{}_mask{}.png".format(mask_name, image_size), image_size=(image_size, image_size))
            # _, _, mask_images = anchors_from_image("mask/rounded_mask{}.png".format(gsize), image_size=(gsize, gsize))
            # _, _, mask_images = anchors_from_image("mask/hexagons/hex1_{}_blur.png".format(gsize), image_size=(gsize, gsize))
            self.mask = mask_images[0][0]
        else:
            self.mask = None

    # To map
    # [A, B] --> [a, b]
    # use this formula
    # (val - A)*(b-a)/(B-A) + a
    # A,B is virtual
    # a,b is canvas 
Example #23
Source File: canvas.py    From plat with MIT License
def alpha_composite(src, src_mask, dst):
    '''
    Return the alpha composite of src and dst.

    Parameters:
    src -- RGBA in range 0.0 - 1.0
    dst -- RGBA in range 0.0 - 1.0

    The algorithm comes from http://en.wikipedia.org/wiki/Alpha_compositing
    '''
    out = np.empty(dst.shape, dtype = 'float')

    src_shape = src.shape
    if src_shape[1] == 1 and src_shape[2] == 1:
        return out

    alpha = np.index_exp[3:, :, :]
    rgb = np.index_exp[:3, :, :]
    epsilon = 0.001
    if src_mask is not None:
        src_a = np.maximum(src_mask, epsilon)
    else:
        src_a = 1.0
    dst_a = np.maximum(dst[alpha], epsilon)
    out[alpha] = src_a+dst_a*(1-src_a)
    old_setting = np.seterr(invalid = 'ignore')
    out[rgb] = (src[rgb]*src_a + dst[rgb]*dst_a*(1-src_a))/out[alpha]
    np.seterr(**old_setting)
    np.clip(out, 0, 1.0, out=out)  # clip in place; np.clip otherwise returns a new array that was being discarded
    return out 
Example #24
Source File: voxels_torus.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def midpoints(x):
    sl = ()
    for i in range(x.ndim):
        x = (x[sl + np.index_exp[:-1]] + x[sl + np.index_exp[1:]]) / 2.0
        sl += np.index_exp[:]
    return x

# prepare some coordinates, and attach rgb values to each 
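A sketch of midpoints applied to a coordinate grid, assuming the function above (sizes here are hypothetical):

import numpy as np

x, y, z = np.indices((9, 9, 9)) / 8.0
midpoints(x).shape   # (8, 8, 8): cell-centre coordinates along every axis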
Example #25
Source File: voxels_rgb.py    From python3_ios with BSD 3-Clause "New" or "Revised" License
def midpoints(x):
    sl = ()
    for i in range(x.ndim):
        x = (x[sl + np.index_exp[:-1]] + x[sl + np.index_exp[1:]]) / 2.0
        sl += np.index_exp[:]
    return x

# prepare some coordinates, and attach rgb values to each 
Example #26
Source File: data_loader.py    From RegRCNN with Apache License 2.0
def __init__(self, cf, data, sample_pids_w_replace=True, max_batches=None, raise_stop_iteration=False, seed=0):
        super(BatchGenerator, self).__init__(cf, data, sample_pids_w_replace=sample_pids_w_replace,
                                             max_batches=max_batches, raise_stop_iteration=raise_stop_iteration,
                                             seed=seed)

        self.chans = cf.channels if cf.channels is not None else np.index_exp[:]
        assert hasattr(self.chans, "__iter__"), "self.chans has to be list-like to maintain dims when slicing"

        self.crop_margin = np.array(self.cf.patch_size) / 8.  # min distance of ROI center to edge of cropped_patch.
        self.p_fg = 0.5
        self.empty_samples_max_ratio = 0.6

        self.balance_target_distribution(plot=sample_pids_w_replace) 
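np.index_exp[:] used as the default above is the tuple (slice(None),); it is iterable, so it satisfies the assert, and selecting with it keeps the channel dimension intact (hypothetical data):

import numpy as np

chans = np.index_exp[:]            # (slice(None, None, None),)
data = np.random.rand(3, 16, 16)
data[chans].shape                  # (3, 16, 16): all channels, dims preserved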
Example #27
Source File: data_loader.py    From RegRCNN with Apache License 2.0
def __init__(self, cf, data, mode='test'):
        super(PatientBatchIterator, self).__init__(cf, data)

        self.patch_size = cf.patch_size_2D + [1] if cf.dim == 2 else cf.patch_size_3D
        self.chans = cf.channels if cf.channels is not None else np.index_exp[:]
        assert hasattr(self.chans, "__iter__"), "self.chans has to be list-like to maintain dims when slicing"

        if (mode=="validation" and hasattr(self.cf, 'val_against_exact_gt') and self.cf.val_against_exact_gt) or \
                (mode == 'test' and self.cf.test_against_exact_gt):
            self.gt_prefix = 'exact_'
            print("PatientIterator: Loading exact Ground Truths.")
        else:
            self.gt_prefix = ''

        self.patient_ix = 0  # running index over all patients in set 
Example #28
Source File: data_loader.py    From RegRCNN with Apache License 2.0
def __init__(self, cf, data, sample_pids_w_replace=True, max_batches=None, raise_stop_iteration=False, seed=0):
        super(BatchGenerator, self).__init__(cf, data, sample_pids_w_replace=sample_pids_w_replace,
                                             max_batches=max_batches, raise_stop_iteration=raise_stop_iteration,
                                             seed=seed)

        self.chans = cf.channels if cf.channels is not None else np.index_exp[:]
        assert hasattr(self.chans, "__iter__"), "self.chans has to be list-like to maintain dims when slicing"

        self.crop_margin = np.array(self.cf.patch_size) / 8.  # min distance of ROI center to edge of cropped_patch.
        self.p_fg = 0.5
        self.empty_samples_max_ratio = 0.6

        self.balance_target_distribution(plot=sample_pids_w_replace) 
Example #29
Source File: data_loader.py    From RegRCNN with Apache License 2.0
def __init__(self, cf, data, mode='test'):
        super(PatientBatchIterator, self).__init__(cf, data)

        self.patch_size = cf.patch_size_2D + [1] if cf.dim == 2 else cf.patch_size_3D
        self.chans = cf.channels if cf.channels is not None else np.index_exp[:]
        assert hasattr(self.chans, "__iter__"), "self.chans has to be list-like to maintain dims when slicing"

        self.patient_ix = 0  # running index over all patients in set 
Example #30
Source File: axes.py    From nata with MIT License
def __getitem__(
        self, key: Union[int, slice, Tuple[Union[int, slice]]]
    ) -> "Axis":
        if not is_basic_indexing(key):
            raise IndexError("Only basic indexing is supported!")

        key = np.index_exp[key]
        requires_new_axis = False

        # > determine if axis extension is required
        # 1st index (temporal slicing) not hidden if ndim == axis_dim + 1
        # or alternatively -> check len of the axis -> number of temporal slices
        if len(self) != 1:
            # revert dimensionality reduction
            if isinstance(key[0], int):
                requires_new_axis = True
        else:
            requires_new_axis = True

        data = self.data[key]

        if requires_new_axis:
            data = data[np.newaxis]

        return self.__class__(
            data, name=self.name, label=self.label, unit=self.unit,
        )