Python mxnet.nd.NDArray() Examples

The following are 20 code examples of mxnet.nd.NDArray(), drawn from open-source projects; the source file, project, and license are noted above each example. You may also want to check out all available functions and classes of the module mxnet.nd.
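Before the examples, a quick orientation: mxnet.nd.NDArray is MXNet's n-dimensional array type. It is normally produced by factory functions such as nd.array, nd.zeros, or nd.empty rather than constructed directly, and isinstance(x, nd.NDArray) is the type check used throughout the examples below. A minimal sketch:

from mxnet import nd

a = nd.array([[1, 2, 3], [4, 5, 6]])  # float32 NDArray built from a nested list
print(a.shape)                        # (2, 3)
print(isinstance(a, nd.NDArray))      # True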
Example #1
Source File: dataloader.py    From gluon-cv with Apache License 2.0
import numpy as np
from mxnet import context, nd

def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0)) 
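A hedged usage sketch with hypothetical ragged labels (note that the np.asarray call above builds an object array from ragged inputs only on the older NumPy versions these projects target):

labels = [np.zeros((4, 6)), np.zeros((2, 6)), np.zeros((5, 6))]
batch = default_mp_pad_batchify_fn(labels)
print(batch.shape)  # (3, 5, 6) -- padded to the longest label, filler value -1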
Example #2
Source File: parallelized_loader.py    From gluon-ts with Apache License 2.0
import mxnet as mx
import numpy as np
from mxnet import nd
# DataBatch, MPWorkerInfo, and stack come from the surrounding gluon-ts module.

def _as_in_context(batch: dict, ctx: mx.Context) -> DataBatch:
    """Move data into a new context; should only be called in the main process."""
    assert (
        not MPWorkerInfo.worker_process
    ), "This function is not meant to be used in workers."
    batch = {
        k: v.as_in_context(ctx) if isinstance(v, nd.NDArray)
        # Workaround due to MXNet not being able to handle NDArrays with 0 in shape properly:
        else (
            stack(v, False, v.dtype, ctx)
            if isinstance(v[0], np.ndarray) and 0 in v[0].shape
            else v
        )
        for k, v in batch.items()
    }
    return batch 
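The heavy lifting is NDArray.as_in_context; DataBatch, MPWorkerInfo, and stack are gluon-ts internals. A minimal sketch of the core idea, detached from those internals:

import mxnet as mx
from mxnet import nd

batch = {"target": nd.ones((8, 24)), "meta": [0, 1, 2]}
moved = {k: v.as_in_context(mx.cpu()) if isinstance(v, nd.NDArray) else v
         for k, v in batch.items()}
print(moved["target"].context)  # cpu(0)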
Example #3
Source File: dataloader.py    From panoptic-fpn-gluon with Apache License 2.0
import numpy as np
from mxnet import context, nd

def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0)) 
Example #4
Source File: parallelized_loader.py    From gluon-ts with Apache License 2.0
import functools
from typing import List, Union

import mxnet as mx
import numpy as np

def _pad_arrays(
    data: List[Union[np.ndarray, mx.nd.NDArray]], axis: int = 0,
) -> List[Union[np.ndarray, mx.nd.NDArray]]:
    assert isinstance(data[0], (np.ndarray, mx.nd.NDArray))
    is_mx = isinstance(data[0], mx.nd.NDArray)

    # MxNet causes a segfault when persisting 0-length arrays. As such,
    # we add a dummy pad of length 1 to 0-length dims.
    max_len = max(1, functools.reduce(max, (x.shape[axis] for x in data)))
    padded_data = []

    for x in data:
        # MxNet lacks the functionality to pad n-D arrays consistently.
        # We fall back to numpy if x is an mx.nd.NDArray.
        if is_mx:
            x = x.asnumpy()

        pad_size = max_len - x.shape[axis]
        pad_lengths = [(0, 0)] * x.ndim
        pad_lengths[axis] = (0, pad_size)
        x_padded = np.pad(x, mode="constant", pad_width=pad_lengths)

        padded_data.append(x_padded if not is_mx else mx.nd.array(x_padded))

    return padded_data 
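A hedged usage sketch with hypothetical inputs, including the 0-length case the comments warn about:

import numpy as np

arrays = [np.ones((3, 2)), np.ones((5, 2)), np.ones((0, 2))]
padded = _pad_arrays(arrays, axis=0)
print([a.shape for a in padded])  # [(5, 2), (5, 2), (5, 2)] -- zero-padded along axis 0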
Example #5
Source File: dataloader.py    From cascade_rcnn_gluon with Apache License 2.0
import numpy as np
from mxnet import context, nd

def default_mp_pad_batchify_fn(data):
    """Use shared memory for collating data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        out = nd.empty((len(data),) + data[0].shape, dtype=data[0].dtype,
                       ctx=context.Context('cpu_shared', 0))
        return nd.stack(*data, out=out)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_mp_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        batch_size = len(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((batch_size, pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype, ctx=context.Context('cpu_shared', 0)) 
Example #6
Source File: dataloader.py    From cascade_rcnn_gluon with Apache License 2.0
import numpy as np
from mxnet import nd

def default_pad_batchify_fn(data):
    """Collate data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        return nd.stack(*data)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((len(data), pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype) 
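A hedged usage sketch of the fast path: uniformly shaped NDArrays are simply stacked, while ragged NumPy labels fall through to the padding branch above.

from mxnet import nd

images = [nd.ones((3, 32, 32)) for _ in range(4)]
print(default_pad_batchify_fn(images).shape)  # (4, 3, 32, 32) via nd.stack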
Example #7
Source File: utils.py    From autogluon with Apache License 2.0
from mxnet import nd

def smooth(label, classes, eta=0.1):
    if isinstance(label, nd.NDArray):
        label = [label]
    smoothed = [
        l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        for l in label
    ]
    return smoothed 
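A hedged usage sketch: with eta=0.1 and 10 classes, the true class receives 1 - 0.1 + 0.1/10 = 0.91 and every other class 0.01.

from mxnet import nd

labels = nd.array([0, 2])
smoothed = smooth(labels, classes=10, eta=0.1)
print(smoothed[0][0])  # [0.91, 0.01, ..., 0.01]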
Example #8
Source File: utils.py    From autogluon with Apache License 2.0
from mxnet import nd

def mixup_transform(label, classes, lam=1, eta=0.0):
    if isinstance(label, nd.NDArray):
        label = [label]
    res = []
    for l in label:
        y1 = l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        y2 = l[::-1].one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        res.append(lam * y1 + (1 - lam) * y2)
    return res 
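A hedged usage sketch: each one-hot label is blended with the label of the reversed batch, weighted by lam.

from mxnet import nd

labels = nd.array([0, 1, 2])
mixed = mixup_transform(labels, classes=3, lam=0.7)
print(mixed[0][0])  # [0.7, 0.0, 0.3] -- 0.7 * one_hot(0) + 0.3 * one_hot(2)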
Example #9
Source File: vector_embedder.py    From coach with Apache License 2.0
from types import ModuleType

from mxnet import nd
# nd_sym_type and EmbedderScheme come from the surrounding coach module.

def hybrid_forward(self, F: ModuleType, x: nd_sym_type, *args, **kwargs) -> nd_sym_type:
        """
        Used for forward pass through embedder network.

        :param F: backend api, either `nd` or `sym` (if block has been hybridized).
        :type F: nd or sym
        :param x: vector representing environment state, of shape (batch_size, in_channels).
        :return: embedding of environment state, of shape (batch_size, channels).
        """
        if isinstance(x, nd.NDArray) and len(x.shape) != 2 and self.scheme != EmbedderScheme.Empty:
            raise ValueError("Vector embedders expect the input size to have 2 dimensions. The given size is: {}"
                             .format(x.shape))
        return super(VectorEmbedder, self).hybrid_forward(F, x, *args, **kwargs) 
Example #10
Source File: parallelized_loader.py    From gluon-ts with Apache License 2.0
from typing import Any, List, Union

import mxnet as mx
import numpy as np

def _is_stackable(
    arrays: List[Union[np.ndarray, mx.nd.NDArray, Any]], axis: int = 0,
) -> bool:
    """
    Check whether elements are scalars, have too few dimensions, or have
    target axes of equal length, i.e. whether they are directly `stack`able.
    """
    if isinstance(arrays[0], (mx.nd.NDArray, np.ndarray)):
        s = set(arr.shape[axis] for arr in arrays)
        return len(s) <= 1 and arrays[0].shape[axis] != 0
    return True 
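A hedged usage sketch with hypothetical inputs:

import numpy as np

print(_is_stackable([np.ones((3, 2)), np.ones((3, 4))], axis=0))  # True -- axis-0 lengths agree
print(_is_stackable([np.ones((3, 2)), np.ones((5, 2))], axis=0))  # False -- ragged along axis 0
print(_is_stackable([1.0, 2.0, 3.0]))                             # True -- scalars pass through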
Example #11
Source File: parallelized_loader.py    From gluon-ts with Apache License 2.0
from mxnet import nd

def rebuild_ndarray(pid, fd, shape, dtype):
        """Rebuild ndarray from pickled shared memory"""
        # pylint: disable=no-value-for-parameter
        fd = fd.detach()
        return nd.NDArray(
            nd.ndarray._new_from_shared_mem(pid, fd, shape, dtype)
        ) 
Example #12
Source File: parallelized_loader.py    From gluon-ts with Apache License 2.0
from mxnet import nd

def rebuild_ndarray(*args):
        """Rebuild ndarray from pickled shared memory"""
        # pylint: disable=no-value-for-parameter
        return nd.NDArray(nd.ndarray._new_from_shared_mem(*args)) 
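Both variants above mirror MXNet's own gluon.data.dataloader: rebuild_ndarray is the receiving half of a pickling pair, with the fd-based variant used where file descriptors can travel over multiprocessing and the *args variant used elsewhere. A hedged sketch of how such a pair is typically registered (following the MXNet source; _to_shared_mem is a private NDArray method):

from multiprocessing.reduction import ForkingPickler

def reduce_ndarray(data):
    """Reduce ndarray to a shared memory handle for serialization."""
    return rebuild_ndarray, data._to_shared_mem()

ForkingPickler.register(nd.NDArray, reduce_ndarray)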
Example #13
Source File: dataloader.py    From dgl with Apache License 2.0
import dgl
from mxnet import nd
# Assumption: Pad is gluoncv.data.batchify.Pad, whose constructor accepts num_shards.

def dgl_mp_batchify_fn(data):
    if isinstance(data[0], tuple):
        data = zip(*data)
        return [dgl_mp_batchify_fn(i) for i in data]
    
    for dt in data:
        if dt is not None:
            if isinstance(dt, dgl.DGLGraph):
                return [d for d in data if isinstance(d, dgl.DGLGraph)]
            elif isinstance(dt, nd.NDArray):
                pad = Pad(axis=(1, 2), num_shards=1, ret_length=False)
                data_list = [dt for dt in data if dt is not None]
                return pad(data_list) 
Example #14
Source File: train_imagenet.py    From MXNet-Deep-Learning-in-Action with Apache License 2.0
from mxnet import nd

def smooth(label, classes, eta=0.1):
    if isinstance(label, nd.NDArray):
        label = [label]
    smoothed = []
    for l in label:
        res = l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        smoothed.append(res)
    return smoothed 
Example #15
Source File: train_imagenet.py    From MXNet-Deep-Learning-in-Action with Apache License 2.0
from mxnet import nd

def mixup_transform(label, classes, lam=1, eta=0.0):
    if isinstance(label, nd.NDArray):
        label = [label]
    res = []
    for l in label:
        y1 = l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        y2 = l[::-1].one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        res.append(lam * y1 + (1 - lam) * y2)
    return res 
Example #16
Source File: model.py    From LSTNet-Gluon with Apache License 2.0
from mxnet import nd

def forward(self, x):
        """
        :param nd.NDArray x: input data in NTC layout (N: batch-size, T: sequence len, C: channels)
        :return: output of LSTNet in NC layout
        :rtype: nd.NDArray
        """
        # Convolution
        c = self.conv(x.transpose((0, 2, 1)))  # Transpose NTC to NCT (a.k.a. NCW) before convolution
        c = self.dropout(c)

        # GRU
        r = self.gru(c.transpose((2, 0, 1)))  # Transpose NCT to TNC before GRU
        r = r[-1]  # Only keep the last output
        r = self.dropout(r)  # Now in NC layout

        # Skip GRU
        # Slice off multiples of skip from convolution output
        skip_c = c[:, :, -(c.shape[2] // self.skip) * self.skip:]
        skip_c = skip_c.reshape(c.shape[0], c.shape[1], -1, self.skip)  # Reshape to NCT x skip
        skip_c = skip_c.transpose((2, 0, 3, 1))  # Transpose to T x N x skip x C
        skip_c = skip_c.reshape(skip_c.shape[0], -1, skip_c.shape[3])  # Reshape to T x (Nxskip) x C
        s = self.skip_gru(skip_c)
        s = s[-1]  # Only keep the last output (now in (Nxskip) x C layout)
        s = s.reshape(x.shape[0], -1)  # Now in N x (skipxC) layout

        # FC layer
        fc = self.fc(nd.concat(r, s))  # NC layout

        # Autoregressive highway
        ar_x = x[:, -self.ar_window:, :]  # NTC layout
        ar_x = ar_x.transpose((0, 2, 1))  # NCT layout
        ar_x = ar_x.reshape(-1, ar_x.shape[2])  # (NC) x T layout
        ar = self.ar_fc(ar_x)
        ar = ar.reshape(x.shape[0], -1)  # NC layout

        # Add autoregressive and fc outputs
        res = fc + ar
        return res 
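A hedged usage sketch (net is a hypothetical, already-constructed LSTNet instance; exact hyper-parameters come from the surrounding project, and the FC layer is assumed to map back to the C input channels, as in LSTNet):

from mxnet import nd

x = nd.random.uniform(shape=(16, 168, 8))  # NTC: batch of 16, 168 time steps, 8 series
y = net(x)  # runs the forward pass above
print(y.shape)  # (16, 8) -- NC layout, one prediction per series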
Example #17
Source File: dataloader.py    From panoptic-fpn-gluon with Apache License 2.0
import numpy as np
from mxnet import nd

def default_pad_batchify_fn(data):
    """Collate data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        return nd.stack(*data)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((len(data), pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype) 
Example #18
Source File: dataloader.py    From gluon-cv with Apache License 2.0
import numpy as np
from mxnet import context, nd

def tsn_mp_batchify_fn(data):
    """Collate data into a batch, using shared memory for stacking.

    A modified batchify function for temporal segment networks: `nd.stack` is
    replaced by `nd.concat`, since each sample already carries a batch dimension.
    """
    if isinstance(data[0], nd.NDArray):
        return nd.concat(*data, dim=0)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [tsn_mp_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        return nd.array(data, dtype=data.dtype,
                        ctx=context.Context('cpu_shared', 0)) 
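A hedged usage sketch: each sample already carries a segment dimension, so collating concatenates rather than stacks.

from mxnet import nd

clips = [nd.ones((3, 2, 224, 224)) for _ in range(4)]  # 4 samples, 3 segments each
print(tsn_mp_batchify_fn(clips).shape)  # (12, 2, 224, 224) via nd.concat on dim 0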
Example #19
Source File: dataloader.py    From gluon-cv with Apache License 2.0
import numpy as np
from mxnet import nd

def default_pad_batchify_fn(data):
    """Collate data into a batch; labels are padded to the same shape."""
    if isinstance(data[0], nd.NDArray):
        return nd.stack(*data)
    elif isinstance(data[0], tuple):
        data = zip(*data)
        return [default_pad_batchify_fn(i) for i in data]
    else:
        data = np.asarray(data)
        pad = max([l.shape[0] for l in data] + [1,])
        buf = np.full((len(data), pad, data[0].shape[-1]), -1, dtype=data[0].dtype)
        for i, l in enumerate(data):
            buf[i][:l.shape[0], :] = l
        return nd.array(buf, dtype=data[0].dtype) 
Example #20
Source File: utils.py    From YOLO with MIT License
from mxnet import nd

def deal_output(y: nd.NDArray, s, b, c):
    """Split the raw YOLO network output into class, confidence, and box parts.

    :param y: network output of shape (batch, s*s*(c + 5*b))
    :param s: grid size (the image is divided into s x s cells)
    :param b: number of bounding boxes predicted per cell
    :param c: number of classes
    :return: label of shape (batch, s*s, c), preds of shape (batch, s*s*b),
        and location of shape (batch, s*s, b, 4)
    """
    label = y[:, 0:s * s * c]
    preds = y[:, s * s * c: s * s * c + s * s * b]
    location = y[:, s * s * c + s * s * b:]
    label = nd.reshape(label, shape=(-1, s * s, c))
    location = nd.reshape(location, shape=(-1, s * s, b, 4))
    return label, preds, location
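A hedged usage sketch with the classic YOLOv1 settings (s=7, b=2, c=20; the batch of zeros is hypothetical):

from mxnet import nd

y = nd.zeros((4, 7 * 7 * (20 + 5 * 2)))  # (batch, s*s*(c + 5*b)) = (4, 1470)
label, preds, location = deal_output(y, s=7, b=2, c=20)
print(label.shape, preds.shape, location.shape)  # (4, 49, 20) (4, 98) (4, 49, 2, 4)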