Python chainer.backends Examples

The following are 30 code examples of the chainer.backends module. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module chainer, or try the search function.
Example #1
Source File: test_convolution_2d.py    From chainer with MIT License    6 votes
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.convolution_2d itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        with chainer.using_config('use_ideep', 'never'):
            y_expected = F.convolution_2d(
                x, W, b, stride=self.stride, pad=self.pad,
                cover_all=self.cover_all, dilate=self.dilate,
                groups=self.groups)
        if self.old_numpy_fp16:
            return y_expected.array * 0,
        return y_expected.array, 
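A side note on chainer.using_config, which the example above uses to disable iDeep: it temporarily overrides one configuration entry and restores the previous value on exit. A minimal standalone sketch:

import chainer

# The override is visible only inside the with block.
with chainer.using_config('use_ideep', 'never'):
    assert chainer.config.use_ideep == 'never'
# The previous value of chainer.config.use_ideep is restored here.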
Example #2
Source File: test_convolution_nd.py    From chainer with MIT License    6 votes
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.convolution_nd itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        y_expected = F.convolution_nd(
            x, W, b, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all, dilate=self.dilate,
            groups=self.groups)
        return y_expected.array, 
Example #3
Source File: test_deconvolution_nd.py    From chainer with MIT License    6 votes
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.deconvolution_nd itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        y_expected = F.deconvolution_nd(
            x, W, b, stride=self.stride, pad=self.pad,
            outsize=self.outsize, dilate=self.dilate,
            groups=self.groups)
        return y_expected.array, 
Example #4
Source File: test_deconvolution_2d.py    From chainer with MIT License    6 votes
def forward_expected(self, inputs):
        """
        Current forward_expected implementation depends on
        F.deconvolution_2d itself and thus it's only capable
        of checking consistency between backends, not absolute
        correctness of computations
        """
        if self.nobias:
            x, W = inputs
            b = None
        else:
            x, W, b = inputs
        y_expected = F.deconvolution_2d(
            x, W, b, stride=self.stride, pad=self.pad,
            outsize=self.outsize, dilate=self.dilate,
            groups=self.groups)
        return y_expected.array, 
Example #5
Source File: test_multi_node_chain_list.py    From chainer with MIT License    6 votes
def check_crossing_model(gpu, param):
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10
    X = np.random.randn(n, d).astype(param.dtype)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    with chainer.using_config('dtype', param.dtype):
        if communicator.rank == 0:
            model = L.Classifier(Cross0(
                d, communicator, rank_next, rank_prev))
        else:
            model = L.Classifier(Cross1(
                d, communicator, rank_next, rank_prev))

        if gpu:
            model.to_device(cupy.cuda.Device())
            X = chainer.backends.cuda.to_gpu(X)
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward() 
Example #6
Source File: cuda.py    From chainer with MIT License    6 votes
def reduce(in_params, out_params, map_expr, reduce_expr, post_map_expr,
           identity, name, **kwargs):
    """Creates a global reduction kernel function.

    This function uses :func:`~chainer.backends.cuda.memoize` to cache the
    resulting kernel object, i.e. the resulting kernel object is cached for
    each argument combination and CUDA device.

    The arguments are the same as those for
    :class:`cupy.ReductionKernel`, except that the ``name`` argument is
    mandatory.

    """
    check_cuda_available()
    return cupy.ReductionKernel(
        in_params, out_params, map_expr, reduce_expr, post_map_expr,
        identity, name, **kwargs) 
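As a usage illustration (not part of the original snippet), a small reduction kernel built with this helper could look like the following; it assumes a CUDA-capable environment with CuPy installed, and sum_of_squares is just an illustrative kernel name:

import cupy
from chainer.backends import cuda

# Arguments follow cupy.ReductionKernel; the mandatory name lets the
# compiled kernel be memoized per argument combination and device.
sum_of_squares = cuda.reduce(
    'T x',             # input parameter
    'T y',             # output parameter
    'x * x',           # map expression applied to each element
    'a + b',           # reduce expression combining mapped values
    'y = a',           # post-map expression writing the result
    '0',               # identity element of the reduction
    'sum_of_squares')  # kernel name

x = cupy.arange(5, dtype=cupy.float32)
print(sum_of_squares(x))  # 0 + 1 + 4 + 9 + 16 -> 30.0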
Example #7
Source File: test_n_step_rnn.py    From chainer with MIT License    6 votes
def setup_communicator(gpu):
    if gpu:
        communicator = chainermn.create_communicator('flat')
        chainer.backends.cuda.get_device_from_id(
            communicator.intra_rank).use()
    else:
        communicator = chainermn.create_communicator('naive')

    if communicator.size < 2:
        pytest.skip('This test is for multinode only')

    rank_next = communicator.rank + 1
    rank_prev = communicator.rank - 1

    if rank_prev < 0:
        rank_prev = None

    if rank_next >= communicator.size:
        rank_next = None

    return communicator, rank_prev, rank_next 
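Note that this helper assumes the test process was launched under MPI. A minimal sketch of creating the CPU-side 'naive' communicator used above:

import chainermn

# Launch under MPI, e.g.: mpiexec -n 2 python this_script.py
communicator = chainermn.create_communicator('naive')  # CPU-only backend
print('rank %d of %d' % (communicator.rank, communicator.size))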
Example #8
Source File: cuda.py    From chainer with MIT License    5 votes
def should_use_cudnn(level, lowest_version=0):
    """Determines if we should use cuDNN.

    This function checks ``chainer.config.use_cudnn``,
    ``chainer.backends.cuda.cudnn_enabled``, and the cuDNN version. Note
    that the ``cudnn_enabled`` flag is fixed when the :mod:`chainer`
    module is loaded.

    Args:
        level (str): cuDNN use level. It must be either ``'==always'`` or
            ``'>=auto'``. ``'==always'`` indicates that the ``use_cudnn``
            config must be ``'always'`` to use cuDNN.
        lowest_version (int): Required lowest cuDNN version. It must be
            non-negative.

    Returns:
        bool: ``True`` if the caller should use cuDNN.

    """
    if _cudnn_version < lowest_version:
        return False

    if level not in _SHOULD_USE_CUDNN:
        raise ValueError('invalid cuDNN use level: %s '
                         '(must be either of "==always" or ">=auto")' %
                         repr(level))
    flags = _SHOULD_USE_CUDNN[level]

    use_cudnn = config.use_cudnn
    if use_cudnn not in flags:
        raise ValueError('invalid use_cudnn configuration: %s '
                         '(must be either of "always", "auto", or "never")' %
                         repr(use_cudnn))
    return flags[use_cudnn] 
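A sketch of how a caller typically uses this predicate (the pattern follows Chainer's own function implementations; the version requirement 5000 below is only illustrative):

import chainer
from chainer.backends import cuda

# With use_cudnn forced to 'never', cuDNN is never selected.
with chainer.using_config('use_cudnn', 'never'):
    assert not cuda.should_use_cudnn('>=auto')

# A GPU implementation would gate its fast path like this:
if cuda.should_use_cudnn('==always', 5000):
    pass  # cuDNN code path
else:
    pass  # plain CuPy fallback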
Example #9
Source File: test_multi_node_chain_list.py    From chainer with MIT License    5 votes
def check_tuple_data_model(gpu, param):
    # This test only uses pairs (0, 1), (2, 3), ... (2m, 2m+1)
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10
    X = np.random.randn(n, d).astype(param.dtype)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    with chainer.using_config('dtype', param.dtype):
        if communicator.rank % 2 == 0:
            if communicator.rank == communicator.size - 1:
                # in case 2m is the right end with an odd number of nodes
                return
            model = L.Classifier(
                TupleDataParent(communicator, d, rank_next))
        elif communicator.rank % 2 == 1:
            model = TupleDataChild(communicator, d, rank_prev)

        assert model is not None
        if gpu:
            model.to_device(cupy.cuda.Device())
            X = chainer.backends.cuda.to_gpu(X)
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            if communicator.rank % 2 == 0:
                err = model(X[i:i + 1], Y[i:i + 1])
            elif communicator.rank % 2 == 1:
                err = model()
            assert err is not None
            err.backward() 
Example #10
Source File: device_resident.py    From chainer with MIT License    5 votes
def to_gpu(
            self,
            device: tp.Optional[types.CudaDeviceSpec] = None,
    ) -> 'DeviceResident':
        """Copies parameter variables and persistent values to GPU.

         .. deprecated:: v7.0.0
            Use :meth:`to_device` instead.

        This method does not handle non-registered attributes. If some of such
        attributes must be copied to GPU, the link implementation must
        override :meth:`~DeviceResident.device_resident_accept` to do so.

        .. warning::

            This method does not transfer the parameters if they are already on
            GPU. Use ``to_device`` to perform inter-GPU transfer.

        Args:
            device: Target device specifier. If omitted, the current device is
                used.

        Returns: self

        """
        cuda.check_cuda_available()
        cuda_device = cuda._get_device_or_current(device)
        device = chainer.backends.cuda.GpuDevice(cuda_device)
        visitor = _ToDeviceVisitor(
            device,
            entry_method_info=('to_gpu', {'device': device.device}),
            skip_between_cupy_devices=True,
            starting_device_resident=self)
        self.__to_device(visitor)
        return self 
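Since to_gpu is deprecated in favor of to_device, a sketch of the replacement call on a link (device specifiers follow Chainer's '@backend:id' convention; the GPU line is commented out so the sketch runs without CUDA):

import chainer.links as L

link = L.Linear(3, 2)
link.to_device('@numpy')     # CPU; same API shape as a GPU move
# link.to_device('@cupy:0')  # GPU 0, when CUDA is available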
Example #11
Source File: cuda.py    From chainer with MIT License    5 votes
def get_device(*args):
    """Gets the device from a device object, an ID integer or an array object.

    .. note::

        This API is deprecated since v3.0.0. Please use
        :func:`~chainer.backends.cuda.get_device_from_id`
        or :func:`~chainer.backends.cuda.get_device_from_array` instead.

    This is a convenient utility to select a correct device if the type of
    ``args`` is unknown (i.e., one can use this function on arrays that may be
    on CPU or GPU). The returned device object supports the context management
    protocol of Python for the *with* statement.

    Args:
        args: Values to specify a GPU device. The first device object, integer
            or :class:`cupy.ndarray` object is used to select a device.
            If it is a device object, it is returned. If it is an integer,
            the corresponding device is returned. If it is a CuPy array,
            the device on which the array resides is returned. If any
            arguments are neither integers nor CuPy arrays, a dummy device
            object representing CPU is returned.

    Returns:
        Device object specified by given ``args``.

    .. seealso::
       See :class:`cupy.cuda.Device` for the device selection not by arrays.

    """
    warnings.warn('get_device is deprecated. Please use get_device_from_id or'
                  ' get_device_from_array instead.', DeprecationWarning)
    return _get_cuda_device(*args) 
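A short sketch of the two replacements named in the deprecation note. get_device_from_array also accepts NumPy arrays, for which it returns a dummy device, so this runs even without CUDA:

import numpy as np
from chainer.backends import cuda

dev = cuda.get_device_from_array(np.arange(3))
with dev:  # per the docs, the returned device supports the with statement
    pass

# With CUDA available one would write, e.g.:
# with cuda.get_device_from_id(0):
#     ...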
Example #12
Source File: cuda.py    From chainer with MIT License    5 votes
def to_gpu(array, device=None, stream=None):
    """Copies the given CPU array to the specified device.

    Args:
        array (*array*, None, list or tuple):
            Array or arrays to be sent to GPU.
        device: CUDA device specifier. If ``None`` or :data:`cuda.DummyDevice`,
            the arrays will be copied to the current CUDA device.
        stream (~cupy.cuda.Stream): *(deprecated since v3.0.0)*
            CUDA stream. If not ``None``, the copy runs asynchronously.

    Returns:
        cupy.ndarray, list or tuple: Array or arrays on GPU.

        If some of the arrays are already on GPU, then this function just
        returns those arrays without performing any copy.

        If the input arrays include ``None``, those entries are returned
        as ``None`` unchanged.

    """
    if stream is not None:
        warnings.warn(
            'The stream option is deprecated in chainer.backends.cuda.to_gpu. '
            'Please remove it.', DeprecationWarning)

    check_cuda_available()
    if device is DummyDevice:
        device = cuda.Device()
    else:
        device = _get_device_or_current(device)

    return _backend._convert_arrays(
        array, lambda arr: _array_to_gpu(arr, device, stream)) 
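A usage sketch of the pass-through behavior documented above (the calls require a CUDA device to actually perform the copy):

import numpy as np
from chainer.backends import cuda

x = np.ones((2, 3), dtype=np.float32)
gx = cuda.to_gpu(x)             # copied to the current CUDA device
gx_again = cuda.to_gpu(gx)      # already on GPU: returned without a copy
batch = cuda.to_gpu([x, None])  # lists recurse; None is kept as None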
Example #13
Source File: cuda.py    From chainer with MIT License    5 votes
def clear_memo():
    """Clears the memoized results for all functions decorated by memoize.

    This function works like :func:`cupy.clear_memo` as a counterpart for
    :func:`chainer.backends.cuda.memoize`. It can be used even if CUDA is
    not available. In such a case, this function does nothing.

    """
    if available:
        cupy.clear_memo()


# ------------------------------------------------------------------------------
# Kernel definition utility
# ------------------------------------------------------------------------------ 
Example #14
Source File: cuda.py    From chainer with MIT License    5 votes
def elementwise(in_params, out_params, operation, name, **kwargs):
    """Creates an elementwise kernel function.

    This function uses :func:`~chainer.backends.cuda.memoize` to cache the
    kernel object, i.e. the resulting kernel object is cached for each argument
    combination and CUDA device.

    The arguments are the same as those for
    :class:`cupy.ElementwiseKernel`, except that the ``name`` argument is
    mandatory.

    """
    check_cuda_available()
    return cupy.ElementwiseKernel(
        in_params, out_params, operation, name, **kwargs) 
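For comparison with reduce above, a minimal elementwise kernel built with this helper (assumes CUDA and CuPy; squared_diff is the classic example from the CuPy documentation):

import cupy
from chainer.backends import cuda

squared_diff = cuda.elementwise(
    'float32 x, float32 y',   # input parameters
    'float32 z',              # output parameter
    'z = (x - y) * (x - y)',  # per-element operation
    'squared_diff')           # mandatory kernel name

a = cupy.arange(4, dtype=cupy.float32)
b = cupy.ones(4, dtype=cupy.float32)
print(squared_diff(a, b))  # [1. 0. 1. 4.]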
Example #15
Source File: generate.py    From graph-nvp with MIT License    5 votes
def generate_mols(model, temp=0.7, z_mu=None, batch_size=20, true_adj=None, gpu=-1):
    """

    :param model: GraphNVP model
    :param z_mu: latent vector of a molecule
    :param batch_size:
    :param true_adj:
    :param gpu:
    :return:
    """
    xp = np
    if gpu >= 0:
        xp = chainer.backends.cuda.cupy

    z_dim = model.adj_size + model.x_size
    mu = xp.zeros([z_dim], dtype=xp.float32)
    sigma_diag = xp.ones([z_dim])

    if model.hyperparams.learn_dist:
        sigma_diag = xp.sqrt(xp.exp(model.ln_var.data)) * sigma_diag
        # sigma_diag = xp.exp(xp.hstack((model.ln_var_x.data, model.ln_var_adj.data)))

    sigma = temp * sigma_diag

    with chainer.no_backprop_mode():
        if z_mu is not None:
            mu = z_mu
            sigma = 0.01 * xp.eye(z_dim, dtype=xp.float32)
        z = xp.random.normal(mu, sigma, (batch_size, z_dim)).astype(xp.float32)
        adj, x = model.reverse(z, true_adj=true_adj)
    return adj, x 
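The chainer.no_backprop_mode() context used above disables construction of the computational graph, which saves memory during pure sampling. A standalone sketch:

import numpy as np
import chainer

x = chainer.Variable(np.ones(3, dtype=np.float32))
with chainer.no_backprop_mode():
    y = x * 2             # computed normally...
assert y.creator is None  # ...but no graph node was recorded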
Example #16
Source File: faster_rcnn_train_chain.py    From chainercv with MIT License    5 votes
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
    xp = chainer.backends.cuda.get_array_module(pred_loc)

    in_weight = xp.zeros_like(gt_loc)
    # Localization loss is calculated only for positive rois.
    in_weight[gt_label > 0] = 1
    loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight, sigma)
    # Normalize by the total number of negative and positive RoIs.
    loc_loss /= xp.sum(gt_label >= 0)
    return loc_loss 
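get_array_module, used here and in several of the following examples, is the standard way to write NumPy/CuPy-agnostic code: it returns the numpy module for NumPy arrays and cupy for CuPy arrays. A small sketch:

import numpy as np
import chainer

def normalize(a):
    # The same code runs on CPU or GPU arrays.
    xp = chainer.backends.cuda.get_array_module(a)
    return a / xp.linalg.norm(a)

print(normalize(np.array([3.0, 4.0])))  # [0.6 0.8]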
Example #17
Source File: feature_predictor.py    From chainercv with MIT License    5 votes
def _average_crops(self, y, n_crop):
        if y.ndim == 4:
            warnings.warn(
                'Four dimensional features are averaged. '
                'If these are batch of 2D spatial features, '
                'their spatial information would be lost.')

        xp = chainer.backends.cuda.get_array_module(y)
        y = y.reshape((-1, n_crop) + y.shape[1:])
        y = xp.mean(y, axis=1)
        return y 
Example #18
Source File: faster_rcnn.py    From chainercv with MIT License    5 votes
def _list_to_flat(array_list):
    xp = chainer.backends.cuda.get_array_module(array_list[0])

    indices = xp.concatenate(
        [i * xp.ones((len(array),), dtype=np.int32) for
         i, array in enumerate(array_list)], axis=0)
    flat = xp.concatenate(array_list, axis=0)
    return flat, indices 
Example #19
Source File: mask_head.py    From chainercv with MIT License    5 votes
def decode(self, segms, bboxes, labels, sizes):
        """Decodes back to masks.

        Args:
            segms (iterable of arrays): An iterable of arrays of
                shape :math:`(R_n, n\_class, M, M)`.
            bboxes (iterable of arrays): An iterable of arrays of
                shape :math:`(R_n, 4)`.
            labels (iterable of arrays): An iterable of arrays of
                shape :math:`(R_n,)`.
            sizes (list of tuples of two ints): A list of
                :math:`(H_n, W_n)`, where :math:`H_n` and :math:`W_n`
                are height and width of the :math:`n`-th image.

        Returns:
            list of arrays:
            This list contains instance segmentation for each image
            in the batch.
            More precisely, this is a list of boolean arrays of shape
            :math:`(R'_n, H_n, W_n)`, where :math:`R'_n` is the number of
            bounding boxes in the :math:`n`-th image.
        """

        xp = chainer.backends.cuda.get_array_module(*segms)
        if xp != np:
            raise ValueError(
                'MaskHead.decode only supports numpy inputs for now.')
        masks = []
        for bbox, segm, label, size in zip(
                bboxes, segms, labels, sizes):
            if len(segm) > 0:
                masks.append(
                    segm_to_mask(segm[np.arange(len(label)), label + 1],
                                 bbox, size))
            else:
                masks.append(np.zeros((0,) + size, dtype=np.bool))
        return masks 
Example #20
Source File: faster_rcnn.py    From chainer-compiler with MIT License    5 votes
def _list_to_flat(array_list):
    xp = chainer.backends.cuda.get_array_module(array_list[0])

    indices = xp.concatenate(
        [i * xp.ones((len(array),), dtype=np.int32) for
         i, array in enumerate(array_list)], axis=0)
    flat = xp.concatenate(array_list, axis=0)
    return flat, indices 
Example #21
Source File: test_multi_node_chain_list.py    From chainer with MIT License    5 votes
def check_twisting_model(gpu, param):
    communicator, rank_next, rank_prev = create_communicator(gpu)

    n, d = 100, 10
    X = np.random.randn(n, d).astype(param.dtype)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    with chainer.using_config('dtype', param.dtype):
        if communicator.rank == 0:
            model = L.Classifier(
                TwistFirst(d, communicator, rank_next))
        elif communicator.rank == communicator.size - 1:
            model = L.Classifier(
                TwistLast(d, communicator, rank_prev))
        else:
            model = L.Classifier(Twist(
                d, communicator, rank_prev, rank_next))

        if gpu:
            model.to_device(cupy.cuda.Device())
            X = chainer.backends.cuda.to_gpu(X)
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward() 
Example #22
Source File: test_multi_node_chain_list.py    From chainer with MIT License    5 votes
def check_branching_model(gpu, communicator, rank_next, rank_prev,
                          parent_model, param):
    n, d = 100, 10
    X = np.random.randn(n, d).astype(param.dtype)
    Y = (np.random.rand(n) * 2).astype(np.int32)

    with chainer.using_config('dtype', param.dtype):
        if communicator.rank == 0:
            rank_children = [rank for rank in range(1, communicator.size)]
            model = L.Classifier(parent_model(
                d, communicator, rank_children))
            if gpu:
                model.to_device(cupy.cuda.Device())
                X = chainer.backends.cuda.to_gpu(X)
                Y = chainer.backends.cuda.to_gpu(Y)

            for i in range(n):
                err = model(X[i:i + 1], Y[i:i + 1])
                err.backward()
        else:
            model = BranchChild(d, communicator, 0)
            if gpu:
                model.to_device(cupy.cuda.Device())

            for i in range(n):
                err = model()
                err.backward() 
Example #23
Source File: test_multi_node_chain_list.py    From chainer with MIT License    5 votes
def create_communicator(gpu):
    if gpu:
        communicator = chainermn.create_communicator('flat')
        chainer.backends.cuda.get_device_from_id(communicator.intra_rank).use()
    else:
        communicator = chainermn.create_communicator('naive')

    if communicator.size < 2:
        pytest.skip('This test is for multinode only')

    rank_next = (communicator.rank + 1) % communicator.size
    rank_prev = (communicator.rank - 1) % communicator.size
    return communicator, rank_next, rank_prev 
Example #24
Source File: test_n_step_rnn.py    From chainer with MIT License    5 votes
def check_heterogeneous_rnn(gpu, dtype):
    communicator, rank_prev, rank_next = setup_communicator(gpu)

    with chainer.using_config('dtype', dtype):
        n, n_vocab, l = 100, 8, 10
        # Number of model parameters are different among processes.
        n_hid = (communicator.rank + 1) * 10

        X = [np.random.randint(
            0, n_vocab, size=np.random.randint(l // 2, l + 1),
            dtype=np.int32)
            for _ in range(n)]
        Y = (np.random.rand(n) * 2).astype(dtype)
        model = Model(
            n_vocab, n_hid, communicator, rank_next,
            rank_prev)

        if gpu:
            model.to_device(cupy.cuda.Device())
            X = [chainer.backends.cuda.to_gpu(x) for x in X]
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward()

        # Check if backprop finishes without deadlock.
        assert True 
Example #25
Source File: test_n_step_rnn.py    From chainer with MIT License    5 votes
def check_homogeneous_rnn(gpu, dtype):
    communicator, rank_prev, rank_next = setup_communicator(gpu=gpu)

    n, n_vocab, l = 100, 8, 10
    # The number of model parameters is the same among processes.
    n_hid = 2
    with chainer.using_config('dtype', dtype):
        X = [np.random.randint(
            0, n_vocab, size=np.random.randint(l // 2, l + 1),
            dtype=np.int32)
            for _ in range(n)]
        Y = (np.random.rand(n) * 2).astype(dtype)
        model = Model(
            n_vocab, n_hid, communicator, rank_next,
            rank_prev)

        if gpu:
            model.to_device(cupy.cuda.Device())
            X = [chainer.backends.cuda.to_gpu(x) for x in X]
            Y = chainer.backends.cuda.to_gpu(Y)

        for i in range(n):
            err = model(X[i:i + 1], Y[i:i + 1])
            err.backward()

        # Check if backprop finishes without deadlock.
        assert True 
Example #26
Source File: test_batch_normalization.py    From chainer with MIT License    5 votes
def test_invalid(self):
        eps = -0.1
        if chainer.backends.cuda.libcudnn.get_build_version() < 7500:
            eps = 2e-6
        with self.assertRaises(RuntimeError):
            functions.fixed_batch_normalization(*self.args, eps=eps) 
Example #27
Source File: test_dropout.py    From chainer with MIT License    5 votes
def test_call_cudnn_backward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            y = self.forward()
            y.grad = self.gy
            with testing.patch(
                    'chainer.backends.cuda.get_cudnn_dropout_states') as func:
                y.backward()
                assert func.called == (self.use_cudnn == 'always') 
Example #28
Source File: test_dropout.py    From chainer with MIT License    5 votes
def test_call_cudnn_forward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with testing.patch(
                    'chainer.backends.cuda.get_cudnn_dropout_states') as func:
                self.forward()
                assert func.called == (self.use_cudnn == 'always') 
Example #29
Source File: test_copy.py    From chainer with MIT License    5 votes
def test_forward_str(self, src_backend_config, dst_backend_config):
        assert dst_backend_config.xp is not chainerx
        src_device = src_backend_config.device
        dst_device = dst_backend_config.device
        if dst_device.xp is numpy:
            dst_device_spec = '@numpy'
        elif dst_device.xp is chainer.backends.cuda.cupy:
            dst_device_spec = '@cupy:{}'.format(dst_device.device.id)
        else:
            assert False, dst_device

        self.check_forward(
            dst_device_spec,
            src_device,
            dst_device) 
Example #30
Source File: test_copy.py    From chainer with MIT License    5 votes
def test_forward_int(self, src_backend_config, dst_backend_config):
        assert dst_backend_config.xp is not chainerx
        src_device = src_backend_config.device
        dst_device = dst_backend_config.device
        if dst_device.xp is numpy:
            dst_device_spec = -1
        elif dst_device.xp is chainer.backends.cuda.cupy:
            dst_device_spec = dst_device.device.id
        else:
            assert False, dst_device

        self.check_forward(
            dst_device_spec,
            src_device,
            dst_device)
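For reference, the device specifiers exercised by these two tests ('@numpy', '@cupy:<id>', and the bare integer form accepted by F.copy) can also be resolved directly with chainer.get_device. A sketch using only the CPU specifier:

import chainer

cpu = chainer.get_device('@numpy')     # a CPU (NumPy) device object
print(cpu)
# With CUDA available:
# gpu = chainer.get_device('@cupy:0')  # CuPy device 0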