Python numpy.take_along_axis() Examples

The following are 30 code examples showing how to use numpy.take_along_axis(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module numpy, or try the search function.

Example 1
Project: mars   Author: mars-project   File: test_base_execute.py    License: Apache License 2.0 6 votes vote down vote up
def testSortIndicesExecution(self):
    """Sorting with return_index=True must yield indices that reproduce the sorted values."""
    data = np.random.rand(100, 10)

    # Single chunk along the sort axis (axis=-1).
    t = tensor(data, chunk_size=10)
    res = sort(t, return_index=True)
    values, index = self.executor.execute_tensors(res)
    np.testing.assert_array_equal(values, np.take_along_axis(data, index, axis=-1))

    # Chunked along both axes.
    t = tensor(data, chunk_size=(22, 4))
    res = sort(t, return_index=True)
    values, index = self.executor.execute_tensors(res)
    np.testing.assert_array_equal(values, np.take_along_axis(data, index, axis=-1))

    # 1-d input sorted along axis 0.
    data = np.random.rand(100)
    t = tensor(data, chunk_size=23)
    res = sort(t, axis=0, return_index=True)
    values, index = self.executor.execute_tensors(res)
    np.testing.assert_array_equal(values, data[index])
Example 2
Project: mars   Author: mars-project   File: test_base_execute.py    License: Apache License 2.0 6 votes vote down vote up
def testArgsort(self):
    """argsort results, used as gather indices, must reproduce np.sort."""
    data = np.random.rand(100, 10)

    # Single chunk along the sorted axis.
    t = tensor(data, chunk_size=10)
    order = argsort(t)
    res = self.executor.execute_tensor(order, concat=True)[0]
    np.testing.assert_array_equal(np.sort(data), np.take_along_axis(data, res, axis=-1))

    # Chunked along both axes.
    t = tensor(data, chunk_size=(22, 4))
    order = argsort(t)
    res = self.executor.execute_tensor(order, concat=True)[0]
    np.testing.assert_array_equal(np.sort(data), np.take_along_axis(data, res, axis=-1))

    # 1-d case along axis 0.
    data = np.random.rand(100)
    t = tensor(data, chunk_size=23)
    order = argsort(t, axis=0)
    res = self.executor.execute_tensor(order, concat=True)[0]
    np.testing.assert_array_equal(np.sort(data, axis=0), data[res])
Example 3
Project: lingvo   Author: tensorflow   File: layers_test.py    License: Apache License 2.0 6 votes vote down vote up
def testRelativePositionalEmbeddingLayer(self):
    """Embeddings produced by the layer must match a manual clipped table lookup."""
    with self.session(use_gpu=False):
        radius = 3
        params = layers.RelativePositionalEmbeddingLayer.Params().Set(
            name='rel_position_emb', radius=radius, dim=4)
        layer = params.Instantiate()
        indices = np.array([-5, -2, 0, 1, 4], dtype=np.int32)
        pos_emb = layer.FPropDefaultTheta(tf.convert_to_tensor(indices))

        self.evaluate(tf.global_variables_initializer())
        actual_pos_emb, full_emb = self.evaluate([pos_emb, layer.vars.w])

        # Clip each index into [-radius, radius], then shift into table range.
        clipped_indices = np.clip(indices, -radius, radius) + radius
        expected_output = np.take_along_axis(
            full_emb, np.expand_dims(clipped_indices, -1), 0)
        print('expected_position_embs:', expected_output)
        print('actual_position_embs:', actual_pos_emb)
        self.assertAllClose(actual_pos_emb, expected_output)
Example 4
Project: trax   Author: google   File: lax_numpy_test.py    License: Apache License 2.0 6 votes vote down vote up
def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng_factory):
    """Check lnp.take_along_axis against numpy's reference implementation."""
    rng = rng_factory()
    i_shape = onp.array(i_shape)
    if axis is None:
        # Flattened lookup: one 1-d index array over all elements.
        i_shape = [onp.prod(i_shape, dtype=onp.int64)]
    else:
        # Test the case where the size of the axis doesn't necessarily broadcast.
        i_shape[axis] *= 3
        i_shape = list(i_shape)

    def args_maker():
        x = rng(x_shape, dtype)
        n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis]
        # Indices span the full valid negative-to-positive range.
        i = rng(i_shape, onp.int32) % (2 * n - 1) - (n - 1)
        return x, i

    def lnp_op(x, i):
        return lnp.take_along_axis(x, i, axis=axis)

    if hasattr(onp, "take_along_axis"):
        def onp_op(x, i):
            return onp.take_along_axis(x, i, axis=axis)
        self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
    self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True,
                          check_incomplete_shape=True)
Example 5
Project: bayesmark   Author: uber   File: np_util.py    License: Apache License 2.0 6 votes vote down vote up
def cummin(x_val, x_key):
    """Cumulative minimum of `x_val` along axis 0, ranked by `x_key`.

    Parameters
    ----------
    x_val : :class:`numpy:numpy.ndarray` of shape (n, d)
        The array to take the running minimum of along axis 0.
    x_key : :class:`numpy:numpy.ndarray` of shape (n, d)
        The array used to decide which element is "smallest".

    Returns
    -------
    c_min : :class:`numpy:numpy.ndarray` of shape (n, d)
        The cumulative minimum array.
    """
    assert x_val.shape == x_key.shape
    assert x_val.ndim == 2
    assert not np.any(np.isnan(x_key)), "cummin not defined for nan key"

    n_rows = x_val.shape[0]

    # Running minimum of the key along axis 0.
    running_min = np.minimum.accumulate(x_key, axis=0)
    # Row index of the most recent minimizer, per column.
    row_ids = np.arange(n_rows)[:, None]
    argmin_so_far = np.maximum.accumulate(
        np.where(x_key <= running_min, row_ids, 0))
    return np.take_along_axis(x_val, argmin_so_far, axis=0)
Example 6
Project: MSN-Point-Cloud-Completion   Author: Colin97   File: emd_module.py    License: Apache License 2.0 6 votes vote down vote up
def test_emd():
    """Smoke-test emdModule: run EMD on two random point clouds, then
    re-verify the reported distance by gathering the matched points with
    the returned assignment.

    Requires a CUDA device (emdModule runs on the GPU).
    """
    x1 = torch.rand(20, 8192, 3).cuda()
    x2 = torch.rand(20, 8192, 3).cuda()
    emd = emdModule()
    start_time = time.perf_counter()
    dis, assigment = emd(x1, x2, 0.05, 3000)
    print("Input_size: ", x1.shape)
    print("Runtime: %lfs" % (time.perf_counter() - start_time))
    print("EMD: %lf" % np.sqrt(dis.cpu()).mean())
    print("|set(assignment)|: %d" % assigment.unique().numel())
    # Bug fix: move ALL tensors to host numpy before mixing with numpy ops.
    # The original passed a CUDA tensor to np.take_along_axis, subtracted a
    # numpy array from a CUDA tensor, and called .cpu() on a numpy array —
    # each of which raises at runtime.
    assigment = assigment.cpu().numpy()
    assigment = np.expand_dims(assigment, -1)
    x1 = x1.cpu().numpy()
    x2 = x2.cpu().numpy()
    x2 = np.take_along_axis(x2, assigment, axis = 1)
    d = (x1 - x2) * (x1 - x2)
    print("Verified EMD: %lf" % np.sqrt(d.sum(-1)).mean())

#test_emd()
Example 7
Project: mars   Author: mars-project   File: test_base_execute.py    License: Apache License 2.0 5 votes vote down vote up
def testPartitionIndicesExecution(self):
    """partition with return_index must give indices reproducing the values,
    with the kth positions matching a full sort."""
    data = np.random.rand(100, 10)
    kth = [2, 5, 9]

    # Single chunk along the partition axis (axis=-1).
    t = tensor(data, chunk_size=10)
    res = partition(t, kth, return_index=True)
    values, index = self.executor.execute_tensors(res)
    np.testing.assert_array_equal(values, np.take_along_axis(data, index, axis=-1))
    np.testing.assert_array_equal(np.sort(data)[:, kth], values[:, kth])

    # Chunked along both axes.
    t = tensor(data, chunk_size=(22, 4))
    res = partition(t, kth, return_index=True)
    values, index = self.executor.execute_tensors(res)
    np.testing.assert_array_equal(values, np.take_along_axis(data, index, axis=-1))
    np.testing.assert_array_equal(np.sort(data)[:, kth], values[:, kth])

    # 1-d case along axis 0.
    data = np.random.rand(100)
    t = tensor(data, chunk_size=23)
    res = partition(t, kth, axis=0, return_index=True)
    values, index = self.executor.execute_tensors(res)
    np.testing.assert_array_equal(values, np.take_along_axis(data, index, axis=-1))
    np.testing.assert_array_equal(np.sort(data)[kth], values[kth])
Example 8
Project: mars   Author: mars-project   File: test_base_execute.py    License: Apache License 2.0 5 votes vote down vote up
def testArgpartitionExecution(self):
    """argpartition indices must place the kth elements exactly as a full sort would."""
    data = np.random.rand(100, 10)
    kth = [6, 3, 8]

    # Single chunk along the partition axis.
    t = tensor(data, chunk_size=10)
    pa = argpartition(t, kth)
    res = self.executor.execute_tensor(pa, concat=True)[0]
    np.testing.assert_array_equal(np.sort(data)[:, kth],
                                  np.take_along_axis(data, res, axis=-1)[:, kth])

    # Chunked along both axes.
    t = tensor(data, chunk_size=(22, 4))
    pa = argpartition(t, kth)
    res = self.executor.execute_tensor(pa, concat=True)[0]
    np.testing.assert_array_equal(np.sort(data)[:, kth],
                                  np.take_along_axis(data, res, axis=-1)[:, kth])

    # 1-d case along axis 0.
    data = np.random.rand(100)
    t = tensor(data, chunk_size=23)
    pa = argpartition(t, kth, axis=0)
    res = self.executor.execute_tensor(pa, concat=True)[0]
    np.testing.assert_array_equal(np.sort(data, axis=0)[kth], data[res][kth])
Example 9
Project: mars   Author: mars-project   File: test_base_execute.py    License: Apache License 2.0 5 votes vote down vote up
def testTopkExecution(self):
        """Cross-product test of topk over fixtures (plain and structured
        dtypes), chunkings, axes, sortedness, parallel algorithms and k
        values, checked against a slow reference (``self._topk_slow``).
        """
        # Plain float fixture (no field ordering).
        raw1, order1 = np.random.rand(5, 6, 7), None
        # Structured-array fixture; `order` selects the comparison fields.
        raw2 = np.empty((5, 6, 7), dtype=[('a', np.int32), ('b', np.float64)])
        raw2['a'] = np.random.randint(1000, size=(5, 6, 7), dtype=np.int32)
        raw2['b'] = np.random.rand(5, 6, 7)
        order2 = ['b', 'a']

        for raw, order in [(raw1, order1), (raw2, order2)]:
            for chunk_size in [7, 4]:
                a = tensor(raw, chunk_size=chunk_size)
                for axis in [0, 1, 2, None]:
                    size = raw.shape[axis] if axis is not None else raw.size
                    for largest in [True, False]:
                        for to_sort in [True, False]:
                            for parallel_kind in ['tree', 'psrs']:
                                # k below, near, at, and above the axis length.
                                for k in [2, size - 2, size, size + 2]:
                                    r = topk(a, k, axis=axis, largest=largest, sorted=to_sort,
                                             order=order, parallel_kind=parallel_kind)

                                    result = self.executor.execute_tensor(r, concat=True)[0]

                                    # Unsorted output has no defined order; canonicalize first.
                                    if not to_sort:
                                        result = self._handle_result(result, axis, largest, order)
                                    expected = self._topk_slow(raw, k, axis, largest, order)
                                    np.testing.assert_array_equal(result, expected)

                                    r = topk(a, k, axis=axis, largest=largest,
                                             sorted=to_sort, order=order,
                                             parallel_kind=parallel_kind,
                                             return_index=True)

                                    ta, ti = self.executor.execute_tensors(r)
                                    # NOTE(review): rebinding `raw2` here shadows the
                                    # structured fixture; harmless only because the
                                    # fixture list above was built before the loop.
                                    raw2 = raw
                                    if axis is None:
                                        raw2 = raw.flatten()
                                    np.testing.assert_array_equal(ta, np.take_along_axis(raw2, ti, axis))
                                    if not to_sort:
                                        ta = self._handle_result(ta, axis, largest, order)
                                    np.testing.assert_array_equal(ta, expected)
Example 10
Project: dgl   Author: dmlc   File: tensor.py    License: Apache License 2.0 5 votes vote down vote up
def topk(input, k, dim, descending=True):
    """Return the top-k values of ``input`` along ``dim``, selected via
    ``argtopk`` (largest first when ``descending`` is True)."""
    selected = argtopk(input, k, dim, descending)
    return np.take_along_axis(input, selected, axis=dim)
Example 11
Project: centerpose   Author: tensorboy   File: convert2onnx.py    License: MIT License 5 votes vote down vote up
def _gather_feat(feat, ind, mask=None):
    dim  = feat.shape[2]
    ind = np.repeat(ind[:, :, np.newaxis], dim, axis=2)
    feat = np.take_along_axis(feat, ind, 1) 
    if mask is not None:
        mask = np.expand_dims(mask, 2).reshape(feat.shape)
        feat = feat[mask]
        feat = feat.reshape(-1, dim)
    return feat 
Example 12
Project: imgclsmob   Author: osmr   File: centernet.py    License: MIT License 5 votes vote down vote up
def call(self, x, training=None):
        """Decode a CenterNet head output into (x1, y1, x2, y2, class, score) rows.

        Per pixel the feature map holds class heatmap channels followed by 2
        width/height channels and 2 center-offset channels.
        NOTE(review): runs eagerly via ``x.numpy()`` — assumes eager execution.
        """
        import numpy as np

        x_ = x.numpy()
        # Work in NCHW internally.
        if not is_channels_first(self.data_format):
            x_ = x_.transpose((0, 3, 1, 2))

        # Split channels: class heatmaps, box sizes (wh), center offsets (reg).
        heatmap = x_[:, :-4]
        wh = x_[:, -4:-2]
        reg = x_[:, -2:]
        batch, _, out_h, out_w = heatmap.shape

        # Top-k scoring positions over all classes and pixels.
        heatmap_flat = heatmap.reshape((batch, -1))
        indices = np.argsort(heatmap_flat)[:, -self.topk:]
        scores = np.take_along_axis(heatmap_flat, indices=indices, axis=-1)
        # Recover class id and spatial location from each flat index.
        topk_classes = (indices // (out_h * out_w)).astype(dtype=np.float32)
        topk_indices = indices % (out_h * out_w)
        topk_ys = (topk_indices // out_w).astype(dtype=np.float32)
        topk_xs = (topk_indices % out_w).astype(dtype=np.float32)
        # Gather sub-pixel center offsets and box sizes at the selected positions.
        center = reg.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        wh = wh.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        xs = np.take_along_axis(center[:, :, 0], indices=topk_indices, axis=-1)
        ys = np.take_along_axis(center[:, :, 1], indices=topk_indices, axis=-1)
        topk_xs = topk_xs + xs
        topk_ys = topk_ys + ys
        w = np.take_along_axis(wh[:, :, 0], indices=topk_indices, axis=-1)
        h = np.take_along_axis(wh[:, :, 1], indices=topk_indices, axis=-1)
        half_w = 0.5 * w
        half_h = 0.5 * h
        bboxes = tf.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), axis=-1)

        # Scale boxes back to input-image coordinates.
        bboxes = bboxes * self.scale
        topk_classes = tf.expand_dims(topk_classes, axis=-1)
        scores = tf.expand_dims(scores, axis=-1)
        result = tf.concat((bboxes, topk_classes, scores), axis=-1)
        return result
Example 13
Project: imgclsmob   Author: osmr   File: centernet.py    License: MIT License 5 votes vote down vote up
def __call__(self, x):
        """Decode a CenterNet head output into (x1, y1, x2, y2, class, score) rows.

        ``x`` is NCHW with per-pixel class heatmap channels followed by 2
        width/height channels and 2 center-offset channels (Chainer variant).
        """
        import numpy as np

        # Split channels: class heatmaps, box sizes (wh), center offsets (reg).
        heatmap = x[:, :-4].array
        wh = x[:, -4:-2].array
        reg = x[:, -2:].array
        batch, _, out_h, out_w = heatmap.shape

        # Top-k scoring positions over all classes and pixels.
        heatmap_flat = heatmap.reshape((batch, -1))
        indices = np.argsort(heatmap_flat)[:, -self.topk:]
        scores = np.take_along_axis(heatmap_flat, indices=indices, axis=-1)
        # Recover class id and spatial location from each flat index.
        topk_classes = (indices // (out_h * out_w)).astype(dtype=np.float32)
        topk_indices = indices % (out_h * out_w)
        topk_ys = (topk_indices // out_w).astype(dtype=np.float32)
        topk_xs = (topk_indices % out_w).astype(dtype=np.float32)
        # Gather sub-pixel center offsets and box sizes at the selected positions.
        center = reg.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        wh = wh.transpose((0, 2, 3, 1)).reshape((batch, -1, 2))
        xs = np.take_along_axis(center[:, :, 0], indices=topk_indices, axis=-1)
        ys = np.take_along_axis(center[:, :, 1], indices=topk_indices, axis=-1)
        topk_xs = topk_xs + xs
        topk_ys = topk_ys + ys
        w = np.take_along_axis(wh[:, :, 0], indices=topk_indices, axis=-1)
        h = np.take_along_axis(wh[:, :, 1], indices=topk_indices, axis=-1)
        half_w = 0.5 * w
        half_h = 0.5 * h
        bboxes = F.stack((topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h), axis=-1)

        # Scale boxes back to input-image coordinates.
        bboxes = bboxes * self.scale
        topk_classes = F.expand_dims(topk_classes, axis=-1)
        scores = F.expand_dims(scores, axis=-1)
        result = F.concat((bboxes, topk_classes, scores), axis=-1)
        return result
Example 14
Project: imgclsmob   Author: osmr   File: ntsnet_cub.py    License: MIT License 5 votes vote down vote up
def __call__(self, x):
        """NTS-Net forward pass: propose top-n part regions with the navigator,
        crop and re-encode each region, and classify the concatenated features.
        """
        raw_pre_features = self.backbone(x)

        # Score candidate part regions and keep the top-n after NMS.
        rpn_score = self.navigator_unit(raw_pre_features)
        rpn_score.to_cpu()
        all_cdds = [np.concatenate((y.reshape(-1, 1), self.edge_anchors.copy()), axis=1)
                    for y in rpn_score.array]
        top_n_cdds = [hard_nms(y, top_n=self.top_n, iou_thresh=0.25) for y in all_cdds]
        top_n_cdds = np.array(top_n_cdds)
        # Last column of each candidate row holds its anchor index.
        top_n_index = top_n_cdds[:, :, -1].astype(np.int64)
        top_n_index = np.array(top_n_index, dtype=np.int64)
        top_n_prob = np.take_along_axis(rpn_score.array, top_n_index, axis=1)

        # Crop each proposed region from the padded image and resize to 224x224.
        batch = x.shape[0]
        x_pad = F.pad(x, pad_width=self.pad_width, mode="constant", constant_values=0)
        part_imgs = []
        for i in range(batch):
            for j in range(self.top_n):
                y0, x0, y1, x1 = tuple(top_n_cdds[i][j, 1:5].astype(np.int64))
                x_res = F.resize_images(
                    x_pad[i:i + 1, :, y0:y1, x0:x1],
                    output_shape=(224, 224))
                part_imgs.append(x_res)
        part_imgs = F.concat(tuple(part_imgs), axis=0)
        part_features = self.backbone_tail(self.backbone(part_imgs))

        # Keep features of the first num_cat parts and flatten per sample.
        part_feature = part_features.reshape((batch, self.top_n, -1))
        part_feature = part_feature[:, :self.num_cat, :]
        part_feature = part_feature.reshape((batch, -1))

        raw_features = self.backbone_tail(raw_pre_features)

        # Fuse part features with whole-image features for the final logits.
        concat_out = F.concat((part_feature, raw_features), axis=1)
        concat_logits = self.concat_net(concat_out)

        if self.aux:
            # Auxiliary heads, used only for training losses.
            raw_logits = self.backbone_classifier(raw_features)
            part_logits = self.partcls_net(part_features).reshape((batch, self.top_n, -1))
            return concat_logits, raw_logits, part_logits, top_n_prob
        else:
            return concat_logits
Example 15
Project: trax   Author: google   File: lax_numpy_test.py    License: Apache License 2.0 5 votes vote down vote up
def testTakeAlongAxisIssue1521(self):
    """Regression test for https://github.com/google/jax/issues/1521."""
    idx = lnp.repeat(lnp.arange(3), 10).reshape((30, 1))

    def f(x):
        scaled = x * lnp.arange(3.).reshape((1, 3))
        return lnp.take_along_axis(scaled, idx, -1).sum()

    check_grads(f, (1.,), order=1)
Example 16
Project: pyxclib   Author: kunaldahiya   File: xc_metrics.py    License: MIT License 5 votes vote down vote up
def _eval_flags(indices, true_labels, inv_psp=None):
    if sp.issparse(true_labels):
        eval_flags = np.take_along_axis(true_labels.tocsc(),
                                        indices, axis=-1).todense()
    elif type(true_labels) == np.ndarray:
        eval_flags = np.take_along_axis(true_labels,
                                        indices, axis=-1)
    if inv_psp is not None:
        eval_flags = np.multiply(inv_psp[indices], eval_flags)
    return eval_flags 
Example 17
Project: pyxclib   Author: kunaldahiya   File: dense.py    License: MIT License 5 votes vote down vote up
def topk(values, indices=None, k=10, sorted=False):
    """
    Return topk values from a np.ndarray with support for optional
    second array

    Arguments:
    ---------
    values: np.ndarray
        select topk values based on this array
    indices: np.ndarray or None, optional, default=None
        second array; return corresponding entries for this array
        as well; useful for key, value pairs
    k: int, optional, default=10
        k in top-k
    sorted: boolean, optional, default=False
        Sort the topk values or not

    Returns:
    -------
    np.ndarray of the top-k values per row, or a tuple
    (topk_values, topk_indices_entries) when ``indices`` is given.
    """
    assert values.shape[1] >= k, f"value has less than {k} values per row"
    if indices is not None:
        assert values.shape == indices.shape, \
            f"Shape of values {values.shape} != indices {indices.shape}"
    if not sorted:
        topk_args = np.argpartition(values, -k)[:, -k:]
    else:
        # Partitioning against each of the last k positions yields the
        # top-k columns in ascending order; reverse for descending.
        topk_args = np.argpartition(
            values, list(range(-k, 0)))[:, -k:][:, ::-1]
    out = np.take_along_axis(values, topk_args, axis=-1)
    if indices is not None:
        out = (out, np.take_along_axis(indices, topk_args, axis=-1))
    # Bug fix: the original computed `out` but never returned it.
    return out
Example 18
Project: deep_pipe   Author: neuro-ml   File: metrics.py    License: MIT License 5 votes vote down vote up
def cross_entropy_with_logits(target: np.ndarray, logits: np.ndarray, axis: int = 1,
                              reduce: Union[Callable, None] = np.mean):
    """
    Numerically stable cross entropy for numpy arrays.

    ``target`` and ``logits`` must have the same shape except for ``axis``.

    Parameters
    ----------
    target
        integer array of shape (d1, ..., di, dj, ..., dn) holding class ids
    logits
        array of shape (d1, ..., di, k, dj, ..., dn)
    axis
        the axis containing the logits for each class: ``logits.shape[axis] == k``
    reduce
        reduction applied to the per-element loss; pass None to skip it
    """
    # Logit of the true class for every element.
    picked = np.take_along_axis(logits, np.expand_dims(target, axis), axis)
    # Subtract the (non-negative) per-slice max before exponentiating to
    # avoid overflow in exp.
    stabilizer = np.maximum(0, logits.max(axis, keepdims=True))
    log_sum_exp = np.log(np.exp(logits - stabilizer).sum(axis, keepdims=True))

    loss = (-picked + stabilizer + log_sum_exp).squeeze(axis)

    return loss if reduce is None else reduce(loss)
Example 19
Project: pb_bss   Author: fgnt   File: complex_bingham.py    License: MIT License 5 votes vote down vote up
def _remove_duplicate_eigenvalues(cls, covariance_eigenvalues, eps=1e-8):
        """
        >>> import pytest; pytest.skip('Bingham is to slow')
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.5, 0.5]))[-1]
        array([0.5       , 0.50000001])

        Demonstrate the suboptimal behaviour for duplicate eigenvalues.
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.2, 0.4, 0.4]), eps=0.02)[-1]
        array([0.2 , 0.4 , 0.42])

        This function sorts the eigenvalues
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.9, 0.1]))
        (array([1, 0]), array([0.1, 0.9]))
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.9, 0.06, 0.04]))
        (array([2, 1, 0]), array([0.04, 0.06, 0.9 ]))
        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([0.9, 0.04, 0.06]))
        (array([2, 0, 1]), array([0.04, 0.06, 0.9 ]))

        >>> ComplexBingham._remove_duplicate_eigenvalues(np.array([1, 0.0, 0.0]))
        (array([2, 0, 1]), array([0.00000000e+00, 1.00000000e-08, 1.00000001e+00]))
        """
        # Sort ascending along the last axis; the (inverse) permutation is
        # returned so callers can restore the original ordering.
        permutation = np.argsort(covariance_eigenvalues, axis=-1, )
        covariance_eigenvalues = np.take_along_axis(covariance_eigenvalues, permutation, axis=-1)
        # Clamp consecutive gaps to at least `eps` so no two eigenvalues
        # coincide (required downstream by the Bingham normalization).
        diff = np.diff(covariance_eigenvalues, axis=-1)
        # eps = covariance_eigenvalues[..., -1] * eps
        # diff = np.maximum(diff, eps[..., None])
        diff = np.maximum(diff, eps)

        # This reconstruction is not optimal, but an error of 1e-8
        # Rebuild the sorted values as smallest value + cumulative clamped gaps.
        covariance_eigenvalues[..., 1:] = (
                covariance_eigenvalues[..., 0][..., None]
                + np.cumsum(diff, axis=-1)
        )

        # https://stackoverflow.com/a/55737198/5766934
        inverse_permutation = np.arange(permutation.shape[-1])[np.argsort(permutation, axis=-1)]
        return inverse_permutation, covariance_eigenvalues
Example 20
Project: estimagic   Author: OpenSourceEconomics   File: numdiff_np.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _get_best_estimate_single_method(derivative, errors):
    """Select best derivative estimates element wise.

    Given a single method, e.g. central differences with 2 num_terms (see above), we get
    multiple Richardson approximations including estimated errors. Here we select the
    approximations which result in the lowest error element wise.

    Args:
        derivative (np.ndarray): Derivative estimates from Richardson approximation.
            First axis (axis 0) denotes the potentially multiple estimates. Following
            dimensions represent the dimension of the derivative, i.e. for a classical
            gradient ``derivative`` has 2 dimensions, while for a classical jacobian
            ``derivative`` has 3 dimensions.
        errors (np.ndarray): Error estimates of ``derivative`` estimates. Has the same
            shape as ``derivative``.

    Returns:
        derivative_minimal (np.ndarray): Best derivate estimates chosen with respect
            to minimizing ``errors``. Note that the best values are selected
            element-wise. Has shape ``(derivative.shape[1], derivative.shape[2])``.

        error_minimal (np.ndarray): Minimal errors selected element-wise along axis
            0 of ``errors``.

    """
    if derivative.shape[0] == 1:
        derivative_minimal = np.squeeze(derivative, axis=0)
        error_minimal = np.squeeze(errors, axis=0)
    else:

        minimizer = np.nanargmin(errors, axis=0)

        derivative_minimal = np.take_along_axis(
            derivative, minimizer[np.newaxis, :], axis=0
        )
        derivative_minimal = np.squeeze(derivative_minimal, axis=0)
        error_minimal = np.nanmin(errors, axis=0)

    return derivative_minimal, error_minimal 
Example 21
Project: estimagic   Author: OpenSourceEconomics   File: numdiff_np.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _get_best_estimate_along_methods(derivatives, errors):
    """Extract best derivative estimate over different methods.

    Given that for each method, where one method can be for example central differences
    with two num_terms (see above), we have selected a single best derivative estimate,
    we select the best derivative estimates element-wise over different methods, where
    again best is defined as minimizing the approximation error.

    Args:
        derivatives (OrderedDict): Dictionary containing derivative estimates for
            different methods.
        errors (OrderedDict): Dictionary containing error estimates for derivates stored
            in ``derivatives``.

    Returns:
        jac_minimal (np.ndarray): The optimal derivative estimate over different
            methods.

    """
    errors = np.stack(list(errors.values()))
    derivatives = np.stack(list(derivatives.values()))

    if derivatives.shape[0] == 1:
        jac_minimal = np.squeeze(derivatives, axis=0)
    else:
        minimizer = np.nanargmin(errors, axis=0)

        jac_minimal = np.take_along_axis(derivatives, minimizer[np.newaxis, :], axis=0)
        jac_minimal = np.squeeze(jac_minimal, axis=0)

    return jac_minimal 
Example 22
Project: bert4keras   Author: bojone   File: snippets.py    License: Apache License 2.0 5 votes vote down vote up
def beam_search(self, inputs, topk, states=None, min_ends=1):
        """Beam-search decoding.

        Note: ``topk`` here is the beam size.
        Returns: the best-scoring decoded sequence.
        """
        inputs = [np.array([i]) for i in inputs]
        output_ids, output_scores = self.first_output_ids, np.zeros(1)
        for step in range(self.maxlen):
            scores, states = self.predict(
                inputs, output_ids, states, 'logits'
            )  # scores for the current step
            if step == 0:  # after the first prediction, tile inputs topk times
                inputs = [np.repeat(i, topk, axis=0) for i in inputs]
            scores = output_scores.reshape((-1, 1)) + scores  # accumulated beam scores
            indices = scores.argpartition(-topk, axis=None)[-topk:]  # keep only topk
            indices_1 = indices // scores.shape[1]  # row (beam) index
            indices_2 = (indices % scores.shape[1]).reshape((-1, 1))  # column (token) index
            output_ids = np.concatenate([output_ids[indices_1], indices_2],
                                        1)  # update outputs
            output_scores = np.take_along_axis(
                scores, indices, axis=None
            )  # update scores
            end_counts = (output_ids == self.end_id).sum(1)  # count end tokens per beam
            if output_ids.shape[1] >= self.minlen:  # minimum-length check
                best_one = output_scores.argmax()  # best-scoring beam
                if end_counts[best_one] == min_ends:  # if it has already finished
                    return output_ids[best_one]  # return it directly
                else:  # otherwise keep only the unfinished beams
                    flag = (end_counts < min_ends)  # mark unfinished sequences
                    if not flag.all():  # if any beams have finished
                        inputs = [i[flag] for i in inputs]  # drop finished sequences
                        output_ids = output_ids[flag]  # drop finished sequences
                        output_scores = output_scores[flag]  # drop finished sequences
                        end_counts = end_counts[flag]  # drop finished end counts
                        topk = flag.sum()  # shrink topk accordingly
        # reached max length: return the best beam
        return output_ids[output_scores.argmax()]
Example 23
Project: tinynn   Author: borgwang   File: layer.py    License: MIT License 5 votes vote down vote up
def forward(self, inputs):
    """Max-pool ``inputs`` (NHWC) with the layer's kernel and stride,
    recording the per-window argmax for use in the backward pass."""
    stride_h, stride_w = self.stride
    kernel_h, kernel_w = self.kernel_shape
    batch_sz, in_h, in_w, in_c = inputs.shape

    # Compute padding lazily on the first call, then zero-pad.
    if self.padding is None:
        self.padding = get_padding_2d(
            (in_h, in_w), (kernel_h, kernel_w), self.padding_mode)
    X = np.pad(inputs, pad_width=self.padding, mode="constant")
    padded_h, padded_w = X.shape[1:3]

    out_h = (padded_h - kernel_h) // stride_h + 1
    out_w = (padded_w - kernel_w) // stride_w + 1

    # Output values and flat argmax indices, one per window per channel.
    max_pool = np.empty(shape=(batch_sz, out_h, out_w, in_c))
    argmax = np.empty(shape=(batch_sz, out_h, out_w, in_c), dtype=int)
    for row in range(out_h):
        top = row * stride_h
        for col in range(out_w):
            left = col * stride_w
            window = X[:, top: top + kernel_h, left: left + kernel_w, :]
            window = window.reshape((batch_sz, -1, in_c))

            # Flat index of the maximum inside each window, per channel.
            flat_idx = np.argmax(window, axis=1)[:, np.newaxis, :]
            argmax[:, row, col, :] = flat_idx.squeeze()
            max_pool[:, row, col, :] = np.take_along_axis(
                window, flat_idx, axis=1).squeeze()

    self.X_shape = X.shape
    self.out_shape = (out_h, out_w)
    self.argmax = argmax
    return max_pool
Example 24
Project: cupy   Author: cupy   File: indexing.py    License: MIT License 5 votes vote down vote up
def take_along_axis(a, indices, axis):
    """Take values from the input array by matching 1d index and data slices.

    Args:
        a (cupy.ndarray): Array to extract elements.
        indices (cupy.ndarray): Indices to take along each 1d slice of ``a``.
        axis (int): The axis to take 1d slices along. ``None`` flattens ``a``
            first (matching numpy's behavior).

    Returns:
        cupy.ndarray: The indexed result.

    Raises:
        IndexError: if ``indices`` is not an integer array.
        numpy.AxisError: if ``axis`` is out of range.
        ValueError: if ``a`` and ``indices`` differ in dimensionality.

    .. seealso:: :func:`numpy.take_along_axis`
    """

    if indices.dtype.kind not in ('i', 'u'):
        raise IndexError('`indices` must be an integer array')

    if axis is None:
        a = a.ravel()
        axis = 0

    ndim = a.ndim

    if not (-ndim <= axis < ndim):
        raise numpy.AxisError('Axis overrun')

    axis %= a.ndim

    if ndim != indices.ndim:
        raise ValueError(
            '`indices` and `a` must have the same number of dimensions')

    # Build a broadcast-compatible fancy index: `indices` selects along
    # `axis`; every other axis gets an arange reshaped to broadcast.
    fancy_index = []
    for i, n in enumerate(a.shape):
        if i == axis:
            fancy_index.append(indices)
        else:
            ind_shape = (1,) * i + (-1,) + (1,) * (ndim - i - 1)
            fancy_index.append(cupy.arange(n).reshape(ind_shape))

    # Bug fix: index with a tuple — indexing with a plain list is deprecated
    # fancy indexing and is rejected by modern NumPy/CuPy.
    return a[tuple(fancy_index)]
Example 25
Project: bayesmark   Author: uber   File: experiment_analysis.py    License: Apache License 2.0 5 votes vote down vote up
def get_perf_array(evals, evals_visible):
    """Best actual loss so far per iteration, selected by the visible loss.

    Parameters
    ----------
    evals : :class:`numpy:numpy.ndarray` of shape (n_iter, n_batch, n_trials)
        The actual loss (e.g., generalization) for a given experiment.
    evals_visible : :class:`numpy:numpy.ndarray` of shape (n_iter, n_batch, n_trials)
        The observable loss (e.g., validation) for a given experiment.

    Returns
    -------
    perf_array : :class:`numpy:numpy.ndarray` of shape (n_iter, n_trials)
        Best-so-far performance from `evals`, where "best" is decided by
        `evals_visible`.
    """
    n_iter, _, n_trials = evals.shape
    assert evals.size > 0, "perf array not supported for empty arrays"
    assert evals_visible.shape == evals.shape
    assert not np.any(np.isnan(evals))
    assert not np.any(np.isnan(evals_visible))

    # Per (iteration, trial): the batch entry with the lowest visible loss.
    best_batch = np.argmin(evals_visible, axis=1)
    perf_array = np.take_along_axis(evals, best_batch[:, None, :], axis=1).squeeze(axis=1)
    assert perf_array.shape == (n_iter, n_trials)

    visible_perf_array = np.min(evals_visible, axis=1)
    assert visible_perf_array.shape == (n_iter, n_trials)

    # Running best-so-far, ranked by the visible loss.
    return cummin(perf_array, visible_perf_array)
Example 26
Project: scikit-hubness   Author: VarIr   File: local_scaling.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def fit(self, neigh_dist, neigh_ind, X=None, assume_sorted: bool = True, *args, **kwargs) -> LocalScaling:
        """ Fit the model using neigh_dist and neigh_ind as training data.

        Parameters
        ----------
        neigh_dist: np.ndarray, shape (n_samples, n_neighbors)
            Distance matrix of training objects (rows) against their
            individual k nearest neighbors (columns).

        neigh_ind: np.ndarray, shape (n_samples, n_neighbors)
            Neighbor indices corresponding to the values in neigh_dist.

        X: ignored

        assume_sorted: bool, default = True
            Assume input matrices are sorted according to neigh_dist.
            If False, these are sorted here.

        Returns
        -------
        self : LocalScaling
            The fitted scaler (stores ``r_dist_train_`` / ``r_ind_train_``).
        """
        # Check equal number of rows and columns
        check_consistent_length(neigh_ind, neigh_dist)
        check_consistent_length(neigh_ind.T, neigh_dist.T)

        # increment to include the k-th element in slicing
        k = self.k + 1

        # Find distances to the k-th neighbor (standard LS) or the k neighbors (NICDM)
        if assume_sorted:
            self.r_dist_train_ = neigh_dist[:, :k]
            self.r_ind_train_ = neigh_ind[:, :k]
        else:
            # BUG FIX: partition the first `k` positions (0..self.k), not only
            # the first `self.k`. With kth=np.arange(self.k), the element at
            # column self.k was NOT guaranteed to be in sorted position, so
            # the k-th neighbor distance (used by standard local scaling)
            # could be an arbitrary larger value. Clamp to the number of
            # available columns so inputs with exactly self.k neighbors
            # keep working as before.
            kth = np.arange(min(k, neigh_dist.shape[1]))
            mask = np.argpartition(neigh_dist, kth=kth)[:, :k]
            self.r_dist_train_ = np.take_along_axis(neigh_dist, mask, axis=1)
            self.r_ind_train_ = np.take_along_axis(neigh_ind, mask, axis=1)

        return self
Example 27
Project: numpyro   Author: pyro-ppl   File: diagnostics.py    License: Apache License 2.0 5 votes vote down vote up
def hpdi(x, prob=0.90, axis=0):
    """
    Computes the "highest posterior density interval" (HPDI), i.e. the
    narrowest interval containing probability mass ``prob`` of the samples.

    :param numpy.ndarray x: the input array.
    :param float prob: the probability mass of samples within the interval.
    :param int axis: the dimension to calculate hpdi.
    :return: quantiles of ``x`` at ``(1 - prob) / 2`` and
        ``(1 + prob) / 2``.
    :rtype: numpy.ndarray
    """
    # Move the sample dimension to the front and sort along it.
    samples = np.swapaxes(x, axis, 0)
    sorted_samples = np.sort(samples, axis=0)
    n_samples = samples.shape[0]
    width = int(prob * n_samples)
    # Every contiguous window of `width` sorted samples is a candidate
    # interval; the narrowest window is the HPDI.
    span = sorted_samples[width:] - sorted_samples[:(n_samples - width)]
    start = span.argmin(axis=0)
    stop = start + width
    lower = np.take_along_axis(sorted_samples, start[None, ...], axis=0)
    lower = np.swapaxes(lower, axis, 0)
    upper = np.take_along_axis(sorted_samples, stop[None, ...], axis=0)
    upper = np.swapaxes(upper, axis, 0)
    return np.concatenate([lower, upper], axis=axis)
Example 28
Project: NNEF-Tools   Author: KhronosGroup   File: topk.py    License: Apache License 2.0 5 votes vote down vote up
def topk(data, axis, k):
    indices = np.flip(np.argsort(data, axis=axis), axis=axis).take(indices=range(k), axis=axis)
    values = np.take_along_axis(data, indices, axis=axis)
    return values, indices 
Example 29
Project: TractSeg   Author: MIC-DKFZ   File: fiber_utils.py    License: Apache License 2.0 5 votes vote down vote up
def get_best_original_peaks(peaks_pred, peaks_orig, peak_len_thr=0.1):
    """
    Find the peak from peaks_orig which is closest to the peak in peaks_pred.

    Args:
        peaks_pred: volume containing 1 peak per voxel [x,y,z,3]
        peaks_orig: volume containing 3 peaks per voxel [x,y,z,9]
        peak_len_thr: all peaks shorter than this threshold will be removed

    Returns:
        Image containing 1 peak [x,y,z,3]
    """

    def _get_most_aligned_peak(pred, orig):
        # orig: list of three [x,y,z,3] candidate peak volumes -> array (3,x,y,z,3)
        orig = np.array(orig)
        # NOTE(review): argmax of |angle| implies angle_last_dim returns a
        # similarity-like score (e.g. cosine), where larger magnitude means
        # better alignment -- confirm against peak_utils.
        angle1 = abs(peak_utils.angle_last_dim(pred, orig[0]))
        angle2 = abs(peak_utils.angle_last_dim(pred, orig[1]))
        angle3 = abs(peak_utils.angle_last_dim(pred, orig[2]))
        argmax = np.argmax(np.stack([angle1, angle2, angle3], axis=-1), axis=-1)

        x, y, z = (orig.shape[1], orig.shape[2], orig.shape[3])
        # Fancy-index along the candidate axis: per voxel, select the peak
        # with the best alignment score.
        return orig[tuple([argmax] + np.ogrid[:x, :y, :z])]
        # Other ways that would also work
        # return orig[argmax, np.arange(x)[:, None, None], np.arange(y)[:, None], np.arange(z)]
        # return np.take_along_axis(orig, argmax[None, ..., None], axis=0)[0]   # only supported in newest numpy version

    # Replace NaNs so norms and angle computations are well defined.
    peaks_pred = np.nan_to_num(peaks_pred)
    peaks_orig = np.nan_to_num(peaks_orig)

    # Remove all peaks where predicted peaks are too short
    peaks_orig[np.linalg.norm(peaks_pred, axis=-1) < peak_len_thr] = 0

    best_orig = _get_most_aligned_peak(peaks_pred,
                                      [peaks_orig[:, :, :, 0:3],
                                       peaks_orig[:, :, :, 3:6],
                                       peaks_orig[:, :, :, 6:9]])
    return best_orig
Example 30
Project: Carnets   Author: holzschu   File: test_quantity_non_ufuncs.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_take_along_axis(self):
        """np.take_along_axis on a Quantity matches operating on .value and re-attaching the unit."""
        argmax_idx = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
        result = np.take_along_axis(self.q, argmax_idx, axis=0)
        reference = np.take_along_axis(self.q.value, argmax_idx, axis=0) * self.q.unit
        assert np.all(result == reference)