Python numpy.nanargmin() Examples

The following are 30 code examples of numpy.nanargmin(), drawn from the open-source projects named above each example. You may also want to check out all available functions and classes of the numpy module, or try the search function.
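As a quick reminder of the behaviour these examples rely on: np.nanargmin returns the index of the smallest non-NaN value, whereas np.argmin would land on a NaN, and an all-NaN input raises a ValueError. A minimal sketch:

import numpy as np

a = np.array([4.0, np.nan, 1.5, 7.0])
print(np.argmin(a))     # 1 -- plain argmin picks the NaN position
print(np.nanargmin(a))  # 2 -- NaNs are ignored
# np.nanargmin(np.array([np.nan, np.nan]))  # raises ValueError: All-NaN slice encountered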
Example #1
Source File: rdp_accountant.py    From privacy with Apache License 2.0
def _compute_eps(orders, rdp, delta):
  """Compute epsilon given a list of RDP values and target delta.

  Args:
    orders: An array (or a scalar) of orders.
    rdp: A list (or a scalar) of RDP guarantees.
    delta: The target delta.

  Returns:
    Pair of (eps, optimal_order).

  Raises:
    ValueError: If input is malformed.

  """
  orders_vec = np.atleast_1d(orders)
  rdp_vec = np.atleast_1d(rdp)

  if len(orders_vec) != len(rdp_vec):
    raise ValueError("Input lists must have the same length.")

  eps = rdp_vec - math.log(delta) / (orders_vec - 1)

  idx_opt = np.nanargmin(eps)  # Ignore NaNs
  return eps[idx_opt], orders_vec[idx_opt] 
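A minimal usage sketch of the function above, assuming numpy as np and math are imported as in the source module; the orders, RDP values, and delta below are purely illustrative:

orders = [1.5, 2.0, 4.0, 8.0, 16.0]
rdp = [0.1, 0.15, 0.5, 2.0, 9.0]
eps, opt_order = _compute_eps(orders, rdp, delta=1e-5)
print(eps, opt_order)  # the order whose epsilon bound is smallest (here 8.0)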
Example #2
Source File: hasher.py    From apollo with GNU General Public License v3.0
def weighted_minhash(v, sample_size, rs, ln_cs, betas):
    if sample_size != rs.shape[0]:
        raise ValueError("Input sample size mismatch, expecting %d" % rs.shape[0])
    if len(v) != rs.shape[1]:
        raise ValueError("Input dimension mismatch, expecting %d" % rs.shape[1])

    hashvalues = numpy.zeros((sample_size, 2), dtype=numpy.uint32)
    vzeros = (v == 0)
    if vzeros.all():
        raise ValueError("Input is all zeros")
    v[vzeros] = numpy.nan
    vlog = numpy.log(v)
    v[vzeros] = 0
    for i in range(sample_size):
        t = numpy.floor((vlog / rs[i]) + betas[i])
        ln_y = (t - betas[i]) * rs[i]
        ln_a = ln_cs[i] - ln_y - rs[i]
        k = numpy.nanargmin(ln_a)
        hashvalues[i][0], hashvalues[i][1] = k, int(t[k])
    return hashvalues 
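A hedged usage sketch for the hashing routine above; the parameter arrays follow the usual weighted-MinHash construction (rs and the values behind ln_cs drawn from Gamma(2, 1), betas uniform on [0, 1)), which may differ in detail from how apollo generates them:

import numpy

# Hypothetical parameter setup for illustration only.
rng = numpy.random.RandomState(0)
sample_size, dim = 16, 10
rs = rng.gamma(2.0, 1.0, size=(sample_size, dim))
ln_cs = numpy.log(rng.gamma(2.0, 1.0, size=(sample_size, dim)))
betas = rng.uniform(size=(sample_size, dim))

v = rng.rand(dim) + 1.0  # strictly positive weight vector
hashes = weighted_minhash(v, sample_size, rs, ln_cs, betas)
print(hashes.shape)      # (16, 2): one (k, int(t[k])) pair per sample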
Example #3
Source File: standalone.py    From everest with MIT License
def mouse_click(self, event):
        '''

        '''

        if event.mouseevent.inaxes == self.ax:

            # Index of nearest point
            i = np.nanargmin(
                ((event.mouseevent.xdata - self.x) / self.nx) ** 2)
            j = np.nanargmin(
                ((event.mouseevent.ydata - self.y) / self.ny) ** 2)
            self.last_i = i
            self.last_j = j

            # Toggle pixel
            if self.aperture[j, i]:
                self.aperture[j, i] = 0
            else:
                self.aperture[j, i] = 1

            # Update the contour
            self.update() 
Example #4
Source File: standalone.py    From everest with MIT License
def mouse_drag(self, event):
        '''

        '''

        if event.inaxes == self.ax and event.button == 1:

            # Index of nearest point
            i = np.nanargmin(((event.xdata - self.x) / self.nx) ** 2)
            j = np.nanargmin(((event.ydata - self.y) / self.ny) ** 2)

            if (i == self.last_i) and (j == self.last_j):
                return
            else:
                self.last_i = i
                self.last_j = j

            # Toggle pixel
            if self.aperture[j, i]:
                self.aperture[j, i] = 0
            else:
                self.aperture[j, i] = 1

            # Update the contour
            self.update() 
Example #5
Source File: rdp_accountant.py    From models with Apache License 2.0
def _compute_eps(orders, rdp, delta):
  """Compute epsilon given a list of RDP values and target delta.

  Args:
    orders: An array (or a scalar) of orders.
    rdp: A list (or a scalar) of RDP guarantees.
    delta: The target delta.

  Returns:
    Pair of (eps, optimal_order).

  Raises:
    ValueError: If input is malformed.

  """
  orders_vec = np.atleast_1d(orders)
  rdp_vec = np.atleast_1d(rdp)

  if len(orders_vec) != len(rdp_vec):
    raise ValueError("Input lists must have the same length.")

  eps = rdp_vec - math.log(delta) / (orders_vec - 1)

  idx_opt = np.nanargmin(eps)  # Ignore NaNs
  return eps[idx_opt], orders_vec[idx_opt] 
Example #6
Source File: persistence.py    From caml-mimic with MIT License
def save_everything(args, metrics_hist_all, model, model_dir, params, criterion, evaluate=False):
    """
        Save metrics, model, params all in model_dir
    """
    save_metrics(metrics_hist_all, model_dir)
    params['model_dir'] = model_dir
    save_params_dict(params)

    if not evaluate:
        #save the model with the best criterion metric
        if not np.all(np.isnan(metrics_hist_all[0][criterion])):
            if criterion == 'loss_dev': 
                eval_val = np.nanargmin(metrics_hist_all[0][criterion])
            else:
                eval_val = np.nanargmax(metrics_hist_all[0][criterion])

            if eval_val == len(metrics_hist_all[0][criterion]) - 1:
                # save the state dict
                sd = model.cpu().state_dict()
                torch.save(sd, model_dir + "/model_best_%s.pth" % criterion)
                if args.gpu:
                    model.cuda()
    print("saved metrics, params, model to directory %s\n" % (model_dir)) 
Example #7
Source File: refinement.py    From DeepLabCut with GNU Lesser General Public License v3.0
def OnKeyPressed(self, event=None):
        if event.GetKeyCode() == wx.WXK_RIGHT:
            self.nextImage(event=None)
        elif event.GetKeyCode() == wx.WXK_LEFT:
            self.prevImage(event=None)
        elif event.GetKeyCode() == wx.WXK_BACK:
            pos_abs = event.GetPosition()
            inv = self.axes.transData.inverted()
            pos_rel = list(inv.transform(pos_abs))
            pos_rel[1] = (
                self.axes.get_ylim()[0] - pos_rel[1]
            )  # Recall y-axis is inverted
            i = np.nanargmin(
                [self.calc_distance(*dp.point.center, *pos_rel) for dp in self.drs]
            )
            closest_dp = self.drs[i]
            msg = wx.MessageBox(
                "Do you want to remove the label %s ?" % closest_dp.bodyParts,
                "Remove!",
                wx.YES_NO | wx.ICON_WARNING,
            )
            if msg == 2:
                closest_dp.delete_data() 
Example #8
Source File: multiple_individuals_refinement_toolbox.py    From DeepLabCut with GNU Lesser General Public License v3.0
def OnKeyPressed(self, event=None):
        if event.GetKeyCode() == wx.WXK_RIGHT:
            self.nextImage(event=None)
        elif event.GetKeyCode() == wx.WXK_LEFT:
            self.prevImage(event=None)
        elif event.GetKeyCode() == wx.WXK_BACK:
            pos_abs = event.GetPosition()
            inv = self.axes.transData.inverted()
            pos_rel = list(inv.transform(pos_abs))
            pos_rel[1] = (
                self.axes.get_ylim()[0] - pos_rel[1]
            )  # Recall y-axis is inverted
            i = np.nanargmin(
                [self.calc_distance(*dp.point.center, *pos_rel) for dp in self.drs]
            )
            closest_dp = self.drs[i]
            msg = wx.MessageBox(
                f"Do you want to remove the label {closest_dp.individual_name}:{closest_dp.bodyParts}?",
                "Remove!",
                wx.YES_NO | wx.ICON_WARNING,
            )
            if msg == 2:
                closest_dp.delete_data() 
Example #9
Source File: graph_utils.py    From nasbot with MIT License
def dijkstra(A, source, non_edges_are_zero_or_inf='zero'):
  """ Run's dijkstra's on the vertex to produce the shortest path to all nodes.
      Just copyng the pseudo code in Wikipedia.
      non_edges_are_zero_or_inf indicate whether a non-edge is indicated as a 0 or
      inf in A.
  """
  vertex_is_remaining = np.array([1] * A.shape[0])
  all_vertices = np.array(range(A.shape[0]))
  all_dists = np.array([np.inf] * A.shape[0])
  all_dists[source] = 0
  while sum(vertex_is_remaining) > 0:
    # Find the minimum and remove it.
    rem_dists = deepcopy(all_dists)
    rem_dists[vertex_is_remaining == 0] = np.nan
    u = np.nanargmin(rem_dists)
    vertex_is_remaining[u] = 0
    if np.all(np.logical_not(np.isfinite(rem_dists))):
      break
    # Now apply dijkstra's updates
    if non_edges_are_zero_or_inf == 'zero':
      u_nbd = all_vertices[A[u] > 0]
    elif non_edges_are_zero_or_inf == 'inf':
      u_nbd = all_vertices[A[u] < np.inf]
    else:
      raise ValueError('non_edges_are_zero_or_inf should be \'zero\' or \'inf\'.')
    for v in u_nbd:
      alt = all_dists[u] + A[u][v]
      if alt < all_dists[v]:
        all_dists[v] = alt
  return all_dists 
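A small usage sketch on a made-up 4-node graph, assuming the module's own imports (numpy as np and deepcopy); non-edges are encoded as 0, matching the default:

import numpy as np

A = np.array([[0, 1, 4, 0],
              [1, 0, 2, 5],
              [4, 2, 0, 1],
              [0, 5, 1, 0]], dtype=float)
print(dijkstra(A, source=0))  # expected: [0. 1. 3. 4.]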
Example #10
Source File: test_nanfunctions.py    From elasticintel with GNU General Public License v3.0
def test_nanargmin(self):
        tgt = np.argmin(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmin(mat), tgt) 
Example #11
Source File: object_clustering.py    From clifford with BSD 3-Clause "New" or "Revised" License
def assign_measurements_to_objects_matrix(objects, objects_measurements, object_type='generic',
                                          cuda=False, symmetric=False):
    """
    Assigns each object in objects_measurements to one in objects based on minimum cost
    """
    if cuda:
        matrix = object_set_cost_cuda_mvs(objects, objects_measurements)
    else:
        matrix = object_set_cost_matrix(objects, objects_measurements,
                                        object_type=object_type,
                                        symmetric=symmetric)
    labels = np.nanargmin(matrix, axis=0)
    costs = np.array([matrix[l, i] for i, l in enumerate(labels)])
    return [labels, costs] 
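The assignment step itself is plain numpy: nanargmin along axis 0 picks, for each measurement (column), the object (row) with the lowest cost. A standalone sketch with a made-up cost matrix, not calling clifford's cost functions:

import numpy as np

# rows = objects, columns = measurements
matrix = np.array([[0.2, 3.0, np.nan],
                   [1.5, 0.1, 0.4]])
labels = np.nanargmin(matrix, axis=0)
costs = np.array([matrix[l, i] for i, l in enumerate(labels)])
print(labels, costs)  # [0 1 1] [0.2 0.1 0.4]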
Example #12
Source File: _anuclim.py    From xclim with Apache License 2.0
def _from_other_arg(criteria, output, op, freq):
    """Pick values from output based on operation returning an index from criteria.

    Parameters
    ----------
    criteria : DataArray
      Series on which operation returning index is applied.
    output : DataArray
      Series to be indexed.
    op : func
      Function returning an index, for example np.argmin, np.argmax, np.nanargmin, np.nanargmax.
    freq : str
      Temporal grouping.

    Returns
    -------
    DataArray
      Output values where criteria is met at the given frequency.
    """
    ds = xarray.Dataset(data_vars={"criteria": criteria, "output": output})
    dim = "time"

    def get_other_op(ds):
        all_nans = ds.criteria.isnull().all(dim=dim)
        index = op(ds.criteria.where(~all_nans, 0), dim=dim)
        return lazy_indexing(ds.output, index=index, dim=dim).where(~all_nans)

    return ds.resample(time=freq).map(get_other_op) 
Example #13
Source File: ch_ops.py    From chumpy with MIT License
def argf(self, *args, **kwargs): return np.nanargmin(*args, **kwargs) 
Example #14
Source File: multiple_individuals_labeling_toolbox.py    From DeepLabCut with GNU Lesser General Public License v3.0
def OnKeyPressed(self, event=None):
        if event.GetKeyCode() == wx.WXK_RIGHT:
            self.nextImage(event=None)
        elif event.GetKeyCode() == wx.WXK_LEFT:
            self.prevImage(event=None)
        elif event.GetKeyCode() == wx.WXK_DOWN:
            self.nextLabel(event=None)
        elif event.GetKeyCode() == wx.WXK_UP:
            self.previousLabel(event=None)
        elif event.GetKeyCode() == wx.WXK_BACK:
            pos_abs = event.GetPosition()
            inv = self.axes.transData.inverted()
            pos_rel = list(inv.transform(pos_abs))
            pos_rel[1] = (
                self.axes.get_ylim()[0] - pos_rel[1]
            )  # Recall y-axis is inverted
            i = np.nanargmin(
                [self.calc_distance(*dp.point.center, *pos_rel) for dp in self.drs]
            )
            closest_dp = self.drs[i]
            msg = wx.MessageBox(
                f"Do you want to remove the label {closest_dp.individual_names}:{closest_dp.bodyParts}?",
                "Remove!",
                wx.YES_NO | wx.ICON_WARNING,
            )
            if msg == 2:
                closest_dp.delete_data()
                self.buttonCounter[closest_dp.individual_names].remove(
                    closest_dp.bodyParts
                ) 
Example #15
Source File: numpy_like.py    From sdc with BSD 2-Clause "Simplified" License
def nanargmin(self):
    pass  # stub: the compiled implementation is registered separately in the source file (likely via an overload decorator)
Example #16
Source File: getFilteredSkels.py    From tierpsy-tracker with MIT License
def _h_getPerpContourInd(
        skeleton,
        skel_ind,
        contour_side1,
        contour_side2,
        contour_width):
    # get the closest point in the contour from a line perpendicular to the skeleton.
    #%%

    # get the slope of a line perpendicular to the skeleton
    dR = skeleton[skel_ind + 1] - skeleton[skel_ind - 1]
    #m = dR[1]/dR[0]; M = -1/m
    a = -dR[0]
    b = +dR[1]

    c = b * skeleton[skel_ind, 1] - a * skeleton[skel_ind, 0]

    max_width_squared = np.max(contour_width)**2
    # modified from https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
    #a = M, b = -1

    # make sure we are not selecting a point that gets traversed by a coiled
    # worm
    dist2cnt1 = np.sum((contour_side1 - skeleton[skel_ind])**2, axis=1)
    d1 = np.abs(a * contour_side1[:, 0] - b * contour_side1[:, 1] + c)
    d1[dist2cnt1 > max_width_squared] = np.nan

    dist2cnt2 = np.sum((contour_side2 - skeleton[skel_ind])**2, axis=1)
    d2 = np.abs(a * contour_side2[:, 0] - b * contour_side2[:, 1] + c)
    d2[dist2cnt2 > max_width_squared] = np.nan
    
    try:
        cnt1_ind = np.nanargmin(d1)
        cnt2_ind = np.nanargmin(d2)
    except ValueError:
        cnt1_ind = np.nan
        cnt2_ind = np.nan
    
    return cnt1_ind, cnt2_ind 
Example #17
Source File: test_nanfunctions.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def test_nanargmin(self):
        tgt = np.argmin(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmin(mat), tgt) 
Example #18
Source File: test_nanfunctions.py    From ImageFusion with MIT License
def test_nanargmin(self):
        tgt = np.argmin(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmin(mat), tgt) 
Example #19
Source File: test_sdc_numpy.py    From sdc with BSD 2-Clause "Simplified" License
def test_nanargmin(self):
        def ref_impl(a):
            return np.nanargmin(a)

        def sdc_impl(a):
            return numpy_like.nanargmin(a)

        sdc_func = self.jit(sdc_impl)

        cases = [[5, 2, 0, 333, -4], [3.3, 5.4, np.nan, 7.9, np.nan]]
        for case in cases:
            a = np.array(case)
            with self.subTest(data=case):
                np.testing.assert_array_equal(sdc_func(a), ref_impl(a)) 
Example #20
Source File: training.py    From caml-mimic with MIT License
def early_stop(metrics_hist, criterion, patience):
    if not np.all(np.isnan(metrics_hist[criterion])):
        if len(metrics_hist[criterion]) >= patience:
            if criterion == 'loss_dev': 
                return np.nanargmin(metrics_hist[criterion]) < len(metrics_hist[criterion]) - patience
            else:
                return np.nanargmax(metrics_hist[criterion]) < len(metrics_hist[criterion]) - patience
    else:
        #keep training if criterion results have all been nan so far
        return False 
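A hypothetical call to the helper above, on a dev-loss history whose best value sits at index 2:

import numpy as np

metrics_hist = {'loss_dev': np.array([0.9, 0.7, 0.5, 0.55, 0.6, 0.62])}
print(early_stop(metrics_hist, 'loss_dev', patience=3))  # True: best epoch is older than the patience window
print(early_stop(metrics_hist, 'loss_dev', patience=5))  # False: keep training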
Example #21
Source File: detrender.py    From everest with MIT License
def optimize_lambda(self, validation):
        '''
        Returns the index of :py:attr:`self.lambda_arr` that minimizes the
        validation scatter across segments, preferring the lowest value of
        :py:obj:`lambda` whose scatter lies within a fractional tolerance
        :py:attr:`self.leps` of each segment's minimum.

        :param numpy.ndarray validation: The scatter in the validation set \
               as a function of :py:obj:`lambda`

        '''

        maxm = 0
        minr = len(validation)
        for n in range(validation.shape[1]):
            # The index that minimizes the scatter for this segment
            m = np.nanargmin(validation[:, n])
            if m > maxm:
                # The largest of the `m`s.
                maxm = m
            # The largest index with validation scatter within
            # `self.leps` of the minimum for this segment
            r = np.where((validation[:, n] - validation[m, n]) /
                         validation[m, n] <= self.leps)[0][-1]
            if r < minr:
                # The smallest of the `r`s
                minr = r
        return min(maxm, minr) 
Example #22
Source File: test_nanfunctions.py    From mxnet-lambda with Apache License 2.0
def test_nanargmin(self):
        tgt = np.argmin(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmin(mat), tgt) 
Example #23
Source File: test_nanfunctions.py    From pySINDy with MIT License
def test_nanargmin(self):
        tgt = np.argmin(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmin(mat), tgt) 
Example #24
Source File: test_interaction.py    From pySINDy with MIT License
def test_nanfunctions_matrices_general():
    # Check that it works and that type and
    # shape are preserved
    # 2018-04-29: moved here from core.tests.test_nanfunctions
    mat = np.matrix(np.eye(3))
    for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
              np.nanmean, np.nanvar, np.nanstd):
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 1))
        res = f(mat)
        assert_(np.isscalar(res))

    for f in np.nancumsum, np.nancumprod:
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3*3)) 
Example #25
Source File: Callbacks.py    From GroundedTranslation with BSD 3-Clause "New" or "Revised" License
def early_stop_decision(self, epoch, val_metric, val_loss):
        '''
        Stop training if validation loss has stopped decreasing and
        validation BLEU score has not increased for --patience epochs.

        WARNING: quits with sys.exit(0).

        TODO: this doesn't yet support early stopping based on TER
        '''

        if val_loss < self.best_val_loss:
            self.wait = 0
        elif val_metric > self.best_val_metric or self.args.no_early_stopping:
            self.wait = 0
        else:
            self.wait += 1
            if self.wait >= self.patience:
                # we have exceeded patience
                if val_loss > self.best_val_loss:
                    # and loss is no longer decreasing
                    logger.info("Epoch %d: early stopping", epoch)
                    handle = open("checkpoints/%s/summary"
                                  % self.args.run_string, "a")
                    handle.write("Early stopping because patience exceeded\n")
                    best_bleu = np.nanargmax(self.val_metric)
                    best_loss = np.nanargmin(self.val_loss)
                    logger.info("Best Metric: %d | val loss %.5f score %.2f",
                                best_bleu+1, self.val_loss[best_bleu],
                                self.val_metric[best_bleu])
                    logger.info("Best loss: %d | val loss %.5f score %.2f",
                                best_loss+1, self.val_loss[best_loss],
                                self.val_metric[best_loss])
                    handle.close()
                    sys.exit(0) 
Example #26
Source File: test_interaction.py    From predictive-maintenance-using-machine-learning with Apache License 2.0
def test_nanfunctions_matrices_general():
    # Check that it works and that type and
    # shape are preserved
    # 2018-04-29: moved here from core.tests.test_nanfunctions
    mat = np.matrix(np.eye(3))
    for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
              np.nanmean, np.nanvar, np.nanstd):
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 1))
        res = f(mat)
        assert_(np.isscalar(res))

    for f in np.nancumsum, np.nancumprod:
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3*3)) 
Example #27
Source File: numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def _get_best_estimate_along_methods(derivatives, errors):
    """Extract best derivative estimate over different methods.

    Given that for each method, where one method can be for example central differences
    with two num_terms (see above), we have selected a single best derivative estimate,
    we select the best derivative estimates element-wise over different methods, where
    again best is defined as minimizing the approximation error.

    Args:
        derivatives (OrderedDict): Dictionary containing derivative estimates for
            different methods.
        errors (OrderedDict): Dictionary containing error estimates for derivatives stored
            in ``derivatives``.

    Returns:
        jac_minimal (np.ndarray): The optimal derivative estimate over different
            methods.

    """
    errors = np.stack(list(errors.values()))
    derivatives = np.stack(list(derivatives.values()))

    if derivatives.shape[0] == 1:
        jac_minimal = np.squeeze(derivatives, axis=0)
    else:
        minimizer = np.nanargmin(errors, axis=0)

        jac_minimal = np.take_along_axis(derivatives, minimizer[np.newaxis, :], axis=0)
        jac_minimal = np.squeeze(jac_minimal, axis=0)

    return jac_minimal 
Example #28
Source File: numdiff_np.py    From estimagic with BSD 3-Clause "New" or "Revised" License
def _get_best_estimate_single_method(derivative, errors):
    """Select best derivative estimates element wise.

    Given a single method, e.g. central differences with 2 num_terms (see above), we get
    multiple Richardson approximations including estimated errors. Here we select the
    approximations which result in the lowest error element wise.

    Args:
        derivative (np.ndarray): Derivative estimates from Richardson approximation.
            First axis (axis 0) denotes the potentially multiple estimates. Following
            dimensions represent the dimension of the derivative, i.e. for a classical
            gradient ``derivative`` has 2 dimensions, while for a classical jacobian
            ``derivative`` has 3 dimensions.
        errors (np.ndarray): Error estimates of ``derivative`` estimates. Has the same
            shape as ``derivative``.

    Returns:
        derivative_minimal (np.ndarray): Best derivative estimates chosen with respect
            to minimizing ``errors``. Note that the best values are selected
            element-wise. Has shape ``(derivative.shape[1], derivative.shape[2])``.

        error_minimal (np.ndarray): Minimal errors selected element-wise along axis
            0 of ``errors``.

    """
    if derivative.shape[0] == 1:
        derivative_minimal = np.squeeze(derivative, axis=0)
        error_minimal = np.squeeze(errors, axis=0)
    else:

        minimizer = np.nanargmin(errors, axis=0)

        derivative_minimal = np.take_along_axis(
            derivative, minimizer[np.newaxis, :], axis=0
        )
        derivative_minimal = np.squeeze(derivative_minimal, axis=0)
        error_minimal = np.nanmin(errors, axis=0)

    return derivative_minimal, error_minimal 
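The pattern shared by both estimagic helpers, nanargmin along axis 0 followed by np.take_along_axis, can be seen in isolation with small made-up arrays:

import numpy as np

derivative = np.array([[1.0, 2.0], [3.0, 0.5]])  # two candidate estimates per element
errors = np.array([[0.1, 0.3], [0.2, np.nan]])   # their estimated errors

minimizer = np.nanargmin(errors, axis=0)                                 # [0 0]
best = np.take_along_axis(derivative, minimizer[np.newaxis, :], axis=0)
print(np.squeeze(best, axis=0))   # [1. 2.]
print(np.nanmin(errors, axis=0))  # [0.1 0.3]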
Example #29
Source File: test_nanfunctions.py    From GraphicDesignPatternByPython with MIT License
def test_nanargmin(self):
        tgt = np.argmin(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmin(mat), tgt) 
Example #30
Source File: test_interaction.py    From GraphicDesignPatternByPython with MIT License
def test_nanfunctions_matrices_general():
    # Check that it works and that type and
    # shape are preserved
    # 2018-04-29: moved here from core.tests.test_nanfunctions
    mat = np.matrix(np.eye(3))
    for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
              np.nanmean, np.nanvar, np.nanstd):
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 1))
        res = f(mat)
        assert_(np.isscalar(res))

    for f in np.nancumsum, np.nancumprod:
        res = f(mat, axis=0)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat, axis=1)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (3, 3))
        res = f(mat)
        assert_(isinstance(res, np.matrix))
        assert_(res.shape == (1, 3*3))