Python numpy.full_like() Examples

The following are 30 code examples of numpy.full_like(), drawn from open-source projects. The project, source file, and license for each example are noted above it. You may also want to check out all available functions and classes of the numpy module.
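As a quick orientation before the project examples, here is a minimal standalone sketch of what numpy.full_like() does: it returns a new array with the same shape and dtype as the prototype array (unless dtype is overridden), with every element set to the given fill value.

import numpy as np

a = np.arange(6, dtype=np.int64).reshape(2, 3)
b = np.full_like(a, 7)                  # same shape and dtype as `a`, filled with 7
c = np.full_like(a, 0.5, dtype=float)   # the dtype can be overridden explicitly
print(b)   # every element is 7, shape (2, 3), dtype int64
print(c)   # every element is 0.5, dtype float64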
Example #1
Source File: test_tracking.py    From pvlib-python with BSD 3-Clause "New" or "Revised" License
def test_arrays_multi():
    apparent_zenith = np.array([[10, 10], [10, 10]])
    apparent_azimuth = np.array([[180, 180], [180, 180]])
    # singleaxis should fail for num dim > 1
    with pytest.raises(ValueError):
        tracking.singleaxis(apparent_zenith, apparent_azimuth,
                            axis_tilt=0, axis_azimuth=0,
                            max_angle=90, backtrack=True,
                            gcr=2.0/7.0)
    # uncomment if we ever get singleaxis to support num dim > 1 arrays
    # assert isinstance(tracker_data, dict)
    # expect = {'tracker_theta': np.full_like(apparent_zenith, 0),
    #           'aoi': np.full_like(apparent_zenith, 10),
    #           'surface_azimuth': np.full_like(apparent_zenith, 90),
    #           'surface_tilt': np.full_like(apparent_zenith, 0)}
    # for k, v in expect.items():
    #     assert_allclose(tracker_data[k], v) 
Example #2
Source File: test_data.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_power_transformer_nans(method):
    # Make sure lambda estimation is not influenced by NaN values
    # and that transform() supports NaN silently

    X = np.abs(X_1col)
    pt = PowerTransformer(method=method)
    pt.fit(X)
    lmbda_no_nans = pt.lambdas_[0]

    # concat nans at the end and check lambda stays the same
    X = np.concatenate([X, np.full_like(X, np.nan)])
    X = shuffle(X, random_state=0)

    pt.fit(X)
    lmbda_nans = pt.lambdas_[0]

    assert_almost_equal(lmbda_no_nans, lmbda_nans, decimal=5)

    X_trans = pt.transform(X)
    assert_array_equal(np.isnan(X_trans), np.isnan(X)) 
Example #3
Source File: categorize.py    From numcodecs with MIT License
def decode(self, buf, out=None):

        # normalise input
        enc = ensure_ndarray(buf).view(self.astype)

        # flatten to simplify implementation
        enc = enc.reshape(-1, order='A')

        # setup output
        dec = np.full_like(enc, fill_value='', dtype=self.dtype)

        # apply decoding
        for i, l in enumerate(self.labels):
            dec[enc == (i + 1)] = l

        # handle output
        dec = ndarray_copy(dec, out)

        return dec 
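The decode method above preallocates its output with np.full_like, overriding the dtype so integer codes can be mapped back onto string labels. A minimal standalone sketch of the same idea (the label set and string dtype here are illustrative, not the codec's actual configuration):

import numpy as np

labels = ['cat', 'dog']                    # hypothetical label set
enc = np.array([1, 2, 0, 2], dtype='u1')   # 0 encodes "not in labels"
dec = np.full_like(enc, fill_value='', dtype='U3')
for i, label in enumerate(labels):
    dec[enc == (i + 1)] = label
print(dec)   # ['cat' 'dog' '' 'dog']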
Example #4
Source File: windowed_pass.py    From fastats with MIT License
def windowed_pass_2d(x, win):
    """
    The same as `windowed_pass`, but explicitly
    iterates over the `value()` return array
    and assigns each element into `result`.

    This allows 2-dimensional arrays to be returned
    from `value()` functions, before we support
    the behaviour properly using AST transforms.

    This allows for extremely fast iteration
    for routines such as OLS, while at the same
    time calculating t-stats / r^2.
    """
    result = np.full_like(x, np.nan)
    for i in range(win, x.shape[0]+1):
        res = value(x[i-win:i])
        for j, j_val in enumerate(res):
            result[i-1, j] = j_val
    return result 
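Prefilling result with np.full_like(x, np.nan) is what leaves the warm-up rows (the first win - 1 entries) as NaN. A minimal standalone sketch of the same prefill pattern for a plain rolling mean (value() is replaced by np.mean purely for illustration):

import numpy as np

def rolling_mean(x, win):
    result = np.full_like(x, np.nan, dtype=float)   # warm-up entries stay NaN
    for i in range(win, x.shape[0] + 1):
        result[i - 1] = np.mean(x[i - win:i])
    return result

print(rolling_mean(np.arange(5.0), 3))   # [nan nan  1.  2.  3.]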
Example #5
Source File: test_stride_tricks.py    From Mastering-Elasticsearch-7.0 with MIT License
def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable) 
Example #6
Source File: array_ops.py    From trax with Apache License 2.0
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):  # pylint: disable=missing-docstring,redefined-outer-name
  """order, subok and shape arguments mustn't be changed."""
  if order != 'K':
    raise ValueError('Non-standard orders are not supported.')
  if not subok:
    raise ValueError('subok being False is not supported.')
  if shape:
    raise ValueError('Overriding the shape is not supported.')

  a = asarray(a).data
  dtype = dtype or utils.result_type(a)
  fill_value = asarray(fill_value, dtype=dtype)
  return arrays_lib.tensor_to_ndarray(
      tf.broadcast_to(fill_value.data, tf.shape(a)))


# TODO(wangpeng): investigate whether we can make `copy` default to False.
# TODO(wangpeng): utils.np_doc can't handle np.array because np.array is a
#   builtin function. Make utils.np_doc support builtin functions. 
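The implementation above reduces full_like to broadcasting the fill value to the prototype's shape. A minimal NumPy-only sketch of the same broadcast-based approach (this is an illustration, not trax's actual API):

import numpy as np

def full_like_via_broadcast(a, fill_value, dtype=None):
    a = np.asarray(a)
    fill_value = np.asarray(fill_value, dtype=dtype or a.dtype)
    # broadcast_to returns a read-only view, so copy to get a writable array
    return np.broadcast_to(fill_value, a.shape).copy()

a = np.zeros((3, 2))
print(full_like_via_broadcast(a, [5, 8]))   # every row is [5., 8.]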
Example #7
Source File: array_ops_test.py    From trax with Apache License 2.0
def testFullLike(self):
    # List of 2-tuples of fill value and shape.
    data = [
        (5, ()),
        (5, (7,)),
        (5., (7,)),
        ([5, 8], (2,)),
        ([5, 8], (3, 2)),
        ([[5], [8]], (2, 3)),
        ([[5], [8]], (3, 2, 5)),
        ([[5.], [8.]], (3, 2, 5)),
    ]
    zeros_builders = [array_ops.zeros, np.zeros]
    for f, s in data:
      for fn1, fn2, arr_dtype in itertools.product(
          self.array_transforms, zeros_builders, self.all_types):
        fill_value = fn1(f)
        arr = fn2(s, arr_dtype)
        self.match(
            array_ops.full_like(arr, fill_value), np.full_like(arr, fill_value))
        for dtype in self.all_types:
          self.match(
              array_ops.full_like(arr, fill_value, dtype=dtype),
              np.full_like(arr, fill_value, dtype=dtype)) 
Example #8
Source File: gridfinder.py    From gridfinder with MIT License
def estimate_mem_use(targets, costs):
    """Estimate memory usage in GB, probably not very accurate.

    Parameters
    ----------
    targets : numpy array
        2D array of targets.
    costs : numpy array
        2D array of costs.

    Returns
    -------
    est_mem : float
        Estimated memory requirement in GB.
    """

    # make sure these match the ones used in optimise below
    visited = np.zeros_like(targets, dtype=np.int8)
    dist = np.full_like(costs, np.nan, dtype=np.float32)
    prev = np.full_like(costs, np.nan, dtype=object)

    est_mem_arr = [targets, costs, visited, dist, prev]
    est_mem = len(pickle.dumps(est_mem_arr, -1))

    return est_mem / 1e9 
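A hypothetical call, just to show how the estimate is used (the array sizes are arbitrary; the function above relies on numpy and pickle being imported, as gridfinder.py already does):

import numpy as np

targets = np.zeros((500, 500), dtype=np.int8)
costs = np.ones((500, 500), dtype=np.float32)
print(round(estimate_mem_use(targets, costs), 4), 'GB')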
Example #9
Source File: sandbox_scene.py    From kite with GNU General Public License v3.0
def setLOS(self, phi, theta):
        """Set the sandbox's LOS vector

        :param phi: phi in degree
        :type phi: int
        :param theta: theta in degree
        :type theta: int
        """
        if self.reference is not None:
            self._log.warning('Cannot change a referenced model!')
            return

        self._log.debug(
            'Changing model LOS to %d phi and %d theta', phi, theta)

        self.theta = num.full_like(self.theta, theta*r2d)
        self.phi = num.full_like(self.phi, phi*r2d)
        self.frame.updateExtent()

        self._clearModel()
        self.evChanged.notify() 
Example #10
Source File: test_stride_tricks.py    From lambda-packs with MIT License
def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable) 
Example #11
Source File: train_confusion.py    From glc with Apache License 2.0
def get_C_hat_transpose():
    probs = []
    net.eval()
    for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract num_classes because it was added to the gold labels so we could identify which examples are gold in train_phase2
        data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
                       torch.autograd.Variable((target - num_classes).cuda(), volatile=True)

        # forward
        output = net(data)
        pred = F.softmax(output)
        probs.extend(list(pred.data.cpu().numpy()))

    probs = np.array(probs, dtype=np.float32)
    preds = np.argmax(probs, axis=1)
    C_hat = np.zeros([num_classes, num_classes])
    for i in range(len(train_data_gold.train_labels)):
        C_hat[int(np.rint(train_data_gold.train_labels[i] - num_classes)), preds[i]] += 1

    C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
    C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01  # smoothing

    return C_hat.T.astype(np.float32) 
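The final step blends the empirical confusion matrix with a uniform matrix built by np.full_like, so every entry stays strictly positive while each row still sums to one. A small standalone sketch of just that smoothing step (the example matrix is made up; the 0.99/0.01 split mirrors the code above):

import numpy as np

num_classes = 3
C_hat = np.array([[1.0, 0.0, 0.0],
                  [0.0, 0.5, 0.5],
                  [0.0, 0.0, 1.0]])
C_smooth = C_hat * 0.99 + np.full_like(C_hat, 1 / num_classes) * 0.01
print(C_smooth)          # no zero entries remain
print(C_smooth.sum(1))   # rows still sum to 1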
Example #12
Source File: test_stride_tricks.py    From vnpy_crypto with MIT License
def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable) 
Example #13
Source File: _constraints.py    From ip-nonlinear-solver with BSD 3-Clause "New" or "Revised" License
def _is_feasible(kind, enforce_feasibility, f0):
    keyword = kind[0]
    if keyword == "equals":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.asarray(kind[1], dtype=float)
    elif keyword == "greater":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.full_like(lb, np.inf, dtype=float)
    elif keyword == "less":
        ub = np.asarray(kind[1], dtype=float)
        lb = np.full_like(ub, -np.inf, dtype=float)
    elif keyword == "interval":
        lb = np.asarray(kind[1], dtype=float)
        ub = np.asarray(kind[2], dtype=float)
    else:
        raise RuntimeError("Never be here.")

    return ((lb[enforce_feasibility] <= f0[enforce_feasibility]).all()
            and (f0[enforce_feasibility] <= ub[enforce_feasibility]).all()) 
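For the one-sided constraint kinds, np.full_like builds an infinite bound with the same shape as the finite one, so all four cases share the same interval check at the end. A small standalone illustration of the "greater" branch:

import numpy as np

lb = np.asarray([0.0, 1.0, -2.0], dtype=float)
ub = np.full_like(lb, np.inf, dtype=float)     # no upper bound
f0 = np.array([0.5, 1.0, -1.9])
print((lb <= f0).all() and (f0 <= ub).all())   # True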
Example #14
Source File: test_stride_tricks.py    From recruit with Apache License 2.0
def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable) 
Example #15
Source File: test_stride_tricks.py    From GraphicDesignPatternByPython with MIT License
def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable) 
Example #16
Source File: test_distance.py    From GraphicDesignPatternByPython with MIT License
def test_old_wminkowski(self):
        with suppress_warnings() as wrn:
            wrn.filter(message="`wminkowski` is deprecated")
            w = np.array([1.0, 2.0, 0.5])
            for x, y in self.cases:
                dist1 = old_wminkowski(x, y, p=1, w=w)
                assert_almost_equal(dist1, 3.0)
                dist1p5 = old_wminkowski(x, y, p=1.5, w=w)
                assert_almost_equal(dist1p5, (2.0**1.5+1.0)**(2./3))
                dist2 = old_wminkowski(x, y, p=2, w=w)
                assert_almost_equal(dist2, np.sqrt(5))

            # test weights Issue #7893
            arr = np.arange(4)
            w = np.full_like(arr, 4)
            assert_almost_equal(old_wminkowski(arr, arr + 1, p=2, w=w), 8.0)
            assert_almost_equal(wminkowski(arr, arr + 1, p=2, w=w), 4.0) 
Example #17
Source File: art3d.py    From GraphicDesignPatternByPython with MIT License
def set_3d_properties(self, zs=0, zdir='z'):
        xs = self.get_xdata()
        ys = self.get_ydata()

        try:
            # If *zs* is a list or array, then this will fail and
            # just proceed to juggle_axes().
            zs = np.full_like(xs, fill_value=float(zs))
        except TypeError:
            pass
        self._verts3d = juggle_axes(xs, ys, zs, zdir)
        self.stale = True 
Example #18
Source File: test_link.py    From chainer with MIT License
def _setup_test_copyparams(self):
        self.link.x.grad.fill(0)
        self.link.y.grad.fill(1)
        self.link.u.initialize((2, 3))
        self.link.u.data.fill(0)
        self.link.u.grad.fill(1)
        self.link.v.cleargrad()
        gx = self.link.x.grad.copy()
        gy = self.link.y.grad.copy()
        gu = self.link.u.grad.copy()

        l = chainer.Link()
        with l.init_scope():
            l.x = chainer.Parameter(shape=(2, 3))
            l.y = chainer.Parameter(shape=2)
            l.u = chainer.Parameter(shape=(2, 3))
            l.v = chainer.Parameter(shape=(3, 2))
        l.x.data.fill(2)
        l.x.grad.fill(3)
        l.y.data.fill(4)
        l.y.grad.fill(5)
        l.u.data.fill(6)
        l.u.grad.fill(7)
        l.v.data.fill(8)
        l.v.grad.fill(9)
        l.add_persistent('p', numpy.full_like(self.link.p, 10))

        return l, (gx, gy, gu) 
Example #19
Source File: test_pooling.py    From chainer with MIT License
def forward_chainer(self, inputs):
        x, = inputs
        y = chainer.functions.max_pooling_nd(
            x, ksize=self.ksize, stride=self.stride, pad=self.pad,
            cover_all=self.cover_all)
        # Convert -inf to finite numbers.
        y = chainer.functions.maximum(y, numpy.full_like(y.array, -1e4))
        return y, 
Example #20
Source File: test_tracking.py    From pvlib-python with BSD 3-Clause "New" or "Revised" License
def test_horizon_tilted():
    # GH 569
    solar_azimuth = np.array([0, 180, 359])
    solar_zenith = np.full_like(solar_azimuth, 45)
    solar_azimuth = pd.Series(solar_azimuth)
    solar_zenith = pd.Series(solar_zenith)
    out = tracking.singleaxis(solar_zenith, solar_azimuth, axis_tilt=90,
                              axis_azimuth=180, backtrack=False, max_angle=180)
    expected = pd.DataFrame(np.array(
        [[ 180.,  45.,   0.,  90.],
         [   0.,  45., 180.,  90.],
         [ 179.,  45., 359.,  90.]]),
        columns=['tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'])
    assert_frame_equal(out, expected) 
Example #21
Source File: photometric.py    From mmcv with Apache License 2.0
def iminvert(img):
    """Invert (negate) an image.

    Args:
        img (ndarray): Image to be inverted.

    Returns:
        ndarray: The inverted image.
    """
    return np.full_like(img, 255) - img 
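Because np.full_like copies the image's dtype, the subtraction stays within uint8 for ordinary 8-bit images and never underflows. A quick usage sketch (the sample image is made up):

import numpy as np

img = np.array([[0, 128, 255]], dtype=np.uint8)
print(iminvert(img))   # [[255 127   0]]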
Example #22
Source File: orthogonal.py    From GraphicDesignPatternByPython with MIT License
def roots_chebyt(n, mu=False):
    r"""Gauss-Chebyshev (first kind) quadrature.

    Computes the sample points and weights for Gauss-Chebyshev quadrature.
    The sample points are the roots of the n-th degree Chebyshev polynomial of
    the first kind, :math:`T_n(x)`.  These sample points and weights correctly
    integrate polynomials of degree :math:`2n - 1` or less over the interval
    :math:`[-1, 1]` with weight function :math:`f(x) = 1/\sqrt{1 - x^2}`.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.chebyshev.chebgauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m))
    w = np.full_like(x, pi/m)
    if mu:
        return x, w, pi
    else:
        return x, w 
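The weights of Gauss-Chebyshev quadrature of the first kind are all equal to pi/n, which is why a single np.full_like call builds the whole weight array. A quick hedged check using scipy.special.roots_chebyt: with n = 3 the rule should integrate x**2 / sqrt(1 - x**2) over [-1, 1] exactly to pi/2.

import numpy as np
from scipy.special import roots_chebyt

x, w = roots_chebyt(3)
print(np.allclose(w, np.full_like(x, np.pi / 3)))   # True: every weight is pi/n
print(np.sum(w * x**2), np.pi / 2)                  # both approximately 1.5708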
Example #23
Source File: array_ops.py    From trax with Apache License 2.0
def full(shape, fill_value, dtype=None):  # pylint: disable=redefined-outer-name
  """Returns an array with given shape and dtype filled with `fill_value`.

  Args:
    shape: A valid shape object. Could be a native python object or an object
       of type ndarray, numpy.ndarray or tf.TensorShape.
    fill_value: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    dtype: Optional, defaults to dtype of the `fill_value`. The type of the
      resulting ndarray. Could be a python type, a NumPy type or a TensorFlow
      `DType`.

  Returns:
    An ndarray.

  Raises:
    ValueError: if `fill_value` can not be broadcast to shape `shape`.
  """
  fill_value = asarray(fill_value, dtype=dtype)
  if utils.isscalar(shape):
    shape = tf.reshape(shape, [1])
  return arrays_lib.tensor_to_ndarray(tf.broadcast_to(fill_value.data, shape))


# Using doc only here since np full_like signature doesn't seem to have the
# shape argument (even though it exists in the documentation online). 
Example #24
Source File: test_classification.py    From Mastering-Elasticsearch-7.0 with MIT License
def test_balanced_accuracy_score(y_true, y_pred):
    macro_recall = recall_score(y_true, y_pred, average='macro',
                                labels=np.unique(y_true))
    with ignore_warnings():
        # Warnings are tested in test_balanced_accuracy_score_unseen
        balanced = balanced_accuracy_score(y_true, y_pred)
    assert balanced == pytest.approx(macro_recall)
    adjusted = balanced_accuracy_score(y_true, y_pred, adjusted=True)
    chance = balanced_accuracy_score(y_true, np.full_like(y_true, y_true[0]))
    assert adjusted == (balanced - chance) / (1 - chance) 
Example #25
Source File: test_loss.py    From Mastering-Elasticsearch-7.0 with MIT License
def get_derivatives_helper(loss):
    """Return get_gradients() and get_hessians() functions for a given loss.
    """

    def get_gradients(y_true, raw_predictions):
        # create gradients and hessians array, update inplace, and return
        gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        loss.update_gradients_and_hessians(gradients, hessians, y_true,
                                           raw_predictions)
        return gradients

    def get_hessians(y_true, raw_predictions):
        # create gradients and hessians array, update inplace, and return
        gradients = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        hessians = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        loss.update_gradients_and_hessians(gradients, hessians, y_true,
                                           raw_predictions)

        if loss.__class__.__name__ == 'LeastSquares':
            # hessians aren't updated because they're constant:
            # the value is 1 because the loss is actually a half
            # least-squares loss.
            hessians = np.full_like(raw_predictions, fill_value=1)

        return hessians

    return get_gradients, get_hessians 
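The hessian of the half least-squares loss 0.5 * (y - p)**2 with respect to the prediction p is identically 1, which is why a constant array from np.full_like can stand in for the per-sample hessians. A tiny standalone sketch of that fact (the variable names are illustrative, not scikit-learn internals):

import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
raw_predictions = np.array([1.5, 1.0, 2.0])
gradients = raw_predictions - y_true                      # d/dp of 0.5 * (y - p)**2 is p - y
hessians = np.full_like(raw_predictions, fill_value=1.0)  # second derivative is constant
print(gradients, hessians)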
Example #26
Source File: test_numeric.py    From Computable with MIT License
def test_filled_like(self):
        self.check_like_function(np.full_like, 0, True)
        self.check_like_function(np.full_like, 1, True)
        self.check_like_function(np.full_like, 1000, True)
        self.check_like_function(np.full_like, 123.456, True)
        self.check_like_function(np.full_like, np.inf, True) 
Example #27
Source File: usage_nsga2_custom.py    From pymoo with Apache License 2.0
def _do(self, problem, X, **kwargs):

        # The input has the following shape (n_parents, n_matings, n_var)
        _, n_matings, n_var = X.shape

        # The output has the shape (n_offsprings, n_matings, n_var)
        # Because the number of parents and offspring is equal, it keeps the shape of X
        Y = np.full_like(X, None, dtype=np.object)

        # for each mating provided
        for k in range(n_matings):

            # get the first and the second parent
            a, b = X[0, k, 0], X[1, k, 0]

            # prepare the offsprings
            off_a = ["_"] * problem.n_characters
            off_b = ["_"] * problem.n_characters

            for i in range(problem.n_characters):
                if np.random.random() < 0.5:
                    off_a[i] = a[i]
                    off_b[i] = b[i]
                else:
                    off_a[i] = b[i]
                    off_b[i] = a[i]

            # join the character list and set the output
            Y[0, k, 0], Y[1, k, 0] = "".join(off_a), "".join(off_b)

        return Y 
Example #28
Source File: test_bounds.py    From fragile with MIT License
def test_safe_margin(self, bounds_fixture: Bounds):
        new_bounds = bounds_fixture.safe_margin()
        assert numpy.allclose(new_bounds.low, bounds_fixture.low)
        assert numpy.allclose(new_bounds.high, bounds_fixture.high)
        low = numpy.full_like(bounds_fixture.low, -10)
        new_bounds = bounds_fixture.safe_margin(low=low)
        assert numpy.allclose(new_bounds.high, bounds_fixture.high)
        assert numpy.allclose(new_bounds.low, low)
        new_bounds = bounds_fixture.safe_margin(low=low, scale=2)
        assert numpy.allclose(new_bounds.high, bounds_fixture.high * 2)
        assert numpy.allclose(new_bounds.low, low * 2) 
Example #29
Source File: test_bounds.py    From fragile with MIT License
def test_points_in_bounds(self, bounds_fixture):
        zeros = numpy.zeros((3, 3))
        assert all(bounds_fixture.points_in_bounds(zeros))
        tens = numpy.full_like(zeros, 10)
        assert not any(bounds_fixture.points_in_bounds(tens))
        tens = numpy.array([[-10, 0, 1], [0, 0, 0], [10, 10, 10]])
        assert sum(bounds_fixture.points_in_bounds(tens)) == 1 
Example #30
Source File: SOMClustering.py    From susi with BSD 3-Clause "New" or "Revised" License
def modify_weight_matrix_batch(self, som_array, dist_weight_matrix, data):
        """Modify weight matrix of the SOM for the online algorithm.

        Parameters
        ----------
        som_array : np.array
            Weight vectors of the SOM
            shape = (self.n_rows, self.n_columns, X.shape[1])
        dist_weight_matrix : np.array of float
            Current distance weight of the SOM for the specific node
        data : np.array
            True vector(s)

        Returns
        -------
        np.array
            Weight vector of the SOM after the modification

        """
        # calculate numerator and divisor for the batch formula
        numerator = np.sum(
            [np.multiply(data[i], dist_weight_matrix[i].reshape(
                (self.n_rows, self.n_columns, 1)))
                for i in range(len(data))], axis=0)
        divisor = np.sum(dist_weight_matrix, axis=0).reshape(
            (self.n_rows, self.n_columns, 1))

        # update weights
        old_som = np.copy(som_array)
        new_som = np.divide(
            numerator,
            divisor,
            out=np.full_like(numerator, np.nan),
            where=(divisor != 0))

        # overwrite new nans with old entries
        new_som[np.isnan(new_som)] = old_som[np.isnan(new_som)]
        return new_som
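The np.divide call above passes out=np.full_like(numerator, np.nan) together with where=(divisor != 0), so nodes whose neighbourhood weights sum to zero come out as NaN and are then restored from the old weights. A minimal standalone sketch of that safe-division pattern:

import numpy as np

numerator = np.array([2.0, 4.0, 6.0])
divisor = np.array([1.0, 0.0, 3.0])
out = np.divide(numerator, divisor,
                out=np.full_like(numerator, np.nan),
                where=(divisor != 0))
print(out)   # [ 2. nan  2.]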