Python numba.prange() Examples

The following are 30 code examples of numba.prange(), collected from open-source projects. The source file, project, and license are noted above each example.
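Note that prange only has a parallel effect inside a function compiled with Numba's parallel JIT; the excerpts below omit the decorators, but in their original source files these functions are wrapped with something like @numba.njit(parallel=True) or @numba.jit(nopython=True, parallel=True). As a minimal, self-contained sketch of the pattern (the function and data here are illustrative, not taken from any project below):

import numpy as np
from numba import njit, prange

@njit(parallel=True)
def parallel_sum(a):
    # Iterations of a prange loop may run on multiple threads; simple
    # reductions such as += are recognised and handled by numba.
    total = 0.0
    for i in prange(a.shape[0]):
        total += a[i]
    return total

print(parallel_sum(np.arange(1_000_000, dtype=np.float64)))  # 499999500000.0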
Example #1
Source File: pynndescent_.py    From pynndescent with BSD 2-Clause "Simplified" License
def generate_leaf_updates(leaf_block, dist_thresholds, data, dist):

    updates = [[(-1, -1, np.inf)] for i in range(leaf_block.shape[0])]

    for n in numba.prange(leaf_block.shape[0]):
        for i in range(leaf_block.shape[1]):
            p = leaf_block[n, i]
            if p < 0:
                break

            for j in range(i + 1, leaf_block.shape[1]):
                q = leaf_block[n, j]
                if q < 0:
                    break

                d = dist(data[p], data[q])
                if d < dist_thresholds[p] or d < dist_thresholds[q]:
                    updates[n].append((p, q, d))

    return updates 
Example #2
Source File: item_knn.py    From lkpy with MIT License
def _scipy_sim_blocks(trmat, blocks, ptrs, min_sim, max_nbrs):
    "Compute the similarity matrix with blocked SciPy calls"
    nitems, nusers = trmat.shape
    nblocks = len(blocks)

    null = _empty_csr(1, 1, np.zeros(1, dtype=np.int32))
    res = [null for i in range(nblocks)]

    for bi in prange(nblocks):
        b = blocks[bi]
        bs, be = ptrs[bi]
        bres = _scipy_sim_block(b, bs, be, trmat, min_sim, max_nbrs, nitems)
        res[bi] = bres
        assert bres.nrows == be - bs

    return res 
Example #3
Source File: _Spread_numba.py    From pylops with GNU Lesser General Public License v3.0
def _rmatvec_numba_onthefly(x, y, dims, dimsd, interp, fh):
    """numba implementation of adjoint mode with on-the-fly computations.
    See official documentation for description of variables
    """
    dim0, dim1 = dims
    x = x.reshape(dimsd)
    for ix0 in prange(dim0):
        for it in range(dim1):
            # fh returns the indices and the fractional weights; the weights
            # are only used when interp is True
            indices, dindices = fh(ix0, it)
            for i, indexfloat in enumerate(indices):
                index = int(indexfloat)
                if index != -9223372036854775808:  # np.nan cast to int64 (missing-index sentinel)
                    if not interp:
                        y[ix0, it] += x[i, index]
                    else:
                        y[ix0, it] += x[i, index]*(1 - dindices[i]) + \
                                      x[i, index + 1]*dindices[i]
    return y.ravel() 
Example #4
Source File: _Spread_numba.py    From pylops with GNU Lesser General Public License v3.0
def _rmatvec_numba_table(x, y, dims, dimsd, interp, table, dtable):
    """numba implementation of adjoint mode with table.
    See official documentation for description of variables
    """
    dim0, dim1 = dims
    x = x.reshape(dimsd)
    for ix0 in prange(dim0):
        for it in range(dim1):
            indices = table[ix0, it]
            if interp:
                dindices = dtable[ix0, it]

            for i, indexfloat in enumerate(indices):
                index = int(indexfloat)
                if index != -9223372036854775808:  # np.nan cast to int64 (missing-index sentinel)
                    if not interp:
                        y[ix0, it] += x[i, index]
                    else:
                        y[ix0, it] += x[i, index]*(1 - dindices[i]) + \
                                      x[i, index + 1]*dindices[i]
    return y.ravel() 
Example #5
Source File: dsm_generation.py    From buzzard with Apache License 2.0
def _diamond_step(DS_array, step_size, roughness):
    half_step = step_size // 2

    for i in nb.prange(0, DS_array.shape[0] // step_size):
        i = i * step_size + half_step
        for j in nb.prange(0, DS_array.shape[0] // step_size):
            j = j * step_size + half_step

            if DS_array[i, j] == -1.0:
                ul = DS_array[i - half_step, j - half_step]
                ur = DS_array[i - half_step, j + half_step]
                ll = DS_array[i + half_step, j - half_step]
                lr = DS_array[i + half_step, j + half_step]

                ave = (ul + ur + ll + lr) / 4.0
                rand_val = random.uniform(0, 1)
                DS_array[i, j] = roughness * rand_val + (1.0 - roughness) * ave 
Example #6
Source File: gradient_boosting.py    From pygbm with MIT License
def _update_raw_predictions(leaves_data, raw_predictions):
    """Update raw_predictions by reading the predictions of the ith tree
    directly form the leaves.

    Can only be used for predicting the training data. raw_predictions
    contains the sum of the tree values from iteration 0 to i - 1. This adds
    the predictions of the ith tree to raw_predictions.

    Parameters
    ----------
    leaves_data: list of tuples (leaf.value, leaf.sample_indices)
        The leaves data used to update raw_predictions.
    raw_predictions : array-like, shape=(n_samples,)
        The raw predictions for the training data.
    """
    for leaf_idx in prange(len(leaves_data)):
        leaf_value, sample_indices = leaves_data[leaf_idx]
        for sample_idx in sample_indices:
            raw_predictions[sample_idx] += leaf_value 
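The docstring above fixes the expected format of leaves_data; a hedged usage sketch with hand-built values (the numbers are illustrative only):

import numpy as np

leaves_data = [(0.5, np.array([0, 2])), (-0.25, np.array([1]))]
raw_predictions = np.zeros(3)
_update_raw_predictions(leaves_data, raw_predictions)
# raw_predictions is now [0.5, -0.25, 0.5]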
Example #7
Source File: vector.py    From verde with BSD 3-Clause "New" or "Revised" License
def jacobian_2d_numba(east, north, force_east, force_north, mindist, poisson, jac):
    "Calculate the Jacobian matrix using numba to speed things up."
    nforces = force_east.size
    npoints = east.size
    for i in numba.prange(npoints):  # pylint: disable=not-an-iterable
        for j in range(nforces):
            green_ee, green_nn, green_ne = GREENS_FUNC_2D_JIT(
                east[i] - force_east[j], north[i] - force_north[j], mindist, poisson
            )
            jac[i, j] = green_ee
            jac[i + npoints, j + nforces] = green_nn
            jac[i, j + nforces] = green_ne
            jac[i + npoints, j] = green_ne  # J is symmetric
    return jac


# JIT compile the Greens functions for use in numba functions 
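The comment above refers to a step outside this excerpt: GREENS_FUNC_2D_JIT must itself be numba-compiled so the jitted Jacobian loop can call it from nopython code. A hedged sketch of that pattern (greens_func_2d is a hypothetical placeholder, not verde's actual function):

import numba

def greens_func_2d(east, north, mindist, poisson):
    ...  # hypothetical plain-Python Green's function; verde's real one differs

# Compile it so jacobian_2d_numba (itself jitted) can call it.
GREENS_FUNC_2D_JIT = numba.njit(greens_func_2d)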
Example #8
Source File: loss.py    From pygbm with MIT License
def _update_gradients_hessians_binary_crossentropy(gradients, hessians,
                                                   y_true, raw_predictions):
    # Note: using LightGBM version (first mapping {0, 1} into {-1, 1})
    # will cause overflow issues in the exponential as we're using float32
    # precision.

    # shape (n_samples, 1) --> (n_samples,). reshape(-1) is more likely to
    # return a view.
    raw_predictions = raw_predictions.reshape(-1)
    n_samples = raw_predictions.shape[0]
    starts, ends, n_threads = get_threads_chunks(total_size=n_samples)
    for thread_idx in prange(n_threads):
        for i in range(starts[thread_idx], ends[thread_idx]):
            gradients[i] = _expit(raw_predictions[i]) - y_true[i]
            gradient_abs = np.abs(gradients[i])
            hessians[i] = gradient_abs * (1. - gradient_abs) 
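get_threads_chunks and _expit are pygbm internals not shown in this excerpt. A hedged sketch of what a chunking helper with this signature might do (an assumption about its behaviour, not pygbm's actual code):

import numpy as np

def get_threads_chunks(total_size, n_threads=4):
    # Split [0, total_size) into n_threads contiguous [start, end) ranges.
    sizes = np.full(n_threads, total_size // n_threads, dtype=np.int64)
    sizes[: total_size % n_threads] += 1  # spread the remainder
    ends = np.cumsum(sizes)
    starts = ends - sizes
    return starts, ends, n_threads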
Example #9
Source File: basic_workflow_parallel.py    From sdc with BSD 2-Clause "Simplified" License
def get_analyzed_data():
    df = pd.read_csv(FNAME)
    s_bonus = pd.Series(df['Bonus %'])
    s_first_name = pd.Series(df['First Name'])

    # Use an explicit loop to compute the mean. It will be compiled as a parallel loop
    m = 0.0
    for i in prange(s_bonus.size):
        m += s_bonus.values[i]
    m /= s_bonus.size

    names = s_first_name.sort_values()
    return m, names


# Printing names and their average bonus percent 
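The trailing comment suggests the excerpt originally continued by printing the results; a hedged reconstruction (the exact formatting is an assumption):

m, names = get_analyzed_data()
print("Average bonus %:", m)
print(names)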
Example #10
Source File: sod.py    From pyod with BSD 2-Clause "Simplified" License
def _snn_imp(ind, ref_set_):
    """Internal function for fast snn calculation

    Parameters
    ----------
    ind : array of shape (n_samples, n_neighbors)
        Indices returned by kNN.

    ref_set_ : int, optional (default=10)
        Specifies the number of shared nearest neighbors to create the
        reference set. Note that ref_set must be smaller than n_neighbors.

    """
    n = ind.shape[0]
    _count = np.zeros(shape=(n, ref_set_), dtype=np.uint32)
    for i in nb.prange(n):
        temp = np.empty(n, dtype=np.uint32)
        test_element_set = set(ind[i])
        for j in nb.prange(n):
            temp[j] = len(set(ind[j]).intersection(test_element_set))
        temp[i] = np.iinfo(np.uint32).max
        _count[i] = np.argsort(temp)[::-1][1:ref_set_ + 1]

    return _count 
Example #11
Source File: scaling.py    From fastats with MIT License
def min_max_parallel(A):
    """
    Standardise data by scaling data points by the sample minimum and maximum
    such that all data points lie in the range 0 to 1, equivalent to sklearn
    MinMaxScaler.

    Uses explicit parallel loop; may offer improved performance in some
    cases.
    """
    assert A.ndim > 1

    n = A.shape[1]
    res = empty_like(A, dtype=np_float64)

    for i in prange(n):
        data_i = A[:, i]
        data_min = np_min(data_i)
        res[:, i] = (data_i - data_min) / (np_max(data_i) - data_min)

    return res 
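The names empty_like, np_float64, np_min, and np_max (and mean, used by the demean_parallel example further down) are module-level aliases whose imports were stripped from the excerpt; presumably something along these lines sits at the top of fastats' scaling.py (an assumption, not the file's verbatim header):

from numba import prange
from numpy import empty_like, mean, float64 as np_float64, min as np_min, max as np_max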
Example #12
Source File: reproduce_experiments_additional.py    From rocket with GNU General Public License v3.0
def apply_kernels_jagged(X, kernels, input_lengths):

    weights, lengths, biases, dilations, paddings = kernels

    num_examples = len(X)
    num_kernels = len(weights)

    # initialise output
    _X = np.zeros((num_examples, num_kernels * 2)) # 2 features per kernel

    for i in prange(num_examples):

        for j in range(num_kernels):

            # skip incompatible kernels (effective length is "too big" without padding)
            if (input_lengths[i] + (2 * paddings[j])) > ((lengths[j] - 1) * dilations[j]):

                _X[i, (j * 2):((j * 2) + 2)] = apply_kernel(
                    X[i][:input_lengths[i]], weights[j][:lengths[j]],
                    lengths[j], biases[j], dilations[j], paddings[j])

    return _X

# == additional convenience function =========================================== 
Example #13
Source File: rocket_functions.py    From rocket with GNU General Public License v3.0
def apply_kernels(X, kernels):

    weights, lengths, biases, dilations, paddings = kernels

    num_examples = len(X)
    num_kernels = len(weights)

    # initialise output
    _X = np.zeros((num_examples, num_kernels * 2)) # 2 features per kernel

    for i in prange(num_examples):

        for j in range(num_kernels):

            _X[i, (j * 2):((j * 2) + 2)] = apply_kernel(
                X[i], weights[j][:lengths[j]], lengths[j],
                biases[j], dilations[j], paddings[j])

    return _X 
Example #14
Source File: utils.py    From umap with BSD 3-Clause "New" or "Revised" License
def fast_knn_indices(X, n_neighbors):
    """A fast computation of knn indices.

    Parameters
    ----------
    X: array of shape (n_samples, n_features)
        The input data to compute the k-neighbor indices of.

    n_neighbors: int
        The number of nearest neighbors to compute for each sample in ``X``.

    Returns
    -------
    knn_indices: array of shape (n_samples, n_neighbors)
        The indices of the ``n_neighbors`` closest points in the dataset.
    """
    knn_indices = np.empty((X.shape[0], n_neighbors), dtype=np.int32)
    for row in numba.prange(X.shape[0]):
        # v = np.argsort(X[row])  # Need to call argsort this way for numba
        v = X[row].argsort(kind="quicksort")
        v = v[:n_neighbors]
        knn_indices[row] = v
    return knn_indices 
Example #15
Source File: nndescent.py    From umap with BSD 3-Clause "New" or "Revised" License
def initialized_nnd_search(data, indptr, indices, initialization, query_points, dist):
    for i in numba.prange(query_points.shape[0]):

        tried = set(initialization[0, i])

        while True:

            # Find smallest flagged vertex
            vertex = smallest_flagged(initialization, i)

            if vertex == -1:
                break
            candidates = indices[indptr[vertex] : indptr[vertex + 1]]
            for j in range(candidates.shape[0]):
                if (
                    candidates[j] == vertex
                    or candidates[j] == -1
                    or candidates[j] in tried
                ):
                    continue
                d = dist(data[candidates[j]], query_points[i])
                unchecked_heap_push(initialization, i, d, candidates[j], 1)
                tried.add(candidates[j])

    return initialization 
Example #16
Source File: numba_tools.py    From tridesclous with MIT License
def peak_loop_minus(sigs, sig_center, mask_peaks, n_span, thresh, peak_sign, neighbours):
    for chan in prange(sig_center.shape[1]):
        for s in range(mask_peaks.shape[0]):
            if not mask_peaks[s, chan]:
                continue
            for neighbour in neighbours[chan, :]:
                if neighbour<0:
                    continue
                for i in range(n_span):
                    if chan != neighbour:
                        mask_peaks[s, chan] &= sig_center[s, chan] <= sig_center[s, neighbour]
                    mask_peaks[s, chan] &= sig_center[s, chan] < sigs[s+i, neighbour]
                    mask_peaks[s, chan] &= sig_center[s, chan]<=sigs[n_span+s+i+1, neighbour]                        
                    if not mask_peaks[s, chan]:
                        break
                if not mask_peaks[s, chan]:
                    break
    return mask_peaks 
Example #17
Source File: numba_tools.py    From tridesclous with MIT License
def peak_loop_plus(sigs, sig_center, mask_peaks, n_span, thresh, peak_sign, neighbours):
    for chan in prange(sig_center.shape[1]):
        for s in range(mask_peaks.shape[0]):
            if not mask_peaks[s, chan]:
                continue
            for neighbour in neighbours[chan, :]:
                if neighbour<0:
                    continue
                for i in range(n_span):
                    if chan != neighbour:
                        mask_peaks[s, chan] &= sig_center[s, chan] >= sig_center[s, neighbour]
                    mask_peaks[s, chan] &= sig_center[s, chan] > sigs[s+i, neighbour]
                    mask_peaks[s, chan] &= sig_center[s, chan]>=sigs[n_span+s+i+1, neighbour]
                    if not mask_peaks[s, chan]:
                        break
                if not mask_peaks[s, chan]:
                    break
    return mask_peaks 
Example #18
Source File: numba_tools.py    From tridesclous with MIT License
def numba_explore_shifts(long_waveform, one_center,  one_mask, maximum_jitter_shift):
    width, nb_chan = one_center.shape
    n = maximum_jitter_shift * 2 + 1
    
    all_dist = np.zeros((n, ), dtype=np.float32)
    
    for shift in prange(n):
        sum = 0
        for c in range(nb_chan):
            if one_mask[c]:
                for s in range(width):
                    d = long_waveform[shift+s, c] - one_center[s, c]
                    sum += d*d
        all_dist[shift] = sum
    
    return all_dist 
Example #19
Source File: numba_tools.py    From tridesclous with MIT License
def numba_loop_sparse_dist_with_geometry(waveform, centers, possibles_cluster_idx, rms_waveform_channel, channel_adjacency):
    
    nb_total_clus, width, nb_chan = centers.shape
    nb_clus = possibles_cluster_idx.size
    
    #rms_waveform_channel = np.sum(waveform**2, axis=0)#.astype('float32')
    waveform_distance = np.zeros((nb_clus,), dtype=np.float32)
    
    for clus in prange(len(possibles_cluster_idx)):
        cluster_idx = possibles_cluster_idx[clus]
        sum = 0
        for c in channel_adjacency:
            #~ if mask[cluster_idx, c]:
            for s in range(width):
                d = waveform[s, c] - centers[cluster_idx, s, c]
                sum += d*d
            #~ else:
                #~ sum +=rms_waveform_channel[c]
        waveform_distance[clus] = sum
    
    return waveform_distance 
Example #20
Source File: metrics.py    From tslearn with BSD 2-Clause "Simplified" License
def njit_lb_envelope(time_series, radius):
    sz, d = time_series.shape
    enveloppe_up = numpy.empty((sz, d))
    enveloppe_down = numpy.empty((sz, d))

    for i in prange(sz):
        min_idx = i - radius
        max_idx = i + radius + 1
        if min_idx < 0:
            min_idx = 0
        if max_idx > sz:
            max_idx = sz
        for di in prange(d):
            enveloppe_down[i, di] = numpy.min(time_series[min_idx:max_idx, di])
            enveloppe_up[i, di] = numpy.max(time_series[min_idx:max_idx, di])

    return enveloppe_down, enveloppe_up 
Example #21
Source File: shortlist.py    From pyxclib with MIT License
def map_neighbors(indices, similarity, labels, top_k, pad_ind, pad_val):
    m = indices.shape[0]
    point_labels = np.full(
        (m, top_k), pad_ind, dtype=np.int64)
    point_label_sims = np.full(
        (m, top_k), pad_val, dtype=np.float32)
    for i in nb.prange(m):
        unique_point_labels, point_label_sim = map_one(
            labels[indices[i]], similarity[i], pad_ind)
        if top_k < len(unique_point_labels):
            top_indices = np.argsort(
                point_label_sim)[-1 * top_k:][::-1]
            point_labels[i] = unique_point_labels[top_indices]
            point_label_sims[i] = point_label_sim[top_indices]
        else:
            point_labels[i, :len(unique_point_labels)] = unique_point_labels
            point_label_sims[i, :len(unique_point_labels)] = point_label_sim
    return point_labels, point_label_sims 
Example #22
Source File: scaling.py    From fastats with MIT License
def demean_parallel(A):
    """
    Subtract the mean from the supplied data column-wise.

    Uses explicit parallel loop; may offer improved performance in some
    cases.
    """
    assert A.ndim > 1

    n = A.shape[1]
    res = empty_like(A, dtype=np_float64)

    for i in prange(n):
        data_i = A[:, i]
        res[:, i] = data_i - mean(data_i)

    return res 
Example #23
Source File: pynndescent_.py    From pynndescent with BSD 2-Clause "Simplified" License
def degree_prune_internal(indptr, data, max_degree=20):
    for i in numba.prange(indptr.shape[0] - 1):
        row_data = data[indptr[i] : indptr[i + 1]]
        if row_data.shape[0] > max_degree:
            cut_value = np.sort(row_data)[max_degree]
            for j in range(indptr[i], indptr[i + 1]):
                if data[j] > cut_value:
                    data[j] = 0.0

    return 
Example #24
Source File: extractor.py    From ctapipe with BSD 3-Clause "New" or "Revised" License
def neighbor_average_waveform(waveforms, neighbors, lwt):
    """
    Obtain the average waveform built from the neighbors of each pixel

    Parameters
    ----------
    waveforms : ndarray
        Waveforms stored in a numpy array.
        Shape: (n_pix, n_samples)
    neighbors : ndarray
        2D array where each row is [pixel index, one neighbor of that pixel].
        Changes per telescope.
        Can be obtained from
        `ctapipe.instrument.CameraGeometry.neighbor_matrix_where`.
    lwt: int
        Weight of the local pixel (0: peak from neighbors only,
        1: local pixel counts as much as any neighbor)

    Returns
    -------
    average_wf : ndarray
        Average of neighbor waveforms for each pixel.
        Shape: (n_pix, n_samples)

    """
    n_neighbors = neighbors.shape[0]
    sum_ = waveforms * lwt
    n = np.full(waveforms.shape, lwt, dtype=np.int32)
    for i in prange(n_neighbors):
        pixel = neighbors[i, 0]
        neighbor = neighbors[i, 1]
        sum_[pixel] += waveforms[neighbor]
        n[pixel] += 1
    return sum_ / n 
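A hedged usage sketch with hand-built inputs rather than a real ctapipe camera geometry (shapes and values are illustrative only):

import numpy as np

waveforms = np.random.rand(3, 5)  # 3 pixels, 5 samples
# each row: [pixel index, one neighbor of that pixel]
neighbors = np.array([[0, 1], [1, 0], [1, 2], [2, 1]])
average_wf = neighbor_average_waveform(waveforms, neighbors, lwt=0)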
Example #25
Source File: validation.py    From umap with BSD 3-Clause "New" or "Revised" License
def make_trustworthiness_calculator(metric):  # pragma: no cover
    @numba.njit(parallel=True)
    def trustworthiness_vector_lowmem(source, indices_embedded, max_k):

        n_samples = indices_embedded.shape[0]
        trustworthiness = np.zeros(max_k + 1, dtype=np.float64)
        dist_vector = np.zeros(n_samples, dtype=np.float64)

        for i in range(n_samples):

            for j in numba.prange(n_samples):
                dist_vector[j] = metric(source[i], source[j])

            indices_source = np.argsort(dist_vector)

            for j in range(max_k):

                rank = 0
                while indices_source[rank] != indices_embedded[i, j]:
                    rank += 1

                for k in range(j + 1, max_k + 1):
                    if rank > k:
                        trustworthiness[k] += rank - k

        for k in range(1, max_k + 1):
            trustworthiness[k] = 1.0 - trustworthiness[k] * (
                2.0 / (n_samples * k * (2.0 * n_samples - 3.0 * k - 1.0))
            )

        trustworthiness[0] = 1.0

        return trustworthiness

    return trustworthiness_vector_lowmem 
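Because trustworthiness_vector_lowmem is compiled with @numba.njit, the metric passed to the factory must itself be numba-compiled so it can be called from jitted code. A hedged usage sketch (the euclidean metric here is illustrative, not umap's own):

import numpy as np
import numba

@numba.njit
def euclidean(a, b):
    return np.sqrt(np.sum((a - b) ** 2))

trustworthiness_vector = make_trustworthiness_calculator(euclidean)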
Example #26
Source File: item_knn.py    From lkpy with MIT License
def _predict_sum(model, nitems, nrange, ratings, rated, targets):
    "Sum-of-similarities prediction function"
    min_nbrs, max_nbrs = nrange
    scores = np.full(nitems, np.nan, dtype=np.float_)

    for i in prange(targets.shape[0]):
        iidx = targets[i]
        rptr = model.rowptrs[iidx]
        rend = model.rowptrs[iidx + 1]

        score = 0
        nnbrs = 0

        for j in range(rptr, rend):
            nidx = model.colinds[j]
            if not rated[nidx]:
                continue

            nnbrs = nnbrs + 1
            score = score + model.values[j]

            if max_nbrs > 0 and nnbrs >= max_nbrs:
                break

        if nnbrs < min_nbrs:
            continue

        scores[iidx] = score

    return scores 
Example #27
Source File: pynndescent_.py    From pynndescent with BSD 2-Clause "Simplified" License
def diversify(indices, distances, data, dist, epsilon=0.01):

    for i in numba.prange(indices.shape[0]):

        new_indices = [indices[i, 0]]
        new_distances = [distances[i, 0]]
        for j in range(1, indices.shape[1]):
            if indices[i, j] < 0:
                break

            flag = True
            for k in range(len(new_indices)):
                c = new_indices[k]
                d = dist(data[indices[i, j]], data[c])
                if new_distances[k] > FLOAT32_EPS and d < epsilon * distances[i, j]:
                    flag = False
                    break

            if flag:
                new_indices.append(indices[i, j])
                new_distances.append(distances[i, j])

        for j in range(indices.shape[1]):
            if j < len(new_indices):
                indices[i, j] = new_indices[j]
                distances[i, j] = new_distances[j]
            else:
                indices[i, j] = -1
                distances[i, j] = np.inf

    return indices, distances 
Example #28
Source File: pynndescent_.py    From pynndescent with BSD 2-Clause "Simplified" License
def diversify_csr(
    graph_indptr, graph_indices, graph_data, source_data, dist, epsilon=0.01
):
    n_nodes = graph_indptr.shape[0] - 1

    for i in numba.prange(n_nodes):

        current_indices = graph_indices[graph_indptr[i] : graph_indptr[i + 1]]
        current_data = graph_data[graph_indptr[i] : graph_indptr[i + 1]]

        order = np.argsort(current_data)
        retained = np.ones(order.shape[0], dtype=np.int8)

        for idx in range(1, order.shape[0]):

            j = order[idx]

            for k in range(idx):
                if retained[k] == 1:
                    d = dist(
                        source_data[current_indices[j]],
                        source_data[current_indices[k]],
                    )
                    if current_data[k] > FLOAT32_EPS and d < epsilon * current_data[j]:
                        retained[j] = 0
                        break

        for idx in range(order.shape[0]):
            j = order[idx]
            if retained[j] == 0:
                graph_data[graph_indptr[i] + j] = 0

    return 
Example #29
Source File: nonrigid.py    From suite2p with GNU General Public License v3.0
def block_interp(ymax1, xmax1, mshy, mshx, yup, xup):
    """ interpolate from ymax1 to mshy to create coordinate transforms """
    for t in prange(ymax1.shape[0]):
        # y shifts for blocks to coordinate map
        map_coordinates(ymax1[t], mshy.copy(), mshx.copy(), yup[t])
        # x shifts for blocks to coordinate map
        map_coordinates(xmax1[t], mshy.copy(), mshx.copy(), xup[t]) 
Example #30
Source File: block_parallel_plsa.py    From enstop with BSD 2-Clause "Simplified" License
def log_likelihood_by_blocks(
    block_rows_ndarray,
    block_cols_ndarray,
    block_vals_ndarray,
    p_w_given_z,
    p_z_given_d,
):
    result = 0.0
    k = p_z_given_d.shape[2]

    for i in numba.prange(block_rows_ndarray.shape[0]):
        for j in range(block_rows_ndarray.shape[1]):
            for nz_idx in range(block_rows_ndarray.shape[2]):
                if block_rows_ndarray[i, j, nz_idx] < 0:
                    break

                d = block_rows_ndarray[i, j, nz_idx]
                w = block_cols_ndarray[i, j, nz_idx]
                x = block_vals_ndarray[i, j, nz_idx]

                p_w_given_d = 0.0
                for z in range(k):
                    p_w_given_d += p_w_given_z[j, z, w] * p_z_given_d[i, d, z]

                result += x * np.log(p_w_given_d)

    return result