Python numpy.cumsum() Examples

The following are 30 code examples showing how to use numpy.cumsum(). They are extracted from open source projects; you can go to the original project or source file by following the links above each example.

You may also want to check out all available functions/classes of the module numpy, or try the search function.
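
As a quick refresher before the examples: numpy.cumsum returns the running sum of an array, flattened by default or along a given axis. A minimal, self-contained illustration (not taken from any of the projects below):

import numpy as np

a = np.array([1, 2, 3, 4])
print(np.cumsum(a))          # [ 1  3  6 10]

m = np.array([[1, 2], [3, 4]])
print(np.cumsum(m))          # flattened: [ 1  3  6 10]
print(np.cumsum(m, axis=0))  # down columns: [[1 2] [4 6]]
print(np.cumsum(m, axis=1))  # across rows:  [[1 3] [3 7]]
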

Example 1
Project: cgp-cnn   Author: sg-nm   File: cgp.py    License: MIT License
def active_net_list(self):
        net_list = [["input", 0, 0]]
        active_cnt = np.arange(self.net_info.input_num + self.net_info.node_num + self.net_info.out_num)
        active_cnt[self.net_info.input_num:] = np.cumsum(self.is_active)

        for n, is_a in enumerate(self.is_active):
            if is_a:
                t = self.gene[n][0]
                if n < self.net_info.node_num:    # intermediate node
                    type_str = self.net_info.func_type[t]
                else:    # output node
                    type_str = self.net_info.out_type[t]

                connections = [active_cnt[self.gene[n][i+1]] for i in range(self.net_info.max_in_num)]
                net_list.append([type_str] + connections)
        return net_list


# CGP with (1 + \lambda)-ES 
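
The np.cumsum over the boolean is_active mask is what re-numbers the surviving nodes: each active node's compact index is the count of active nodes up to and including it. A standalone sketch of that re-indexing idiom, with a made-up mask:

import numpy as np

is_active = np.array([True, False, True, True, False, True])
new_index = np.cumsum(is_active)  # booleans sum as 0/1
print(new_index)                  # [1 1 2 3 3 4]: active nodes become 1, 2, 3, 4
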
Example 2
Project: fullrmc   Author: bachiraoun   File: Collection.py    License: GNU Affero General Public License v3.0
def step_function(x, center=0, FWHM=0.1, height=1, check=True):
    """
    Compute a step function as the cumulative summation of a gaussian
    distribution of a given vector.

    :Parameters:
        #. x (numpy.ndarray): The vector to compute the gaussian. gaussian
           is computed as a function of x.
        #. center (number): The center of the step function, which is
           the center of the gaussian.
        #. FWHM (number): The Full Width at Half Maximum of the gaussian.
        #. height (number): The height of the step function.
        #. check (boolean): whether to check arguments before generating
           vectors.
    """
    if check:
        assert is_number(height), LOGGER.error("height must be a number")
        height = FLOAT_TYPE(height)
    g  = gaussian(x, center=center, FWHM=FWHM, normalize=False, check=check)
    sf = np.cumsum(g)
    sf /= sf[-1]
    return (sf*height).astype(FLOAT_TYPE) 
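
Dividing the cumulative sum by its last element, as step_function does, turns any non-negative curve into a monotone ramp from 0 to 1 (a discrete CDF). The same idea without the fullrmc helpers, with the gaussian written out by hand:

import numpy as np

x = np.linspace(-5, 5, 1001)
fwhm = 1.0
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))  # convert FWHM to standard deviation
g = np.exp(-0.5 * (x / sigma) ** 2)

sf = np.cumsum(g)
sf /= sf[-1]            # normalize: step rises from ~0 to exactly 1
print(sf[0], sf[-1])
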
Example 3
Project: DOTA_models   Author: ringringyi   File: utils.py    License: Apache License 2.0
def calc_pr(gt, out, wt=None):
  if wt is None:
    wt = np.ones((gt.size,1))

  gt = gt.astype(np.float64).reshape((-1,1))
  wt = wt.astype(np.float64).reshape((-1,1))
  out = out.astype(np.float64).reshape((-1,1))

  gt = gt*wt
  tog = np.concatenate([gt, wt, out], axis=1)*1.
  ind = np.argsort(tog[:,2], axis=0)[::-1]
  tog = tog[ind,:]
  cumsumsortgt = np.cumsum(tog[:,0])
  cumsumsortwt = np.cumsum(tog[:,1])
  prec = cumsumsortgt / cumsumsortwt
  rec = cumsumsortgt / np.sum(tog[:,0])

  ap = voc_ap(rec, prec)
  return ap, rec, prec 
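
The trick in calc_pr is that, once predictions are sorted by score, cumulative sums of the ground-truth and weight columns give running true-positive and prediction counts, so precision and recall at every threshold come out of one vectorized pass. A toy unweighted version with made-up scores:

import numpy as np

gt = np.array([1, 0, 1, 1, 0], dtype=float)   # ground-truth labels
scores = np.array([0.9, 0.8, 0.7, 0.3, 0.1])  # predicted scores

order = np.argsort(scores)[::-1]              # sort descending by score
tp = np.cumsum(gt[order])                     # running true-positive count
n_pred = np.arange(1, len(gt) + 1)            # running number of predictions
print(tp / n_pred)                            # precision at each threshold
print(tp / gt.sum())                          # recall at each threshold
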
Example 4
Project: robosuite   Author: StanfordVL   File: demo_sampler_wrapper.py    License: MIT License
def sample(self):
        """
        This is the core sampling method. Samples a state from a
        demonstration, in accordance with the configuration.
        """

        # chooses a sampling scheme randomly based on the mixing ratios
        seed = random.uniform(0, 1)
        ratio = np.cumsum(self.scheme_ratios)
        ratio = ratio > seed
        for i, v in enumerate(ratio):
            if v:
                break

        sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
        return sample_method() 
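
The cumsum-then-compare loop above is a hand-rolled categorical draw: the first bin whose cumulative ratio exceeds the uniform sample wins. np.searchsorted expresses the same thing without the loop; a standalone sketch with hypothetical mixing ratios:

import numpy as np

scheme_ratios = np.array([0.2, 0.5, 0.3])  # must sum to 1
cum = np.cumsum(scheme_ratios)             # [0.2 0.7 1. ]
seed = np.random.uniform(0, 1)
i = np.searchsorted(cum, seed)             # index of first cum value >= seed
print(i)                                   # 0, 1 or 2, in proportion 0.2/0.5/0.3
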
Example 5
Project: fine-lm   Author: akzaidi   File: algorithmic.py    License: MIT License
def zipf_distribution(nbr_symbols, alpha):
  """Helper function: Create a Zipf distribution.

  Args:
    nbr_symbols: number of symbols to use in the distribution.
    alpha: float, Zipf's law distribution parameter. For modelling
      natural text, alpha usually falls in the range [1.1, 1.6].

  Returns:
    distr_map: list of float, Zipf's distribution over nbr_symbols.

  """
  tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha)
  zeta = np.r_[0.0, np.cumsum(tmp)]
  return [x / zeta[-1] for x in zeta] 
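
Note that the returned list is already cumulative (zeta[k] / zeta[-1]), i.e. a CDF with a leading 0.0, so drawing symbols from it amounts to inverting the CDF with uniform samples. A hedged usage sketch, assuming zipf_distribution above is in scope:

import numpy as np

distr = zipf_distribution(nbr_symbols=10, alpha=1.5)  # CDF of length 11
u = np.random.uniform(0, 1, size=5)
symbols = np.searchsorted(distr, u) - 1               # 0-based symbol ids
print(symbols)
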
Example 6
Project: PHATE   Author: KrishnaswamyLab   File: tree.py    License: GNU General Public License v2.0
def gen_dla(
    n_dim=100, n_branch=20, branch_length=100, rand_multiplier=2, seed=37, sigma=4
):
    np.random.seed(seed)
    M = np.cumsum(-1 + rand_multiplier * np.random.rand(branch_length, n_dim), 0)
    for i in range(n_branch - 1):
        ind = np.random.randint(branch_length)
        new_branch = np.cumsum(
            -1 + rand_multiplier * np.random.rand(branch_length, n_dim), 0
        )
        M = np.concatenate([M, new_branch + M[ind, :]])

    noise = np.random.normal(0, sigma, M.shape)
    M = M + noise

    # returns the group labels for each point to make it easier to visualize
    # embeddings
    C = np.array([i // branch_length for i in range(n_branch * branch_length)])

    return M, C 
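
Each branch in gen_dla is a random walk: cumsum along axis 0 turns i.i.d. per-step increments into positions (with rand_multiplier=2 the steps are uniform on [-1, 1), so the walk has zero drift). The core idiom in isolation:

import numpy as np

rng = np.random.default_rng(37)
steps = -1 + 2 * rng.random((100, 3))  # 100 zero-mean steps in 3 dimensions
walk = np.cumsum(steps, axis=0)        # position = running sum of steps
print(walk.shape)                      # (100, 3)
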
Example 7
Project: pfilter   Author: johnhw   File: pfilter.py    License: MIT License
def residual_resample(weights):
    n = len(weights)
    indices = np.zeros(n, np.uint32)
    # take int(N*w) copies of each weight
    num_copies = (n * weights).astype(np.uint32)
    k = 0
    for i in range(n):
        for _ in range(num_copies[i]):  # emit num_copies[i] copies of index i
            indices[k] = i
            k += 1
    # use multinomial resampling on the residual to fill up the rest.
    residual = n * weights - num_copies  # fractional part of n*w (the original `weights - num_copies` can go negative)
    residual /= np.sum(residual)
    cumsum = np.cumsum(residual)
    cumsum[-1] = 1
    indices[k:n] = np.searchsorted(cumsum, np.random.uniform(0, 1, n - k))
    return indices 
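
A quick usage sketch, assuming residual_resample above is in scope and the weights are normalized:

import numpy as np

np.random.seed(0)
weights = np.array([0.5, 0.25, 0.15, 0.1])
print(residual_resample(weights))  # e.g. [0 0 1 2]; counts roughly n*weights
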
Example 8
Project: pfilter   Author: johnhw   File: pfilter.py    License: MIT License
def create_indices(positions, weights):
    n = len(weights)
    indices = np.zeros(n, np.uint32)
    cumsum = np.cumsum(weights)
    i, j = 0, 0
    while i < n:
        if positions[i] < cumsum[j]:
            indices[i] = j
            i += 1
        else:
            j += 1

    return indices


### end rlabbe's resampling functions 
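
create_indices is the inner loop shared by systematic and stratified resampling: it walks the cumulative weights once, emitting an index for every pre-computed position. Hedged usage with systematic (evenly spaced, single random offset) positions:

import numpy as np

weights = np.array([0.5, 0.25, 0.15, 0.1])
n = len(weights)
positions = (np.random.uniform() + np.arange(n)) / n  # systematic positions
print(create_indices(positions, weights))             # e.g. [0 0 1 2]
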
Example 9
Project: gnocchi   Author: gnocchixyz   File: carbonara.py    License: Apache License 2.0
def unserialize(cls, data, block_size, back_window):
        uncompressed = lz4.block.decompress(data)
        nb_points = (
            len(uncompressed) // cls._SERIALIZATION_TIMESTAMP_VALUE_LEN
        )

        try:
            timestamps = numpy.frombuffer(uncompressed, dtype='<Q',
                                          count=nb_points)
            values = numpy.frombuffer(
                uncompressed, dtype='<d',
                offset=nb_points * cls._SERIALIZATION_TIMESTAMP_LEN)
        except ValueError:
            raise InvalidData

        return cls.from_data(
            numpy.cumsum(timestamps),
            values,
            block_size=block_size,
            back_window=back_window) 
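
Here numpy.cumsum is the decoder for delta-encoded timestamps: the serialized stream evidently stores differences between consecutive points, and the running sum reconstructs absolute times. The encode/decode pair in isolation (np.diff with prepend is the matching encoder):

import numpy as np

timestamps = np.array([1000, 1005, 1012, 1020], dtype=np.uint64)
deltas = np.diff(timestamps, prepend=0)      # delta-encode; first value kept
decoded = np.cumsum(deltas)                  # cumsum inverts the encoding
print(np.array_equal(decoded, timestamps))   # True
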
Example 10
Project: pyscf   Author: pyscf   File: kccsd_uhf.py    License: Apache License 2.0
def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    sizes = (nkpts*nocca*nvira, nkpts*noccb*nvirb,
             nkpts**3*nocca**2*nvira**2, nkpts**3*nocca*noccb*nvira*nvirb,
             nkpts**3*noccb**2*nvirb**2)
    sections = np.cumsum(sizes[:-1])
    t1a, t1b, t2aa, t2ab, t2bb = np.split(vec, sections)

    t1a = t1a.reshape(nkpts,nocca,nvira)
    t1b = t1b.reshape(nkpts,noccb,nvirb)
    t2aa = t2aa.reshape(nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)
    t2ab = t2ab.reshape(nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)
    t2bb = t2bb.reshape(nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)
    return (t1a,t1b), (t2aa,t2ab,t2bb) 
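
The sizes -> cumsum -> np.split idiom used here is the standard way to cut a flat vector into variable-length segments: np.split wants boundary offsets, and the cumulative sum of all but the last size provides exactly those. Minimal illustration:

import numpy as np

sizes = (2, 3, 4)
vec = np.arange(9)
sections = np.cumsum(sizes[:-1])  # [2 5]: split points, not lengths
a, b, c = np.split(vec, sections)
print(a, b, c)                    # [0 1] [2 3 4] [5 6 7 8]
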
Example 11
Project: pyscf   Author: pyscf   File: mpi.py    License: Apache License 2.0
def work_balanced_partition(tasks, costs=None):
    if costs is None:
        costs = numpy.ones(len(tasks))
    if rank == 0:
        segsize = float(sum(costs)) / pool.size
        loads = []
        cum_costs = numpy.cumsum(costs)
        start_id = 0
        for k in range(pool.size):
            stop_id = numpy.argmin(abs(cum_costs - (k+1)*segsize)) + 1
            stop_id = max(stop_id, start_id+1)
            loads.append([start_id,stop_id])
            start_id = stop_id
        comm.bcast(loads)
    else:
        loads = comm.bcast()
    if rank < len(loads):
        start, stop = loads[rank]
        return tasks[start:stop]
    else:
        return tasks[:0] 
Example 12
Project: pyscf   Author: pyscf   File: uccsd.py    License: Apache License 2.0
def vector_to_amplitudes(vector, nmo, nocc):
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira, nvirb = nmoa-nocca, nmob-noccb
    nocc = nocca + noccb
    nvir = nvira + nvirb
    nov = nocc * nvir
    size = nov + nocc*(nocc-1)//2*nvir*(nvir-1)//2
    if vector.size == size:
        #return ccsd.vector_to_amplitudes_s4(vector, nmo, nocc)
        raise RuntimeError('Input vector is a GCCSD vector')
    else:
        sizea = nocca * nvira + nocca*(nocca-1)//2*nvira*(nvira-1)//2
        sizeb = noccb * nvirb + noccb*(noccb-1)//2*nvirb*(nvirb-1)//2
        sections = np.cumsum([sizea, sizeb])
        veca, vecb, t2ab = np.split(vector, sections)
        t1a, t2aa = ccsd.vector_to_amplitudes_s4(veca, nmoa, nocca)
        t1b, t2bb = ccsd.vector_to_amplitudes_s4(vecb, nmob, noccb)
        t2ab = t2ab.copy().reshape(nocca,noccb,nvira,nvirb)
        return (t1a,t1b), (t2aa,t2ab,t2bb) 
Example 13
Project: pyscf   Author: pyscf   File: ucisd.py    License: Apache License 2.0
def cisdvec_to_amplitudes(civec, nmo, nocc):
    norba, norbb = nmo
    nocca, noccb = nocc
    nvira = norba - nocca
    nvirb = norbb - noccb
    nooa = nocca * (nocca-1) // 2
    nvva = nvira * (nvira-1) // 2
    noob = noccb * (noccb-1) // 2
    nvvb = nvirb * (nvirb-1) // 2
    size = (1, nocca*nvira, noccb*nvirb, nocca*noccb*nvira*nvirb,
            nooa*nvva, noob*nvvb)
    loc = numpy.cumsum(size)
    c0 = civec[0]
    c1a = civec[loc[0]:loc[1]].reshape(nocca,nvira)
    c1b = civec[loc[1]:loc[2]].reshape(noccb,nvirb)
    c2ab = civec[loc[2]:loc[3]].reshape(nocca,noccb,nvira,nvirb)
    c2aa = _unpack_4fold(civec[loc[3]:loc[4]], nocca, nvira)
    c2bb = _unpack_4fold(civec[loc[4]:loc[5]], noccb, nvirb)
    return c0, (c1a,c1b), (c2aa,c2ab,c2bb) 
Example 14
Project: NeuroKit   Author: neuropsychology   File: ecg_findpeaks.py    License: MIT License
def _ecg_findpeaks_MWA(signal, window_size):
    """From https://github.com/berndporr/py-ecg-detectors/"""

    mwa = np.zeros(len(signal))
    sums = np.cumsum(signal)

    def get_mean(begin, end):
        if begin == 0:
            return sums[end - 1] / end

        dif = sums[end - 1] - sums[begin - 1]
        return dif / (end - begin)

    for i in range(len(signal)):  # pylint: disable=C0200
        if i < window_size:
            section = signal[0:i]
        else:
            section = get_mean(i - window_size, i)

        if i != 0:
            mwa[i] = np.mean(section)
        else:
            mwa[i] = signal[i]

    return mwa 
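
The sums = np.cumsum(signal) line is the classic prefix-sum trick for O(1) window means: the mean over [begin, end) is (sums[end-1] - sums[begin-1]) / (end - begin), so no window is ever re-summed. A standalone check of that identity:

import numpy as np

signal = np.random.default_rng(1).random(50)
sums = np.cumsum(signal)
begin, end = 10, 30
window_mean = (sums[end - 1] - sums[begin - 1]) / (end - begin)
print(np.isclose(window_mean, signal[begin:end].mean()))  # True
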
Example 15
Project: NeuroKit   Author: neuropsychology   File: signal_changepoints.py    License: MIT License
def _signal_changepoints_cost_mean(signal):
    """Cost function for a normally distributed signal with a changing mean."""
    i_variance_2 = 1 / (np.var(signal) ** 2)
    cmm = [0.0]
    cmm.extend(np.cumsum(signal))

    cmm2 = [0.0]
    cmm2.extend(np.cumsum(np.abs(signal)))

    def cost(start, end):
        cmm2_diff = cmm2[end] - cmm2[start]
        cmm_diff = pow(cmm[end] - cmm[start], 2)
        i_diff = end - start
        diff = cmm2_diff - cmm_diff
        return (diff / i_diff) * i_variance_2

    return cost 
Example 16
Project: DETAD   Author: HumamAlwassel   File: sensitivity_analysis.py    License: MIT License
def compute_mAP_N(result,this_cls_pred,this_cls_gt):
    ap = np.zeros(len(result.tiou_thresholds))
    tp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))
    fp = np.zeros((len(result.tiou_thresholds), len(this_cls_pred)))

    for tidx, tiou in enumerate(result.tiou_thresholds): 
        fp[tidx,pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values] = 1
        tp[tidx,~(pd.isnull(this_cls_pred[result.matched_gt_id_cols[tidx]]).values)] = 1

    tp_cumsum = np.cumsum(tp, axis=1).astype(float)
    fp_cumsum = np.cumsum(fp, axis=1).astype(float)
    recall_cumsum = tp_cumsum / len(np.unique(this_cls_gt['gt-id']))
    precision_cumsum = recall_cumsum * result.average_num_instance_per_class / (recall_cumsum * result.average_num_instance_per_class + fp_cumsum)

    for tidx in range(len(result.tiou_thresholds)):
        ap[tidx] = interpolated_prec_rec(precision_cumsum[tidx,:], recall_cumsum[tidx,:])
    
    return ap.mean()

# Initialize true positive and false positive vectors. 
Example 17
Project: pyfilter   Author: tingiskhan   File: resampling.py    License: MIT License
def filterpy_systematic_resample(weights, u):
    """
    ___NOTE___: This is the systematic resampling function from:
        https://github.com/rlabbe/filterpy/blob/master/filterpy/monte_carlo/resampling.py,
    i.e. __NOT MINE__, modified to take as input the offsetting random variable.
    """
    N = len(weights)

    # make N subdivisions, and choose positions with a consistent random offset
    positions = (u + np.arange(N)) / N

    indexes = np.zeros(N, 'i')
    cumulative_sum = np.cumsum(weights)
    i, j = 0, 0
    while i < N:
        if positions[i] < cumulative_sum[j]:
            indexes[i] = j
            i += 1
        else:
            j += 1
    return indexes 
Example 18
Project: libTLDA   Author: wmkouw   File: tcpr.py    License: MIT License
def project_simplex(self, v, z=1.0):
        """
        Project vector onto simplex using sorting.

        Reference: "Efficient Projections onto the L1-Ball for Learning in High
        Dimensions (Duchi, Shalev-Shwartz, Singer, Chandra, 2006)."

        Parameters
        ----------
        v : array
            vector to be projected, shape (n,)
        z : float
            constant (def: 1.0)

        Returns
        -------
        w : array
            projected vector, shape (n,)

        """
        # Number of dimensions
        n = v.shape[0]

        # Sort vector
        mu = np.sort(v, axis=0)[::-1]

        # Find rho
        C = np.cumsum(mu) - z
        j = np.arange(n) + 1
        rho = j[mu - C/j > 0][-1]

        # Define theta
        theta = C[mu - C/j > 0][-1] / float(rho)

        # Subtract theta from original vector and cap at 0
        w = np.maximum(v - theta, 0)

        # Return projected vector
        return w 
Example 19
Project: subword-qac   Author: clovaai   File: metric.py    License: MIT License
def mrr_summary(ranks, pranks, seens, n_candidates):
    ranks = np.array(ranks)
    pranks = np.array(pranks)

    n = np.zeros(3, dtype=int)
    rank = np.zeros((3, n_candidates + 1), dtype=int)
    prank = np.zeros((3, n_candidates + 1), dtype=int)
    reciprocal = np.array([0.] + [1. / r for r in range(1, n_candidates + 1)]).reshape(1, -1)
    for s, r, pr in zip(seens, ranks, pranks):
        for i in [1 - s, 2]:
            n[i] += 1
            rank[i, r] += 1
            prank[i, pr] += 1
    mrr = np.cumsum(rank * reciprocal, 1) / n.reshape((3, 1))
    pmrr = np.cumsum(prank * reciprocal, 1) / n.reshape((3, 1))
    
    logs = []
    for i in range(1, n_candidates + 1):
        i_str = ' '.join(f"{mrr[s, i]:.4f} ({seen_str})" for s, seen_str in enumerate(['seen', 'unseen', 'all']))
        logs.append(f"mrr @{i:-2d}: {i_str}")
    logs.append(" ")
    for i in range(1, n_candidates + 1):
        i_str = ' '.join(f"{pmrr[s, i]:.4f} ({seen_str})" for s, seen_str in enumerate(['seen', 'unseen', 'all']))
        logs.append(f"pmrr @{i:-2d}: {i_str}")
    logs.append(" ")
    return logs 
Example 20
Project: deep-learning-note   Author: wdxtub   File: 4_numpy_100.py    License: MIT License
def moving_averate(a, n=3):
    ret = np.cumsum(a, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n-1:] / n 
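
This is the classic O(n) sliding mean from the "numpy 100" exercises: subtracting the cumulative sum shifted by n leaves exactly the sum of each length-n window. Cross-checking against a direct convolution, assuming the (misspelled) moving_averate above is in scope:

import numpy as np

a = np.arange(10, dtype=float)
fast = moving_averate(a, n=3)
ref = np.convolve(a, np.ones(3) / 3, mode='valid')  # direct sliding mean
print(np.allclose(fast, ref))                       # True
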
Example 21
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def t(distances,coordinates):
        n = coordinates.shape[1]
        if distances is None: distances = np.ones(n - 1)
        t = np.cumsum(np.pad(distances, (1,0), 'constant'))
        t.setflags(write=False)
        return t 
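
np.pad with (1, 0) prepends a zero before the cumsum, so t[0] is 0 and t[i] is the total distance covered up to point i, the usual knot parameterization for curves. In isolation:

import numpy as np

distances = np.array([2.0, 1.0, 3.0])  # gaps between 4 consecutive points
t = np.cumsum(np.pad(distances, (1, 0), 'constant'))
print(t)                               # [0. 2. 3. 6.]
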
Example 22
Project: fullrmc   Author: bachiraoun   File: Collection.py    License: GNU Affero General Public License v3.0
def set_weights(self, weights=None):
        """
        Set generator's weights.

        :Parameters:
            #. weights (None, list, numpy.ndarray): The weights scheme.
               The length of the weights array defines the number of bins
               and hence the resolution of the biased number generation.
               If None is given, a ones array of length 10000 is
               automatically generated.
        """
        # set original weights
        if weights is None:
            self.__bins = 10000
            self.__originalWeights = np.ones(self.__bins)
        else:
            assert isinstance(weights, (list, set, tuple, np.ndarray)), LOGGER.error("weights must be a list of numbers")
            if isinstance(weights,  np.ndarray):
                assert len(weights.shape)==1, LOGGER.error("weights must be uni-dimensional")
            wgts = []
            assert len(weights)>=100, LOGGER.error("weights minimum length allowed is 100")
            for w in weights:
                assert is_number(w), LOGGER.error("weights items must be numbers")
                w = FLOAT_TYPE(w)
                assert w>=0, LOGGER.error("weights items must be positive")
                wgts.append(w)
            self.__originalWeights = np.array(wgts, dtype=FLOAT_TYPE)
            self.__bins = len(self.__originalWeights)
        # set bin width
        self.__binWidth     = FLOAT_TYPE(self.rang/self.__bins)
        self.__halfBinWidth = FLOAT_TYPE(self.__binWidth/2.)
        # set scheme
        self.__scheme = np.cumsum( self.__originalWeights ) 
Example 23
Project: fullrmc   Author: bachiraoun   File: Collection.py    License: GNU Affero General Public License v3.0
def set_weights(self, weights):
        """
        Set the generator integer numbers weights.

        :Parameters:
            #. weights (None, list, numpy.ndarray): The weights scheme.
               The length must be equal to the range between lowerLimit and
               upperLimit. If None is given, ones array of length
               upperLimit-lowerLimit+1 is automatically generated.
        """
        if weights is None:
            self.__originalWeights = np.ones(self.upperLimit-self.lowerLimit+1)
        else:
            assert isinstance(weights, (list, set, tuple, np.ndarray)), LOGGER.error("weights must be a list of numbers")
            if isinstance(weights,  np.ndarray):
                assert len(weights.shape)==1, LOGGER.error("weights must be uni-dimensional")
            wgts = []
            assert len(weights)==self.upperLimit-self.lowerLimit+1, LOGGER.error("weights length must be exactly equal to 'upperLimit-lowerLimit+1' which is %i" % (self.upperLimit-self.lowerLimit+1))
            for w in weights:
                assert is_number(w), LOGGER.error("weights items must be numbers")
                w = FLOAT_TYPE(w)
                assert w>=0, LOGGER.error("weights items must be positive")
                wgts.append(w)
            self.__originalWeights = np.array(wgts, dtype=FLOAT_TYPE)
        # set bins
        self.__bins = len( self.__originalWeights )
        # set scheme
        self.__scheme = np.cumsum( self.__originalWeights ) 
Example 24
Project: fullrmc   Author: bachiraoun   File: MoveGenerator.py    License: GNU Affero General Public License v3.0
def set_selection_scheme(self):
        """ Set selection scheme. """
        cumsumWeights = np.cumsum(self.__generatorsWeight, dtype=FLOAT_TYPE)
        self.__selectionScheme = cumsumWeights/cumsumWeights[-1] 
Example 25
Project: fullrmc   Author: bachiraoun   File: RandomSelectors.py    License: GNU Affero General Public License v3.0
def _set_selection_scheme(self):
        """ Sets selection scheme. """
        cumsumWeights = np.cumsum(self.__weights, dtype=FLOAT_TYPE)
        self._selectionScheme = cumsumWeights/cumsumWeights[-1] 
Example 26
Project: fullrmc   Author: bachiraoun   File: RandomSelectors.py    License: GNU Affero General Public License v3.0
def _set_selection_scheme(self):
        """ Sets selection scheme. """
        self._selectionScheme = np.cumsum(self.weights, dtype=FLOAT_TYPE) 
Example 27
def _recall_prec(self, record, count):
        """ get recall and precision from internal records """
        record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
        sorted_records = record[record[:,0].argsort()[::-1]]
        tp = np.cumsum(sorted_records[:, 1].astype(int) == 1)
        fp = np.cumsum(sorted_records[:, 1].astype(int) == 2)
        if count <= 0:
            recall = tp * 0.0
        else:
            recall = tp / float(count)
        prec = tp.astype(float) / (tp + fp)
        return recall, prec 
Example 28
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: model.py    License: Apache License 2.0
def act(self, ps):
        us = np.random.uniform(size=ps.shape[0])[:, np.newaxis]
        as_ = (np.cumsum(ps, axis=1) > us).argmax(axis=1)
        return as_ 
Example 29
Project: PolarSeg   Author: edwardzhou130   File: ptBEV.py    License: BSD 3-Clause "New" or "Revised" License
def grp_range_torch(a,dev):
    idx = torch.cumsum(a,0)
    id_arr = torch.ones(idx[-1],dtype = torch.int64,device=dev)
    id_arr[0] = 0
    id_arr[idx[:-1]] = -a[:-1]+1
    return torch.cumsum(id_arr,0) 
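
grp_range_torch builds concatenated per-group ranges (0..a[i]-1 for every group) with two cumsums and no Python loop: the first cumsum marks group boundaries, and writing -a[i]+1 at each boundary makes the second cumsum reset to zero there. The same trick rendered in NumPy:

import numpy as np

a = np.array([3, 2, 4])            # group sizes
idx = np.cumsum(a)                 # group end offsets: [3 5 9]
id_arr = np.ones(idx[-1], dtype=np.int64)
id_arr[0] = 0
id_arr[idx[:-1]] = -a[:-1] + 1     # at each group start, jump back to 0
print(np.cumsum(id_arr))           # [0 1 2 0 1 0 1 2 3]
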
Example 30
Project: cvpr2018-hnd   Author: kibok90   File: samplers.py    License: MIT License
def balanced_shuffle(labels, num_epochs=50, path=None, start_time=time.time()):

    order_path = '{path}/balanced_order_{num_epochs}.h5' \
                       .format(path=path, num_epochs=num_epochs)
    if path is not None and os.path.isfile(order_path):
        with h5py.File(order_path, 'r') as f:
            order = f['order'][:]
    else:
        evenness = 5 # batch_size | evenness*num_classes
        classes = np.unique(labels.numpy())
        num_classes = len(classes)
        loc_data_per_class = [np.argwhere(labels.numpy() == k).flatten() for k in classes]
        num_data_per_class = [(labels.numpy() == k).sum() for k in classes]
        max_data_per_class = max(num_data_per_class)
        num_loc_split = (max_data_per_class // evenness) * np.ones(evenness, dtype=int)
        num_loc_split[:(max_data_per_class % evenness)] += 1
        loc_split = [0]
        loc_split.extend(np.cumsum(num_loc_split).tolist())
        order = -np.ones([num_epochs, max_data_per_class*num_classes], dtype=int)
        for epoch in range(num_epochs):
            order_e = -np.ones([max_data_per_class, num_classes], dtype=int)
            for k in classes:
                loc_k = np.random.permutation(loc_data_per_class[k])
                for i in range(evenness):
                    loc_i = loc_k[loc_split[i]:loc_split[i+1]]
                    order_e[i:(len(loc_i)*evenness+i):evenness, k] = loc_i
            order[epoch] = order_e.flatten()
            print_freq = min([100, (num_epochs-1) // 5 + 1])
            print_me = (epoch == 0 or epoch == num_epochs-1 or (epoch+1) % print_freq == 0)
            if print_me:
                print('{epoch:4d}/{num_epochs:4d} e; '.format(epoch=epoch+1, num_epochs=num_epochs), end='')
                print('generate balanced random order; {time:8.3f} s'.format(time=time.time()-start_time))
        
        if path is not None:
            with h5py.File(order_path, 'w') as f:
                f.create_dataset('order', data=order, compression='gzip', compression_opts=9)
    
    print('balanced random order; {time:8.3f} s'.format(time=time.time()-start_time))
    return torch.from_numpy(order)