Python numpy.random.shuffle() Examples

The following are 29 code examples of numpy.random.shuffle(), collected from open-source projects. Each example is preceded by its source file, project, and license, so you can trace it back to its original context. You may also want to check out the other available functions and classes of the numpy.random module.
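Before diving into the project code, a minimal refresher sketch may help: shuffle() permutes a sequence in place along its first axis and returns None, so the common mistake arr = np.random.shuffle(arr) leaves you holding None.

import numpy as np

# shuffle() works in place and returns None.
arr = np.arange(10)
np.random.shuffle(arr)
print(arr)  # e.g. [3 7 0 9 4 1 8 2 5 6] -- order varies per run

# For a multi-dimensional array, only the first axis is permuted:
# rows move around, but the contents of each row stay intact.
mat = np.arange(12).reshape(4, 3)
np.random.shuffle(mat)
print(mat)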
Example #1
Source File: loader.py    From Detectron.pytorch with MIT License
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
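The reshape trick above works because np.arange() produces a contiguous array, so reshape() returns a view: shuffling the rows of that (n, IMS_PER_BATCH) view permutes whole mini-batches inside indices while keeping each batch's members adjacent. A standalone sketch (the batch size of 2 is an arbitrary illustrative value):

import numpy as np

ims_per_batch = 2            # illustrative stand-in for cfg.TRAIN.IMS_PER_BATCH
indices = np.arange(8)
# reshape() returns a view of the contiguous array, so shuffling the
# view's rows in place reorders `indices` itself, one batch at a time.
np.random.shuffle(indices.reshape(-1, ims_per_batch))
print(indices)  # e.g. [4 5 0 1 6 7 2 3] -- pairs stay adjacent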
Example #2
Source File: srez_main.py    From srez with MIT License
def prepare_dirs(delete_train_dir=False):
    # Create checkpoint dir (do not delete anything)
    if not tf.gfile.Exists(FLAGS.checkpoint_dir):
        tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
    
    # Cleanup train dir
    if delete_train_dir:
        if tf.gfile.Exists(FLAGS.train_dir):
            tf.gfile.DeleteRecursively(FLAGS.train_dir)
        tf.gfile.MakeDirs(FLAGS.train_dir)

    # Return names of training files
    if not tf.gfile.Exists(FLAGS.dataset) or \
       not tf.gfile.IsDirectory(FLAGS.dataset):
        raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.dataset,))

    filenames = tf.gfile.ListDirectory(FLAGS.dataset)
    filenames = sorted(filenames)
    random.shuffle(filenames)
    filenames = [os.path.join(FLAGS.dataset, f) for f in filenames]

    return filenames 
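Note the sorted() call before the shuffle: directory listings come back in a platform-dependent order, so sorting first gives a deterministic starting point, and seeding the RNG would then make the shuffled order reproducible as well. A minimal standard-library sketch of the same pattern (the directory path is made up):

import os
import random

train_dir = 'data/train'                   # hypothetical dataset folder
filenames = sorted(os.listdir(train_dir))  # deterministic base order
random.shuffle(filenames)                  # then randomize it
filenames = [os.path.join(train_dir, f) for f in filenames]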
Example #3
Source File: loader.py    From DIoU-pytorch-detectron with GNU General Public License v3.0
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
Example #4
Source File: loader.py    From detectron-self-train with MIT License
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
Example #5
Source File: loader.py    From FPN-Pytorch with MIT License
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
Example #6
Source File: loader.py    From Large-Scale-VRD.pytorch with MIT License
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
Example #7
Source File: loader_rel.py    From Large-Scale-VRD.pytorch with MIT License
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
Example #8
Source File: loader.py    From PMFNet with MIT License
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
Example #9
Source File: base.py    From cesi with Apache License 2.0
def optim(self, xys):					# Performs actual optimization
		idx 		= np.arange(len(xys))					# Index for every triple in dataset
		self.batch_size = int(np.ceil(len(xys) / self.nbatches))	  	# Calculate batch size (n_obsv / n_batches)
		batch_idx 	= np.arange(self.batch_size, len(xys), self.batch_size) # np.arange(start, stop, step) -> To get split positions (10,50,10) = [10,20,30,40]

		for self.epoch in range(1, self.max_epochs + 1): 	# Running for maximum number of epochs
			# shuffle training examples
			self.pre_epoch()				# Set loss = 0
			shuffle(idx)					# Shuffle the indexes of triples

			# store epoch for callback
			self.epoch_start = timeit.default_timer()	# Measuring time

			# process mini-batches
			for batch in np.split(idx, batch_idx):		# Get small subset of triples from training data
				bxys = [xys[z] for z in batch]		# Get triples present in the selected batch
				self.process_batch(bxys)		# Perform SGD using them

			# check callback function, if false return
			for f in self.post_epoch:			# Perform post-epoch operations if specified
				if not f(self): break 
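The shuffle-then-split pattern in optim() is easy to try in isolation; a minimal sketch with made-up sizes:

import numpy as np
from numpy.random import shuffle

n_examples, n_batches = 10, 3
idx = np.arange(n_examples)
batch_size = int(np.ceil(n_examples / n_batches))             # 4
split_points = np.arange(batch_size, n_examples, batch_size)  # [4 8]

shuffle(idx)                         # new random order each epoch, in place
for batch in np.split(idx, split_points):
    print(batch)                     # three index batches of sizes 4, 4, 2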
Example #10
Source File: loader.py    From PANet with MIT License
def __iter__(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            ratio_index = self.ratio_index[indices]
            ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) 
Example #11
Source File: base.py    From scikit-kge with MIT License
def _optim(self, xys):
        idx = np.arange(len(xys))
        self.batch_size = int(np.ceil(len(xys) / self.nbatches))  # cast to int so np.split gets integer indices
        batch_idx = np.arange(self.batch_size, len(xys), self.batch_size)

        for self.epoch in range(1, self.max_epochs + 1):
            # shuffle training examples
            self._pre_epoch()
            shuffle(idx)

            # store epoch for callback
            self.epoch_start = timeit.default_timer()

            # process mini-batches
            for batch in np.split(idx, batch_idx):
                # select indices for current batch
                bxys = [xys[z] for z in batch]
                self._process_batch(bxys)

            # check callback function, if false return
            for f in self.post_epoch:
                if not f(self):
                    break 
Example #12
Source File: loader.py    From Context-aware-ZSR with MIT License
def _reset_iter(self):
        if cfg.TRAIN.ASPECT_GROUPING:
            # indices for aspect-grouping-aware permutation
            n, rem = divmod(self.num_data, cfg.TRAIN.IMS_PER_BATCH)
            round_num_data = n * cfg.TRAIN.IMS_PER_BATCH
            indices = np.arange(round_num_data)
            npr.shuffle(indices.reshape(-1, cfg.TRAIN.IMS_PER_BATCH))  # inplace shuffle
            if rem != 0:
                indices = np.append(indices, np.arange(round_num_data, round_num_data + rem))
            self._ratio_index = self.ratio_index[indices]
            self._ratio_list_minibatch = self.ratio_list_minibatch[indices]
        else:
            rand_perm = npr.permutation(self.num_data)
            ratio_list = self.ratio_list[rand_perm]
            self._ratio_index = self.ratio_index[rand_perm]
            # re-calculate minibatch ratio list
            self._ratio_list_minibatch = cal_minibatch_ratio(ratio_list)

        self.iter_counter = 0
        self._ratio_index = self._ratio_index.tolist()
        self._ratio_list_minibatch = self._ratio_list_minibatch.tolist() 
Example #13
Source File: util.py    From MicroTokenizer with MIT License
def itershuffle(iterable, bufsize=1000):
    """Shuffle an iterator. This works by holding `bufsize` items back
    and yielding them sometime later. Obviously, this is not unbiased –
    but should be good enough for batching. Larger bufsize means less bias.
    From https://gist.github.com/andres-erbsen/1307752

    iterable (iterable): Iterator to shuffle.
    bufsize (int): Items to hold back.
    YIELDS (iterable): The shuffled iterator.
    """
    iterable = iter(iterable)
    buf = []
    try:
        while True:
            for i in range(random.randint(1, bufsize-len(buf))):
                buf.append(next(iterable))
            random.shuffle(buf)
            for i in range(random.randint(1, bufsize)):
                if buf:
                    yield buf.pop()
                else:
                    break
    except StopIteration:
        # Source exhausted: flush whatever is left in the buffer.
        random.shuffle(buf)
        while buf:
            yield buf.pop()
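A quick usage sketch for itershuffle(); the tiny bufsize is only for demonstration, and in practice a larger buffer gives a less biased order:

# Stream 0..19 in an approximately shuffled order without ever
# materializing the whole sequence.
for item in itershuffle(range(20), bufsize=5):
    print(item)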
Example #14
Source File: test_regression.py    From elasticintel with GNU General Public License v3.0
def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)])

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect() 
Example #15
Source File: test_regression.py    From elasticintel with GNU General Public License v3.0
def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect() 
Example #16
Source File: test_regression.py    From twitter-stock-recommendation with MIT License
def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)])

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect() 
Example #17
Source File: test_regression.py    From keras-lambda with MIT License
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) 
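Two properties of numpy.random.shuffle() underpin these regression tests and are worth keeping in mind: it permutes only along the first axis, and with a fixed seed the resulting permutation is reproducible. A minimal sketch:

import numpy as np

np.random.seed(12345)
mat = np.arange(9).reshape(3, 3)
np.random.shuffle(mat)       # rows are reordered; each row stays intact

np.random.seed(12345)
mat2 = np.arange(9).reshape(3, 3)
np.random.shuffle(mat2)
assert (mat == mat2).all()   # same seed -> same permutation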
Example #18
Source File: MMTransE.py    From MTransE with Apache License 2.0
def train_intersect_1epoch(self, shuffle=True, const_decay=1.0, sampling=False, L1=False):
        num_lan = len(self.languages)
        sum = 0.0
        count = 0
        index = None
        if shuffle:
            RD.shuffle(self.intersect_index)
            index = self.intersect_index
        else:
            index = range(len(self.intersect_index))
        for x in index:
            line = self.intersect_triples[x]
            count += 1
            if count % 50000 == 0:
                print "Scanned ",count," on intersect graph"

            transfer_index = ''
            for i in range(num_lan):
                for j in range(num_lan):
                    if i == j:
                        continue
                    l_left = self.languages[i]
                    l_right = self.languages[j]
                    transfer_index = l_left + l_right
                    this_transfer = self.transfer[transfer_index]
                    sum += self.gradient_decent(this_transfer, self.models[l_left].vec_e[line[i][0]], self.models[l_right].vec_e[line[j][0]], const_decay, L1)
                    sum += self.gradient_decent(this_transfer, self.models[l_left].vec_e[line[i][2]], self.models[l_right].vec_e[line[j][2]], const_decay, L1)
                    sum += self.gradient_decent(this_transfer, self.models[l_left].vec_r[line[i][1]], self.models[l_right].vec_r[line[j][1]], const_decay, L1)
        return sum 
Example #19
Source File: sampling.py    From dstc8-reddit-corpus with MIT License
def __call__(self, dlgs, group_configs):
    if not group_configs:
      return dlgs

    def make_indices(max_n, limit=-1, shuffle=False):
      inds = list(range(max_n))
      if shuffle:
        random.shuffle(inds)
      if limit > 0:
        inds = inds[:limit]
      return inds

    cfg = group_configs.pop(0)
    grouped_dlgs_dict = OrderedDict()

    for dlg in dlgs:
      if len(dlg) <= cfg.group_level:
        continue

      group_key = dlg[cfg.group_level][TURN_ID]

      if group_key not in grouped_dlgs_dict:
        grouped_dlgs_dict[group_key] = []

      grouped_dlgs_dict[group_key].append(dlg)

    all_groups = list(grouped_dlgs_dict.keys())
    inds = make_indices(len(all_groups), cfg.n_groups, cfg.shuffle_groups)
    groups = [all_groups[i] for i in inds]
    final_dlgs = []

    for g in groups:
      sub_grouped_dlgs = self.__call__(
        grouped_dlgs_dict[g], group_configs[:])
      inds = make_indices(len(sub_grouped_dlgs),
                          cfg.n_per_group, cfg.shuffle_within_groups)
      final_sub_grouped_dlgs = [sub_grouped_dlgs[i] for i in inds]
      final_dlgs.extend(final_sub_grouped_dlgs)

    return final_dlgs 
Example #20
Source File: MMTransE.py    From MTransE with Apache License 2.0
def __init__(self, dim = 100, save_dir = 'model_MtransE.bin'):
        self.dim = dim
        self.languages = []
        self.rate = 0.01  # learning rate
        self.trained_epochs = 0
        self.save_dir = save_dir
        # single-language models of each language
        self.models = {}
        self.triples = {}
        # cross-lingual linear transfer
        self.transfer = {}
        # intersect graph
        self.intersect_triples = np.array([0])
        # shuffle index for intersect triples
        self.intersect_index = np.array([0]) 
Example #21
Source File: test_regression.py    From ImageFusion with MIT License
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) 
Example #22
Source File: base.py    From scikit-kge with MIT License
def _pre_epoch(self):
        self.nviolations = 0
        if self.samplef is None:
            shuffle(self.pxs)
            shuffle(self.nxs) 
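Here pxs and nxs are shuffled independently, which is fine because they are separate pools of positive and negative examples. When two arrays must stay aligned element-wise, the usual alternative is to index both with one shared permutation; a small sketch of that related pattern:

import numpy as np

xs = np.arange(5)
ys = xs * 10
perm = np.random.permutation(len(xs))  # one shared permutation
xs, ys = xs[perm], ys[perm]            # pairs (x, 10x) stay aligned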
Example #23
Source File: shufflingbatchiterator.py    From theanolm with Apache License 2.0
def _reset(self, shuffle=True):
        """Resets the read pointer back to the beginning of the data set. If
        ``shuffle`` is set to True, also creates a new random order for
        iterating the input lines.

        :type shuffle: bool
        :param shuffle: also shuffles the input sentences, unless set to False
        """

        self._next_line = 0
        if shuffle:
            logging.debug("Generating a random order of input lines.")

            samples = []
            for (start, stop), sample_size in \
                zip(self._sentence_pointers.pointer_ranges, self._sample_sizes):

                population = numpy.arange(start, stop, dtype='int64')
                # No duplicates, unless we need more sentences than there are
                # in the file.
                replace = sample_size > len(population)
                sample = random.choice(population, sample_size, replace=replace)
                samples.append(sample)
            self._order = numpy.concatenate(samples)
            for _ in range(10):
                random.shuffle(self._order) 
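The sampling step above draws with numpy.random.choice, allowing duplicates only when the requested sample is larger than the population, and then shuffles the resulting order. The same idea in miniature (sizes are arbitrary):

import numpy as np

population = np.arange(100, dtype='int64')
sample_size = 10
replace = sample_size > len(population)   # resample only when forced to
sample = np.random.choice(population, sample_size, replace=replace)
np.random.shuffle(sample)                 # randomize the final order
print(sample)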
Example #24
Source File: test_regression.py    From mxnet-lambda with Apache License 2.0
def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)])

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect() 
Example #25
Source File: test_regression.py    From mxnet-lambda with Apache License 2.0
def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect() 
Example #26
Source File: test_regression.py    From mxnet-lambda with Apache License 2.0
def test_shuffle_mixed_dimension(self):
        # Test for trac ticket #2074
        for t in [[1, 2, 3, None],
                  [(1, 1), (2, 2), (3, 3), None],
                  [1, (2, 2), (3, 3), None],
                  [(1, 1), 2, 3, None]]:
            np.random.seed(12345)
            shuffled = list(t)
            random.shuffle(shuffled)
            assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]]) 
Example #27
Source File: test_regression.py    From keras-lambda with MIT License
def test_shuffle_of_array_of_different_length_strings(self):
        # Test that permuting an array of different length strings
        # will not cause a segfault on garbage collection
        # Tests gh-7710
        np.random.seed(1234)

        a = np.array(['a', 'a' * 1000])

        for _ in range(100):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect() 
Example #28
Source File: test_regression.py    From keras-lambda with MIT License
def test_shuffle_of_array_of_objects(self):
        # Test that permuting an array of objects will not cause
        # a segfault on garbage collection.
        # See gh-7719
        np.random.seed(1234)
        a = np.array([np.arange(1), np.arange(4)])

        for _ in range(1000):
            np.random.shuffle(a)

        # Force Garbage Collection - should not segfault.
        import gc
        gc.collect() 
Example #29
Source File: base.py    From cesi with Apache License 2.0
def pre_epoch(self):
		self.nviolations = 0
		if self.samplef is None:
			shuffle(self.pxs)
			shuffle(self.nxs)