Python threading.Barrier() Examples
The following are 30 code examples of threading.Barrier(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the threading module, or try the search function.
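
Before the project examples, here is a minimal, self-contained sketch of the basic pattern most of these snippets rely on: every thread calls wait() on a shared Barrier, and all of them are released together once the configured number of parties has arrived. The worker function and party count here are illustrative only, not taken from any of the projects below.

import threading

# Hypothetical illustration: three workers block on barrier.wait()
# until all of them have arrived, then proceed together.
barrier = threading.Barrier(3)

def worker(n):
    print(f"worker {n} waiting at the barrier")
    index = barrier.wait()  # returns a distinct arrival index in range(parties)
    print(f"worker {n} released (arrival index {index})")

threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()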

Example #1
Source File: test_namedreversesemaphore.py From etesync-dav with GNU General Public License v3.0 | 6 votes |
def test_different_key_different_thread_wait(self):
    name1 = str(uuid.uuid4())
    name2 = str(uuid.uuid4())
    barrier = threading.Barrier(2)

    thread = ExThread(target=thread_run, args=(name2, True), daemon=True)
    thread.start()
    thread.join()

    lock1 = NamedReverseSemaphore(name1)
    with lock1:
        thread = ExThread(target=thread_run, args=(name2, True, barrier), daemon=True)
        thread.start()
        barrier.wait()
        # FIXME: hack to make sure we acquired the lock in the other thread
        time.sleep(0.2)

    thread.join()
Example #2
Source File: test_namedreversesemaphore.py From etesync-dav with GNU General Public License v3.0 | 6 votes |
def test_multiple_keys_different_thread(self):
    name1 = str(uuid.uuid4())
    name2 = str(uuid.uuid4())
    name3 = str(uuid.uuid4())
    barrier = threading.Barrier(3)
    threads = []

    lock1 = NamedReverseSemaphore(name1)
    with lock1:
        threads.insert(0, ExThread(target=thread_run, args=(name2, True, barrier), daemon=True))
        threads[0].start()
        threads.insert(0, ExThread(target=thread_run, args=(name3, True, barrier), daemon=True))
        threads[0].start()
        barrier.wait()
        # FIXME: hack to make sure we acquired the lock in the other thread
        time.sleep(0.2)

    for thread in threads:
        thread.join()
Example #3
Source File: test_namedreversesemaphore.py From etesync-dav with GNU General Public License v3.0 | 6 votes |
def test_multiple_keys_multiple_times_different_thread(self):
    name1 = str(uuid.uuid4())
    name2 = str(uuid.uuid4())
    name3 = str(uuid.uuid4())
    barrier = threading.Barrier(5)
    threads = []

    lock1 = NamedReverseSemaphore(name1)
    with lock1:
        threads.insert(0, ExThread(target=thread_run, args=(name2, True, barrier), daemon=True))
        threads[0].start()
        threads.insert(0, ExThread(target=thread_run, args=(name2, True, barrier), daemon=True))
        threads[0].start()
        threads.insert(0, ExThread(target=thread_run, args=(name3, True, barrier), daemon=True))
        threads[0].start()
        threads.insert(0, ExThread(target=thread_run, args=(name3, True, barrier), daemon=True))
        threads[0].start()
        barrier.wait()
        # FIXME: hack to make sure we acquired the lock in the other thread
        time.sleep(0.2)

    for thread in threads:
        thread.join()
Example #4
Source File: in_process_communicator.py From CrypTen with MIT License | 6 votes |
def __init__(self, rank, world_size, init_ttp=False):
    self.world_size = world_size
    self.rank = rank
    self.reset_communication_stats()
    self._name = f"rank{rank}"

    with InProcessCommunicator.lock:
        if InProcessCommunicator.mailbox is None:
            InProcessCommunicator.mailbox = [
                Queue() for _ in range(self.world_size)
            ]

            # This prevents one thread from running ahead of the others and doing
            # multiple puts that would show up in the get calls below
            InProcessCommunicator.barrier = threading.Barrier(self.world_size)

    # logging:
    level = logging.getLogger().level
    logging.getLogger().setLevel(logging.INFO)
    logging.info("==================")
    logging.info("InProcessCommunicator with rank %d" % self.rank)
    logging.info("==================")
    logging.info("World size = %d" % self.get_world_size())
    logging.getLogger().setLevel(level)
Example #5
Source File: test_memory.py From cupy with MIT License | 6 votes |
def test_allocator_thread_local(self):
    def thread_body(self):
        new_pool = memory.MemoryPool()
        with cupy.cuda.using_allocator(new_pool.malloc):
            assert memory.get_allocator() == new_pool.malloc
            threading.Barrier(2)
            arr = cupy.zeros(128, dtype=cupy.int64)
            threading.Barrier(2)
            self.assertEqual(arr.data.mem.size, new_pool.used_bytes())
            threading.Barrier(2)
        assert memory.get_allocator() == self.pool.malloc

    with cupy.cuda.Device(0):
        t = threading.Thread(target=thread_body, args=(self,))
        t.daemon = True
        t.start()

        threading.Barrier(2)
        assert memory.get_allocator() == self.pool.malloc
        arr = cupy.ones(256, dtype=cupy.int64)
        threading.Barrier(2)
        self.assertEqual(arr.data.mem.size, self.pool.used_bytes())
        threading.Barrier(2)
        t.join()
Example #6
Source File: synchronize.py From ironpython3 with Apache License 2.0 | 6 votes |
def wait(self, timeout=None):
    self._cond.acquire()
    try:
        if self._flag.acquire(False):
            self._flag.release()
        else:
            self._cond.wait(timeout)

        if self._flag.acquire(False):
            self._flag.release()
            return True
        return False
    finally:
        self._cond.release()

#
# Barrier
#
Example #7
Source File: test_threads.py From tskit with MIT License | 6 votes |
def run_multiple_writers(self, writer, num_writers=32):
    barrier = threading.Barrier(num_writers)

    def writer_proxy(thread_index, results):
        barrier.wait()
        # Attempts to operate on a table while locked should raise a RuntimeError
        try:
            writer(thread_index, results)
            results[thread_index] = 0
        except RuntimeError:
            results[thread_index] = 1

    results = run_threads(writer_proxy, num_writers)
    failures = sum(results)
    successes = num_writers - failures
    # Note: we would like to insist that #failures is > 0, but this is too
    # stochastic to guarantee for test purposes.
    self.assertGreaterEqual(failures, 0)
    self.assertGreater(successes, 0)
Example #8
Source File: test_threading.py From loguru with MIT License | 6 votes |
def test_safe_logging():
    barrier = Barrier(2)
    counter = itertools.count()

    sink = NonSafeSink(1)
    logger.add(sink, format="{message}", catch=False)

    def threaded():
        barrier.wait()
        logger.info("___{}___", next(counter))

    threads = [Thread(target=threaded) for _ in range(2)]

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()

    logger.remove()

    assert sink.written in ("___0___\n___1___\n", "___1___\n___0___\n")
Example #9
Source File: barriers.py From Learning-Concurrency-in-Python with MIT License | 5 votes |
def run(self): print("Thread {} working on something".format(threading.current_thread())) time.sleep(random.randint(1,10)) print("Thread {} is joining {} waiting on Barrier".format(threading.current_thread(), self.barrier.n_waiting)) self.barrier.wait() print("Barrier has been lifted, continuing with work")
Example #10
Source File: runner.py From eventsourcing with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(self, *args: Any, **kwargs: Any):
    super(SteppingMultiThreadedRunner, self).__init__(*args, **kwargs)
    self.seen_prompt_events: Dict[str, Event] = {}
    self.fetch_barrier: Optional[Barrier] = None
    self.execute_barrier: Optional[Barrier] = None
    self.application_threads: Dict[str, BarrierControlledApplicationThread] = {}
    self.clock_thread = None
    self.stop_event = Event()
Example #11
Source File: runner.py From eventsourcing with BSD 3-Clause "New" or "Revised" License | 5 votes |
def start(self) -> None:
    super(SteppingMultiThreadedRunner, self).start()

    parties = 1 + len(self.processes)
    self.fetch_barrier = Barrier(parties)
    self.execute_barrier = Barrier(parties)

    # Create an event for each process.
    for process_name in self.processes:
        self.seen_prompt_events[process_name] = Event()

    # Construct application threads.
    for process_name, process in self.processes.items():
        process_instance_id = process_name
        thread = BarrierControlledApplicationThread(
            process=process,
            fetch_barrier=self.fetch_barrier,
            execute_barrier=self.execute_barrier,
            stop_event=self.stop_event,
        )
        self.application_threads[process_instance_id] = thread

    # Start application threads.
    for thread in self.application_threads.values():
        thread.start()

    # Start clock thread.
    self.clock_thread = BarrierControllingClockThread(
        normal_speed=self.normal_speed,
        scale_factor=self.scale_factor,
        tick_interval=self.tick_interval,
        fetch_barrier=self.fetch_barrier,
        execute_barrier=self.execute_barrier,
        stop_event=self.stop_event,
        is_verbose=self.is_verbose,
    )
    self.clock_thread.start()
Example #12
Source File: runner.py From eventsourcing with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(
    self,
    process: ProcessApplication,
    fetch_barrier: Barrier,
    execute_barrier: Barrier,
    stop_event: Event,
):
    super(BarrierControlledApplicationThread, self).__init__(daemon=True)
    self.app = process
    self.fetch_barrier = fetch_barrier
    self.execute_barrier = execute_barrier
    self.stop_event = stop_event
Example #13
Source File: runner.py From eventsourcing with BSD 3-Clause "New" or "Revised" License | 5 votes |
def __init__(
    self,
    normal_speed: int,
    scale_factor: int,
    tick_interval: Optional[Union[int, float]],
    fetch_barrier: Barrier,
    execute_barrier: Barrier,
    stop_event: Event,
    is_verbose: bool = False,
):
    super(BarrierControllingClockThread, self).__init__(daemon=True)
    # Todo: Remove the redundancy here.
    self.normal_speed = normal_speed
    self.scale_factor = scale_factor
    self.tick_interval = tick_interval
    self.fetch_barrier = fetch_barrier
    self.execute_barrier = execute_barrier
    self.stop_event = stop_event
    self.last_tick_time = 0.0
    self.last_process_time = 0.0
    self.all_tick_durations: Deque = deque()
    self.tick_adjustment: float = 0.0
    self.is_verbose = is_verbose

    if self.tick_interval:
        self.tick_durations_window_size = max(
            1, int(round(1 / self.tick_interval, 0))
        )
    else:
        self.tick_durations_window_size = 100
Example #14
Source File: cnn_util.py From benchmarks with Apache License 2.0 | 5 votes |
def roll_numpy_batches(array, batch_size, shift_ratio):
    """Moves a proportion of batches from start to the end of the array.

    This function moves a proportion of batches, specified by `shift_ratio`,
    from the starts of the array to the end. The number of batches moved is
    rounded down to the nearest integer. For example,

    ```
    roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
    ```

    Args:
      array: A Numpy array whose first dimension is the batch dimension.
      batch_size: The batch size.
      shift_ratio: Proportion of batches to move from the start of the array
        to the end of the array.

    Returns:
      A new Numpy array, with a proportion of the batches at the start of
      `array` moved to the end.
    """
    num_items = array.shape[0]
    assert num_items % batch_size == 0
    num_batches = num_items // batch_size
    starting_batch = int(num_batches * shift_ratio)
    starting_item = starting_batch * batch_size
    return np.roll(array, -starting_item, axis=0)


# For Python 2.7 compatibility, we do not use threading.Barrier.
Example #15
Source File: cnn_util.py From benchmarks with Apache License 2.0 | 5 votes |
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
    self.sess = sess
    self.num_gets = 0
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.done_event = threading.Event()
    if (use_python32_barrier and
            sys.version_info[0] == 3 and sys.version_info[1] >= 2):
        self.put_barrier = threading.Barrier(2)
    else:
        self.put_barrier = Barrier(2)
Example #16
Source File: cnn_util.py From parallax with Apache License 2.0 | 5 votes |
def __init__(self, sess, put_ops, batch_group_size):
    self.sess = sess
    self.num_gets = 0
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.done_event = threading.Event()
    if (FLAGS.use_python32_barrier and
            sys.version_info[0] == 3 and sys.version_info[1] >= 2):
        self.put_barrier = threading.Barrier(2)
    else:
        self.put_barrier = Barrier(2)
Example #17
Source File: test_functools.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def test_lru_cache_threaded2(self):
    # Simultaneous call with the same arguments
    n, m = 5, 7
    start = threading.Barrier(n+1)
    pause = threading.Barrier(n+1)
    stop = threading.Barrier(n+1)
    @self.module.lru_cache(maxsize=m*n)
    def f(x):
        pause.wait(10)
        return 3 * x
    self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
    def test():
        for i in range(m):
            start.wait(10)
            self.assertEqual(f(i), 3 * i)
            stop.wait(10)
    threads = [threading.Thread(target=test) for k in range(n)]
    with support.start_threads(threads):
        for i in range(m):
            start.wait(10)
            stop.reset()
            pause.wait(10)
            start.reset()
            stop.wait(10)
            pause.reset()
            self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
Example #18
Source File: _test_multiprocessing.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def setUp(self):
    self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
Example #19
Source File: _test_multiprocessing.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def test_action(self):
    """
    Test the 'action' callback
    """
    results = self.DummyList()
    barrier = self.Barrier(self.N, action=AppendTrue(results))
    self.run_threads(self._test_action_f, (barrier, results))
    self.assertEqual(len(results), 1)
Example #20
Source File: _test_multiprocessing.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def test_abort_and_reset(self):
    """
    Test that a barrier can be reset after being broken.
    """
    results1 = self.DummyList()
    results2 = self.DummyList()
    results3 = self.DummyList()
    barrier2 = self.Barrier(self.N)
    self.run_threads(self._test_abort_and_reset_f,
                     (self.barrier, barrier2, results1, results2, results3))
    self.assertEqual(len(results1), 0)
    self.assertEqual(len(results2), self.N-1)
    self.assertEqual(len(results3), self.N)
Example #21
Source File: _test_multiprocessing.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def test_single_thread(self):
    b = self.Barrier(1)
    b.wait()
    b.wait()
Example #22
Source File: test_locks.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def run_deadlock_avoidance_test(self, create_deadlock):
    NLOCKS = 10
    locks = [self.LockType(str(i)) for i in range(NLOCKS)]
    pairs = [(locks[i], locks[(i+1)%NLOCKS]) for i in range(NLOCKS)]
    if create_deadlock:
        NTHREADS = NLOCKS
    else:
        NTHREADS = NLOCKS - 1
    barrier = threading.Barrier(NTHREADS)
    results = []

    def _acquire(lock):
        """Try to acquire the lock. Return True on success,
        False on deadlock."""
        try:
            lock.acquire()
        except self.DeadlockError:
            return False
        else:
            return True

    def f():
        a, b = pairs.pop()
        ra = _acquire(a)
        barrier.wait()
        rb = _acquire(b)
        results.append((ra, rb))
        if rb:
            b.release()
        if ra:
            a.release()

    lock_tests.Bunch(f, NTHREADS).wait_for_finished()
    self.assertEqual(len(results), NTHREADS)
    return results
Example #23
Source File: synchronize.py From Fluid-Designer with GNU General Public License v3.0 | 5 votes |
def wait(self, timeout=None):
    with self._cond:
        if self._flag.acquire(False):
            self._flag.release()
        else:
            self._cond.wait(timeout)

        if self._flag.acquire(False):
            self._flag.release()
            return True
        return False

#
# Barrier
#
Example #24
Source File: synchronize.py From Imogen with MIT License | 5 votes |
def wait(self, timeout=None):
    with self._cond:
        if self._flag.acquire(False):
            self._flag.release()
        else:
            self._cond.wait(timeout)

        if self._flag.acquire(False):
            self._flag.release()
            return True
        return False

#
# Barrier
#
Example #25
Source File: cnn_util.py From deeplearning-benchmark with Apache License 2.0 | 5 votes |
def log_fn(log):
    print(log)
    if FLAGS.flush_stdout:
        sys.stdout.flush()


# For Python 2.7 compatibility, we do not use threading.Barrier.
Example #26
Source File: cnn_util.py From deeplearning-benchmark with Apache License 2.0 | 5 votes |
def __init__(self, sess, put_ops, batch_group_size):
    self.sess = sess
    self.num_gets = 0
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.done_event = threading.Event()
    if (FLAGS.use_python32_barrier and
            sys.version_info[0] == 3 and sys.version_info[1] >= 2):
        self.put_barrier = threading.Barrier(2)
    else:
        self.put_barrier = Barrier(2)
Example #27
Source File: _test_multiprocessing.py From ironpython3 with Apache License 2.0 | 5 votes |
def setUp(self):
    self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
Example #28
Source File: _test_multiprocessing.py From ironpython3 with Apache License 2.0 | 5 votes |
def test_action(self):
    """
    Test the 'action' callback
    """
    results = self.DummyList()
    barrier = self.Barrier(self.N, action=AppendTrue(results))
    self.run_threads(self._test_action_f, (barrier, results))
    self.assertEqual(len(results), 1)
Example #29
Source File: _test_multiprocessing.py From ironpython3 with Apache License 2.0 | 5 votes |
def test_abort_and_reset(self):
    """
    Test that a barrier can be reset after being broken.
    """
    results1 = self.DummyList()
    results2 = self.DummyList()
    results3 = self.DummyList()
    barrier2 = self.Barrier(self.N)
    self.run_threads(self._test_abort_and_reset_f,
                     (self.barrier, barrier2, results1, results2, results3))
    self.assertEqual(len(results1), 0)
    self.assertEqual(len(results2), self.N-1)
    self.assertEqual(len(results3), self.N)
Example #30
Source File: _test_multiprocessing.py From ironpython3 with Apache License 2.0 | 5 votes |
def test_default_timeout(self):
    """
    Test the barrier's default timeout
    """
    barrier = self.Barrier(self.N, timeout=0.5)
    results = self.DummyList()
    self.run_threads(self._test_default_timeout_f, (barrier, results))
    self.assertEqual(len(results), barrier.parties)