Python threading.Barrier() Examples

The following code examples show how to use threading.Barrier(). They are taken from open-source Python projects. You can vote up the examples you find helpful or vote down those you don't.

Example 1
Project: etesync-dav   Author: etesync   File: test_namedreversesemaphore.py    GNU General Public License v3.0 6 votes vote down vote up
def test_different_key_different_thread_wait(self):
        """Locks with different names must not block each other across threads.

        A worker first acquires/releases name2 on its own; then, while this
        thread holds name1, a second worker acquires name2.  Reaching the
        2-party barrier proves the worker got past its acquire while name1
        was still held.
        """
        name1 = str(uuid.uuid4())
        name2 = str(uuid.uuid4())
        barrier = threading.Barrier(2)

        thread = ExThread(target=thread_run, args=(name2, True), daemon=True)
        thread.start()
        thread.join()

        lock1 = NamedReverseSemaphore(name1)
        with lock1:
            thread = ExThread(target=thread_run, args=(name2, True, barrier), daemon=True)
            thread.start()
            barrier.wait()
            # FIXME: hack to make sure we acquired the lock in the other thread
            time.sleep(0.2)
        thread.join()
Example 2
Project: etesync-dav   Author: etesync   File: test_namedreversesemaphore.py    GNU General Public License v3.0 6 votes vote down vote up
def test_multiple_keys_different_thread(self):
        """Two workers holding two other names proceed while name1 is held.

        The 3-party barrier (main thread + 2 workers) confirms that holders
        of name2 and name3 both acquired their locks while this thread was
        still inside the name1 critical section.
        """
        name1 = str(uuid.uuid4())
        name2 = str(uuid.uuid4())
        name3 = str(uuid.uuid4())
        barrier = threading.Barrier(3)

        threads = []

        lock1 = NamedReverseSemaphore(name1)
        with lock1:
            threads.insert(0, ExThread(target=thread_run, args=(name2, True, barrier), daemon=True))
            threads[0].start()
            threads.insert(0, ExThread(target=thread_run, args=(name3, True, barrier), daemon=True))
            threads[0].start()

            barrier.wait()
            # FIXME: hack to make sure we acquired the lock in the other thread
            time.sleep(0.2)

        for thread in threads:
            thread.join()
Example 3
Project: etesync-dav   Author: etesync   File: test_namedreversesemaphore.py    GNU General Public License v3.0 6 votes vote down vote up
def test_multiple_keys_multiple_times_different_thread(self):
        """Multiple workers per name all proceed while name1 is held.

        Two workers each acquire name2 and name3 (4 workers total); the
        5-party barrier (main + 4 workers) confirms all of them acquired
        concurrently while this thread still held name1.
        """
        name1 = str(uuid.uuid4())
        name2 = str(uuid.uuid4())
        name3 = str(uuid.uuid4())
        barrier = threading.Barrier(5)

        threads = []

        lock1 = NamedReverseSemaphore(name1)
        with lock1:
            threads.insert(0, ExThread(target=thread_run, args=(name2, True, barrier), daemon=True))
            threads[0].start()
            threads.insert(0, ExThread(target=thread_run, args=(name2, True, barrier), daemon=True))
            threads[0].start()
            threads.insert(0, ExThread(target=thread_run, args=(name3, True, barrier), daemon=True))
            threads[0].start()
            threads.insert(0, ExThread(target=thread_run, args=(name3, True, barrier), daemon=True))
            threads[0].start()

            barrier.wait()
            # FIXME: hack to make sure we acquired the lock in the other thread
            time.sleep(0.2)

        for thread in threads:
            thread.join()
Example 4
Project: CrypTen   Author: facebookresearch   File: in_process_communicator.py    MIT License 6 votes vote down vote up
def __init__(self, rank, world_size):
        """Initialize one communicator participant for `rank` of `world_size`.

        The first thread to arrive performs the one-time class-level setup
        (one Queue per rank plus a Barrier sized to world_size); the class
        lock makes that setup race-free for the other threads.
        """
        self.world_size = world_size
        self.rank = rank
        self.reset_communication_stats()

        with InProcessCommunicator.lock:
            if InProcessCommunicator.mailbox is None:
                InProcessCommunicator.mailbox = [
                    Queue() for _ in range(self.world_size)
                ]

                # This prevents one thread from running ahead of the others and doing
                # multiple puts that would show up in the get calls below
                InProcessCommunicator.barrier = threading.Barrier(self.world_size)

        # logging:
        # Temporarily raise the root logger to INFO so the banner is always
        # emitted, then restore the caller's previous level.
        level = logging.getLogger().level
        logging.getLogger().setLevel(logging.INFO)
        logging.info("==================")
        logging.info("InProcessCommunicator with rank %d" % self.rank)
        logging.info("==================")

        logging.info("World size = %d" % self.get_world_size())
        logging.getLogger().setLevel(level)
Example 5
Project: cupy   Author: cupy   File: test_memory.py    MIT License 6 votes vote down vote up
def test_allocator_thread_local(self):
        """The allocator set via using_allocator() must be thread-local.

        BUG FIX: the original code called ``threading.Barrier(2)`` at every
        synchronization point.  That merely *constructs* a new, unused
        Barrier object and synchronizes nothing, so the two threads never
        ran in lockstep.  The fix creates ONE shared 2-party barrier and
        calls ``.wait()`` on it at each sync point (a Barrier automatically
        resets after each cycle, so it can be reused).
        """
        barrier = threading.Barrier(2)

        def thread_body(self):
            new_pool = memory.MemoryPool()
            with memory.using_allocator(new_pool.malloc):
                assert memory.get_allocator() == new_pool.malloc
                barrier.wait()
                arr = cupy.zeros(128, dtype=cupy.int64)
                barrier.wait()
                self.assertEqual(arr.data.mem.size, new_pool.used_bytes())
                barrier.wait()
            assert memory.get_allocator() == self.pool.malloc

        with cupy.cuda.Device(0):
            t = threading.Thread(target=thread_body, args=(self,))
            t.daemon = True
            t.start()
            # Each wait() pairs with the corresponding wait() in thread_body:
            # (1) both allocators installed, (2) both arrays allocated,
            # (3) both usage assertions done.
            barrier.wait()
            assert memory.get_allocator() == self.pool.malloc
            arr = cupy.ones(256, dtype=cupy.int64)
            barrier.wait()
            self.assertEqual(arr.data.mem.size, self.pool.used_bytes())
            barrier.wait()
            t.join()
Example 6
Project: garage   Author: clchiou   File: test_actors.py    MIT License 6 votes vote down vote up
def test_kill(self):
        """Killed actors reject new messages and complete their futures.

        For both graceful and non-graceful kills, greet() raises ActorError
        afterwards and the actor's future finishes.  The Blocker part then
        kills (non-gracefully) an actor that is blocked mid-message: its
        future stays pending and its queued messages are discarded.
        """
        for graceful in (True, False):
            greeter = Greeter()
            self.assertFalse(greeter._get_future().done())
            self.assertEqual('Hello world', greeter.greet().result())

            greeter._kill(graceful=graceful)

            with self.assertRaisesRegex(
                    actors.ActorError, r'actor has been killed'):
                greeter.greet()

            greeter._get_future().result(timeout=1)
            self.assertTrue(greeter._get_future().done())

        blocker = Blocker()
        barrier = threading.Barrier(2)
        event = threading.Event()
        blocker.wait(barrier, event)
        # Rendezvous with the actor thread: it is now blocked inside wait().
        barrier.wait()
        blocker._kill(graceful=False)
        self.assertFalse(blocker._get_future().done())
        self.assertFalse(blocker._Stub__msg_queue)
Example 7
Project: ACE   Author: ace-ecosystem   File: hunter.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, enabled=None, name=None, description=None, type=None,
                       frequency=None, tags=None):
        """Initialize a hunt definition.

        Args:
            enabled: whether this hunt is active.
            name: the hunt's name.
            description: human-readable description of the hunt.
            type: hunt type identifier (parameter name shadows the builtin
                but is kept for backward compatibility with callers).
            frequency: how often the hunt executes.
            tags: optional list of tags; defaults to a fresh empty list.

        BUG FIX: the original signature used a mutable default (``tags=[]``),
        so every instance constructed without tags shared ONE list — any
        append leaked into all such instances.  ``None`` is now the sentinel
        and a fresh list is created per instance; callers that passed tags
        explicitly are unaffected.
        """
        self.enabled = enabled
        self.name = name
        self.description = description
        self.type = type
        self.frequency = frequency
        self.tags = tags if tags is not None else []

        # the last time the hunt was executed
        # see the last_executed_time property
        #self.last_executed_time = None # datetime.datetime

        # a threading.RLock that is held while executing
        self.execution_lock = threading.RLock()

        # a way for the controlling thread to wait for the hunt execution thread to start
        self.startup_barrier = threading.Barrier(2)

        # if this is True then we're executing the Hunt outside of normal operations
        # in that case we don't want to record any of the execution time stamps
        self.manual_hunt = False
Example 8
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: barriers.py    MIT License 5 votes vote down vote up
def run(self):
      """Simulate a random amount of work, then rendezvous on the barrier.

      Each thread sleeps 1-10 s, reports how many peers are already parked
      on the barrier (n_waiting), and waits; all threads resume together
      once the last party arrives.
      """
      print("Thread {} working on something".format(threading.current_thread()))
      time.sleep(random.randint(1,10))
      print("Thread {} is joining {} waiting on Barrier".format(threading.current_thread(), self.barrier.n_waiting))
      self.barrier.wait()
      
      print("Barrier has been lifted, continuing with work")
Example 9
Project: NiujiaoDebugger   Author: MrSrc   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_cache_threaded2(self):
        # Simultaneous call with the same arguments
        """n threads call f(i) simultaneously; the cache coalesces per round.

        Three (n+1)-party barriers gate each of the m rounds: `start`
        releases the workers into f(i), `pause` holds every call inside f
        until all n have entered (so all n count as misses), and `stop`
        closes the round before the stats check: after round i there are
        0 hits, (i+1)*n misses and i+1 cached entries.
        """
        n, m = 5, 7
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
Example 10
Project: NiujiaoDebugger   Author: MrSrc   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def setUp(self):
        # One shared N-party barrier per test, with the suite's default
        # timeout so a broken barrier cannot hang the test run.
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
Example 11
Project: NiujiaoDebugger   Author: MrSrc   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def test_action(self):
        """
        Test the 'action' callback
        """
        # The action (AppendTrue) must run exactly once per barrier cycle,
        # in one arbitrary party, when the last of the N parties arrives —
        # hence exactly one element in results afterwards.
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)
Example 12
Project: NiujiaoDebugger   Author: MrSrc   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        # barrier2 gives the helper a second, unbroken barrier to coordinate
        # on while self.barrier is aborted and then reset; results1/2/3
        # capture each phase (see _test_abort_and_reset_f for the protocol).
        barrier2 = self.Barrier(self.N)

        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)
Example 13
Project: NiujiaoDebugger   Author: MrSrc   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def test_single_thread(self):
        """A 1-party barrier never blocks: every wait() releases immediately."""
        b = self.Barrier(1)
        b.wait()
        b.wait()
Example 14
Project: NiujiaoDebugger   Author: MrSrc   File: test_locks.py    GNU General Public License v3.0 5 votes vote down vote up
def run_deadlock_avoidance_test(self, create_deadlock):
        """Drive threads into a lock cycle and collect (first, second) results.

        Locks are arranged in a ring of adjacent pairs.  With
        create_deadlock=True every pair is used, closing the cycle; with
        False one thread is left out so a deadlock is impossible.  The
        barrier forces every thread to hold its first lock before any
        tries its second — that is what creates the potential deadlock
        the LockType under test must detect (DeadlockError -> False).
        Returns the list of per-thread (acquired_a, acquired_b) booleans.
        """
        NLOCKS = 10
        locks = [self.LockType(str(i)) for i in range(NLOCKS)]
        pairs = [(locks[i], locks[(i+1)%NLOCKS]) for i in range(NLOCKS)]
        if create_deadlock:
            NTHREADS = NLOCKS
        else:
            NTHREADS = NLOCKS - 1
        barrier = threading.Barrier(NTHREADS)
        results = []

        def _acquire(lock):
            """Try to acquire the lock. Return True on success,
            False on deadlock."""
            try:
                lock.acquire()
            except self.DeadlockError:
                return False
            else:
                return True

        def f():
            a, b = pairs.pop()
            ra = _acquire(a)
            barrier.wait()
            rb = _acquire(b)
            results.append((ra, rb))
            if rb:
                b.release()
            if ra:
                a.release()
        lock_tests.Bunch(f, NTHREADS).wait_for_finished()
        self.assertEqual(len(results), NTHREADS)
        return results
Example 15
Project: NiujiaoDebugger   Author: MrSrc   File: synchronize.py    GNU General Public License v3.0 5 votes vote down vote up
def wait(self, timeout=None):
        """Block until the flag is set, or until `timeout` seconds elapse.

        The flag is modelled as a lock/semaphore: a successful non-blocking
        acquire means "set" (it is released again immediately so other
        waiters can observe it too).  Returns True if the flag was set when
        we woke up, False if the wait timed out with the flag still clear.
        """
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

#
# Barrier
# 
Example 16
Project: eventsourcing   Author: johnbywater   File: runner.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(self, *args: Any, **kwargs: Any):
        """Set up runner state; the barriers themselves are created in start()."""
        super(SteppingMultiThreadedRunner, self).__init__(*args, **kwargs)
        # Populated lazily by start(): one party per process plus the clock.
        self.fetch_barrier: Optional[Barrier] = None
        self.execute_barrier: Optional[Barrier] = None
        self.clock_thread = None
        self.stop_event = Event()
        # Bookkeeping for the application threads, keyed by process name.
        self.seen_prompt_events: Dict[str, Event] = {}
        self.application_threads: Dict[str, BarrierControlledApplicationThread] = {}
Example 17
Project: eventsourcing   Author: johnbywater   File: runner.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def start(self) -> None:
        """Create the lockstep barriers and launch application/clock threads.

        Both barriers have one party per process plus one for the clock
        thread; the clock drives every BarrierControlledApplicationThread
        through alternating fetch/execute phases.
        """
        super(SteppingMultiThreadedRunner, self).start()
        parties = 1 + len(self.processes)
        self.fetch_barrier = Barrier(parties)
        self.execute_barrier = Barrier(parties)

        # Create an event for each process.
        for process_name in self.processes:
            self.seen_prompt_events[process_name] = Event()

        # Construct application threads.
        for process_name, process in self.processes.items():
            process_instance_id = process_name

            thread = BarrierControlledApplicationThread(
                process=process,
                fetch_barrier=self.fetch_barrier,
                execute_barrier=self.execute_barrier,
                stop_event=self.stop_event,
            )
            self.application_threads[process_instance_id] = thread

        # Start application threads.
        for thread in self.application_threads.values():
            thread.start()

        # Start clock thread.
        self.clock_thread = BarrierControllingClockThread(
            normal_speed=self.normal_speed,
            scale_factor=self.scale_factor,
            tick_interval=self.tick_interval,
            fetch_barrier=self.fetch_barrier,
            execute_barrier=self.execute_barrier,
            stop_event=self.stop_event,
            is_verbose=self.is_verbose,
        )
        self.clock_thread.start()
Example 18
Project: eventsourcing   Author: johnbywater   File: runner.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(
        self,
        process: ProcessApplication,
        fetch_barrier: Barrier,
        execute_barrier: Barrier,
        stop_event: Event,
    ):
        """Daemon thread wrapping one application, paced by shared barriers.

        The barriers and stop event are shared with the runner's clock
        thread, which steps all applications in fetch/execute lockstep.
        """
        super(BarrierControlledApplicationThread, self).__init__(daemon=True)
        self.app = process
        self.fetch_barrier = fetch_barrier
        self.execute_barrier = execute_barrier
        self.stop_event = stop_event
Example 19
Project: eventsourcing   Author: johnbywater   File: runner.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def __init__(
        self,
        normal_speed: int,
        scale_factor: int,
        tick_interval: Optional[Union[int, float]],
        fetch_barrier: Barrier,
        execute_barrier: Barrier,
        stop_event: Event,
        is_verbose: bool = False,
    ):
        """Daemon clock thread that paces the shared fetch/execute barriers.

        The tick-duration window size is derived from the tick interval —
        roughly one second's worth of ticks — and falls back to 100 samples
        when no interval is configured.
        """
        super(BarrierControllingClockThread, self).__init__(daemon=True)
        # Todo: Remove the redundancy here.
        self.normal_speed = normal_speed
        self.scale_factor = scale_factor
        self.tick_interval = tick_interval
        self.fetch_barrier = fetch_barrier
        self.execute_barrier = execute_barrier
        self.stop_event = stop_event
        self.last_tick_time = 0.0
        self.last_process_time = 0.0
        self.all_tick_durations: Deque = deque()
        self.tick_adjustment: float = 0.0
        self.is_verbose = is_verbose
        if self.tick_interval:

            self.tick_durations_window_size = max(
                1, int(round(1 / self.tick_interval, 0))
            )
        else:
            self.tick_durations_window_size = 100
Example 20
Project: benchmarks   Author: tensorflow   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def roll_numpy_batches(array, batch_size, shift_ratio):
  """Rotates whole batches from the front of `array` to its back.

  The number of batches moved is int(num_batches * shift_ratio), i.e.
  rounded down to a whole batch.  For example,

  ```
  roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
  ```

  Args:
    array: A Numpy array whose first dimension is the batch dimension.
    batch_size: Number of items per batch; must evenly divide the length.
    shift_ratio: Fraction of the batches to rotate to the end of the array.
  Returns:
    A new Numpy array with the leading batches moved to the end.
  """
  total_items = array.shape[0]
  assert total_items % batch_size == 0
  batches_to_move = int((total_items // batch_size) * shift_ratio)
  return np.roll(array, -batches_to_move * batch_size, axis=0)


# For Python 2.7 compatibility, we do not use threading.Barrier. 
Example 21
Project: benchmarks   Author: tensorflow   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
    """Store session/op state and choose a 2-party put/get barrier.

    threading.Barrier only exists on Python >= 3.2, so the stdlib barrier
    is used when requested and available; otherwise the module's own
    Barrier fallback is used.
    """
    self.sess = sess
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.num_gets = 0
    self.done_event = threading.Event()
    version = sys.version_info
    stdlib_barrier_available = version[0] == 3 and version[1] >= 2
    if use_python32_barrier and stdlib_barrier_available:
      self.put_barrier = threading.Barrier(2)
    else:
      self.put_barrier = Barrier(2)
Example 22
Project: irc-rss-feed-bot   Author: impredicative   File: bot.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def _setup_channels(self) -> None:
        """Create per-channel queues/threads and per-group feed barriers.

        For each configured channel this starts one messenger thread with a
        bounded queue, plus one reader thread per feed.  Feeds declaring a
        'group' are tallied, and one Barrier per group — sized to the number
        of member feeds — is installed in FEED_GROUP_BARRIERS so grouped
        feeds can synchronise.
        """
        instance = config.INSTANCE
        channels = instance['feeds']
        channels_str = ', '.join(channels)
        log.debug('Setting up threads and queues for %s channels (%s) and their feeds with %s currently active '
                  'threads.', len(channels), channels_str, threading.active_count())
        num_feeds_setup = 0
        num_reads_daily = 0
        barriers_parties: Dict[str, int] = {}
        for channel, channel_config in channels.items():
            log.debug('Setting up threads and queue for %s.', channel)
            num_channel_feeds = len(channel_config)
            self.CHANNEL_JOIN_EVENTS[channel] = threading.Event()
            self.CHANNEL_QUEUES[channel] = queue.Queue(maxsize=num_channel_feeds * 2)
            threading.Thread(target=self._msg_channel, name=f'ChannelMessenger-{channel}',
                             args=(channel,)).start()
            for feed, feed_config in channel_config.items():
                threading.Thread(target=self._read_feed, name=f'FeedReader-{channel}-{feed}',
                                 args=(channel, feed)).start()
                num_feeds_setup += 1
                # Estimated daily reads, bounded below by the minimum period.
                num_reads_daily += \
                    (24 / max(config.PERIOD_HOURS_MIN, feed_config.get('period', config.PERIOD_HOURS_DEFAULT)))
                if feed_config.get('group'):
                    group = feed_config['group']
                    barriers_parties[group] = barriers_parties.get(group, 0) + 1
            log.debug('Finished setting up threads and queue for %s and its %s feeds with %s currently active threads.',
                      channel, num_channel_feeds, threading.active_count())
        for barrier, parties in barriers_parties.items():
            self.FEED_GROUP_BARRIERS[barrier] = threading.Barrier(parties)
        log.info('Finished setting up %s channels (%s) and their %s feeds with %s currently active threads.',
                 len(channels), channels_str, num_feeds_setup, threading.active_count())
        log.info('Ignoring any caches, %s URL reads are expected daily, i.e. once every %s on an average.',
                 f'{round(num_reads_daily):n}', timedelta_desc(datetime.timedelta(days=1) / num_reads_daily))

# Refs: https://tools.ietf.org/html/rfc1459 https://modern.ircdocs.horse 
Example 23
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_functools.py    GNU General Public License v2.0 5 votes vote down vote up
def test_lru_cache_threaded2(self):
        # Simultaneous call with the same arguments
        """n threads call f(i) simultaneously; the cache coalesces per round.

        Three (n+1)-party barriers gate each of the m rounds: `start`
        releases the workers into f(i), `pause` holds every call inside f
        until all n have entered (so all n count as misses), and `stop`
        closes the round before the stats check: after round i there are
        0 hits, (i+1)*n misses and i+1 cached entries.
        """
        n, m = 5, 7
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
Example 24
Project: aws-lambda-runtime-pypy   Author: uscheller   File: synchronize.py    Apache License 2.0 5 votes vote down vote up
def wait(self, timeout=None):
        """Block until the flag is set, or until `timeout` seconds elapse.

        The flag is modelled as a lock/semaphore: a successful non-blocking
        acquire means "set" (it is released again immediately so other
        waiters can observe it too).  Returns True if the flag was set when
        we woke up, False if the wait timed out with the flag still clear.
        """
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

#
# Barrier
# 
Example 25
Project: setup   Author: mindbender-studio   File: synchronize.py    MIT License 5 votes vote down vote up
def wait(self, timeout=None):
        """Block until the flag is set, or until `timeout` seconds elapse.

        The flag is modelled as a lock/semaphore: a successful non-blocking
        acquire means "set" (it is released again immediately so other
        waiters can observe it too).  Returns True if the flag was set when
        we woke up, False if the wait timed out with the flag still clear.
        """
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

#
# Barrier
# 
Example 26
Project: parallax   Author: snuspl   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, sess, put_ops, batch_group_size):
        """Store session/op state and choose a 2-party put/get barrier.

        threading.Barrier only exists on Python >= 3.2, so the stdlib
        barrier is used when FLAGS.use_python32_barrier requests it and the
        interpreter supports it; otherwise the module's own Barrier
        fallback is used.
        """
        self.sess = sess
        self.put_ops = put_ops
        self.batch_group_size = batch_group_size
        self.num_gets = 0
        self.done_event = threading.Event()
        version = sys.version_info
        stdlib_barrier_available = version[0] == 3 and version[1] >= 2
        if FLAGS.use_python32_barrier and stdlib_barrier_available:
            self.put_barrier = threading.Barrier(2)
        else:
            self.put_barrier = Barrier(2)
Example 27
Project: chipy2019   Author: ramalho   File: real_barrier.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def main():
    """Run total_threads workers that all meet on one barrier.

    BUG FIX: the original passed ``work`` positionally as Thread's first
    parameter (``group``), which raises at construction, and it never
    called ``start()`` — so ``join()`` returned immediately and no work
    ran.  Threads are now built with target=/args= and started before
    being joined.
    """
    # show_barrier runs once, as the barrier action, when the last of the
    # total_threads parties arrives.
    barrier = threading.Barrier(total_threads, show_barrier)
    counter_lock = threading.Lock()
    threads = [Thread(target=work, args=(t, barrier))
               for t in range(total_threads)]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
Example 28
Project: yappi   Author: mottosso   File: test_functionality.py    MIT License 5 votes vote down vote up
def test_barrier(self):
        """Profiling with yappi must not break Barrier timeout semantics.

        Only the worker waits on the 2-party barrier (the main-thread wait
        is deliberately commented out), so the 1-second timeout elapses and
        the worker sees BrokenBarrierError, which it swallows; any other
        exception is converted into a failure.
        """
        yappi.start()
        b = threading.Barrier(2, timeout=1)
        def worker():
            try:
                b.wait()
            except threading.BrokenBarrierError:
                pass
            except Exception:
                raise Exception("BrokenBarrierError not raised")
        t1 = threading.Thread(target=worker)
        t1.start()
        #b.wait()
        t1.join()
        yappi.stop()
Example 29
Project: deeplearning-benchmark   Author: awslabs   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def log_fn(log):
  """Print `log`; force-flush stdout when FLAGS.flush_stdout is set."""
  print(log)
  if FLAGS.flush_stdout:
    sys.stdout.flush()


# For Python 2.7 compatibility, we do not use threading.Barrier. 
Example 30
Project: deeplearning-benchmark   Author: awslabs   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, sess, put_ops, batch_group_size):
    """Store session/op state and choose a 2-party put/get barrier.

    threading.Barrier only exists on Python >= 3.2, so the stdlib barrier
    is used when FLAGS.use_python32_barrier requests it and the interpreter
    supports it; otherwise the module's own Barrier fallback is used.
    """
    self.sess = sess
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.num_gets = 0
    self.done_event = threading.Event()
    version = sys.version_info
    stdlib_barrier_available = version[0] == 3 and version[1] >= 2
    if FLAGS.use_python32_barrier and stdlib_barrier_available:
      self.put_barrier = threading.Barrier(2)
    else:
      self.put_barrier = Barrier(2)
Example 31
Project: garage   Author: clchiou   File: test_executors.py    MIT License 5 votes vote down vote up
def test_priority(self):
        """Queued tasks drain in priority order once the worker is released.

        A priority -1 b.wait task parks the single worker on the barrier
        first, so all the append tasks pile up in the priority queue; after
        the main thread releases the barrier they must execute in ascending
        priority order regardless of submission order.
        """
        actual = []
        b = threading.Barrier(2)
        with executors.PriorityExecutor(1, default_priority=0) as executor:
            executor.submit_with_priority(-1, b.wait)
            fs = [
                executor.submit_with_priority(i, actual.append, i)
                for i in (0, 5, 2, 3, 4, 1)
            ]
            b.wait()
        for f in fs:
            f.get_result()
        self.assertEqual(actual, [0, 1, 2, 3, 4, 5])
Example 32
Project: garage   Author: clchiou   File: test_executors.py    MIT License 5 votes vote down vote up
def test_default_priority(self):
        """With one shared default priority, only the task *set* is defined.

        The b.wait task parks the single worker so the appends queue up;
        since every task has priority 0 the heap gives no stable ordering,
        hence the set comparison instead of a list comparison.
        """
        actual = []
        b = threading.Barrier(2)
        with executors.PriorityExecutor(1, default_priority=0) as executor:
            executor.submit(b.wait)
            fs = [
                executor.submit(actual.append, i) for i in (0, 5, 2, 3, 4, 1)
            ]
            b.wait()
        for f in fs:
            f.get_result()
        # Heap order is not stable.
        self.assertEqual(set(actual), {0, 5, 2, 3, 4, 1})
Example 33
Project: garage   Author: clchiou   File: test_executors.py    MIT License 5 votes vote down vote up
def test_fifo(self):
        """The plain Executor runs queued tasks in submission (FIFO) order.

        The b.wait task parks the single worker on the barrier so all the
        append tasks are queued before any run; after release they must
        execute exactly in the order they were submitted.
        """
        actual = []
        b = threading.Barrier(2)
        with executors.Executor(1) as executor:
            executor.submit(b.wait)
            fs = [
                executor.submit(actual.append, i) for i in (0, 5, 2, 3, 4, 1)
            ]
            b.wait()
        for f in fs:
            f.get_result()
        self.assertEqual(actual, [0, 5, 2, 3, 4, 1])
Example 34
Project: garage   Author: clchiou   File: test_actors.py    MIT License 5 votes vote down vote up
def test_busy(self):
        """A message blocked inside an actor can still complete normally.

        The 2-party barrier rendezvouses with the actor thread once it is
        inside wait(); setting the event then unblocks the message so its
        future resolves.
        """
        blocker = Blocker()

        barrier = threading.Barrier(2)
        event = threading.Event()
        future = blocker.wait(barrier, event)

        barrier.wait()

        event.set()
        future.result()
Example 35
Project: garage   Author: clchiou   File: test_queues.py    MIT License 5 votes vote down vote up
def test_close_while_blocked(self):
        """Closing a queue wakes blocked get()/put() callers with Closed.

        The barrier synchronises with the executor thread just before it
        blocks (with and without a timeout); the short sleep is then a
        best-effort wait for it to actually be inside get()/put() when
        close() is called.  The full-queue case repeats the test for put().
        """
        queue = queues.Queue()
        for kwargs in [{}, {'timeout': 10}]:
            barrier = threading.Barrier(2)
            with ThreadPoolExecutor(1) as executor:
                future = executor.submit(
                    call_func, barrier, queue.get, kwargs)
                barrier.wait()
                # XXX: I hope that, after sleep, the executor thread is
                # blocked inside get() or it will raise Empty.
                time.sleep(0.01)
                queue.close()
                with self.assertRaises(queues.Closed):
                    future.result()

        queue = queues.Queue(capacity=1)
        queue.put(1)  # Make it full.
        for kwargs in [{'item': 1}, {'item': 1, 'timeout': 10}]:
            barrier = threading.Barrier(2)
            with ThreadPoolExecutor(1) as executor:
                future = executor.submit(
                    call_func, barrier, queue.put, kwargs)
                barrier.wait()
                # XXX: I hope that, after sleep, the executor thread is
                # blocked inside put() or it will raise Full.
                time.sleep(0.01)
                queue.close()
                with self.assertRaises(queues.Closed):
                    future.result()
Example 36
Project: garage   Author: clchiou   File: test_isolate.py    MIT License 5 votes vote down vote up
def test_isolate(self):
        """Each thread gets its own independent V8 isolate and context.

        The N-party barrier holds every thread until all N isolates and
        contexts exist concurrently, then each thread executes script in
        its own context.  Note `barrier` is resolved in run() as a closure
        over the name bound below, before the threads start.
        """

        def run():
            with v8.isolate() as isolate, isolate.context() as context:
                barrier.wait()
                self.assertEqual(
                    'hello world', context.execute('"hello world"'))

        N = 3
        barrier = threading.Barrier(N)
        threads = [threading.Thread(target=run) for _ in range(N)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
Example 37
Project: pvcheck   Author: claudio-unipv   File: interactiveformatter.py    MIT License 5 votes vote down vote up
def begin_session(self):
        """Reset counters and launch the UI thread, blocking until it is up.

        The 2-party barrier makes this call wait for the UI thread
        (presumably _thread_body waits on the same barrier once it has
        initialised — confirm there), so the formatter is usable as soon
        as this method returns.
        """
        self._err_count = self._warn_count = self._ok_count = 0
        self._running = True
        # Start the UI thread
        self._initialization_barrier = threading.Barrier(2)
        self._thread = threading.Thread(target=self._thread_body)
        self._thread.start()
        self._initialization_barrier.wait()
Example 38
Project: AOFP   Author: DingXiaoH   File: cnn_util.py    MIT License 5 votes vote down vote up
def roll_numpy_batches(array, batch_size, shift_ratio):
  """Rotates whole batches from the front of `array` to its back.

  The number of batches moved is int(num_batches * shift_ratio), i.e.
  rounded down to a whole batch.  For example,

  ```
  roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
  ```

  Args:
    array: A Numpy array whose first dimension is the batch dimension.
    batch_size: Number of items per batch; must evenly divide the length.
    shift_ratio: Fraction of the batches to rotate to the end of the array.
  Returns:
    A new Numpy array with the leading batches moved to the end.
  """
  total_items = array.shape[0]
  assert total_items % batch_size == 0
  batches_to_move = int((total_items // batch_size) * shift_ratio)
  return np.roll(array, -batches_to_move * batch_size, axis=0)


# For Python 2.7 compatibility, we do not use threading.Barrier. 
Example 39
Project: AOFP   Author: DingXiaoH   File: cnn_util.py    MIT License 5 votes vote down vote up
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
    """Store session/op state and choose a 2-party put/get barrier.

    threading.Barrier only exists on Python >= 3.2, so the stdlib barrier
    is used when requested and available; otherwise the module's own
    Barrier fallback is used.
    """
    self.sess = sess
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.num_gets = 0
    self.done_event = threading.Event()
    version = sys.version_info
    stdlib_barrier_available = version[0] == 3 and version[1] >= 2
    if use_python32_barrier and stdlib_barrier_available:
      self.put_barrier = threading.Barrier(2)
    else:
      self.put_barrier = Barrier(2)
Example 40
Project: tf-imagenet   Author: balancap   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def log_fn(log):
  """Print `log`; force-flush stdout when FLAGS.flush_stdout is set."""
  print(log)
  if FLAGS.flush_stdout:
    sys.stdout.flush()


# For Python 2.7 compatibility, we do not use threading.Barrier. 
Example 41
Project: tf-imagenet   Author: balancap   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, sess, put_ops, batch_group_size):
    """Store session/op state and choose a 2-party put/get barrier.

    threading.Barrier only exists on Python >= 3.2, so the stdlib barrier
    is used when FLAGS.use_python32_barrier requests it and the interpreter
    supports it; otherwise the module's own Barrier fallback is used.
    """
    self.sess = sess
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.num_gets = 0
    self.done_event = threading.Event()
    version = sys.version_info
    stdlib_barrier_available = version[0] == 3 and version[1] >= 2
    if FLAGS.use_python32_barrier and stdlib_barrier_available:
      self.put_barrier = threading.Barrier(2)
    else:
      self.put_barrier = Barrier(2)
Example 42
Project: injectpy   Author: rafales   File: scoping_test.py    MIT License 5 votes vote down vote up
def test_singleton_scope_is_thread_safe(request: Any) -> None:
    """
    When using singleton scope we must ensure that concurrent access
    is safe between different threads: every thread must receive the
    exact same instance from the kernel.
    """
    kernel = Kernel()

    kernel.bind(InMemoryFileSystem, lifetime=Lifetime.singleton)
    instances: List[InMemoryFileSystem] = []
    NUM_THREADS = 5
    # All workers rendezvous here so they hit kernel.get() at the same time.
    barrier = threading.Barrier(NUM_THREADS, timeout=1)

    # we need to set "switch interval" to something really low to
    # trigger this problem reliably.
    interval = sys.getswitchinterval()
    request.addfinalizer(lambda: sys.setswitchinterval(interval))
    sys.setswitchinterval(0.00000001)

    def worker() -> None:
        barrier.wait()
        inst = kernel.get(InMemoryFileSystem)
        instances.append(inst)

    threads = [threading.Thread(target=worker) for _ in range(NUM_THREADS)]
    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()

    assert len(instances) == NUM_THREADS
    # Every thread must have observed the one-and-only singleton instance.
    # (Removed a leftover debug print(instances) that polluted test output.)
    for inst in instances[1:]:
        assert instances[0] is inst
Example 43
Project: RibbaPi   Author: stahlfabrik   File: abstract_animation.py    GNU General Public License v3.0 5 votes vote down vote up
def run(self):
        """This is the run method from threading.Thread: records the start
        time, marks the animation as running, then enters animate()."""
        # TODO: use a threading.Barrier to synchronise with ribbapi
        # print("Starting")

        # Record wall-clock start and set the running flag before handing
        # control to the subclass animation loop.
        self.started = time.time()
        self._running = True
        self.animate()

    # def start(self):
Example 44
Project: GoogleAi   Author: nattimmis   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def roll_numpy_batches(array, batch_size, shift_ratio):
  """Moves a proportion of batches from start to the end of the array.

  This function moves a proportion of batches, specified by `shift_ratio`, from
  the starts of the array to the end. The number of batches moved is rounded
  down to the nearest integer. For example,

  ```
  roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
  ```

  Args:
    array: A Numpy array whose first dimension is the batch dimension.
    batch_size: The batch size.
    shift_ratio: Proportion of batches to move from the start of the array to
      the end of the array.
  Returns:
    A new Numpy array, with a proportion of the batches at the start of `array`
    moved to the end.
  Raises:
    ValueError: If the first dimension of `array` is not a multiple of
      `batch_size`.
  """
  num_items = array.shape[0]
  # Validate with an explicit exception instead of `assert`, which is
  # stripped when Python runs with -O and would then silently roll a
  # partial batch.
  if num_items % batch_size != 0:
    raise ValueError(
        'array size %d is not a multiple of batch_size %d' %
        (num_items, batch_size))
  num_batches = num_items // batch_size
  starting_batch = int(num_batches * shift_ratio)  # rounded down
  starting_item = starting_batch * batch_size
  # A negative shift in np.roll moves the leading items to the end.
  return np.roll(array, -starting_item, axis=0)


# For Python 2.7 compatibility, we do not use threading.Barrier. 
Example 45
Project: GoogleAi   Author: nattimmis   File: cnn_util.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
    """Store feeder state and pick a two-party barrier implementation.

    Args:
      sess: session used to run `put_ops` (stored only; presumably a TF
        session — confirm against the put/get loop elsewhere in the file).
      put_ops: ops that enqueue preprocessed batches.
      batch_group_size: number of batches handled per put/get cycle.
      use_python32_barrier: prefer the stdlib threading.Barrier when the
        interpreter is Python >= 3.2.
    """
    self.sess = sess
    self.num_gets = 0
    self.put_ops = put_ops
    self.batch_group_size = batch_group_size
    self.done_event = threading.Event()
    # threading.Barrier exists only on Python >= 3.2; otherwise fall back to
    # the module's pure-Python Barrier (kept for Python 2.7 compatibility).
    if (use_python32_barrier and
        sys.version_info[0] == 3 and sys.version_info[1] >= 2):
      self.put_barrier = threading.Barrier(2)
    else:
      self.put_barrier = Barrier(2)
Example 46
Project: CarlaScenarioLoader   Author: MrMushroom   File: timed_event_handler.py    MIT License 5 votes vote down vote up
def __init__(self):
        """Initialise the handler: no sim times yet, no subscribers, and a
        one-party sync barrier (the simulator control itself)."""
        self.__currentSimTime = None
        self.__previousSimTime = None
        self.__events = None
        self.__subscribers = {}
        # The barrier grows/shrinks as subscribers (un)register; see
        # subscribe()/unsubscribe().
        self.__syncBarrier = Barrier(1) # (1): Simulator Control Blocks too
        # Protects the barrier/subscriber bookkeeping above.
        self.__syncLock = Lock()
        self.__isStarted = False
        self.__cleared = True
Example 47
Project: CarlaScenarioLoader   Author: MrMushroom   File: timed_event_handler.py    MIT License 5 votes vote down vote up
def subscribe(self, name, updateMethod):
        """Register *name*'s update callback and grow the sync barrier by one.

        Raises:
            Exception: if threads are currently waiting on the barrier (i.e.
                a subscribe was attempted at runtime), or if *name* is
                already subscribed.
        """
        # Hold the lock via a context manager so it is released even when we
        # raise below: the previous acquire()/release() pair leaked the lock
        # on both error paths, deadlocking every later subscribe/unsubscribe.
        with self.__syncLock:
            if self.__syncBarrier.n_waiting != 0:
                raise Exception(name, "tried to subscribe during runtime (syncBarrier has", self.__syncBarrier.n_waiting, "threads waiting)")
            if name in self.__subscribers:
                raise Exception(name, "already subscribed")
            else:
                # Barriers cannot change party count in place: replace it.
                self.__syncBarrier = Barrier(self.__syncBarrier.parties + 1)
                self.__subscribers[name] = updateMethod
Example 48
Project: CarlaScenarioLoader   Author: MrMushroom   File: timed_event_handler.py    MIT License 5 votes vote down vote up
def unsubscribe(self, name):
        """Remove *name*'s subscription and shrink the sync barrier by one.

        Raises:
            Exception: if threads are waiting on an unbroken barrier, i.e.
                an unsubscribe was attempted at runtime.
            KeyError: if *name* was never subscribed.
        """
        # Context manager guarantees the lock is released even when the
        # runtime check raises or `del` raises KeyError: the previous
        # acquire()/release() pair left the lock held on those paths,
        # deadlocking all later calls.
        with self.__syncLock:
            if self.__syncBarrier.n_waiting != 0 and not self.__syncBarrier.broken:
                raise Exception(name, "tried to unsubscribe during runtime (syncBarrier has", self.__syncBarrier.n_waiting, "threads waiting)")
            del self.__subscribers[name]
            # Barriers cannot change party count in place: replace it.
            self.__syncBarrier = Barrier(self.__syncBarrier.parties - 1)
Example 49
Project: Learning-Concurrency-in-Python-Video-   Author: PacktPublishing   File: barriers.py    MIT License 5 votes vote down vote up
def run(self):
      """Simulate some work, then block on the shared barrier until every
      participating thread has arrived."""
      print("Thread {} working on something".format(threading.current_thread()))
      # Random 1-10s delay so threads reach the barrier at different times.
      time.sleep(random.randint(1,10))
      print("Thread {} is joining {} waiting on Barrier".format(threading.current_thread(), self.barrier.n_waiting))
      # Blocks until self.barrier.parties threads have called wait().
      self.barrier.wait()

      print("Barrier has been lifted, continuing with work")
Example 50
Project: docker   Author: getavalon   File: synchronize.py    MIT License 5 votes vote down vote up
def wait(self, timeout=None):
        """Block until the event is set or *timeout* seconds elapse.

        Returns True if the event flag is set when the call returns, False
        if the wait timed out first.
        """
        with self._cond:
            # Probe the semaphore-backed flag without blocking: success means
            # the event is already set (release immediately to restore the
            # count); failure means we must sleep on the condition.
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # Probe again after waking to report whether the event was set
            # or the condition wait simply timed out.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

#
# Barrier
# 
Example 51
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: test_functools.py    GNU General Public License v3.0 5 votes vote down vote up
def test_lru_cache_threaded2(self):
        # Simultaneous call with the same arguments.
        # n worker threads each compute f(0..m-1); three barriers (each with
        # n+1 parties, the extra one being this main thread) step the workers
        # in lockstep, one argument per round.
        n, m = 5, 7
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            # Every concurrent caller blocks inside f until the main thread
            # joins the 'pause' barrier, making the calls truly simultaneous.
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                # cache_info is (hits, misses, maxsize, currsize): after round
                # i there are no hits, (i+1)*n misses and i+1 cached entries —
                # concurrent identical calls may each miss, but the cache must
                # remain consistent.
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
Example 52
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def setUp(self):
        # Fresh barrier per test: self.N parties with the class default
        # timeout, so a hung test errors out instead of deadlocking forever.
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
Example 53
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def test_action(self):
        """The barrier's 'action' callback must run exactly once per cycle."""
        collected = self.DummyList()
        # AppendTrue (defined elsewhere) records each action invocation.
        sync = self.Barrier(self.N, action=AppendTrue(collected))
        self.run_threads(self._test_action_f, (sync, collected))
        self.assertEqual(len(collected), 1)
Example 54
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        # Second, independent barrier for the workers to use after the first
        # one has been broken and reset.
        barrier2 = self.Barrier(self.N)

        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        # The expected counts come from _test_abort_and_reset_f (defined
        # elsewhere); presumably: nobody passes the aborted wait (results1),
        # N-1 threads observe the broken barrier (results2), and all N pass
        # after the reset (results3).
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)
Example 55
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: _test_multiprocessing.py    GNU General Public License v3.0 5 votes vote down vote up
def test_single_thread(self):
        b = self.Barrier(1)
        b.wait()
        b.wait() 
Example 56
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: test_locks.py    GNU General Public License v3.0 5 votes vote down vote up
def run_deadlock_avoidance_test(self, create_deadlock):
            """Exercise the lock type's deadlock detection.

            Builds a ring of NLOCKS locks; each thread grabs a neighbouring
            pair. With NTHREADS == NLOCKS the ring closes and a deadlock is
            deliberately created; with one fewer thread it cannot occur.
            Returns the list of (first_acquired, second_acquired) booleans,
            one per thread.
            """
            NLOCKS = 10
            locks = [self.LockType(str(i)) for i in range(NLOCKS)]
            # Pair each lock with its ring neighbour: thread k takes locks[k]
            # first, then locks[(k+1) % NLOCKS].
            pairs = [(locks[i], locks[(i+1)%NLOCKS]) for i in range(NLOCKS)]
            if create_deadlock:
                NTHREADS = NLOCKS
            else:
                NTHREADS = NLOCKS - 1
            barrier = threading.Barrier(NTHREADS)
            results = []

            def _acquire(lock):
                """Try to acquire the lock. Return True on success,
                False on deadlock."""
                try:
                    lock.acquire()
                except self.DeadlockError:
                    return False
                else:
                    return True

            def f():
                a, b = pairs.pop()
                ra = _acquire(a)
                # Wait until every thread holds its first lock before trying
                # the second, so the cycle (if any) is fully formed.
                barrier.wait()
                rb = _acquire(b)
                results.append((ra, rb))
                if rb:
                    b.release()
                if ra:
                    a.release()
            lock_tests.Bunch(f, NTHREADS).wait_for_finished()
            self.assertEqual(len(results), NTHREADS)
            return results
Example 57
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: synchronize.py    GNU General Public License v3.0 5 votes vote down vote up
def wait(self, timeout=None):
        """Block until the event is set or *timeout* seconds elapse.

        Returns True if the event flag is set when the call returns, False
        if the wait timed out first.
        """
        with self._cond:
            # Probe the semaphore-backed flag without blocking: success means
            # the event is already set (release immediately to restore the
            # count); failure means we must sleep on the condition.
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # Probe again after waking to report whether the event was set
            # or the condition wait simply timed out.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

#
# Barrier
# 
Example 58
Project: anki-search-inside-add-card   Author: fonol   File: synchronize.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def wait(self, timeout=None):
        """Block until the event is set or *timeout* seconds elapse.

        Returns True if the event flag is set when the call returns, False
        if the wait timed out first.
        """
        with self._cond:
            # Probe the semaphore-backed flag without blocking: success means
            # the event is already set (release immediately to restore the
            # count); failure means we must sleep on the condition.
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # Probe again after waking to report whether the event was set
            # or the condition wait simply timed out.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

#
# Barrier
# 
Example 59
Project: cells   Author: AlesTsurko   File: test_functools.py    MIT License 5 votes vote down vote up
def test_lru_cache_threaded2(self):
        # Simultaneous call with the same arguments.
        # n worker threads each compute f(0..m-1); three barriers (each with
        # n+1 parties, the extra one being this main thread) step the workers
        # in lockstep, one argument per round.
        n, m = 5, 7
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            # Every concurrent caller blocks inside f until the main thread
            # joins the 'pause' barrier, making the calls truly simultaneous.
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                # cache_info is (hits, misses, maxsize, currsize): after round
                # i there are no hits, (i+1)*n misses and i+1 cached entries —
                # concurrent identical calls may each miss, but the cache must
                # remain consistent.
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
Example 60
Project: cells   Author: AlesTsurko   File: _test_multiprocessing.py    MIT License 5 votes vote down vote up
def setUp(self):
        # Fresh barrier per test: self.N parties with the class default
        # timeout, so a hung test errors out instead of deadlocking forever.
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
Example 61
Project: cells   Author: AlesTsurko   File: _test_multiprocessing.py    MIT License 5 votes vote down vote up
def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        # The action runs once, in exactly one thread, when the barrier
        # trips; AppendTrue (defined elsewhere) presumably appends to
        # `results` on each invocation — the length check relies on that.
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)
Example 62
Project: cells   Author: AlesTsurko   File: _test_multiprocessing.py    MIT License 5 votes vote down vote up
def test_default_timeout(self):
        """Every party should get through a barrier built with timeout=0.5."""
        outcomes = self.DummyList()
        timed_barrier = self.Barrier(self.N, timeout=0.5)
        self.run_threads(self._test_default_timeout_f, (timed_barrier, outcomes))
        self.assertEqual(len(outcomes), timed_barrier.parties)
Example 63
Project: cells   Author: AlesTsurko   File: _test_multiprocessing.py    MIT License 5 votes vote down vote up
def test_single_thread(self):
        b = self.Barrier(1)
        b.wait()
        b.wait() 
Example 64
Project: NiujiaoDebugger   Author: MrSrc   File: _test_multiprocessing.py    GNU General Public License v3.0 4 votes vote down vote up
def test_event(self):
        """Check multiprocessing-style Event semantics: wait() with and
        without timeout, set()/clear(), and set() observed across a child
        process."""
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        # Once set, every wait() returns True immediately regardless of
        # timeout.
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # The child process (see _test_event elsewhere) sets the event,
        # unblocking this wait().
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()

#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#

# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value.  We use the class DummyList
# for the same purpose.

#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#

# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value.  We use the class DummyList
# for the same purpose. 
Example 65
Project: python-netsurv   Author: sofia-netsurv   File: brain_multiprocessing.py    MIT License 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Return an astroid AST stub standing in for multiprocessing managers.

    The stub maps SyncManager's factory attributes onto threading/queue
    equivalents so astroid-based tools can infer the members of
    manager-created objects without executing multiprocessing itself.
    """
    # The source below is only parsed into an AST, never executed.
    return astroid.parse(
        """
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args

        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    """
    )
Example 66
Project: python-netsurv   Author: sofia-netsurv   File: brain_multiprocessing.py    MIT License 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Return an astroid AST stub standing in for multiprocessing managers.

    The stub maps SyncManager's factory attributes onto threading/queue
    equivalents so astroid-based tools can infer the members of
    manager-created objects without executing multiprocessing itself.
    """
    # The source below is only parsed into an AST, never executed.
    return astroid.parse(
        """
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args

        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    """
    )
Example 67
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: pickletester.py    GNU General Public License v2.0 4 votes vote down vote up
def test_unpickle_module_race(self):
        """Unpickling a class from a module that is being imported
        concurrently must block on the import lock rather than fail."""
        # https://bugs.python.org/issue34572
        locker_module = dedent("""
        import threading
        barrier = threading.Barrier(2)
        """)
        locking_import_module = dedent("""
        import locker
        locker.barrier.wait()
        class ToBeUnpickled(object):
            pass
        """)

        # Write the two helper modules into a temp dir on sys.path; the
        # 'locking_import' module stalls mid-import on locker.barrier.
        os.mkdir(TESTFN)
        self.addCleanup(shutil.rmtree, TESTFN)
        sys.path.insert(0, TESTFN)
        self.addCleanup(sys.path.remove, TESTFN)
        with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
            f.write(locker_module.encode('utf-8'))
        with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
            f.write(locking_import_module.encode('utf-8'))
        self.addCleanup(forget, "locker")
        self.addCleanup(forget, "locking_import")

        import locker

        # Pickle of locking_import.ToBeUnpickled; loading it triggers the
        # import of locking_import.
        pickle_bytes = (
            b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')

        # Then try to unpickle two of these simultaneously
        # One of them will cause the module import, and we want it to block
        # until the other one either:
        #   - fails (before the patch for this issue)
        #   - blocks on the import lock for the module, as it should
        results = []
        barrier = threading.Barrier(3)
        def t():
            # This ensures the threads have all started
            # presumably barrier release is faster than thread startup
            barrier.wait()
            results.append(pickle.loads(pickle_bytes))

        t1 = threading.Thread(target=t)
        t2 = threading.Thread(target=t)
        t1.start()
        t2.start()

        barrier.wait()
        # could have delay here; releasing locker.barrier lets the stalled
        # locking_import finish importing
        locker.barrier.wait()

        t1.join()
        t2.join()

        from locking_import import ToBeUnpickled
        self.assertEqual(
            [type(x) for x in results],
            [ToBeUnpickled] * 2)
Example 68
Project: conf   Author: XonqNopp   File: brain_multiprocessing.py    GNU General Public License v3.0 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Return an astroid AST stub standing in for multiprocessing managers.

    The stub maps SyncManager's factory attributes onto threading/queue
    equivalents so astroid-based tools can infer the members of
    manager-created objects without executing multiprocessing itself.
    """
    # The source below is only parsed into an AST, never executed.
    return astroid.parse('''
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args
        
        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    ''')
Example 69
Project: conf   Author: XonqNopp   File: brain_multiprocessing.py    GNU General Public License v3.0 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Return an astroid AST stub standing in for multiprocessing managers.

    The stub maps SyncManager's factory attributes onto threading/queue
    equivalents so astroid-based tools can infer the members of
    manager-created objects without executing multiprocessing itself.
    """
    # The source below is only parsed into an AST, never executed.
    return astroid.parse('''
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args
        
        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    ''')
Example 70
Project: vim-python-function-expander   Author: ColinKennedy   File: brain_multiprocessing.py    MIT License 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Return an astroid AST stub standing in for multiprocessing managers.

    The stub maps SyncManager's factory attributes onto threading/queue
    equivalents so astroid-based tools can infer the members of
    manager-created objects without executing multiprocessing itself.
    """
    # The source below is only parsed into an AST, never executed.
    return astroid.parse('''
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args
        
        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    ''')
Example 71
Project: pySINDy   Author: luckystarufo   File: brain_multiprocessing.py    MIT License 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Return an astroid AST stub standing in for multiprocessing managers.

    The stub maps SyncManager's factory attributes onto threading/queue
    equivalents so astroid-based tools can infer the members of
    manager-created objects without executing multiprocessing itself.
    """
    # The source below is only parsed into an AST, never executed.
    return astroid.parse(
        """
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args

        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    """
    )
Example 72
Project: garage   Author: clchiou   File: test_executors.py    MIT License 4 votes vote down vote up
def test_shutdown_graceful(self):
        """Graceful shutdown with a too-short timeout: still-running tasks
        are not joined (and a warning is logged) but no queued work is
        dropped, and the tasks complete once unblocked."""
        executor = executors.Executor(4)
        event1 = threading.Event()
        event2 = threading.Event()
        try:

            # Three parties: the two func() workers plus this test thread.
            start_barrier = threading.Barrier(3)

            def func():
                start_barrier.wait()
                event1.wait()

            f1 = executor.submit(func)
            f2 = executor.submit(func)
            f3 = executor.submit(event2.wait)

            # Don't proceed until both func() tasks are actually running.
            start_barrier.wait()

            for stub in executor.stubs:
                self.assertFalse(stub.future.is_completed())

            event2.set()
            self.assertTrue(f3.get_result(timeout=1))

            # f1/f2 are still blocked on event1, so this shutdown must hit
            # its 0.001s timeout and log about the unjoined executors.
            with self.assertLogs(executors.__name__) as cm:
                items = executor.shutdown(graceful=True, timeout=0.001)

            self.assertEqual(len(cm.output), 1)
            self.assertRegex(cm.output[0], r'not join 2 executor')

            self.assertFalse(f1.is_completed())
            self.assertFalse(f2.is_completed())
            # Graceful shutdown returns no dropped queue items.
            self.assertEqual(items, [])

            counts = {True: 0, False: 0}
            for stub in executor.stubs:
                counts[stub.future.is_completed()] += 1
            self.assertEqual(counts, {True: 2, False: 2})

            event1.set()

            self.assertIsNone(f1.get_result(timeout=1))
            self.assertIsNone(f2.get_result(timeout=1))

            for stub in executor.stubs:
                self.assertTrue(stub.future.is_completed())

        finally:
            # Unblock everything so worker threads can exit even on failure.
            event1.set()
            event2.set()
            executor.shutdown()
Example 73
Project: garage   Author: clchiou   File: test_executors.py    MIT License 4 votes vote down vote up
def test_shutdown_not_graceful(self):
        """Non-graceful shutdown: queued-but-unstarted work is dropped and
        returned (with a log warning), while already-running tasks still run
        to completion once unblocked."""
        # Only 2 workers for 3 submissions, so f3 stays queued.
        executor = executors.Executor(2)
        event = threading.Event()
        try:

            # Three parties: the two func() workers plus this test thread.
            start_barrier = threading.Barrier(3)

            def func():
                start_barrier.wait()
                event.wait()

            f1 = executor.submit(func)
            f2 = executor.submit(func)
            f3 = executor.submit(event.wait)

            # Don't proceed until both func() tasks are actually running.
            start_barrier.wait()

            for stub in executor.stubs:
                self.assertFalse(stub.future.is_completed())

            with self.assertLogs(executors.__name__) as cm:
                items = executor.shutdown(graceful=False)

            self.assertEqual(len(cm.output), 1)
            self.assertRegex(cm.output[0], r'drop 1 tasks')

            self.assertFalse(f1.is_completed())
            self.assertFalse(f2.is_completed())
            self.assertFalse(f3.is_completed())
            # The dropped item is exactly the never-started f3.
            self.assertEqual([m.future for m in items], [f3])

            event.set()

            self.assertIsNone(f1.get_result(timeout=1))
            self.assertIsNone(f2.get_result(timeout=1))

            for stub in executor.stubs:
                self.assertTrue(stub.future.is_completed())

        finally:
            # Unblock everything so worker threads can exit even on failure.
            event.set()
            executor.shutdown()
Example 74
Project: Light_control   Author: laurent-colas   File: brain_multiprocessing.py    The Unlicense 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Return an astroid AST stub standing in for multiprocessing managers.

    The stub maps SyncManager's factory attributes onto threading/queue
    equivalents so astroid-based tools can infer the members of
    manager-created objects without executing multiprocessing itself.
    """
    # The source below is only parsed into an AST, never executed.
    return astroid.parse('''
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args
        
        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    ''')
Example 75
Project: ncappzoo   Author: movidius   File: benchmark_ncs.py    MIT License 4 votes vote down vote up
def infer_async_thread_proc(net, exec_net: ExecutableNetwork, dev_thread_request_id: int,
                            image_list: list,
                            first_image_index:int, last_image_index:int,
                            num_total_inferences: int, result_list: list, result_index:int,
                            start_barrier: threading.Barrier, end_barrier: threading.Barrier,
                            simultaneous_infer_per_thread:int, infer_result_queue:queue.Queue, input_blob, output_blob):
    """Thread procedure: run asynchronous inferences and record elapsed time.

    Cycles through ``image_list[first_image_index..last_image_index]``,
    keeping ``simultaneous_infer_per_thread`` async requests in flight per
    batch.  Each completed inference's ``(top_class_index, probability)``
    pair is pushed onto ``infer_result_queue``, and the thread's total
    wall-clock inference time is stored at ``result_list[result_index]``.

    ``start_barrier`` synchronizes the timing start across all worker
    threads; ``end_barrier`` is waited on before returning so all threads
    finish together.

    NOTE(review): the ``net`` parameter is never used in this body -- kept
    for interface compatibility with existing callers.
    """
    # Wait until every worker thread is ready so the timed region starts
    # simultaneously in all threads.
    start_barrier.wait()

    # Start time for this thread's elapsed-time / FPS measurement.
    start_time = time.time()

    handle_list = [None] * simultaneous_infer_per_thread
    image_index = first_image_index

    # Number of batches; each batch issues one request per in-flight slot.
    inferences_per_req = int(num_total_inferences / simultaneous_infer_per_thread)
    for outer_index in range(0, inferences_per_req):
        # Kick off the simultaneous async inference requests for this batch.
        for infer_id in range(0, simultaneous_infer_per_thread):
            new_request_id = dev_thread_request_id + infer_id
            handle_list[infer_id] = exec_net.start_async(request_id = new_request_id, inputs={input_blob: image_list[image_index]})
            image_index += 1
            # Wrap back to the first image once this thread's slice is used up.
            if (image_index > last_image_index):
                image_index = first_image_index

        # Wait for the batch to finish and publish each top-1 result.
        for wait_index in range(0, simultaneous_infer_per_thread):
            handle_list[wait_index].wait()
            result = handle_list[wait_index].outputs[output_blob]
            # Top-1 classification: index of the highest probability.
            top_index = numpy.argsort(result, axis = 1)[0, -1:][::-1]
            top_index = top_index[0]
            prob = result[0][top_index]
            infer_result_queue.put((top_index, prob))

            # Drop the handle so the request object can be reclaimed.
            handle_list[wait_index] = None

    # Record the wall-clock time this thread spent on inference.
    end_time = time.time()
    result_list[result_index] = end_time - start_time

    print("Thread " + str(result_index) + " end barrier reached")

    # Rendezvous with the other inference threads before exiting.
    end_barrier.wait()


# main entry point for program. we'll call main() to do what needs to be done. 
Example 76
Project: Blackjack-Tracker   Author: martinabeleda   File: brain_multiprocessing.py    MIT License 4 votes vote down vote up
def _multiprocessing_managers_transform():
    """Build a stub AST for ``multiprocessing.managers`` for static inference.

    astroid/pylint cannot statically infer the proxy objects that a real
    ``SyncManager`` produces at runtime, so this brain plugin returns a
    hand-written module in which every manager factory is mapped to a plain
    in-process equivalent (``threading`` primitives, ``queue.Queue``,
    builtin ``list``/``dict``) that the inference engine understands.
    The string below is only parsed by astroid, never executed.
    """
    return astroid.parse('''
    import array
    import threading
    import multiprocessing.pool as pool

    import six

    class Namespace(object):
        pass

    class Value(object):
        def __init__(self, typecode, value, lock=True):
            self._typecode = typecode
            self._value = value
        def get(self):
            return self._value
        def set(self, value):
            self._value = value
        def __repr__(self):
            return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
        value = property(get, set)

    def Array(typecode, sequence, lock=True):
        return array.array(typecode, sequence)

    class SyncManager(object):
        Queue = JoinableQueue = six.moves.queue.Queue
        Event = threading.Event
        RLock = threading.RLock
        BoundedSemaphore = threading.BoundedSemaphore
        Condition = threading.Condition
        Barrier = threading.Barrier
        Pool = pool.Pool
        list = list
        dict = dict
        Value = Value
        Array = Array
        Namespace = Namespace
        __enter__ = lambda self: self
        __exit__ = lambda *args: args
        
        def start(self, initializer=None, initargs=None):
            pass
        def shutdown(self):
            pass
    ''') 
Example 77
Project: lightnion   Author: spring-epfl   File: path.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def emitter(
        output_queue,
        kill_queue,
        control_host,
        control_port,
        batch=32,
        target=1024,
        nb_worker=default_nb_worker):
    """Spawn path-building workers and stream circuit paths to output_queue.

    Starts ``nb_worker`` worker threads that build candidate Tor paths into
    a shared bounded queue.  The first path retrieved fixes the guard node;
    only subsequent paths sharing that guard are forwarded.  Emits the guard
    first, then ``(middle, exit)`` pairs, until a kill request arrives on
    ``kill_queue``, after which workers are stopped and the queue drained.
    """
    # (shut up stem)
    logging.getLogger(stem.__name__).setLevel(logging.ERROR)

    barrier = threading.Barrier(nb_worker)
    path_queue = queue.Queue(maxsize=batch)
    # +1 so the workers collectively over-produce rather than under-produce.
    batch_size = target // nb_worker + 1

    workers = []
    for _ in range(nb_worker):
        workers.append(
            worker(
                control_host,
                control_port,
                barrier,
                path_queue,
                batch_size))

    for w in workers:
        w.start()

    # The first path fixes the guard; only paths sharing it are emitted.
    guard, middle, exit_node = path_queue.get()
    output_queue.put(guard)
    output_queue.put((middle, exit_node))

    while any(not w.finished for w in workers):
        new_guard, middle, exit_node = path_queue.get()
        if new_guard != guard:
            continue

        output_queue.put((middle, exit_node))
        # A kill request stops every worker after its current batch.
        if kill_queue.qsize() > 0:
            for w in workers:
                w.finished = True

    # (cleanup is useless as the process will die, but do it nonetheless)
    barrier.abort()
    for w in workers:
        w.dead = True

    # Drain any paths still buffered so pending worker puts can't block.
    try:
        for _ in range(path_queue.qsize()):
            path_queue.get_nowait()
    except queue.Empty:
        pass

    for w in workers:
        w.join(0.1)
Example 78
Project: Project-New-Reign---Nemesis-Main   Author: ShikyoKira   File: _test_multiprocessing.py    GNU General Public License v3.0 4 votes vote down vote up
def test_event(self):
        """Check mp Event semantics (is_set/wait) against the threading API.

        Exercises ``wait()`` timeout behaviour before and after
        ``set()``/``clear()``, then has a child process set the event and
        verifies the blocked ``wait()`` is released.
        """
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        # Reap the child so it does not outlive the test (the updated
        # upstream CPython version of this test joins here as well).
        p.join()

#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#

# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value.  We use the class DummyList
# for the same purpose. 
Example 79
Project: cells   Author: AlesTsurko   File: pickletester.py    MIT License 4 votes vote down vote up
def test_unpickle_module_race(self):
        """Regression test for bpo-34572: concurrent unpickling that triggers
        a module import must block on the import lock instead of failing.

        Two threads unpickle the same class from a module (``locking_import``)
        that is not yet imported; that module's import in turn blocks on a
        barrier inside a helper module (``locker``) until the main thread
        releases it, guaranteeing the two unpicklers really race the import.
        """
        # https://bugs.python.org/issue34572
        locker_module = dedent("""
        import threading
        barrier = threading.Barrier(2)
        """)
        locking_import_module = dedent("""
        import locker
        locker.barrier.wait()
        class ToBeUnpickled(object):
            pass
        """)

        # Write both helper modules to a temp dir on sys.path, and clean up
        # files, path entry and module-cache entries afterwards.
        os.mkdir(TESTFN)
        self.addCleanup(shutil.rmtree, TESTFN)
        sys.path.insert(0, TESTFN)
        self.addCleanup(sys.path.remove, TESTFN)
        with open(os.path.join(TESTFN, "locker.py"), "wb") as f:
            f.write(locker_module.encode('utf-8'))
        with open(os.path.join(TESTFN, "locking_import.py"), "wb") as f:
            f.write(locking_import_module.encode('utf-8'))
        self.addCleanup(forget, "locker")
        self.addCleanup(forget, "locking_import")

        import locker

        # Pickle of locking_import.ToBeUnpickled(); loading it forces the
        # import of the not-yet-imported ``locking_import`` module.
        pickle_bytes = (
            b'\x80\x03clocking_import\nToBeUnpickled\nq\x00)\x81q\x01.')

        # Then try to unpickle two of these simultaneously
        # One of them will cause the module import, and we want it to block
        # until the other one either:
        #   - fails (before the patch for this issue)
        #   - blocks on the import lock for the module, as it should
        results = []
        # Barrier of 3 parties: the two unpickler threads plus this thread.
        barrier = threading.Barrier(3)
        def t():
            # This ensures the threads have all started
            # presumably barrier release is faster than thread startup
            barrier.wait()
            results.append(pickle.loads(pickle_bytes))

        t1 = threading.Thread(target=t)
        t2 = threading.Thread(target=t)
        t1.start()
        t2.start()

        barrier.wait()
        # could have delay here
        # Release locker's 2-party barrier so the blocked import completes.
        locker.barrier.wait()

        t1.join()
        t2.join()

        # Both threads must have produced an instance of the same class.
        from locking_import import ToBeUnpickled
        self.assertEqual(
            [type(x) for x in results],
            [ToBeUnpickled] * 2) 
Example 80
Project: cells   Author: AlesTsurko   File: _test_multiprocessing.py    MIT License 4 votes vote down vote up
def test_event(self):
        """Check mp Event semantics (is_set/wait) against the threading API.

        Exercises ``wait()`` timeout behaviour before and after
        ``set()``/``clear()``, then has a child process set the event and
        verifies the blocked ``wait()`` is released; the child is joined
        before the test returns.
        """
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()

#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#

# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value.  We use the class DummyList
# for the same purpose.