Python threading.BoundedSemaphore() Examples

The following are 30 code examples showing how to use threading.BoundedSemaphore(). They are extracted from open source projects, and you can go to the original project or source file by following the project information shown above each example.


You may also want to check out all available functions and classes of the threading module.
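
Before looking at the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the behaviour they all rely on: a BoundedSemaphore admits at most value concurrent holders, supports the with statement, and raises ValueError if released more often than it was acquired.

import threading

# Minimal standalone sketch: at most 3 workers hold the semaphore at once.
sema = threading.BoundedSemaphore(value=3)

def worker(n):
    with sema:  # acquire on entry, release on exit
        print('worker %d holds one of the 3 slots' % n)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()

try:
    sema.release()  # one release too many for a *bounded* semaphore
except ValueError:
    print('over-release detected')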

Example 1
Project: mqtt2sql   Author: curzon01   File: mqtt2sql.py    License: GNU General Public License v3.0
def __init__(self, args_):
        self._args = args_
        self.write2sql_thread = None
        self.pool_sqlconnections = BoundedSemaphore(value=self._args.sql_max_connection)
        self.userdata = {
            'haveresponse' : False,
            'starttime'    : time.time()
        }
        self.mqttc, ret = self.mqtt_connect(
            host=self._args.mqtt_host,
            port=self._args.mqtt_port,
            username=self._args.mqtt_username,
            password=self._args.mqtt_password,
            keepalive=self._args.mqtt_keepalive,
            cafile=self._args.mqtt_cafile,
            certfile=self._args.mqtt_certfile,
            keyfile=self._args.mqtt_keyfile,
            insecure=self._args.mqtt_insecure,
            userdata=self.userdata
            )
        if ret != ExitCode.OK:
            SignalHandler.exitus(ret, '{}:{} failed - [{}] {}'.format(self._args.mqtt_host, self._args.mqtt_port, ret, mqtt.error_string(ret))) 
Example 2
Project: olympe   Author: Parrot-Developers   File: expectations.py    License: BSD 3-Clause "New" or "Revised" License
def __init__(self, *args, stream_timeout=None, max_parallel_processing=1, **kwds):
        """
        :param scheduler: the decorated scheduler
        :param stream_timeout: the default timeout value in seconds used by StreamScheduler.join
        :param max_parallel_processing: the maximum number of parallelized expectation
            processing (defaults to 1)
        """
        queue_size = 1024
        self._attr.stream_scheduler = Namespace()
        self._attr.stream_scheduler.timeout = stream_timeout
        self._attr.stream_scheduler.max_parallel_processing = max_parallel_processing
        self._attr.stream_scheduler.token_count = threading.BoundedSemaphore(
            max_parallel_processing
        )
        self._attr.stream_scheduler.expectation_queue = deque([], queue_size)
        self._attr.stream_scheduler.pending_expectations = set()
        self._attr.stream_scheduler.on_done_condition = threading.Condition() 
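
The scheduler above keeps a BoundedSemaphore as a pool of processing tokens. A hypothetical sketch of that token pattern (invented names, not olympe's actual processing code): try to take a token without blocking, process if one was free, and return it afterwards.

import threading

# Hypothetical token-pool sketch (names invented, not olympe code).
tokens = threading.BoundedSemaphore(2)   # e.g. max_parallel_processing = 2

def try_process(item):
    if not tokens.acquire(blocking=False):
        return False                     # all slots busy; leave the item queued
    try:
        print('processing', item)
        return True
    finally:
        tokens.release()                 # hand the token back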
Example 3
Project: cluster-insight   Author: google   File: global_state.py    License: Apache License 2.0
def init_caches_and_synchronization(self):
    """Initializes all caches."""
    self._nodes_cache = simple_cache.SimpleCache(
        constants.MAX_CACHED_DATA_AGE_SECONDS,
        constants.CACHE_DATA_CLEANUP_AGE_SECONDS)
    self._pods_cache = simple_cache.SimpleCache(
        constants.MAX_CACHED_DATA_AGE_SECONDS,
        constants.CACHE_DATA_CLEANUP_AGE_SECONDS)
    self._services_cache = simple_cache.SimpleCache(
        constants.MAX_CACHED_DATA_AGE_SECONDS,
        constants.CACHE_DATA_CLEANUP_AGE_SECONDS)
    self._rcontrollers_cache = simple_cache.SimpleCache(
        constants.MAX_CACHED_DATA_AGE_SECONDS,
        constants.CACHE_DATA_CLEANUP_AGE_SECONDS)

    self._bounded_semaphore = threading.BoundedSemaphore(
        constants.MAX_CONCURRENT_COMPUTE_GRAPH) 
Example 4
Project: ironpython2   Author: IronLanguages   File: test_threading.py    License: Apache License 2.0
def test_BoundedSemaphore_limit(self):
        # BoundedSemaphore should raise ValueError if released too often.
        for limit in range(1, 10):
            bs = threading.BoundedSemaphore(limit)
            threads = [threading.Thread(target=bs.acquire)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            threads = [threading.Thread(target=bs.release)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.assertRaises(ValueError, bs.release) 
Example 5
Project: oss-ftp   Author: aliyun   File: test_threading.py    License: MIT License
def test_BoundedSemaphore_limit(self):
        # BoundedSemaphore should raise ValueError if released too often.
        for limit in range(1, 10):
            bs = threading.BoundedSemaphore(limit)
            threads = [threading.Thread(target=bs.acquire)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            threads = [threading.Thread(target=bs.release)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.assertRaises(ValueError, bs.release) 
Example 6
Project: OpenDoor   Author: stanislav-web   File: worker.py    License: GNU General Public License v3.0
def __init__(self, queue, num_threads, timeout=0):
        """
        Init thread worker
        :param Queue.Queue queue: simple queue object
        :param int num_threads: threads numbers
        :param int timeout: delay timeout
        """

        super(Worker, self).__init__()
        self.__semaphore = BoundedSemaphore(num_threads)
        self.__event = Event()
        self.__event.set()
        self.__empty = False
        self.__running = True
        self.__queue = queue
        self.__timeout = timeout
        self.counter = 0 
Example 7
Project: Fluid-Designer   Author: Microvellum   File: test_threading.py    License: GNU General Public License v3.0
def test_BoundedSemaphore_limit(self):
        # BoundedSemaphore should raise ValueError if released too often.
        for limit in range(1, 10):
            bs = threading.BoundedSemaphore(limit)
            threads = [threading.Thread(target=bs.acquire)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            threads = [threading.Thread(target=bs.release)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.assertRaises(ValueError, bs.release) 
Example 8
Project: StegCracker   Author: Paradoxis   File: cracker.py    License: MIT License
def __init__(self, file: str, output: str, line_count: int,
                 threads: int = 8, chunk_size: int = 64, quiet: bool = False,
                 verbose: bool = False):
        """
        Cracker constructor
        :param threads: Number of threads to attempt to crack the signature
        :param file: File to (attempt) to crack
        :param output: Output file to write the file to
        :param chunk_size: Number of passwords to attempt per thread
        """
        self.lock = BoundedSemaphore()
        self.pool = ThreadPool(processes=threads)
        self.thread_count = threads

        self.quiet = quiet
        self.verbose = verbose
        self.file = file
        self.output = output
        self.chunk_size = chunk_size
        self.line_count = line_count or 1
        self.has_error = False
        self.iterable = None

        self.attempts = 0
        self.password = None 
Example 9
Project: aws-with   Author: aws-samples   File: workplan.py    License: Apache License 2.0
def execute_work_plan(logger, options, commands_list):
    """ run through commands_list and run various commands in the thread pool """
    logger.info("Executing work plan across a thread pool of size: %s", options.threads)
    utils.GLOBALS["main_thread_lock"] = threading.Lock()
    utils.GLOBALS["thread_pool_lock"] = threading.BoundedSemaphore(options.threads)
    utils.GLOBALS["thread_count"] = len(commands_list)
    logger.debug("Locks created, task list size = %s", utils.GLOBALS["thread_count"])

    # obtain the main thread lock...
    logger.debug("Acquiring main thread lock")
    utils.GLOBALS["main_thread_lock"].acquire()

    for cmd in commands_list:
        logger.debug("waiting for next thread to be available")
        utils.GLOBALS["thread_pool_lock"].acquire()
        logger.debug("thread is available, starting thread")
        threading.Thread(target=commands.run_command, args=(logger, options, cmd, )).start()

    # block on the main thread lock being released...
    logger.debug("Blocking main thread, waiting on commands to finish")
    utils.GLOBALS["main_thread_lock"].acquire()
    logger.debug("Main thread lock released, working on output") 
Example 10
Project: ironpython3   Author: IronLanguages   File: test_threading.py    License: Apache License 2.0
def test_BoundedSemaphore_limit(self):
        # BoundedSemaphore should raise ValueError if released too often.
        for limit in range(1, 10):
            bs = threading.BoundedSemaphore(limit)
            threads = [threading.Thread(target=bs.acquire)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            threads = [threading.Thread(target=bs.release)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.assertRaises(ValueError, bs.release) 
Example 11
Project: oneshellcrack   Author: L-codes   File: oneshellcrack.py    License: GNU General Public License v3.0
def main():
    global semaphore, sends

    signal.signal(signal.SIGINT, interrupt_handler)
    args = commandline()
    print(' ( Shell:{shell}, Numbers:{max_request}, Threads:{max_threads}, Retry:{max_retry} )\n'.format(**args.__dict__))

    semaphore = BoundedSemaphore(value=args.max_threads)
    stopwatch_start = time.time()
    for i, payload in enumerate(create_payload(args), 1):
        if attack:
            sends = i
            semaphore.acquire()
            t = Thread(target=crack, args=(i, args, payload))
            t.setDaemon(True)
            t.start()

    for _ in range(args.max_threads):
        semaphore.acquire()

    stopwatch = time.time() - stopwatch_start
    words = args.max_request * sends if sends else pwd_total
    speed = words / stopwatch if stopwatch else 0
    msg = '[Success] Password: {}'.format(pwd) if pwd else '[Failed] No password found'
    print('\n\n{msg}\n[Finish] {words} words in {stopwatch:.3f} seconds. ({speed:.0f} w/s)'.format(**locals())) 
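
The final loop above re-acquires the semaphore max_threads times, which only completes once every in-flight worker has released its slot. A small self-contained sketch of that drain-to-join idiom (names invented here):

import threading
import time

# Sketch of the "drain the semaphore to wait for all workers" idiom.
max_threads = 3
sema = threading.BoundedSemaphore(max_threads)

def attempt(i):
    try:
        time.sleep(0.1)                  # simulated work
    finally:
        sema.release()                   # free the slot taken by the spawner

for i in range(9):
    sema.acquire()                       # limits concurrency to max_threads
    threading.Thread(target=attempt, args=(i,), daemon=True).start()

for _ in range(max_threads):             # succeeds only when all workers are done
    sema.acquire()
print('all attempts finished')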
Example 12
Project: gcblue   Author: gcblue   File: test_threading.py    License: BSD 3-Clause "New" or "Revised" License
def test_BoundedSemaphore_limit(self):
        # BoundedSemaphore should raise ValueError if released too often.
        for limit in range(1, 10):
            bs = threading.BoundedSemaphore(limit)
            threads = [threading.Thread(target=bs.acquire)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            threads = [threading.Thread(target=bs.release)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.assertRaises(ValueError, bs.release) 
Example 13
def test_BoundedSemaphore_limit(self):
        # BoundedSemaphore should raise ValueError if released too often.
        for limit in range(1, 10):
            bs = threading.BoundedSemaphore(limit)
            threads = [threading.Thread(target=bs.acquire)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            threads = [threading.Thread(target=bs.release)
                       for _ in range(limit)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.assertRaises(ValueError, bs.release) 
Example 14
Project: forseti-security   Author: forseti-security   File: gce_firewall_enforcer_test.py    License: Apache License 2.0
def setUp(self):
        """Set up.

        Creates a FirewallEnforcer object with current and expected rules set to
        an empty FirewallRules object.
        """
        super(FirewallEnforcerTest, self).setUp()

        self.expected_rules = fe.FirewallRules(constants.TEST_PROJECT)
        self.current_rules = fe.FirewallRules(constants.TEST_PROJECT)

        self.project_sema = threading.BoundedSemaphore(value=1)

        self.enforcer = fe.FirewallEnforcer(
            constants.TEST_PROJECT, self.gce_api_client, self.expected_rules,
            self.current_rules, self.project_sema, None) 
Example 15
Project: forseti-security   Author: forseti-security   File: enabled_apis_rules_engine.py    License: Apache License 2.0
def __init__(self,
                 global_configs,  # pylint: disable= unused-argument
                 rule_defs=None,
                 snapshot_timestamp=None):
        """Initialize.

        Args:
            global_configs (dict): Global configurations.
            rule_defs (dict): The parsed dictionary of rules from the YAML
                definition file.
            snapshot_timestamp (str): The snapshot to lookup data.
        """
        super(EnabledApisRuleBook, self).__init__()
        self._rules_sema = threading.BoundedSemaphore(value=1)
        self.resource_rules_map = collections.defaultdict(set)
        if not rule_defs:
            self.rule_defs = {}
        else:
            self.rule_defs = rule_defs
            self.add_rules(rule_defs)
        if snapshot_timestamp:
            self.snapshot_timestamp = snapshot_timestamp 
Example 16
Project: forseti-security   Author: forseti-security   File: iam_rules_engine.py    License: Apache License 2.0
def __init__(self,
                 # TODO: To remove the unused global-configs here, it will be
                 # necessary to also update the base rules engine.
                 global_configs,  # pylint: disable= unused-argument
                 rule_defs=None,
                 snapshot_timestamp=None):
        """Initialize.

        Args:
            global_configs (dict): Global configurations.
            rule_defs (dict): The parsed dictionary of rules from the YAML
                definition file.
            snapshot_timestamp (str): The snapshot to lookup data.
        """
        super(IamRuleBook, self).__init__()
        self._rules_sema = threading.BoundedSemaphore(value=1)
        self.resource_rules_map = {}
        if not rule_defs:
            self.rule_defs = {}
        else:
            self.rule_defs = rule_defs
            self.add_rules(rule_defs)
        if snapshot_timestamp:
            self.snapshot_timestamp = snapshot_timestamp 
Example 17
Project: forseti-security   Author: forseti-security   File: iap_rules_engine.py    License: Apache License 2.0
def __init__(self, global_configs, rule_defs=None, snapshot_timestamp=None):
        """Initialization.

        Args:
            global_configs (dict): Global configurations.
            rule_defs (list): IAP rule definition dicts
            snapshot_timestamp (int): Snapshot timestamp.
        """
        super(IapRuleBook, self).__init__()
        del global_configs
        self._rules_sema = threading.BoundedSemaphore(value=1)
        self.resource_rules_map = {}
        if not rule_defs:
            self.rule_defs = {}
        else:
            self.rule_defs = rule_defs
            self.add_rules(rule_defs)
        self.snapshot_timestamp = snapshot_timestamp 
Example 18
Project: forseti-security   Author: forseti-security   File: audit_logging_rules_engine.py    License: Apache License 2.0
def __init__(self,
                 global_configs,  # pylint: disable= unused-argument
                 rule_defs=None,
                 snapshot_timestamp=None):
        """Initialize.

        Args:
            global_configs (dict): Global configurations.
            rule_defs (dict): The parsed dictionary of rules from the YAML
                definition file.
            snapshot_timestamp (str): The snapshot to lookup data.
        """
        super(AuditLoggingRuleBook, self).__init__()
        self._rules_sema = threading.BoundedSemaphore(value=1)
        self.resource_rules_map = collections.defaultdict(set)
        if not rule_defs:
            self.rule_defs = {}
        else:
            self.rule_defs = rule_defs
            self.add_rules(rule_defs)
        if snapshot_timestamp:
            self.snapshot_timestamp = snapshot_timestamp 
Example 19
Project: forseti-security   Author: forseti-security   File: retention_rules_engine.py    License: Apache License 2.0
def __init__(self, rule_defs=None):
        """Initialization.

        Args:
            rule_defs (dict): rule definitons
        """
        super(RetentionRuleBook, self).__init__()
        self._rules_sema = threading.BoundedSemaphore(value=1)

        self.resource_rules_map = {
            applies_to: collections.defaultdict(set)
            for applies_to in SUPPORTED_RETENTION_RES_TYPES}
        if not rule_defs:
            self.rule_defs = {}
        else:
            self.rule_defs = rule_defs
            self.add_rules(rule_defs) 
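
Examples 15 through 19 all create a BoundedSemaphore with value=1, i.e. they use it as a plain mutual-exclusion lock around rule-book mutation, with the bound guarding against an accidental double release. A generic sketch of that value=1 idiom (invented names, not forseti-security's actual code):

import threading
import collections

# BoundedSemaphore(value=1) used as a mutex (invented names).
_rules_sema = threading.BoundedSemaphore(value=1)
_resource_rules_map = collections.defaultdict(set)

def add_rule(resource, rule):
    with _rules_sema:                    # one writer at a time
        _resource_rules_map[resource].add(rule)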
Example 20
Project: dwave-cloud-client   Author: dwavesystems   File: test_concurrency.py    License: Apache License 2.0
def test_fallback(self):
        """Without priority specified, it falls back to ThreadPoolExecutor mode."""

        counter = threading.BoundedSemaphore(value=3)

        def worker():
            counter.acquire(blocking=False)

        with PriorityThreadPoolExecutor(max_workers=3) as executor:
            fs = [executor.submit(worker) for _ in range(3)]
            concurrent.futures.wait(fs)

        self.assertFalse(counter.acquire(blocking=False))

        # verify executor shutdown (all threads stopped)
        self.assertFalse(any(t.is_alive() for t in executor._threads)) 
Example 21
Project: subtake   Author: kp625544   File: sublist3r.py    License: GNU General Public License v2.0
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://dnsdumpster.com/'
        self.live_subdomains = []
        self.engine_name = "DNSdumpster"
        self.threads = 70
        self.lock = threading.BoundedSemaphore(value=self.threads)
        self.q = q
        super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return 
Example 22
Project: subtake   Author: kp625544   File: sublist3r.py    License: GNU General Public License v2.0
def __init__(self, subdomains, ports):
        self.subdomains = subdomains
        self.ports = ports
        self.threads = 20
        self.lock = threading.BoundedSemaphore(value=self.threads) 
Example 23
Project: ironpython2   Author: IronLanguages   File: test_threading.py    License: Apache License 2.0
def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10

        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()

        threads = []

        for i in range(NUMTASKS):
            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertIsNone(t.ident)
            self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, initial\)>$')
            t.start()

        if verbose:
            print 'waiting for all tasks to complete'
        for t in threads:
            t.join(NUMTASKS)
            self.assertFalse(t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertIsNotNone(t.ident)
            self.assertRegexpMatches(repr(t), r'^<TestThread\(.*, \w+ -?\d+\)>$')
        if verbose:
            print 'all tasks done'
        self.assertEqual(numrunning.get(), 0) 
Example 24
Project: ironpython2   Author: IronLanguages   File: test_contextlib.py    License: Apache License 2.0
def testWithBoundedSemaphore(self):
        lock = threading.BoundedSemaphore()
        def locked():
            if lock.acquire(False):
                lock.release()
                return False
            else:
                return True
        self.boilerPlate(lock, locked)

# This is needed to make the test actually run under regrtest.py! 
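
The test above verifies that a BoundedSemaphore can be used with the with statement. A minimal illustration of what locked() checks (not part of the test suite): inside the block the single slot is taken, so a non-blocking acquire fails, and it succeeds again once the block exits.

import threading

# Minimal illustration (not from the test suite): the default value is 1.
lock = threading.BoundedSemaphore()

with lock:
    print('held inside with:', not lock.acquire(False))   # True: slot is taken
print('free after with:', lock.acquire(False))            # True: slot available again
lock.release()                                             # restore the slot we just took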
Example 25
Project: selenium-wire   Author: wkeeling   File: server.py    License: MIT License
def __init__(self, max_threads, *args, **kwargs):
        self.sema = threading.BoundedSemaphore(value=max_threads)
        super().__init__(*args, **kwargs) 
Example 26
Project: Yuki-Chan-The-Auto-Pentest   Author: Yukinoshita47   File: sublist3r.py    License: MIT License
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://dnsdumpster.com/'
        self.live_subdomains = []
        self.engine_name = "DNSdumpster"
        self.threads = 70
        self.lock = threading.BoundedSemaphore(value=self.threads)
        self.q = q
        super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return 
Example 27
Project: Yuki-Chan-The-Auto-Pentest   Author: Yukinoshita47   File: sublist3r.py    License: MIT License
def __init__(self, subdomains, ports):
        self.subdomains = subdomains
        self.ports = ports
        self.threads = 20
        self.lock = threading.BoundedSemaphore(value=self.threads) 
Example 28
Project: ITWSV   Author: penetrate2hack   File: sublist3r.py    License: MIT License
def __init__(self, domain, subdomains=None, q=None, silent=False, verbose=True):
        subdomains = subdomains or []
        base_url = 'https://dnsdumpster.com/'
        self.live_subdomains = []
        self.engine_name = "DNSdumpster"
        self.threads = 70
        self.lock = threading.BoundedSemaphore(value=self.threads)
        self.q = q
        super(DNSdumpster, self).__init__(base_url, self.engine_name, domain, subdomains, q=q, silent=silent, verbose=verbose)
        return 
Example 29
Project: ITWSV   Author: penetrate2hack   File: sublist3r.py    License: MIT License
def __init__(self, subdomains, ports):
        self.subdomains = subdomains
        self.ports = ports
        self.threads = 20
        self.lock = threading.BoundedSemaphore(value=self.threads) 
Example 30
Project: BinderFilter   Author: dxwu   File: test_threading.py    License: MIT License
def test_various_ops(self):
        # This takes about n/3 seconds to run (about n/3 clumps of tasks,
        # times about 1 second per clump).
        NUMTASKS = 10

        # no more than 3 of the 10 can run at once
        sema = threading.BoundedSemaphore(value=3)
        mutex = threading.RLock()
        numrunning = Counter()

        threads = []

        for i in range(NUMTASKS):
            t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
            threads.append(t)
            self.assertEqual(t.ident, None)
            self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
            t.start()

        if verbose:
            print 'waiting for all tasks to complete'
        for t in threads:
            t.join(NUMTASKS)
            self.assertTrue(not t.is_alive())
            self.assertNotEqual(t.ident, 0)
            self.assertFalse(t.ident is None)
            self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
        if verbose:
            print 'all tasks done'
        self.assertEqual(numrunning.get(), 0)