Python threading.active_count() Examples

The following code examples show how to use threading.active_count(). They are drawn from open source Python projects; the vote count shown with each example reflects how readers rated it.
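Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what threading.active_count() reports: the number of Thread objects currently alive, including the main thread. It also shows the polling idiom that several of the examples below rely on, waiting until only the main thread remains.

import threading
import time

def worker():
    time.sleep(1)

print(threading.active_count())  # typically 1: only the main thread is alive

threads = [threading.Thread(target=worker) for _ in range(3)]
for t in threads:
    t.start()

print(threading.active_count())  # typically 4: the main thread plus three workers

# Polling idiom used by several examples below; Thread.join() is usually the
# cleaner alternative when you hold references to the worker threads.
while threading.active_count() > 1:
    time.sleep(0.1)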

Example 1
Project: NiujiaoDebugger   Author: MrSrc   File: test_threading.py    GNU General Public License v3.0 7 votes
def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import _thread, threading, os, time

            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)

            evt = threading.Event()
            _thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'') 
Example 2
Project: pyblish-win   Author: pyblish   File: test_threading.py    GNU Lesser General Public License v3.0 6 votes
def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import thread, threading, os, time

            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)

            evt = threading.Event()
            thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, '')
        self.assertEqual(err, '') 
Example 3
Project: zmirror   Author: aploium   File: zmirror.py    MIT License 6 votes
def zmirror_status():
    """返回服务器的一些状态信息"""
    if request.remote_addr and request.remote_addr != '127.0.0.1':
        return generate_simple_resp_page(b'Only 127.0.0.1 are allowed', 403)
    output = ""
    output += strx('extract_real_url_from_embedded_url', extract_real_url_from_embedded_url.cache_info())
    output += strx('\nis_content_type_streamed', is_mime_streamed.cache_info())
    output += strx('\nembed_real_url_to_embedded_url', embed_real_url_to_embedded_url.cache_info())
    output += strx('\ncheck_global_ua_pass', check_global_ua_pass.cache_info())
    output += strx('\nextract_mime_from_content_type', extract_mime_from_content_type.cache_info())
    output += strx('\nis_content_type_using_cdn', is_content_type_using_cdn.cache_info())
    output += strx('\nis_ua_in_whitelist', is_content_type_using_cdn.cache_info())
    output += strx('\nis_mime_represents_text', is_mime_represents_text.cache_info())
    output += strx('\nis_domain_match_glob_whitelist', is_domain_match_glob_whitelist.cache_info())
    output += strx('\nverify_ip_hash_cookie', verify_ip_hash_cookie.cache_info())
    output += strx('\nis_denied_because_of_spider', is_denied_because_of_spider.cache_info())
    output += strx('\nis_ip_not_in_allow_range', is_ip_not_in_allow_range.cache_info())
    output += strx('\n\ncurrent_threads_number', threading.active_count())
    # output += strx('\nclient_requests_text_rewrite', client_requests_text_rewrite.cache_info())
    # output += strx('\nextract_url_path_and_query', extract_url_path_and_query.cache_info())

    output += strx('\n----------------\n')
    output += strx('\ndomain_alias_to_target_set', domain_alias_to_target_set)

    return "<pre>" + output + "</pre>\n" 
Example 4
Project: aridi   Author: dpgon   File: scan.py    GNU General Public License v3.0 6 votes
def _ipscan(ip, precheck):
    global ports
    ports = []
    threads = []

    for port in precheck.portnames:
        t = threading.Thread(target=_opentcpport, args=(ip, port))
        threads.append(t)

    limit = getrlimit(RLIMIT_NOFILE)[0] - 100
    for item in threads:
        while threading.active_count() > limit:
            pass
        item.start()

    while threading.active_count() > 1:
        pass

    return ports 
Example 5
Project: flight-lab   Author: google   File: main.py    Apache License 2.0 6 votes
def _on_command(self, command):
    if command == controller_pb2.SystemCommand.EXIT:
      self.exit()
      return

    if command == controller_pb2.SystemCommand.DEBUG:
      self.logger.debug('{0} threads are alive.'.format(
          threading.active_count()))
      for thread in threading.enumerate():
        self.logger.debug('Thread (name="{0}")'.format(thread.name))
      return

    for component in self._components:
      pattern.run_as_thread(
          name='{0}.on_command({1})'.format(component.name, command),
          target=component.on_command,
          kwargs={'command': command}) 
Example 6
Project: sea   Author: shanbay   File: test_server.py    MIT License 6 votes
def test_server(app, logstream):
    s = Server(app)
    assert not s._stopped

    def log_started(s):
        app.logger.warn('started!')

    def log_stopped(s):
        app.logger.warn('stopped!')

    server_started.connect(log_started)
    server_stopped.connect(log_stopped)

    with mock.patch('time.sleep', new=lambda s: os.kill(os.getpid(), signal.SIGINT)):
        assert s.run()
        assert threading.active_count() > 1
        assert s._stopped

    content = logstream.getvalue()
    assert 'started!' in content and 'stopped!' in content 
Example 7
Project: cascade-server   Author: mitre   File: async.py    Apache License 2.0 6 votes
def enable_async():
    global enabled

    if enabled:
        return enabled

    if threading.active_count() > 3:
        # This number used to be 1, but a gevent patch or something else changed this so it starts with 3 threads
        logger.warning('{} threads already running. gevent monkey patching disabled...'.format(threading.active_count()))
        enabled = False

    else:
        logger.debug('Monkey patching using gevent')
        gevent.monkey.patch_all()
        enabled = True

    return enabled 
Example 8
Project: ironpython2   Author: IronLanguages   File: test_threading.py    Apache License 2.0 6 votes
def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import thread, threading, os, time

            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)

            evt = threading.Event()
            thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, '')
        self.assertEqual(err, '') 
Example 9
Project: pymachinetalk   Author: machinekit   File: anddemo.py    MIT License 6 votes
def main():
    basic = BasicClass()

    print('starting')
    basic.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        pass

    print('stopping threads')
    basic.stop()

    # wait for all threads to terminate
    while threading.active_count() > 1:
        time.sleep(0.1)

    print('threads stopped')
    sys.exit(0) 
Example 10
Project: pymachinetalk   Author: machinekit   File: logger.py    MIT License 6 votes
def main():
    logger = Logger()

    print('starting')
    logger.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        pass

    print('stopping threads')
    logger.stop()

    # wait for all threads to terminate
    while threading.active_count() > 1:
        time.sleep(0.1)

    print('threads stopped')
    sys.exit(0) 
Example 11
Project: Blockly-rduino-communication   Author: technologiescollege   File: test_threading.py    GNU General Public License v3.0 6 votes
def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import _thread, threading, os, time

            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)

            evt = threading.Event()
            _thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'') 
Example 12
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_threading.py    GNU General Public License v2.0 6 votes
def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import _thread, threading, os, time

            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)

            evt = threading.Event()
            _thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, b'')
        self.assertEqual(err, b'') 
Example 13
Project: oss-ftp   Author: aliyun   File: test_threading.py    MIT License 6 votes
def test_dummy_thread_after_fork(self):
        # Issue #14308: a dummy thread in the active list doesn't mess up
        # the after-fork mechanism.
        code = """if 1:
            import thread, threading, os, time

            def background_thread(evt):
                # Creates and registers the _DummyThread instance
                threading.current_thread()
                evt.set()
                time.sleep(10)

            evt = threading.Event()
            thread.start_new_thread(background_thread, (evt,))
            evt.wait()
            assert threading.active_count() == 2, threading.active_count()
            if os.fork() == 0:
                assert threading.active_count() == 1, threading.active_count()
                os._exit(0)
            else:
                os.wait()
        """
        _, out, err = assert_python_ok("-c", code)
        self.assertEqual(out, '')
        self.assertEqual(err, '') 
Example 14
Project: mistika-hyperspeed   Author: bovesan   File: consolidate.py    Apache License 2.0 6 votes
def gui_stack_add(self, stack_path):
        if stack_path in self.stacks:
            return
        self.stacks[stack_path] = Stack(stack_path)
        stack = self.stacks[stack_path]
        row_iter = self.stacks_treestore.append(None, [stack_path, 0.0, '0%', False, False])
        row_path = self.stacks_treestore.get_path(row_iter)
        stack.row_reference = gtk.TreeRowReference(self.stacks_treestore, row_path)
        # for dependency in stack.dependencies:
        #     self.dependencies_treestore.append(None, [dependency.name])
        # print 'creating thread'
        t = threading.Thread(target=self.get_dependencies, args=[stack])
        self.threads.append(t)
        t.setDaemon(True)
        t.start()
        # print 'started thread'
        # print threading.active_count() 
Example 15
Project: TimeMachine   Author: DroidTest   File: executor.py    GNU Lesser General Public License v3.0 6 votes
def output(self):
        
        with open(RunParameters.OUTPUT_FILE, "a") as csv_file:
            writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            while (time.time() - self.start_time) < self.time_limit:
                
                #computing snapshots
                num_snapshots = 0
                for key in self.state_graph.states:
                    if self.state_graph.states[key].solid:
                        num_snapshots = num_snapshots+1
                
                #read coverage
                coverage_manager.pull_coverage_files("temp")
                coverage_manager.compute_current_coverage()             # output in coverage.txt
                current_coverage = coverage_manager.read_current_coverage()

                # write files
                writer.writerow([str(int(time.time()-self.start_time)), str(len(self.state_graph.states)),str(num_snapshots), str(self.num_restore), str(current_coverage)])
                time.sleep(120)
                
                print "current threads:  " + str(threading.active_count())


        csv_file.close() 
Example 16
Project: zmirror   Author: ttestdock   File: zmirror.py    MIT License 6 votes
def zmirror_status():
    """返回服务器的一些状态信息"""
    if request.remote_addr and request.remote_addr != '127.0.0.1':
        return generate_simple_resp_page(b'Only 127.0.0.1 are allowed', 403)
    output = ""
    output += strx('extract_real_url_from_embedded_url', extract_real_url_from_embedded_url.cache_info())
    output += strx('\nis_content_type_streamed', is_mime_streamed.cache_info())
    output += strx('\nembed_real_url_to_embedded_url', embed_real_url_to_embedded_url.cache_info())
    output += strx('\ncheck_global_ua_pass', check_global_ua_pass.cache_info())
    output += strx('\nextract_mime_from_content_type', extract_mime_from_content_type.cache_info())
    output += strx('\nis_content_type_using_cdn', is_content_type_using_cdn.cache_info())
    output += strx('\nis_ua_in_whitelist', is_content_type_using_cdn.cache_info())
    output += strx('\nis_mime_represents_text', is_mime_represents_text.cache_info())
    output += strx('\nis_domain_match_glob_whitelist', is_domain_match_glob_whitelist.cache_info())
    output += strx('\nverify_ip_hash_cookie', verify_ip_hash_cookie.cache_info())
    output += strx('\nis_denied_because_of_spider', is_denied_because_of_spider.cache_info())
    output += strx('\nis_ip_not_in_allow_range', is_ip_not_in_allow_range.cache_info())
    output += strx('\n\ncurrent_threads_number', threading.active_count())
    # output += strx('\nclient_requests_text_rewrite', client_requests_text_rewrite.cache_info())
    # output += strx('\nextract_url_path_and_query', extract_url_path_and_query.cache_info())

    output += strx('\n----------------\n')
    output += strx('\ndomain_alias_to_target_set', domain_alias_to_target_set)

    return "<pre>" + output + "</pre>\n" 
Example 17
Project: distributed_framework   Author: ydf0509   File: base_consumer.py    Apache License 2.0 6 votes
def __init__(self, queue_name, fucntion_name, params):
        self.queue_name = queue_name
        self.function = fucntion_name
        publish_time = _get_publish_time(params)
        if publish_time:
            self.publish_time_str = time_util.DatetimeConverter(publish_time).datetime_str
        function_params = delete_keys_and_return_new_dict(params, ['publish_time', 'publish_time_format', 'extra'])
        self.params = function_params
        self.params_str = json.dumps(function_params)
        self.result = ''
        self.run_times = 0
        self.exception = ''
        self.time_start = time.time()
        self.time_cost = None
        self.success = False
        self.current_thread = ConsumersManager.get_concurrent_info()
        self.total_thread = threading.active_count()
        self.set_log_level(20) 
Example 18
Project: distributed_framework   Author: ydf0509   File: custom_threadpool_executor.py    Apache License 2.0 6 votes
def show_current_threads_num(sleep_time=60, process_name='', block=False):
    process_name = sys.argv[0] if process_name == '' else process_name

    def _show_current_threads_num():
        while True:
            # logger_show_current_threads_num.info(f'concurrency of process {process_name} -->  {threading.active_count()}')
            nb_print(f'thread count of process {process_name} -->  {threading.active_count()}')
            time.sleep(sleep_time)

    if process_name not in process_name_set:
        if block:
            _show_current_threads_num()
        else:
            t = threading.Thread(target=_show_current_threads_num, daemon=True)
            t.start()
        process_name_set.add(process_name) 
Example 19
Project: execnet   Author: pytest-dev   File: test_multi.py    MIT License 6 votes
def test_safe_terminate(execmodel):
    if execmodel.backend != "threading":
        pytest.xfail(
            "execution model %r does not support task count" % execmodel.backend
        )
    import threading

    active = threading.active_count()
    l = []

    def term():
        sleep(3)

    def kill():
        l.append(1)

    safe_terminate(execmodel, 1, [(term, kill)] * 10)
    assert len(l) == 10
    sleep(0.1)
    gc.collect()
    assert execmodel.active_count() == active 
Example 20
Project: execnet   Author: pytest-dev   File: test_multi.py    MIT License 6 votes
def test_safe_terminate2(execmodel):
    if execmodel.backend != "threading":
        pytest.xfail(
            "execution model %r does not support task count" % execmodel.backend
        )
    import threading

    active = threading.active_count()
    l = []

    def term():
        return

    def kill():
        l.append(1)

    safe_terminate(execmodel, 3, [(term, kill)] * 10)
    assert len(l) == 0
    sleep(0.1)
    gc.collect()
    assert threading.active_count() == active 
Example 21
Project: malcode   Author: moonsea   File: winfailgenasm.py    GNU General Public License v3.0 5 votes
def multiGenAsm(filepath):
    count = threading.active_count()
    print '[+]count:', count

    # while (count >= ThreadMax):
    #     count = threading.active_count()
    #     print '[+]Threadcount:', count

    my_thread = threading.Thread(target=genAsm, args=(filepath, ))
    my_thread.start()
    # my_thread.join()

    if (count == ThreadMax):
        my_thread.join() 
Example 22
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: totalThreads.py    MIT License 5 votes
def main():
  for i in range(random.randint(2,50)):
    thread = threading.Thread(target=myThread, args=(i,))
    thread.start()

  time.sleep(4)
  print("Total Number of Active Threads: {}".format(threading.active_count())) 
Example 23
Project: ngo-addons-backport   Author: camptocamp   File: web_services.py    GNU Affero General Public License v3.0 5 votes
def exp_get_stats(self):
        res = "OpenERP server: %d threads\n" % threading.active_count()
        res += netsvc.Server.allStats()
        return res 
Example 24
Project: aridi   Author: dpgon   File: scan.py    GNU General Public License v3.0 5 votes
def _localnetworkarpscan(iface, sourcemac, network):
    global waiting

    sourceip = network.split("/")[0]
    network = ip_network(network, strict=False)

    threads = []

    ans = threading.Thread(target=_scanans, args=(iface, ))

    for i in network:
        t = threading.Thread(target=_arppacket, args=(str(i), sourceip, sourcemac, iface))
        threads.append(t)

    ans.start()

    for item in threads:
        item.start()

    # when all packets are out, it should has only 2 threads, the main and scanans()
    while threading.active_count() > 2:
        pass

    # Wait 5 seconds an answer
    sleep(5)

    waiting = False

    while threading.active_count() > 1:
        pass

    waiting = True 
Example 25
Project: moler   Author: nokia   File: asyncio_runner.py    BSD 3-Clause "New" or "Revised" License 5 votes
def system_resources_usage():
    curr_fds_open = current_process.num_fds()
    curr_threads_nb = threading.active_count()
    return curr_fds_open, curr_threads_nb 
Example 26
Project: moler   Author: nokia   File: conftest.py    BSD 3-Clause "New" or "Revised" License 5 votes
def system_resources_usage():
    curr_fds_open = current_process.num_fds()
    curr_threads_nb = threading.active_count()
    return curr_fds_open, curr_threads_nb 
Example 27
Project: irc-rss-feed-bot   Author: impredicative   File: bot.py    GNU Affero General Public License v3.0 5 votes
def _setup_channels(self) -> None:
        instance = config.INSTANCE
        channels = instance['feeds']
        channels_str = ', '.join(channels)
        log.debug('Setting up threads and queues for %s channels (%s) and their feeds with %s currently active '
                  'threads.', len(channels), channels_str, threading.active_count())
        num_feeds_setup = 0
        num_reads_daily = 0
        barriers_parties: Dict[str, int] = {}
        for channel, channel_config in channels.items():
            log.debug('Setting up threads and queue for %s.', channel)
            num_channel_feeds = len(channel_config)
            self.CHANNEL_JOIN_EVENTS[channel] = threading.Event()
            self.CHANNEL_QUEUES[channel] = queue.Queue(maxsize=num_channel_feeds * 2)
            threading.Thread(target=self._msg_channel, name=f'ChannelMessenger-{channel}',
                             args=(channel,)).start()
            for feed, feed_config in channel_config.items():
                threading.Thread(target=self._read_feed, name=f'FeedReader-{channel}-{feed}',
                                 args=(channel, feed)).start()
                num_feeds_setup += 1
                num_reads_daily += \
                    (24 / max(config.PERIOD_HOURS_MIN, feed_config.get('period', config.PERIOD_HOURS_DEFAULT)))
                if feed_config.get('group'):
                    group = feed_config['group']
                    barriers_parties[group] = barriers_parties.get(group, 0) + 1
            log.debug('Finished setting up threads and queue for %s and its %s feeds with %s currently active threads.',
                      channel, num_channel_feeds, threading.active_count())
        for barrier, parties in barriers_parties.items():
            self.FEED_GROUP_BARRIERS[barrier] = threading.Barrier(parties)
        log.info('Finished setting up %s channels (%s) and their %s feeds with %s currently active threads.',
                 len(channels), channels_str, num_feeds_setup, threading.active_count())
        log.info('Ignoring any caches, %s URL reads are expected daily, i.e. once every %s on an average.',
                 f'{round(num_reads_daily):n}', timedelta_desc(datetime.timedelta(days=1) / num_reads_daily))

# Refs: https://tools.ietf.org/html/rfc1459 https://modern.ircdocs.horse 
Example 28
Project: ACE   Author: IntegralDefense   File: views.py    Apache License 2.0 5 votes
def debug():

    import threading
    import os

    message = """

current thread id = {}
current pid = {}
thread count = {}

""".format(threading.get_ident(), os.getpid(), threading.active_count())
    
    return message, 200 
Example 29
Project: ACE   Author: IntegralDefense   File: network_semaphore.py    Apache License 2.0 5 votes
def start_failsafe_monitor(self):
        self.failsafe_thread = Thread(target=self.failsafe_loop, name="Failsafe {0}".format(self.semaphore_name))
        self.failsafe_thread.daemon = True
        self.failsafe_thread.start()
        #record_metric(METRIC_THREAD_COUNT, threading.active_count()) 
Example 30
Project: ACE   Author: IntegralDefense   File: network_semaphore.py    Apache License 2.0 5 votes
def start(self):
        # TODO option to daemonize
        self.server_thread = Thread(target=self.server_loop, name="Network Server")
        self.server_thread.start()
        #record_metric(METRIC_THREAD_COUNT, threading.active_count())

        self.monitor_thread = Thread(target=self.monitor_loop, name="Monitor")
        self.monitor_thread.daemon = True
        self.monitor_thread.start()
        #record_metric(METRIC_THREAD_COUNT, threading.active_count()) 
Example 31
Project: Overhead2   Author: hacknation-anonops   File: overhead2.py    GNU General Public License v3.0 5 votes
def main():
    threads_pool = []                        # Stores all active threads
    Header()                                 # Prints the header
    options = get_parser()                   # Get the arguments
    config = check_input(vars(options.parse_args()))

    # PRINT INFO BANNER
    status = check_address(config["url"])
    separator = "#"+"="*40+"#"
    print("{0} \n# Target: {1}\n# Threads: {2}\n# Status: {3}\n# Type: {4}\n# Proxy: {5}\n{0}".format(
        separator,
        config["url"],
        config["threads"],
        "online" if status else "offline",
        config["type"],
        config["proxy"]))
    if not status:
        exit(0)
    print("\n> Press enter to launch the attack")
    input()
    # START DE ATTACK 
    starting_time = time.time()
    for c in range(config["threads"]):
        t = Get(config) if config["type"] == "GET" else Post(config)
        threads_pool.append(t)
        t.start()

    # Starts a new checker thread 
    checker = Checker(config)
    checker.start()

    while threading.active_count() > 0:
        now = round(time.time() -starting_time, 1 )
        try:
            print("\r[>] Number of hits: {0}       |   Time: {1}".format(success, now), end="\r")
        except KeyboardInterrupt:
            checker.on = False
            for th in threads_pool:
                th.on = False
    print("\n[!] All threads have been stoped")
    exit(0) 
Example 32
Project: pymachinetalk   Author: machinekit   File: ncurses.py    MIT License 5 votes
def main():
    mkconfig = config.Config()
    mkini = os.getenv("MACHINEKIT_INI")
    if mkini is None:
        mkini = mkconfig.MACHINEKIT_INI
    if not os.path.isfile(mkini):
        sys.stderr.write("MACHINEKIT_INI " + mkini + " does not exist\n")
        sys.exit(1)

    mki = configparser.ConfigParser()
    mki.read(mkini)
    uuid = mki.get("MACHINEKIT", "MKUUID")

    ui = TerminalUI(uuid=uuid, use_curses=True)
    ui.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        pass

    print("stopping threads")
    ui.stop()

    # wait for all threads to terminate
    while threading.active_count() > 1:
        time.sleep(0.5)

    print("threads stopped")
    sys.exit(0) 
Example 33
Project: pymachinetalk   Author: machinekit   File: ipc-server.py    MIT License 5 votes
def main():
    mkconfig = config.Config()
    mkini = os.getenv("MACHINEKIT_INI")
    if mkini is None:
        mkini = mkconfig.MACHINEKIT_INI
    if not os.path.isfile(mkini):
        sys.stderr.write("MACHINEKIT_INI " + mkini + " does not exist\n")
        sys.exit(1)

    mki = configparser.ConfigParser()
    mki.read(mkini)
    uuid = mki.get("MACHINEKIT", "MKUUID")
    # remote = mki.getint("MACHINEKIT", "REMOTE")

    ipcServer = IPCServer(uuid=uuid)
    ipcServer.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        pass

    print("stopping threads")
    ipcServer.stop()

    # wait for all threads to terminate
    while threading.active_count() > 1:
        time.sleep(0.1)

    print("threads stopped")
    sys.exit(0) 
Example 34
Project: pymachinetalk   Author: machinekit   File: halremote.py    MIT License 5 votes
def main():
    basic = BasicClass()
    basic.start()

    # wait for all threads to terminate
    while threading.active_count() > 1:
        time.sleep(0.1)

    print("threads stopped")
    sys.exit(0) 
Example 35
Project: Tinychat-Bot--Discontinued   Author: Tinychat   File: tinybot.py    MIT License 5 votes
def do_media_info(self):
        """ Shows basic media info. """
        if self.is_client_mod:
            self.send_private_msg('*Playlist Length:* ' + str(len(self.media.track_list)), self.active_user.nick)
            self.send_private_msg('*Track List Index:* ' + str(self.media.track_list_index), self.active_user.nick)
            self.send_private_msg('*Elapsed Track Time:* ' +
                                  self.format_time(self.media.elapsed_track_time()), self.active_user.nick)
            self.send_private_msg('*Active Track:* ' + str(self.media.has_active_track()), self.active_user.nick)
            self.send_private_msg('*Active Threads:* ' + str(threading.active_count()), self.active_user.nick) 
Example 36
Project: wsgi_status   Author: i2tsuki   File: monitor.py    MIT License 5 votes
def is_threadmodel(self):
        if threading.active_count() > 1:
            return True
        return False 
Example 37
Project: oss-ftp   Author: aliyun   File: __init__.py    MIT License 5 votes
def stop(self):
        """Stop serving (also disconnecting all currently connected
        clients) by telling the serve_forever() loop to stop and
        waits until it does.
        """
        if not self._serving:
            raise RuntimeError("Server not started yet")
        if not self._stopped:
            self._serving = False
            self._stopped = True
            self.join(timeout=3)
            if threading.active_count() > 1:
                warn("test FTP server thread is still running")
            self._flag_stopped.wait() 
Example 38
Project: oss-ftp   Author: aliyun   File: __init__.py    MIT License 5 votes
def stop(self):
        """Stop serving (also disconnecting all currently connected
        clients) by telling the serve_forever() loop to stop and
        waits until it does.
        """
        if not self._serving:
            raise RuntimeError("Server not started yet")
        if not self._stopped:
            self._serving = False
            self._stopped = True
            self.join(timeout=3)
            if threading.active_count() > 1:
                warn("test FTP server thread is still running")
            self._flag_stopped.wait() 
Example 39
Project: sp2cp   Author: oupirum   File: autoposter.py    MIT License 5 votes
def _watch_for_replies(self, post_id):
		seen = set()
		while not self._stopped:
			self._watcher_pause()

			replies = []
			try:
				replies = self._get_replies(post_id)
			except requests.exceptions.HTTPError as err:
				print('')
				print('HTTPError:', err.response.status_code, err.response.reason)
				n_left = threading.active_count() - 3
				print('watchers left:', n_left)
				if n_left == 0:
					os.kill(os.getpid(), signal.SIGINT)
				break
			except Exception as err:
				print('')
				print(err)

			for reply in replies:
				if reply.id not in seen:
					seen.add(reply.id)

					print('')
					print('============== NEW REPLY =======================================================')
					print(self._comment)
					print('->', reply.comment)
					print('https://2ch.hk/%s/res/%s.html#%s'
							% (OPTS.board, self._thread_id, post_id))

					self._reply(reply) 
Example 40
Project: github-snooze-button   Author: tdsmith   File: snooze.py    MIT License 5 votes
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("config")
    args = parser.parse_args()

    config = parse_config(args.config)
    for name, repo in config.items():
        github_auth = (repo["github_username"], repo["github_password"])
        snooze_label = repo["snooze_label"]
        ignore_members_of = repo["ignore_members_of"]
        callback = lambda event, message: github_callback(event, message, github_auth,
                                                          snooze_label, ignore_members_of)
        listener = RepositoryListener(
            callbacks=[callback],
            events=LISTEN_EVENTS,
            **repo)
        t = threading.Thread(target=poll_forever, args=(listener, repo["poll_interval"]))
        t.daemon = True
        t.start()
    while True:
        # wait forever for a signal or an unusual termination
        if threading.active_count() < len(config) + 1:
            logging.error("Child polling thread quit!")
            return False
        time.sleep(1)
    return True 
Example 41
Project: Rubrik-Scripts   Author: Assured-DP   File: archivereport.py    GNU General Public License v3.0 5 votes
def waitForThreads():
	slowDown = True
	while slowDown:
		if threading.active_count() > maxThreadCount:
			print("Max Threads of "+str(maxThreadCount)+" reached, waiting for threads to complete")
			time.sleep(5)
		else:
			slowDown = False		
	
# SLA Object Count -- Calculate total objects connected to an SLA 
Example 42
Project: OpenDoor   Author: stanislav-web   File: threadpool.py    GNU General Public License v3.0 5 votes
def pause(self):
        """
        ThreadPool pause
        :raise KeyboardInterrupt
        :return: None
        """

        self.is_started = False
        tpl.info(key='stop_threads', threads=len(self.__workers))

        try:
            while 0 < threading.active_count():
                for worker in threading.enumerate():
                    if threading.current_thread().__class__.__name__ != '_MainThread':
                        worker.pause()
                time.sleep(2)

                char = tpl.prompt(key='option_prompt')
                if char.lower() == 'e':
                    raise KeyboardInterrupt
                elif char.lower() == 'c':
                    self.resume()
                    break
                else:
                    continue

        except (SystemExit, KeyboardInterrupt):
            raise KeyboardInterrupt 
Example 43
Project: arb2   Author: mikem5   File: arb2.py    MIT License 5 votes
def setMarketBooks(self):


                if self.entered_arb != 1:
                        if self.arb_counter > 0:
                            self.arb_counter -= 1

                        prior = threading.active_count()
                        p_timeout = time.time()
                        for m in self.markets:
                                t = threading.Thread(target = m.setBalance)
                                t.start()


                        while (threading.active_count() > prior) and (time.time() - 60  < p_timeout):
                            time.sleep(1)


                # So arb has happened, we should set the counter to 3, then
                # continue with normal getting of balances.

                else:
                        # Set to default of 4, so we check books four times,
                        # then we should be good.
                        self.arb_counter = 6
                        for m in self.markets:
                                    m.setBalance()
                        self.sumBooks()
                        self.logger(self.stringFormat(0,0,0,2))


        # This is going to process our initial book versus our current book
        # If it is way out of wack it will call fixBook which will initiate
        # a trade to correct the imbalance. 
Example 44
Project: housemon   Author: jeffeb3   File: display.py    MIT License 5 votes
def set_title():
    clock.set_text([u'%s' % time.asctime()])
    threads.set_text([u"Threads:\n%d" % threading.active_count()])
    hostname.set_text([u"%s:\n%s" % (platform.node(), socket.gethostbyname(platform.node() + '.local'))])
    uptime.set_text([u"Uptime:\n%s" % uptime_text()]) 
Example 45
Project: apt-mirror-python   Author: sir-xw   File: __init__.py    GNU General Public License v2.0 5 votes
def download_worker(wget_args, rsync_args, logfile, task_queue):
    while 1:
        try:
            url = task_queue.get(block=False)
            schema, filepath = url.split('://', 1)
            if schema == 'rsync':
                subprocess.call(['mkdir', '-p', os.path.dirname(filepath)])
                subprocess.call(
                    rsync_args + ['--log-file', logfile, url, filepath])
            else:
                subprocess.call(wget_args + ['-o', logfile, url])
        except queue.Empty:
            break
    output("[" + str(threading.active_count() - 2) + "]... ") 
Example 46
Project: Catan   Author: Japjappedulap   File: CatanFlask.py    MIT License 5 votes
def classic_map():
    print(threading.active_count())
    return classic() 
Example 47
Project: Catan   Author: Japjappedulap   File: CatanFlask.py    MIT License 5 votes
def catan_help():
    print(threading.active_count())
    return app.send_static_file('index.html') 
Example 48
Project: Catan   Author: Japjappedulap   File: CatanFlask.py    MIT License 5 votes
def extended_map():
    print(threading.active_count())
    return extended() 
Example 49
Project: AVrecordeR   Author: JRodrigoF   File: AVrecordeR.py    GNU General Public License v2.0 5 votes
def stop_AVrecording(filename):
	
	audio_thread.stop() 
	frame_counts = video_thread.frame_counts
	elapsed_time = time.time() - video_thread.start_time
	recorded_fps = frame_counts / elapsed_time
	print "total frames " + str(frame_counts)
	print "elapsed time " + str(elapsed_time)
	print "recorded fps " + str(recorded_fps)
	video_thread.stop() 

	# Makes sure the threads have finished
	while threading.active_count() > 1:
		time.sleep(1)

	
#	 Merging audio and video signal
	
	if abs(recorded_fps - 6) >= 0.01:    # If the fps rate was higher/lower than expected, re-encode it to the expected
										
		print "Re-encoding"
		cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
		subprocess.call(cmd, shell=True)
	
		print "Muxing"
		cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
		subprocess.call(cmd, shell=True)
	
	else:
		
		print "Normal recording\nMuxing"
		cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
		subprocess.call(cmd, shell=True)

		print ".."




# Required and wanted processing of final files 
Example 50
Project: heyChicken   Author: aikochan   File: heyChickenUtility.py    GNU General Public License v2.0 5 votes
def request_status(client_socket):
	# print "current thread: ", threading.current_thread()
	# print "thread active count: ", threading.active_count()
	incoming_msg = send_message(MSG_REQ_STATUS, client_socket, STATUS_RETRIES)
	if incoming_msg is not None:
		tokens = incoming_msg.split()
		if tokens:
			receive_status(tokens)
	global lets_shutdown, timer	
	if not lets_shutdown:
		timer = Timer(STATUS_POLLING_INTERVAL, request_status, [client_socket,])
		timer.start()
	else:
		sys.exit(0) 
Example 51
Project: distributed_framework   Author: ydf0509   File: base_consumer.py    Apache License 2.0 5 votes
def get_concurrent_info(cls):
        concurrent_info = ''
        if cls.global_concurrent_mode == 1:
            concurrent_info = f'[{threading.current_thread()}  {threading.active_count()}]'
        elif cls.global_concurrent_mode == 2:
            concurrent_info = f'[{gevent.getcurrent()}  {threading.active_count()}]'
        elif cls.global_concurrent_mode == 3:
            # noinspection PyArgumentList
            concurrent_info = f'[{eventlet.getcurrent()}  {threading.active_count()}]'
        return concurrent_info 
Example 52
Project: distributed_framework   Author: ydf0509   File: custom_threadpool_executor.py    Apache License 2.0 5 votes
def get_current_threads_num():
    return threading.active_count() 
Example 53
Project: pg_compare   Author: WTFox   File: utils.py    MIT License 5 votes
def load_table_details_for_both_dbs(*databases):
    """ Load all needed data from both databases into memory. """

    if PGCOMPARE_NO_ASYNC:
        for db in databases:
            db.get_details_for_tables()

        return

    threads = []
    stop_event = threading.Event()
    initial_active_threads = threading.active_count()
    for db in databases:
        process = threading.Thread(target=db.get_details_for_tables)
        process.start()
        threads.append(process)

    while threading.active_count() > initial_active_threads:
        try:
            time.sleep(0.1)
        except (KeyboardInterrupt, SystemExit):
            stop_event.set()

        if stop_event.is_set():
            for db in databases:
                db._close_all()

            sys.exit(1)

    return 
Example 54
Project: pyDraw   Author: carlossilva2   File: Core.py    MIT License 5 votes
def search(self):
    if active_count() > len(self.active_threads):
      for t in enumerate_thread():
        if t not in self.active_threads:
          self.add(t)
    else:
      for t in self.active_threads:
        if t not in enumerate_thread():
          self.active_threads.remove(t)
          self.length = len(self.active_threads) 
Example 55
Project: spodernet   Author: TimDettmers   File: batching.py    MIT License 5 votes
def __del__(self):
        log.debug('Stopping threads...')
        for worker in self.loaders:
            worker.stop()

        log.debug('Waiting for threads to finish...')
        while threading.active_count() > 0:
            time.sleep(0.1) 
Example 56
Project: tss18-robotsinmusicalimprovisation   Author: Roboy   File: Simulation_GUI.py    BSD 3-Clause "New" or "Revised" License 5 votes
def btn_run_clicked(self):
        # print("before {}".format(self.vae_thread_alive))
        # print(threading.active_count())
        if not self.is_running:
            vae_thread = threading.Thread(target=vae_interact, args=(self,))
            vae_thread.setDaemon(True)
            vae_thread.start()
            self.is_running = vae_thread.is_alive()
            print("after {}".format(self.vae_thread_alive))
        # vae_process = Process(target=vae_interact, args=(self,))
        # vae_process.start() 
Example 57
Project: easybuggy4django   Author: k-tamura   File: views.py    MIT License 5 votes
def thread_leak(request):
    d = {
        'title': _('title.threadleak.page'),
        'note': _('msg.note.threadleak'),
    }
    t1 = threading.Thread(target=active_threads_count, name="atc")
    t1.start()
    d['count'] = threading.active_count()
    return render(request, 'threadleak.html', d) 
Example 58
Project: easybuggy4django   Author: k-tamura   File: views.py    MIT License 5 votes
def active_threads_count():
    while True:
        logger.info("Current thread count: " + str(threading.active_count()))
        sleep(100) 
Example 59
Project: nakatomi   Author: pierce403   File: nakatomi_agent.py    Apache License 2.0 5 votes
def main():
  while True:
    if threading.active_count() < 3:
      notifylock=False
      print("[+] Active Threads: %s" % threading.active_count())
      t = threading.Thread(target=scan)
      t.start()
    else:
      if notifylock is False:
        print("[+] Thread pool exhausted")
      notifylock=True

    time.sleep(1) 
Example 60
Project: bruteforce-universal   Author: Castorps   File: proxy_scraper.py    GNU General Public License v3.0 5 votes
def scrape(self):
        self.proxy_source_log = {}
        proxy_sources = set()
        thread_list = []

        # load proxy sources
        with open(self.path_proxy_sources_file, 'r', encoding='utf-8', errors='ignore') as proxy_sources_file:
            for line in proxy_sources_file:
                if '://' in line:
                    proxy_source = line.replace('\n', '').replace('\r', '').replace('\t', '').replace(' ', '')
                    proxy_sources.add(proxy_source)

        # scrape proxy sources
        for proxy_source in proxy_sources:
            while threading.active_count() >= 10:
                sleep(0.5)

            t = threading.Thread(target=self.scrape_website, args=[proxy_source])
            t.start()
            thread_list.append(t)

        # wait for scraping sources to be finished
        for t in thread_list:
            t.join()

        # log how many proxies each proxy source provided
        with open(self.path_proxy_sources_log_file, 'w+', encoding='utf-8', errors='ignore') as proxy_source_log_file:
            for proxy_source in sorted(self.proxy_source_log):
                proxy_source_log_file.write(proxy_source + ' ; ' + str(self.proxy_source_log[proxy_source]) + '\n') 
Example 61
Project: pywebsocket   Author: google   File: standalone.py    BSD 3-Clause "New" or "Revised" License 5 votes
def run(self):
        while True:
            thread_name_list = []
            for thread in threading.enumerate():
                thread_name_list.append(thread.name)
            self._logger.info(
                "%d active threads: %s",
                threading.active_count(),
                ', '.join(thread_name_list))
            time.sleep(self._interval_in_sec) 
Example 62
Project: pyresp   Author: zachasme   File: __init__.py    MIT License 5 votes
def join_agents():
    """Wait for all threads to finish

    For now we just sleep to keep main thread alive
    this way it can still catch ctrl+c
    """
    while active_count() > 1:
        sleep(1) 
Example 63
Project: Practice-Problems   Author: UALR-ACM   File: threading_example3_solution.py    MIT License 5 votes
def calculateMass(threadName, delay, counter):
    for i in range(counter):
        time.sleep(delay)
        print('{0}: on iteration {1}'.format(threadName, i+1))

    # Mass has been calculated. Store final computation.
    # If there are 3 active threads, we know the density 
    # hasn't been calculated yet, so wait.
    while threading.active_count() == 3:
        time.sleep(0.5)

    calculations['mass'] = calculations['density'] / 2
    print('{} finished calculating mass...'.format(threadName)) 
Example 64
Project: serializekiller   Author: johndekroon   File: serializekiller.py    The Unlicense 4 votes
def worker():
    global threads
    content = read_file(args.file)

    for line in content:
        if ":" in line:
            item = line.strip().split(':')
            if item[0] not in target_list:
                target_list[item[0]] = [item[1]]
            else:
                target_list[item[0]].append(item[1])
        else:
            if line.strip() not in target_list:
                target_list[line.strip()] = []

    print str(len(target_list)) + " targets found."
    total_jobs = len(target_list)
    current = 0

    for host in target_list:
        current += 1
        while threading.active_count() > threads:
            mutex.acquire()
            print " ! We have more threads running than allowed. Current: {} Max: {}.".format(threading.active_count(), threads)
            mutex.release()
            if threads < 100:
                threads += 1
            sys.stdout.flush()
            time.sleep(2)
        mutex.acquire()
        print " # Starting test {} of {} on {}.".format(current, total_jobs, host)
        sys.stdout.flush()
        mutex.release()
        threading.Thread(target=nmap, args=(host, False, 1)).start()

    # We're done!
    while threading.active_count() > 2:
        mutex.acquire()
        print " # Waiting for everybody to come back. Still {} active.".format(threading.active_count() - 1)
        sys.stdout.flush()
        mutex.release()
        time.sleep(4)

    mutex.acquire()
    print
    print " => scan done. " + str(shellCounter) + " vulnerable hosts found."
    print "Execution time: " + str(datetime.now() - startTime)
    mutex.release()
    exit() 
Example 65
Project: sd-agent-core-plugins   Author: serverdensity   File: test_ssh.py    BSD 3-Clause "New" or "Revised" License 4 votes
def test_ssh(self):
        config = {
            'instances': [{
                'host': 'io.netgarage.org',
                'port': 22,
                'username': 'level1',
                'password': 'level1',
                'sftp_check': False,
                'private_key_file': '',
                'add_missing_keys': True
            }, {
                'host': 'localhost',
                'port': 22,
                'username': 'test',
                'password': 'yodawg',
                'sftp_check': False,
                'private_key_file': '',
                'add_missing_keys': True
            }, {
                'host': 'wronghost',
                'port': 22,
                'username': 'datadog01',
                'password': 'abcd',
                'sftp_check': False,
                'private_key_file': '',
                'add_missing_keys': True
            },
            ]
        }

        agentConfig = {}
        self.check = load_check('ssh_check', config, agentConfig)

        nb_threads = threading.active_count()

        # Testing that connection will work
        self.check.check(config['instances'][0])

        service = self.check.get_service_checks()
        self.assertEqual(service[0].get('status'), AgentCheck.OK)
        self.assertEqual(service[0].get('message'), None)
        self.assertEqual(service[0].get('tags'), ["instance:io.netgarage.org-22"])

        # Testing that bad authentication will raise exception
        self.assertRaises(Exception, self.check.check, config['instances'][1])
        # Testing that bad hostname will raise exception
        self.assertRaises(Exception, self.check.check, config['instances'][2])
        service_fail = self.check.get_service_checks()
        # Check failure status
        self.assertEqual(service_fail[0].get('status'), AgentCheck.CRITICAL)
        # Check that we've closed all connections, if not we're leaking threads
        self.assertEqual(nb_threads, threading.active_count()) 
Example 66
Project: ACE   Author: IntegralDefense   File: network_semaphore.py    Apache License 2.0 4 votes
def server_loop(self):
        while not self.shutdown:
            try:
                self.server_socket = socket.socket() # defaults to AF_INET, SOCK_STREAM
                self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                self.server_socket.bind((self.bind_address, self.bind_port))
                self.server_socket.listen(5)

                while not self.shutdown:
                    logging.debug("waiting for next connection on {}:{}".format(self.bind_address, self.bind_port))
                    client_socket, remote_address = self.server_socket.accept()
                    remote_host, remote_port = remote_address
                    logging.info("got connection from {0}:{1}".format(remote_host, remote_port))
                    if self.shutdown:
                        return

                    allowed = False
                    remote_host_ipv4 = ipaddress.ip_address(remote_host)
                    for ipv4_network in self.allowed_ipv4:
                        if remote_host_ipv4 in ipv4_network:
                            allowed = True
                            break

                    if not allowed:
                        logging.warning("blocking invalid remote host {0}".format(remote_host))
                        try:
                            client_socket.close()
                        except:
                            pass

                        continue

                    # start a thread to deal with this client
                    t = Thread(target=self.client_loop, args=(remote_host, remote_port, client_socket), name="Client {0}".format(remote_host))
                    t.daemon = True
                    t.start()
                    #record_metric(METRIC_THREAD_COUNT, threading.active_count())
                    
            except Exception as e:
                logging.error("uncaught exception: {0}".format(str(e)))
                report_exception()

                # TODO clean up socket stuff to restart

                if not self.shutdown:
                    time.sleep(1) 
Example 67
Project: vnpy_crypto   Author: birforce   File: test_memory_leaks.py    MIT License 4 votes
def execute(self, fun, *args, **kwargs):
        """Test a callable."""
        def call_many_times():
            for x in xrange(loops):
                self._call(fun, *args, **kwargs)
            del x
            gc.collect()

        tolerance = kwargs.pop('tolerance_', None) or self.tolerance
        loops = kwargs.pop('loops_', None) or self.loops
        retry_for = kwargs.pop('retry_for_', None) or self.retry_for

        # warm up
        for x in range(10):
            self._call(fun, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        self.assertEqual(thisproc.children(), [])

        # Get 2 distinct memory samples, before and after having
        # called fun repeadetly.
        # step 1
        call_many_times()
        mem1 = self._get_mem()
        # step 2
        call_many_times()
        mem2 = self._get_mem()

        diff1 = mem2 - mem1
        if diff1 > tolerance:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase
            # anymore.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            ncalls = 0
            stop_at = time.time() + retry_for
            while time.time() <= stop_at:
                self._call(fun, *args, **kwargs)
                ncalls += 1

            del stop_at
            gc.collect()
            mem3 = self._get_mem()
            diff2 = mem3 - mem2

            if mem3 > mem2:
                # failure
                extra_proc_mem = bytes2human(diff1 + diff2)
                print("exta proc mem: %s" % extra_proc_mem, file=sys.stderr)
                msg = "+%s after %s calls, +%s after another %s calls, "
                msg += "+%s extra proc mem"
                msg = msg % (
                    bytes2human(diff1), loops, bytes2human(diff2), ncalls,
                    extra_proc_mem)
                self.fail(msg) 
Example 68
Project: cb-response-smb1-utility   Author: redcanaryco   File: smb1-util.py    MIT License 4 votes
def process_sensors(cb, query_base=None, update=False, max_threads=None,
                    debug=False, ignore_hosts=None):
    """Fetch all sensor objects associated with the cb server instance, and
    keep basic state as they are processed.
    """

    if query_base is not None:
        query_result = cb.select(Sensor).where(query_base)
    else:
        query_result = cb.select(Sensor)
    query_result_len = len(query_result)

    q = Queue()

    # unique_sensors exists because we sometimes see the same sensor ID
    # returned multiple times in the paginated query results for
    # cb.select(Sensor).  
    unique_sensors = set()

    for sensor in query_result:
        if sensor.id in unique_sensors:
            continue
        else:
            unique_sensors.add(sensor.id)
            q.put(sensor)

    threads = []
    while not q.empty():
        active_threads = threading.active_count()
        available_threads = max_threads - active_threads

        if available_threads > 0:
            for i in range(available_threads):
                sensor = q.get()
                t = threading.Thread(target=process_sensor, 
                                    args=(cb, sensor, update, debug, ignore_hosts))
                threads.append(t)
                t.start()

                if debug: log_info('Threads: {0}\tQ Size: {1}'.format(threading.active_count(), q.qsize()))

                if q.empty():
                    break
        else:
            if debug: log_info('No available threads. Waiting.')
            sleep(1) 
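
The dispatch loop above computes max_threads - threading.active_count() to decide how many new workers it may start. A stripped-down sketch of the same throttling idea, with a hypothetical work() function standing in for process_sensor:

import queue
import threading
import time

def work(item):
    # Placeholder for the per-item work (process_sensor in the example above).
    time.sleep(0.1)

def drain(q, max_threads=8):
    # Start new workers only while the number of live threads, which
    # includes the main thread, stays below the configured ceiling.
    while not q.empty():
        available = max_threads - threading.active_count()
        if available <= 0:
            time.sleep(1)          # all slots busy; wait and re-check
            continue
        for _ in range(available):
            if q.empty():
                break
            item = q.get()
            threading.Thread(target=work, args=(item,)).start()

if __name__ == "__main__":
    q = queue.Queue()
    for i in range(20):
        q.put(i)
    drain(q, max_threads=4)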
Example 69
Project: 4scanner   Author: pboardman   File: thread_scanner.py    MIT License 4 votes vote down vote up
def scan(self):
        """
        Start the scanning/download process.
        """
        while True:
            if self.quota_mb:
                self.check_quota()

            self.logger.info("Searching threads...")

            try:
                json_file = json.load(open(self.keywords_file))
            except ValueError:
                self.logger.critical("Your JSON file is malformed. Quitting.")
                exit(1)

            for search in json_file["searches"]:
                # Getting imageboard to search
                chan = self.get_imageboard(search)
                # Checking conditions
                condition = self.get_condition(search)
                # Check if we need to check for duplicates when downloading
                dupe_check = self.get_check_duplicate(search)
                # Getting output folder name
                folder_name = search["folder_name"]
                # Get tag list (if any)
                tag_list = self.get_tag_list(search)
                # if this is true we will search only the subject field
                subject_only = self.get_subject_only(search)
                board = search["board"]
                keywords = self.get_keyword(search)

                try:
                    catalog_json = self.get_catalog_json(board, chan)

                    for keyword in keywords:
                        threads_id = self.scan_thread(keyword, catalog_json, subject_only)

                        for thread_id in list(set(threads_id)):
                            if thread_id not in currently_downloading and not self.was_downloaded(thread_id):
                                self.download_thread(thread_id, chan, board,
                                                folder_name, self.output,
                                                condition, dupe_check,
                                                tag_list)
                            # Used to keep track of what is currently downloading
                            currently_downloading.append(thread_id)
                except urllib.error.HTTPError as err:
                    self.logger.warning("Error while opening {0} catalog page. "
                                "Retrying during next scan.".format(board))
                    pass

            active_downloads = threading.active_count()-1
            self.logger.info("{0} threads currently downloading.".format(active_downloads))
            self.logger.info("Searching again in {0} minutes!".format(str(int(self.wait_time / 60))))
            time.sleep(self.wait_time) 
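
Since threading.active_count() always counts the main thread as well, subtracting one, as the example does, gives the number of worker threads still running. A tiny sketch with a placeholder download() worker:

import threading
import time

def download(n):
    # Stand-in for a download worker.
    time.sleep(n)

if __name__ == "__main__":
    for n in (1, 2, 3):
        threading.Thread(target=download, args=(n,)).start()
    # active_count() counts the main thread too, so subtract one
    # to report only the downloads that are still in flight.
    print("{0} threads currently downloading.".format(threading.active_count() - 1))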
Example 70
Project: teleport   Author: tp4a   File: test_memory_leaks.py    Apache License 2.0 4 votes vote down vote up
def execute(self, fun, *args, **kwargs):
        """Test a callable."""
        def call_many_times():
            for x in xrange(loops):
                self._call(fun, *args, **kwargs)
            del x
            gc.collect()

        tolerance = kwargs.pop('tolerance_', None) or self.tolerance
        loops = kwargs.pop('loops_', None) or self.loops
        retry_for = kwargs.pop('retry_for_', None) or self.retry_for

        # warm up
        for x in range(10):
            self._call(fun, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        self.assertEqual(thisproc.children(), [])

        # Get 2 distinct memory samples, before and after having
        # called fun repeatedly.
        # step 1
        call_many_times()
        mem1 = self._get_mem()
        # step 2
        call_many_times()
        mem2 = self._get_mem()

        diff1 = mem2 - mem1
        if diff1 > tolerance:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase
            # anymore.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            ncalls = 0
            stop_at = time.time() + retry_for
            while time.time() <= stop_at:
                self._call(fun, *args, **kwargs)
                ncalls += 1

            del stop_at
            gc.collect()
            mem3 = self._get_mem()
            diff2 = mem3 - mem2

            if mem3 > mem2:
                # failure
                extra_proc_mem = bytes2human(diff1 + diff2)
                print("extra proc mem: %s" % extra_proc_mem, file=sys.stderr)
                msg = "+%s after %s calls, +%s after another %s calls, "
                msg += "+%s extra proc mem"
                msg = msg % (
                    bytes2human(diff1), loops, bytes2human(diff2), ncalls,
                    extra_proc_mem)
                self.fail(msg) 
Example 71
Project: deskOrg   Author: saleguas   File: test_memory_leaks.py    MIT License 4 votes vote down vote up
def execute(self, fun, *args, **kwargs):
        """Test a callable."""
        def call_many_times():
            for x in xrange(loops):
                self._call(fun, *args, **kwargs)
            del x
            gc.collect()

        tolerance = kwargs.pop('tolerance_', None) or self.tolerance
        loops = kwargs.pop('loops_', None) or self.loops
        retry_for = kwargs.pop('retry_for_', None) or self.retry_for

        # warm up
        for x in range(10):
            self._call(fun, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        self.assertEqual(thisproc.children(), [])

        # Get 2 distinct memory samples, before and after having
        # called fun repeatedly.
        # step 1
        call_many_times()
        mem1 = self._get_mem()
        # step 2
        call_many_times()
        mem2 = self._get_mem()

        diff1 = mem2 - mem1
        if diff1 > tolerance:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase
            # anymore.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            ncalls = 0
            stop_at = time.time() + retry_for
            while time.time() <= stop_at:
                self._call(fun, *args, **kwargs)
                ncalls += 1

            del stop_at
            gc.collect()
            mem3 = self._get_mem()
            diff2 = mem3 - mem2

            if mem3 > mem2:
                # failure
                extra_proc_mem = bytes2human(diff1 + diff2)
                print("extra proc mem: %s" % extra_proc_mem, file=sys.stderr)
                msg = "+%s after %s calls, +%s after another %s calls, "
                msg += "+%s extra proc mem"
                msg = msg % (
                    bytes2human(diff1), loops, bytes2human(diff2), ncalls,
                    extra_proc_mem)
                self.fail(msg) 
Example 72
Project: FancyWord   Author: EastonLee   File: test_memory_leaks.py    GNU General Public License v3.0 4 votes vote down vote up
def execute(self, fun, *args, **kwargs):
        """Test a callable."""
        def call_many_times():
            for x in xrange(loops):
                self._call(fun, *args, **kwargs)
            del x
            gc.collect()

        tolerance = kwargs.pop('tolerance_', None) or self.tolerance
        loops = kwargs.pop('loops_', None) or self.loops
        retry_for = kwargs.pop('retry_for_', None) or self.retry_for

        self._call(fun, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)

        # Get 2 distinct memory samples, before and after having
        # called fun repeatedly.
        # step 1
        call_many_times()
        mem1 = self._get_mem()
        # step 2
        call_many_times()
        mem2 = self._get_mem()

        diff1 = mem2 - mem1
        if diff1 > tolerance:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase
            # anymore.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            ncalls = 0
            stop_at = time.time() + retry_for
            while time.time() <= stop_at:
                self._call(fun, *args, **kwargs)
                ncalls += 1

            del stop_at
            gc.collect()
            mem3 = self._get_mem()
            diff2 = mem3 - mem2

            if mem3 > mem2:
                # failure
                self.fail("+%s after %s calls, +%s after another %s calls" % (
                    bytes2human(diff1),
                    loops,
                    bytes2human(diff2),
                    ncalls
                )) 
Example 73
Project: smarthome   Author: smarthomeNG   File: smarthome.py    GNU General Public License v3.0 4 votes vote down vote up
def stop(self, signum=None, frame=None):
        """
        This method is used to stop SmartHomeNG and all its threads
        """
        self.shng_status = {'code': 31, 'text': 'Stopping'}

        self.alive = False
        self._logger.info("stop: Number of Threads: {}".format(threading.activeCount()))

        self.items.stop()
        self.scheduler.stop()
        if self.plugins is not None:
            self.plugins.stop()
        if self.modules is not None:
            self.modules.stop()
        self.connections.close()

        self.shng_status = {'code': 32, 'text': 'Stopping: Stopping threads'}

        for thread in threading.enumerate():
            if thread.name != 'Main':
                try:
                    thread.join(1)
                except Exception as e:
                    pass

        if threading.active_count() > 1:
            header_logged = False
            for thread in threading.enumerate():
                if thread.name != 'Main' and thread.name[0] !=  '_':
                    if not header_logged:
                        self._logger.warning("The following threads have not been terminated properly by their plugins (please report to the plugin's author):")
                        header_logged = True
                    self._logger.warning("-Thread: {}, still alive".format(thread.name))
            if header_logged:
                self._logger.warning("SmartHomeNG stopped")
        else:
            self._logger.warning("SmartHomeNG stopped")

        self.shng_status = {'code': 33, 'text': 'Stopped'}

        lib.daemon.remove_pidfile(PIDFILE)

        logging.shutdown()
        exit() 
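
The shutdown logic above gives every thread a short join() window and then walks threading.enumerate() to warn about anything still alive. A compact sketch of that pattern on its own; the logger setup and the one-second timeout are assumptions, not SmartHomeNG's code.

import logging
import threading

logging.basicConfig(level=logging.INFO)

def report_stragglers(join_timeout=1.0):
    # Give every non-main thread a short chance to finish (call this from
    # the main thread), then warn about whatever is still alive.
    for thread in threading.enumerate():
        if thread is not threading.main_thread():
            thread.join(join_timeout)

    if threading.active_count() > 1:
        for thread in threading.enumerate():
            if thread is not threading.main_thread():
                logging.warning("Thread %s is still alive", thread.name)
    else:
        logging.info("all worker threads terminated")

if __name__ == "__main__":
    report_stragglers()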
Example 74
Project: ProcessFS   Author: mherrmann   File: test_memory_leaks.py    MIT License 4 votes vote down vote up
def execute(self, fun, *args, **kwargs):
        """Test a callable."""
        def call_many_times():
            for x in xrange(loops):
                self._call(fun, *args, **kwargs)
            del x
            gc.collect()

        tolerance = kwargs.pop('tolerance_', None) or self.tolerance
        loops = kwargs.pop('loops_', None) or self.loops
        retry_for = kwargs.pop('retry_for_', None) or self.retry_for

        # warm up
        for x in range(10):
            self._call(fun, *args, **kwargs)
        self.assertEqual(gc.garbage, [])
        self.assertEqual(threading.active_count(), 1)
        self.assertEqual(thisproc.children(), [])

        # Get 2 distinct memory samples, before and after having
        # called fun repeatedly.
        # step 1
        call_many_times()
        mem1 = self._get_mem()
        # step 2
        call_many_times()
        mem2 = self._get_mem()

        diff1 = mem2 - mem1
        if diff1 > tolerance:
            # This doesn't necessarily mean we have a leak yet.
            # At this point we assume that after having called the
            # function so many times the memory usage is stabilized
            # and if there are no leaks it should not increase
            # anymore.
            # Let's keep calling fun for 3 more seconds and fail if
            # we notice any difference.
            ncalls = 0
            stop_at = time.time() + retry_for
            while time.time() <= stop_at:
                self._call(fun, *args, **kwargs)
                ncalls += 1

            del stop_at
            gc.collect()
            mem3 = self._get_mem()
            diff2 = mem3 - mem2

            if mem3 > mem2:
                # failure
                extra_proc_mem = bytes2human(diff1 + diff2)
                print("extra proc mem: %s" % extra_proc_mem, file=sys.stderr)
                msg = "+%s after %s calls, +%s after another %s calls, "
                msg += "+%s extra proc mem"
                msg = msg % (
                    bytes2human(diff1), loops, bytes2human(diff2), ncalls,
                    extra_proc_mem)
                self.fail(msg) 
Example 75
Project: tvalacarta   Author: tvalacarta   File: download_and_play.py    GNU General Public License v3.0 4 votes vote down vote up
def download_and_play(url,file_name,download_path,show_dialog=True):
    # Launch thread
    logger.info("[download_and_play.py] Active threads "+str(threading.active_count()))
    logger.info("[download_and_play.py] "+repr(threading.enumerate()))
    logger.info("[download_and_play.py] Starting download thread...")
    download_thread = DownloadThread(url,file_name,download_path)
    download_thread.start()
    logger.info("[download_and_play.py] Download thread started")
    logger.info("[download_and_play.py] Active threads "+str(threading.active_count()))
    logger.info("[download_and_play.py] "+repr(threading.enumerate()))

    # Wait
    logger.info("[download_and_play.py] Waiting...")

    while True:
        cancelled=False

        if show_dialog:
            dialog = xbmcgui.DialogProgress()
            dialog.create('Descargando...', 'Cierra esta ventana para empezar la reproducción')
            dialog.update(0)

            while not cancelled and download_thread.is_alive():
                dialog.update( download_thread.get_progress() , "Cancela esta ventana para empezar la reproducción", "Velocidad: "+str(int(download_thread.get_speed()/1024))+" KB/s "+str(download_thread.get_actual_size())+"MB de "+str(download_thread.get_total_size())+"MB" , "Tiempo restante: "+str( downloadtools.sec_to_hms(download_thread.get_remaining_time())) )
                xbmc.sleep(1000)

                if dialog.iscanceled():
                    cancelled=True
                    break

            dialog.close()
        else:
            xbmc.executebuiltin((u'XBMC.Notification("Iniciando", "Iniciando descarga en segundo plano...", 300)'))
            xbmc.sleep(3000)

        logger.info("[download_and_play.py] End of waiting")

        # Launch the player
        player = CustomPlayer()
        player.set_download_thread(download_thread)
        player.PlayStream( download_thread.get_file_name() )

        # End of playback
        logger.info("[download_and_play.py] End of playback")

        if player.is_stopped():
            logger.info("[download_and_play.py] Stopped by the user")
            break
        else:
            if not download_thread.is_alive():
                logger.info("[download_and_play.py] The download has finished")
                break
            else:
                logger.info("[download_and_play.py] Download still in progress")

    # When the player finishes, stop the download if it is still running
    logger.info("[download_and_play.py] Download thread alive="+str(download_thread.is_alive()))
    if download_thread.is_alive():
        logger.info("[download_and_play.py] Killing download thread")
        download_thread.force_stop() 
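
The example above logs threading.active_count() and threading.enumerate() before and after launching the download thread, then polls is_alive() while waiting. A minimal sketch of that bookkeeping, with a sleep standing in for the real download:

import logging
import threading
import time

logging.basicConfig(level=logging.INFO)

def background_job():
    time.sleep(2)   # stand-in for the download loop

if __name__ == "__main__":
    logging.info("Active threads before start: %d %r",
                 threading.active_count(), threading.enumerate())
    worker = threading.Thread(target=background_job, name="downloader")
    worker.start()
    logging.info("Active threads after start: %d %r",
                 threading.active_count(), threading.enumerate())

    # Poll the worker the same way the example polls download_thread.
    while worker.is_alive():
        time.sleep(0.5)
    logging.info("worker finished; %d thread(s) remain", threading.active_count())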
Example 76
Project: freight   Author: getsentry   File: execute_task.py    Apache License 2.0 4 votes vote down vote up
def execute_deploy(deploy_id):
    logging.debug(
        "ExecuteDeploy fired with %d active thread(s)", threading.active_count()
    )

    with lock(redis, f"deploy:{deploy_id}", timeout=5):
        deploy = Deploy.query.get(deploy_id)
        task = Task.query.get(deploy.task_id)
        if not task:
            logging.warning("ExecuteDeploy fired with missing Deploy(id=%s)", deploy_id)
            return

        if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
            logging.warning(
                "ExecuteDeploy fired with finished Deploy(id=%s)", deploy_id
            )
            return

        task.date_started = datetime.utcnow()
        task.status = TaskStatus.in_progress
        db.session.add(task)
        db.session.commit()

    send_task_notifications(task, NotifierEvent.TASK_STARTED)

    provider_config = task.provider_config

    # wipe the log in case this is a retry
    LogChunk.query.filter(LogChunk.task_id == task.id).delete()

    taskrunner = TaskRunner(
        task=task,
        timeout=provider_config.get("timeout", current_app.config["DEFAULT_TIMEOUT"]),
        read_timeout=provider_config.get(
            "read_timeout", current_app.config["DEFAULT_READ_TIMEOUT"]
        ),
    )
    taskrunner.start()
    taskrunner.wait()

    # reload the task from the database due to subprocess changes
    db.session.expire(task)
    db.session.refresh(task)

    if task.status in (TaskStatus.pending, TaskStatus.in_progress):
        logging.error("Task(id=%s) did not finish cleanly", task.id)
        task.status = TaskStatus.failed
        task.date_finished = datetime.utcnow()
        db.session.add(task)
        db.session.commit()

    send_task_notifications(task, NotifierEvent.TASK_FINISHED) 
Example 77
Project: zipdictcracker   Author: himadriganguly   File: zipdictcracker.py    GNU General Public License v3.0 4 votes vote down vote up
def main():
	parser = argparse.ArgumentParser('Dictionary Based Zip File Password Cracker')
	parser.add_argument('file', type=str, help='The Zip File Name. You can also provide the full path of the ZipFile')
	parser.add_argument('dictionary', type=str, help='The Dictionary File Name. You can also provide the full path of the Dictionary File')
	args = parser.parse_args()
	
	global found

	try:
		print('===========================================')
		print('\nLoading Files\n')
		print('===========================================')
		zFile = zipfile.ZipFile(args.file)
		passFile = open(args.dictionary, 'rb')
	except Exception as e:
		print(e)
		sys.exit(1)
	
	print('\nFiles Loaded\n')
	print('===========================================')
	print('\nCracking Password Using Dictionary Attack\n')
	print('===========================================\n')
	lock = threading.Lock()
	for line in passFile:
		password = line.strip()
		# print('{}\r'.format(password))
		if found != True:
			t = Thread(target=extractFile, args=(zFile, password, lock, passFile))
			t.start()
	
	while (threading.active_count() > 1):
		if threading.active_count() == 1 and found != True:
			print(found)
			print('===========================================')
			print('\nPassword Not Found In Dictionary\n')
			print('===========================================')
			sys.exit()
		elif threading.active_count() == 1 and found == True:
			passFile.close()
			print('\n===========================================\n')
			print('Exiting From Application\n')
			print('===========================================\n') 
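
The cracker above starts one thread per candidate password and then watches threading.active_count() until only the main thread is left. A condensed sketch of that wait-and-report pattern, using a hypothetical try_password() in place of extractFile():

import threading
import time

found = False

def try_password(password):
    # Stand-in for extractFile(): flag success when the right password hits.
    global found
    if password == "secret":
        found = True

if __name__ == "__main__":
    for candidate in ("123456", "letmein", "secret", "qwerty"):
        threading.Thread(target=try_password, args=(candidate,)).start()

    # Wait until only the main thread is left, then report the outcome once.
    while threading.active_count() > 1:
        time.sleep(0.1)
    print("Password found" if found else "Password not found in dictionary")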
Example 78
Project: Midas   Author: Mazuh   File: midas_scraper.py    MIT License 4 votes vote down vote up
def report_employees_details(employees_basics_filename=EMPLOYEES_BASICS_FILENAME,
                             target_details_ds_filename=EMPLOYEES_DETAILS_DS_FILENAME):
    """
    Scrapes details data of each employee and puts them on a csv file.
    The employees basics list should be already stored in a
    local file based on a given filename param.
    """
    # ready...
    employees_basics = None
    with open(employees_basics_filename.format(_time()), 'r') as employees_basics_file:
        employees_basics = json.loads(employees_basics_file.read())

    with open(target_details_ds_filename.format(_time()), 'w', newline='') as details_ds_file:
        ds_writer = csv.DictWriter(details_ds_file, fieldnames=[
            'index',
            'name',
            'cpf',
            'campus',
            'class',
            'situationBond',
            'organizationalUnit',
            'campus',
            'hasTrustPosition',
            'employeeSince',
            'urlRemunerationSufix'
        ])

        ds_writer.writeheader()

        # aim...
        scrapers_q = Queue()

        for employee_index in employees_basics:
            scrapers_q.put(threading.Thread(
                target=_scrap_employee_details,
                args=(employees_basics, employee_index, ds_writer)
            ))

        # fire!
        while not scrapers_q.empty():
            if threading.active_count() == 1:
                for _ in range(MAX_HTTP_CONNECTIONS):
                    if not scrapers_q.empty():
                        scrapers_q.get().start()
            else:
                time.sleep(1)

        while threading.active_count() != 1:
            print('Waiting for remaining requests...')
            time.sleep(1)

        print('Done. Dataset as CSV successfully assembled.') 
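
The dispatcher above only fires a new batch of scraper threads when threading.active_count() == 1, that is, once the previous batch has fully drained. A self-contained sketch of that batching scheme; fetch() and the batch size are placeholders:

import queue
import threading
import time

MAX_HTTP_CONNECTIONS = 5   # assumed batch size, mirroring the example above

def fetch(i):
    # Stand-in for one HTTP scrape.
    time.sleep(0.2)

if __name__ == "__main__":
    pending = queue.Queue()
    for i in range(17):
        pending.put(threading.Thread(target=fetch, args=(i,)))

    # Fire a new batch only once the previous one has fully drained,
    # i.e. when the main thread is the only one still counted.
    while not pending.empty():
        if threading.active_count() == 1:
            for _ in range(MAX_HTTP_CONNECTIONS):
                if pending.empty():
                    break
                pending.get().start()
        else:
            time.sleep(0.5)

    while threading.active_count() != 1:
        time.sleep(0.5)
    print("Done.")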
Example 79
Project: PCTRL   Author: USTC-INFINITELAB   File: pof_01.py    Apache License 2.0 4 votes vote down vote up
def run (self):
        while core.running:
            #print ('DeferredSender running...','thread_count:',threading.active_count()) # cc:
    
            with self._lock:
                cons = self._dataForConnection.keys()
    
            rlist, wlist, elist = select.select([self._waker], cons, cons, 5)
            if not core.running: break
    
            with self._lock:
                if len(rlist) > 0:
                    self._waker.pongAll()
        
                for con in elist:
                    try:
                        del self._dataForConnection[con]
                    except:
                        pass
        
                for con in wlist:
                    try:
                        alldata = self._dataForConnection[con]
                        while len(alldata):
                            data = alldata[0]
                            try:
                                l = con.sock.send(data)
                                if l != len(data):
                                    alldata[0] = data[l:]
                                    break
                                del alldata[0]
                            except socket.error as (errno, strerror):
                                if errno != EAGAIN:
                                    con.msg("DeferredSender/Socket error: " + strerror)
                                    log.error("pof_01.DeferredSender.run --- disconnect")  #CC
                                    con.disconnect()
                                    del self._dataForConnection[con]
                                break
                            except:
                                con.msg("Unknown error doing deferred sending")
                                break
                        if len(alldata) == 0:
                            try:
                                del self._dataForConnection[con]
                                if len(self._dataForConnection) == 0:
                                    self.sending = False
                                    break
                            except:
                                pass
                    except: 
Example 80
Project: PCTRL   Author: USTC-INFINITELAB   File: of_01.py    Apache License 2.0 4 votes vote down vote up
def run (self):
        while core.running:
          print ('DeferredSender running...','thread_count:',threading.active_count()) #print information
    
          with self._lock:
            cons = self._dataForConnection.keys()
    
          rlist, wlist, elist = select.select([self._waker], cons, cons, 5)
          if not core.running: break
    
          with self._lock:
            if len(rlist) > 0:
              self._waker.pongAll()
    
            for con in elist:
              try:
                del self._dataForConnection[con]
              except:
                pass
    
            for con in wlist:
              try:
                alldata = self._dataForConnection[con]
                while len(alldata):
                  data = alldata[0]
                  try:
                    l = con.sock.send(data)
                    if l != len(data):
                      alldata[0] = data[l:]
                      break
                    del alldata[0]
                  except socket.error as (errno, strerror):
                    if errno != EAGAIN:
                      con.msg("DeferredSender/Socket error: " + strerror)
                      con.disconnect()
                      del self._dataForConnection[con]
                    break
                  except:
                    con.msg("Unknown error doing deferred sending")
                    break
                if len(alldata) == 0:
                  try:
                    del self._dataForConnection[con]
                    if len(self._dataForConnection) == 0:
                      self.sending = False
                      break
                  except:
                    pass
              except: