Python threading.Lock() Examples

The following are 30 code examples of threading.Lock(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out all available functions/classes of the module threading.
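Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the most common idiom: acquiring the lock with a with statement, so it is released even if an exception is raised, and using it to guard a shared counter across threads.

import threading

counter = 0
counter_lock = threading.Lock()

def increment(times):
    global counter
    for _ in range(times):
        with counter_lock:  # acquire the lock; it is released automatically on exit
            counter += 1

threads = [threading.Thread(target=increment, args=(10000,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter)  # always 40000, because every increment ran under the lock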
Example #1
Source File: Lego.py    From Legobot with GNU General Public License v2.0
def __init__(
            self, baseplate, lock: threading.Lock, log_file=None, acl=None):
        """
        :param baseplate: the baseplate Lego, which should be \
                          the same instance of Lego for all Legos
        :param lock: a threading lock, which should be the same \
                     instance of threading.Lock for all Legos
        """
        super().__init__()
        if not lock:
            raise LegoError("Lock expected but not provided!")
        self.baseplate = baseplate
        self.children = []
        self.lock = lock
        self.log_file = log_file
        self.acl = acl 
Example #2
Source File: webCrawler.py    From Learning-Concurrency-in-Python with MIT License
def main():
  print("Starting our Web Crawler")
  baseUrl = input("Website > ")
  numberOfThreads = input("No Threads > ")

  linksToCrawl = queue.Queue()
  urlLock = threading.Lock()
  linksToCrawl.put(baseUrl)
  haveVisited = []
  crawlers = []
  errorLinks = []

  for i in range(int(numberOfThreads)):
    crawler = Crawler(baseUrl, linksToCrawl, haveVisited, errorLinks, urlLock)
    crawler.start()
    crawlers.append(crawler)

  for crawler in crawlers:
    crawler.join()

  print("Total Number of Pages Visited {}".format(len(haveVisited)))
  print("Total Number of Pages with Errors {}".format(len(errorLinks))) 
Example #3
Source File: scheduling.py    From me-ica with GNU Lesser General Public License v2.1
def __init__(self, result_container=None, verbose=False):
        """Initialize the scheduler.

result_container -- Instance of ResultContainer that is used to store
            the results (default is None, in which case an
            OrderedResultContainer is used).
        verbose -- If True then status messages will be printed to sys.stdout.
        """
        if result_container is None:
            result_container = OrderedResultContainer()
        self.result_container = result_container
        self.verbose = verbose
        self._n_open_tasks = 0  # number of tasks that are currently running
        # count the number of submitted tasks, also used for the task index
        self._task_counter = 0
        self._lock = threading.Lock()
        self._last_callable = None  # last callable is stored
        # task index of the _last_callable, can be *.5 if updated between tasks
        self._last_callable_index = -1.0

    ## public read only properties ## 
Example #4
Source File: change_notify.py    From smbprotocol with MIT License
def __init__(self, open):
        """
        A class that encapsulates a FileSystemWatcher over SMB. It is designed to make it easy to run the watcher in
        the background and provide an event that is fired when the server notifies that a change has occurred. It is
        up to the caller to act on that event through their own synchronous or asynchronous implementation.

        :param open: The Open() class of a directory to watch for change notifications.
        """
        self.open = open
        self.response_event = threading.Event()

        self._t_on_response = threading.Thread(target=self._on_response)
        self._t_on_response.daemon = True
        self._t_exc = None
        self._request = None
        self._file_actions = None
        self._result_lock = threading.Lock()  # Used to ensure the result is only processed once 
Example #5
Source File: tracer.py    From opentracing-python with Apache License 2.0
def __init__(self, scope_manager=None):
        """Initialize a MockTracer instance."""

        scope_manager = ThreadLocalScopeManager() \
            if scope_manager is None else scope_manager
        super(MockTracer, self).__init__(scope_manager)

        self._propagators = {}
        self._finished_spans = []
        self._spans_lock = Lock()

        # Simple-as-possible (consecutive for repeatability) id generation.
        self._next_id = 0
        self._next_id_lock = Lock()

        self._register_required_propagators() 
Example #6
Source File: misphere.py    From pysphere with MIT License
def send(self, msg_id, params=None, token=None):
        if msg_id not in self.session.locks:
            self.session.locks[msg_id] = threading.Lock()
        print("[LOCK] ", msg_id)
        if self.session.locks[msg_id].locked():
            return 0
        else:
            self.session.locks[msg_id].acquire()
        if token is None:
            token = self.session.token
        payload = self.create_payload(msg_id, token, params)
        data = json.dumps(payload).encode()
        print(data)
        timediff = time.time() - self.last_send
        if timediff < 0.5:
            time.sleep(0.5 - timediff)
        self.last_send = time.time()
        return self.socket.send(data) 
Example #7
Source File: soundqueue.py    From pymumble with GNU General Public License v3.0
def __init__(self, mumble_object):
        self.mumble_object = mumble_object
        
        self.queue = deque()
        self.start_sequence = None
        self.start_time = None

        self.receive_sound = True
        
        self.lock = Lock()
        
        # to be safe, create every supported decoder for all users
        # sometimes clients keep using a codec for a while after the server requests another one...
        self.decoders = {
                    PYMUMBLE_AUDIO_TYPE_CELT_ALPHA: pycelt.CeltDecoder(PYMUMBLE_SAMPLERATE, 1, "0.7"),
                    PYMUMBLE_AUDIO_TYPE_CELT_BETA: pycelt.CeltDecoder(PYMUMBLE_SAMPLERATE, 1, "0.11"),
                    PYMUMBLE_AUDIO_TYPE_OPUS: pyopus.OpusDecoder(PYMUMBLE_SAMPLERATE, 1)                    
        } 
Example #8
Source File: soundoutput.py    From pymumble with GNU General Public License v3.0
def __init__(self, mumble_object, audio_per_packet, bandwidth):
        """
        audio_per_packet=packet audio duration in sec
        bandwidth=maximum total outgoing bandwidth
        """
        self.mumble_object = mumble_object
        
        self.Log = self.mumble_object.Log
        
        self.pcm = ""
        self.lock = threading.Lock()
        
        self.codec = None  # codec currently requested by the server
        self.encoder = None  # codec instance currently used to encode
        self.encoder_framesize = None  # size of an audio frame for the current codec (OPUS=audio_per_packet, CELT=0.01s)
        
        self.set_audio_per_packet(audio_per_packet)
        self.set_bandwidth(bandwidth)

        self.codec_type = None  # codec type number to be used in audio packets
        self.target = 0  # target is not implemented yet, so always 0
        
        self.sequence_start_time = 0  # time of sequence 1
        self.sequence_last_time = 0  # time of the last emitted packet
        self.sequence = 0  # current sequence 
Example #9
Source File: span.py    From opentracing-python with Apache License 2.0
def __init__(
            self,
            tracer,
            operation_name=None,
            context=None,
            parent_id=None,
            tags=None,
            start_time=None):
        super(MockSpan, self).__init__(tracer, context)
        self._tracer = tracer
        self._lock = Lock()

        self.operation_name = operation_name
        self.start_time = start_time
        self.parent_id = parent_id
        self.tags = tags if tags is not None else {}
        self.finish_time = -1
        self.finished = False
        self.logs = [] 
Example #10
Source File: test_IRC.py    From Legobot with GNU General Public License v2.0
def test_initialization():
        baseplate = Lego.start(None, threading.Lock())
        baseplate_proxy = baseplate.proxy()
        baseplate_proxy.add_child(IRC,  # nosec
                                  channels=['#foo'],
                                  nickname='test_nick',
                                  server='foo.testing',
                                  port=6667,
                                  use_ssl=False,
                                  username='test_username',
                                  password='test_password')
        # Cleanup
        children = baseplate_proxy.children.get()
        for child in children:
            child.stop()
        baseplate.stop() 
Example #11
Source File: visuals.py    From python-esppy with Apache License 2.0
def __init__(self,visuals,datasource,**kwargs):
        Chart.__init__(self,visuals,datasource,**kwargs)
        self.layout.overflow = "auto"
        if self.hasOpt("image") == False:
            raise Exception("you must specify the image property")
        self._entries = {}
        self._lock = threading.Lock()
        self._imageWidth = self.getOpt("image_width",300)
        self._imageHeight = self.getOpt("image_height",300)
        orientation = self.getOpt("orientation","vertical")

        if orientation == "horizontal":
            self.layout.width = self.getOpt("width","800px")
            self.layout.height = str(self._imageHeight + 100) + "px"
        else:
            self.layout.width = str(self._imageWidth + 80) + "px"
            self.layout.height = self.getOpt("height","800px")

        self._detection = None 
Example #12
Source File: queue.py    From jawfish with MIT License
def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)

        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = threading.Lock()

        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = threading.Condition(self.mutex)

        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = threading.Condition(self.mutex)

        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = threading.Condition(self.mutex)
        self.unfinished_tasks = 0 
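The comments above describe the standard pattern of one mutex shared by several condition variables. As a simplified sketch (blocking-only, no timeouts, reduced from what the real queue module does), put() and get() use these conditions like this:

def put(self, item):
    with self.not_full:                  # acquires self.mutex
        while 0 < self.maxsize <= self._qsize():
            self.not_full.wait()         # mutex is released while waiting
        self._put(item)
        self.unfinished_tasks += 1
        self.not_empty.notify()          # wake one thread blocked in get()

def get(self):
    with self.not_empty:                 # same underlying mutex
        while not self._qsize():
            self.not_empty.wait()
        item = self._get()
        self.not_full.notify()           # wake one thread blocked in put()
        return item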
Example #13
Source File: EventDispatcher.py    From rtp_cluster with BSD 2-Clause "Simplified" License
def __init__(self, freq = 100.0):
        EventDispatcher2.state_lock.acquire()
        if EventDispatcher2.ed_inum != 0:
            EventDispatcher2.state_lock.release()
            raise StdException('BZZZT, EventDispatcher2 has to be singleton!')
        EventDispatcher2.ed_inum = 1
        EventDispatcher2.state_lock.release()
        self.tcbs_lock = Lock()
        self.tlisteners = []
        self.slisteners = []
        self.signals_pending = []
        self.last_ts = MonoTime()
        self.my_ident = get_ident()
        self.elp = ElPeriodic(freq)
        self.elp.CFT_enable(signal.SIGURG)
        self.bands = [(freq, 0),] 
Example #14
Source File: replay_buffer.py    From HardRLWithYoutube with MIT License
def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions):
        """Creates a replay buffer.

        Args:
            buffer_shapes (dict of ints): the shape for all buffers that are used in the replay
                buffer
            size_in_transitions (int): the size of the buffer, measured in transitions
            T (int): the time horizon for episodes
            sample_transitions (function): a function that samples from the replay buffer
        """
        self.buffer_shapes = buffer_shapes
        self.size = size_in_transitions // T
        self.T = T
        self.sample_transitions = sample_transitions

        # self.buffers is {key: array(size_in_episodes x T or T+1 x dim_key)}
        self.buffers = {key: np.empty([self.size, *shape])
                        for key, shape in buffer_shapes.items()}

        # memory management
        self.current_size = 0
        self.n_transitions_stored = 0

        self.lock = threading.Lock() 
Example #15
Source File: cosmosdb_partitioned_storage.py    From botbuilder-python with MIT License
def __init__(self, config: CosmosDbPartitionedConfig):
        """Create the storage object.

        :param config:
        """
        super(CosmosDbPartitionedStorage, self).__init__()
        self.config = config
        self.client = None
        self.database = None
        self.container = None
        self.compatability_mode_partition_key = False
        # Lock used for synchronizing container creation
        self.__lock = Lock()
        if config.key_suffix is None:
            config.key_suffix = ""
        if not config.key_suffix.__eq__(""):
            if config.compatibility_mode:
                raise Exception(
                    "compatibilityMode cannot be true while using a keySuffix."
                )
            suffix_escaped = CosmosDbKeyEscape.sanitize_key(config.key_suffix)
            if not suffix_escaped.__eq__(config.key_suffix):
                raise Exception(
                    f"Cannot use invalid Row Key characters: {config.key_suffix} in keySuffix."
                ) 
Example #16
Source File: replay_buffer.py    From lirpg with MIT License
def __init__(self, buffer_shapes, size_in_transitions, T, sample_transitions):
        """Creates a replay buffer.

        Args:
            buffer_shapes (dict of ints): the shape for all buffers that are used in the replay
                buffer
            size_in_transitions (int): the size of the buffer, measured in transitions
            T (int): the time horizon for episodes
            sample_transitions (function): a function that samples from the replay buffer
        """
        self.buffer_shapes = buffer_shapes
        self.size = size_in_transitions // T
        self.T = T
        self.sample_transitions = sample_transitions

        # self.buffers is {key: array(size_in_episodes x T or T+1 x dim_key)}
        self.buffers = {key: np.empty([self.size, *shape])
                        for key, shape in buffer_shapes.items()}

        # memory management
        self.current_size = 0
        self.n_transitions_stored = 0

        self.lock = threading.Lock() 
Example #17
Source File: update_manager.py    From Paradrop with Apache License 2.0
def __init__(self, reactor):
        self.reactor = reactor

        self.updateLock = threading.Lock()
        self.updateQueue = []

        # Map update_id -> update object.
        self.active_changes = {}

        # TODO: Ideally, load this from file so that change IDs are unique
        # across system reboots.
        self.next_change_id = 1

        ###########################################################################################
        # Launch the first update call, NOTE that you have to use callInThread!!
        # This happens because the perform_updates should run in its own thread,
        # it makes blocking calls and such... so if we *don't* use callInThread
        # then this function WILL BLOCK THE MAIN EVENT LOOP (ie. you cannot send any data)
        #
        # FIXME: It should be noted that calling updateLock.acquire(), e.g.
        # from add_update, also blocks the main thread.  Synchronizing access
        # to the work queue should be reworked.
        ###########################################################################################
        self.reactor.callInThread(self._perform_updates) 
Example #18
Source File: messages.py    From pymumble with GNU General Public License v3.0
def __init__(self):
        self.cmd_id = None
        self.lock = Lock()
        
        self.cmd = None
        self.parameters = None
        self.response = None 
Example #19
Source File: queue.py    From jawfish with MIT License
def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = _threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = _threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = _threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = _threading.Condition(self.mutex)
        self.unfinished_tasks = 0 
Example #20
Source File: test_config.py    From esmlab with Apache License 2.0
def test_set_hard_to_copyables():
    import threading

    with _config.set(x=threading.Lock()):
        with _config.set(y=1):
            pass 
Example #21
Source File: comm.py    From overhaul-distillation with MIT License
def __init__(self):
        self._result = None
        self._lock = threading.Lock()
        self._cond = threading.Condition(self._lock) 
Example #22
Source File: data_generators.py    From FasterRCNN_KERAS with Apache License 2.0
def __init__(self, it):
		self.it = it
		self.lock = threading.Lock() 
Example #23
Source File: utils.py    From stoq with Apache License 2.0
def ratelimited():
    """
    Thread safe decorator to rate limit a function

    """

    lock = threading.Lock()

    def decorator(func):
        last_call = time.perf_counter()

        @wraps(func)
        async def ratelimit(*args, **kwargs):
            limit = kwargs.get("ratelimit", None)
            if limit:
                count, seconds = limit.split("/")
                interval = int(seconds) / int(count)
                lock.acquire()
                nonlocal last_call
                elapsed = time.perf_counter() - last_call
                left_to_wait = interval - elapsed

                if left_to_wait > 0:
                    time.sleep(left_to_wait)

                last_call = time.perf_counter()

                lock.release()

            try:
                kwargs.pop("ratelimit")
            except KeyError:
                pass

            return await func(*args, **kwargs)

        return ratelimit

    return decorator 
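A possible way to apply the decorator (fetch and the "5/1" rate string are illustrative, not from the stoq source): the ratelimit keyword is given in count/seconds form and is popped by the wrapper before the wrapped coroutine is called.

import asyncio

@ratelimited()
async def fetch(url):
    return url  # placeholder for real work

async def main():
    # at most 5 calls per second; fetch itself never receives the ratelimit kwarg
    for _ in range(3):
        await fetch("https://example.com", ratelimit="5/1")

asyncio.run(main())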
Example #24
Source File: scrapy_6v.py    From PT-help with MIT License
def __init__(self, host, port, user, password, db):
        self._commit_lock = Lock()
        self.db = pymysql.connect(host=host, port=port, user=user, password=password, db=db, charset='utf8') 
Example #25
Source File: test_adapter.py    From botbuilder-python with MIT License
def __init__(
        self,
        logic: Coroutine = None,
        template_or_conversation: Union[Activity, ConversationReference] = None,
        send_trace_activities: bool = False,
    ):
        """
        Creates a new TestAdapter instance.
        :param logic:
        :param template_or_conversation: A reference to the conversation to begin the adapter state with.
        """
        super(TestAdapter, self).__init__()
        self.logic = logic
        self._next_id: int = 0
        self._user_tokens: List[UserToken] = []
        self._magic_codes: List[TokenMagicCode] = []
        self._conversation_lock = Lock()
        self.exchangeable_tokens: Dict[str, ExchangeableToken] = {}
        self.activity_buffer: List[Activity] = []
        self.updated_activities: List[Activity] = []
        self.deleted_activities: List[ConversationReference] = []
        self.send_trace_activities = send_trace_activities

        self.template = (
            template_or_conversation
            if isinstance(template_or_conversation, Activity)
            else Activity(
                channel_id="test",
                service_url="https://test.com",
                from_property=ChannelAccount(id="User1", name="user"),
                recipient=ChannelAccount(id="bot", name="Bot"),
                conversation=ConversationAccount(id="Convo1"),
            )
        )

        if isinstance(template_or_conversation, ConversationReference):
            self.template.channel_id = template_or_conversation.channel_id 
Example #26
Source File: unicorn_binance_websocket_api_manager.py    From unicorn-binance-websocket-api with MIT License
def _create_stream_thread(self, loop, stream_id, channels, markets, stream_label=None, stream_buffer_name=False,
                              restart=False):
        """
        Companion function of self.create_stream that creates a thread for the socket and manages the coroutine

        :param loop: provide an asyncio loop
        :type loop: asyncio loop
        :param stream_id: provide a stream_id (only needed for userData streams, which acquire a listenKey)
        :type stream_id: uuid
        :param channels: provide the channels to create the URI
        :type channels: str, tuple, list, set
        :param markets: provide the markets to create the URI
        :type markets: str, tuple, list, set
        :param stream_label: provide a stream_label for the stream
        :type stream_label: str
        :param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
                           set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
                           provide a string to create and use a shared stream_buffer and read it via
                           `pop_stream_data_from_stream_buffer('string')`.
        :type stream_buffer_name: bool or str
        :param restart: set to `True` if it's a restart
        :type restart: bool
        :return:
        """
        if self.is_stop_request(stream_id):
            return False
        if restart is False:
            self._add_socket_to_socket_list(stream_id, channels, markets, stream_label, stream_buffer_name)
            if stream_buffer_name is not False:
                self.stream_buffer_locks[stream_buffer_name] = threading.Lock()
                self.stream_buffers[stream_buffer_name] = []
        asyncio.set_event_loop(loop)
        binance_websocket_api_socket = BinanceWebSocketApiSocket(self, stream_id, channels, markets)
        try:
            loop.run_until_complete(binance_websocket_api_socket.start_socket())
        finally:
            loop.close() 
Example #27
Source File: token_bucket.py    From pyspider with Apache License 2.0
def __init__(self, rate=1, burst=None):
        self.rate = float(rate)
        if burst is None:
            self.burst = float(rate) * 10
        else:
            self.burst = float(burst)
        self.mutex = _threading.Lock()
        self.bucket = self.burst
        self.last_update = time.time() 
Example #28
Source File: timer_queue.py    From misp42splunk with GNU Lesser General Public License v3.0
def __init__(self):
        self._timers = TimerQueueStruct()
        self._lock = threading.Lock()
        self._wakeup_queue = Queue.Queue()
        self._thr = threading.Thread(target=self._check_and_execute)
        self._thr.daemon = True
        self._started = False 
Example #29
Source File: hdfs_client.py    From incubator-spot with Apache License 2.0
def __init__(self, hdfs_path, nbytes):
        self._data = {}
        self._lock = Lock()
        self._hpath = hdfs_path
        self._nbytes = nbytes 
Example #30
Source File: genpy.py    From hadrian with Apache License 2.0
def update(self, state, scope, path, to, init, arrayErrCode, mapErrCode, fcnName, pos):
        result = None

        head, tail = path[0], path[1:]

        if self.shared:
            self.locklock.acquire()
            if head in self.locks:
                self.locks[head].acquire()
            else:
                self.locks[head] = threading.Lock()
                self.locks[head].acquire()
            self.locklock.release()

            if head not in self.value:
                self.value[head] = init
            self.value[head] = update(state, scope, self.value[head], tail, to, arrayErrCode, mapErrCode, fcnName, pos)

            result = self.value[head]
            self.locks[head].release()

        else:
            if head not in self.value:
                self.value[head] = init
            self.value[head] = update(state, scope, self.value[head], tail, to, arrayErrCode, mapErrCode, fcnName, pos)
            result = self.value[head]

        return result