Python logging.critical() Examples

The following are 30 code examples showing how to use logging.critical(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module logging, or try the search function.

Example 1
Project: wopi-validator-cli-python   Author: microsoft   File: WopiValidatorExecutor.py    License: MIT License 7 votes vote down vote up
def get_wopi_test_endpoint(wopi_discovery_service_url):
    """Fetch the WOPI discovery XML and return the test application's endpoint URL
    with any query string stripped.

    Exits the process (status 1) if the discovery service cannot be reached or
    its XML cannot be parsed.
    """
    logging.info("WOPI Discovery Service Url: " + wopi_discovery_service_url)
    discovery_service_response = requests.get(wopi_discovery_service_url)

    try:
        discovery_service_response.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        print(Fore.RED + "Failed to retrieve WOPI Discovery Service XML: Check Logs for more information")
        # BUG FIX: the original passed a second positional arg with no %s in the
        # format string (and referenced the nonexistent attribute `exception.Code`),
        # so the error detail was never logged; log the exception via %s instead.
        logging.critical("Failed to retrieve WOPI Discovery Service XML - HTTP ErrorCode: %s", exception)
        sys.exit(1)

    try:
        discovery_xml = ElementTree.fromstring(discovery_service_response.content)
        wopi_test_endpoint_url = discovery_xml.find(WOPITESTAPPLICATION_NODE_PATH).attrib[
            WOPITESTAPPLICATION_URLSRC_ATTRIBUTE]
    except Exception as exception:
        print(Fore.RED + "Failed to parse WOPI Discovery Service XML: Check Logs for more information")
        # BUG FIX: same stray-positional-arg problem; use %s so the exception is logged.
        logging.critical("Failed to parse WOPI Discovery Service XML - Exception Details: %s", exception)
        sys.exit(1)

    # BUG FIX: `url[:url.find('?')]` dropped the last character when no '?' was
    # present (find() returns -1); split keeps the full URL in that case.
    return wopi_test_endpoint_url.split('?', 1)[0]
Example 2
Project: URS   Author: JosephLai241   File: Logger.py    License: MIT License 6 votes vote down vote up
def log_login(function):
        """Decorator: wrap a Reddit login call with console/log messages,
        trapping PRAW failures and exiting through the argument parser."""
        def login_wrapper(parser, reddit):
            print("\nLogging in...")

            try:
                function(parser, reddit)
                logging.info("Successfully logged in as u/%s." % reddit.user.me())
                logging.info("")
            except PrawcoreException as error:
                # report the failure and bail out via the parser
                Titles.Titles.p_title(error)
                logging.critical("LOGIN FAILED.")
                logging.critical("PRAWCORE EXCEPTION: %s.\n" % error)
                parser.exit()

        return login_wrapper

    ### Wrapper for logging rate limit errors. 
Example 3
Project: wukong-itchat   Author: wzpan   File: bot.py    License: MIT License 6 votes vote down vote up
def convert_mp3_to_wav(mp3_path):
    """Convert an mp3 file to wav.

    :param mp3_path: path of the mp3 file
    :returns: path of the generated wav file, or None if mp3_path does not exist
    """
    if not os.path.exists(mp3_path):
        # log message kept verbatim (runtime output must not change)
        logging.critical("文件错误 {}".format(mp3_path))
        return None
    # BUG FIX: str.replace rewrote every ".mp3" occurrence in the path (e.g. in a
    # directory name); only the trailing extension should change. The replace()
    # fallback preserves the original behavior for non-".mp3" paths.
    if mp3_path.endswith(".mp3"):
        target = mp3_path[:-len(".mp3")] + ".wav"
    else:
        target = mp3_path.replace(".mp3", ".wav")
    AudioSegment.from_mp3(mp3_path).export(target, format="wav")
    return target
Example 4
Project: ffn   Author: google   File: object_utils.py    License: Apache License 2.0 6 votes vote down vote up
def load_equivalences(paths):
  """Loads equivalences from a text file.

  Args:
    paths: sequence of paths to the text files of equivalences; id0,id1 per
      line, or id0,id1,x,y,z.

  Returns:
    NX graph object representing the equivalences
  """
  graph = nx.Graph()

  for path in paths:
    with open(path, "r") as equiv_file:
      chunks = pd.read_csv(
          equiv_file, sep=",", engine="c", comment="#", chunksize=4096,
          header=None)
      for chunk in chunks:
        num_columns = len(chunk.columns)
        if num_columns not in (2, 5):
          # note: the chunk is still processed after logging
          logging.critical("Unexpected # of columns (%d), want 2 or 5",
                           num_columns)

        graph.add_edges_from(chunk.values[:, :2])

  return graph
Example 5
Project: rucio   Author: rucio   File: throttler.py    License: Apache License 2.0 6 votes vote down vote up
def __schedule_requests():
    """
    Schedule requests
    """
    try:
        throttler_mode = config_core.get('throttler', 'mode', default='DEST_PER_ACT', use_cache=False)
        direction, all_activities = get_parsed_throttler_mode(throttler_mode)
        stats = __get_request_stats(all_activities, direction)
        if direction in ('destination', 'source'):
            for rse_id in stats:
                rse_name = stats[rse_id]['rse']
                availability = get_rse(rse_id).availability
                # dest_rse is not blacklisted for write or src_rse is not blacklisted for read
                writable = direction == 'destination' and availability & 2
                readable = direction == 'source' and availability & 4
                if writable or readable:
                    release = __release_all_activities if all_activities else __release_per_activity
                    release(stats[rse_id], direction, rse_name, rse_id)
    except Exception:
        logging.critical("Failed to schedule requests, error: %s" % (traceback.format_exc()))
Example 6
Project: rucio   Author: rucio   File: transmogrifier.py    License: Apache License 2.0 6 votes vote down vote up
def _retrial(func, *args, **kwargs):
    """
    Retrial method
    """
    delay = 0
    while True:
        try:
            return func(*args, **kwargs)
        except DataIdentifierNotFound as error:
            logging.warning(error)
            return 1
        except DatabaseException as error:
            logging.error(error)
            if exp(delay) > 600:
                logging.error('Cannot execute %s after %i attempt. Failing the job.' % (func.__name__, delay))
                raise
            else:
                logging.error('Failure to execute %s. Retrial will be done in %d seconds ' % (func.__name__, exp(delay)))
            time.sleep(exp(delay))
            delay += 1
        except Exception:
            exc_type, exc_value, exc_traceback = exc_info()
            logging.critical(''.join(format_exception(exc_type, exc_value, exc_traceback)).strip())
            raise 
Example 7
Project: benchexec   Author: sosy-lab   File: localexecution.py    License: Apache License 2.0 6 votes vote down vote up
def run(self):
        """Worker loop: pull runs from the shared work queue and execute them
        until the queue is empty or a global interrupt is flagged."""
        while not STOPPED_BY_INTERRUPT:
            try:
                current_run = _Worker.working_queue.get_nowait()
            except queue.Empty:
                return

            try:
                logging.debug('Executing run "%s"', current_run.identifier)
                self.execute(current_run)
                logging.debug('Finished run "%s"', current_run.identifier)
            except (SystemExit, BenchExecException) as e:
                # both are treated as fatal-for-this-run but the worker keeps going
                logging.critical(e)
            except BaseException:
                logging.exception("Exception during run execution")
            self.run_finished_callback()
            _Worker.working_queue.task_done()
Example 8
Project: benchexec   Author: sosy-lab   File: runexecutor.py    License: Apache License 2.0 6 votes vote down vote up
def _setup_cgroup_memory_limit(self, memlimit, cgroups, pid_to_kill):
        """Start memory-limit handler.
        @return None or the memory-limit handler for calling cancel()
        """
        if memlimit is not None:
            try:
                oomThread = oomhandler.KillProcessOnOomThread(
                    cgroups=cgroups,
                    pid_to_kill=pid_to_kill,
                    callbackFn=self._set_termination_reason,
                )
                oomThread.start()
                return oomThread
            except OSError as e:
                logging.critical(
                    "OSError %s during setup of OomEventListenerThread: %s.",
                    e.errno,
                    e.strerror,
                )
        return None 
Example 9
Project: botoflow   Author: boto   File: events.py    License: Apache License 2.0 6 votes vote down vote up
def swf_event_to_object(event_dict):
    """
    takes an event dictionary from botocore and converts it into a specific
    event instance.

    :raises NotImplementedError: if the event type has no registered class.
    """
    try:
        event_class = _event_type_name_to_class[event_dict['eventType']]
    except KeyError:
        # we cannot guarantee we do the right thing in the decider if there's an unsupported event type.
        # BUG FIX: the format string used a bare '%' (no conversion type), which makes
        # the logging module choke on the message instead of interpolating the event
        # type; '%s' is required.
        logging.critical("Event type '%s' is not implemented. Cannot continue processing decisions!",
                         event_dict['eventType'])
        raise NotImplementedError(
            "Event type '{}' is not implemented. Cannot continue processing decisions!".format(event_dict['eventType']))

    return event_class(event_dict['eventId'],
                       event_dict['eventTimestamp'],
                       event_dict[event_class.attribute_key])
Example 10
Project: ACE   Author: IntegralDefense   File: test_ace.py    License: Apache License 2.0 6 votes vote down vote up
def tearDown(self, *args, **kwargs):
        """Stop the CLI subprocess (killing it if terminate times out), fail the
        test on a non-zero exit code, then join both output reader threads."""
        process = self.cli_process
        if process is not None:
            try:
                process.terminate()
                process.wait(5)
            except TimeoutExpired:
                try:
                    process.kill()
                    process.wait(5)
                except Exception as e:
                    logging.critical("cannot stop subprocess {}: {}".format(process, e))

            if process.returncode != 0:
                self.fail("subprocess {} returned exit code {}".format(' '.join(self.cli_args), process.returncode))

        # both reader threads get the same join-and-complain treatment
        for reader in (self.stdout_reader_thread, self.stderr_reader_thread):
            if reader is not None:
                reader.join(5)
                if reader.is_alive():
                    logging.error("reader thread not stopping...")
Example 11
Project: ACE   Author: IntegralDefense   File: __init__.py    License: Apache License 2.0 6 votes vote down vote up
def stop_threaded_execution(self):
        """Signal the execution thread to stop and wait for it; if it fails to
        stop within the configured long timeout, hard-exit the whole process."""
        if not self.is_threaded:
            return

        logging.info("stopping threaded execution for {}".format(self))

        self.threaded_execution_stop_event.set()
        started_waiting = datetime.datetime.now()
        while True:
            self.threaded_execution_thread.join(5)
            if not self.threaded_execution_thread.is_alive():
                break

            logging.error("thread {} is not stopping".format(self.threaded_execution_thread))

            # have we been waiting for a really long time?
            waited = (datetime.datetime.now() - started_waiting).total_seconds()
            if waited >= saq.EXECUTION_THREAD_LONG_TIMEOUT:
                logging.critical("execution thread {} is failing to stop - process dying".format(
                                  self.threaded_execution_thread))
                # suicide
                os._exit(1)

        logging.debug("threaded execution module {} has stopped ({})".format(self, self.threaded_execution_thread))
Example 12
Project: ACE   Author: IntegralDefense   File: __init__.py    License: Apache License 2.0 6 votes vote down vote up
def create_analysis(self, observable):
        """Initializes and adds the generated Analysis for this module to the given Observable.
           Returns the generated Analysis, the existing one if already present, or None if
           this module does not generate Analysis at all."""
        analysis_type = self.generated_analysis_type
        if analysis_type is None:
            logging.critical("called create_analysis on {} which does not actually create Analysis".format(self))
            return None

        # reuse analysis that was already attached to this observable
        existing = observable.get_analysis(analysis_type)
        if existing:
            logging.debug("returning existing analysis {} in call to create analysis from {} for {}".format(
                          existing, self, observable))
            return existing

        # otherwise create, initialize and attach a fresh one
        new_analysis = analysis_type()
        new_analysis.initialize_details()
        observable.add_analysis(new_analysis)
        return new_analysis

    # XXX this is not supported at all 
Example 13
Project: ACE   Author: IntegralDefense   File: url.py    License: Apache License 2.0 6 votes vote down vote up
def verify_environment(self):
        """Verify every crawlphish config option exists (and its file where applicable),
        and that each configured proxy name maps to a proxy_<name> config section."""
        # options whose value must also be an existing path on disk
        for option in ('whitelist_path', 'regex_path', 'blacklist_path'):
            self.verify_config_exists(option)
            self.verify_path_exists(self.config[option])

        # plain options that just need to be present
        for option in ('uncommon_network_threshold', 'user-agent', 'timeout',
                       'max_download_size', 'max_file_name_length',
                       'cooldown_period', 'update_brocess', 'proxies'):
            self.verify_config_exists(option)

        for name in self.config['proxies'].split(','):
            if name == 'GLOBAL':
                continue

            if 'proxy_{}'.format(name) not in saq.CONFIG:
                logging.critical("invalid proxy name {} in crawlphish config".format(name))
Example 14
Project: borgmatic   Author: witten   File: validate_config.py    License: GNU General Public License v3.0 6 votes vote down vote up
def main():  # pragma: no cover
    """Validate each borgmatic configuration file named on the command line,
    exiting non-zero if none are found or any fails to parse."""
    args = parse_arguments(*sys.argv[1:])

    logging.basicConfig(level=logging.INFO, format='%(message)s')

    config_filenames = tuple(collect.collect_config_filenames(args.config_paths))
    if not config_filenames:
        logger.critical('No files to validate found')
        sys.exit(1)

    found_issues = False
    for config_filename in config_filenames:
        try:
            validate.parse_configuration(config_filename, validate.schema_filename())
        except (ValueError, OSError, validate.Validation_error) as error:
            # NOTE(review): this function mixes `logger` and the root `logging`
            # module — preserved as-is since changing it alters log routing.
            logging.critical('{}: Error parsing configuration file'.format(config_filename))
            logging.critical(error)
            found_issues = True

    if found_issues:
        sys.exit(1)

    logger.info(
        'All given configuration files are valid: {}'.format(', '.join(config_filenames))
    )
Example 15
Project: pyArango   Author: ArangoDB-Community   File: jwauth.py    License: Apache License 2.0 6 votes vote down vote up
def __get_auth_token(self):
        request_data = '{"username":"%s","password":"%s"}' % (self.username, self.password)
        for connection_url in self.urls:
            try:
                response = self.session.post('%s/_open/auth' % connection_url, data=request_data)
                if response.ok:
                    json_data = response.content
                    if json_data:
                        data_dict = json_mod.loads(json_data.decode("utf-8"))
                        return data_dict.get('jwt')
            except requests_exceptions.ConnectionError:
                if connection_url is not self.urls[-1]:
                    logging.critical("Unable to connect to %s trying another", connection_url)
                else:
                    logging.critical("Unable to connect to any of the urls: %s", self.urls)
                    raise 
Example 16
Project: exopy   Author: Exopy   File: test_plugin.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def test_handling_crash_of_watchdog(instr_workbench, caplog):
    """Test that we can close the plugin even if the observer fails to join.

    """
    instr_workbench.register(InstrContributor1())

    # Test starting
    plugin = instr_workbench.get_plugin('exopy.instruments')

    observer = plugin._observer
    real_join = observer.join

    def false_join():
        import logging
        logging.critical('Crash')
        raise RuntimeError()

    # make the observer's join blow up during plugin shutdown
    observer.join = false_join

    plugin.stop()
    real_join()
    assert any(record.levelname == 'CRITICAL' for record in caplog.records)
Example 17
Project: PyCNC   Author: Nikolay-Kha   File: heater.py    License: MIT License 6 votes vote down vote up
def run(self):
        """ Thread worker implementation. There is a loop for PID control.

        BUG FIX: the original acquired the mutex at the top of each iteration, but
        the `continue` path (sensor read error) and both `break` paths left it held,
        deadlocking the next acquire() (or leaking the lock on exit). The lock is
        now always released via try/finally.
        """
        last_error = None
        while True:
            self._mutex.acquire()
            try:
                if not self._is_run:
                    break
                try:
                    current_temperature = self._measure()
                except (IOError, OSError):
                    # sensor glitch: shut the heater off and keep retrying until
                    # readings have been missing longer than SENSOR_TIMEOUT_S
                    self._control(0)
                    if last_error is None:
                        last_error = time.time()
                    elif time.time() - last_error > self.SENSOR_TIMEOUT_S:
                        logging.critical("No data from temperature sensor."
                                         " Stop heating.")
                        break
                    continue
                last_error = None
                self._current_power = self._pid.update(current_temperature) * 100
                self._control(self._current_power)
            finally:
                self._mutex.release()
            time.sleep(self.LOOP_INTERVAL_S)
Example 18
Project: dket   Author: dkmfbk   File: train.py    License: GNU General Public License v3.0 6 votes vote down vote up
def _validate_params(self, params):
        decay_steps = params[self.DECAY_STEPS_PK]
        if decay_steps <= 0:
            msg = '{} must be a positive integer.'.format(self.DECAY_STEPS_PK)
            logging.critical(msg)
            raise ValueError(msg)
        decay_rate = params[self.DECAY_RATE_PK]
        if decay_rate <= 0.0 or decay_rate > 1.0:
            msg = '{} must be a float between 0.0 and 1.0'.format(self.DECAY_RATE_PK)
            logging.critical(msg)
            raise ValueError(msg)
        
        logging.debug('decay rate: %d', decay_rate)
        logging.debug('decay steps: %f', decay_steps)
        logging.debug('staircase: %s', str(params[self.STAIRCASE_PK]))
        return params 
Example 19
Project: dket   Author: dkmfbk   File: train.py    License: GNU General Public License v3.0 6 votes vote down vote up
def _validate_params(self, params):
        min_value = params[self.MIN_VALUE_PK]
        max_value = params[self.MAX_VALUE_PK]

        msg = '{} min value cannot be `None`.'
        if min_value is None:
            msg = msg.format(self.MIN_VALUE_PK)
            logging.critical(msg)
            raise ValueError(msg)
        if max_value is None:
            msg = msg.format(self.MAX_VALUE_PK)
            logging.critical(msg)
            raise ValueError(msg)

        if min_value >= max_value:
            msg = '{} should be less than {}, found {} and {} instead.'\
                .format(self.MIN_VALUE_PK, self.MAX_VALUE_PK, min_value, max_value)
            logging.critical(msg)
            raise ValueError(msg)
        return params 
Example 20
Project: clickhouse-mysql-data-reader   Author: Altinity   File: chclient.py    License: MIT License 6 votes vote down vote up
def verify_connection_settings(self, connection_settings):
        """Ensure the ClickHouse connection settings include a non-empty host and port,
        logging a critical message and exiting otherwise.

        NOTE(review): exits with status 0 on failure, so callers checking the exit
        code see success — confirm whether a non-zero code was intended.
        """
        if not connection_settings:
            logging.critical("Need CH connection settings")
            sys.exit(0)

        # each required setting must be present and truthy
        for setting in ('host', 'port'):
            if setting not in connection_settings or not connection_settings[setting]:
                logging.critical("Need CH {} in connection settings".format(setting))
                sys.exit(0)

#self.client = CHClient(connection_settings)
#self.client.execute(sql, rows) 
Example 21
Project: URS   Author: JosephLai241   File: Logger.py    License: MIT License 5 votes vote down vote up
def log_args(function):
        """Decorator: log invalid CLI arguments (ValueError) and exit via the parser."""
        def args_wrapper(self, args, parser):
            try:
                function(self, args, parser)
            except ValueError:
                Titles.Titles.e_title()
                logging.critical("INVALID ARGUMENTS GIVEN.\n")
                parser.exit()

        return args_wrapper

    ### Wrapper for logging PRAW errors. 
Example 22
Project: URS   Author: JosephLai241   File: Logger.py    License: MIT License 5 votes vote down vote up
def log_rate_limit(function):
        """Decorator: fetch the user's rate-limit info and halt when no requests remain."""
        def rate_limit_wrapper(reddit):
            user_limits = function(reddit)

            remaining = int(user_limits["remaining"])
            if remaining == 0:
                # announce when the limit resets, then stop the program
                Titles.Titles.l_title(Global.convert_time(user_limits["reset_timestamp"]))
                logging.critical("RATE LIMIT REACHED. RATE LIMIT WILL RESET AT %s.\n" %
                    Global.convert_time(user_limits["reset_timestamp"]))
                quit()

            return user_limits
        return rate_limit_wrapper
Example 23
Project: URS   Author: JosephLai241   File: Logger.py    License: MIT License 5 votes vote down vote up
def log_export(function):
        """Decorator: log the export outcome, trapping any exception raised while exporting."""
        def export_wrapper(*args):
            try:
                function(*args)

                logging.info(LogExport._get_export_switch(args[0]))
                logging.info("")
            except Exception as e:
                # swallow the error after logging so scraping can proceed
                logging.critical("AN ERROR HAS OCCURED WHILE EXPORTING SCRAPED DATA.")
                logging.critical("%s\n" % e)

        return export_wrapper
Example 24
Project: unicorn-binance-websocket-api   Author: oliver-zehentleitner   File: unicorn_binance_websocket_api_manager.py    License: MIT License 5 votes vote down vote up
def _start_monitoring_api_thread(self, host, port, warn_on_update):
        """
        Threaded method that serves the monitoring api.

        :param host: IP or hostname to use
        :type host: str
        :param port: Port to use
        :type port: int
        :param warn_on_update: Should the monitoring system report available updates?
        :type warn_on_update: bool
        """
        logging.info("Starting monitoring API service ...")
        app = Flask(__name__)

        @app.route('/')
        @app.route('/status/')
        def redirect_to_wiki():
            logging.debug("Visit https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/UNICORN-"
                          "Monitoring-API-Service for further information!")
            return redirect("https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api/wiki/"
                            "UNICORN-Monitoring-API-Service", code=302)

        rest_api = Api(app)
        rest_api.add_resource(BinanceWebSocketApiRestServer,
                              "/status/<string:statusformat>/",
                              "/status/<string:statusformat>/<string:checkcommandversion>",
                              resource_class_kwargs={'handler_binance_websocket_api_manager': self,
                                                     'warn_on_update': warn_on_update})
        try:
            path_dispatcher = wsgi.PathInfoDispatcher({'/': app})
            self.monitoring_api_server = wsgi.WSGIServer((host, port), path_dispatcher)
            self.monitoring_api_server.start()
        except (RuntimeError, OSError) as error_msg:
            # both failure modes are handled identically: log and let the thread end
            logging.critical("Monitoring API service is going down! - Info: " + str(error_msg))
Example 25
Project: unicorn-binance-websocket-api   Author: oliver-zehentleitner   File: unicorn_binance_websocket_api_manager.py    License: MIT License 5 votes vote down vote up
def stream_is_crashing(self, stream_id, error_msg=False):
        """
        If a stream can not heal itself in cause of wrong parameter (wrong market, channel type) it calls this method

        :param stream_id: id of a stream
        :type stream_id: uuid
        :param error_msg: Error msg to add to the stream status!
        :type error_msg: str
        """
        logging.critical("BinanceWebSocketApiManager->stream_is_crashing(" + str(stream_id) + ")")
        entry = self.stream_list[stream_id]
        entry['has_stopped'] = time.time()
        entry['status'] = "crashed - " + str(error_msg) if error_msg else "crashed"
Example 26
Project: unicorn-binance-websocket-api   Author: oliver-zehentleitner   File: unicorn_binance_websocket_api_connection.py    License: MIT License 5 votes vote down vote up
async def __aexit__(self, *args, **kwargs):
        """Async context-manager exit: delegate to the wrapped connection, handling
        a closed socket by stopping the stream and requesting a restart.

        BUG FIX: this coroutine uses `await` but was declared with a plain `def`,
        which is a SyntaxError; it must be `async def`.
        """
        try:
            await self._conn.__aexit__(*args, **kwargs)
        except AttributeError as error_msg:
            logging.error("binance_websocket_api_connection->__aexit__(*args, **kwargs): "
                          "AttributeError - " + str(error_msg))
        except websockets.exceptions.ConnectionClosed as error_msg:
            logging.critical("binance_websocket_api_connection->__aexit__(*args, **kwargs): "
                             "ConnectionClosed - " + str(error_msg))
            self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
            # only request a restart if no stop (or stop-as-crash) was explicitly asked for
            if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False and \
                    self.handler_binance_websocket_api_manager.is_stop_as_crash_request is False:
                self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
            sys.exit(1)
Example 27
Project: unicorn-binance-websocket-api   Author: oliver-zehentleitner   File: unicorn_binance_websocket_api_connection.py    License: MIT License 5 votes vote down vote up
async def receive(self):
        """Await one message from this stream's websocket, update the manager's
        statistics, and return the received payload (or None).

        BUG FIX: this coroutine uses `await` but was declared with a plain `def`,
        which is a SyntaxError; it must be `async def`.
        """
        self.handler_binance_websocket_api_manager.set_heartbeat(self.stream_id)
        try:
            received_data_json = await self.handler_binance_websocket_api_manager.websocket_list[self.stream_id].recv()
            try:
                # the first successful receive after a restart clears the restart request
                if self.handler_binance_websocket_api_manager.restart_requests[self.stream_id]['status'] == "restarted":
                    self.handler_binance_websocket_api_manager.increase_reconnect_counter(self.stream_id)
                    del self.handler_binance_websocket_api_manager.restart_requests[self.stream_id]
            except KeyError:
                pass
            if received_data_json is not None:
                size = sys.getsizeof(received_data_json)
                self.handler_binance_websocket_api_manager.increase_processed_receives_statistic(self.stream_id)
                self.handler_binance_websocket_api_manager.add_total_received_bytes(size)
                self.handler_binance_websocket_api_manager.increase_received_bytes_per_second(self.stream_id,
                                                                                              size)
            return received_data_json
        except RuntimeError as error_msg:
            logging.debug("binance_websocket_api_connection->receive(" +
                          str(self.stream_id) + ") - RuntimeError - error_msg: " + str(error_msg))
            sys.exit(1)
        except ssl.SSLError as error_msg:
            logging.debug("binance_websocket_api_connection->receive(" +
                          str(self.stream_id) + ") - ssl.SSLError - error_msg: " + str(error_msg))
        except KeyError as error_msg:
            logging.debug("binance_websocket_api_connection->receive(" +
                          str(self.stream_id) + ") - KeyError - error_msg: " + str(error_msg))
            self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
            if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False:
                self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
            sys.exit(1)
        except asyncio.base_futures.InvalidStateError as error_msg:
            logging.critical("binance_websocket_api_connection->receive(" +
                             str(self.stream_id) + ") - asyncio.base_futures.InvalidStateError - error_msg: " +
                             str(error_msg) + " - Extra info: https://github.com/oliver-zehentleitner/unicorn-binance-"
                                              "websocket-api/issues/18 - open an own issue if needed!")
            self.handler_binance_websocket_api_manager.stream_is_stopping(self.stream_id)
            if self.handler_binance_websocket_api_manager.is_stop_request(self.stream_id) is False:
                self.handler_binance_websocket_api_manager.set_restart_request(self.stream_id)
            sys.exit(1)
Example 28
Project: udapi-python   Author: udapi   File: configurations.py    License: GNU General Public License v3.0 5 votes vote down vote up
def apply_query(self, query_id, node):
    """
    A generic method for applying a specified query on a specified node.

    :param query_id: A name of the query method to be called.
    :param node: An input node.
    :raises RuntimeError: if no query function with that name exists.

    """
    if self.verbose:
        logging.info(' - applying query %s', query_id)

    # BUG FIX: globals().get() never raises, so the original try/except around the
    # lookup was dead code and a missing query crashed later with a bare TypeError
    # ('NoneType' object is not callable). Check the result explicitly so the
    # intended critical-log + RuntimeError path actually runs.
    method = globals().get(query_id)
    if method is None:
        logging.critical(' - no such query %s', query_id)
        raise RuntimeError('No such query %s' % query_id)

    triples = []
    try:
        triples = method(node)
    except ValueError as exception:
        # the query signals "no configurations" via ValueError
        if self.verbose:
            logging.info(' - no configurations: %s', exception)

    if len(triples) == 0:
        if self.verbose:
            logging.info(' - no configurations, but all conditions passed.')

    for (node_a, relation_name, node_b) in triples:
        print_triple(node_a, relation_name, node_b,
                     print_lemma=self.print_lemmas)
Example 29
Project: PT-help   Author: Rhilip   File: scrapy_6v.py    License: MIT License 5 votes vote down vote up
def exec(self, sql: object, args: object = None, r_dict: object = False, fetch_all: object = False) -> object:
        """Execute *sql* under the commit lock, committing on success and rolling
        back on a pymysql error; return one row or all rows.

        NOTE(review): the method name shadows the `exec` builtin, but renaming it
        would break callers.
        """
        with self._commit_lock:
            # The style of return info (dict or tuple)
            make_cursor = self.db.cursor
            cursor = make_cursor(pymysql.cursors.DictCursor) if r_dict else make_cursor()
            affected = cursor.execute(sql, args)
            try:
                self.db.commit()
                logging.debug("Success,DDL: \"{sql}\",Affect rows: {row}".format(sql=sql, row=affected))
            except pymysql.Error as err:
                logging.critical("Mysql Error: \"{err}\",DDL: \"{sql}\"".format(err=err.args, sql=sql))
                self.db.rollback()

            # The lines of return info (one or all)
            return cursor.fetchall() if fetch_all else cursor.fetchone()
Example 30
Project: leaguedirector   Author: RiotGames   File: app.py    License: Apache License 2.0 5 votes vote down vote up
def handleMessage(self, msgType, msgContext, msgString):
        if msgType == QtInfoMsg:
            logging.info('(QT) %s', msgString)
        elif msgType == QtDebugMsg:
            logging.debug('(QT) %s', msgString)
        elif msgType == QtWarningMsg:
            logging.warning('(QT) %s', msgString)
        elif msgType == QtCriticalMsg:
            logging.critical('(QT) %s', msgString)
        elif msgType == QtFatalMsg:
            logging.critical('(QT) %s', msgString)
        elif msgType == QtSystemMsg:
            logging.critical('(QT) %s', msgString)