Python logging.INFO Examples

The following are 30 code examples of logging.INFO, drawn from open-source projects. Note that logging.INFO is a level constant (the integer 20), not a callable: it is passed to setLevel(), basicConfig(), and similar APIs to control which records are emitted. Each example lists its source file, originating project, and license. You may also want to check out the other functions and classes available in the logging module.
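Before the project excerpts, here is a minimal, self-contained sketch of how logging.INFO is typically used; the logger name 'example' and the messages are illustrative, not taken from any of the projects below.

import logging

# logging.INFO is the integer 20; a logger or handler set to this level
# emits records at INFO and above and drops DEBUG records.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)

logger = logging.getLogger('example')
logger.info('shown: INFO (20) meets the configured threshold')
logger.debug('suppressed: DEBUG (10) is below logging.INFO')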
Example #1
Source File: utilities.py    From incubator-spot with Apache License 2.0
def get_logger(cls, logger_name, create_file=False):

        # create logger for prd_ci
        log = logging.getLogger(logger_name)
        log.setLevel(level=logging.INFO)

        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        if create_file:
                # create file handler for logger.
                fh = logging.FileHandler('SPOT.log')
                fh.setLevel(level=logging.DEBUG)
                fh.setFormatter(formatter)
        # create console handler for logger.
        ch = logging.StreamHandler()
        ch.setLevel(level=logging.DEBUG)
        ch.setFormatter(formatter)

        # add handlers to logger.
        if create_file:
            log.addHandler(fh)

        log.addHandler(ch)
        return log
Example #2
Source File: config.py    From Pytorch-Project-Template with MIT License
def setup_logging(log_dir):
    log_file_format = "[%(levelname)s] - %(asctime)s - %(name)s - : %(message)s in %(pathname)s:%(lineno)d"
    log_console_format = "[%(levelname)s]: %(message)s"

    # Main logger
    main_logger = logging.getLogger()
    main_logger.setLevel(logging.INFO)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(Formatter(log_console_format))

    exp_file_handler = RotatingFileHandler('{}exp_debug.log'.format(log_dir), maxBytes=10**6, backupCount=5)
    exp_file_handler.setLevel(logging.DEBUG)
    exp_file_handler.setFormatter(Formatter(log_file_format))

    exp_errors_file_handler = RotatingFileHandler('{}exp_error.log'.format(log_dir), maxBytes=10**6, backupCount=5)
    exp_errors_file_handler.setLevel(logging.WARNING)
    exp_errors_file_handler.setFormatter(Formatter(log_file_format))

    main_logger.addHandler(console_handler)
    main_logger.addHandler(exp_file_handler)
    main_logger.addHandler(exp_errors_file_handler) 
Example #3
Source File: util.py    From mlbv with GNU General Public License v3.0
def init_logging(log_file=None, append=False, console_loglevel=logging.INFO):
    """Set up logging to file and console."""
    if log_file is not None:
        if append:
            filemode_val = 'a'
        else:
            filemode_val = 'w'
        logging.basicConfig(level=logging.DEBUG,
                            format="%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s",
                            # datefmt='%m-%d %H:%M',
                            filename=log_file,
                            filemode=filemode_val)
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(console_loglevel)
    # set a format which is simpler for console use
    formatter = logging.Formatter("%(message)s")
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)
    global LOG
    LOG = logging.getLogger(__name__) 
Example #4
Source File: client.py    From BASS with GNU General Public License v2.0
def parse_args():
    parser = argparse.ArgumentParser(description = "Find common ngrams in binary files")
    parser.add_argument("-v", "--verbose", action = "count", default = 0, help = "Increase verbosity")
    parser.add_argument("--output", type = str, default = None, help = "Output to file instead of stdout")
    parser.add_argument("--url", type = str, default = "http://localhost:5000", help = "URL of BASS server")
    parser.add_argument("samples", metavar = "sample", nargs = "+", help = "Cluster samples")

    args = parser.parse_args()

    try:
        loglevel = {
            0: logging.ERROR,
            1: logging.WARN,
            2: logging.INFO}[args.verbose]
    except KeyError:
        loglevel = logging.DEBUG
    logging.basicConfig(level = loglevel)
    logging.getLogger().setLevel(loglevel)

    return args 
Example #5
Source File: cmdline.py    From BASS with GNU General Public License v2.0
def parse_args():
    parser = argparse.ArgumentParser(description = "Bass")
    parser.add_argument("-v", "--verbose", action = "count", default = 0, help = "Increase verbosity")
    parser.add_argument("samples", metavar = "sample", nargs = "+", help = "Sample path") 

    args = parser.parse_args()

    try:
        loglevel = {
            0: logging.ERROR,
            1: logging.WARN,
            2: logging.INFO
        }[args.verbose]
    except KeyError:
        loglevel = logging.DEBUG

    logging.basicConfig(level = loglevel)
    logging.getLogger().setLevel(loglevel)

    return args 
Example #6
Source File: whitelist.py    From BASS with GNU General Public License v2.0
def parse_args():
    parser = argparse.ArgumentParser(description = "Add samples to BASS whitelist")
    parser.add_argument("-v", "--verbose", action = "count", default = 0, help = "Increase verbosity")
    parser.add_argument("--url", type = str, default = "http://localhost:5000", help = "URL of BASS server")
    parser.add_argument("sample", help = "Whitelist sample")

    args = parser.parse_args()

    try:
        loglevel = {
            0: logging.ERROR,
            1: logging.WARN,
            2: logging.INFO}[args.verbose]
    except KeyError:
        loglevel = logging.DEBUG
    logging.basicConfig(level = loglevel)
    logging.getLogger().setLevel(loglevel)

    return args 
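Examples #4 through #6 share the same pattern for turning a repeated -v flag into a log level. A minimal standalone sketch of that pattern (the function name is illustrative) looks like this:

import logging

def level_from_verbosity(verbose_count):
    # 0 -> ERROR, 1 -> WARN, 2 -> INFO, 3 or more -> DEBUG,
    # mirroring the dict lookup with a KeyError fallback used above.
    return {0: logging.ERROR, 1: logging.WARN, 2: logging.INFO}.get(verbose_count, logging.DEBUG)

logging.basicConfig(level=level_from_verbosity(2))  # e.g. the user passed -vv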
Example #7
Source File: benchmark.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def setup_logging(log_loc):
    if os.path.exists(log_loc):
        shutil.move(log_loc, log_loc + "_" + str(int(os.path.getctime(log_loc))))
    os.makedirs(log_loc)

    log_file = '{}/benchmark.log'.format(log_loc)
    LOGGER = logging.getLogger('benchmark')
    LOGGER.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(levelname)s:%(name)s %(message)s')
    file_handler = logging.FileHandler(log_file)
    console_handler = logging.StreamHandler()
    file_handler.setFormatter(formatter)
    console_handler.setFormatter(formatter)

    LOGGER.addHandler(file_handler)
    LOGGER.addHandler(console_handler)
    return LOGGER 
Example #8
Source File: utils.py    From tpu_pretrain with Apache License 2.0
def init(args):
    # init logger
    log_format = '%(asctime)-10s: %(message)s'
    if args.log_file is not None and args.log_file != "":
        Path(args.log_file).parent.mkdir(parents=True, exist_ok=True)
        logging.basicConfig(level=logging.INFO, filename=args.log_file, filemode='w', format=log_format)
        logging.warning(f'This will get logged to file: {args.log_file}')
    else:
        logging.basicConfig(level=logging.INFO, format=log_format)

    # create output dir
    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    assert 'bert' in args.output_dir.name, \
        '''Output dir name has to contain `bert` or `roberta` for AutoModel.from_pretrained to correctly infer the model type'''

    args.output_dir.mkdir(parents=True, exist_ok=True)

    # set random seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed) 
Example #9
Source File: utils.py    From incubator-spot with Apache License 2.0
def get_logger(cls, logger_name, create_file=False):

        # create logger for prd_ci
        log = logging.getLogger(logger_name)
        log.setLevel(level=logging.INFO)

        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        if create_file:
            # create file handler for logger.
            fh = logging.FileHandler('oa.log')
            fh.setLevel(level=logging.DEBUG)
            fh.setFormatter(formatter)
        # create console handler for logger.
        ch = logging.StreamHandler()
        ch.setLevel(level=logging.DEBUG)
        ch.setFormatter(formatter)

        # add handlers to logger.
        if create_file:
            log.addHandler(fh)

        log.addHandler(ch)
        return log 
Example #10
Source File: getmetrics_sysdig.py    From InsightAgent with Apache License 2.0
def set_logger_config(level):
    """Set up logging according to the defined log level"""
    # Get the root logger
    logger_obj = logging.getLogger(__name__)
    # Have to set the root logger level, it defaults to logging.WARNING
    logger_obj.setLevel(level)
    # route INFO and DEBUG logging to stdout from stderr
    logging_handler_out = logging.StreamHandler(sys.stdout)
    logging_handler_out.setLevel(logging.DEBUG)
    # create a logging format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(process)d - %(threadName)s - %(levelname)s - %(message)s')
    logging_handler_out.setFormatter(formatter)
    logger_obj.addHandler(logging_handler_out)

    logging_handler_err = logging.StreamHandler(sys.stderr)
    logging_handler_err.setLevel(logging.WARNING)
    logger_obj.addHandler(logging_handler_err)
    return logger_obj 
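One caveat about the pattern in this example (and in Examples #11 and #29): the stdout handler is set to logging.DEBUG and has no filter, so WARNING and higher records are written to both stdout and stderr. If you only want DEBUG and INFO on stdout, a level-capping filter does the trick. This is a hedged sketch, not part of the original InsightAgent code:

import logging
import sys

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

handler_out = logging.StreamHandler(sys.stdout)
handler_out.setLevel(logging.DEBUG)
# Callable filters are accepted since Python 3.2; keep only records below WARNING.
handler_out.addFilter(lambda record: record.levelno < logging.WARNING)

handler_err = logging.StreamHandler(sys.stderr)
handler_err.setLevel(logging.WARNING)

logger.addHandler(handler_out)
logger.addHandler(handler_err)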
Example #11
Source File: getmetrics_hadoop.py    From InsightAgent with Apache License 2.0
def set_logger_config(level):
    """Set up logging according to the defined log level"""
    # Get the root logger
    logger_obj = logging.getLogger(__name__)
    # Have to set the root logger level, it defaults to logging.WARNING
    logger_obj.setLevel(level)
    # route INFO and DEBUG logging to stdout from stderr
    logging_handler_out = logging.StreamHandler(sys.stdout)
    logging_handler_out.setLevel(logging.DEBUG)
    # create a logging format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(process)d - %(threadName)s - %(levelname)s - %(message)s')
    logging_handler_out.setFormatter(formatter)
    logger_obj.addHandler(logging_handler_out)

    logging_handler_err = logging.StreamHandler(sys.stderr)
    logging_handler_err.setLevel(logging.WARNING)
    logger_obj.addHandler(logging_handler_err)
    return logger_obj 
Example #12
Source File: main.py    From dogTorch with MIT License
def setup_logging(filepath, verbose):
    logFormatter = logging.Formatter(
        '%(levelname)s %(asctime)-20s:\t %(message)s')
    rootLogger = logging.getLogger()
    if verbose:
        rootLogger.setLevel(logging.DEBUG)
    else:
        rootLogger.setLevel(logging.INFO)
    logging.getLogger('PIL').setLevel(logging.WARNING)

    # Setup the logger to write into file
    fileHandler = logging.FileHandler(filepath)
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)

    # Setup the logger to write into stdout
    consoleHandler = logging.StreamHandler(sys.stdout)
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler) 
Example #13
Source File: client.py    From microgear-python with ISC License
def create(gearkey,gearsecret, appid="", args = {}):
    if 'debugmode' in args:
        logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%d/%m/%Y %I:%M:%S %p',
                        )
    else:
        logging.basicConfig(level=logging.WARNING,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%d/%m/%Y %I:%M:%S %p',
                        )
    microgear.gearalias = args.get('alias',"")[0:16]
    if 'scope' in args:
        matchScope = re.match( r'^(\w+:[a-zA-Z\/]+,*)+$', args['scope'])
        if matchScope:
            microgear.scope = args["scope"]
        else:
            microgear.scope = ""
            logging.warning("Specify scope is not valid")

    microgear.gearkey = gearkey
    microgear.gearsecret = gearsecret
    microgear.appid = appid 
Example #14
Source File: _cplogging.py    From cherrypy with BSD 3-Clause "New" or "Revised" License
def __init__(self, appid=None, logger_root='cherrypy'):
        self.logger_root = logger_root
        self.appid = appid
        if appid is None:
            self.error_log = logging.getLogger('%s.error' % logger_root)
            self.access_log = logging.getLogger('%s.access' % logger_root)
        else:
            self.error_log = logging.getLogger(
                '%s.error.%s' % (logger_root, appid))
            self.access_log = logging.getLogger(
                '%s.access.%s' % (logger_root, appid))
        self.error_log.setLevel(logging.INFO)
        self.access_log.setLevel(logging.INFO)

        # Silence the no-handlers "warning" (stderr write!) in stdlib logging
        self.error_log.addHandler(NullHandler())
        self.access_log.addHandler(NullHandler())

        cherrypy.engine.subscribe('graceful', self.reopen_files) 
Example #15
Source File: _cplogging.py    From cherrypy with BSD 3-Clause "New" or "Revised" License
def error(self, msg='', context='', severity=logging.INFO,
              traceback=False):
        """Write the given ``msg`` to the error log.

        This is not just for errors! Applications may call this at any time
        to log application-specific information.

        If ``traceback`` is True, the traceback of the current exception
        (if any) will be appended to ``msg``.
        """
        exc_info = None
        if traceback:
            exc_info = _cperror._exc_info()

        self.error_log.log(
            severity,
            ' '.join((self.time(), context, msg)),
            exc_info=exc_info,
        ) 
Example #16
Source File: replay.py    From iSDX with Apache License 2.0
def main(argv):
    logging.basicConfig(level=logging.INFO)

    log_history = LogHistory(argv.config, argv.flow_dir, argv.port_dir, int(argv.num_steps), debug=True)

    channel = "sdx_stats"
    address = "192.168.99.100"
    port = 6379
    db = 0

    publisher = Publisher(channel, address, port)

    log_replay = LogReplay(log_history, publisher, int(argv.timestep), debug=True)

    # start replay
    replay_thread = Thread(target=log_replay.start)
    replay_thread.daemon = True
    replay_thread.start()

    while replay_thread.is_alive():
        try:
            replay_thread.join(1)
        except KeyboardInterrupt:
            log_replay.stop() 
Example #17
Source File: loggingwrapper.py    From CAMISIM with Apache License 2.0
def set_level(self, level):
		"""
		Set the minimum level of messages to be logged.

		Level of Log Messages
		CRITICAL	50
		ERROR	40
		WARNING	30
		INFO	20
		DEBUG	10
		NOTSET	0

		@param level: minimum level of messages to be logged
		@type level: int or long

		@return: None
		@rtype: None
		"""
		assert level in self._levelNames

		list_of_handlers = self._logger.handlers
		for handler in list_of_handlers:
			handler.setLevel(level) 
Example #18
Source File: loggingwrapper.py    From CAMISIM with Apache License 2.0
def add_log_stream(self, stream=sys.stderr, level=logging.INFO):
		"""
		Add a stream where messages are outputted to.

		@param stream: stderr/stdout or a file stream
		@type stream: file | FileIO | StringIO
		@param level: minimum level of messages to be logged
		@type level: int | long

		@return: None
		@rtype: None
		"""
		assert self.is_stream(stream)
		# assert isinstance(stream, (file, io.FileIO))
		assert level in self._levelNames

		err_handler = logging.StreamHandler(stream)
		err_handler.setFormatter(self.message_formatter)
		err_handler.setLevel(level)
		self._logger.addHandler(err_handler) 
Example #19
Source File: loggingwrapper.py    From CAMISIM with Apache License 2.0
def set_log_level(self, verbose, debug):
		"""
		Simplified way to set log level.

		@attention verbose: Ignored if 'debug' true

		@param verbose: Display info messages and higher
		@type verbose: bool
		@param debug: Display debug messages and higher
		@type debug: bool

		@return: Nothing
		@rtype: None
		"""
		if debug:
			self._logger.set_level(self._logger.DEBUG)
		elif verbose:
			self._logger.set_level(self._logger.INFO)
		else:
			self._logger.set_level(self._logger.WARNING) 
Example #20
Source File: loggingwrapper.py    From CAMISIM with Apache License 2.0
def set_level(self, level):
        """
        Set the minimum level of messages to be logged.

        Level of Log Messages
        CRITICAL    50
        ERROR    40
        WARNING    30
        INFO    20
        DEBUG    10
        NOTSET    0

        @param level: minimum level of messages to be logged
        @type level: int or long

        @return: None
        @rtype: None
        """
        assert level in self._levelNames

        list_of_handlers = self._logger.handlers
        for handler in list_of_handlers:
            handler.setLevel(level) 
Example #21
Source File: loggingwrapper.py    From CAMISIM with Apache License 2.0
def add_log_stream(self, stream=sys.stderr, level=logging.INFO):
        """
        Add a stream where messages are outputted to.

        @param stream: stderr/stdout or a file stream
        @type stream: file | FileIO | StringIO
        @param level: minimum level of messages to be logged
        @type level: int | long

        @return: None
        @rtype: None
        """
        assert self.is_stream(stream)
        # assert isinstance(stream, (file, io.FileIO))
        assert level in self._levelNames

        err_handler = logging.StreamHandler(stream)
        err_handler.setFormatter(self.message_formatter)
        err_handler.setLevel(level)
        self._logger.addHandler(err_handler) 
Example #22
Source File: loggingwrapper.py    From CAMISIM with Apache License 2.0
def set_log_level(self, verbose, debug):
        """
        Simplified way to set log level.

        @attention verbose: Ignored if 'debug' true

        @param verbose: Display info messages and higher
        @type verbose: bool
        @param debug: Display debug messages and higher
        @type debug: bool

        @return: Nothing
        @rtype: None
        """
        if debug:
            self._logger.set_level(self._logger.DEBUG)
        elif verbose:
            self._logger.set_level(self._logger.INFO)
        else:
            self._logger.set_level(self._logger.WARNING) 
Example #23
Source File: client.py    From jsonrpcclient with MIT License
def basic_logging(self) -> None:
        """
        Call this on the client object to create log handlers to output request and
        response messages.
        """
        # Request handler
        if len(request_log.handlers) == 0:
            request_handler = logging.StreamHandler()
            request_handler.setFormatter(
                logging.Formatter(fmt=self.DEFAULT_REQUEST_LOG_FORMAT)
            )
            request_log.addHandler(request_handler)
            request_log.setLevel(logging.INFO)
        # Response handler
        if len(response_log.handlers) == 0:
            response_handler = logging.StreamHandler()
            response_handler.setFormatter(
                logging.Formatter(fmt=self.DEFAULT_RESPONSE_LOG_FORMAT)
            )
            response_log.addHandler(response_handler)
            response_log.setLevel(logging.INFO) 
Example #24
Source File: common.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def create_logger():
    logger = logging.getLogger('sgd_svrg')
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    fh = logging.FileHandler('experiments.log')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    return logger


Example #25
Source File: logger.py    From PickTrue with MIT License
def __get_logger(name):
    __log_level = logging.INFO

    if "--debug-%s" % name in sys.argv:
        __log_level = logging.DEBUG

    fmt = "%(levelname)s - %(asctime)-15s - %(filename)s - line %(lineno)d --> %(message)s"
    date_fmt = "%a %d %b %Y %H:%M:%S"
    formatter = logging.Formatter(fmt, date_fmt)

    handler = logging.StreamHandler()
    handler.setFormatter(formatter)

    logger = logging.getLogger(name)
    logger.addHandler(
        handler
    )
    logger.setLevel(level=__log_level)
    return logger 
Example #26
Source File: getmetrics_sysdig.py    From InsightAgent with Apache License 2.0
def get_parameters():
    usage = "Usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-w", "--serverUrl",
                      action="store", dest="serverUrl", help="Server Url")
    parser.add_option("-c", "--chunkLines",
                      action="store", dest="chunkLines", help="Timestamps per chunk for historical data.")
    parser.add_option("-l", "--logLevel",
                      action="store", dest="logLevel", help="Change log verbosity(WARNING: 0, INFO: 1, DEBUG: 2)")
    (options, args) = parser.parse_args()

    params = dict()
    if options.serverUrl is None:
        params['serverUrl'] = 'http://stg.insightfinder.com'
    else:
        params['serverUrl'] = options.serverUrl
    if options.chunkLines is None:
        params['chunkLines'] = 50
    else:
        params['chunkLines'] = int(options.chunkLines)
    params['logLevel'] = logging.INFO
    if options.logLevel == '0':
        params['logLevel'] = logging.WARNING
    elif options.logLevel == '1':
        params['logLevel'] = logging.INFO
    elif options.logLevel >= '2':
        params['logLevel'] = logging.DEBUG

    return params

Example #27
Source File: prop_args2.py    From indras_net with GNU General Public License v3.0
def __init__(self, model_nm, logfile=None, prop_dict=None,
                 loglevel=logging.INFO):
        """
        Loads and sets properties in the following order:
        1. The Database
        2. The User's Environment (operating system, dev/prod settings, etc.)
        3. Property File
        4. Command Line
        5. Questions Prompts During Run-Time
        """
        self.logfile = logfile
        self.model_nm = model_nm
        self.graph = nx.Graph()
        self.props = {}

        # 1. The Database
        self.set_props_from_db()

        # 2. The Environment
        self.overwrite_props_from_env()

        # 3. Property File
        self.overwrite_props_from_dict(prop_dict)

        if self.props[UTYPE].val in (TERMINAL, IPYTHON, IPYTHON_NB):
            # 4. process command line args and set them as properties:
            self.overwrite_props_from_cl()

            # 5. Ask the user questions.
            self.overwrite_props_from_user()

        elif self.props[UTYPE].val == WEB:
            self.props[PERIODS] = Prop(val=1)
            self.props[BASE_DIR] = Prop(val=os.environ[BASE_DIR])

        self.logger = Logger(self, model_name=model_nm, logfile=logfile)
        self.graph.add_edge(self, self.logger) 
Example #28
Source File: test_notebooks_multi_gpu.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def setUpClass(self):
        logging.basicConfig(level=logging.INFO)
        assert _download_straight_dope_notebooks()

Example #29
Source File: getmessages_kafka2.py    From InsightAgent with Apache License 2.0
def set_logger_config(level):
    """ set up logging according to the defined log level """
    # Get the root logger
    logger_obj = logging.getLogger(__name__)
    # Have to set the root logger level, it defaults to logging.WARNING
    logger_obj.setLevel(level)
    # route INFO and DEBUG logging to stdout from stderr
    logging_handler_out = logging.StreamHandler(sys.stdout)
    logging_handler_out.setLevel(logging.DEBUG)
    # create a logging format
    formatter = logging.Formatter(
        '{ts} [pid {pid}] {lvl} {mod}.{func}():{line} {msg}'.format(
            ts='%(asctime)s',
            pid='%(process)d',
            lvl='%(levelname)-8s',
            mod='%(module)s',
            func='%(funcName)s',
            line='%(lineno)d',
            msg='%(message)s'),
        ISO8601[0])
    logging_handler_out.setFormatter(formatter)
    logger_obj.addHandler(logging_handler_out)

    logging_handler_err = logging.StreamHandler(sys.stderr)
    logging_handler_err.setLevel(logging.WARNING)
    logger_obj.addHandler(logging_handler_err)
    return logger_obj 
Example #30
Source File: getmessages_kafka2.py    From InsightAgent with Apache License 2.0
def get_cli_config_vars():
    """ get CLI options. use of these options should be rare """
    usage = 'Usage: %prog [options]'
    parser = OptionParser(usage=usage)
    """
    ## not ready.
    parser.add_option('--threads', default=1, action='store', dest='threads',
                      help='Number of threads to run')
    """
    parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
                      help='Only display warning and error log messages')
    parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
                      help='Enable verbose logging')
    parser.add_option('-t', '--testing', action='store_true', dest='testing',
                      help='Set to testing mode (do not send data).' +
                           ' Automatically turns on verbose logging')
    (options, args) = parser.parse_args()

    """
    # not ready
    try:
        threads = int(options.threads)
    except ValueError:
        threads = 1
    """

    config_vars = {
        'threads': 1,
        'testing': False,
        'log_level': logging.INFO
    }

    if options.testing:
        config_vars['testing'] = True

    if options.verbose or options.testing:
        config_vars['log_level'] = logging.DEBUG
    elif options.quiet:
        config_vars['log_level'] = logging.WARNING

    return config_vars