Python logging.DEBUG Examples (note: logging.DEBUG is a level constant, not a callable)

The following code examples show how to use logging.DEBUG. They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
Project: incubator-spot   Author: apache   File: utilities.py    Apache License 2.0 15 votes vote down vote up
def get_logger(cls, logger_name, create_file=False):
    """Return a logger named *logger_name*, configured at INFO level.

    A DEBUG-level console handler is attached, plus a DEBUG-level file
    handler writing to ``SPOT.log`` when *create_file* is true.

    Fix: previously every call attached fresh handlers to the (cached)
    logger, so calling this twice made each record print twice. A logger
    that already has handlers is now returned untouched.
    """
    log = logging.getLogger(logger_name)
    log.setLevel(level=logging.INFO)

    # Already configured on an earlier call — don't stack more handlers.
    if log.handlers:
        return log

    # One formatter shared by all handlers.
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    if create_file:
        # Create file handler for logger.
        fh = logging.FileHandler('SPOT.log')
        fh.setLevel(level=logging.DEBUG)
        fh.setFormatter(formatter)
        log.addHandler(fh)

    # Create console handler for logger.
    ch = logging.StreamHandler()
    ch.setLevel(level=logging.DEBUG)
    ch.setFormatter(formatter)
    log.addHandler(ch)
    return log
Example 2
Project: mlbv   Author: kmac   File: util.py    GNU General Public License v3.0 11 votes vote down vote up
def init_logging(log_file=None, append=False, console_loglevel=logging.INFO):
    """Set up logging to file and console."""
    if log_file is not None:
        # 'a' keeps output from earlier runs; 'w' truncates the file.
        mode = 'a' if append else 'w'
        logging.basicConfig(level=logging.DEBUG,
                            format="%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s",
                            # datefmt='%m-%d %H:%M',
                            filename=log_file,
                            filemode=mode)
    # Console handler: bare messages only, at the caller-chosen threshold.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(console_loglevel)
    console_handler.setFormatter(logging.Formatter("%(message)s"))
    # Attach to the root logger so every module's records reach the console.
    logging.getLogger('').addHandler(console_handler)
    global LOG
    LOG = logging.getLogger(__name__)
Example 3
Project: GreenGuard   Author: D3-AI   File: utils.py    MIT License 9 votes vote down vote up
def logging_setup(verbosity=1, logfile=None, logger_name=None):
    """Configure *logger_name* from a verbosity count.

    verbosity maps to level as (3 - verbosity) * 10, so 1 -> INFO,
    2 -> DEBUG. Output goes to *logfile* when given, else to the console.
    """
    logger = logging.getLogger(logger_name)
    level = (3 - verbosity) * 10
    formatter = logging.Formatter(
        '%(asctime)s - %(process)d - %(levelname)s - %(module)s - %(message)s')
    logger.setLevel(level)
    logger.propagate = False

    if logfile:
        # File output captures everything down to DEBUG.
        handler = logging.FileHandler(logfile)
        handler.setLevel(logging.DEBUG)
    else:
        # Console output honours the computed verbosity level.
        handler = logging.StreamHandler()
        handler.setLevel(level)

    handler.setFormatter(formatter)
    logger.addHandler(handler)
Example 4
Project: Hasami   Author: Lokraan   File: main.py    MIT License 7 votes vote down vote up
def setup_logging(config: dict) -> None:
    """Wire up root logging to console and hasami.log.

    The root level is DEBUG when config["debug"] is truthy, INFO otherwise;
    the chatty discord loggers are pinned to WARNING/INFO.
    """
    logging.getLogger("discord.http").setLevel(logging.WARNING)
    logging.getLogger("discord").setLevel(logging.INFO)

    root = logging.getLogger()
    root_level = logging.DEBUG if config["debug"] else logging.INFO

    # {}-style formatter shared by both handlers.
    formatter = logging.Formatter(
        "[{asctime}] [{levelname:<6}] {name}: {message}",
        "%Y-%m-%d %H:%M:%S",
        style="{",
    )

    console_handler = logging.StreamHandler()
    file_handler = logging.FileHandler(
        filename="hasami.log", encoding="utf-8", mode="w")

    for handler in (console_handler, file_handler):
        handler.setFormatter(formatter)
        root.addHandler(handler)

    root.setLevel(root_level)
Example 5
Project: incubator-spot   Author: apache   File: utils.py    Apache License 2.0 6 votes vote down vote up
def get_logger(cls, logger_name, create_file=False):
    """Return the *logger_name* logger at INFO level with DEBUG handlers.

    NOTE(review): each call attaches a new handler set to the same cached
    logger object, so repeated calls duplicate output — preserved as-is.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(level=logging.INFO)

    # Shared record format for file and console output.
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    file_handler = None
    if create_file:
        # Persist DEBUG output to oa.log.
        file_handler = logging.FileHandler('oa.log')
        file_handler.setLevel(level=logging.DEBUG)
        file_handler.setFormatter(fmt)

    # Console handler, also down to DEBUG.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level=logging.DEBUG)
    stream_handler.setFormatter(fmt)

    if file_handler is not None:
        logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    return logger
Example 6
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def test_persistent_loggers(self):
        """Loggers survive and keep their configuration after `del`."""
        # Logger objects are persistent and retain their configuration, even
        #  if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        # Register 'foo' for survival tracking (presumably via a weakref —
        # helper defined in the test harness; confirm there).
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        # Root is at INFO, so this DEBUG record (message '1') is dropped.
        self.root_logger.debug(self.next_message())
        # 'foo' is at DEBUG, so message '2' is emitted.
        foo.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del foo
        # foo has survived.
        self._assertTruesurvival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ])
Example 7
Project: backtrader-cn   Author: pandalibin   File: sina.py    GNU General Public License v3.0 6 votes vote down vote up
def enable_debug_requests():
    """Turn on verbose HTTP tracing for the requests/urllib3 stack."""
    # Enabling debugging at http.client level (requests->urllib3->http.client):
    # shows the REQUEST, including HEADERS and DATA, and the RESPONSE with
    # HEADERS but without DATA; the response body is never logged.
    import logging
    from http.client import HTTPConnection

    HTTPConnection.debuglevel = 1
    # NOTE(review): `logger` is a module-level global defined elsewhere in
    # this file — confirm it exists before enabling.
    logger.setLevel(logging.DEBUG)
    urllib3_log = logging.getLogger("requests.packages.urllib3")
    urllib3_log.setLevel(logging.DEBUG)
    urllib3_log.propagate = True


# Uncomment the line below to enable debug mode
# enable_debug_requests() 
Example 8
Project: BASS   Author: Cisco-Talos   File: cmdline.py    GNU General Public License v2.0 6 votes vote down vote up
def parse_args():
    """Parse CLI arguments; repeated -v raises verbosity ERROR->WARN->INFO->DEBUG."""
    parser = argparse.ArgumentParser(description = "Bass")
    parser.add_argument("-v", "--verbose", action = "count", default = 0, help = "Increase verbosity")
    parser.add_argument("samples", metavar = "sample", nargs = "+", help = "Sample path")

    args = parser.parse_args()

    # 0/1/2 occurrences of -v map to ERROR/WARN/INFO; three or more -> DEBUG.
    levels = (logging.ERROR, logging.WARN, logging.INFO)
    loglevel = levels[args.verbose] if args.verbose < len(levels) else logging.DEBUG

    logging.basicConfig(level = loglevel)
    logging.getLogger().setLevel(loglevel)

    return args
Example 9
Project: BASS   Author: Cisco-Talos   File: whitelist.py    GNU General Public License v2.0 6 votes vote down vote up
def parse_args():
    """Parse CLI arguments for whitelisting a sample on a BASS server."""
    parser = argparse.ArgumentParser(description = "Add samples to BASS whitelist")
    parser.add_argument("-v", "--verbose", action = "count", default = 0, help = "Increase verbosity")
    parser.add_argument("--url", type = str, default = "http://localhost:5000", help = "URL of BASS server")
    parser.add_argument("sample", help = "Whitelist sample")

    args = parser.parse_args()

    # 0/1/2 occurrences of -v map to ERROR/WARN/INFO; anything more -> DEBUG.
    verbosity_levels = {0: logging.ERROR, 1: logging.WARN, 2: logging.INFO}
    loglevel = verbosity_levels.get(args.verbose, logging.DEBUG)
    logging.basicConfig(level = loglevel)
    logging.getLogger().setLevel(loglevel)

    return args
Example 10
Project: BASS   Author: Cisco-Talos   File: client.py    GNU General Public License v2.0 6 votes vote down vote up
def parse_args():
    """Parse CLI arguments for clustering samples against a BASS server."""
    parser = argparse.ArgumentParser(description = "Find common ngrams in binary files")
    parser.add_argument("-v", "--verbose", action = "count", default = 0, help = "Increase verbosity")
    parser.add_argument("--output", type = str, default = None, help = "Output to file instead of stdout")
    parser.add_argument("--url", type = str, default = "http://localhost:5000", help = "URL of BASS server")
    parser.add_argument("samples", metavar = "sample", nargs = "+", help = "Cluster samples")

    args = parser.parse_args()

    # 0/1/2 occurrences of -v map to ERROR/WARN/INFO; anything more -> DEBUG.
    loglevel = {0: logging.ERROR, 1: logging.WARN,
                2: logging.INFO}.get(args.verbose, logging.DEBUG)
    logging.basicConfig(level = loglevel)
    logging.getLogger().setLevel(loglevel)

    return args
Example 11
Project: RelayBot2.0   Author: nukeop   File: relaybot.py    GNU General Public License v3.0 6 votes vote down vote up
def configure_logging(logfilename=None):
    """Create the root logger, configure it, and return it.

    A DEBUG console handler is always attached; when *logfilename* is
    given, a 2 MiB rotating file handler (8 backups) is added as well.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)

    # The Steam client library is chatty — keep it at WARNING.
    logging.getLogger('SteamClient').setLevel(logging.WARNING)

    fmt = logging.Formatter("[%(levelname)s] - %(asctime)s - %(name)s -"
    " %(message)s")

    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(fmt)
    root.addHandler(stream_handler)

    if logfilename is not None:
        rotating_handler = logging.handlers.RotatingFileHandler(
            logfilename, maxBytes=2 * 1024 * 1024, backupCount=8)
        rotating_handler.setLevel(logging.DEBUG)
        rotating_handler.setFormatter(fmt)
        root.addHandler(rotating_handler)

    return root
Example 12
Project: RelayBot2.0   Author: nukeop   File: logs.py    GNU General Public License v3.0 6 votes vote down vote up
def setup_logger(self, steamid, friend=False):
    """Create a per-steamid DEBUG file logger and register it in self.loggers.

    The log file name comes from the friend's display name or the group
    name (project helpers — behavior inferred from names only).
    """
    if friend:
        name = self.make_filename(
            self.bot.user.get_name_from_steamid(steamid),
            steamid
        )
        path = os.path.join(self.friend_logs_path, name)
    else:
        name = self.make_filename(
            self.bot.user.groups.get_name(steamid),
            steamid
        )
        path = os.path.join(self.group_logs_path, name)

    log = logging.getLogger(__name__+'.'+str(steamid))
    self.loggers[steamid] = log
    log.setLevel(logging.DEBUG)
    # Keep chat transcripts out of the application-wide log.
    log.propagate = False

    handler = logging.FileHandler(path)
    handler.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
    handler.setLevel(logging.DEBUG)
    log.addHandler(handler)
Example 13
Project: oeffis-paper   Author: djaffry   File: utils.py    MIT License 6 votes vote down vote up
def get_logger(name):
    """
    Get a preconfigured logger.

    The logger itself accepts DEBUG and above; its stdout handler only
    emits INFO and above. Example output:
    2019-03-03 12:40:20,025 - INFO - __main__: Application started!

    :param name: Logger name
    :return: preconfigured logger
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    stdout_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(levelname)s - %(name)s:  %(message)s'))

    log.addHandler(stdout_handler)
    return log
Example 14
Project: wikilinks   Author: trovdimi   File: builder.py    MIT License 6 votes vote down vote up
def run(self):
    """Interactive entry point: ask what to build, set up logging, dispatch."""
    self.print_title('This is the interactive building program')
    self.create_tmp_if_not_exists()

    choice = self.read_choice('Would you like to', [
        'create the database structure',
        'extract articles and redirects from the wikipedia dump file'
        ])

    # Configure file logging; the default log name encodes the chosen action.
    log_format = '%(levelname)s:\t%(asctime)-15s %(message)s'
    log_path = self.read_path(
        'Please enter the path of the logging file [.log]',
        default='./tmp/build-%d.log' % (choice[0]+1),
        must_exist=False)
    logging.basicConfig(filename=log_path, level=logging.DEBUG,
                        format=log_format, filemode='w')

    # choice[0] is the zero-based index of the selected action.
    if choice[0] == 0:
        self._create_structure()
    elif choice[0] == 1:
        self._extract_articles()
Example 15
Project: shaptools   Author: SUSE   File: shapcli_test.py    Apache License 2.0 6 votes vote down vote up
def test_run_sr_invalid_params(self, mock_parse_arguments, mock_setup_logger):
        """shapcli.run() must exit(1) when --sr is requested without a config
        file or a complete sid/instance/password set."""
        # Simulated CLI state: sr=True but no config and no password.
        mock_parser = mock.Mock()
        mock_args = mock.Mock(
            verbosity=False, config=False, sid='qas', instance='01', password=False, sr=True)
        mock_logger = mock.Mock()
        mock_hana_instance = mock.Mock()
        mock_parse_arguments.return_value = [mock_parser, mock_args]
        mock_setup_logger.return_value = mock_logger

        # run() is expected to bail out via sys.exit(1).
        with pytest.raises(SystemExit) as my_exit:
            shapcli.run()

        assert my_exit.type == SystemExit
        assert my_exit.value.code == 1

        # Verify the error path: args parsed once, logger set up at DEBUG,
        # user told what was missing, and help printed.
        mock_parse_arguments.assert_called_once_with()
        mock_setup_logger.assert_called_once_with(logging.DEBUG)
        mock_logger.info.assert_called_once_with(
            'Configuration file or sid, instance and passwords parameters must be provided\n')
        mock_parser.print_help.assert_called_once_with()
Example 16
Project: iSDX   Author: sdn-ixp   File: replay.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, config, flows_dir, ports_dir, num_timesteps, debug=False):
        """Build a log history: parse config and per-timestep flow/port logs.

        NOTE(review): parse_config, parse_logs, info and pretty are defined
        elsewhere in this project — their behavior is inferred from names.
        """
        self.logger = logging.getLogger("LogHistory")
        if debug:
            self.logger.setLevel(logging.DEBUG)

        # Record shape used when parsing log lines.
        self.log_entry = namedtuple("LogEntry", "source destination type")
        self.ports = defaultdict(list)
        self.flows = defaultdict(list)

        # Triple-nested counters: data[a][b][c] defaults to 0.
        self.data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
        self.current_timestep = 0
        self.total_timesteps = num_timesteps

        # Load everything up front, then dump a summary.
        self.parse_config(config)
        self.parse_logs(num_timesteps, flows_dir, ports_dir)
        self.info()

        pretty(self.data)
Example 17
Project: utilities   Author: czbiohub   File: log_util.py    MIT License 6 votes vote down vote up
def get_trfh_logger(name, *args):
    """Create a DEBUG-level logger with one timed-rotating file handler
    per spec.

    Each element of *args is a (file_name, log_level, when, backup_count)
    tuple describing one TimedRotatingFileHandler.
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)

    # Shared record format for every handler.
    fmt = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )

    for file_name, handler_level, when, backup_count in args:
        handler = TimedRotatingFileHandler(
            file_name, when=when, backupCount=backup_count
        )
        handler.setLevel(handler_level)
        handler.setFormatter(fmt)
        log.addHandler(handler)

    return log
Example 18
Project: rnm   Author: alexjaw   File: interface.py    MIT License 6 votes vote down vote up
def test_me():
    """Manual smoke test of the Interface wrapper for eth0/wlan0.

    NOTE(review): Interface is a project class and this touches real
    network devices — expected to be run by hand, not under CI.
    """
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger(__name__)
    logger.info('------------- Starting test... -------------')

    eth = Interface(iface='eth0')
    wlan = Interface(iface='wlan0')

    # Query both interfaces for their current address.
    resp = eth.get_ip()
    logger.info(repr(resp))

    resp = wlan.get_ip()
    logger.info(repr(resp))

    # Cycle the wireless connection.
    resp = wlan.disconnect()
    # logger.info(repr(resp))

    resp = wlan.connect()
    # logger.info(repr(resp))

    logger.info('-------------    Finished      -------------')
Example 19
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: __init__.py    MIT License 6 votes vote down vote up
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    module_logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    module_logger.addHandler(stderr_handler)
    module_logger.setLevel(level)
    module_logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler


# ... Clean up. 
Example 20
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: distro.py    MIT License 6 votes vote down vote up
def main():
    """CLI entry point: print Linux distro info, optionally as JSON."""
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    log.addHandler(logging.StreamHandler(sys.stdout))

    parser = argparse.ArgumentParser(description="Linux distro info tool")
    parser.add_argument(
        '--json',
        '-j',
        help="Output in machine readable format",
        action="store_true")
    args = parser.parse_args()

    if args.json:
        log.info(json.dumps(info(), indent=4, sort_keys=True))
        return

    # Human-readable output, one field per line.
    log.info('Name: %s', name(pretty=True))
    log.info('Version: %s', version(pretty=True))
    log.info('Codename: %s', codename())
Example 21
Project: esp-sdk-python   Author: EvidentSecurity   File: configuration.py    MIT License 6 votes vote down vote up
def debug(self, value):
        """
        Sets the debug status.

        Flips every logger in ``self.logger`` (a name -> Logger mapping,
        judging by the iteritems loop) between DEBUG and WARNING, and
        toggles httplib wire-level tracing to match.

        NOTE(review): Python 2 code — uses ``iteritems`` and ``httplib``.

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0
Example 22
Project: pnp   Author: HazardDede   File: pnp.py    MIT License 6 votes vote down vote up
def _setup_logging(*candidates, default_level=logging.INFO, env_key='PNP_LOG_CONF', verbose=False):
    """Setup logging configuration.

    A YAML dictConfig file is looked up among *candidates*; the env var
    named by *env_key* overrides that. Without a usable file, fall back
    to basicConfig.
    """
    log_file_path = get_first_existing_file(*candidates)
    # Environment variable beats any candidate path.
    override = os.getenv(env_key, None)
    if override:
        log_file_path = override

    if log_file_path and os.path.exists(log_file_path):
        with open(log_file_path, 'rt') as fhandle:
            config = yaml.safe_load(fhandle.read())
        logging.config.dictConfig(config)
        logging.info("Logging loaded from: %s", log_file_path)
        if verbose:
            logging.getLogger().setLevel(logging.DEBUG)
        return

    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        level=logging.DEBUG if verbose else default_level)
    logging.info("Logging loaded with basic configuration")
Example 23
Project: flasky   Author: RoseOu   File: __init__.py    MIT License 6 votes vote down vote up
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    log = logging.getLogger(__name__)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    log.addHandler(stream_handler)
    log.setLevel(level)
    log.debug('Added an stderr logging handler to logger: %s' % __name__)
    return stream_handler

# ... Clean up. 
Example 24
Project: flasky   Author: RoseOu   File: __init__.py    MIT License 6 votes vote down vote up
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    pkg_logger = logging.getLogger(__name__)
    new_handler = logging.StreamHandler()
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    new_handler.setFormatter(fmt)
    pkg_logger.addHandler(new_handler)
    pkg_logger.setLevel(level)
    pkg_logger.debug('Added an stderr logging handler to logger: %s' % __name__)
    return new_handler

# ... Clean up. 
Example 25
Project: core   Author: lifemapper   File: log.py    GNU General Public License v3.0 6 votes vote down vote up
def __init__(self, name, level=logging.DEBUG, addConsole=False, 
                      addFile=False, logFilename=None):
      """Build a compute-node logger on top of LmLogger (project base class).

      If addFile is set without an explicit logFilename, the log goes to
      COMPUTE_LOG_PATH/<name>.log.
      """
      # In case level was set to None
      if level is None:
         level = logging.DEBUG
      LmLogger.__init__(self, name, level)
      if addConsole:
         self._addConsoleHandler()
      if addFile:
         if logFilename is not None:
            fn = logFilename
         else:
            fn = os.path.join(COMPUTE_LOG_PATH, '%s.log' % (name))
         self._addFileHandler(fn)
# ............................................................................. 
Example 26
Project: badge-o-matic   Author: markuslindenberg   File: webapp.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _print(pdfdata):
    """Send rendered PDF bytes to the printer, or to /tmp/out.pdf in DEBUG mode."""
    if not app.config['DEBUG']:
        # Pipe the document straight into lpr for the configured printer.
        lpr = subprocess.Popen(['lpr', '-P', PRINTER], stdin=subprocess.PIPE)
        lpr.communicate(pdfdata)
        return
    app.logger.info('printing to /tmp/out.pdf')
    open('/tmp/out.pdf', 'wb').write(pdfdata)
Example 27
Project: fs_image   Author: facebookincubator   File: common.py    MIT License 5 votes vote down vote up
def init_logging(*, debug: bool = False):
    """Configure root logging: DEBUG when *debug* is set, INFO otherwise."""
    level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(
        level=level,
        format='%(levelname)s %(name)s %(asctime)s %(message)s',
    )
Example 28
Project: leapp-repository   Author: oamg   File: ntp2chrony.py    Apache License 2.0 5 votes vote down vote up
def main():
    """Entry point: parse options, then convert each root's ntp config to chrony."""
    cli = argparse.ArgumentParser(description="Convert ntp configuration to chrony.")
    cli.add_argument("-r", "--root", dest="roots", default=["/"], nargs="+",
                     metavar="DIR", help="specify root directory (default /)")
    cli.add_argument("--ntp-conf", action="store", default="/etc/ntp.conf",
                     metavar="FILE", help="specify ntp config (default /etc/ntp.conf)")
    cli.add_argument("--step-tickers", action="store", default="",
                     metavar="FILE", help="specify ntpdate step-tickers config (no default)")
    cli.add_argument("--chrony-conf", action="store", default="/etc/chrony.conf",
                     metavar="FILE", help="specify chrony config (default /etc/chrony.conf)")
    cli.add_argument("--chrony-keys", action="store", default="/etc/chrony.keys",
                     metavar="FILE", help="specify chrony keyfile (default /etc/chrony.keys)")
    cli.add_argument("-b", "--backup", action="store_true", help="backup existing configs before writing")
    cli.add_argument("-L", "--ignored-lines", action="store_true", help="print ignored lines")
    cli.add_argument("-D", "--ignored-directives", action="store_true",
                     help="print names of ignored directives")
    cli.add_argument("-n", "--dry-run", action="store_true", help="don't make any changes")
    cli.add_argument("-v", "--verbose", action="count", default=0, help="increase verbosity")

    opts = cli.parse_args()

    # Clamp the -v count to 0..2 and pick the matching log level.
    log_levels = (logging.ERROR, logging.INFO, logging.DEBUG)
    logging.basicConfig(format="%(message)s", level=log_levels[min(opts.verbose, 2)])

    for root in opts.roots:
        conf = NtpConfiguration(root, opts.ntp_conf, opts.step_tickers)

        if opts.ignored_lines:
            for ignored_line in conf.ignored_lines:
                print(ignored_line)

        if opts.ignored_directives:
            for ignored_directive in conf.ignored_directives:
                print(ignored_directive)

        conf.write_chrony_configuration(opts.chrony_conf, opts.chrony_keys,
                                        opts.dry_run, opts.backup)
Example 29
Project: tom-bot   Author: maartenberg   File: system_plugin.py    MIT License 5 votes vote down vote up
def logdebug_cb(bot, message=None, *args, **kwargs):
    ''' Temporarily set the loglevel to debug. '''
    # Only admins may change the log level when invoked from a chat message.
    if message and not isadmin(bot, message):
        return 'Not authorized.'
    logging.getLogger().setLevel(logging.DEBUG)
    return 'Ok.'
Example 30
Project: heroku-log-lights   Author: codingjoe   File: __main__.py    MIT License 5 votes vote down vote up
def get_args():
    """Build the CLI parser and return the parsed arguments.

    The module docstring doubles as the program description.
    """
    arg_parser = argparse.ArgumentParser(description=__doc__.strip())
    arg_parser.add_argument(
        '-a', '--app', dest='app', metavar='HEROKU_APP', type=str,
        help='Name of the target Heroku app.')
    arg_parser.add_argument(
        '-t', '--token', dest='token', metavar='AUTH_TOKEN', type=str,
        default=None, help='Heroku AUTH token.')
    # -v stores logging.DEBUG; otherwise the default is WARNING.
    arg_parser.add_argument(
        '-v', dest='verbose', action='store_const',
        const=logging.DEBUG, default=logging.WARNING,
        help='verbose mode (default: off)')
    return arg_parser.parse_args()
Example 31
Project: pyblish-win   Author: pyblish   File: refactor.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def refactor_doctest(self, block, lineno, indent, filename):
        """Refactors one doctest.

        A doctest is given as a block of lines, the first of which starts
        with ">>>" (possibly indented), while the remaining lines start
        with "..." (identically indented).

        """
        # NOTE: Python 2 code (uses `unicode`); part of lib2to3's refactorer.
        try:
            tree = self.parse_block(block, lineno, indent)
        except Exception as err:
            # Unparseable doctest: dump the source at DEBUG, report the
            # error, and hand the block back unchanged.
            if self.logger.isEnabledFor(logging.DEBUG):
                for line in block:
                    self.log_debug("Source: %s", line.rstrip(u"\n"))
            self.log_error("Can't parse docstring in %s line %s: %s: %s",
                           filename, lineno, err.__class__.__name__, err)
            return block
        if self.refactor_tree(tree, filename):
            new = unicode(tree).splitlines(True)
            # Undo the adjustment of the line numbers in wrap_toks() below.
            clipped, new = new[:lineno-1], new[lineno-1:]
            assert clipped == [u"\n"] * (lineno-1), clipped
            if not new[-1].endswith(u"\n"):
                new[-1] += u"\n"
            # Re-attach the ">>>" / "..." doctest prompts at the original indent.
            block = [indent + self.PS1 + new.pop(0)]
            if new:
                block += [indent + self.PS2 + line for line in new]
        return block
Example 32
Project: pyblish-win   Author: pyblish   File: handlers.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def __init__(self, appname, dllname=None, logtype="Application"):
        """Create an NT event log handler for *appname*.

        Requires the pywin32 extensions; when they are missing, the handler
        is left disabled (self._welu is None) after printing a notice.
        """
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default message DLL: win32service.pyd next to the
                # win32evtlogutil package.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Map logging levels to NT event types (used by getEventType).
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
         }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
                        "logging) appear not to be available.")
            self._welu = None
Example 33
Project: pyblish-win   Author: pyblish   File: handlers.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def getEventType(self, record):
        """
        Map *record*'s level number to an NT event type.

        The lookup goes through the handler's ``typemap`` attribute, set up
        in ``__init__()`` with entries for DEBUG, INFO, WARNING, ERROR and
        CRITICAL; any unknown level falls back to ``self.deftype``. Override
        this method, or install your own ``typemap`` dictionary, if you use
        custom levels.
        """
        event_type = self.typemap.get(record.levelno, self.deftype)
        return event_type
Example 34
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        # NOTE: Python 2 code — touches private logging internals
        # (_acquireLock, _handlers, _handlerList, _levelNames) and cStringIO.
        logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
        try:
            # Snapshot the module-level logging state so tearDown can
            # restore it exactly.
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = logger_dict.copy()
            self.saved_level_names = logging._levelNames.copy()
        finally:
            logging._releaseLock()

        # Set two unused loggers: one non-ASCII and one Unicode.
        # This is to test correct operation when sorting existing
        # loggers in the configuration code. See issue 8201.
        logging.getLogger("\xab\xd7\xbb")
        logging.getLogger(u"\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        # Route all root output into an in-memory stream for assertions.
        self.stream = cStringIO.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        self.root_logger.addHandler(self.root_hdlr)
Example 35
Project: drydock   Author: airshipit   File: base.py    Apache License 2.0 5 votes vote down vote up
def debug(self, ctx, msg):
        """Log *msg* for *ctx* at DEBUG severity via log_error()."""
        self.log_error(ctx, logging.DEBUG, msg)
Example 36
Project: logging-test-case   Author: chadrosenquist   File: capturelogs_test.py    MIT License 5 votes vote down vote up
def test_log_level_restored(self):
        """Verifies the log level is correctly restored."""
        foo_logger = logging.getLogger('foo')
        foo_logger.setLevel(logging.DEBUG)
        # Project helper; presumably changes 'foo''s level under a capture
        # context that should restore it on exit — confirm in the harness.
        self._logging_test_function()
        self.assertEqual(foo_logger.level, logging.DEBUG)
Example 37
Project: logging-test-case   Author: chadrosenquist   File: capturelogs_test.py    MIT License 5 votes vote down vote up
def test_log_level_restored_after_exception(self):
        """Verifies the log level is correctly restored, even after an exception."""
        foo_logger = logging.getLogger('foo')
        foo_logger.setLevel(logging.DEBUG)
        # Project helper that raises ValueError mid-capture; the level must
        # still be restored by the capture context's cleanup path.
        with self.assertRaises(ValueError):
            self._logging_test_function_exception()
        self.assertEqual(foo_logger.level, logging.DEBUG)
Example 38
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: logging.py    Apache License 2.0 5 votes vote down vote up
def create_logger(app):
    """Creates a logger for the given application.  This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag.  Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.
    """
    base_cls = getLoggerClass()

    class DebugLogger(base_cls):
        def getEffectiveLevel(self):
            # An explicitly-set level (non-zero) always wins; otherwise
            # the app's debug flag promotes the effective level to DEBUG.
            if self.level == 0 and app.debug:
                return DEBUG
            return base_cls.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        def emit(self, record):
            # Drop every record unless the app is in debug mode.
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # Discard handlers left over from a previous logger with this name.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
Example 39
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: logging.py    Apache License 2.0 5 votes vote down vote up
def create_logger(app):
    """Creates a logger for the given application.  This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag.  Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.
    """
    base_cls = getLoggerClass()

    class DebugLogger(base_cls):
        def getEffectiveLevel(self):
            # An explicitly-set level (non-zero) always wins; otherwise
            # the app's debug flag promotes the effective level to DEBUG.
            if self.level == 0 and app.debug:
                return DEBUG
            return base_cls.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        def emit(self, record):
            # Drop every record unless the app is in debug mode.
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # Discard handlers left over from a previous logger with this name.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
Example 40
Project: dnsbin   Author: thomas-maurice   File: logger.py    Do What The F*ck You Want To Public License 5 votes vote down vote up
def setup_logger(name, to_stdout=True, file_name=None):
    """Creates the logging object used by the script

    By default it prints information to stdout, but
    you can tell it to print out information to a file too

    :param name: logger name passed to logging.getLogger
    :param to_stdout: attach a StreamHandler when true
    :param file_name: optional path for an additional FileHandler
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s'
    )

    # reset handlers -- iterate over a COPY of the list: removing from
    # the live list while iterating it skips every other handler, so
    # repeated setup_logger() calls leaked stale handlers.
    for handler in list(logger.handlers):
        # Don't close stdout or stderr !
        if handler.__class__ != logging.StreamHandler:
            handler.stream.close()
        logger.removeHandler(handler)

    if file_name:
        fhandle = logging.FileHandler(file_name)
        fhandle.setLevel(logging.DEBUG)
        fhandle.setFormatter(formatter)
        logger.addHandler(fhandle)

    if to_stdout:
        chandle = logging.StreamHandler()
        chandle.setLevel(logging.DEBUG)
        chandle.setFormatter(formatter)
        logger.addHandler(chandle)

    return logger
Example 41
Project: wikilinks   Author: trovdimi   File: pickle_data.py    MIT License 5 votes vote down vote up
def pickle_sim():
    """Build the semantic-similarity feature triple and pickle it.

    Loads pre-pickled link/vocabulary structures, looks up the semantic
    similarity for every link (sem_sim keys are ordered with the smaller
    article id first), and dumps [i_indices, j_indices, values] to disk.
    NOTE(review): Python 2 code (print statements); file paths are
    hard-coded to /ssd/ddimitrov -- presumably a specific lab machine.
    """
    # setup logging
    LOGGING_FORMAT = '%(levelname)s:\t%(asctime)-15s %(message)s'
    LOGGING_PATH = 'tmp/semsim-pickle.log'
    logging.basicConfig(filename=LOGGING_PATH, level=logging.DEBUG, format=LOGGING_FORMAT, filemode='w')
    i = 0
    # Pre-pickled inputs produced by earlier pipeline steps.
    voc_zip_links = pickle.load( open( "/ssd/ddimitrov/pickle/voc_zip_links", "rb" ) )
    print "loaded voc_zip_links"
    uniqeu_nonzero_map = pickle.load( open( "/ssd/ddimitrov/pickle/uniqeu_nonzero_map", "rb" ) )
    vocab = pickle.load( open( "/ssd/ddimitrov/pickle/vocab", "rb" ) )
    print "loaded vocab"
    sem_sim = pickle.load( open( "/ssd/ddimitrov/pickle/sem_sim", "rb" ) )


    values_rel_faeture = list()
    i_indices = list()
    j_indices = list()
    i = 0
    for link in voc_zip_links:
        i += 1
        # Progress marker: one line per million links processed.
        if i % 1000000 == 0:
            print  i
        i_indices.append(uniqeu_nonzero_map[vocab[link[0]]])
        j_indices.append(vocab[link[1]])
        from_id = int(link[0])
        to_id = int(link[1])
        # sem_sim is keyed by (smaller id, larger id) pairs.
        if from_id<=to_id:
            try:
                values_rel_faeture.append(sem_sim[(from_id,to_id)])
            except KeyError as e:
                # Missing pair: log and skip -- note the value list then
                # has no entry for this link, so indices and values can
                # drift out of alignment.  TODO confirm this is intended.
                logging.error(e)
        else:
            try:
                values_rel_faeture.append(sem_sim[(to_id,from_id)])
            except KeyError as e:
                logging.error(e)
    rel_feature_hyp_data = [i_indices, j_indices, values_rel_faeture]
    pickle.dump(rel_feature_hyp_data, open("/ssd/ddimitrov/pickle/sem_sim_hyp", "wb"), protocol=pickle.HIGHEST_PROTOCOL)
Example 42
Project: wikilinks   Author: trovdimi   File: pickle_data.py    MIT License 5 votes vote down vote up
def pickle_sem_sim_data():
    """Collect the semantic-similarity value of every edge in the
    Wikipedia network CSV and pickle the resulting list.

    sem_sim is keyed by (smaller id, larger id) pairs; edges missing
    from the table are logged and skipped.
    """
    # setup logging
    LOGGING_FORMAT = '%(levelname)s:\t%(asctime)-15s %(message)s'
    LOGGING_PATH = 'tmp/semsim-pickle.log'
    logging.basicConfig(filename=LOGGING_PATH, level=logging.DEBUG, format=LOGGING_FORMAT, filemode='w')
    sem_sim = pickle.load(open("/ssd/ddimitrov/pickle/sem_sim", "rb"))

    values_sem_sim = list()
    with open(os.path.join(os.path.dirname(__file__), "/home/ddimitrov/tmp/wikipedia_network.csv")) as f:
        next(f)  # skip the CSV header row
        for row in f:
            fields = row.strip().split('\t')
            from_id = int(fields[0])
            to_id = int(fields[1])
            # Normalise to the (smaller, larger) key ordering.
            key = (from_id, to_id) if from_id <= to_id else (to_id, from_id)
            try:
                values_sem_sim.append(sem_sim[key])
            except KeyError as e:
                logging.error(e)


    pickle.dump(values_sem_sim, open("/ssd/ddimitrov/pickle/values_sem_sim", "wb"), protocol=pickle.HIGHEST_PROTOCOL)
Example 43
Project: wikilinks   Author: trovdimi   File: insertarticlefeatures.py    MIT License 5 votes vote down vote up
def update_article_features():
    """Refresh HITS authority/hub scores for every article row.

    Loads the pickled graph-tool network, then issues one UPDATE per
    article against the article_features table, committing after each
    article.  NOTE(review): Python 2 code (print statements); relies on
    module-level db / db_work_view helpers.
    """

    connection = db._create_connection()
    cursor = connection.cursor()

    network = load_graph("output/wikipedianetwork.xml.gz")
    print 'graph loaded'
    articles = db_work_view.retrieve_all_articles()
    print 'articles loaded'

    # setup logging
    LOGGING_FORMAT = '%(levelname)s:\t%(asctime)-15s %(message)s'
    LOGGING_PATH = 'tmp/articlefeatures-dbinsert.log'
    logging.basicConfig(filename=LOGGING_PATH, level=LOGGING_LEVEL if False else logging.DEBUG, format=LOGGING_FORMAT, filemode='w')

    for article in articles:
        try:
            article_features = {}
            # assumes article ids map 1:1 onto graph vertex indices -- TODO confirm.
            vertex = network.vertex(article['id'])
            article_features['id'] = article['id']
            article_features['hits_authority'] = network.vertex_properties["authority"][vertex]
            article_features['hits_hub'] = network.vertex_properties["hub"][vertex]
            #article_features['katz'] = network.vertex_properties["katz"][vertex]

            sql  = "UPDATE article_features " \
                   "SET hits_authority = %(hits_authority)s, hits_hub = %(hits_hub)s " \
                   "WHERE id = %(id)s;"

            cursor.execute(sql, article_features)

        except MySQLdb.Error as e:
            #logging.error('DB Insert Error  article id: "%s" ' % article['id'])
            print e
        except ValueError as v:
            logging.error('ValueError for article id: "%s"' % article['id'])
            print v
        # Commit after every article so a crash loses at most one row.
        connection.commit()
    connection.close()
Example 44
Project: wikilinks   Author: trovdimi   File: tableclassinserter.py    MIT License 5 votes vote down vote up
def table_parser(self, file_name, root):
        """Extract table CSS classes from one zipped article and store them.

        Unzips the HTML for ``file_name`` under ``root``, feeds it to the
        HTML table parser, de-duplicates the table classes found and
        inserts each one for the source article.  A KeyError from the
        fed-text parser rolls the transaction back and logs the error;
        the build view is committed and its cache reset either way.
        """
        db = MySQLDatabase(DATABASE_HOST, DATABASE_USER, DATABASE_PASSWORD, DATABASE_NAME)
        db_build_view = db.get_build_view()
        cursor = db_build_view._cursor

        # setup logging
        LOGGING_FORMAT = '%(levelname)s:\t%(asctime)-15s %(message)s'
        LOGGING_PATH = 'tmp/tableclasses-dbinsert.log'
        logging.basicConfig(filename=LOGGING_PATH, level=logging.DEBUG, format=LOGGING_FORMAT, filemode='w')

        html_parser = WikipediaHTMLTableParser()
        archive_path = os.path.join(root, file_name)
        html_parser.feed(self.zip2html(archive_path).decode('utf-8'))
        # File names look like "<prefix>_<article id>_..." -- TODO confirm.
        source_article_id = file_name.split('_')[1]
        try:
            fed_parser = WikipediaFedTextParser(html_parser.get_data())
            for table_class in set(fed_parser.table_classes(None)):
                self.insert_table_class(source_article_id, table_class, cursor)
        except KeyError:
            db_build_view._db_connection.rollback()
            logging.error('KeyError FedTextParser source article id: %s ' % source_article_id)
        db_build_view.commit()
        db_build_view.reset_cache()
Example 45
Project: rubbish.py   Author: alphapapa   File: rubbish.py    GNU General Public License v3.0 5 votes vote down vote up
def cli(verbose):
    """Configure root logging verbosity from the CLI -v count."""
    # Two or more -v flags => DEBUG, a single -v => INFO, default WARNING.
    if verbose >= 2:
        level = log.DEBUG
    elif verbose == 1:
        level = log.INFO
    else:
        level = log.WARNING

    log.basicConfig(level=level, format="%(levelname)s: %(message)s")

# * Commands

# ** empty 
Example 46
Project: AutoDL   Author: tanguofu   File: worker.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def run(self):
        """Serve the wrapped app on this worker's event loop until stopped.

        Builds the async server settings from the app helper, fires the
        before/after start/stop trigger hooks in order, blocks on the
        loop until _check_alive() returns, then closes the loop and
        exits the process with the recorded exit code.
        """
        # DEBUG log level also switches the app into debug mode.
        is_debug = self.log.loglevel == logging.DEBUG
        protocol = (
            self.websocket_protocol if self.app.callable.websocket_enabled
            else self.http_protocol)
        self._server_settings = self.app.callable._helper(
            loop=self.loop,
            debug=is_debug,
            protocol=protocol,
            ssl=self.ssl_context,
            run_async=True)
        self._server_settings['signal'] = self.signal
        # The listening socket is managed elsewhere, not via settings.
        self._server_settings.pop('sock')
        trigger_events(self._server_settings.get('before_start', []),
                       self.loop)
        # Clear so before_start cannot fire a second time inside _run().
        self._server_settings['before_start'] = ()

        self._runner = asyncio.ensure_future(self._run(), loop=self.loop)
        try:
            self.loop.run_until_complete(self._runner)
            self.app.callable.is_running = True
            trigger_events(self._server_settings.get('after_start', []),
                           self.loop)
            # Blocks until the worker is told to stop.
            self.loop.run_until_complete(self._check_alive())
            trigger_events(self._server_settings.get('before_stop', []),
                           self.loop)
            self.loop.run_until_complete(self.close())
        except BaseException:
            traceback.print_exc()
        finally:
            # after_stop hooks must run even if shutdown itself failed,
            # and the loop must be closed even if a hook raises.
            try:
                trigger_events(self._server_settings.get('after_stop', []),
                               self.loop)
            except BaseException:
                traceback.print_exc()
            finally:
                self.loop.close()

        sys.exit(self.exit_code)
Example 47
Project: AutoDL   Author: tanguofu   File: logger.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def getLogger(name, path):
    """Return a DEBUG-level logger writing to a timestamped file in *path*.

    The log file is named ``<name>_<YYYYmmdd_HHMMSS>.log``.  Note that a
    fresh handler is attached on every call, so repeated calls with the
    same name accumulate handlers.
    """
    stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    log_filename = "%s/%s_%s.log" % (path, name, stamp)
    handler = FileTraceHandker(filename=log_filename)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        '%(asctime)-15s %(name)s %(process)d %(levelname)s %(message)s'))

    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)

    return logger
Example 48
Project: shaptools   Author: SUSE   File: shapcli.py    Apache License 2.0 5 votes vote down vote up
def parse_arguments():
    """
    Parse command line arguments.

    Returns:
        tuple: (parser, args) -- the parser is returned too so callers
        can print help when the parsed arguments are insufficient.
    """
    parser = argparse.ArgumentParser(PROG)

    parser.add_argument(
        '-v', '--verbosity',
        help='Python logging level. Options: DEBUG, INFO, WARN, ERROR (INFO by default)')
    parser.add_argument(
        '-r', '--remote',
        help='Run the command in other machine using ssh')
    parser.add_argument(
        '-c', '--config',
        help='JSON configuration file with SAP HANA instance data (sid, instance and password)')
    parser.add_argument(
        '-s', '--sid', help='SAP HANA sid')
    parser.add_argument(
        '-i', '--instance', help='SAP HANA instance')
    parser.add_argument(
        '-p', '--password', help='SAP HANA password')

    # Subcommands: "hana" for database commands, "sr" for system replication.
    subcommands = parser.add_subparsers(
        title='subcommands', description='valid subcommands', help='additional help')
    hana_subparser = subcommands.add_parser(
        'hana', help='Commands to interact with SAP HANA database')
    sr_subparser = subcommands.add_parser(
        'sr', help='Commands to interact with SAP HANA system replication')

    parse_hana_arguments(hana_subparser)
    parse_sr_arguments(sr_subparser)

    args = parser.parse_args()
    return parser, args
Example 49
Project: shaptools   Author: SUSE   File: shapcli.py    Apache License 2.0 5 votes vote down vote up
def run():
    """
    Main execution.

    Parses CLI arguments, builds the configuration (from the JSON file
    given by -c/--config, or from explicit -s/-i/-p values), then
    dispatches to the hana or sr subcommand handlers.  Exits with
    status 1 when no usable configuration is supplied or when the HANA
    layer raises.
    """
    parser, args = parse_arguments()
    # NOTE(review): falls back to DEBUG although the -v help text says
    # INFO is the default -- confirm which is intended.
    logger = setup_logger(args.verbosity or logging.DEBUG)

    # If -c or --config flag is received data is loaded from the configuration file
    if args.config:
        data = load_config_file(args.config, logger)
        config_data = ConfigData(data, logger)
    elif args.sid and args.instance and args.password:
        config_data = ConfigData(vars(args), logger)
    else:
        logger.info(
            'Configuration file or sid, instance and passwords parameters must be provided\n')
        parser.print_help()
        exit(1)

    if args.remote:
        config_data.remote = args.remote

    try:
        hana_instance = hana.HanaInstance(
            config_data.sid, config_data.instance,
            config_data.password, remote_host=config_data.remote)
        if vars(args).get('hana'):
            run_hana_subcommands(hana_instance, args, logger)
        elif vars(args).get('sr'):
            run_sr_subcommands(hana_instance, args, logger)
        else:
            parser.print_help()
    except Exception as err:
        # Broad catch: any HANA/connection failure is reported and fatal.
        logger.error(err)
        exit(1)
Example 50
Project: iSDX   Author: sdn-ixp   File: replay.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, log_history, publisher, time_step=1, debug=False):
        """Create a replayer over *log_history*.

        :param log_history: ordered log records to replay
        :param publisher: sink that receives replayed events
        :param time_step: delay between replayed entries
        :param debug: when true, lower the "LogReplay" logger to DEBUG
        """
        self.log_history = log_history
        self.publisher = publisher
        self.time_step = time_step
        # Replay loop stays idle until explicitly started.
        self.run = False

        self.logger = logging.getLogger("LogReplay")
        if debug:
            self.logger.setLevel(logging.DEBUG)
Example 51
Project: auto-check-in   Author: zeekvfu   File: utility.py    GNU General Public License v3.0 5 votes vote down vote up
def get_logger(log_file, log_level=logging.DEBUG):
    """Return a logger named after *log_file* that writes to that file.

    :param log_file: path of the log file (also used as the logger name)
    :param log_level: numeric logging level, or a level name string such
        as ``"DEBUG"`` or the legacy ``"logging.DEBUG"`` spelling
    """
    _format = '%(asctime)s %(process)d %(thread)d %(levelname)s | %(message)s'
    formatter = logging.Formatter(_format)

    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(formatter)

    logger = logging.getLogger(log_file)
    logger.addHandler(file_handler)
    if isinstance(log_level, str):
        # Resolve level names safely instead of eval()-ing the string
        # (eval executes arbitrary code); accept both "DEBUG" and
        # "logging.DEBUG".  Unknown names raise AttributeError.
        log_level = getattr(logging, log_level.rsplit('.', 1)[-1].upper())
    logger.setLevel(log_level)
    return logger
Example 52
Project: utilities   Author: czbiohub   File: log_util.py    MIT License 5 votes vote down vote up
def get_logger(name, debug=False, dryrun=False):
    """Return (logger, log_file, file_handler) configured for *name*.

    The logger itself is pinned at DEBUG; the console handler emits
    DEBUG only when *debug* is true, INFO otherwise.  Under AWS Batch
    (AWS_BATCH_JOB_ID set) a DEBUG file handler is also attached and its
    path returned; otherwise log_file and file_handler are None.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)

    # Same format either way, with a (DRYRUN) marker when requested.
    fmt_str = ("%(asctime)s - %(name)s - %(levelname)s - "
               + ("(DRYRUN) - " if dryrun else "")
               + "%(message)s")
    formatter = logging.Formatter(fmt_str)

    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG if debug else logging.INFO)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    if os.environ.get("AWS_BATCH_JOB_ID"):
        # Persist the full DEBUG stream next to the batch job.
        log_file = os.path.abspath("{}.log".format(os.environ["AWS_BATCH_JOB_ID"]))
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        log_file = None
        file_handler = None

    return logger, log_file, file_handler
Example 53
Project: github-cm   Author: silicon-mountain   File: step2_cleanup_users.py    Apache License 2.0 5 votes vote down vote up
def guess_location(user):
    """Normalise a user's free-text location into structured fields.

    Returns the mutated *user* dict on success, or None when no country
    could be resolved (the user is then appended to the module-level
    ``bads`` list).  Relies on module globals ``countries`` (metadata
    keyed by country code) and ``country_city_by_location``.
    """

    country_code, city_name = country_city_by_location(user.get('location'))

    if country_code is None:
        # Unresolvable location: remember the user for later inspection.
        bads.append(user)
        return None

    if city_name is not None:
        # Take coordinates from the first pattern entry whose name or
        # aliases match the city.  assumes at least one entry matches and
        # that the country entry has 'patterns' -- TODO confirm.
        latitude, longitude = [(x.get('latitude'), x.get('longitude'))
                    for x in countries.get(country_code).get('patterns')
                    if city_name in x.get('patterns', []) + [x.get('name')]][0]
    else:
        # No city resolved: fall back to the country-level coordinates.
        latitude = countries.get(country_code, {}).get('latitude')
        longitude = countries.get(country_code, {}).get('longitude')

    try:
        country_name = countries.get(country_code).get('name')
    except AttributeError:
        # Unknown country code: countries.get() returned None.
        return None

    # Replace the raw fields with the normalised ones.
    del(user['country'])
    del(user['city'])
    user.update({'country_code': country_code,
                 'country_name': country_name,
                 'city_name': city_name,
                 'has_city': city_name is not None,
                 'latitude': latitude,
                 'longitude': longitude,
                 'id_int': int(user.get('id').split('-', 1)[1])})
    return user

# DEBUG = True 
Example 54
Project: rnm   Author: alexjaw   File: wireless.py    MIT License 5 votes vote down vote up
def test_me():
    """Smoke-test the WiFi helper: query IP and hotspot info with logging."""
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger(__name__)
    log.info('------------- Starting test... -------------')

    wifi = WiFi()

    # Log the IP lookup; the hotspot scan result is only exercised.
    log.info(repr(wifi.get_ip()))
    wifi.get_hotspots_info()

    log.info('-------------    Finished      -------------')
Example 55
Project: rnm   Author: alexjaw   File: interface.py    MIT License 5 votes vote down vote up
def __init__(self, iface, logger=None):
        """Wrap the network interface named *iface*.

        :param iface: interface name, e.g. ``eth0`` or ``wlan0``
        :param logger: optional externally-configured logger; when not
            supplied a ``<module>.Interface`` logger is used.  (Running
            this class at DEBUG is controlled by the caller's handlers,
            e.g. a FileHandler configured in web_server.py.)
        """
        self.iface = iface
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger(__name__ + '.Interface')
        self.logger.info('------------- Starting... -------------')
Example 56
Project: rnm   Author: alexjaw   File: rnm.py    MIT License 5 votes vote down vote up
def __init__(self, logger=None):
        """Build the manager with eth0/wlan0 interfaces and a WiFi helper.

        :param logger: optional logger shared with the Interface objects;
            when not supplied a ``<module>.RaspberryNetworkManager``
            logger is used.  (DEBUG-level output is controlled by the
            caller's handlers, e.g. a FileHandler from web_server.py.)
        """
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger(__name__ + '.RaspberryNetworkManager')
        self.logger.info('------------- Starting... -------------')
        # Wired first, then wireless, so the start-up log lines stay ordered.
        self.eth = Interface(iface='eth0', logger=logger)
        self.wlan = Interface(iface='wlan0', logger=logger)
        self.wifi = WiFi()
Example 57
Project: chess-chiller   Author: fsmosca   File: chess-chiller.py    GNU General Public License v3.0 5 votes vote down vote up
def initialize_logger(logger_level):
    """Configure the root logger with console and rotating-file outputs.

    Three handlers are attached: an INFO console handler (message text
    only), an ERROR ``error.log`` and a DEBUG ``all.log`` -- the file
    handlers rotate (5 MB x 5 backups) to avoid disk space overrun.
    """
    root = logging.getLogger()
    root.setLevel(logger_level)

    detailed = logging.Formatter(
        "%(asctime)s [%(threadName)-10.10s] [%(funcName)-12.12s] [%(levelname)-5.5s] > %(message)s")

    # Console: info and above, bare message text.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter("%(message)s"))
    root.addHandler(console)

    # error.log: errors and critical only.
    err_file = RotatingFileHandler("error.log", mode='w',
                                   maxBytes=5000000, backupCount=5)
    err_file.setLevel(logging.ERROR)
    err_file.setFormatter(detailed)
    root.addHandler(err_file)

    # all.log: everything from debug up.
    all_file = RotatingFileHandler("all.log", mode='w',
                                   maxBytes=5000000, backupCount=5)
    all_file.setLevel(logging.DEBUG)
    all_file.setFormatter(detailed)
    root.addHandler(all_file)
Example 58
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: _cmd.py    MIT License 5 votes vote down vote up
def setup_logging():
    """Switch the module-level logger to DEBUG with a bare stream handler."""
    stream = logging.StreamHandler()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(stream)
Example 59
Project: spleeter   Author: deezer   File: logging.py    MIT License 5 votes vote down vote up
def enable_tensorflow_logging():
    """ Enable tensorflow logging. """
    # '1' filters only the lowest-level C++ log messages.
    environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
    tf_log = get_tensorflow_logger()
    tf_log.set_verbosity(tf_log.INFO)
    # Our own logger goes fully verbose.
    get_logger().setLevel(logging.DEBUG)
Example 60
Project: pnp   Author: HazardDede   File: pnp.py    MIT License 5 votes vote down vote up
def run(args):
    """Run pull 'n' push."""
    opts = _validate_args(args)
    cfg_path = opts['<configuration>']
    # The LOG_LEVEL environment variable may override the DEBUG default.
    level = os.environ.get('LOG_LEVEL', 'DEBUG')
    _setup_logging(
        args['--log'], 'logging.yaml',
        os.path.join(os.path.dirname(cfg_path), 'logging.yaml'),
        default_level=level, verbose=opts['--verbose'])
    app = Application.from_file(cfg_path, engine_override=opts['--engine'])
    # --check only validates the configuration; the engine never starts.
    if not opts['--check']:
        app.start()
Example 61
Project: flasky   Author: RoseOu   File: log.py    MIT License 5 votes vote down vote up
def class_logger(cls):
    """Attach a module-qualified logger and level predicates to *cls*.

    Registers the class in the module-level ``_logged_classes`` set and
    returns it, so it is usable as a class decorator.
    """
    log = logging.getLogger("%s.%s" % (cls.__module__, cls.__name__))
    cls._should_log_debug = lambda self: log.isEnabledFor(logging.DEBUG)
    cls._should_log_info = lambda self: log.isEnabledFor(logging.INFO)
    cls.logger = log
    _logged_classes.add(cls)
    return cls
Example 62
Project: flasky   Author: RoseOu   File: log.py    MIT License 5 votes vote down vote up
def _should_log_debug(self):
        """Return True when this object's logger accepts DEBUG records."""
        debug_enabled = self.logger.isEnabledFor(logging.DEBUG)
        return debug_enabled
Example 63
Project: flasky   Author: RoseOu   File: log.py    MIT License 5 votes vote down vote up
def debug(self, msg, *args, **kwargs):
        """Delegate a debug call to the underlying logger."""
        # Fixed DEBUG severity; args/kwargs pass through untouched.
        level = logging.DEBUG
        self.log(level, msg, *args, **kwargs)
Example 64
Project: flasky   Author: RoseOu   File: plugin_base.py    MIT License 5 votes vote down vote up
def _log(opt_str, value, parser):
    """Option callback: set the logger named *value* to INFO or DEBUG.

    ``--<x>-info`` selects INFO, ``--<x>-debug`` selects DEBUG.  The
    ``logging`` module is imported lazily so that merely loading this
    plugin never configures logging.
    """
    global logging
    # Module-level 'logging' starts out falsy; the first -info/-debug
    # option triggers the real import plus a default basicConfig().
    if not logging:
        import logging
        logging.basicConfig()

    if opt_str.endswith('-info'):
        logging.getLogger(value).setLevel(logging.INFO)
    elif opt_str.endswith('-debug'):
        logging.getLogger(value).setLevel(logging.DEBUG)
Example 65
Project: flasky   Author: RoseOu   File: log.py    MIT License 5 votes vote down vote up
def debug(self, msg, *args, **kw):
        """Emit *msg* through self.log at this object's DEBUG level."""
        lvl = self.DEBUG
        self.log(lvl, msg, *args, **kw)
Example 66
Project: flasky   Author: RoseOu   File: statsd.py    MIT License 5 votes vote down vote up
def debug(self, msg, *args, **kwargs):
        """Forward *msg* (plus any args) to self.log at DEBUG severity."""
        self.log(logging.DEBUG, msg, *args, **kwargs)
Example 67
Project: flasky   Author: RoseOu   File: _gaiohttp.py    MIT License 5 votes vote down vote up
def factory(self, wsgi, addr):
        """Build a WSGI HTTP protocol instance for a new connection.

        Debug mode is derived from the worker's configured log level.
        """
        # are we in debug level
        debug_mode = self.log.loglevel == logging.DEBUG

        proto = WSGIServerHttpProtocol(
            wsgi,
            readpayload=True,
            loop=self.loop,
            log=self.log,
            debug=debug_mode,
            keep_alive=self.cfg.keepalive,
            access_log=self.log.access_log,
            access_log_format=self.cfg.access_log_format)
        return self.wrap_protocol(proto)
Example 68
Project: core   Author: lifemapper   File: log.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self, scriptname, level=logging.DEBUG):
      """Compute logger for *scriptname* with console AND file output."""
      # Delegate to the base class with both output channels enabled.
      LmComputeLogger.__init__(self, scriptname, level=level,
                               addConsole=True, addFile=True)
Example 69
Project: core   Author: lifemapper   File: log.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self, name, level=logging.DEBUG, addConsole=False,
                             addFile=False):
        """Initialise the logger, optionally attaching output handlers.

        :param name: logger name, also used to build the log file name
        :param level: logging level, DEBUG by default
        :param addConsole: attach a console handler when true
        :param addFile: attach a file handler under LOG_PATH when true
        """
        LmLogger.__init__(self, name, level)
        if addConsole:
            self._addConsoleHandler()
        if addFile:
            # Log file path: <LOG_PATH>/<name><log extension>
            log_path = os.path.join(LOG_PATH, '%s%s' % (name, LMFormat.LOG.ext))
            self._addFileHandler(log_path)

# ............................................................................. 
Example 70
Project: alfred-yubikey-otp   Author: robertoriv   File: workflow.py    MIT License 4 votes vote down vote up
def logger(self):
        """Logger that logs to both console and a log file.

        If Alfred's debugger is open, log level will be ``DEBUG``,
        else it will be ``INFO``.

        Use :meth:`open_log` to open the log file in Console.

        :returns: an initialised :class:`~logging.Logger`

        """
        if self._logger:
            return self._logger

        # Initialise new logger and optionally handlers
        logger = logging.getLogger('')

        # Only add one set of handlers
        # Exclude from coverage, as pytest will have configured the
        # root logger already
        if not len(logger.handlers):  # pragma: no cover

            fmt = logging.Formatter(
                '%(asctime)s %(filename)s:%(lineno)s'
                ' %(levelname)-8s %(message)s',
                datefmt='%H:%M:%S')

            logfile = logging.handlers.RotatingFileHandler(
                self.logfile,
                maxBytes=1024 * 1024,
                backupCount=1)
            logfile.setFormatter(fmt)
            logger.addHandler(logfile)

            console = logging.StreamHandler()
            console.setFormatter(fmt)
            logger.addHandler(console)

        if self.debugging:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        self._logger = logger

        return self._logger 
Example 71
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def test_flat(self):
        #Logging levels in a flat logger namespace.
        # m() yields the next sequential message number; the expected
        # transcript below depends on the exact order of every call.
        m = self.next_message

        # Three sibling loggers, one per threshold under test.
        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warn(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warn (m())
        DEB.info (m())
        DEB.debug(m())

        # These should not log.
        ERR.warn(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        # Message numbers 12-15 were consumed by the suppressed calls
        # above, so the recorded transcript ends at '11'.
        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])
Example 72
Project: wechat-alfred-workflow   Author: TKkk-iOSer   File: workflow.py    MIT License 4 votes vote down vote up
def logger(self):
        """Logger that logs to both console and a log file.

        If Alfred's debugger is open, log level will be ``DEBUG``,
        else it will be ``INFO``.

        Use :meth:`open_log` to open the log file in Console.

        :returns: an initialised :class:`~logging.Logger`

        """
        # Serve the cached logger when one was already built.
        if self._logger:
            return self._logger

        root = logging.getLogger('')

        # Attach handlers only once; a previous run (or pytest) may have
        # configured the root logger already.
        if not len(root.handlers):  # pragma: no cover
            fmt = logging.Formatter(
                '%(asctime)s %(filename)s:%(lineno)s'
                ' %(levelname)-8s %(message)s',
                datefmt='%H:%M:%S')

            # Rotating file output (1 MB, one backup) ...
            file_handler = logging.handlers.RotatingFileHandler(
                self.logfile,
                maxBytes=1024 * 1024,
                backupCount=1)
            file_handler.setFormatter(fmt)
            root.addHandler(file_handler)

            # ... plus console output with the same format.
            console = logging.StreamHandler()
            console.setFormatter(fmt)
            root.addHandler(console)

        # Debugger open => DEBUG, otherwise INFO.
        root.setLevel(logging.DEBUG if self.debugging else logging.INFO)

        self._logger = root

        return self._logger
Example 73
Project: prediction-constrained-topic-models   Author: dtak   File: pprint_logging.py    MIT License 4 votes vote down vote up
def config_pprint_logging(
        output_path='/tmp/',
        do_write_txtfile=True,
        do_write_stdout=True,
        txtfile='stdout.txt',
        ):
    """(Re)configure the module-wide 'pprint_logging' logger.

    Clears any previously attached handlers, then adds a plain-text
    file handler under *output_path* and/or a stdout handler, both at
    DEBUG.  When both outputs are disabled a NullHandler is installed
    so logging never warns about a handler-less logger.
    """
    global RootLog
    RootLog = logging.getLogger('pprint_logging')
    RootLog.handlers = []
    RootLog.setLevel(logging.DEBUG)

    plain = logging.Formatter('%(message)s')
    # Transcript of log messages as a plain-text file.
    if do_write_txtfile:
        file_handler = logging.FileHandler(os.path.join(output_path, txtfile))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(plain)
        RootLog.addHandler(file_handler)
    # Mirror messages to stdout.
    if do_write_stdout:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setLevel(logging.DEBUG)
        stdout_handler.setFormatter(plain)
        RootLog.addHandler(stdout_handler)

    # Null handler avoids "no handler could be found" warnings.
    if not (do_write_txtfile or do_write_stdout):
        RootLog.addHandler(logging.NullHandler())

    # Disabled legacy Brown CS grid support, kept verbatim for reference:
    '''
    # Prepare special logs if we are running on the Brown CS grid
    try:
        jobID = int(os.getenv('JOB_ID'))
    except TypeError:
        jobID = 0
    if jobID > 0:
        Log.info('SGE Grid Job ID: %d' % (jobID))

        if 'SGE_STDOUT_PATH' in os.environ:
            # Create symlinks to captured stdout, stdout in output directory
            os.symlink(os.getenv('SGE_STDOUT_PATH'),
                       os.path.join(taskoutpath, 'stdout'))
            os.symlink(os.getenv('SGE_STDERR_PATH'),
                       os.path.join(taskoutpath, 'stderr'))

            with open(os.path.join(taskoutpath, 'GridInfo.txt'), 'w') as f:
                f.write(str(jobID) + "\n")
                f.write(str(taskid) + "\n")
                f.write('stdout: ' + os.getenv('SGE_STDOUT_PATH') + "\n")
                f.write('stderr: ' + os.getenv('SGE_STDERR_PATH') + "\n")
    return jobID
    '''
Example 74
Project: wikilinks   Author: trovdimi   File: insertarticlefeatures.py    MIT License 4 votes vote down vote up
def insert_article_features():

    connection = db._create_connection()
    cursor = connection.cursor()

    network = load_graph("output/wikipedianetwork.xml.gz")
    print 'graph loaded'
    articles = db_work_view.retrieve_all_articles()
    print 'articles loaded'

    # setup logging
    LOGGING_FORMAT = '%(levelname)s:\t%(asctime)-15s %(message)s'
    LOGGING_PATH = 'tmp/articlefeatures-dbinsert.log'
    logging.basicConfig(filename=LOGGING_PATH, level=logging.DEBUG, format=LOGGING_FORMAT, filemode='w')

    for article in articles:
        try:
            article_features = {}
            vertex = network.vertex(article['id'])
            article_features['id'] = article['id']
            article_features['in_degree'] = vertex.in_degree()
            article_features['out_degree'] = vertex.out_degree()
            article_features['degree'] = vertex.in_degree() + vertex.out_degree()
            article_features['page_rank'] = network.vertex_properties["page_rank"][vertex]
            article_features['eigenvector_centr'] = network.vertex_properties["eigenvector_centr"][vertex]
            article_features['local_clust'] = network.vertex_properties["local_clust"][vertex]
            article_features['kcore'] = network.vertex_properties["kcore"][vertex]
            article_features['hits_authority'] = network.vertex_properties["authority"][vertex]
            article_features['hits_hub'] = network.vertex_properties["hub"][vertex]

            sql = "INSERT INTO article_features (id, in_degree," \
                             "out_degree, degree, page_rank, " \
                             "local_clustering, eigenvector_centr," \
                             " kcore, hits_authority, hits_hub) VALUES" \
                             "(%(id)s, %(in_degree)s, %(out_degree)s," \
                             "%(degree)s, %(page_rank)s,  %(eigenvector_centr)s, " \
                             "%(local_clust)s, %(kcore)s, %(hits_authority)s, %(hits_hub)s);"


            cursor.execute(sql, article_features)
            #logging.info('DB Insert Success for article id: "%s" ' % article['id'])
        except MySQLdb.Error as e:
            logging.error('DB Insert Error  article id: "%s" ' % article['id'])
        except ValueError:
            logging.error('ValueError for article id: "%s"' % article['id'])
        connection.commit()
    connection.close() 
Example 75
Project: wikilinks   Author: trovdimi   File: insertarticlefeatures.py    MIT License 4 votes vote down vote up
def update_link_features_sem_similarity():

    connection = db._create_connection()
    cursor = connection.cursor()

    # setup logging
    LOGGING_FORMAT = '%(levelname)s:\t%(asctime)-15s %(message)s'
    LOGGING_PATH = 'tmp/link_features_semsim-dbinsert.log'
    logging.basicConfig(filename=LOGGING_PATH, level=logging.DEBUG, format=LOGGING_FORMAT, filemode='w')
    for dirname, dirnames, filenames in os.walk("/home/psinger/WikiLinks/data/sem_sim"):
        for file_name in filenames:
            if file_name.endswith(".p"):
                print file_name
                sem_sim = cPickle.load( open( "/home/psinger/WikiLinks/data/sem_sim/"+file_name, "rb" ) )
                for link, sim in sem_sim.iteritems():
                    try:
                        link_features = {}
                        link_features['source_article_id'] = link[0]
                        link_features['target_article_id'] = link[1]
                        link_features['sim'] = sim

                        sql  = "UPDATE link_features " \
                               "SET  sem_similarity=%(sim)s " \
                               "WHERE source_article_id = %(source_article_id)s AND target_article_id = %(target_article_id)s;"

                        cursor.execute(sql, link_features)

                    except MySQLdb.Error as e:
                        logging.error(e)
                    connection.commit()
                    try:
                        link_features = {}
                        link_features['source_article_id'] = link[1]
                        link_features['target_article_id'] = link[0]
                        link_features['sim'] = sim

                        sql  = "UPDATE link_features " \
                               "SET  sem_similarity=%(sim)s " \
                               "WHERE source_article_id = %(source_article_id)s AND target_article_id = %(target_article_id)s;"

                        cursor.execute(sql, link_features)

                    except MySQLdb.Error as e:
                        logging.error(e)
                    connection.commit()
                connection.close() 
Example 76
Project: shaptools   Author: SUSE   File: shapcli_test.py    Apache License 2.0 4 votes vote down vote up
def test_parse_arguments(
        self, mock_parse_sr_arguments, mock_parse_hana_arguments, mock_argument_parser):
        """Verify parse_arguments builds the top-level parser, registers the
        six global options, wires up the hana/sr subparsers, and returns the
        parser together with the parsed args."""
        parser_mock = mock.Mock()
        parser_mock.parse_args.return_value = 'args'
        mock_argument_parser.return_value = parser_mock

        subparsers_mock = mock.Mock()
        parser_mock.add_subparsers.return_value = subparsers_mock

        hana_subparser = mock.Mock()
        sr_subparser = mock.Mock()
        subparsers_mock.add_parser.side_effect = [hana_subparser, sr_subparser]

        returned_parser, returned_args = shapcli.parse_arguments()

        # parser constructed with the program name
        mock_argument_parser.assert_called_once_with(shapcli.PROG)

        # all six global flags registered, in order
        expected_options = [
            mock.call('-v', '--verbosity',
                help='Python logging level. Options: DEBUG, INFO, WARN, ERROR (INFO by default)'),
            mock.call('-r', '--remote',
                help='Run the command in other machine using ssh'),
            mock.call('-c', '--config',
                help='JSON configuration file with SAP HANA instance data (sid, instance and password)'),
            mock.call('-s', '--sid',
                help='SAP HANA sid'),
            mock.call('-i', '--instance',
                help='SAP HANA instance'),
            mock.call('-p', '--password',
                help='SAP HANA password')
        ]
        parser_mock.add_argument.assert_has_calls(expected_options)
        assert parser_mock.add_argument.call_count == 6

        # subcommand group created once, with both subparsers attached
        parser_mock.add_subparsers.assert_called_once_with(
            title='subcommands', description='valid subcommands', help='additional help')
        subparsers_mock.add_parser.assert_has_calls([
            mock.call('hana', help='Commands to interact with SAP HANA databse'),
            mock.call('sr', help='Commands to interact with SAP HANA system replication')
        ])

        # each subparser handed to its argument-wiring helper
        mock_parse_sr_arguments.assert_called_once_with(sr_subparser)
        mock_parse_hana_arguments.assert_called_once_with(hana_subparser)

        parser_mock.parse_args.assert_called_once_with()

        assert returned_parser == parser_mock
        assert returned_args == 'args'
Example 77
Project: Graphlib   Author: HamletWantToCode   File: parse_config.py    MIT License 4 votes vote down vote up
def __init__(self, args, options='', timestamp=True):
        """Parse CLI args, load the JSON config (optionally from a resumed
        checkpoint), create timestamped save/log directories, persist the
        merged config, and configure logging.
        """
        # register any custom cli flags before parsing
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        args = args.parse_args()

        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device

        if args.resume:
            # resuming: config lives next to the checkpoint
            self.resume = Path(args.resume)
            self.cfg_fname = self.resume.parent / 'config.json'
        else:
            assert args.config is not None, \
                "Configuration file need to be specified. Add '-c config.json', for example."
            self.resume = None
            self.cfg_fname = Path(args.config)

        # load config file and overlay any custom cli options
        self._config = _update_config(read_json(self.cfg_fname), options, args)

        # derive run directories; an empty run id collapses the timestamp level
        base_dir = Path(self.config['trainer']['save_dir'])
        run_id = datetime.now().strftime(r'%m%d_%H%M%S') if timestamp else ''
        exper_name = self.config['name']
        self._save_dir = base_dir / 'models' / exper_name / run_id
        self._log_dir = base_dir / 'log' / exper_name / run_id

        for target_dir in (self.save_dir, self.log_dir):
            target_dir.mkdir(parents=True, exist_ok=True)

        # persist the merged config alongside the checkpoints
        write_json(self.config, self.save_dir / 'config.json')

        # configure logging module
        setup_logging(self.log_dir)
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG
        }
Example 78
Project: toggle.sg-download   Author: 0x776b7364   File: download_toggle_video2.py    MIT License 4 votes vote down vote up
def user_select_options(recordsList):
	"""
	Returns a list of user-selected names and URLs from 'recordsList'
	recordsList is a list of (title,url) tuples

	Prompts repeatedly until at least one valid selection is entered.
	Entering 0 selects everything. If DEBUG logging is enabled, the chosen
	records are appended to s1.selected_records.txt.
	"""
	user_selected_records = []

	print("")
	for cnt in range(1,len(recordsList)+1):
		print("[%s]: %s" % (cnt,recordsList[cnt-1][0]))

	is_invalid_selection = True
	while (is_invalid_selection):
		user_selection_input_list = list(set(raw_input('\nEnter selection (delimit multiple selections with space, 0 to select all): ').split()))

		for selection in user_selection_input_list:
			try:
				if int(selection) > len(recordsList) or int(selection) < 0:
					raise ValueError
				if int(selection) == 0:
					# BUG FIX: copy instead of aliasing recordsList — the old
					# code bound user_selected_records to the caller's list, so
					# any other selection in the same input appended into (and
					# mutated) recordsList itself.
					user_selected_records = list(recordsList)
				else:
					user_selected_records.append(recordsList[int(selection)-1])
				is_invalid_selection = False
			except ValueError:
				logger.error("Invalid value: %s" % (selection))
				continue

	if user_selected_records:
		logger.info("Selected URL(s):")
		for record in user_selected_records:
			logger.info(record[1])

		if (logger.isEnabledFor(logging.DEBUG)):
			text_file = open("s1.selected_records.txt", "a")
			for selection in user_selection_input_list:
				try:
					text_file.write("{}".format(recordsList[int(selection)-1]))
					text_file.write("{}".format("\n"))
				except (ValueError, IndexError):
					continue
			text_file.close()

	return user_selected_records
Example 79
Project: flasky   Author: RoseOu   File: __main__.py    MIT License 4 votes vote down vote up
def parse_options():
    """
    Define and parse `optparse` options for command-line usage.
    """
    usage = """%prog [options] [INPUTFILE]
       (STDIN is assumed if no INPUTFILE is given)"""
    desc = "A Python implementation of John Gruber's Markdown. " \
           "http://packages.python.org/Markdown/"
    ver = "%%prog %s" % markdown.version

    parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
    # output / encoding
    parser.add_option("-f", "--file", dest="filename", default=None,
                      help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
                      metavar="OUTPUT_FILE")
    parser.add_option("-e", "--encoding", dest="encoding",
                      help="Encoding for input and output files.",)
    # verbosity: -q raises the threshold above CRITICAL, -v lowers to INFO,
    # --noisy lowers to DEBUG; they all store into the same 'verbose' dest
    parser.add_option("-q", "--quiet", default = CRITICAL,
                      action="store_const", const=CRITICAL+10, dest="verbose",
                      help="Suppress all warnings.")
    parser.add_option("-v", "--verbose",
                      action="store_const", const=INFO, dest="verbose",
                      help="Print all warnings.")
    parser.add_option("-s", "--safe", dest="safe", default=False,
                      metavar="SAFE_MODE",
                      help="'replace', 'remove' or 'escape' HTML tags in input")
    parser.add_option("-o", "--output_format", dest="output_format",
                      default='xhtml1', metavar="OUTPUT_FORMAT",
                      help="'xhtml1' (default), 'html4' or 'html5'.")
    parser.add_option("--noisy",
                      action="store_const", const=DEBUG, dest="verbose",
                      help="Print debug messages.")
    parser.add_option("-x", "--extension", action="append", dest="extensions",
                      help = "Load extension EXTENSION.", metavar="EXTENSION")
    parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
                      action='store_false', default=True,
                      help="Observe number of first item of ordered lists.")

    (options, args) = parser.parse_args()

    # first positional arg is the input file; otherwise read STDIN
    input_file = args[0] if args else None

    if not options.extensions:
        options.extensions = []

    opts = {
        'input': input_file,
        'output': options.filename,
        'safe_mode': options.safe,
        'extensions': options.extensions,
        'encoding': options.encoding,
        'output_format': options.output_format,
        'lazy_ol': options.lazy_ol,
    }
    return opts, options.verbose
Example 80
Project: redrum   Author: Evidlo   File: redrum.py    MIT License 4 votes vote down vote up
def main():
    """Entry point: parse CLI flags, load (or rebuild) the scored-image
    cache, optionally select and set a wallpaper, and persist the cache.
    """
    parser = argparse.ArgumentParser(description="Reddit wallpaper grabber.")
    parser.add_argument('-v', '--version', action='version', version=__version__, help="show version information")
    parser.add_argument('--refresh', action='store_true', default=False, help="force a cache refresh")
    parser.add_argument('--noset', action='store_true', default=False, help="don't select and set wallpaper")
    # BUG FIX: '--config' takes a path argument. With action='store_true' the
    # flag rejected a path and set args.config to True, which then broke
    # Config(args.config). Plain store with a string default is correct.
    parser.add_argument('--config', default='~/.config/redrum.ini', help="use a different config path")
    parser.add_argument('--debug', action='store_true', default=False, help="enable debug messages")

    args = parser.parse_args()

    if args.debug:
        logger.setLevel(logging.DEBUG)
        logger.debug('Debugging enabled...')

    config = Config(args.config)

    # attempt to load scored images from cache
    if not os.path.exists(config.cache_file):
        print("No previous score cache found at {0}.  This may take a minute...".format(config.cache_file))
        date = datetime.strftime(datetime.now(), config.date_format)
        images = get_images(config)
        seen = []

    else:
        with open(config.cache_file, 'r') as cache:
            j = json.loads(cache.read())
            print("Found cache at {0}".format(config.cache_file))
            date = j['date']
            # if the cache is old or `options` has changed, update it
            cache_age = datetime.now() - datetime.strptime(date, config.date_format)
            if (cache_age > config.cache_expiry or j['options'] != config.options or args.refresh):
                print("Refreshing cache...")
                # reload image metadata
                images = get_images(config)
                date = datetime.now().strftime(config.date_format)

            # otherwise, fetch scored images from cache
            else:
                images = j['images']

        seen = j['seen']

    # select image and set as wallpaper
    if not args.noset:
        image = weighted_select(config, images, seen)
        set_wallpaper(config, image)
        seen.append(image['id'])

    save(config, images, date, seen)