Python logging.log() Examples

The following are 30 code examples of logging.log(), taken from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the logging module, or try the search function.
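Before the project examples, a minimal, self-contained sketch of the call itself (the messages and level choices here are illustrative, not from any of the projects below): logging.log() takes a numeric level as its first argument, followed by a printf-style format string and its arguments.

import logging

# Assumed setup for illustration: emit everything at DEBUG and above.
logging.basicConfig(level=logging.DEBUG)

# The standard level constants (DEBUG=10, INFO=20, WARNING=30, ...) are plain integers.
logging.log(logging.INFO, "processed %d records", 42)
logging.log(logging.DEBUG, "raw payload: %r", {"id": 1})

# Custom numeric levels also work, as several of the examples below demonstrate.
TRACE = 15
logging.addLevelName(TRACE, "TRACE")
logging.log(TRACE, "between DEBUG and INFO")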
Example #1
Source File: state.py    From TabPy with MIT License
def _set_config_value(
        self,
        section_name,
        option_name,
        option_value,
        logger=logging.getLogger(__name__),
        _update_revision=True,
    ):
        if not self.config:
            raise ValueError("State configuration not yet loaded.")

        if not self.config.has_section(section_name):
            logger.log(logging.DEBUG, f"Adding config section {section_name}")
            self.config.add_section(section_name)

        self.config.set(section_name, option_name, option_value)
        # update revision number
        if _update_revision:
            self._increase_revision_number()
        self._write_state(logger=logger) 
Example #2
Source File: ServerConnection.py    From 3vilTwinAttacker with MIT License
def handleHeader(self, key, value):
        logging.log(self.getLogLevel(), "Got server header: %s:%s" % (key, value))

        if (key.lower() == 'location'):
            value = self.replaceSecureLinks(value)

        if (key.lower() == 'content-type'):
            if (value.find('image') != -1):
                self.isImageRequest = True
                logging.debug("Response is image content, not scanning...")

        if (key.lower() == 'content-encoding'):
            if (value.find('gzip') != -1):
                logging.debug("Response is compressed...")
                self.isCompressed = True
        elif (key.lower() == 'content-length'):
            self.contentLength = value
        elif (key.lower() == 'set-cookie'):
            self.client.responseHeaders.addRawHeader(key, value)
        elif (key.lower() == 'strict-transport-security'):
            logging.log(self.getLogLevel(), "LEO Erasing Strict Transport Security....")
        else:
            self.client.setHeader(key, value) 
Example #3
Source File: runtime.py    From dket with GNU General Public License v3.0
def _step(self):
        step, _, loss, summary, targets, predictions, lengths = self._sess.run(self._fetches)

        logging.log(HDEBUG, 'computing downstream metrics')
        metrics = dict((key, metric.reset().compute(targets, predictions, lengths))
                       for (key, metric) in self._metrics.items())

        save_step = self._ckpt_every == 0 or (step % self._ckpt_every == 0)
        ckpt = self._save_ckpt(step) if save_step else None
        self._summarize(step, loss, summary, metrics, ckpt=ckpt)
        if ckpt and self._eval:
            self._eval.start(ckpt)

        next_step = self._steps == 0 or step < self._steps
        logging.log(HDEBUG, 'next step: %s', str(next_step))
        return step, next_step 
Example #4
Source File: log_calls.py    From instaclone with Apache License 2.0
def log_calls_with(severity):
  """Create a decorator to log calls and return values of any function, for debugging."""

  def decorator(fn):
    @functools.wraps(fn)
    def wrap(*params, **kwargs):
      call_str = "%s(%s)" % (
        fn.__name__, ", ".join([repr(p) for p in params] + ["%s=%s" % (k, repr(v)) for (k, v) in kwargs.items()]))
      # TODO: Extract line number from caller and use that in logging.
      log(severity, ">> %s", call_str)
      ret = fn(*params, **kwargs)
      # TODO: Add a way to make return short or omitted.
      log(severity, "<< %s: %s", call_str, repr(ret))
      return ret

    return wrap

  return decorator

# Convenience decorators for logging. 
Example #5
Source File: utils.py    From jd_analysis with GNU Lesser General Public License v3.0
def kill_ports(ports):
    for port in ports:
        log('kill %s start' % port)
        popen = subprocess.Popen('lsof -i:%s' % port, shell = True, stdout = subprocess.PIPE)
        (data, err) = popen.communicate()
        log('data:\n%s  \nerr:\n%s' % (data, err))

        pattern = re.compile(r'\b\d+\b', re.S)
        pids = re.findall(pattern, data)

        log('pids:%s' % str(pids))

        for pid in pids:
            if pid != '' and pid != None:
                try:
                    log('pid:%s' % pid)
                    popen = subprocess.Popen('kill -9 %s' % pid, shell = True, stdout = subprocess.PIPE)
                    (data, err) = popen.communicate()
                    log('data:\n%s  \nerr:\n%s' % (data, err))
                except Exception as e:
                    log('kill_ports exception:%s' % e)

        log('kill %s finish' % port) 
Example #6
Source File: greatdancer.py    From Facedancer with BSD 3-Clause "New" or "Revised" License
def send_on_endpoint(self, ep_num, data, blocking=True):
        """
        Sends a collection of USB data on a given endpoint.

        ep_num: The number of the IN endpoint on which data should be sent.
        data: The data to be sent.
        blocking: If true, this function will wait for the transfer to complete.
        """
        logging.log(LOGLEVEL_TRACE, f"EP{ep_num}/IN: <- {bytes(data)}")

        self._wait_until_ready_to_send(ep_num)
        self.api.send_on_endpoint(ep_num, bytes(data))

        # If we're blocking, wait until the transfer completes.
        if blocking:
            while not self._transfer_is_complete(ep_num, self.DEVICE_TO_HOST):
                pass

        self._clean_up_transfers_for_endpoint(ep_num, self.DEVICE_TO_HOST) 
Example #7
Source File: settings.py    From fanci with GNU General Public License v3.0
def configure_logger():
    global LOGGER_CONFIGURED, log
    if not LOGGER_CONFIGURED:

        logging.Logger.manager.loggerDict.clear()
        logging.VERBOSE = 5
        logging.addLevelName(logging.VERBOSE, 'VERBOSE')
        logging.Logger.verbose = lambda inst, msg, *args, **kwargs: inst.log(logging.VERBOSE, msg, *args, **kwargs)
        logging.verbose = lambda msg, *args, **kwargs: logging.log(logging.VERBOSE, msg, *args, **kwargs)

        log = logging.getLogger('log')
        log.setLevel(LOG_LVL)
        log_formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')

        if LOG_TO_FILE:
            file_handler = logging.FileHandler(datetime.now().strftime(LOG_ROOT + 'learning_%Y_%m_%d_%H_%M_.log'))
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(log_formatter)
            log.addHandler(file_handler)

        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        log.addHandler(console_handler)
        if PRINT_TO_LOG_CONVERT:
            builtins.print = log_print
        LOGGER_CONFIGURED = True 
Example #8
Source File: solver.py    From training_results_v0.6 with Apache License 2.0
def forward_end(self, i, internals):
        if i % self.interval == 0 and logging.getLogger().isEnabledFor(self.level):
            for key in sorted(internals.keys()):
                arr = internals[key]
                logging.log(self.level, 'Iter:%d  param:%s\t\tstat(%s):%s',
                            i, key, self.stat.__name__, str(self.stat(arr.asnumpy()))) 
Example #9
Source File: _mptools.py    From mptools with MIT License
def _logger(name, level, msg, exc_info=None):
    elapsed = time.monotonic() - start_time
    hours = int(elapsed // 60)
    seconds = elapsed - (hours * 60)
    logging.log(level, f'{hours:3}:{seconds:06.3f} {name:20} {msg}', exc_info=exc_info)


# -- Queue handling support 
Example #10
Source File: Store.py    From buttersink with GNU General Public License v3.0
def _logDryRun(logger, level, format, *args):
    logger.log(level, "WOULD: " + format % args)
    return True 
Example #11
Source File: settings.py    From fanci with GNU General Public License v3.0
def log_print(*values, sep: str = ' ', end: str = '', file=None, flush: bool = False):
    try:
        if len(values) > 0:
            if values[0].strip():
                log.info(values[0].strip())
    except:
        log.error('Print redirect failure') 
Example #12
Source File: dec.py    From training_results_v0.6 with Apache License 2.0
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0]*9//10
        X_train = X[:sep]
        X_val = X[sep:]
        ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
        if not os.path.exists(save_to+'_pt.arg'):
            ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
                                        lr_scheduler=mx.lr_scheduler.FactorScheduler(20000,0.1))
            ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
                              lr_scheduler=mx.lr_scheduler.FactorScheduler(20000,0.1))
            ae_model.save(save_to+'_pt.arg')
            logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
            logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
        else:
            ae_model.load(save_to+'_pt.arg')
        self.ae_model = ae_model

        self.dec_op = DECModel.DECLoss(num_centers, alpha)
        label = mx.sym.Variable('label')
        self.feature = self.ae_model.encoder
        self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
        self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
        self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
        self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
        self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
        self.num_centers = num_centers 
Example #13
Source File: dec.py    From training_results_v0.6 with Apache License 2.0
def mnist_exp(xpu):
    X, Y = data.get_mnist()
    if not os.path.isdir('data'):
        os.makedirs('data')
    dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
    acc = []
    for i in [10*(2**j) for j in range(9)]:
        acc.append(dec_model.cluster(X, Y, i))
        logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
    logging.info(str(acc))
    logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc)))) 
Example #14
Source File: Store.py    From buttersink with GNU General Public License v3.0
def skipDryRun(logger, dryRun, level=logging.DEBUG):
    """ Return logging function.

    When logging function called, will return True if action should be skipped.
    Log will indicate if skipped because of dry run.
    """
    # This is an undocumented "feature" of logging module:
    # logging.log() requires a numeric level
    # logging.getLevelName() maps names to numbers
    if not isinstance(level, int):
        level = logging.getLevelName(level)
    return (
        functools.partial(_logDryRun, logger, level) if dryRun
        else functools.partial(logger.log, level)
    ) 
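The comment in the example above points at behavior worth remembering: logging.log() and Logger.log() only accept numeric levels, while logging.getLevelName() maps registered level names to their numbers and back. A small standalone check illustrating that mapping (not part of the buttersink code):

import logging

# getLevelName() maps both directions for registered levels.
assert logging.getLevelName("INFO") == 20
assert logging.getLevelName(20) == "INFO"

# Logger.log() insists on an integer level; passing the string directly raises TypeError,
# so convert the name first.
logging.getLogger(__name__).log(logging.getLevelName("WARNING"), "numeric level required")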
Example #15
Source File: Store.py    From buttersink with GNU General Public License v3.0
def sendTo(self, dest, chunkSize):
        """ Send this difference to the dest Store. """
        vol = self.toVol
        paths = self.sink.getPaths(vol)

        if self.sink == dest:
            logger.info("Keep: %s", self)
            self.sink.keep(self)
        else:
            # Log, but don't skip yet, so we can log more detailed skipped actions later
            skipDryRun(logger, dest.dryrun, 'INFO')("Xfer: %s", self)

            receiveContext = dest.receive(self, paths)

            sendContext = self.sink.send(self)

            # try:
            #     receiveContext.metadata['btrfsVersion'] = self.btrfsVersion
            # except AttributeError:
            #     pass

            transfer(sendContext, receiveContext, chunkSize)

        if vol.hasInfo():
            infoContext = dest.receiveVolumeInfo(paths)

            if infoContext is None:
                # vol.writeInfo(sys.stdout)
                pass
            else:
                with infoContext as stream:
                    vol.writeInfo(stream) 
Example #16
Source File: solver.py    From training_results_v0.6 with Apache License 2.0
def backward_end(self, i, weights, grads, metric=None):
        if i % self.interval == 0 and logging.getLogger().isEnabledFor(self.level):
            for key in sorted(grads.keys()):
                arr = grads[key]
                logging.log(self.level, 'Iter:%d  param:%s\t\tstat(%s):%s\t\tgrad_stat:%s',
                            i, key, self.stat.__name__,
                            str(self.stat(weights[key].asnumpy())), str(self.stat(arr.asnumpy())))
        if i % self.interval == 0 and metric is not None:
            logging.log(logging.INFO, 'Iter:%d metric:%f', i, metric.get()[1])
            metric.reset() 
Example #17
Source File: dec.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0]*9//10
        X_train = X[:sep]
        X_val = X[sep:]
        ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
        if not os.path.exists(save_to+'_pt.arg'):
            ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
                                        lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
            ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
                              lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
            ae_model.save(save_to+'_pt.arg')
            logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
            logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
        else:
            ae_model.load(save_to+'_pt.arg')
        self.ae_model = ae_model

        self.dec_op = DECModel.DECLoss(num_centers, alpha)
        label = mx.sym.Variable('label')
        self.feature = self.ae_model.encoder
        self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
        self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
        self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
        self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
        self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
        self.num_centers = num_centers 
Example #18
Source File: common.py    From locality-sensitive-hashing with MIT License
def log(cls, *args, **kwargs):
    return Log(*args, **kwargs) 
Example #19
Source File: debug.py    From stn-ocr with GNU General Public License v3.0
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        grad = in_grad[0].asnumpy()
        nan = np.isnan(grad)
        num_nan = nan[nan==True]
        logging.log(logging.DEBUG, "Backward: min: {}, mean: {}, max: {} nan: {}".format(grad.min(), grad.mean(), grad.max(), len(num_nan) / len(grad.flatten())))
        self.assign(in_grad[0], req[0], out_grad[0]) 
Example #20
Source File: debug.py    From stn-ocr with GNU General Public License v3.0
def forward(self, is_train, req, in_data, out_data, aux):
        x = in_data[0].asnumpy()
        nan = np.isnan(x)
        num_nan = nan[nan == True]
        logging.log(logging.DEBUG, "Forward: max: {}, mean: {}, min: {}, nan: {}".format(x.max(), x.mean(), x.min(), len(num_nan) / len(x.flatten())))
        self.assign(out_data[0], req[0], in_data[0]) 
Example #21
Source File: utils.py    From QTS_Research with MIT License
def __call__(self, fn):
        def newFn(origSelf, *args, **kwargs):
            if logging.getLogger().isEnabledFor(self.logLevel):
                argNames = [argName for argName in inspect.getfullargspec(fn)[0] if argName != 'self']
                logging.log(self.logLevel, 
                    "{} {} {} kw:{}".format(self.text, fn.__name__, 
                        [nameNarg for nameNarg in zip(argNames, args) if nameNarg[1] is not origSelf], kwargs))
            return fn(origSelf, *args, **kwargs)
        return newFn 
Example #22
Source File: obj.py    From rekall with GNU General Public License v2.0
def __init__(self, reason="None Object", *args, **kwargs):
        # Often None objects are instantiated on purpose so its not really that
        # important to see their reason.
        if kwargs.get("log"):
            logging.log(logging.WARN, reason)
        self.reason = utils.SmartUnicode(reason)
        self.strict = kwargs.get("strict")
        self.args = args
        if self.strict:
            self.bt = ''.join(traceback.format_stack()[:-2]) 
Example #23
Source File: obj.py    From rekall with GNU General Public License v2.0
def __init__(self):
        self.data = {}
        self.filename = os.environ.get(self.ENVIRONMENT_VAR)
        if self.filename:
            # Ensure we update the object access log when we exit.
            atexit.register(self._DumpData) 
Example #24
Source File: logger.py    From SlowFast-Network-pytorch with MIT License
def e(message):
        Logger.log(logging.ERROR, message) 
Example #25
Source File: logger.py    From SlowFast-Network-pytorch with MIT License
def w(message):
        Logger.log(logging.WARNING, message) 
Example #26
Source File: logger.py    From SlowFast-Network-pytorch with MIT License
def i(message):
        Logger.log(logging.INFO, message) 
Example #27
Source File: logger.py    From SlowFast-Network-pytorch with MIT License
def d(message):
        Logger.log(logging.DEBUG, message) 
Example #28
Source File: logger.py    From SlowFast-Network-pytorch with MIT License
def log(level, message):
        assert Logger.Initialized, 'Logger has not been initialized'
        logging.log(level, message) 
Example #29
Source File: tcprelay.py    From neverendshadowsocks with Apache License 2.0
def handle_event(self, sock, fd, event):
        # handle events and dispatch to handlers
        if sock:
            logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
                        eventloop.EVENT_NAMES.get(event, event))
        if sock == self._server_socket:
            if event & eventloop.POLL_ERR:
                # TODO
                raise Exception('server_socket error')
            try:
                logging.debug('accept')
                conn = self._server_socket.accept()
                TCPRelayHandler(self, self._fd_to_handlers,
                                self._eventloop, conn[0], self._config,
                                self._dns_resolver, self._is_local)
            except (OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()
        else:
            if sock:
                handler = self._fd_to_handlers.get(fd, None)
                if handler:
                    handler.handle_event(sock, event)
            else:
                logging.warn('poll removed fd') 
Example #30
Source File: tcprelay.py    From neverendshadowsocks with Apache License 2.0
def _sweep_timeout(self):
        # tornado's timeout memory management is more flexible than we need
        # we just need a sorted last_activity queue and it's faster than heapq
        # in fact we can do O(1) insertion/remove so we invent our own
        if self._timeouts:
            logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
            now = time.time()
            length = len(self._timeouts)
            pos = self._timeout_offset
            while pos < length:
                handler = self._timeouts[pos]
                if handler:
                    if now - handler.last_activity < self._timeout:
                        break
                    else:
                        if handler.remote_address:
                            logging.warn('timed out: %s:%d' %
                                         handler.remote_address)
                        else:
                            logging.warn('timed out')
                        handler.destroy()
                        self._timeouts[pos] = None  # free memory
                        pos += 1
                else:
                    pos += 1
            if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # clean up the timeout queue when it gets larger than half
                # of the queue
                self._timeouts = self._timeouts[pos:]
                for key in self._handler_to_timeouts:
                    self._handler_to_timeouts[key] -= pos
                pos = 0
            self._timeout_offset = pos