Python logging.logger() Examples

The following are 30 code examples showing how to use loggers from Python's logging module (note: the standard-library API is logging.getLogger()/logging.Logger; there is no logging.logger() function). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module logging, or try the search function.

Example 1
Project: panoptes   Author: yahoo   File: context.py    License: Apache License 2.0 6 votes vote down vote up
def _get_panoptes_logger(self):
        """
        Return the logger to be used by the context.

        The name of the calling module is guessed through introspection of the stack.

        Returns:
            logging.Logger: a child of the context's root logger, named after the calling module

        Raises:
            PanoptesContextError: raised if any error happens while instantiating the logger
        """
        self.__logger.info(u'Attempting to get logger')
        try:
            calling_module = get_calling_module_name()
            child_logger = self.__rootLogger.getChild(calling_module)
            self.__logger.info(u'Got logger for module %s' % calling_module)
            return child_logger
        except Exception as e:
            raise PanoptesContextError(u'Could not get logger: %s' % str(e))
Example 2
Project: MONAI   Author: Project-MONAI   File: stats_handler.py    License: Apache License 2.0 6 votes vote down vote up
def attach(self, engine: Engine):
        """
        Register a set of Ignite Event-Handlers to a specified Ignite engine.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.

        """
        if self._name is None:
            # no explicit logger name configured: adopt the engine's logger
            self.logger = engine.logger
        # register each handler only once, guarding against repeated attach() calls
        handler_events = (
            (self.iteration_completed, Events.ITERATION_COMPLETED),
            (self.epoch_completed, Events.EPOCH_COMPLETED),
            (self.exception_raised, Events.EXCEPTION_RAISED),
        )
        for handler, event in handler_events:
            if not engine.has_event_handler(handler, event):
                engine.add_event_handler(event, handler)
Example 3
Project: HpBandSter   Author: automl   File: master.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def wait_for_workers(self, min_n_workers=1):
	"""
	Block until at least ``min_n_workers`` workers have registered with the dispatcher.

	Parameters
	----------
	min_n_workers: int
		minimum number of workers present before the run starts
	"""
	self.logger.debug('wait_for_workers trying to get the condition')
	with self.thread_cond:
		available = self.dispatcher.number_of_workers()
		while available < min_n_workers:
			self.logger.debug('HBMASTER: only %i worker(s) available, waiting for at least %i.'%(available, min_n_workers))
			# wake up at least once per second and poke the dispatcher to rediscover workers
			self.thread_cond.wait(1)
			self.dispatcher.trigger_discover_worker()
			available = self.dispatcher.number_of_workers()
	self.logger.debug('Enough workers to start this run!')
Example 4
Project: HpBandSter   Author: automl   File: master.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def job_callback(self, job):
	"""
	method to be called when a job has finished

	this will do some book keeping and call the user defined
	new_result_callback if one was specified

	Parameters
	----------
	job: hpbandster.distributed.dispatcher.Job
		the finished job; ``job.id[0]`` indexes the iteration it belongs to
	"""
	self.logger.debug('job_callback for %s started'%str(job.id))
	with self.thread_cond:
		self.logger.debug('job_callback for %s got condition'%str(job.id))
		self.num_running_jobs -= 1

		# idiom fix: `x is not None` instead of `not x is None`
		if self.result_logger is not None:
			self.result_logger(job)
		self.iterations[job.id[0]].register_result(job)
		self.config_generator.new_result(job)

		# room in the queue again -> wake the master thread so it can submit more work
		if self.num_running_jobs <= self.job_queue_sizes[0]:
			self.logger.debug("HBMASTER: Trying to run another job!")
			self.thread_cond.notify()

	self.logger.debug('job_callback for %s finished'%str(job.id))
Example 5
def __init__(self, logger=None):
	"""
	Store the logger used for debug output.

	Parameters
	----------
	logger: logging.Logger, optional
		logger for some debug output; defaults to the 'hpbandster' logger when None
	"""
	self.logger = logging.getLogger('hpbandster') if logger is None else logger
Example 6
def new_result(self, job, update_model=True):
	"""
	registers finished runs

	Every time a run has finished, this function should be called
	to register it with the result logger. If overwritten, make
	sure to call this method from the base class to ensure proper
	logging.

	Parameters
	----------
	job: instance of hpbandster.distributed.dispatcher.Job
		contains all necessary information about the job
	update_model: boolean
		determines whether a model inside the config_generator should be updated;
		unused in this base implementation, kept for subclass compatibility
	"""
	# idiom fix: `x is not None` instead of `not x is None`
	if job.exception is not None:
		self.logger.warning("job {} failed with exception\n{}".format(job.id, job.exception))
Example 7
Project: dask-ml   Author: dask   File: utils.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def _timer(name, _logger=None, level="info"):
    """
    Log the execution time of the wrapped code to the given logger level.

    Generator intended for use as a context manager (the timed code runs at
    the ``yield``) — presumably decorated with ``contextlib.contextmanager``
    at the definition site; TODO confirm against the full file.

    Parameters
    ----------
    name : str
        How to name the timer (will be in the logs)
    _logger : logging.Logger, optional
        The logger to write to; falls back to this module's ``logger`` when None
    level : str
        On which level to log the performance measurement (must name a
        Logger method, e.g. "info" or "debug")
    """
    start = tic()
    # fall back to the module-level logger when no logger was passed in
    _logger = _logger or logger
    _logger.info("Starting %s", name)
    yield
    stop = tic()
    # timedelta gives a readable "H:MM:SS.ffffff" rendering of the elapsed seconds
    delta = datetime.timedelta(seconds=stop - start)
    # resolve e.g. "info" -> _logger.info
    _logger_level = getattr(_logger, level)
    _logger_level("Finished %s in %s", name, delta)  # nicer formatting for time.
Example 8
Project: dask-ml   Author: dask   File: utils.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def _timed(_logger=None, level="info"):
    """
    Decorator factory: log the execution time of the decorated function.

    Parameters
    ----------
    _logger : logging.Logger, optional
        The logger to write to; when None, ``_timer`` falls back to the
        module-level ``logger``.
    level : str
        On which level to log the performance measurement
    Returns
    -------
    fun_wrapper : Callable
    """

    def fun_wrapper(f):
        @functools.wraps(f)
        def wraps(*args, **kwargs):
            # Bug fix: previously this always passed the module-level `logger`
            # (`_logger=logger`), silently ignoring the `_logger` argument.
            with _timer(f.__name__, _logger=_logger, level=level):
                results = f(*args, **kwargs)
            return results

        return wraps

    return fun_wrapper
Example 9
Project: auptimizer   Author: LGE-ARC-AdvancedAI   File: master.py    License: GNU General Public License v3.0 6 votes vote down vote up
def wait_for_workers(self, min_n_workers=1):
	"""
	Block until at least ``min_n_workers`` workers have registered with the dispatcher.

	Parameters
	----------
	min_n_workers: int
		minimum number of workers present before the run starts
	"""
	self.logger.debug('wait_for_workers trying to get the condition')
	with self.thread_cond:
		available = self.dispatcher.number_of_workers()
		while available < min_n_workers:
			self.logger.debug('HBMASTER: only %i worker(s) available, waiting for at least %i.'%(available, min_n_workers))
			# wake up at least once per second and poke the dispatcher to rediscover workers
			self.thread_cond.wait(1)
			self.dispatcher.trigger_discover_worker()
			available = self.dispatcher.number_of_workers()
	self.logger.debug('Enough workers to start this run!')
Example 10
Project: auptimizer   Author: LGE-ARC-AdvancedAI   File: master.py    License: GNU General Public License v3.0 6 votes vote down vote up
def job_callback(self, job):
	"""
	method to be called when a job has finished

	this will do some book keeping and call the user defined
	new_result_callback if one was specified

	Parameters
	----------
	job: hpbandster.distributed.dispatcher.Job
		the finished job; ``job.id[0]`` indexes the iteration it belongs to
	"""
	self.logger.debug('job_callback for %s started'%str(job.id))
	with self.thread_cond:
		self.logger.debug('job_callback for %s got condition'%str(job.id))
		self.num_running_jobs -= 1

		# idiom fix: `x is not None` instead of `not x is None`
		if self.result_logger is not None:
			self.result_logger(job)
		self.iterations[job.id[0]].register_result(job)
		self.config_generator.new_result(job)

		# room in the queue again -> wake the master thread so it can submit more work
		if self.num_running_jobs <= self.job_queue_sizes[0]:
			self.logger.debug("HBMASTER: Trying to run another job!")
			self.thread_cond.notify()

	self.logger.debug('job_callback for %s finished'%str(job.id))
Example 11
Project: auptimizer   Author: LGE-ARC-AdvancedAI   File: base_config_generator.py    License: GNU General Public License v3.0 6 votes vote down vote up
def __init__(self, logger=None):
	"""
	Store the logger used for debug output.

	Parameters
	----------
	logger: logging.Logger, optional
		logger for some debug output; defaults to the 'hpbandster' logger when None
	"""
	self.logger = logging.getLogger('hpbandster') if logger is None else logger
Example 12
Project: auptimizer   Author: LGE-ARC-AdvancedAI   File: base_config_generator.py    License: GNU General Public License v3.0 6 votes vote down vote up
def new_result(self, job, update_model=True):
	"""
	registers finished runs

	Every time a run has finished, this function should be called
	to register it with the result logger. If overwritten, make
	sure to call this method from the base class to ensure proper
	logging.

	Parameters
	----------
	job: instance of hpbandster.distributed.dispatcher.Job
		contains all necessary information about the job
	update_model: boolean
		determines whether a model inside the config_generator should be updated;
		unused in this base implementation, kept for subclass compatibility
	"""
	# idiom fix: `x is not None` instead of `not x is None`
	if job.exception is not None:
		self.logger.warning("job {} failed with exception\n{}".format(job.id, job.exception))
Example 13
Project: wavelet_prosody_toolkit   Author: asuni   File: wavelet_gui.py    License: MIT License 6 votes vote down vote up
def exception_log(logger, head_msg, ex, level=logging.ERROR):
    """Helper to dump exception in the logger

    Parameters
    ----------
    logger: logging.Logger
        the logger
    head_msg: string
        a human friendly message to prefix the exception stacktrace
    ex: Exception
        the exception
    level: int
        The wanted logging level (ERROR by default)

    """
    logger.log(level, "%s:" % head_msg)
    # Compatibility fix: the `etype` keyword argument was removed from
    # traceback.format_exception in Python 3.10; the positional form
    # works on all Python 3 versions.
    formatted = traceback.format_exception(type(ex), ex, ex.__traceback__)
    logger.log(level, "<br />".join(formatted))


###############################################################################
# Callbacks
############################################################################### 
Example 14
Project: olympe   Author: Parrot-Developers   File: __init__.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def _configure_ulog_bridge():
    """Route this process's ulog entries into a Python logging.Logger.

    The "ulog" logger has no log handler by default, so the bridge stays
    silent unless it is explicitly activated for debugging.
    """
    bridge_logger = logging.getLogger("ulog")
    # forward=False: capture entries without forwarding them anywhere by default
    ulog.enable_bridge(bridge_logger, forward=False)
Example 15
Project: panoptes   Author: yahoo   File: context.py    License: Apache License 2.0 5 votes vote down vote up
def logger(self):
        """
        A module-aware logger which will try and guess the right name for the calling module

        Returns:
            logging.Logger: the context's logger instance

        """
        # Simple accessor for the private (name-mangled) logger attribute
        # set up elsewhere in the context.
        return self.__logger
Example 16
Project: MONAI   Author: Project-MONAI   File: classification_saver.py    License: Apache License 2.0 5 votes vote down vote up
def __init__(
        self,
        output_dir: str = "./",
        filename: str = "predictions.csv",
        overwrite: bool = True,
        batch_transform: Callable = lambda x: x,
        output_transform: Callable = lambda x: x,
        name: Optional[str] = None,
    ):
        """
        Args:
            output_dir: output CSV file directory.
            filename: name of the saved CSV file name.
            overwrite: whether to overwriting existing CSV file content. If we are not overwriting,
                then we check if the results have been previously saved, and load them to the prediction_dict.
            batch_transform: a callable that is used to transform the
                ignite.engine.batch into expected format to extract the meta_data dictionary.
            output_transform: a callable that is used to transform the
                ignite.engine.output into the form expected model prediction data.
                The first dimension of this transform's output will be treated as the
                batch dimension. Each item in the batch will be saved individually.
            name: identifier of logging.logger to use, defaulting to `engine.logger`.

        """
        self.batch_transform = batch_transform
        self.output_transform = output_transform
        self.saver = CSVSaver(output_dir, filename, overwrite)
        self._name = name
        # Without an explicit name, the logger stays None and is resolved
        # later from the engine in attach().
        self.logger = logging.getLogger(name) if name is not None else None
Example 17
Project: MONAI   Author: Project-MONAI   File: classification_saver.py    License: Apache License 2.0 5 votes vote down vote up
def attach(self, engine: Engine):
        """Register this saver (and its finalizer) on ``engine``, avoiding duplicates."""
        if self._name is None:
            # no explicit logger name given: adopt the engine's logger
            self.logger = engine.logger
        saver_missing = not engine.has_event_handler(self, Events.ITERATION_COMPLETED)
        if saver_missing:
            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
        finalize_missing = not engine.has_event_handler(self.saver.finalize, Events.COMPLETED)
        if finalize_missing:
            engine.add_event_handler(Events.COMPLETED, lambda engine: self.saver.finalize())
Example 18
Project: MONAI   Author: Project-MONAI   File: stats_handler.py    License: Apache License 2.0 5 votes vote down vote up
def exception_raised(self, engine: Engine, e):
        """
        Handler for train or validation/evaluation exception raised Event.
        Print the exception information and traceback.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            e (Exception): the exception caught in Ignite during engine.run().

        """
        # Logger.exception records the active traceback along with the message.
        self.logger.exception(f"Exception: {e}")
Example 19
Project: MONAI   Author: Project-MONAI   File: segmentation_saver.py    License: Apache License 2.0 5 votes vote down vote up
def attach(self, engine: Engine):
        """Register this saver to run after every completed iteration of ``engine``."""
        if self._name is None:
            # fall back to the engine's own logger when no name was supplied
            self.logger = engine.logger
        already_attached = engine.has_event_handler(self, Events.ITERATION_COMPLETED)
        if not already_attached:
            engine.add_event_handler(Events.ITERATION_COMPLETED, self)
Example 20
Project: MONAI   Author: Project-MONAI   File: segmentation_saver.py    License: Apache License 2.0 5 votes vote down vote up
def __call__(self, engine):
        """
        Save one batch of model outputs to files.

        This method assumes self.batch_transform will extract metadata from the input batch.
        Output file datatype is determined from ``engine.state.output.dtype``.

        """
        metadata = self.batch_transform(engine.state.batch)
        outputs = self.output_transform(engine.state.output)
        self.saver.save_batch(outputs, metadata)
        self.logger.info("saved all the model outputs into files.")
Example 21
Project: MONAI   Author: Project-MONAI   File: checkpoint_loader.py    License: Apache License 2.0 5 votes vote down vote up
def __init__(self, load_path: str, load_dict, name: Optional[str] = None):
        """
        Args:
            load_path: path of the checkpoint file to load.
            load_dict (dict): mapping of names to target objects to restore.
            name: identifier of logging.logger to use, if None, defaulting to `engine.logger`.

        Raises:
            AssertionError: when `load_path` is None or `load_dict` is empty/None.
        """
        # Explicit raises instead of bare `assert` statements, which are
        # stripped when Python runs with -O; AssertionError kept for
        # backward compatibility with existing callers.
        if load_path is None:
            raise AssertionError("must provide clear path to load checkpoint.")
        self.load_path = load_path
        if load_dict is None or len(load_dict) == 0:
            raise AssertionError("must provide target objects to load.")
        self.logger = logging.getLogger(name)
        # Unwrap `.module` (e.g. DataParallel-style wrappers) into a NEW dict
        # instead of mutating the caller's argument in place.
        self.load_dict = {k: (v.module if hasattr(v, "module") else v) for k, v in load_dict.items()}

        self._name = name
Example 22
Project: MONAI   Author: Project-MONAI   File: checkpoint_loader.py    License: Apache License 2.0 5 votes vote down vote up
def attach(self, engine: Engine):
        """Register this loader to run once when ``engine`` starts."""
        uses_engine_logger = self._name is None
        if uses_engine_logger:
            self.logger = engine.logger
        return engine.add_event_handler(Events.STARTED, self)
Example 23
Project: MONAI   Author: Project-MONAI   File: checkpoint_saver.py    License: Apache License 2.0 5 votes vote down vote up
def attach(self, engine: Engine):
        """Register the configured checkpoint callbacks on ``engine``."""
        if self._name is None:
            self.logger = engine.logger
        if self._final_checkpoint is not None:
            # the final checkpoint doubles as a crash dump on exceptions
            engine.add_event_handler(Events.COMPLETED, self.completed)
            engine.add_event_handler(Events.EXCEPTION_RAISED, self.exception_raised)
        if self._key_metric_checkpoint is not None:
            engine.add_event_handler(Events.EPOCH_COMPLETED, self.metrics_completed)
        if self._interval_checkpoint is not None:
            # choose epoch- or iteration-based scheduling for interval checkpoints
            if self.epoch_level:
                interval_event = Events.EPOCH_COMPLETED(every=self.save_interval)
            else:
                interval_event = Events.ITERATION_COMPLETED(every=self.save_interval)
            engine.add_event_handler(interval_event, self.interval_completed)
Example 24
Project: MONAI   Author: Project-MONAI   File: checkpoint_saver.py    License: Apache License 2.0 5 votes vote down vote up
def completed(self, engine) -> None:
        """Callback for train or validation/evaluation completed Event.
        Save final checkpoint if configure save_final is True.

        """
        final_checkpoint = self._final_checkpoint
        assert callable(final_checkpoint), "Error: _final_checkpoint function not specified."
        final_checkpoint(engine, self.save_dict)
        assert self.logger is not None
        assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
        self.logger.info(f"Train completed, saved final checkpoint: {final_checkpoint.last_checkpoint}")
Example 25
Project: MONAI   Author: Project-MONAI   File: checkpoint_saver.py    License: Apache License 2.0 5 votes vote down vote up
def exception_raised(self, engine, e) -> None:
        """Callback for train or validation/evaluation exception raised Event.
        Save current data as final checkpoint if configure save_final is True.

        """
        final_checkpoint = self._final_checkpoint
        assert callable(final_checkpoint), "Error: _final_checkpoint function not specified."
        final_checkpoint(engine, self.save_dict)
        assert self.logger is not None
        assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
        self.logger.info(f"Exception_raised, saved exception checkpoint: {final_checkpoint.last_checkpoint}")
Example 26
Project: MONAI   Author: Project-MONAI   File: checkpoint_saver.py    License: Apache License 2.0 5 votes vote down vote up
def interval_completed(self, engine) -> None:
        """Callback for train epoch/iteration completed Event.
        Save checkpoint if configure save_interval = N

        """
        interval_checkpoint = self._interval_checkpoint
        assert callable(interval_checkpoint), "Error: _interval_checkpoint function not specified."
        interval_checkpoint(engine, self.save_dict)
        assert self.logger is not None
        assert hasattr(self.logger, "info"), "Error, provided logger has not info attribute."
        # report progress in the unit the scheduler is configured for
        if self.epoch_level:
            self.logger.info(f"Saved checkpoint at epoch: {engine.state.epoch}")
        else:
            self.logger.info(f"Saved checkpoint at iteration: {engine.state.iteration}")
Example 27
Project: MONAI   Author: Project-MONAI   File: lr_schedule_handler.py    License: Apache License 2.0 5 votes vote down vote up
def __init__(
        self,
        lr_scheduler,
        print_lr: bool = True,
        name: Optional[str] = None,
        epoch_level: bool = True,
        step_transform: Callable = lambda engine: (),
    ):
        """
        Args:
            lr_scheduler (torch.optim.lr_scheduler): typically, lr_scheduler should be PyTorch
                lr_scheduler object. If customized version, must have `step` and `get_last_lr` methods.
            print_lr: whether to print out the latest learning rate with logging.
            name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.
            epoch_level: execute lr_scheduler.step() after every epoch or every iteration.
                `True` is epoch level, `False` is iteration level.
            step_transform: a callable that is used to transform the information from `engine`
                to expected input data of lr_scheduler.step() function if necessary.

        Raises:
            ValueError: argument `step_transform` must be a callable.

        """
        # guard clause: validate before storing anything
        if not callable(step_transform):
            raise ValueError("argument `step_transform` must be a callable.")
        self.step_transform = step_transform
        self.lr_scheduler = lr_scheduler
        self.print_lr = print_lr
        self.epoch_level = epoch_level
        self.logger = logging.getLogger(name)

        self._name = name
Example 28
Project: MONAI   Author: Project-MONAI   File: lr_schedule_handler.py    License: Apache License 2.0 5 votes vote down vote up
def __call__(self, engine):
        """Advance the LR scheduler by one step, optionally logging the current rate."""
        step_args = ensure_tuple(self.step_transform(engine))
        self.lr_scheduler.step(*step_args)
        if self.print_lr:
            # NOTE(review): reads the scheduler's private `_last_lr` attribute —
            # relies on PyTorch internals; confirm `get_last_lr()` is not usable here.
            self.logger.info(f"Current learning rate: {self.lr_scheduler._last_lr[0]}")
Example 29
Project: HpBandSter   Author: automl   File: master.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def shutdown(self, shutdown_workers=False):
	"""Shut down the dispatcher (optionally its workers too) and join its thread."""
	flag_repr = str(shutdown_workers)
	self.logger.debug('HBMASTER: shutdown initiated, shutdown_workers = %s'%(flag_repr,))
	self.dispatcher.shutdown(shutdown_workers)
	self.dispatcher_thread.join()
Example 30
Project: HpBandSter   Author: automl   File: master.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def adjust_queue_size(self, number_of_workers=None):
	"""Recompute the job queue bounds from the current worker count and wake waiters.

	Parameters
	----------
	number_of_workers: int, optional
		new number of workers; when None the dispatcher is queried instead
	"""
	self.logger.debug('HBMASTER: number of workers changed to %s'%str(number_of_workers))
	with self.thread_cond:
		self.logger.debug('adjust_queue_size: lock accquired')
		if self.dynamic_queue_size:
			if number_of_workers is None:
				nw = self.dispatcher.number_of_workers()
			else:
				nw = number_of_workers
			lower, upper = self.user_job_queue_sizes
			self.job_queue_sizes = (lower + nw, upper + nw)
			self.logger.info('HBMASTER: adjusted queue size to %s'%str(self.job_queue_sizes))
		self.thread_cond.notify_all()