Python logging.getLogger() Examples

The following code examples show how to use logging.getLogger(). They are drawn from open source Python projects; note that several of them (the pyblish-win and incubator-spot snippets in particular) are written for Python 2, which is why you will see constructs such as `except IOError, e`, `iteritems()` and `exc.message`.
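Before diving in, a quick orientation: logging.getLogger(name) returns the logger registered under that name, creating it on first use, and loggers form a dot-separated hierarchy ('a.b' is a child of 'a'). A minimal sketch of the usual module-level pattern (all names here are illustrative):

import logging

# One logger per module; __name__ yields a dotted, hierarchical name.
logger = logging.getLogger(__name__)

# Configure the root logger once, near program start-up.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(name)s %(message)s',
)

logger.info('application started')  # delivered via the root logger's handler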

Example 1
Project: incubator-spot   Author: apache   File: utilities.py   License: Apache License 2.0
def get_logger(cls, logger_name, create_file=False):

        # create logger for prd_ci
        log = logging.getLogger(logger_name)
        log.setLevel(level=logging.INFO)

        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        if create_file:
            # create file handler for logger.
            fh = logging.FileHandler('SPOT.log')
            fh.setLevel(level=logging.DEBUG)
            fh.setFormatter(formatter)

        # create console handler for logger.
        ch = logging.StreamHandler()
        ch.setLevel(level=logging.DEBUG)
        ch.setFormatter(formatter)

        # add handlers to logger.
        if create_file:
            log.addHandler(fh)

        log.addHandler(ch)
        return log
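Later examples on this page (13 and 38 through 44) invoke this helper as Util.get_logger(..., create_file=...), so it is evidently a classmethod on a Util class. A hedged usage sketch based on those call sites:

# Hypothetical call, mirroring the call sites in Examples 13 and 38-44:
# logs to the console always, and also to SPOT.log when create_file is True.
log = Util.get_logger('SPOT.TEST', create_file=True)
log.info('ingest started')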
Example 2
Project: mlbv   Author: kmac   File: util.py   License: GNU General Public License v3.0
def init_logging(log_file=None, append=False, console_loglevel=logging.INFO):
    """Set up logging to file and console."""
    if log_file is not None:
        if append:
            filemode_val = 'a'
        else:
            filemode_val = 'w'
        logging.basicConfig(level=logging.DEBUG,
                            format="%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s",
                            # datefmt='%m-%d %H:%M',
                            filename=log_file,
                            filemode=filemode_val)
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(console_loglevel)
    # set a format which is simpler for console use
    formatter = logging.Formatter("%(message)s")
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)
    global LOG
    LOG = logging.getLogger(__name__) 
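A usage sketch for this initializer (the file name is hypothetical): after the call, every logger writes full detail to the file, while the console only shows messages at console_loglevel or above.

# Hypothetical invocation: DEBUG and above to mlbv.log, warnings to the console.
init_logging(log_file='mlbv.log', append=True, console_loglevel=logging.WARNING)
logging.getLogger('mlbv.demo').debug('file only')
logging.getLogger('mlbv.demo').warning('file and console')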
Example 3
Project: Hasami   Author: Lokraan   File: main.py   License: MIT License
def setup_logging(config: dict) -> None:
	logging.getLogger("discord.http").setLevel(logging.WARNING)
	logging.getLogger("discord").setLevel(logging.INFO)

	logger = logging.getLogger()

	level = logging.DEBUG if config["debug"] else logging.INFO

	f_handler = logging.FileHandler(filename="hasami.log", encoding="utf-8", mode="w")
	cl_handler = logging.StreamHandler()

	dt_fmt = "%Y-%m-%d %H:%M:%S"
	out_fmt = "[{asctime}] [{levelname:<6}] {name}: {message}"
	logger_fmt = logging.Formatter(out_fmt, dt_fmt, style="{")

	cl_handler.setFormatter(logger_fmt)
	f_handler.setFormatter(logger_fmt)

	logger.addHandler(cl_handler)
	logger.addHandler(f_handler)
	logger.setLevel(level) 
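Note the style="{" argument: since Python 3.2, logging.Formatter accepts a style parameter so the format string can use str.format() fields instead of %-style ones. A standalone sketch of the same formatter (logger name is illustrative):

import logging

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    "[{asctime}] [{levelname:<6}] {name}: {message}",
    "%Y-%m-%d %H:%M:%S",
    style="{",  # interpret the format string with str.format() fields
))
logging.getLogger("demo").addHandler(handler)
logging.getLogger("demo").warning("brace-style formatting")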
Example 4
Project: incubator-spot   Author: apache   File: processing.py   License: Apache License 2.0
def convert(netflow, tmpdir, opts='', prefix=None):
    '''
        Convert `nfcapd` file to a comma-separated output format.

    :param netflow : Path of binary file.
    :param tmpdir  : Path of local staging area.
    :param opts    : A set of options for `nfdump` command.
    :param prefix  : If `prefix` is specified, the file name will begin with that;
                     otherwise, a default `prefix` is used.
    :returns       : Path of CSV-converted file.
    :rtype         : ``str``
    :raises OSError: If an error occurs while executing the `nfdump` command.
    '''
    logger = logging.getLogger('SPOT.INGEST.FLOW.PROCESS')

    with tempfile.NamedTemporaryFile(prefix=prefix, dir=tmpdir, delete=False) as fp:
        command = COMMAND.format(netflow, opts, fp.name)

        logger.debug('Execute command: {0}'.format(command))
        Util.popen(command, raises=True)

        return fp.name 
Example 5
Project: incubator-spot   Author: apache   File: processing.py   License: Apache License 2.0
def convert(logfile, tmpdir, opts='', prefix=None):
    '''
        Copy log file to the local staging area.

    :param logfile: Path of log file.
    :param tmpdir : Path of local staging area.
    :param opts   : A set of options for the `cp` command.
    :param prefix : If `prefix` is specified, the file name will begin with that;
                     otherwise, a default `prefix` is used.
    :returns      : Path of log file in local staging area.
    :rtype        : ``str``
    '''
    logger = logging.getLogger('SPOT.INGEST.PROXY.PROCESS')

    with tempfile.NamedTemporaryFile(prefix=prefix, dir=tmpdir, delete=False) as fp:
        command = COMMAND.format(opts, logfile, fp.name)

        logger.debug('Execute command: {0}'.format(command))
        Util.popen(command, raises=True)

        return fp.name 
Example 6
Project: incubator-spot   Author: apache   File: processing.py   License: Apache License 2.0
def convert(pcap, tmpdir, opts='', prefix=None):
    '''
        Convert `pcap` file to a comma-separated output format.

    :param pcap    : Path of binary file.
    :param tmpdir  : Path of local staging area.
    :param opts    : A set of options for `tshark` command.
    :param prefix  : If `prefix` is specified, the file name will begin with that;
                     otherwise, a default `prefix` is used.
    :returns       : Path of CSV-converted file.
    :rtype         : ``str``
    :raises OSError: If an error occurs while executing the `tshark` command.
    '''
    logger = logging.getLogger('SPOT.INGEST.DNS.PROCESS')

    with tempfile.NamedTemporaryFile(prefix=prefix, dir=tmpdir, delete=False) as fp:
        command = COMMAND.format(pcap, opts, fp.name)

        logger.debug('Execute command: {0}'.format(command))
        Util.popen(command, raises=True)

        return fp.name 
Example 7
Project: incubator-spot   Author: apache   File: utils.py   License: Apache License 2.0
def call(cls, cmd, shell=False):
        '''
            Run command with arguments, wait to complete and return ``True`` on success.

        :param cls: The class as implicit first argument.
        :param cmd: Command string to be executed.
        :returns  : ``True`` on success, otherwise ``None``.
        :rtype    : ``bool``
        '''
        logger = logging.getLogger('SPOT.INGEST.COMMON.UTIL')
        logger.debug('Execute command: {0}'.format(cmd))

        try:
            subprocess.call(cmd, shell=shell)
            return True

        except Exception as exc:
            logger.error('[{0}] {1}'.format(exc.__class__.__name__, exc.message)) 
Example 8
Project: incubator-spot   Author: apache   File: kafka_client.py   License: Apache License 2.0
def _initialize_members(self, topic, server, port, zk_server, zk_port, partitions):

        # get logger instance
        self._logger = logging.getLogger("SPOT.INGEST.KafkaProducer")

        # kafka requirements
        self._server = server
        self._port = port
        self._zk_server = zk_server
        self._zk_port = zk_port
        self._topic = topic
        self._num_of_partitions = partitions
        self._partitions = []
        self._partitioner = None
        self._kafka_brokers = '{0}:{1}'.format(self._server, self._port)

        # create topic with partitions
        self._create_topic()

        self._kafka_conf = self._producer_config(self._kafka_brokers)

        self._p = Producer(**self._kafka_conf) 
Example 9
Project: incubator-spot   Author: apache   File: serializer.py   License: Apache License 2.0
def deserialize(rawbytes):
    '''
        Deserialize given bytes according to the supported Avro schema.

    :param rawbytes: A buffered I/O implementation using an in-memory bytes buffer.
    :returns       : List of ``str`` objects, extracted from the binary stream.
    :rtype         : ``list``
    '''
    decoder = avro.io.BinaryDecoder(io.BytesIO(rawbytes))
    reader  = avro.io.DatumReader(avro.schema.parse(AVSC))

    try: return reader.read(decoder)[list.__name__]
    except Exception as exc:
        logging.getLogger('SPOT.INGEST.COMMON.SERIALIZER')\
            .error('[{0}] {1}'.format(exc.__class__.__name__, exc.message))

    return [] 
Example 10
Project: incubator-spot   Author: apache   File: serializer.py   License: Apache License 2.0
def serialize(value):
    '''
        Convert a ``list`` object to an avro-encoded format.

    :param value: List of ``str`` objects.
    :returns    : A buffered I/O implementation using an in-memory bytes buffer.
    :rtype      : ``str``
    '''
    writer   = avro.io.DatumWriter(avro.schema.parse(AVSC))
    rawbytes = io.BytesIO()

    try:
        writer.write({ list.__name__: value }, avro.io.BinaryEncoder(rawbytes))
        return rawbytes
    except avro.io.AvroTypeException:
        logging.getLogger('SPOT.INGEST.COMMON.SERIALIZER')\
            .error('The type of ``{0}`` is not supported by the Avro schema.'
            .format(type(value).__name__))

    return None 
Example 11
Project: incubator-spot   Author: apache   File: hdfs_client.py   License: Apache License 2.0
def __init__(self, url, mutual_auth, cert=None, verify='true', **kwargs):

        self._logger = logging.getLogger("SPOT.INGEST.HDFS_client")
        session = Session()

        if verify == 'true':
            self._logger.info('SSL verification enabled')
            session.verify = True
            if cert is not None:
                self._logger.info('SSL Cert: ' + cert)
                if ',' in cert:
                    session.cert = [path.strip() for path in cert.split(',')]
                else:
                    session.cert = cert
        elif verify == 'false':
            session.verify = False

        super(SecureKerberosClient, self).__init__(url, mutual_auth, session=session, **kwargs) 
Example 12
Project: incubator-spot   Author: apache   File: hdfs_client.py   License: Apache License 2.0
def get_client(user=None):
    # type: (object) -> Client

    logger = logging.getLogger('SPOT.INGEST.HDFS.get_client')
    hdfs_nm, hdfs_port, hdfs_user = Config.hdfs()
    conf = {'url': '{0}:{1}'.format(hdfs_nm, hdfs_port)}

    if Config.ssl_enabled():
        ssl_verify, ca_location, cert, key = Config.ssl()
        conf.update({'verify': ssl_verify.lower()})
        if cert:
            conf.update({'cert': cert})

    if Config.kerberos_enabled():
        krb_conf = {'mutual_auth': 'OPTIONAL'}
        conf.update(krb_conf)

    # TODO: possible user parameter
    logger.info('Client conf:')
    for k,v in conf.iteritems():
        logger.info(k + ': ' + v)

    client = SecureKerberosClient(**conf)

    return client 
Example 13
Project: incubator-spot   Author: apache   File: iana_transform.py   License: Apache License 2.0
def __init__(self,config,logger=None):

        self._logger = logging.getLogger('OA.IANA') if logger else Util.get_logger('OA.IANA', create_file=False)
        if COL_CLASS in config:
            self._qclass_file_path = config[COL_CLASS]
        if COL_QTYPE in config:
            self._qtype_file_path = config[COL_QTYPE]
        if COL_RCODE in config:
            self._rcode_file_path = config[COL_RCODE]
        if COL_PRESP in config:
            self._http_rcode_file_path = config[COL_PRESP]

        self._qclass_dict = {}
        self._qtype_dict = {}
        self._rcode_dict = {} 
        self._http_rcode_dict = {}
        self._init_dicts() 
Example 14
Project: pyblish-win   Author: pyblish   File: driver.py   License: GNU Lesser General Public License v3.0
def load_grammar(gt="Grammar.txt", gp=None,
                 save=True, force=False, logger=None):
    """Load the grammar (maybe from a pickle)."""
    if logger is None:
        logger = logging.getLogger()
    if gp is None:
        head, tail = os.path.splitext(gt)
        if tail == ".txt":
            tail = ""
        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
    if force or not _newer(gp, gt):
        logger.info("Generating grammar tables from %s", gt)
        g = pgen.generate_grammar(gt)
        if save:
            logger.info("Writing grammar tables to %s", gp)
            try:
                g.dump(gp)
            except IOError, e:
                logger.info("Writing failed:"+str(e)) 
Example 15
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR  = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warn(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ]) 
Example 16
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        m = self.next_message

        INF = logging.getLogger("INF")
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ]) 
Example 17
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_persistent_loggers(self):
        # Logger objects are persistent and retain their configuration, even
        #  if visible references are destroyed.
        self.root_logger.setLevel(logging.INFO)
        foo = logging.getLogger("foo")
        self._watch_for_survival(foo)
        foo.setLevel(logging.DEBUG)
        self.root_logger.debug(self.next_message())
        foo.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
        ])
        del foo
        # foo has survived.
        self._assertTruesurvival()
        # foo has retained its settings.
        bar = logging.getLogger("foo")
        bar.debug(self.next_message())
        self.assert_log_lines([
            ('foo', 'DEBUG', '2'),
            ('foo', 'DEBUG', '3'),
        ]) 
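The behaviour this test checks follows from getLogger being idempotent: the same name always returns the same Logger object, so configuration persists for the life of the process even after local references are deleted. A one-line check:

import logging

assert logging.getLogger("foo") is logging.getLogger("foo")  # same object every time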
Example 18
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_encoding_plain_file(self):
        # In Python 2.x, a plain file object is treated as having no encoding.
        log = logging.getLogger("test")
        fn = tempfile.mktemp(".log")
        # the non-ascii data we write to the log.
        data = "foo\x80"
        try:
            handler = logging.FileHandler(fn)
            log.addHandler(handler)
            try:
                # write non-ascii data to the log.
                log.warning(data)
            finally:
                log.removeHandler(handler)
                handler.close()
            # check we wrote exactly those bytes, ignoring trailing \n etc
            f = open(fn)
            try:
                self.assertEqual(f.read().rstrip(), data)
            finally:
                f.close()
        finally:
            if os.path.isfile(fn):
                os.remove(fn) 
Example 19
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_encoding_cyrillic_unicode(self):
        log = logging.getLogger("test")
        #Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
        message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
        #Ensure it's written in a Cyrillic encoding
        writer_class = codecs.getwriter('cp1251')
        writer_class.encoding = 'cp1251'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        # check we wrote exactly those bytes, ignoring trailing \n etc
        s = stream.getvalue()
        #Compare against what the data should be when encoded in CP-1251
        self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n') 
Example 20
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_encoding_utf16_unicode(self):
        # Issue #19267
        log = logging.getLogger("test")
        message = u'b\u0142\u0105d'
        writer_class = codecs.getwriter('utf-16-le')
        writer_class.encoding = 'utf-16-le'
        stream = cStringIO.StringIO()
        writer = writer_class(stream, 'strict')
        handler = logging.StreamHandler(writer)
        log.addHandler(handler)
        try:
            log.warning(message)
        finally:
            log.removeHandler(handler)
            handler.close()
        s = stream.getvalue()
        self.assertEqual(s, 'b\x00B\x01\x05\x01d\x00\n\x00') 
Example 21
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_config_9_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config9)
            logger = logging.getLogger("compiler.parser")
            #Nothing will be output since both handler and logger are set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9a)
            #Nothing will be output since both handler is still set to WARNING
            logger.info(self.next_message())
            self.assert_log_lines([], stream=output)
            self.apply_config(self.config9b)
            #Message should now be output
            logger.info(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
            ], stream=output) 
Example 22
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_config_10_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config10)
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            #Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output) 
Example 23
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_listen_config_10_ok(self):
        with captured_stdout() as output:
            self.setup_via_listener(json.dumps(self.config10))
            logger = logging.getLogger("compiler.parser")
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger('compiler.lexer')
            #Not output, because filtered
            logger.warning(self.next_message())
            logger = logging.getLogger("compiler.parser.codegen")
            #Output, as not filtered
            logger.error(self.next_message())
            self.assert_log_lines([
                ('WARNING', '1'),
                ('ERROR', '4'),
            ], stream=output) 
Example 24
Project: fs_image   Author: facebookincubator   File: unshare.py   License: MIT License
def __exit__(self, exc_type, exc_val, exc_tb):
        if self._namespace_to_file is not None:
            # We need to close the files to let the namespaces be destroyed
            for f in self._namespace_to_file.values():
                try:
                    f.close()
                # Covering this realistically seems really hard, since this
                # should never fail. Manual test:
                #
                #   >>> log = logging.getLogger('moo')
                #   >>> f = open('/proc/self/ns/mnt', 'rb')
                #   >>> try:
                #   ...     1/0
                #   ... except:
                #   ...     log.exception(f'Closing namespace file {f.name}')
                #   ...
                #   Closing namespace file /proc/self/ns/mnt
                #   Traceback (most recent call last):
                #     File "<stdin>", line 2, in <module>
                #     ZeroDivisionError: division by zero
                except BaseException:  # pragma: no cover
                    log.exception(f'Closing namespace file {f.name}')
            self._namespace_to_file = None

        try:
            os.close(self._root_fd)
        # Same coverage story as above for the `f.close()`
        except BaseException:  # pragma: no cover
            log.exception(f'Closing root directory FD {self._root_fd}')
        self._root_fd = None

        if self._keepalive_proc:
            try:
                self._keepalive_proc.stdin.close()  # "Normally" won't fail
                if self._keepalive_proc.wait() != 0:  # pragma: no cover
                    log.warning('Unshare keepalive exited with {}'.format(
                        self._keepalive_proc.returncode
                    ))
            finally:
                self._keepalive_proc = None
            # By this point, the namespaces should be getting torn down. 
Example 25
Project: fs_image   Author: facebookincubator   File: common.py   License: MIT License
def get_file_logger(py_path: AnyStr):
    return logging.getLogger(os.path.basename(py_path)) 
Example 26
Project: leapp-repository   Author: oamg   File: systemfacts.py   License: Apache License 2.0
def get_firewalls_status():
    ''' Get firewalld status information '''
    logger = logging.getLogger('get_firewalld_status')

    def _get_firewall_status(service_name):
        try:
            ret_list = run(['systemctl', 'is-active', service_name], split=True)['stdout']
            active = ret_list[0] == 'active'
        except CalledProcessError:
            active = False
            logger.debug('The %s service is likely not active', service_name)

        try:
            ret_list = run(['systemctl', 'is-enabled', service_name], split=True)['stdout']
            enabled = ret_list[0] == 'enabled'
        except CalledProcessError:
            enabled = False
            logger.debug('The %s service is likely not enabled nor running', service_name)

        return FirewallStatus(
            active=active,
            enabled=enabled,
        )

    return FirewallsFacts(
        firewalld=_get_firewall_status('firewalld'),
        iptables=_get_firewall_status('iptables'),
        ip6tables=_get_firewall_status('ip6tables'),
    ) 
Example 27
Project: incubator-spot   Author: apache   File: collector.py   License: Apache License 2.0
def _initialize_members(self, hdfs_app_path, kafkaproducer, conf_type):

        # getting parameters.
        self._logger = logging.getLogger('SPOT.INGEST.FLOW')
        self._hdfs_app_path = hdfs_app_path
        self._producer = kafkaproducer

        # get script path
        self._script_path = os.path.dirname(os.path.abspath(__file__))

        # read flow configuration.
        conf_file = "{0}/ingest_conf.json".format(os.path.dirname(os.path.dirname(self._script_path)))
        conf = json.loads(open(conf_file).read())
        self._conf = conf["pipelines"][conf_type]

        # set configuration.
        self._collector_path = self._conf['collector_path']        
        self._dsource = 'flow'
        self._hdfs_root_path = "{0}/{1}".format(hdfs_app_path, self._dsource)

        self._supported_files = self._conf['supported_files']

        # create collector watcher
        self._watcher = FileWatcher(self._collector_path,self._supported_files)
        
        # Multiprocessing. 
        self._processes = conf["collector_processes"]
        self._ingestion_interval = conf["ingestion_interval"]
        self._pool = Pool(processes=self._processes)
        # TODO: review re-use of hdfs.client
        self._hdfs_client = hdfs.get_client() 
Example 28
Project: incubator-spot   Author: apache   File: collector.py   License: Apache License 2.0
def _initialize_members(self,hdfs_app_path,kafka_topic,conf_type):
        
        # getting parameters.
        self._logger = logging.getLogger('SPOT.INGEST.PROXY')
        self._hdfs_app_path = hdfs_app_path
        self._kafka_topic= kafka_topic

        # get script path
        self._script_path = os.path.dirname(os.path.abspath(__file__))

        # read proxy configuration.
        conf_file = "{0}/ingest_conf.json".format(os.path.dirname(os.path.dirname(self._script_path)))
        conf = json.loads(open(conf_file).read())
        self._message_size = conf["kafka"]["message_size"]
        self._conf = conf["pipelines"][conf_type]

        # get collector path.
        self._collector_path = self._conf['collector_path']

        #get supported files
        self._supported_files = self._conf['supported_files']

        # create collector watcher
        self._watcher = FileWatcher(self._collector_path,self._supported_files)

        # Multiprocessing. 
        self._processes = conf["collector_processes"]
        self._ingestion_interval = conf["ingestion_interval"]
        self._pool = Pool(processes=self._processes) 
Example 29
Project: incubator-spot   Author: apache   File: collector.py   License: Apache License 2.0
def ingest_file(file,message_size,topic,kafka_servers):
    
    logger = logging.getLogger('SPOT.INGEST.PROXY.{0}'.format(os.getpid()))
    try:        
        message = ""
        logger.info("Ingesting file: {0} process:{1}".format(file,os.getpid())) 
        with open(file,"rb") as f:
            for line in f:
                message += line
                if len(message) > message_size:
                    KafkaProducer.SendMessage(message, kafka_servers, topic, 0)
                    message = ""
            #send the last package.        
            KafkaProducer.SendMessage(message, kafka_servers, topic, 0)
        rm_file = "rm {0}".format(file)
        Util.execute_cmd(rm_file,logger)
        logger.info("File {0} has been successfully sent to Kafka Topic: {1}".format(file,topic))

    except Exception as err:        
        logger.error("There was a problem, please check the following error message:{0}".format(err.message))
        logger.error("Exception: {0}".format(err)) 
Example 30
Project: incubator-spot   Author: apache   File: collector.py   License: Apache License 2.0
def _initialize_members(self, hdfs_app_path, kafkaproducer, conf_type):

        # getting parameters.
        self._logger = logging.getLogger('SPOT.INGEST.DNS')
        self._hdfs_app_path = hdfs_app_path
        self._producer = kafkaproducer

        # get script path
        self._script_path = os.path.dirname(os.path.abspath(__file__))

        # read dns configuration.
        conf_file = "{0}/ingest_conf.json".format(os.path.dirname(os.path.dirname(self._script_path)))
        conf = json.loads(open(conf_file).read())
        self._conf = conf["pipelines"][conf_type]

        # set configuration.
        self._collector_path = self._conf['collector_path']
        self._dsource = 'dns'
        self._hdfs_root_path = "{0}/{1}".format(hdfs_app_path, self._dsource)

        # set configuration.
        self._pkt_num = self._conf['pkt_num']
        self._pcap_split_staging = self._conf['pcap_split_staging']
        self._supported_files = self._conf['supported_files']

        # create collector watcher
        self._watcher = FileWatcher(self._collector_path, self._supported_files)

        # Multiprocessing.
        self._processes = conf["collector_processes"]
        self._ingestion_interval = conf["ingestion_interval"]
        self._pool = Pool(processes=self._processes)
        # TODO: review re-use of hdfs.client
        self._hdfs_client = hdfs.get_client() 
Example 31
Project: incubator-spot   Author: apache   File: file_watcher.py   License: Apache License 2.0
def __init__(self, path, supported_files, recursive):
        self._logger  = logging.getLogger('SPOT.INGEST.COMMON.FILE_WATCHER')
        self._queue   = []

        super(FileWatcher, self).__init__()

        self._logger.info('Schedule watching "{0}" directory.'.format(path))
        super(FileWatcher, self).schedule(NewFileEventHandler(self), path, recursive)

        self._regexs  = [re.compile(x) for x in supported_files]
        pattern_names = ', '.join(['"%s"' % x for x in supported_files])
        self._logger.info('Supported filenames: {0}'.format(pattern_names))

        self._logger.info('The search in sub-directories is {0}.'
            .format('enabled' if recursive else 'disabled')) 
Example 32
Project: incubator-spot   Author: apache   File: utils.py   License: Apache License 2.0
def get_logger(cls, name, level='INFO', filepath=None, fmt=None):
        '''
            Return a new logger or an existing one, if any.

        :param cls     : The class as implicit first argument.
        :param name    : Return a logger with the specified name.
        :param filepath: Path of the file, where logs will be saved. If it is not set,
                         redirects the log stream to `sys.stdout`.
        :param fmt     : A format string for the message as a whole, as well as a format
                         string for the date/time portion of the message.
                         Default: '%(asctime)s %(levelname)-8s %(name)-34s %(message)s'
        :rtype         : :class:`logging.Logger`
        '''
        logger = logging.getLogger(name)
        # .............................if logger already exists, return it
        if logger.handlers: return logger

        if filepath:
            # .........................rotate log file (one rotation per 512 KB)
            handler  = RotatingFileHandler(filepath, maxBytes=524288, backupCount=8)
        else:
            handler  = logging.StreamHandler(sys.stdout)

        fmt = fmt or '%(asctime)s %(levelname)-8s %(name)-34s %(message)s'
        handler.setFormatter(logging.Formatter(fmt))

        try:
            logger.setLevel(getattr(logging, level.upper() if level else 'INFO'))
        except: logger.setLevel(logging.INFO)

        logger.addHandler(handler)
        return logger 
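A hedged usage sketch (the path is hypothetical): because the helper returns early when logger.handlers is non-empty, repeated calls are safe, and every module asking for the same name shares one configured logger. Example 34 on this page calls it as Util.get_logger(...), so a Util class is assumed here.

# First call configures the logger; the second returns it unchanged.
log = Util.get_logger('SPOT.INGEST', level='DEBUG', filepath='/tmp/ingest.log')
same = Util.get_logger('SPOT.INGEST')
assert log is same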
Example 33
Project: incubator-spot   Author: apache   File: kafka_client.py   License: Apache License 2.0
def _initialize_members(self, topic, server, port, zk_server, zk_port, partition):

        self._logger = logging.getLogger("SPOT.INGEST.KafkaConsumer")

        self._topic = topic
        self._server = server
        self._port = port
        self._zk_server = zk_server
        self._zk_port = zk_port
        self._id = partition
        self._kafka_brokers = '{0}:{1}'.format(self._server, self._port)
        self._kafka_conf = self._consumer_config(self._id, self._kafka_brokers) 
Example 34
Project: incubator-spot   Author: apache   File: listener.py   License: Apache License 2.0
def streaming_listener(**kwargs):
    '''
        Initialize the Spark job.
    '''
    Util.get_logger('SPOT.INGEST', kwargs.pop('log_level'))

    logger  = logging.getLogger('SPOT.INGEST.COMMON.LISTENER')
    logger.info('Initializing Spark Streaming Listener...')

    dbtable = '{0}.{1}'.format(kwargs.pop('database'), kwargs['type'])
    topic   = kwargs.pop('topic')

    sc      = SparkContext(appName=kwargs['app_name'] or topic)
    logger.info('Connect to Spark Cluster as job "{0}" and broadcast variables on it.'
        .format(kwargs.pop('app_name') or topic))
    ssc     = StreamingContext(sc, batchDuration=kwargs['batch_duration'])
    logger.info('Streaming data will be divided into batches of {0} seconds.'
        .format(kwargs.pop('batch_duration')))
    hsc     = HiveContext(sc)
    logger.info('Read Hive\'s configuration to integrate with data stored in it.')

    import pipelines
    module  = getattr(pipelines, kwargs.pop('type'))
    stream  = module.StreamPipeline(ssc, kwargs.pop('zkquorum'),
                kwargs.pop('group_id') or topic, { topic: int(kwargs.pop('partitions')) })

    schema  = stream.schema
    segtype = stream.segtype

    stream.dstream\
        .map(lambda x: module.StreamPipeline.parse(x))\
        .filter(lambda x: bool(x))\
        .foreachRDD(lambda x: store(x, hsc, dbtable, topic, schema, segtype))

    ssc.start()
    logger.info('Start the execution of the streams.')
    ssc.awaitTermination() 
Example 35
Project: incubator-spot   Author: apache   File: listener.py   License: Apache License 2.0
def store(rdd, hsc, dbtable, topic, schema=None, segtype='segments'):
    '''
        Interface for saving the content of the streaming :class:`DataFrame` out into
    Hive storage.

    :param rdd    : The content as a :class:`pyspark.RDD` of :class:`Row`.
    :param hsc    : A variant of Spark SQL that integrates with data stored in Hive.
    :param dbtable: The specified table in Hive database.
    :param topic  : Name of the topic to listen for incoming segments.
    :param schema : The schema of this :class:`DataFrame` as a
                    :class:`pyspark.sql.types.StructType`.
    :param segtype: The type of the received segments.
    '''
    logger = logging.getLogger('SPOT.INGEST.COMMON.LISTENER')

    if rdd.isEmpty():
        logger.info(' ---- LISTENING KAFKA TOPIC: {0} ---- '.format(topic))
        return

    hsc.setConf('hive.exec.dynamic.partition', 'true')
    hsc.setConf('hive.exec.dynamic.partition.mode', 'nonstrict')

    logger.info('Received {0} from topic. [Rows: {1}]'.format(segtype, rdd.count()))
    logger.info('Create distributed collection for partition "{0}".'
        .format(rdd.first()[0].replace('-', '').replace(' ', '')[:10]))

    df = hsc.createDataFrame(rdd, schema)
    df.write.format('parquet').mode('append').insertInto(dbtable)
    logger.info(' *** REGISTRATION COMPLETED *** ') 
Example 36
Project: incubator-spot   Author: apache   File: file_collector.py   License: Apache License 2.0
def _initialize_members(self,collector_path,supported_files):        

        # initializing observer.
        event_handler = NewFileEvent(self)
        self._observer = Observer()
        self._observer.schedule(event_handler,collector_path)

        self._collector_path = collector_path
        self._files_queue = Queue()
        self._supported_files = supported_files

        self._logger = logging.getLogger('SPOT.INGEST.WATCHER')    
        self._logger.info("Creating File watcher")
        self._logger.info("Supported Files: {0}".format(self._supported_files)) 
Example 37
Project: incubator-spot   Author: apache   File: hdfs_client.py   License: Apache License 2.0
def get_client(user=None):
    # type: (object) -> Client

    logger = logging.getLogger('SPOT.INGEST.HDFS.get_client')
    hdfs_nm, hdfs_port, hdfs_user = Config.hdfs()
    conf = {'url': '{0}:{1}'.format(hdfs_nm, hdfs_port),
            'mutual_auth': 'OPTIONAL'
            }

    if Config.ssl_enabled():
        ssl_verify, ca_location, cert, key = Config.ssl()
        conf.update({'verify': ssl_verify.lower()})
        if cert:
            conf.update({'cert': cert})

    if Config.kerberos_enabled():
        # TODO: handle other conditions
        krb_conf = {'mutual_auth': 'OPTIONAL'}
        conf.update(krb_conf)

    # TODO: possible user parameter
    logger.info('Client conf:')
    for k,v in conf.iteritems():
        logger.info(k + ': ' + v)

    client = SecureKerberosClient(**conf)

    return client 
Example 38
Project: incubator-spot   Author: apache   File: flow_oa.py   License: Apache License 2.0
def _initialize_members(self,date,limit,logger): 

        # get logger if exists. if not, create new instance.
        self._logger = logging.getLogger('OA.Flow') if logger else Util.get_logger('OA.Flow',create_file=False)

        # initialize required parameters.
        self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
        self._date = date
        self._table_name = "flow"
        self._flow_results = []
        self._limit = limit
        self._data_path = None
        self._ipynb_path = None
        self._ingest_summary_path = None
        self._flow_scores = []
        self._results_delimiter = '\t'
        

        # get app configuration.
        self._spot_conf = Util.get_spot_conf()

        # get scores fields conf
        conf_file = "{0}/flow_conf.json".format(self._scrtip_path)
        self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)

        # initialize data engine
        self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '') 
Example 39
Project: incubator-spot   Author: apache   File: proxy_oa.py   License: Apache License 2.0
def _initialize_members(self,date,limit,logger):

        # get logger if exists. if not, create new instance.
        self._logger = logging.getLogger('OA.PROXY') if logger else Util.get_logger('OA.PROXY',create_file=False)

        # initialize required parameters.
        self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
        self._date = date
        self._table_name = "proxy"
        self._proxy_results = []
        self._limit = limit
        self._data_path = None
        self._ipynb_path = None
        self._ingest_summary_path = None
        self._proxy_scores = []
        self._proxy_scores_headers = []
        self._proxy_extra_columns = []
        self._results_delimiter = '\t'

        # get app configuration.
        self._spot_conf = Util.get_spot_conf()

        # get scores fields conf
        conf_file = "{0}/proxy_conf.json".format(self._scrtip_path)
        self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)

        # initialize data engine
        self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '') 
Example 40
Project: incubator-spot   Author: apache   File: dns_oa.py   License: Apache License 2.0
def _initialize_members(self,date,limit,logger):
        
        # get logger if exists. if not, create new instance.
        self._logger = logging.getLogger('OA.DNS') if logger else Util.get_logger('OA.DNS',create_file=False)

        # initialize required parameters.
        self._scrtip_path = os.path.dirname(os.path.abspath(__file__))
        self._date = date
        self._table_name = "dns"
        self._dns_results = []
        self._limit = limit
        self._data_path = None
        self._ipynb_path = None
        self._ingest_summary_path = None
        self._dns_scores = []
        self._dns_scores_headers = []
        self._results_delimiter = '\t'
        self._details_limit = 250

        # get app configuration.
        self._spot_conf = Util.get_spot_conf()

        # get scores fields conf
        conf_file = "{0}/dns_conf.json".format(self._scrtip_path)
        self._conf = json.loads(open (conf_file).read(),object_pairs_hook=OrderedDict)

        # initialize data engine
        self._db = self._spot_conf.get('conf', 'DBNAME').replace("'", "").replace('"', '') 
Example 41
Project: incubator-spot   Author: apache   File: data.py   License: Apache License 2.0
def __init__(self,db, pipeline,logger=None):

        # get logger if exists. if not, create new instance.
        self._logger = logging.getLogger('OA.DATA')  if logger else Util.get_logger('OA.DATA',create_file=False)       
        self._initialize_engine(db, pipeline) 
Example 42
Project: incubator-spot   Author: apache   File: network_context.py   License: Apache License 2.0
def __init__(self, config, logger=None):

        self._nc_file_path = None
        self._logger = logging.getLogger('OA.NC') if logger else Util.get_logger('OA.NC', create_file=False)

        if NC in config:
            self._nc_file_path = config[NC]

        self._nc_dict = {}
        self._init_dicts()
Example 43
Project: incubator-spot   Author: apache   File: geoloc.py   License: Apache License 2.0
def __init__(self,file,logger=None):

        self._logger = logging.getLogger('OA.GEO') if logger else Util.get_logger('OA.GEO',create_file=False) 
        self._ip_localization_file = file
        self._ip_localization_ranges = self._get_low_ips_in_ranges() 
Example 44
Project: incubator-spot   Author: apache   File: gti.py   License: Apache License 2.0
def __init__(self,conf,logger=None):

        # get logger if exists. if not, create new instance.
        self._logger = logging.getLogger('OA.GTI')  if logger else Util.get_logger('OA.GTI',create_file=False)       
        self._gti_rest_client_path = conf['refclient']        
        self._gti_ci = conf['ci']
        self._gti_password = conf['password']
        self._gti_user = conf['user']
        self._gti_server = conf['server']        
        self._category_file = conf["category_file"]
        self._category_dict = {}
        self._load_category_dicts() 
Example 45
Project: jumpserver-python-sdk   Author: jumpserver   File: utils.py   License: GNU General Public License v2.0
def get_logger(filename):
    return logging.getLogger('jms.'+filename) 
Example 46
Project: ubittool   Author: carlosperate   File: gui.py   License: MIT License
def activate(self):
        """Configure std out/in to send to write to the console text widget."""
        sys.stdout = StdoutRedirector(self, text_color="#0D4")
        sys.stderr = StdoutRedirector(self, text_color="#D00")
        logger = logging.getLogger()
        logger.setLevel(level=logging.INFO)
        logging_handler_out = logging.StreamHandler(sys.stdout)
        logging_handler_out.setLevel(logging.INFO)
        logger.addHandler(logging_handler_out)
        logging_handler_err = logging.StreamHandler(sys.stderr)
        logging_handler_err.setLevel(logging.WARNING)
        logger.addHandler(logging_handler_err) 
Example 47
Project: tom-bot   Author: maartenberg   File: registry.py   License: MIT License
def get_easy_logger(name, level=None):
    ''' Create a logger with the given name and optionally a level. '''
    result = logging.getLogger(name)
    if level:
        result.setLevel(level)
    return result 
Example 48
Project: tom-bot   Author: maartenberg   File: system_plugin.py   License: MIT License
def logdebug_cb(bot, message=None, *args, **kwargs):
    ''' Temporarily set the loglevel to debug. '''
    if message:
        if not isadmin(bot, message):
            return 'Not authorized.'
    logging.getLogger().setLevel(logging.DEBUG)
    return 'Ok.' 
Example 49
Project: tom-bot   Author: maartenberg   File: system_plugin.py   License: MIT License
def loginfo_cb(bot, message=None, *args, **kwargs):
    ''' Temporarily (re)set the loglevel to info. '''
    if message:
        if not isadmin(bot, message):
            return 'Not authorized.'
    logging.getLogger().setLevel(logging.INFO)
    return 'Ok.' 
Example 50
Project: WazeRouteCalculator   Author: kovacsbalu   File: WazeRouteCalculator.py   License: GNU General Public License v3.0
def __init__(self, start_address, end_address, region='EU', vehicle_type='', avoid_toll_roads=False, avoid_subscription_roads=False, avoid_ferries=False, log_lvl=None):
        self.log = logging.getLogger(__name__)
        self.log.addHandler(logging.NullHandler())
        if log_lvl:
            self.log.warning("log_lvl is deprecated please check example.py ")
        self.log.info("From: %s - to: %s", start_address, end_address)
        region = region.upper()
        if region == 'NA':  # North America
            region = 'US'
        self.region = region
        self.vehicle_type = ''
        if vehicle_type and vehicle_type in self.VEHICLE_TYPES:
            self.vehicle_type = vehicle_type.upper()
        self.route_options = ['AVOID_TRAILS']
        if avoid_toll_roads:
            self.route_options.append('AVOID_TOLL_ROADS')
        self.avoid_subscription_roads = avoid_subscription_roads
        if avoid_ferries:
            self.route_options.append('AVOID_FERRIES')
        if self.already_coords(start_address):  # See if we have coordinates or address to resolve
            self.start_coords = self.coords_string_parser(start_address)
        else:
            self.start_coords = self.address_to_coords(start_address)
        self.log.debug('Start coords: (%s, %s)', self.start_coords["lat"], self.start_coords["lon"])
        if self.already_coords(end_address):  # See if we have coordinates or address to resolve
            self.end_coords = self.coords_string_parser(end_address)
        else:
            self.end_coords = self.address_to_coords(end_address)
        self.log.debug('End coords: (%s, %s)', self.end_coords["lat"], self.end_coords["lon"]) 
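The addHandler(logging.NullHandler()) call above is the standard pattern for library code: it suppresses the "no handlers could be found" warning without deciding where output goes, leaving that to the application. A minimal sketch of both sides (the 'mylib' name is illustrative):

import logging

# Library side: attach a do-nothing handler and configure nothing else.
lib_log = logging.getLogger('mylib')
lib_log.addHandler(logging.NullHandler())

# Application side: opting in makes the library's records visible.
logging.basicConfig(level=logging.DEBUG)
lib_log.debug('shown once the application configures logging')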
Example 51
Project: pyblish-win   Author: pyblish   File: driver.py   License: GNU Lesser General Public License v3.0
def __init__(self, grammar, convert=None, logger=None):
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert 
Example 52
Project: pyblish-win   Author: pyblish   File: btm_matcher.py   License: GNU Lesser General Public License v3.0
def __init__(self):
        self.match = set()
        self.root = BMNode()
        self.nodes = [self.root]
        self.fixers = []
        self.logger = logging.getLogger("RefactoringTool") 
Example 53
Project: pyblish-win   Author: pyblish   File: fixer_base.py   License: GNU Lesser General Public License v3.0
def set_filename(self, filename):
        """Set the filename, and a logger derived from it.

        The main refactoring tool should call this.
        """
        self.filename = filename
        self.logger = logging.getLogger(filename) 
Example 54
Project: pyblish-win   Author: pyblish   File: config.py   License: GNU Lesser General Public License v3.0
def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate 
Example 55
Project: pyblish-win   Author: pyblish   File: config.py   License: GNU Lesser General Public License v3.0
def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental) 
Example 56
Project: pyblish-win   Author: pyblish   File: test_urllib2net.py   License: GNU Lesser General Public License v3.0
def setUp(self):
        if 0:  # for debugging
            import logging
            logger = logging.getLogger("test_urllib2net")
            logger.addHandler(logging.StreamHandler())

    # XXX The rest of these tests aren't very good -- they don't check much.
    # They do sometimes catch some major disasters, though. 
Example 57
Project: pyblish-win   Author: pyblish   File: test_urllib2net.py   License: GNU Lesser General Public License v3.0
def _test_urls(self, urls, handlers, retry=True):
        import time
        import logging
        debug = logging.getLogger("test_urllib2").debug

        urlopen = urllib2.build_opener(*handlers).open
        if retry:
            urlopen = _wrap_with_retry_thrice(urlopen, urllib2.URLError)

        for url in urls:
            if isinstance(url, tuple):
                url, req, expected_err = url
            else:
                req = expected_err = None
            with test_support.transient_internet(url):
                debug(url)
                try:
                    f = urlopen(url, req, TIMEOUT)
                except EnvironmentError as err:
                    debug(err)
                    if expected_err:
                        msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
                               (expected_err, url, req, type(err), err))
                        self.assertIsInstance(err, expected_err, msg)
                except urllib2.URLError as err:
                    if isinstance(err[0], socket.timeout):
                        print >>sys.stderr, "<timeout: %s>" % url
                        continue
                    else:
                        raise
                else:
                    try:
                        with test_support.transient_internet(url):
                            buf = f.read()
                            debug("read %d bytes" % len(buf))
                    except socket.timeout:
                        print >>sys.stderr, "<timeout: %s>" % url
                    f.close()
            debug("******** next url coming up...")
            time.sleep(0.1) 
Example 58
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = logger_dict.copy()
            self.saved_level_names = logging._levelNames.copy()
        finally:
            logging._releaseLock()

        # Set two unused loggers: one non-ASCII and one Unicode.
        # This is to test correct operation when sorting existing
        # loggers in the configuration code. See issue 8201.
        logging.getLogger("\xab\xd7\xbb")
        logging.getLogger(u"\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        self.stream = cStringIO.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        self.root_logger.addHandler(self.root_hdlr) 
Example 59
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_nested_inherited(self):
        #Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR  = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warn(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warn(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ]) 
Example 60
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_invalid_name(self):
        self.assertRaises(TypeError, logging.getLogger, any) 
Example 61
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def setUp(self):
        BaseTest.setUp(self)
        self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
                                                        self.root_hdlr)
        self.mem_logger = logging.getLogger('mem')
        self.mem_logger.propagate = 0
        self.mem_logger.addHandler(self.mem_hdlr) 
Example 62
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_config0_ok(self):
        # A simple config file which overrides the default settings.
        with captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([]) 
Example 63
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_config1_ok(self, config=config1):
        # A config file defining a sub-parser as well.
        with captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([]) 
Example 64
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_config4_ok(self):
        # A config file specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4)
            logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([]) 
Example 65
Project: pyblish-win   Author: pyblish   File: test_logging.py   License: GNU Lesser General Public License v3.0
def test_output(self):
        # The log message sent to the SocketHandler is properly received.
        logger = logging.getLogger("tcp")
        logger.error("spam")
        logger.debug("eggs")
        self.assertEqual(self.get_output(), "spam\neggs\n") 
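The 'tcp' logger has no handlers added in the test body; the fixture (not shown) attaches a SocketHandler pointed at an in-process TCP server. A minimal sketch of the sending side, assuming a listener on the default logging port:

import logging
import logging.handlers

sh = logging.handlers.SocketHandler(
    'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
tcp = logging.getLogger('tcp')
tcp.setLevel(logging.DEBUG)   # DEBUG must pass for 'eggs' to arrive
tcp.addHandler(sh)

tcp.error('spam')             # each record is pickled and sent over the socket
tcp.debug('eggs')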
Example 66
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_config0_ok(self):
        # A simple config which overrides the default settings.
        with captured_stdout() as output:
            self.apply_config(self.config0)
            logger = logging.getLogger()
            # Won't output anything
            logger.info(self.next_message())
            # Outputs a message
            logger.error(self.next_message())
            self.assert_log_lines([
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([]) 
Example 67
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_config1_ok(self, config=config1):
        # A config defining a sub-parser as well.
        with captured_stdout() as output:
            self.apply_config(config)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([]) 
Example 68
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_config4_ok(self):
        # A config specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([]) 
Example 69
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_config4a_ok(self):
        # A config specifying a custom formatter class.
        with captured_stdout() as output:
            self.apply_config(self.config4a)
            #logger = logging.getLogger()
            try:
                raise RuntimeError()
            except RuntimeError:
                logging.exception("just testing")
            sys.stdout.seek(0)
            self.assertEqual(output.getvalue(),
                "ERROR:root:just testing\nGot a [RuntimeError]\n")
            # Original logger output is empty
            self.assert_log_lines([]) 
Example 70
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_config7_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config1)
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertTrue(logger.disabled)
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '3'),
                ('ERROR', '4'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])

    # Same as test_config7_ok, but don't disable old loggers.
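The disabling behaviour the first half of this test observes is controlled by the disable_existing_loggers switch: by default, re-applying a config marks every pre-existing logger not named in it as disabled. A one-line sketch of opting out (the file name is illustrative):

import logging.config

logging.config.fileConfig('logging.ini', disable_existing_loggers=False)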
Example 71
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_listen_config_1_ok(self):
        with captured_stdout() as output:
            self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
            logger = logging.getLogger("compiler.parser")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([]) 
Example 72
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_out_of_order(self):
        self.apply_config(self.out_of_order)
        handler = logging.getLogger('mymodule').handlers[0]
        self.assertIsInstance(handler.target, logging.Handler) 
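'out_of_order' is a dict-based config (not shown) in which a MemoryHandler names its target handler before that handler is defined; the assertion confirms dictConfig resolved the string reference into a real Handler object. A hedged reconstruction of such a config:

import logging
import logging.config

config = {
    'version': 1,
    'handlers': {
        'mem': {
            'class': 'logging.handlers.MemoryHandler',
            'capacity': 10,
            'target': 'console',                  # reference by handler name
        },
        'console': {'class': 'logging.StreamHandler'},
    },
    'loggers': {'mymodule': {'handlers': ['mem']}},
}
logging.config.dictConfig(config)
handler = logging.getLogger('mymodule').handlers[0]
assert isinstance(handler.target, logging.Handler)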
Example 73
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_child_loggers(self):
        r = logging.getLogger()
        l1 = logging.getLogger('abc')
        l2 = logging.getLogger('def.ghi')
        c1 = r.getChild('xyz')
        c2 = r.getChild('uvw.xyz')
        self.assertTrue(c1 is logging.getLogger('xyz'))
        self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
        c1 = l1.getChild('def')
        c2 = c1.getChild('ghi')
        c3 = l1.getChild('def.ghi')
        self.assertTrue(c1 is logging.getLogger('abc.def'))
        self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
        self.assertTrue(c2 is c3) 
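getChild is just a convenience: it returns the same object getLogger would for the dotted concatenation, which is handy when the prefix lives in a variable. A short usage sketch:

import logging

pkg = logging.getLogger('myapp.io')
reader = pkg.getChild('reader')
assert reader is logging.getLogger('myapp.io.reader')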
Example 74
Project: incubator-spot   Author: apache   File: processing.py    Apache License 2.0 4 votes vote down vote up
def prepare(csvfile, max_req_size):
    '''
        Prepare text-formatted data for transmission through the Kafka cluster.

        This method takes a CSV file and groups its lines into segments according
    to the pattern '%Y%m%d%h'. If a segment exceeds the maximum request size, it
    is divided into smaller ones so that they can be transmitted.

    :param csvfile     : Path of CSV-converted file; result of `convert` method.
    :param max_req_size: The maximum size of a request.
    :returns           : A generator which yields the timestamp (in milliseconds) and a
                         list of lines from the CSV-converted file.
    :rtype             : :class:`types.GeneratorType`
    :raises IOError    : If the given file contains no valid lines.
    '''
    msg_list  = []
    msg_size  = segmentid = 0
    logger    = logging.getLogger('SPOT.INGEST.FLOW.PROCESS')
    partition = timestamp = None
    pattern   = re.compile('[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}')

    with open(csvfile, 'r') as fp:
        for line in fp:
            value = line.strip()
            if not value: continue

            match = pattern.search(value.split(',')[0])
            if not match: continue

            size  = sys.getsizeof(value)
            # .........................take the first 13 characters of the `search`
            # result as the `partition`, e.g. '2018-03-20 09'
            if match.group()[:13] == partition and (msg_size + size) < max_req_size:
                msg_list.append(value)
                msg_size += size
                continue

            # .........................if the hour is different or the message size is
            # above the maximum, then yield existing list and continue with an empty one
            if timestamp:
                logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,
                    len(msg_list), msg_size))
                segmentid += 1

                yield (int(timestamp.total_seconds() * 1000), msg_list)

            msg_list  = [value]
            msg_size  = size
            partition = match.group()[:13]
            timestamp = datetime.strptime(match.group(), '%Y-%m-%d %H:%M:%S') - EPOCH

    # .................................send the last lines from the file. The check of
    # `timestamp` is in case the file is empty and `timestamp` is still ``None``
    if not timestamp:
        raise IOError('CSV-converted file has no valid lines.')

    logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,
        len(msg_list), msg_size))

    yield (int(timestamp.total_seconds() * 1000), msg_list) 
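A hedged usage sketch for this generator; the path, the size cap, and the send callable are illustrative stand-ins for the project's actual Kafka producer:

def send(key, value):
    pass  # placeholder for e.g. producer.SendMessage(...)

for timestamp_ms, lines in prepare('/tmp/nfcapd.csv', max_req_size=1048576):
    # one request per segment: lines sharing the same hour, capped in size
    send(key=str(timestamp_ms), value='\n'.join(lines))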
Example 75
Project: incubator-spot   Author: apache   File: collector.py    Apache License 2.0 4 votes vote down vote up
def _ingest_file(new_file, hdfs_root_path, producer, topic):

    logger = logging.getLogger('SPOT.INGEST.FLOW.{0}'.format(os.getpid()))

    try:

        # get file name and date.
        file_name = new_file.split('/')[-1]
        file_date = file_name.split('.')[1]
        file_date_path = file_date[0:8]
        file_date_hour = file_date[8:10]

        # hdfs path with timestamp.
        hdfs_path = "{0}/binary/{1}/{2}".format(hdfs_root_path, file_date_path, file_date_hour)
        hdfs_file = "{0}/{1}".format(hdfs_path, file_name)

        try:
            if len(hdfs.list_dir(hdfs_path)) == 0:
                logger.info('creating directory: ' + hdfs_path)
                hdfs.mkdir(hdfs_path)
            logger.info('uploading file to hdfs: ' + hdfs_file)
            result = hdfs.upload_file(hdfs_path, new_file)
            if not result:
                logger.error('File failed to upload: ' + hdfs_file)
                raise HdfsException
            else:
                rm_file = "rm {0}".format(new_file)
                logger.info("Removing files from local staging: {0}".format(rm_file))
                Util.execute_cmd(rm_file, logger)

        except HdfsException as err:
            logger.error('Exception: ' + err.exception)
            logger.info('Check Hdfs Connection settings and server health')

    except Exception as err:
        logger.error("There was a problem, Exception: {0}".format(err))

    # create event for workers to process the file.
    # logger.info("Sending file to worker number: {0}".format(partition))
    # NOTE: `hdfs_file` is only bound if the block above built the HDFS path,
    # so a very early failure would surface here as a NameError.
    try:
        producer.SendMessage(hdfs_file, topic)
        logger.info("File {0} has been successfully sent to Kafka Topic to: {1}".format(hdfs_file, topic))
    except Exception as err:
        logger.info("File {0} failed to be sent to Kafka Topic to: {1}".format(hdfs_file, topic))
        logger.error("Error: {0}".format(err)) 
Example 76
Project: incubator-spot   Author: apache   File: processing.py    Apache License 2.0 4 votes vote down vote up
def prepare(csvfile, max_req_size):
    '''
        Prepare text-formatted data for transmission through the Kafka cluster.

        This method takes a CSV file and groups its lines into segments according
    to the pattern '%Y%m%d%h'. If a segment exceeds the maximum request size, it
    is divided into smaller ones so that they can be transmitted.

    :param csvfile     : Path of CSV-converted file; result of `convert` method.
    :param max_req_size: The maximum size of a request.
    :returns           : A generator which yields the timestamp (in milliseconds) and a
                         list of lines from the CSV-converted file.
    :rtype             : :class:`types.GeneratorType`
    :raises IOError    : If the given file contains no valid lines.
    '''
    msg_list  = []
    msg_size  = segmentid = 0
    logger    = logging.getLogger('SPOT.INGEST.DNS.PROCESS')
    partition = timestamp = None
    # match an epoch timestamp like '1504869119.123456789'; the dot is escaped
    # so it matches literally instead of acting as a wildcard
    pattern   = re.compile(r'[0-9]{10}\.[0-9]{9}')

    with open(csvfile, 'r') as fp:
        for line in fp:
            value = line.strip()
            if not value: continue

            match = pattern.search(value.split(',')[2])
            if not match: continue

            size  = sys.getsizeof(value)
            # .........................take the first 15 characters of the line - not
            # the result of `search` - as the `partition`, e.g. 'Sep  8, 2017 11'
            if value[:15] == partition and (msg_size + size) < max_req_size:
                msg_list.append(value)
                msg_size += size
                continue

            # .........................if the hour is different or the message size is
            # above the maximum, then yield existing list and continue with an empty one
            if timestamp:
                logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,
                    len(msg_list), msg_size))
                segmentid += 1

                yield (int(timestamp.total_seconds() * 1000), msg_list)

            msg_list  = [value]
            msg_size  = size
            partition = value[:15]
            timestamp = datetime.fromtimestamp(float(match.group())) - EPOCH

    # .................................send the last lines from the file. The check of
    # `timestamp` is in case the file is empty and `timestamp` is still ``None``.
    if not timestamp:
        raise IOError('CSV-converted file has no valid lines.')

    logger.debug('Yield segment-{0}: {1} lines, {2} bytes'.format(segmentid,
        len(msg_list), msg_size))

    yield (int(timestamp.total_seconds() * 1000), msg_list) 
Example 77
Project: alfred-yubikey-otp   Author: robertoriv   File: workflow.py    MIT License 4 votes vote down vote up
def logger(self):
        """Logger that logs to both console and a log file.

        If Alfred's debugger is open, log level will be ``DEBUG``,
        else it will be ``INFO``.

        Use :meth:`open_log` to open the log file in Console.

        :returns: an initialised :class:`~logging.Logger`

        """
        if self._logger:
            return self._logger

        # Initialise new logger and optionally handlers
        logger = logging.getLogger('')

        # Only add one set of handlers
        # Exclude from coverage, as pytest will have configured the
        # root logger already
        if not len(logger.handlers):  # pragma: no cover

            fmt = logging.Formatter(
                '%(asctime)s %(filename)s:%(lineno)s'
                ' %(levelname)-8s %(message)s',
                datefmt='%H:%M:%S')

            logfile = logging.handlers.RotatingFileHandler(
                self.logfile,
                maxBytes=1024 * 1024,
                backupCount=1)
            logfile.setFormatter(fmt)
            logger.addHandler(logfile)

            console = logging.StreamHandler()
            console.setFormatter(fmt)
            logger.addHandler(console)

        if self.debugging:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        self._logger = logger

        return self._logger 
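Two details worth noting in this property: logging.getLogger('') returns the process-wide root logger, so the `if not len(logger.handlers)` guard is what keeps repeated property access (or a test runner that has already configured logging) from stacking duplicate handlers and double-printing every record; and the cached `self._logger` makes every subsequent call cheap.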
Example 78
Project: pyblish-win   Author: pyblish   File: refactor.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def __init__(self, fixer_names, options=None, explicit=None):
        """Initializer.

        Args:
            fixer_names: a list of fixers to import
            options: a dict with configuration.
            explicit: a list of fixers to run even if they are marked as
                explicit (i.e. normally opt-in only).
        """
        self.fixers = fixer_names
        self.explicit = explicit or []
        self.options = self._default_options.copy()
        if options is not None:
            self.options.update(options)
        if self.options["print_function"]:
            self.grammar = pygram.python_grammar_no_print_statement
        else:
            self.grammar = pygram.python_grammar
        # When this is True, the refactor*() methods will call write_file() even
        # for files that were not changed during refactoring, provided the
        # refactor method's write parameter was True.
        self.write_unchanged_files = self.options.get("write_unchanged_files")
        self.errors = []
        self.logger = logging.getLogger("RefactoringTool")
        self.fixer_log = []
        self.wrote = False
        self.driver = driver.Driver(self.grammar,
                                    convert=pytree.convert,
                                    logger=self.logger)
        self.pre_order, self.post_order = self.get_fixers()


        self.files = []  # List of files that were or should be modified

        self.BM = bm.BottomMatcher()
        self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
        self.bmi_post_order = []

        for fixer in chain(self.post_order, self.pre_order):
            if fixer.BM_compatible:
                self.BM.add_fixer(fixer)
                # remove fixers that will be handled by the bottom-up
                # matcher
            elif fixer in self.pre_order:
                self.bmi_pre_order.append(fixer)
            elif fixer in self.post_order:
                self.bmi_post_order.append(fixer)

        self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
        self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order) 
Example 79
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def test_flat(self):
        # Logging levels in a flat logger namespace.
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warn(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warn(m())
        DEB.info(m())
        DEB.debug(m())

        # These should not log.
        ERR.warn(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ]) 
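The assertion table encodes the core filtering rule: a call is handled only when its level is at least the logger's effective level, which is why ERR drops WARNING and below while DEB passes everything. A minimal sketch of the same rule:

import logging

log = logging.getLogger('demo.levels')
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)

log.debug('dropped: DEBUG(10) < INFO(20)')
log.warning('emitted: WARNING(30) >= INFO(20)')
assert not log.isEnabledFor(logging.DEBUG)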
Example 80
Project: pyblish-win   Author: pyblish   File: test_logging.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def test_config7_ok(self):
        with captured_stdout() as output:
            self.apply_config(self.config1a)
            logger = logging.getLogger("compiler.parser")
            # See issue #11424. compiler-hyphenated sorts
            # between compiler and compiler.xyz and this
            # was preventing compiler.xyz from being included
            # in the child loggers of compiler because of an
            # overzealous loop termination condition.
            hyphenated = logging.getLogger('compiler-hyphenated')
            # All will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '1'),
                ('ERROR', '2'),
                ('CRITICAL', '3'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])
        with captured_stdout() as output:
            self.apply_config(self.config7)
            logger = logging.getLogger("compiler.parser")
            self.assertFalse(logger.disabled)
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            logger = logging.getLogger("compiler.lexer")
            # Both will output a message
            logger.info(self.next_message())
            logger.error(self.next_message())
            # Will not appear
            hyphenated.critical(self.next_message())
            self.assert_log_lines([
                ('INFO', '4'),
                ('ERROR', '5'),
                ('INFO', '6'),
                ('ERROR', '7'),
            ], stream=output)
            # Original logger output is empty.
            self.assert_log_lines([])