Python utils.logger.info() Examples

The following are 30 code examples of utils.logger.info(), collected from open-source projects. The source file and project each example was taken from are noted above it. You may also want to look at the other functions and classes available in the utils.logger module.
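All of these snippets import a pre-configured logger object from a project-local utils.logger module rather than calling logging.getLogger directly, and several of them (the BlogReworkPro examples) pass a second positional argument to info(), which suggests a thin custom wrapper around the standard library logger. The module below is only a minimal sketch of what such a wrapper might look like, written to make the examples easier to follow; the class name, the newline parameter, and the log format are assumptions, not the code shipped by any of the projects quoted here.

# utils/logger.py -- hypothetical sketch, not the module used by the projects below.
import logging
import sys


class _Logger(object):
    """Wraps logging.Logger so info() accepts an optional second argument,
    mirroring calls such as logger.info("'%s' " % name, False) in the examples."""

    def __init__(self, name="utils"):
        self._logger = logging.getLogger(name)
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(message)s"))
        self._logger.addHandler(handler)
        self._logger.setLevel(logging.INFO)

    def info(self, message, newline=True):
        # Assumption: the second positional argument controls whether the
        # message becomes its own log record or is appended to the current line.
        if newline:
            self._logger.info(message)
        else:
            sys.stdout.write(str(message))


logger = _Logger()

In the examples below, logger would then be obtained with something like from utils.logger import logger, though the exact import varies by project.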
Example #1
Source File: writer.py    From BlogReworkPro with GNU General Public License v3.0
def write(self, file_path, mode="delete", page=None):
        logger.info("Writing start: %s" % file_path)

        self._file_path = file_path
        if mode != "delete" and page is None:
            self._error("Mode is not 'delete', argument 'page' is required !")
        if mode == "update":
            if self._articles.find_one(
                {
                    "file": file_path
                }
            ):
                self._update(file_path, page)
            else:
                self._insert(page)
        elif mode == "delete":
            self._delete(file_path)
        else:
            self._error("Unexpected mode '%s' !" % mode) 
Example #2
Source File: feeds_generator.py    From BlogReworkPro with GNU General Public License v3.0
def _update_files(self, file_names, time):
        if not os.path.exists(config["feeds_dir_path"]):
            os.mkdir(config["feeds_dir_path"])
        for name_pair in file_names:
            name, view = name_pair["slug"].encode("utf-8"), name_pair["view"].encode("utf-8")
            if name not in self._files:
                file_name = "%s/%s.rss.xml" % (
                    config["feeds_dir_path"],
                    name
                    )
                self._files[name] = open(file_name, "w")
                self._files[name].write(
                    template["begin"].format(
                        config["site_title"],
                        config["site_url"],
                        config["site_description"],
                        "%s/%s" % (
                            config["site_url"],
                            file_name
                        ),
                        time
                    )
                )
                logger.info("'%s' " % view, False) 
Example #3
Source File: sitemap_generator.py    From BlogReworkPro with GNU General Public License v3.0
def generate(self):
        logger.info("Sitemap: Writing start...")
        with open(config["sitemap_path"], "w") as f:
            f.write(template["begin"])
            f.write(self._add_static())
            logger.info("Sitemap: Writing: ")
            for url in ["tag", "author", "category"]:
                f.write(
                    self._add_collection(url, self._collections[url])
                )
            f.write(
                self._add_archives(self._collections["article"])
            )
            f.write(template["end"])
            f.close()
        logger.info("Sitemap: Writing done...") 
Example #4
Source File: sitemap_generator.py    From BlogReworkPro with GNU General Public License v3.0
def _add_archives(self, collection):
        logger.info("%s " % "article", False)
        result = ""
        archives = list(collection.find({}))
        page_count = len(archives) / 10 + 1
        result += self._add_one(
                "archives",
                datetime.now()
            )
        for index in xrange(page_count):
            result += self._add_one(
                "%s/%d" % ("archives", index),
                datetime.now()
            )
        for article in archives:
            result += self._add_one(
                "%s/%s" % ("article", article["slug"]),
                datetime.strptime(article["date"], "%Y.%m.%d %H:%M")
            )
        return result 
Example #5
Source File: cem_actor_learner.py    From tensorflow-rl with Apache License 2.0
def __init__(self, args):
		super(CEMLearner, self).__init__(args)

		policy_conf = {'name': 'local_learning_{}'.format(self.actor_id),
					   'input_shape': self.input_shape,
					   'num_act': self.num_actions,
					   'args': args}

		self.local_network = args.network(policy_conf)
		self.num_params = np.sum([
			np.prod(v.get_shape().as_list())
			for v in self.local_network.params])

		logger.info('Parameter count: {}'.format(self.num_params))
		self.mu = np.zeros(self.num_params)
		self.sigma = np.ones(self.num_params)
		self.num_samples = args.episodes_per_batch
		self.num_epochs = args.num_epochs

		if self.is_master():
			var_list = self.local_network.params
			self.saver = tf.train.Saver(var_list=var_list, max_to_keep=3,
                                        keep_checkpoint_every_n_hours=2) 
Example #6
Source File: distiller.py    From DistilKoBERT with Apache License 2.0
def end_epoch(self):
        """
        Finally arrived at the end of epoch (full pass on dataset).
        Do some tensorboard logging and checkpoint saving.
        """
        logger.info(f"{self.n_sequences_epoch} sequences have been trained during this epoch.")

        if self.is_master:
            self.save_checkpoint(checkpoint_name=f"model_epoch_{self.epoch}.pth")
            self.tensorboard.add_scalar(
                tag="epoch/loss", scalar_value=self.total_loss_epoch / self.n_iter, global_step=self.epoch
            )

        self.epoch += 1
        self.n_sequences_epoch = 0
        self.n_iter = 0
        self.total_loss_epoch = 0 
Example #7
Source File: intrinsic_motivation_actor_learner.py    From tensorflow-rl with Apache License 2.0
def write_density_model(self):
        logger.info('T{} Writing Pickled Density Model to File...'.format(self.actor_id))
        raw_data = cPickle.dumps(self.density_model.get_state(), protocol=2)
        with self.barrier.counter.lock, open('/tmp/density_model.pkl', 'wb') as f:
            f.write(raw_data)

        for i in xrange(len(self.density_model_update_flags.updated)):
            self.density_model_update_flags.updated[i] = 1 
Example #8
Source File: file_monitor.py    From BlogReworkPro with GNU General Public License v3.0
def on_created(self, event):
        path = event.src_path
        if not is_markdown_file(path):
            return
        logger.info("Create: %s" % path)
        self._work(path, "update") 
Example #9
Source File: web_caches.py    From BlogReworkPro with GNU General Public License v3.0
def modifyState(self, parameters):
        name = parameters
        logger.info("Cache: %s - %s\nParams: %s" % ("modify", self.flag, parameters))
        if not self.has(parameters):
            self._error("Try to modify state but '%s' is not in cache now !" % name)
        self._state[name] = True 
Example #10
Source File: web_caches.py    From BlogReworkPro with GNU General Public License v3.0
def updateContent(self, parameters, content):
        name = parameters
        logger.info("Cache: %s - %s\nParams: %s" % ("update", self.flag, parameters))
        self._cache[name] = content
        self._state[name] = False 
Example #11
Source File: feeds_generator.py    From BlogReworkPro with GNU General Public License v3.0
def generate(self):
        logger.info("Feeds: Writing start...")
        self._files = {}
        time = format_date(datetime.now(), "feeds")
        articles = list(self._collection.find({}))
        articles.sort(
            key=lambda article: article["date"], reverse=True
        )
        logger.info("Feeds: Writing: ")
        for article in articles:
            content, file_names = self._format_article(article)
            self._update_files(file_names, time)
            for name in file_names:
                self._files[name["slug"].encode("utf-8")].write(
                    self._add_one(content)
                )
        indexes = {}
        logger.info("Feeds: Done: ")
        for file_name, file_obj in self._files.items():
            file_obj.write(
                template["end"]
            )
            file_obj.close()
            indexes[file_name] = "%s.rss.xml" % file_name
            logger.info("'%s' " % file_name, False)
        with open(
            "%s/%s" % (
                        config["feeds_dir_path"],
                    "indexes.json"
                ),
            "w"
        ) as f:
            json.dump(indexes, f)
        logger.info("Feeds: Writing done...") 
Example #12
Source File: wrapper.py    From BlogReworkPro with GNU General Public License v3.0
def wrap(self, metadata):
        logger.info("Wrapping start")
        return self._slug_wrap(metadata) 
Example #13
Source File: encoder.py    From BicycleGAN-Tensorflow with MIT License
def __init__(self, name, is_train, norm='instance', activation='leaky',
                 image_size=128, latent_dim=8, use_resnet=True):
        logger.info('Init Encoder %s', name)
        self.name = name
        self._is_train = is_train
        self._norm = norm
        self._activation = activation
        self._reuse = False
        self._image_size = image_size
        self._latent_dim = latent_dim
        self._use_resnet = use_resnet 
Example #14
Source File: discriminator.py    From BicycleGAN-Tensorflow with MIT License
def __init__(self, name, is_train, norm='instance', activation='leaky', image_size=128):
        logger.info('Init Discriminator %s', name)
        self.name = name
        self._is_train = is_train
        self._norm = norm
        self._activation = activation
        self._reuse = False
        self._image_size = image_size 
Example #15
Source File: sitemap_generator.py    From BlogReworkPro with GNU General Public License v3.0
def _add_collection(self, url, collection):
        logger.info("%s " % url, False)
        result = ""
        for item in list(collection.find({})):
            result += self._add_one(
                    "%s/%s" % (url, item["slug"]),
                    datetime.now()
                )
            for index in xrange(item["count"] / config["articles_per_page"] + 1):
                result += self._add_one(
                    "%s/%s/%d" % (url, item["slug"], index),
                    datetime.now()
                )
        return result 
Example #16
Source File: web_handlers.py    From BlogReworkPro with GNU General Public License v3.0
def _304(self, parameters, data):
        logger.info("304: %s\nParameters: %s" % (
            self.url, parameters
        ))
        return self._response(
            self._format_data(304, data, self.url, parameters),
            200
        ) 
Example #17
Source File: web_handlers.py    From BlogReworkPro with GNU General Public License v3.0
def _handle(self, parameters=None):
        hasOrigin = "origin" in request.headers
        logger.info("Request: %s\nFrom: %s\nUrl: %s" % (
            self.url,
            request.headers["Referer"] if hasOrigin else request.remote_addr,
            request.url
        ))
        if hasOrigin and (request.headers["origin"] not in config["allow-origin"]):
            return self._403(parameters)
        if not (request.remote_addr in config["allow-ip"]):
            return self._403(parameters)
        params = self._parse_parameters(parameters)

        cache = self._cache

        if cache is not None and cache.has(params) and not cache.is_modified(params):
            return self._304(params, cache.get(params))

        data = self._find_data(params)
        if not data:
            return self._404(parameters)

        logger.info("Data found: %s\nParameters: %s" % (
            self.url, parameters
        ))

        if cache is not None:
            cache.updateContent(params, data)

        return self._response(
            self._format_data(200, data, self.url, params),
            200
        ) 
Example #18
Source File: actor_learner.py    From tensorflow-rl with Apache License 2.0
def test(self, num_episodes=100):
        """
        Run test monitor for `num_episodes`
        """
        rewards = list()
        for episode in range(num_episodes):
            s = self.emulator.get_initial_state()
            self.reset_hidden_state()
            total_episode_reward = 0
            episode_over = False

            while not episode_over:
                a = self.choose_next_action(s)[0]
                s, reward, episode_over = self.emulator.next(a)

                total_episode_reward += reward

            else:
                rewards.append(total_episode_reward)
                logger.info("EPISODE {0} -- REWARD: {1}, RUNNING AVG: {2:.1f}±{3:.1f}, BEST: {4}".format(
                    episode,
                    total_episode_reward,
                    np.array(rewards).mean(),
                    2*np.array(rewards).std(),
                    max(rewards),
                )) 
Example #19
Source File: file_monitor.py    From BlogReworkPro with GNU General Public License v3.0
def on_deleted(self, event):
        path = event.src_path
        if not is_markdown_file(path):
            return
        logger.info("Delete: %s" % path)
        self._work(path, "delete") 
Example #20
Source File: intrinsic_motivation_actor_learner.py    From tensorflow-rl with Apache License 2.0
def read_density_model(self):
        logger.info('T{} Synchronizing Density Model...'.format(self.actor_id))
        with self.barrier.counter.lock, open('/tmp/density_model.pkl', 'rb') as f:
            raw_data = f.read()

        self.density_model.set_state(cPickle.loads(raw_data)) 
Example #21
Source File: discriminator.py    From CycleGAN-Tensorflow with MIT License
def __init__(self, name, is_train, norm='instance', activation='leaky'):
        logger.info('Init Discriminator %s', name)
        self.name = name
        self._is_train = is_train
        self._norm = norm
        self._activation = activation
        self._reuse = False 
Example #22
Source File: file_monitor.py    From BlogReworkPro with GNU General Public License v3.0
def on_modified(self, event):
        path = event.src_path
        if not is_markdown_file(path):
            return
        logger.info("Modify: %s" % path)
        self._work(path, "update") 
Example #23
Source File: generator.py    From BicycleGAN-Tensorflow with MIT License
def __init__(self, name, is_train, norm='batch', image_size=128):
        logger.info('Init Generator %s', name)
        self.name = name
        self._is_train = is_train
        self._norm = norm
        self._reuse = False
        self._image_size = image_size 
Example #24
Source File: file_monitor.py    From BlogReworkPro with GNU General Public License v3.0
def on_moved(self, event):
        src_path = event.src_path
        dst_path = event.dest_path
        if not is_markdown_file(src_path) or not is_markdown_file(dst_path):
            return
        logger.info("Move: %s, %s" % (src_path, dst_path))
        self._work(src_path, "delete")
        self._work(dst_path, "update") 
Example #25
Source File: distiller.py    From DistilKoBERT with Apache License 2.0
def train(self):
        """
        The real training loop.
        """
        if self.is_master:
            logger.info("Starting training")
        self.last_log = time.time()
        self.student.train()
        self.teacher.eval()

        for _ in range(self.params.n_epoch):
            if self.is_master:
                logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}")
            if self.multi_gpu:
                torch.distributed.barrier()

            iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0])
            for batch in iter_bar:
                if self.params.n_gpu > 0:
                    batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch)

                if self.mlm:
                    token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch)
                else:
                    token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch)
                self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels)

                iter_bar.update()
                iter_bar.set_postfix(
                    {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"}
                )
            iter_bar.close()

            if self.is_master:
                logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}")
            self.end_epoch()

        if self.is_master:
            logger.info(f"Save very last checkpoint as `pytorch_model.bin`.")
            self.save_checkpoint(checkpoint_name=f"pytorch_model.bin")
            logger.info("Training is finished") 
Example #26
Source File: lm_seqs_dataset.py    From DistilKoBERT with Apache License 2.0
def print_statistics(self):
        """
        Print some statistics on the corpus. Only the master process.
        """
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unkown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unkown} unknown tokens (covering {100*nb_unkown/data_len:.2f}% of the data)') 
Example #27
Source File: lm_seqs_dataset.py    From DistilKoBERT with Apache License 2.0
def remove_empty_sequences(self):
        """
        Sequences that are too short are simply removed. This could be tuned.
        """
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.') 
Example #28
Source File: lm_seqs_dataset.py    From DistilKoBERT with Apache License 2.0
def remove_long_sequences(self):
        """
        Sequences that are too long are split into chunks of max_model_input_size.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i:i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len-2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths) 
Example #29
Source File: grouped_batch_sampler.py    From exbert with Apache License 2.0
def create_lengths_groups(lengths, k=0):
    bins = np.arange(start=3, stop=k, step=4).tolist() if k > 0 else [10]
    groups = _quantize(lengths, bins)
    # count number of elements per group
    counts = np.unique(groups, return_counts=True)[1]
    fbins = [0] + bins + [np.inf]
    logger.info("Using {} as bins for aspect lengths quantization".format(fbins))
    logger.info("Count of instances per bin: {}".format(counts))
    return groups 
Example #30
Source File: distiller.py    From exbert with Apache License 2.0
def train(self):
        """
        The real training loop.
        """
        if self.is_master:
            logger.info("Starting training")
        self.last_log = time.time()
        self.student.train()
        self.teacher.eval()

        for _ in range(self.params.n_epoch):
            if self.is_master:
                logger.info(f"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}")
            if self.multi_gpu:
                torch.distributed.barrier()

            iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0])
            for batch in iter_bar:
                if self.params.n_gpu > 0:
                    batch = tuple(t.to(f"cuda:{self.params.local_rank}") for t in batch)

                if self.mlm:
                    token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch)
                else:
                    token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch)
                self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels)

                iter_bar.update()
                iter_bar.set_postfix(
                    {"Last_loss": f"{self.last_loss:.2f}", "Avg_cum_loss": f"{self.total_loss_epoch/self.n_iter:.2f}"}
                )
            iter_bar.close()

            if self.is_master:
                logger.info(f"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}")
            self.end_epoch()

        if self.is_master:
            logger.info(f"Save very last checkpoint as `pytorch_model.bin`.")
            self.save_checkpoint(checkpoint_name=f"pytorch_model.bin")
            logger.info("Training is finished")