Python tqdm.trange() Examples

The following are 30 code examples of tqdm.trange(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the tqdm module.
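
Before diving in, recall that trange(N) is simply tqdm's shorthand for tqdm(range(N)): it returns a progress-bar-wrapped range and accepts all of tqdm's keyword arguments. A minimal illustration (the sleep call stands in for real work):

import time
from tqdm import tqdm, trange

for i in trange(100, desc="Processing"):       # same as tqdm(range(100), desc="Processing")
    time.sleep(0.01)                           # placeholder for real work

for i in tqdm(range(100), desc="Processing"):  # the equivalent spelled out
    time.sleep(0.01)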
Example #1
Source File: nn.py    From parasol with MIT License
def chunk(*data, **kwargs):
    chunk_size = kwargs.pop('chunk_size', 100)
    shuffle = kwargs.pop('shuffle', False)
    show_progress = kwargs.pop('show_progress', None)
    N = len(data[0])
    if shuffle:
        permutation = np.random.permutation(N)
    else:
        permutation = np.arange(N)
    num_chunks = N // chunk_size
    if N % chunk_size > 0:
        num_chunks += 1
    rng = tqdm.trange(num_chunks, desc=show_progress) if show_progress is not None else range(num_chunks)
    for c in rng:
        chunk_slice = slice(c * chunk_size, (c + 1) * chunk_size)
        idx = permutation[chunk_slice]
        yield idx, tuple(d[idx] for d in data) 
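
The pattern above makes the progress bar opt-in: the loop iterates over tqdm.trange() only when a description string is supplied, and over a plain range() otherwise. A stripped-down sketch of that idiom (the function below is illustrative, not from the parasol source):

import tqdm

def batches(n, show_progress=None):
    # wrap the index range in a progress bar only when a description was given
    rng = tqdm.trange(n, desc=show_progress) if show_progress is not None else range(n)
    for i in rng:
        yield i  # stand-in for yielding a real batch

list(batches(10, show_progress="Chunks"))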
Example #2
Source File: coco_seg_dataset.py    From imgclsmob with MIT License
def _filter_idx(self,
                    idx,
                    idx_file,
                    pixels_thr=1000):
        logging.info("Filtering mask index")
        tbar = trange(len(idx))
        filtered_idx = []
        for i in tbar:
            img_id = idx[i]
            coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(
                coco_target,
                img_metadata["height"],
                img_metadata["width"])
            if (mask > 0).sum() > pixels_thr:
                filtered_idx.append(img_id)
            tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx), len(filtered_idx)))
        logging.info("Found number of qualified images: {}".format(len(filtered_idx)))
        np.save(idx_file, np.array(filtered_idx, np.int32))
        return filtered_idx 
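
This example (and the other COCO preprocessing examples below) keeps a handle on the bar returned by trange so it can refresh the bar's label with set_description() on every iteration. The same idiom in isolation, with a made-up filtering test:

from tqdm import trange

items = list(range(500))
kept = []
tbar = trange(len(items))
for i in tbar:
    if items[i] % 3 == 0:  # stand-in for the real qualification test
        kept.append(items[i])
    tbar.set_description("Doing: {}/{}, got {} qualified".format(i, len(items), len(kept)))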
Example #3
Source File: coco_seg_dataset.py    From imgclsmob with MIT License
def _filter_idx(self,
                    idx_list,
                    idx_file_path,
                    pixels_thr=1000):
        logging.info("Filtering mask index:")
        tbar = trange(len(idx_list))
        filtered_idx = []
        for i in tbar:
            img_id = idx_list[i]
            coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(
                coco_target,
                img_metadata["height"],
                img_metadata["width"])
            if (mask > 0).sum() > pixels_thr:
                filtered_idx.append(img_id)
            tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx_list), len(filtered_idx)))
        logging.info("Found number of qualified images: {}".format(len(filtered_idx)))
        np.save(idx_file_path, np.array(filtered_idx, np.int32))
        return filtered_idx 
Example #4
Source File: coco_seg_dataset.py    From imgclsmob with MIT License
def _filter_idx(self,
                    idx,
                    idx_file,
                    pixels_thr=1000):
        logging.info("Filtering mask index")
        tbar = trange(len(idx))
        filtered_idx = []
        for i in tbar:
            img_id = idx[i]
            coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(
                coco_target,
                img_metadata["height"],
                img_metadata["width"])
            if (mask > 0).sum() > pixels_thr:
                filtered_idx.append(img_id)
            tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx), len(filtered_idx)))
        logging.info("Found number of qualified images: {}".format(len(filtered_idx)))
        np.save(idx_file, np.array(filtered_idx, np.int32))
        return filtered_idx 
Example #5
Source File: run_unet.py    From DeepResearch with MIT License
def train(unet, batch_size, epochs, epoch_lapse, threshold, learning_rate, criterion, optimizer, x_train, y_train, x_val, y_val, width_out, height_out):
    epoch_iter = np.ceil(x_train.shape[0] / batch_size).astype(int)
    t = trange(epochs, leave=True)
    for _ in t:
        total_loss = 0
        for i in range(epoch_iter):
            batch_train_x = torch.from_numpy(x_train[i * batch_size : (i + 1) * batch_size]).float()
            batch_train_y = torch.from_numpy(y_train[i * batch_size : (i + 1) * batch_size]).long()
            if use_gpu:
                batch_train_x = batch_train_x.cuda()
                batch_train_y = batch_train_y.cuda()
            batch_loss = train_step(batch_train_x , batch_train_y, optimizer, criterion, unet, width_out, height_out)
            total_loss += batch_loss
        if (_+1) % epoch_lapse == 0:
            val_loss = get_val_loss(x_val, y_val, width_out, height_out, unet)
            print("Total loss in epoch %f : %f and validation loss : %f" %(_+1, total_loss, val_loss))
    gc.collect() 
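
Here trange(epochs, leave=True) drives the outer epoch loop and keeps the finished bar on screen (leave=True is also tqdm's default). A rough sketch of the same structure with the training step stubbed out; the set_postfix() call is an optional extra, not used in the original:

from tqdm import trange

epochs, batches_per_epoch = 5, 100
t = trange(epochs, leave=True)
for epoch in t:
    total_loss = 0.0
    for i in range(batches_per_epoch):
        total_loss += 0.0           # stand-in for train_step(...)
    t.set_postfix(loss=total_loss)  # show the running loss next to the bar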
Example #6
Source File: mscoco.py    From awesome-semantic-segmentation-pytorch with Apache License 2.0
def _preprocess(self, ids, ids_file):
        print("Preprocessing mask, this will take a while." + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'], img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                                 format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        with open(ids_file, 'wb') as f:
            pickle.dump(new_ids, f)
        return new_ids 
Example #7
Source File: coco_seg_dataset.py    From imgclsmob with MIT License
def _filter_idx(self,
                    idx,
                    idx_file,
                    pixels_thr=1000):
        logging.info("Filtering mask index")
        tbar = trange(len(idx))
        filtered_idx = []
        for i in tbar:
            img_id = idx[i]
            coco_target = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(
                coco_target,
                img_metadata["height"],
                img_metadata["width"])
            if (mask > 0).sum() > pixels_thr:
                filtered_idx.append(img_id)
            tbar.set_description("Doing: {}/{}, got {} qualified images".format(i, len(idx), len(filtered_idx)))
        logging.info("Found number of qualified images: {}".format(len(filtered_idx)))
        np.save(idx_file, np.array(filtered_idx, np.int32))
        return filtered_idx 
Example #8
Source File: preprocess.py    From CoupletAI with MIT License
def create_dataset(seqs: List[List[str]],
                   tags: List[List[str]],
                   word_to_ix: Mapping[str, int],
                   max_seq_len: int,
                   pad_ix: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Convert List[str] -> torch.Tensor.
    Returns:
        seqs_tensor: shape=[num_seqs, max_seq_len].
        seqs_mask: shape=[num_seqs, max_seq_len].
        tags_tesnor: shape=[num_seqs, max_seq_len].
    """
    assert len(seqs) == len(tags)
    num_seqs = len(seqs)
    seqs_tensor = torch.ones(num_seqs, max_seq_len) * pad_ix
    seqs_mask = torch.zeros(num_seqs, max_seq_len)
    tags_tesnor = torch.ones(num_seqs, max_seq_len) * pad_ix
    for i in trange(num_seqs):
        seqs_mask[i, : len(seqs[i])] = 1
        for j, word in enumerate(seqs[i]):
            seqs_tensor[i, j] = word_to_ix.get(word, word_to_ix['[UNK]'])
        for j, tag in enumerate(tags[i]):
            tags_tesnor[i, j] = word_to_ix.get(tag, word_to_ix['[UNK]'])
    return seqs_tensor.long(), seqs_mask, tags_tesnor.long() 
Example #9
Source File: splitter.py    From Splitter with GNU General Public License v3.0
def fit(self):
        """
        Fitting a model.
        """
        self.base_model_fit()
        self.create_split()
        self.setup_model()
        self.model.train()
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
        self.optimizer.zero_grad()
        print("\nLearning the joint model.\n")
        random.shuffle(self.persona_walker.paths)
        self.walk_steps = trange(len(self.persona_walker.paths), desc="Loss")
        for step in self.walk_steps:
            self.reset_average_loss(step)
            walk = self.persona_walker.paths[step]
            self.process_walk(walk)
            loss_score = self.optimize()
            self.update_average_loss(loss_score) 
Example #10
Source File: mscoco.py    From SegmenTron with Apache License 2.0
def _preprocess(self, ids, ids_file):
        print("Preprocessing mask, this will take a while." + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'], img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                                 format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        with open(ids_file, 'wb') as f:
            pickle.dump(new_ids, f)
        return new_ids 
Example #11
Source File: initialize.py    From YAMDA with MIT License
def count_seqs_with_words(seqs, halflength, ming, maxg, alpha, revcomp, desc):
    if alpha == 'protein':
        ambiguous_character = 'X'
    else:
        ambiguous_character = 'N'
    gapped_kmer_dict = {}  # each key is the gapped k-mer word
    for g in trange(ming, maxg + 1, 1, desc=desc):
        w = g+2*halflength # length of the word
        gap = g * ambiguous_character
        for seq in seqs:
            slen = len(seq)
            for i in range(0, slen-w+1):
                word = seq[i : i+w]
                # skip word if it contains an ambiguous character
                if ambiguous_character in word:
                    continue
                # convert word to a gapped word. Only the first and last half-length letters are preserved
                word = word[0:halflength] + gap + word[-halflength:]
                update_gapped_kmer_dict(gapped_kmer_dict, word, revcomp)
    return gapped_kmer_dict 
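
Note the three-argument call: trange accepts the same start, stop and step arguments as the built-in range, with tqdm keyword arguments (such as desc) after them. For instance:

from tqdm import trange

for g in trange(2, 9, 2, desc="gap length"):  # iterates over 2, 4, 6, 8 with a bar
    pass                                      # placeholder for per-gap work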
Example #12
Source File: sgcn.py    From SGCN with GNU General Public License v3.0
def create_and_train_model(self):
        """
        Model training and scoring.
        """
        print("\nTraining started.\n")
        self.model = SignedGraphConvolutionalNetwork(self.device, self.args, self.X).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.args.learning_rate,
                                          weight_decay=self.args.weight_decay)
        self.model.train()
        self.epochs = trange(self.args.epochs, desc="Loss")
        for epoch in self.epochs:
            start_time = time.time()
            self.optimizer.zero_grad()
            loss, _ = self.model(self.positive_edges, self.negative_edges, self.y)
            loss.backward()
            self.epochs.set_description("SGCN (Loss=%g)" % round(loss.item(), 4))
            self.optimizer.step()
            self.logs["training_time"].append([epoch+1, time.time()-start_time])
            if self.args.test_size > 0:
                self.score_model(epoch) 
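
Storing the trange object lets the training loop rewrite the bar's text each epoch so the latest loss is always visible. The same idea reduced to a few lines (the loss here is random, purely for illustration):

import random
from tqdm import trange

epochs = trange(50, desc="Loss")
for epoch in epochs:
    loss = random.random()  # stand-in for a real training loss
    epochs.set_description("Loss=%g" % round(loss, 4))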
Example #13
Source File: h5_test.py    From keras-image-segmentation with MIT License
def image_copy_to_dir(mode, x_paths, y_paths):
    target_path = '/run/media/tkwoo/myWorkspace/workspace/01.dataset/03.Mask_data/cityscape'
    target_path = os.path.join(target_path, mode)

    for idx in trange(len(x_paths)):
        image = cv2.imread(x_paths[idx], 1)
        mask = cv2.imread(y_paths[idx], 0)

        image = cv2.resize(image, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_LINEAR)
        mask = cv2.resize(mask, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_NEAREST)

        cv2.imwrite(os.path.join(target_path, 'image', os.path.basename(x_paths[idx])), image)
        cv2.imwrite(os.path.join(target_path, 'mask', os.path.basename(y_paths[idx])), mask)

        # show = image.copy()
        # mask = (mask.astype(np.float32)*255/33).astype(np.uint8)
        # mask_color = cv2.applyColorMap(mask, cv2.COLORMAP_JET)
        # show = cv2.addWeighted(show, 0.5, mask_color, 0.5, 0.0)
        # cv2.imshow('show', show)
        # key = cv2.waitKey(1)
        # if key == 27:
        #     return 
Example #14
Source File: main.py    From chinese-opinion-target-extraction with MIT License
def test(data):
    print('Testing model...')
    model = Model(data).to(device)
    model.load_state_dict(torch.load(data.model_path))
    instances = data.ids
    pred_results = []
    model.eval()
    test_num = len(instances)
    total_batch = test_num // data.batch_size + 1
    for batch in trange(total_batch):
        start, end = slice_set(batch, data.batch_size, test_num)
        instance = instances[start:end]
        if not instance: continue
        _, mask, *model_input, char_recover = load_batch(instance, True)
        tag_seq = model(mask, *model_input)
        pred_label = seq2label(tag_seq, mask, data.label_alphabet, char_recover)
        pred_results += pred_label
    return pred_results 
Example #15
Source File: track.py    From spotify-downloader with MIT License
def _make_progress_bar(self, iterations):
        """
        Creates a progress bar using :class:`tqdm`.

        Parameters
        ----------
        iterations: `int`
            Number of iterations to be performed.

        Returns
        -------
        progress_bar: :class:`tqdm.std.tqdm`
            An iterator object.
        """

        progress_bar = tqdm.trange(
            iterations,
            unit_scale=(self._chunksize // 1024),
            unit="KiB",
            dynamic_ncols=True,
            bar_format='{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt}KiB '
                '[{elapsed}<{remaining}, {rate_fmt}{postfix}]',
        )
        return progress_bar 
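
trange forwards display-related keyword arguments straight to tqdm: unit and unit_scale relabel each tick (here as kibibytes), dynamic_ncols resizes the bar with the terminal, and bar_format controls the layout. A simplified stand-alone variant with made-up sizes:

import tqdm

chunksize = 64 * 1024  # hypothetical 64 KiB download chunks
total_chunks = 160
bar = tqdm.trange(
    total_chunks,
    unit="KiB",
    unit_scale=chunksize // 1024,  # each iteration counts as this many KiB
    dynamic_ncols=True,
)
for _ in bar:
    pass  # placeholder for writing one chunk to disk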
Example #16
Source File: env.py    From parasol with MIT License
def rollouts(self, num_rollouts, num_steps, show_progress=False,
                 noise=None,
                 callback=lambda x: None,
                 **kwargs):
        states, actions, costs = (
            np.empty([num_rollouts, num_steps] + [self.get_state_dim()]),
            np.empty([num_rollouts, num_steps] + [self.get_action_dim()]),
            np.empty([num_rollouts, num_steps])
        )
        infos = [None] * num_rollouts
        rollouts = tqdm.trange(num_rollouts, desc='Rollouts') if show_progress else range(num_rollouts)
        for i in rollouts:
            with contextlib.ExitStack() as stack:
                context = callback(i)
                if context is not None:
                    stack.enter_context(callback(i))
                n = None
                if noise is not None:
                    n = noise()
                states[i], actions[i], costs[i], infos[i] = \
                        self.rollout(num_steps, noise=n,**kwargs)
        return states, actions, costs, infos 
Example #17
Source File: imagenet.py    From MobileNetV2-pytorch with MIT License
def train_network(start_epoch, epochs, scheduler, model, train_loader, val_loader, optimizer, criterion, device, dtype,
                  batch_size, log_interval, csv_logger, save_path, claimed_acc1, claimed_acc5, best_test):
    for epoch in trange(start_epoch, epochs + 1):
        if not isinstance(scheduler, CyclicLR):
            scheduler.step()
        train_loss, train_accuracy1, train_accuracy5, = train(model, train_loader, epoch, optimizer, criterion, device,
                                                              dtype, batch_size, log_interval, scheduler)
        test_loss, test_accuracy1, test_accuracy5 = test(model, val_loader, criterion, device, dtype)
        csv_logger.write({'epoch': epoch + 1, 'val_error1': 1 - test_accuracy1, 'val_error5': 1 - test_accuracy5,
                          'val_loss': test_loss, 'train_error1': 1 - train_accuracy1,
                          'train_error5': 1 - train_accuracy5, 'train_loss': train_loss})
        save_checkpoint({'epoch': epoch + 1, 'state_dict': model.state_dict(), 'best_prec1': best_test,
                         'optimizer': optimizer.state_dict()}, test_accuracy1 > best_test, filepath=save_path)

        csv_logger.plot_progress(claimed_acc1=claimed_acc1, claimed_acc5=claimed_acc5)

        if test_accuracy1 > best_test:
            best_test = test_accuracy1

    csv_logger.write_text('Best accuracy is {:.2f}% top-1'.format(best_test * 100.)) 
Example #18
Source File: segmentation.py    From gluon-cv with Apache License 2.0
def _preprocess(self, ids, ids_file):
        print("Preprocessing mask, this will take a while." + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'.\
                format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        with open(ids_file, 'wb') as f:
            pickle.dump(new_ids, f)
        return new_ids 
Example #19
Source File: common.py    From dataflow with Apache License 2.0
def start(self):
        """
        Start testing with a progress bar.
        """
        if not self._reset_called:
            self.ds.reset_state()
        itr = self.ds.__iter__()
        if self.warmup:
            for _ in tqdm.trange(self.warmup, **get_tqdm_kwargs()):
                next(itr)
        # add smoothing for speed benchmark
        with get_tqdm(total=self.test_size,
                      leave=True, smoothing=0.2) as pbar:
            for idx, dp in enumerate(itr):
                pbar.update()
                if idx == self.test_size - 1:
                    break 
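
get_tqdm_kwargs() and get_tqdm() are tensorpack helpers, not part of tqdm itself; the underlying idea is to burn a few warm-up iterations under a throwaway bar before timing the real pass with a second, smoothed bar. A rough approximation using only tqdm (sizes are arbitrary):

import tqdm

itr = iter(range(100000))  # stand-in for a real data pipeline
for _ in tqdm.trange(100, desc="warmup", leave=False):
    next(itr)              # warm up caches/prefetchers; the bar is cleared afterwards
with tqdm.tqdm(total=5000, leave=True, smoothing=0.2) as pbar:
    for idx, dp in enumerate(itr):
        pbar.update()
        if idx == 5000 - 1:
            break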
Example #20
Source File: coco.py    From overhaul-distillation with MIT License
def _preprocess(self, ids, ids_file):
        print("Preprocessing mask, this will take a while. " + \
              "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'],
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'. \
                                 format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        torch.save(new_ids, ids_file)
        return new_ids 
Example #21
Source File: coco.py    From PyTorch-Encoding with MIT License
def _preprocess(self, ids, ids_file):
        print("Preprocessing mask, this will take a while." + \
            "But don't worry, it only run once for each split.")
        tbar = trange(len(ids))
        new_ids = []
        for i in tbar:
            img_id = ids[i]
            cocotarget = self.coco.loadAnns(self.coco.getAnnIds(imgIds=img_id))
            img_metadata = self.coco.loadImgs(img_id)[0]
            mask = self._gen_seg_mask(cocotarget, img_metadata['height'], 
                                      img_metadata['width'])
            # more than 1k pixels
            if (mask > 0).sum() > 1000:
                new_ids.append(img_id)
            tbar.set_description('Doing: {}/{}, got {} qualified images'.\
                format(i, len(ids), len(new_ids)))
        print('Found number of qualified images: ', len(new_ids))
        torch.save(new_ids, ids_file)
        return new_ids 
Example #22
Source File: walkers.py    From MUSAE with GNU General Public License v3.0
def simulate_walks(self):
        """
        Doing a fixed number of truncated random walk from every node in the graph.
        """
        self.walks = []
        for _ in trange(self.num_walks, desc='Walk series: '):
            for node in trange(self.G.number_of_nodes(), desc='Nodes: '):
                walk_from_node = self.do_walk(node)
                self.walks.append(walk_from_node) 
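
Nesting one trange inside another produces two stacked bars. The inner bar can be cleared after each outer step by passing leave=False (and, for more complex multi-bar layouts, position); a minimal nested sketch:

from tqdm import trange

walks = []
for _ in trange(10, desc="Walk series: "):
    for node in trange(100, desc="Nodes: ", leave=False):  # leave=False clears the inner bar each pass
        walks.append(node)                                 # stand-in for do_walk(node)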
Example #23
Source File: env.py    From parasol with MIT License
def rollout(self, num_steps, policy=None, render=False,
                show_progress=False, init_std=1, noise=None):
        if policy is None:
            def policy(states, actions, t, noise=None):  # default: random actions, matching the call below
                return np.random.normal(size=self.get_action_dim(), scale=init_std)
        states, actions, costs = (
            np.zeros([num_steps] + [self.get_state_dim()]),
            np.zeros([num_steps] + [self.get_action_dim()]),
            np.zeros([num_steps])
        )
        infos = collections.defaultdict(list)
        current_state = self.reset()
        times = tqdm.trange(num_steps, desc='Rollout') if show_progress else range(num_steps)
        for t in times:
            states[t] = current_state
            if render:
                self.render(mode='human')
            if self.is_recording():
                self.render(mode='rgb_array')
                self.grab_frame()
            n = None
            if noise is not None:
                n = noise[t]
            actions[t] = policy(states, actions, t, noise=n)
            current_state, costs[t], done, info = self.step(actions[t])
            for k, v in info.items():
                infos[k].append(v)
        if self.currently_logging:
            log_entry = collections.OrderedDict()
            log_entry['episode_number'] = self.episode_number
            log_entry['mean_cost'] = costs.mean()
            log_entry['total_cost'] = costs.sum()
            log_entry['final_cost'] = costs[-1]
            for k, v in infos.items():
                v = np.array(v)
                log_entry['mean_%s' % k] = v.mean()
                log_entry['total_%s' % k] = v.sum()
                log_entry['final_%s' % k] = v[-1]
            self.log_entry(log_entry)
            self.episode_number += 1
        return states, actions, costs, infos 
Example #24
Source File: fit.py    From parasol with MIT License
def quadratic_regression_pd(SA, costs, diag_cost=False):
    assert not diag_cost
    global global_step
    dsa = SA.shape[-1]
    C = tf.get_variable('cost_mat{}'.format(global_step), shape=[dsa, dsa],
                        dtype=tf.float32,
                        initializer=tf.random_uniform_initializer(minval=-0.1, maxval=0.1))
    L = tf.matrix_band_part(C, -1, 0)
    L = tf.matrix_set_diag(L, tf.maximum(tf.matrix_diag_part(L), 0.0))
    LL = tf.matmul(L, tf.transpose(L))
    c = tf.get_variable('cost_vec{}'.format(global_step), shape=[dsa],
                        dtype=tf.float32, initializer=tf.zeros_initializer())
    b = tf.get_variable('cost_bias{}'.format(global_step), shape=[],
                        dtype=tf.float32, initializer=tf.zeros_initializer())
    s_ = tf.placeholder(tf.float32, [None, dsa])
    c_ = tf.placeholder(tf.float32, [None])
    pred_cost = 0.5 * tf.einsum('na,ab,nb->n', s_, LL, s_) + \
            tf.einsum('na,a->n', s_, c) + b
    mse = tf.reduce_mean(tf.square(pred_cost - c_))
    opt = tf.train.MomentumOptimizer(1e-3, 0.9).minimize(mse)
    N = SA.shape[0]
    SA = SA.reshape([-1, dsa])
    costs = costs.reshape([-1])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for itr in tqdm.trange(1000, desc='Fitting cost'):
            _, m = sess.run([opt, mse], feed_dict={
                s_: SA,
                c_: costs,
            })
            if itr == 0 or itr == 999:
                print('mse itr {}: {}'.format(itr, m))
        cost_mat, cost_vec = sess.run((LL, c))

    global_step += 1
    return cost_mat, cost_vec 
Example #25
Source File: run.py    From MobileNetV2-pytorch with MIT License
def find_bounds_clr(model, loader, optimizer, criterion, device, dtype, min_lr=8e-6, max_lr=8e-5, step_size=2000,
                    mode='triangular', save_path='.'):
    model.train()
    correct1, correct5 = 0, 0
    scheduler = CyclicLR(optimizer, base_lr=min_lr, max_lr=max_lr, step_size=step_size, mode=mode)
    epoch_count = step_size // len(loader)  # Assuming step_size is multiple of batch per epoch
    accuracy = []
    for _ in trange(epoch_count):
        for batch_idx, (data, target) in enumerate(tqdm(loader)):
            if scheduler is not None:
                scheduler.batch_step()
            data, target = data.to(device=device, dtype=dtype), target.to(device=device)

            optimizer.zero_grad()
            output = model(data)

            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            corr = correct(output, target)
            accuracy.append(corr[0] / data.shape[0])

    lrs = np.linspace(min_lr, max_lr, step_size)
    plt.plot(lrs, accuracy)
    plt.show()
    plt.savefig(os.path.join(save_path, 'find_bounds_clr.png'))
    np.save(os.path.join(save_path, 'acc.npy'), accuracy)
    return 
Example #26
Source File: imagenet5k.py    From webvision-2.0-benchmarks with Apache License 2.0
def get_training_bbox(bbox_dir, imglist):
        import xml.etree.ElementTree as ET
        ret = []

        def parse_bbox(fname):
            root = ET.parse(fname).getroot()
            size = root.find('size').getchildren()
            size = map(int, [size[0].text, size[1].text])

            box = root.find('object').find('bndbox').getchildren()
            box = map(lambda x: float(x.text), box)
            return np.asarray(box, dtype='float32')

        with timed_operation('Loading Bounding Boxes ...'):
            cnt = 0
            for k in tqdm.trange(len(imglist)):
                fname = imglist[k][0]
                fname = fname[:-4] + 'xml'
                fname = os.path.join(bbox_dir, fname)
                try:
                    ret.append(parse_bbox(fname))
                    cnt += 1
                except Exception:
                    ret.append(None)
            logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
        return ret 
Example #27
Source File: trainer.py    From COCO-GAN with MIT License
def test(self, n_samples, output_dir):
        n_digits = ceil(log(n_samples, 10))
        n_iters = n_samples // self.batch_size + 1
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        for i in trange(n_iters):
            images = self.rand_sample_full_test()
            for j in range(images.shape[0]):
                global_id = i*self.batch_size + j
                if global_id < n_samples:
                    output_path = os.path.join(output_dir, "test_sample_{}.png".format(str(global_id).zfill(n_digits)))
                    imsave(output_path, images[j]) 
Example #28
Source File: Filter_Stock_Cashflow_CHN.py    From StockRecommendSystem with MIT License
def process_data(root_path, symbols, dates):
    
    negative_pect = {}
    stock_memory = {}
    symbol_memory = {}
    range_len = 3

    my_range = range(-1, -200, -1)
    #pbar = tqdm(total=len(my_range))
    pbar = trange(len(my_range))

    out_path = root_path + "/Data/CSV/target/"
    if os.path.exists(out_path) == False:
        os.mkdir(out_path)

    for index in my_range:
        day_range = [ dates[idx] for idx in range(index-range_len, index+1) ]
        file_name = out_path + day_range[-1] + ".csv"

        if os.path.exists(file_name):
            stock_filter = pd.read_csv(file_name, index_col=0)
        else:
            db_cashflow = process_all_stocks_data(root_path, symbols, day_range, stock_memory, symbol_memory, index, range_len)
            stock_filter = filter_cashflow(db_cashflow)

            if len(stock_filter) > 0: 
                stock_filter.to_csv(file_name)

        negative_pect[day_range[-1]] = get_result(stock_filter)

        # outMessage = '%-*s processed in:  %.4s seconds' % (6, index, (time.time() - startTime))
        # pbar.set_description(outMessage)    
        pbar.update(1)

    pbar.close()

    print(negative_pect) 
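
Here the trange object is never iterated over; the loop runs over something else and the bar is advanced by hand with update(1) and finished with close(). That works because trange returns an ordinary tqdm instance, although tqdm(total=N) is the more usual spelling for manual updates. A compact sketch with made-up work items:

from tqdm import trange

dates = ["2020-01-%02d" % d for d in range(1, 8)]  # made-up work items
pbar = trange(len(dates))                          # or: tqdm(total=len(dates))
for date in dates:
    pass                                           # stand-in for per-date processing
    pbar.update(1)
pbar.close()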
Example #29
Source File: Filter_Stock_Cashflow_CHN.py    From StockRecommendSystem with MIT License
def summary_stock_tick_data(root_path, df, symbol, date_list):
    file_path = root_path + "/Data/CSV/tick/" + symbol + "/"    
    out_file = root_path + "/Data/CSV/cashflow/" + symbol + ".csv"

    #pbar = trange(len(date_list), mininterval=0.1, smoothing=1, leave=False)
    #for i in pbar:
    for date in date_list:
        #date = date_list[i]
        start = time.time()
        file_name = file_path + symbol + "_" + date + ".csv"

        if os.path.exists(file_name) == False:
            continue

        try:
            data = pd.read_csv(file_name, index_col=0)
        except:
            print("error on symbol:", symbol, "  date:", date)
            continue

        if (data is None) or data.empty or len(data) < 4:
            buy, sell, even = 0, 0, 0
        else:
            buy_amount, sell_amount, even_amount, buy_volume, sell_volume, even_volume, buy_max, buy_min, buy_average, sell_max, sell_min, sell_average, even_max, even_min, even_average = group_tick_data_to_cashflow(data)
            df.loc[len(df)] = [date, symbol, buy_amount, sell_amount, even_amount, buy_volume, sell_volume, even_volume, buy_max, buy_min, buy_average, sell_max, sell_min, sell_average, even_max, even_min, even_average]

        #outMessage = '%s processed in: %.3s seconds' % (date, (time.time() - start))
        #pbar.set_description(outMessage)
        
    df = df.sort_values(['symbol','date'], ascending=[True, True])
    df.to_csv(out_file) 
Example #30
Source File: swarm.py    From fragile with MIT License 5 votes vote down vote up
def get_run_loop(self, show_pbar: bool = None) -> Iterable[int]:
        """
        Return a tqdm progress bar or a regular range iterator.

        If the code is running in an IPython kernel it will also display the \
        internal ``_notebook_container``.

        Args:
            show_pbar: If ``False`` the progress bar will not be displayed.

        Returns:
            A Progressbar if ``show_pbar`` is ``True`` and the code is running \
            in an IPython kernel. If the code is running in a terminal the logging \
            level must be set at least to "INFO". Otherwise return a range iterator \
            for ``self.max_range`` iteration.

        """
        show_pbar = show_pbar if show_pbar is not None else self.show_pbar
        no_tqdm = not (
            show_pbar if self._ipython_mode else self._log.level < logging.WARNING and show_pbar
        )
        if self._ipython_mode:
            from tqdm.notebook import trange
        else:
            from tqdm import trange

        loop_iterable = trange(
            self.max_epochs, desc="%s" % self.__class__.__name__, disable=no_tqdm
        )

        if self._ipython_mode and self._use_notebook_widget:
            from IPython.core.display import display

            display(self._notebook_container)
        return loop_iterable
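
The method above switches between tqdm.notebook.trange and the terminal trange by hand and silences the bar with disable. tqdm.auto.trange performs a similar notebook/terminal selection automatically, so a simpler (hedged) equivalent of the core idea is:

from tqdm.auto import trange  # resolves to the notebook bar in Jupyter, the console bar otherwise

show_pbar = True
for epoch in trange(100, desc="Swarm", disable=not show_pbar):
    pass  # placeholder for one epoch of work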