Python tensorpack.utils.logger.warn() Examples

The following are 12 code examples of tensorpack.utils.logger.warn(), drawn from open-source projects. The original project and source file for each example are noted above it. You may also want to check out all available functions/classes of the module tensorpack.utils.logger, or try the search function.
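tensorpack.utils.logger exposes the common functions of Python's logging.Logger (info, warn, error, and so on) through tensorpack's preconfigured logger, so logger.warn() simply emits a warning-level message. A minimal sketch of the pattern that recurs in the examples below; the worker count and threshold are illustrative, not taken from any particular project:

from tensorpack.utils import logger

parallel = 8  # hypothetical number of worker processes
if parallel < 16:
    logger.warn("DataFlow may become the bottleneck when too few processes are used.")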
Example #1
Source File: imagenet_utils.py    From ghostnet with Apache License 2.0
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, meta_dir=None, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading
    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, meta_dir=meta_dir, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, meta_dir=meta_dir, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds 
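A hedged usage sketch for get_imagenet_dataflow as defined above (not part of the original file): the dataset directory is a placeholder, and fbresnet_augmentor is assumed to be the augmentor factory defined alongside this function in imagenet_utils.py.

df = get_imagenet_dataflow(
    '/path/to/ILSVRC12', 'train', batch_size=64,
    augmentors=fbresnet_augmentor(True))
df.reset_state()
for images, labels in df:
    # each datapoint is a batch of augmented BGR images plus their integer labels
    break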
Example #2
Source File: imagenet_utils.py    From webvision-2.0-benchmarks with Apache License 2.0
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    meta_dir = os.path.join(datadir, "meta")
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count())
    if isTrain:
        ds = Imagenet5k(datadir, name, meta_dir=meta_dir, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = Imagenet5kFiles(datadir, name, meta_dir=meta_dir, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds 
Example #3
Source File: layer_info.py    From petridishnn with MIT License
def sample_cat_hallucinations(self, layer_ops, merge_ops,
        prob_at_layer=None, min_num_hallus=1, hallu_input_choice=None):
        """
        prob_at_layer : probility of having input from a layer. None is translated
            to default, which sample a layer proportional to its ch_dim. The ch_dim
            is computed using self, as we assume the last op is cat, and the cat
            determines the ch_dim.

        """
        assert self[-1].merge_op == LayerTypes.MERGE_WITH_CAT
        n_inputs = self.num_inputs()
        n_final_merge = len(self[-1].inputs)

        if prob_at_layer is None:
            prob_at_layer = np.ones(len(self) - 1)
            prob_at_layer[:n_inputs-1] = n_final_merge
            prob_at_layer[n_inputs-1] = n_final_merge * 1.5
            prob_at_layer = prob_at_layer / np.sum(prob_at_layer)
        assert len(prob_at_layer) >= len(self) - 1
        if len(prob_at_layer) > len(self) - 1:
            logger.warn("sample cell hallu cuts the prob_at_layer to len(info_list) - 1")
            prob_at_layer = prob_at_layer[:len(self)-1]

        # choose inputs
        n_hallu_inputs = 2
        l_hallu = []
        for _ in range(min_num_hallus):
            # replace=False: do not connect a hallucination to the same layer twice
            in_idxs = np.random.choice(list(range(len(prob_at_layer))),
                size=n_hallu_inputs, replace=False, p=prob_at_layer)
            in_ids = list(map(lambda idx : self[idx].id, in_idxs))
            main_ops = list(map(int, np.random.choice(layer_ops, size=n_hallu_inputs)))
            merge_op = int(np.random.choice(merge_ops))
            hallu = LayerInfo(layer_id=self[-1].id, inputs=in_ids,
                operations=main_ops + [merge_op])
            l_hallu.append(hallu)
        return l_hallu 
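The core of the sampler above is a weighted draw of distinct input layers. A self-contained numpy illustration of that step (the weights are made up; in sample_cat_hallucinations they come from prob_at_layer):

import numpy as np

weights = np.array([2.0, 2.0, 3.0, 1.0, 1.0])   # hypothetical per-layer weights
prob_at_layer = weights / weights.sum()
# draw two distinct layer indices, each chosen with probability proportional to its weight
in_idxs = np.random.choice(len(prob_at_layer), size=2, replace=False, p=prob_at_layer)
print(in_idxs)  # e.g. [2 0]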
Example #4
Source File: tensorpack_extension.py    From deep-voice-conversion with MIT License
def _process(self, grads):
        g = []
        to_print = []
        for grad, var in grads:
            if re.match(self._regex, var.op.name):
                g.append((grad, var))
            else:
                to_print.append(var.op.name)
        if self._verbose and len(to_print):
            message = ', '.join(to_print)
            logger.warn("No gradient w.r.t these trainable variables: {}".format(message))
        return g 
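The _process() hook above is implemented on a tensorpack GradientProcessor. Below is a hedged, self-contained sketch of how such a processor can be defined and attached to an optimizer via apply_grad_processors; the class name, regex, and learning rate are placeholders rather than code from the original project.

import re
import tensorflow as tf
from tensorpack.tfutils import gradproc, optimizer
from tensorpack.utils import logger

class KeepMatchingGradients(gradproc.GradientProcessor):
    """Keep gradients whose variable name matches a regex; warn about the rest."""
    def __init__(self, regex, verbose=True):
        super(KeepMatchingGradients, self).__init__()
        self._regex = regex
        self._verbose = verbose

    def _process(self, grads):
        kept, dropped = [], []
        for grad, var in grads:
            if re.match(self._regex, var.op.name):
                kept.append((grad, var))
            else:
                dropped.append(var.op.name)
        if self._verbose and dropped:
            logger.warn("No gradient w.r.t these trainable variables: {}".format(', '.join(dropped)))
        return kept

def make_optimizer():
    # TF1-style optimizer, matching the era of the projects above
    opt = tf.train.AdamOptimizer(1e-4)
    # apply_grad_processors wraps opt so that _process() filters the (grad, var) pairs
    return optimizer.apply_grad_processors(opt, [KeepMatchingGradients('generator/.*')])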
Example #5
Source File: imagenet_utils.py    From adanet with MIT License
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors, parallel=None):
    """
    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    assert datadir is not None
    assert isinstance(augmentors, list)
    isTrain = name == 'train'
    if parallel is None:
        parallel = min(40, 16)  # assuming hyperthreading
    if isTrain:
        ds1 = ilsvrcsemi.ILSVRC12(datadir, name, shuffle=True, labeled=True)
        ds2 = ilsvrcsemi.ILSVRC12(datadir, name, shuffle=True, labeled=False)
        ds1 = AugmentImageComponent(ds1, augmentors, copy=False)
        ds2 = AugmentImageComponent(ds2, augmentors, copy=False)
        ds = JoinData([ds1, ds2])

        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls, im, cls  # duplicated so the val/test dataflow matches the four components produced by JoinData during training
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
    return ds 
Example #6
Source File: imagenet_utils.py    From tensorpack with Apache License 2.0
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors=None, parallel=None):
    """
    Args:
        augmentors (list[imgaug.Augmentor]): Defaults to `fbresnet_augmentor(isTrain)`

    Returns: A DataFlow which produces BGR images and labels.

    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    isTrain = name == 'train'
    assert datadir is not None
    if augmentors is None:
        augmentors = fbresnet_augmentor(isTrain)
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading

    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = MultiProcessRunnerZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Example #7
Source File: imagenet_utils.py    From tensorpack with Apache License 2.0
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors=None, parallel=None):
    """
    Args:
        augmentors (list[imgaug.Augmentor]): Defaults to `fbresnet_augmentor(isTrain)`

    Returns: A DataFlow which produces BGR images and labels.

    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    isTrain = name == 'train'
    assert datadir is not None
    if augmentors is None:
        augmentors = fbresnet_augmentor(isTrain)
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading

    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = MultiProcessRunnerZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Example #8
Source File: imagenet_utils.py    From tensorpack with Apache License 2.0
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors=None, parallel=None):
    """
    Args:
        augmentors (list[imgaug.Augmentor]): Defaults to `fbresnet_augmentor(isTrain)`

    Returns: A DataFlow which produces BGR images and labels.

    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    isTrain = name == 'train'
    assert datadir is not None
    if augmentors is None:
        augmentors = fbresnet_augmentor(isTrain)
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading

    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = MultiProcessRunnerZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Example #9
Source File: GAN.py    From tensorpack with Apache License 2.0
def __init__(self, input, model, d_period=1, g_period=1):
        """
        Args:
            d_period(int): period of each d_opt run
            g_period(int): period of each g_opt run
        """
        super(SeparateGANTrainer, self).__init__()
        self._d_period = int(d_period)
        self._g_period = int(g_period)
        assert min(d_period, g_period) == 1

        # Setup input
        cbs = input.setup(model.get_input_signature())
        self.register_callback(cbs)

        # Build the graph
        self.tower_func = TowerFunc(model.build_graph, model.inputs())
        with TowerContext('', is_training=True), \
                argscope(BatchNorm, ema_update='internal'):
            # do not hook the EMA updates to both train ops; it would hurt training speed.
            self.tower_func(*input.get_input_tensors())
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if len(update_ops):
            logger.warn("Found {} ops in UPDATE_OPS collection!".format(len(update_ops)))
            logger.warn("Using SeparateGANTrainer with UPDATE_OPS may hurt your training speed a lot!")

        opt = model.get_optimizer()
        with tf.name_scope('optimize'):
            self.d_min = opt.minimize(
                model.d_loss, var_list=model.d_vars, name='d_min')
            self.g_min = opt.minimize(
                model.g_loss, var_list=model.g_vars, name='g_min') 
Example #10
Source File: imagenet_utils.py    From tensorpack with Apache License 2.0
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors=None, parallel=None):
    """
    Args:
        augmentors (list[imgaug.Augmentor]): Defaults to `fbresnet_augmentor(isTrain)`

    Returns: A DataFlow which produces BGR images and labels.

    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    isTrain = name == 'train'
    assert datadir is not None
    if augmentors is None:
        augmentors = fbresnet_augmentor(isTrain)
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading

    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = MultiProcessRunnerZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Example #11
Source File: imagenet_utils.py    From tensorpack with Apache License 2.0
def get_imagenet_dataflow(
        datadir, name, batch_size,
        augmentors=None, parallel=None):
    """
    Args:
        augmentors (list[imgaug.Augmentor]): Defaults to `fbresnet_augmentor(isTrain)`

    Returns: A DataFlow which produces BGR images and labels.

    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html
    """
    assert name in ['train', 'val', 'test']
    isTrain = name == 'train'
    assert datadir is not None
    if augmentors is None:
        augmentors = fbresnet_augmentor(isTrain)
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading

    if isTrain:
        ds = dataset.ILSVRC12(datadir, name, shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logger.warn("DataFlow may become the bottleneck when too few processes are used.")
        ds = MultiProcessRunnerZMQ(ds, parallel)
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, name, shuffle=False)
        aug = imgaug.AugmentorList(augmentors)

        def mapf(dp):
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        ds = BatchData(ds, batch_size, remainder=True)
        ds = MultiProcessRunnerZMQ(ds, 1)
    return ds 
Example #12
Source File: anytime_network.py    From petridishnn with MIT License
def __init__(self, input_size, args):
        super(AnytimeNetwork, self).__init__()
        self.options = args

        self.data_format = args.data_format
        self.ch_dim = 1 if self.data_format == 'channels_first' else 3
        self.h_dim = 1 + int(self.data_format == 'channels_first')
        self.w_dim = self.h_dim + 1

        self.input_size = input_size
        self.network_config = compute_cfg(self.options)
        self.total_units = sum(self.network_config.n_units_per_block)

        # Warn the user if they are using imagenet but do not have the right number of channels
        self.init_channel = args.init_channel
        self.n_blocks = len(self.network_config.n_units_per_block)
        self.cumsum_blocks = np.cumsum(self.network_config.n_units_per_block)
        self.width = args.width
        self.num_classes = self.options.num_classes
        self.alter_label = self.options.alter_label
        self.alter_label_activate_frac = self.options.alter_label_activate_frac
        self.alter_loss_w = self.options.alter_loss_w

        self.options.ls_method = self.options.samloss
        if self.options.ls_method == ADALOSS_LS_METHOD:
            self.options.is_select_arr = True
            self.options.sum_rand_ratio = 0.0
            assert self.options.func_type != FUNC_TYPE_OPT

        self.weights = anytime_loss.loss_weights(self.total_units, self.options,
            cfg=self.network_config.n_units_per_block)
        self.weights_sum = np.sum(self.weights)
        self.ls_K = np.sum(np.asarray(self.weights) > 0)
        logger.info('weights: {}'.format(self.weights))

        # special names and conditions
        self.select_idx_name = "select_idx"

        # (UGLY) due to the history of development: samloss values 1,...,5 require rewards
        self.options.require_rewards = self.options.samloss < 6 and \
            self.options.samloss > 0

        if self.options.func_type == FUNC_TYPE_OPT \
            and self.options.ls_method != NO_AANN_METHOD:
            # special case: if we are computing optimal, don't do AANN
            logger.warn("Computing optimal requires not running AANN."\
                +" Setting samloss to be {}".format(NO_AANN_METHOD))
            self.options.ls_method = NO_AANN_METHOD
            self.options.samloss = NO_AANN_METHOD

        self.input_type = tf.float32 if self.options.input_type == 'float32' else tf.uint8
        if self.options.do_mean_std_gpu_process:
            if not hasattr(self.options, 'mean'):
                raise Exception('gpu_graph expects mean but it is not in the options')
            if not hasattr(self.options, 'std'):
                raise Exception('gpu_graph expects std, but it is not in the options')

        logger.info('the final options: {}'.format(self.options))