Python utils.logger.setup_logger() Examples
The following are 8 code examples of utils.logger.setup_logger().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module
utils.logger, or try the search function.
Example #1
Source File: train_val.py From brats_segmentation-pytorch with MIT License | 7 votes |
def main():
    """Train and evaluate the baseline U-Net for BraTS segmentation.

    Builds the data loaders, model, optimizer and metrics from the global
    ``cfg``, sets up per-run log files, and hands everything to
    ``train_val``.  Side effects: creates the log directory and mutates
    ``cfg.TASK_NAME``.
    """
    init_env('1')  # select the GPU/environment before any CUDA work
    loaders = make_data_loaders(cfg)
    model = build_model(cfg)
    model = model.cuda()

    # Derive the run's log directory from the task name and ensure it exists.
    task_name = 'base_unet'
    log_dir = os.path.join(cfg.LOG_DIR, task_name)
    cfg.TASK_NAME = task_name
    mkdir(log_dir)

    # Two named loggers share log_dir.  The 'train' logger records the full
    # config; the 'eval' logger is created last — presumably train_val looks
    # both up again by name, since neither is passed to it (TODO confirm).
    logger = setup_logger('train', log_dir, filename='train.log')
    logger.info(cfg)
    logger = setup_logger('eval', log_dir, filename='eval.log')

    optimizer, scheduler = make_optimizer(cfg, model)
    metrics = get_metrics(cfg)
    losses = get_losses(cfg)
    train_val(model, loaders, optimizer, scheduler, losses, metrics)
Example #2
Source File: train_boundary.py From interfacegan with MIT License | 6 votes |
def main():
    """Train a separation boundary from latent codes and attribute scores.

    Loads the latent codes and their attribute scores from ``.npy`` files
    named on the command line, fits a boundary with ``train_boundary``, and
    saves it to ``<output_dir>/boundary.npy``.

    Raises:
        ValueError: If either input file does not exist.
    """
    args = parse_args()
    logger = setup_logger(args.output_dir, logger_name='generate_data')

    logger.info('Loading latent codes.')
    if not os.path.isfile(args.latent_codes_path):
        raise ValueError(f'Latent codes `{args.latent_codes_path}` does not exist!')
    latent_codes = np.load(args.latent_codes_path)

    logger.info('Loading attribute scores.')
    if not os.path.isfile(args.scores_path):
        raise ValueError(f'Attribute scores `{args.scores_path}` does not exist!')
    scores = np.load(args.scores_path)

    boundary = train_boundary(latent_codes=latent_codes,
                              scores=scores,
                              chosen_num_or_ratio=args.chosen_num_or_ratio,
                              split_ratio=args.split_ratio,
                              invalid_value=args.invalid_value,
                              logger=logger)
    np.save(os.path.join(args.output_dir, 'boundary.npy'), boundary)
Example #3
Source File: test.py From CVWC2019-Amur-Tiger-Re-ID with Apache License 2.0 | 5 votes |
def main():
    """Run ReID baseline inference from a YACS config and CLI overrides.

    Merges the optional ``--config_file`` and trailing ``opts`` key/value
    pairs into the global ``cfg``, prepares the output directory and logger,
    builds the validation data loader and model, loads the test weights, and
    runs ``inference``.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    # Everything after the named flags is forwarded verbatim to cfg.merge_from_list.
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed launchers; default to one GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        # Echo the raw config file into the log for reproducibility.
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True  # fixed input sizes assumed — lets cuDNN pick fast kernels

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    inference(cfg, model, val_loader, num_query)
Example #4
Source File: train.py From CVWC2019-Amur-Tiger-Re-ID with Apache License 2.0 | 5 votes |
def main():
    """Run ReID baseline training from a YACS config and CLI overrides.

    Parses the config file, command-line overrides and the k-fold index,
    merges them into the global ``cfg``, prepares the output directory and
    logger, then launches ``train(cfg)``.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="./configs/tiger.yml", help="path to config file", type=str
    )
    # Everything after the named flags is forwarded verbatim to cfg.merge_from_list.
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    # NOTE: "flod" is a typo for "fold" in the original project; the flag and
    # config key are kept as-is so existing command lines keep working.
    parser.add_argument("--index_flod", help="Index of k-flod", default=3, type=int)  # k-flod
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed launchers; default to one GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.DATASETS.INDEX_FLOD = args.index_flod
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        # Echo the raw config file into the log for reproducibility.
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # new add by gu
    cudnn.benchmark = True  # fixed input sizes assumed — lets cuDNN pick fast kernels

    train(cfg)
Example #5
Source File: train_boundary.py From higan with MIT License | 4 votes |
def main():
    """Train per-layer separation boundaries over multi-layer latent data.

    Loads data of shape ``[num, ..., dim]`` and the matching scores
    (optionally selecting one named score out of a dict), trains one boundary
    per intermediate layer, stacks them, and saves the result under
    ``<output_dir>/<save_name>``.

    Raises:
        ValueError: If an input file is missing or the data has fewer than
            two dimensions.
    """
    args = parse_args()
    work_dir = args.output_dir

    # Normalize the output file name: force a .npy suffix and, when a score
    # name is given, prefix it so runs for different attributes don't collide.
    if args.save_name[-4:] != '.npy':
        save_name = args.save_name + '.npy'
    else:
        save_name = args.save_name
    if args.score_name:
        save_name = args.score_name + '_' + save_name
    logfile_name = args.logfile_name or save_name[:-4] + '.log'
    logger_name = f'boundary_training_logger'
    logger = setup_logger(work_dir, logfile_name, logger_name)

    logger.info(f'Loading data from `{args.data_path}`.')
    if not os.path.isfile(args.data_path):
        raise ValueError(f'Data `{args.data_path}` does not exist!')
    data = np.load(args.data_path)

    logger.info(f'Loading scores from `{args.scores_path}`.')
    if not os.path.isfile(args.scores_path):
        raise ValueError(f'Scores `{args.scores_path}` does not exist!')
    # allow_pickle + [()] unwraps a 0-d object array that may hold a dict.
    scores = np.load(args.scores_path, allow_pickle=True)[()]
    if args.score_name:
        assert isinstance(scores, dict)
        if args.score_name in scores:
            scores = scores[args.score_name]
        else:
            # Fall back to the name->column mapping stored alongside 'score'.
            score_idx = scores['name_to_idx'][args.score_name]
            scores = scores['score'][:, score_idx]

    if data.ndim < 2:
        # "smaples" (sic) — error text kept byte-identical to the original.
        raise ValueError(f'Data should be with shape [num, ..., dim], where `num` '
                         f'is the total number of smaples and `dim` is the space '
                         f'dimension for boundary search.\n'
                         f'But {data.ndim} is received!')

    # Flatten all middle axes into a "layer" axis: [num, layers, dim].
    data_shape = data.shape
    data = data.reshape(data_shape[0], -1, data_shape[-1])
    boundaries = []
    for layer_idx in range(data.shape[1]):
        logger.info(f'==== Layer {layer_idx:02d} ====')
        boundary = train_boundary(data=data[:, layer_idx],
                                  scores=scores,
                                  boundary_type=args.boundary_type,
                                  invalid_value=args.invalid_value,
                                  chosen_num_or_ratio=args.chosen_num_or_ratio,
                                  split_ratio=args.split_ratio,
                                  verbose_test=args.verbose_test,
                                  logger=logger)
        boundaries.append(boundary)

    # Restore the original middle/last axes with a leading batch axis of 1.
    boundaries = np.stack(boundaries, axis=1)
    boundaries = boundaries.reshape(1, *data_shape[1:])
    np.save(os.path.join(work_dir, save_name), boundaries)
Example #6
Source File: generate_data.py From interfacegan with MIT License | 4 votes |
def main():
    """Generate samples (and optional styles/latents) from a pretrained GAN.

    Loads or randomly samples latent codes, synthesizes them batch by batch,
    writes images as JPEGs into ``output_dir`` (BGR conversion for OpenCV),
    and saves every non-image output as one concatenated ``.npy`` per key.

    Raises:
        NotImplementedError: For GAN types other than 'pggan'/'stylegan'.
    """
    args = parse_args()
    logger = setup_logger(args.output_dir, logger_name='generate_data')

    logger.info(f'Initializing generator.')
    gan_type = MODEL_POOL[args.model_name]['gan_type']
    if gan_type == 'pggan':
        model = PGGANGenerator(args.model_name, logger)
        kwargs = {}
    elif gan_type == 'stylegan':
        # StyleGAN needs to know which latent space (z/w/wp) is being used.
        model = StyleGANGenerator(args.model_name, logger)
        kwargs = {'latent_space_type': args.latent_space_type}
    else:
        raise NotImplementedError(f'Not implemented GAN type `{gan_type}`!')

    logger.info(f'Preparing latent codes.')
    if os.path.isfile(args.latent_codes_path):
        logger.info(f' Load latent codes from `{args.latent_codes_path}`.')
        latent_codes = np.load(args.latent_codes_path)
        latent_codes = model.preprocess(latent_codes, **kwargs)
    else:
        logger.info(f' Sample latent codes randomly.')
        latent_codes = model.easy_sample(args.num, **kwargs)
    total_num = latent_codes.shape[0]

    logger.info(f'Generating {total_num} samples.')
    results = defaultdict(list)
    pbar = tqdm(total=total_num, leave=False)
    for latent_codes_batch in model.get_batch_inputs(latent_codes):
        if gan_type == 'pggan':
            outputs = model.easy_synthesize(latent_codes_batch)
        elif gan_type == 'stylegan':
            outputs = model.easy_synthesize(latent_codes_batch,
                                            **kwargs,
                                            generate_style=args.generate_style,
                                            generate_image=args.generate_image)
        for key, val in outputs.items():
            if key == 'image':
                for image in val:
                    # pbar.n doubles as the running sample index for filenames.
                    save_path = os.path.join(args.output_dir, f'{pbar.n:06d}.jpg')
                    cv2.imwrite(save_path, image[:, :, ::-1])  # RGB -> BGR
                    pbar.update(1)
            else:
                results[key].append(val)
        if 'image' not in outputs:
            # No per-image updates happened; advance by the whole batch.
            pbar.update(latent_codes_batch.shape[0])
        if pbar.n % 1000 == 0 or pbar.n == total_num:
            logger.debug(f' Finish {pbar.n:6d} samples.')
    pbar.close()

    logger.info(f'Saving results.')
    for key, val in results.items():
        save_path = os.path.join(args.output_dir, f'{key}.npy')
        np.save(save_path, np.concatenate(val, axis=0))
Example #7
Source File: edit.py From interfacegan with MIT License | 4 votes |
def main():
    """Edit GAN samples by moving latent codes along a trained boundary.

    For each latent code, linearly interpolates between ``start_distance``
    and ``end_distance`` along the boundary normal and writes one JPEG per
    interpolation step into ``output_dir``.  The boundary and latent codes
    used are re-saved there for reproducibility.

    Raises:
        ValueError: If the boundary file does not exist.
        NotImplementedError: For GAN types other than 'pggan'/'stylegan'.
    """
    args = parse_args()
    logger = setup_logger(args.output_dir, logger_name='generate_data')

    logger.info(f'Initializing generator.')
    gan_type = MODEL_POOL[args.model_name]['gan_type']
    if gan_type == 'pggan':
        model = PGGANGenerator(args.model_name, logger)
        kwargs = {}
    elif gan_type == 'stylegan':
        # StyleGAN needs to know which latent space (z/w/wp) is being used.
        model = StyleGANGenerator(args.model_name, logger)
        kwargs = {'latent_space_type': args.latent_space_type}
    else:
        raise NotImplementedError(f'Not implemented GAN type `{gan_type}`!')

    logger.info(f'Preparing boundary.')
    if not os.path.isfile(args.boundary_path):
        raise ValueError(f'Boundary `{args.boundary_path}` does not exist!')
    boundary = np.load(args.boundary_path)
    np.save(os.path.join(args.output_dir, 'boundary.npy'), boundary)

    logger.info(f'Preparing latent codes.')
    if os.path.isfile(args.input_latent_codes_path):
        logger.info(f' Load latent codes from `{args.input_latent_codes_path}`.')
        latent_codes = np.load(args.input_latent_codes_path)
        latent_codes = model.preprocess(latent_codes, **kwargs)
    else:
        logger.info(f' Sample latent codes randomly.')
        latent_codes = model.easy_sample(args.num, **kwargs)
    np.save(os.path.join(args.output_dir, 'latent_codes.npy'), latent_codes)
    total_num = latent_codes.shape[0]

    logger.info(f'Editing {total_num} samples.')
    for sample_id in tqdm(range(total_num), leave=False):
        # One row of latent codes at a time (slice keeps the batch dim).
        interpolations = linear_interpolate(latent_codes[sample_id:sample_id + 1],
                                            boundary,
                                            start_distance=args.start_distance,
                                            end_distance=args.end_distance,
                                            steps=args.steps)
        interpolation_id = 0
        for interpolations_batch in model.get_batch_inputs(interpolations):
            if gan_type == 'pggan':
                outputs = model.easy_synthesize(interpolations_batch)
            elif gan_type == 'stylegan':
                outputs = model.easy_synthesize(interpolations_batch, **kwargs)
            for image in outputs['image']:
                save_path = os.path.join(args.output_dir,
                                         f'{sample_id:03d}_{interpolation_id:03d}.jpg')
                cv2.imwrite(save_path, image[:, :, ::-1])  # RGB -> BGR
                interpolation_id += 1
        # Every requested step must have produced exactly one image.
        assert interpolation_id == args.steps
        logger.debug(f' Finished sample {sample_id:3d}.')
    logger.info(f'Successfully edited {total_num} samples.')
Example #8
Source File: main.py From AnyNet with MIT License | 4 votes |
def main():
    """Train and test AnyNet stereo matching end to end.

    Builds the KITTI-style train/test loaders, the DataParallel model and the
    Adam optimizer from the module-level ``args``, optionally resumes from a
    checkpoint, trains for ``args.epochs`` epochs (checkpointing after each),
    then runs the final test pass.  Mutates ``args.start_epoch``.
    """
    global args

    train_left_img, train_right_img, train_left_disp, test_left_img, test_right_img, test_left_disp = lt.dataloader(
        args.datapath)

    TrainImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(train_left_img, train_right_img, train_left_disp, True),
        batch_size=args.train_bsize, shuffle=True, num_workers=4, drop_last=False)

    TestImgLoader = torch.utils.data.DataLoader(
        DA.myImageFloder(test_left_img, test_right_img, test_left_disp, False),
        batch_size=args.test_bsize, shuffle=False, num_workers=4, drop_last=False)

    if not os.path.isdir(args.save_path):
        os.makedirs(args.save_path)
    log = logger.setup_logger(args.save_path + '/training.log')
    # Record every CLI argument so the run is reproducible from the log alone.
    for key, value in sorted(vars(args).items()):
        log.info(str(key) + ': ' + str(value))

    model = models.anynet.AnyNet(args)
    model = nn.DataParallel(model).cuda()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
    log.info('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))

    args.start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resume):
            log.info("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            # Resume epoch counter, weights and optimizer state together.
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log.info("=> loaded checkpoint '{}' (epoch {})"
                     .format(args.resume, checkpoint['epoch']))
        else:
            # Missing checkpoint is logged, not fatal — training starts fresh.
            log.info("=> no checkpoint found at '{}'".format(args.resume))
            log.info("=> Will start from scratch.")
    else:
        log.info('Not Resume')

    start_full_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        log.info('This is {}-th epoch'.format(epoch))
        train(TrainImgLoader, model, optimizer, log, epoch)
        # Overwrites the same file each epoch — only the latest epoch survives.
        savefilename = args.save_path + '/checkpoint.tar'
        torch.save({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, savefilename)

    test(TestImgLoader, model, log)
    log.info('full training time = {:.2f} Hours'.format((time.time() - start_full_time) / 3600))