Python util.visualizer.Visualizer() Examples

The following are 6 code examples of util.visualizer.Visualizer(), taken from open-source projects. You can go to the original project or source file by following the links above each example, check out all available functions/classes of the module util.visualizer, or try the search function.
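Before the full examples, here is a minimal sketch of how Visualizer is typically wired into a training loop in these projects. It only uses Visualizer calls that appear in the examples below (display_current_results, print_current_errors); the TrainOptions, CreateDataLoader and create_model helpers, the import paths, and the option names (batchSize, display_freq, print_freq, niter, niter_decay) are assumptions borrowed from the pix2pix-style layout these repositories share, not a fixed API.

import time

from options.train_options import TrainOptions   # assumed import path
from data.data_loader import CreateDataLoader    # assumed import path
from models.models import create_model           # assumed import path
from util.visualizer import Visualizer

opt = TrainOptions().parse()
data_loader = CreateDataLoader(opt)
dataset_size = len(data_loader) * opt.batchSize
model = create_model(opt)
visualizer = Visualizer(opt)   # reads the display / HTML settings from opt

total_steps = 0
for epoch in range(1, opt.niter + opt.niter_decay + 1):
    for i, data in enumerate(data_loader):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter = total_steps - dataset_size * (epoch - 1)
        model.update_model(data)   # one optimization step (SingleGAN-style API)

        if total_steps % opt.display_freq == 0:
            # push the current images to the display / HTML page
            visualizer.display_current_results(model.get_current_visuals(), epoch,
                                               ncols=1, save_result=True)
        if total_steps % opt.print_freq == 0:
            errors = model.get_current_errors()
            t = (time.time() - iter_start_time) / opt.batchSize
            # print the current losses and the time per sample
            visualizer.print_current_errors(epoch, epoch_iter, errors, t)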
Example #1
Source File: mtl_test.py    From aerial_mtl with BSD 3-Clause "New" or "Revised" License
def initialize(self, opt):
    self.opt = opt
    # make sure imageSize is a (height, width) pair; a single value is duplicated
    self.opt.imageSize = self.opt.imageSize if len(self.opt.imageSize) == 2 else self.opt.imageSize * 2
    self.gpu_ids = ''
    self.batchSize = self.opt.batchSize
    self.checkpoints_path = os.path.join(self.opt.checkpoints, self.opt.name)
    self.create_save_folders()

    self.netG = self.load_network()
    # st()
    if 'vaihingen' not in self.opt.dataset_name:
        self.data_loader, _ = CreateDataLoader(opt)

    # visualizer
    self.visualizer = Visualizer(self.opt)
    if 'semantics' in self.opt.tasks:
        from util.util import get_color_palette
        # flatten the palette array into a flat Python list
        self.opt.color_palette = np.array(get_color_palette(self.opt.dataset_name))
        self.opt.color_palette = list(self.opt.color_palette.reshape(-1))
Example #2
Source File: train.py    From DMIT with MIT License
def main():
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset_size = len(data_loader) * opt.batch_size
    visualizer = Visualizer(opt)
    model = create_model(opt)    
    start_epoch = model.start_epoch
    total_steps = start_epoch*dataset_size
    for epoch in range(start_epoch+1, opt.niter+opt.niter_decay+1):
        epoch_start_time = time.time()
        model.update_lr()
        save_result = True
        for i, data in enumerate(data_loader):
            iter_start_time = time.time()
            total_steps += opt.batch_size
            epoch_iter = total_steps - dataset_size * (epoch - 1)  # iteration count within the current epoch
            model.prepare_data(data)
            model.update_model()
            if save_result or total_steps % opt.display_freq == 0:
                # refresh the display on the first iteration of each epoch and every display_freq steps
                save_result = save_result or total_steps % opt.update_html_freq == 0
                visualizer.display_current_results(model.get_current_visuals(), epoch, ncols=1, save_result=save_result)
                save_result = False
            if total_steps % opt.print_freq == 0:
                errors = model.get_current_errors()
                t = (time.time() - iter_start_time) / opt.batch_size
                visualizer.print_current_errors(epoch, epoch_iter, errors, t)
                if opt.display_id > 0:
                    visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)
        print('epoch {} cost time {}'.format(epoch, time.time() - epoch_start_time))
        model.save_ckpt(epoch)
        model.save_generator('latest')
        if epoch % opt.save_epoch_freq == 0:
            print('saving the generator at the end of epoch {}, iters {}'.format(epoch, total_steps))
            model.save_generator(epoch) 
Example #3
Source File: test_function.py    From non-stationary_texture_syn with MIT License
def test_func(opt_train, webpage, epoch='latest'):
    # work on a copy so the test settings do not modify the training options
    opt = copy.deepcopy(opt_train)
    print(opt)
    # specify the directory to save the results during training
    opt.results_dir = './results/'
    opt.isTrain = False
    opt.nThreads = 1   # test code only supports nThreads = 1
    opt.batchSize = 1  # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True  # no flip
    opt.dataroot = opt.dataroot + '/test'
    opt.model = 'test'
    opt.dataset_mode = 'single'
    opt.which_epoch = epoch
    opt.how_many = 50
    opt.phase = 'test'
    # opt.name = name

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    visualizer = Visualizer(opt)
    # create website
    # web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    # web_dir = os.path.join(opt.results_dir, opt.name)
    # webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        print('process image... %s' % img_path)
        visualizer.save_images_epoch(webpage, visuals, img_path, epoch)

    webpage.save()
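test_func expects the caller to pass in an html.HTML webpage object; the commented-out lines above show the intended pattern. A minimal sketch of that call site, assuming the util.html helper used by these pix2pix-style repositories (directory layout and title string mirror the commented-out code; the names here are illustrative, not part of the original file):

import os
from util import html   # assumed import path for the HTML index-page helper

opt_train = TrainOptions().parse()                    # training options, as in the other examples
web_dir = os.path.join('./results/', opt_train.name)  # where the result page will be written
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = test, Epoch = latest' % opt_train.name)
test_func(opt_train, webpage, epoch='latest')         # fills the page and calls webpage.save()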
Example #4
Source File: test_model_raster.py    From aerial_mtl with BSD 3-Clause "New" or "Revised" License
def initialize(self, opt):
    # GenericTestModel.initialize(self, opt)
    self.opt = opt
    self.get_color_palette()
    self.opt.imageSize = self.opt.imageSize if len(self.opt.imageSize) == 2 else self.opt.imageSize * 2
    self.gpu_ids = ''
    self.batchSize = self.opt.batchSize
    self.checkpoints_path = os.path.join(self.opt.checkpoints, self.opt.name)
    self.create_save_folders()
    self.opt.use_semantics = (('multitask' in self.opt.model) or ('semantics' in self.opt.model))

    self.netG = self.load_network()
    # self.opt.dfc_preprocessing = 2
    # self.data_loader, _ = CreateDataLoader(opt, Dataset)

    # visualizer
    self.visualizer = Visualizer(self.opt)
    if 'semantics' in self.opt.tasks:
        from util.util import get_color_palette
        self.opt.color_palette = np.array(get_color_palette(self.opt.dataset_name))
        # self.opt.color_palette = list(self.opt.color_palette.reshape(-1))
        # st()

# def initialize(self, opt):
#     GenericTestModel.initialize(self, opt)
#     self.get_color_palette()
Example #5
Source File: base_model.py    From aerial_mtl with BSD 3-Clause "New" or "Revised" License
def initialize(self, opt):
    self.opt = opt
    self.gpu_ids = ''
    self.batchSize = self.opt.batchSize
    self.checkpoints_path = os.path.join(self.opt.checkpoints, self.opt.name)
    self.create_save_folders()

    self.start_epoch = 1
    self.best_val_error = 999.9

    self.criterion_eval = nn.L1Loss()

    self.input = self.get_variable(torch.FloatTensor(self.batchSize, 3, self.opt.imageSize, self.opt.imageSize))
    self.target = self.get_variable(torch.FloatTensor(self.batchSize, 1, self.opt.imageSize, self.opt.imageSize))
    # self.logfile = # ToDo

    # visualizer
    self.visualizer = Visualizer(opt)

    # Logfile
    self.logfile = open(os.path.join(self.checkpoints_path, 'logfile.txt'), 'a')
    if opt.validate:
        self.logfile_val = open(os.path.join(self.checkpoints_path, 'logfile_val.txt'), 'a')

    # Prepare a random seed that will be the same for everyone
    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    if opt.cuda:
        torch.cuda.manual_seed(opt.manualSeed)

    # use the inbuilt cudnn auto-tuner to find the fastest convolution algorithms
    cudnn.benchmark = True
    cudnn.enabled = True

    if not opt.train and not opt.test:
        raise Exception("You have to set --train or --test")

    # note: torch.cuda.is_available is a function and must be called
    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, so you should run WITHOUT --cpu")
    if not torch.cuda.is_available() and opt.cuda:
        raise Exception("No GPU found, run WITH --cpu")
Example #6
Source File: train.py    From SingleGAN with MIT License
def main():
    opt = TrainOptions().parse()
    data_loader = CreateDataLoader(opt)
    dataset_size = len(data_loader) * opt.batchSize
    visualizer = Visualizer(opt)

    model = SingleGAN()
    model.initialize(opt)

    total_steps = 0
    lr = opt.lr
    for epoch in range(1, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        save_result = True
        for i, data in enumerate(data_loader):
            iter_start_time = time.time()
            total_steps += opt.batchSize
            epoch_iter = total_steps - dataset_size * (epoch - 1)
            model.update_model(data)
            
            if save_result or total_steps % opt.display_freq == 0:
                save_result = save_result or total_steps % opt.update_html_freq == 0
                print('mode:{} dataset:{}'.format(opt.mode, opt.name))
                visualizer.display_current_results(model.get_current_visuals(), epoch, ncols=1, save_result=save_result)
                save_result = False
            
            if total_steps % opt.print_freq == 0:
                errors = model.get_current_errors()
                t = (time.time() - iter_start_time) / opt.batchSize
                visualizer.print_current_errors(epoch, epoch_iter, errors, t)
                if opt.display_id > 0:
                    visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)
                    
            if total_steps % opt.save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_steps %d)' %(epoch, total_steps))
                model.save('latest')
                
        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' %(epoch, total_steps))
            model.save('latest')
            model.save(epoch)
            
        # after the first opt.niter epochs, linearly decay the learning rate over opt.niter_decay epochs
        if epoch > opt.niter:
            lr -= opt.lr / opt.niter_decay
            model.update_lr(lr)