Python util.util.util() Examples

The following are 30 code examples of util.util.util(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module util.util, or try the search function.
Example #1
Source File: ui_model.py From EverybodyDanceNow_reproduce_pytorch with MIT License | 6 votes |
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    """Paste an object of class `label_tgt` at the clicked position.

    Updates the label map, the one-hot network input, the instance map and
    the feature map, then re-runs the generator to refresh `self.fake_image`.
    # assumes mask arrives as an HWC numpy array — TODO confirm against caller
    """
    y, x = click_src[0], click_src[1]
    # HWC -> CHW, then add a batch dim so indices come out as (n, c, h, w)
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
    # indices of the non-zero mask pixels, shifted to the click position
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:,2] += y
    idx_src[:,3] += x
    # backup current maps
    self.backup_current_state()
    # update label map
    self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # clear all one-hot label channels in the region, then set the target one
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
    self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
    # update instance map
    self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # last input channel holds the instance edge map
    self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
    # update feature map
    self.set_features(idx_src, self.feat, style_id)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #2
Source File: ui_model.py From deep-learning-for-document-dewarping with MIT License | 6 votes |
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    """Paste an object of class `label_tgt` at the clicked position.

    Updates the label map, the one-hot network input, the instance map and
    the feature map, then re-runs the generator to refresh `self.fake_image`.
    # assumes mask arrives as an HWC numpy array — TODO confirm against caller
    """
    y, x = click_src[0], click_src[1]
    # HWC -> CHW, then add a batch dim so indices come out as (n, c, h, w)
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
    # indices of the non-zero mask pixels, shifted to the click position
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:,2] += y
    idx_src[:,3] += x
    # backup current maps
    self.backup_current_state()
    # update label map
    self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # clear all one-hot label channels in the region, then set the target one
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
    self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
    # update instance map
    self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # last input channel holds the instance edge map
    self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
    # update feature map
    self.set_features(idx_src, self.feat, style_id)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #3
Source File: innerResShiftTriple.py From Shift-Net_pytorch with MIT License | 6 votes |
def forward(self, input):
    """Shift-and-fuse forward pass.

    Applies the shift operation, splits its output into the three feature
    groups (F_c, F_s, F_shift), fuses F_c with F_shift through a residual
    net, and returns the concatenation of the refined F_c with F_s.
    """
    #print(input.shape)
    _, self.c, self.h, self.w = input.size()
    # per-patch validity flags derived from the mask and threshold
    self.flag = util.cal_flag_given_mask_thred(self.mask, self.shift_sz, self.stride, self.mask_thred)
    shift_out = InnerShiftTripleFunction.apply(input, self.shift_sz, self.stride, self.triple_weight, self.flag, self.show_flow)
    c_out = shift_out.size(1)
    # get F_c, F_s, F_shift — three equal channel thirds of shift_out
    F_c = shift_out.narrow(1, 0, c_out//3)
    F_s = shift_out.narrow(1, c_out//3, c_out//3)
    F_shift = shift_out.narrow(1, c_out*2//3, c_out//3)
    # element-wise fusion, then residual refinement of F_c
    F_fuse = F_c * F_shift
    F_com = torch.cat([F_c, F_fuse], dim=1)
    res_out = self.res_net(F_com)
    F_c = F_c + res_out
    final_out = torch.cat([F_c, F_s], dim=1)
    if self.show_flow:
        # keep the flow sources around for visualization
        self.flow_srcs = InnerShiftTripleFunction.get_flow_src()
    return final_out
Example #4
Source File: test_model.py From non-stationary_texture_syn with MIT License | 6 votes |
def recurrent_test(self, step=5):
    """Repeatedly feed the generator its own output, cropping from the
    center each round, and collect (name, image) pairs for every step.

    Returns an OrderedDict mapping 'real_{i}_A' / 'fake_{i}_B' to images.
    # NOTE(review): crop of size (height, width) starting at the center
    # presumably relies on the generator output being 2x the input — confirm
    """
    input_size = self.input_A.cpu().shape
    width,height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    results.append(('real_{}_A'.format(0), real_A))
    results.append(('fake_{}_B'.format(0), fake_B))
    for i in range(1, step):
        # rw = random.randint(0, width)
        # rh = random.randint(0, height)
        # fixed center crop instead of the random one above
        rw = int(width/2)
        rh = int(height/2)
        self.real_A = Variable(self.fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_{}_A'.format(i), real_A))
        results.append(('fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #5
Source File: ui_model.py From everybody_dance_now_pytorch with GNU Affero General Public License v3.0 | 6 votes |
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    """Paste an object of class `label_tgt` at the clicked position.

    Updates the label map, the one-hot network input, the instance map and
    the feature map, then re-runs the generator to refresh `self.fake_image`.
    # assumes mask arrives as an HWC numpy array — TODO confirm against caller
    """
    y, x = click_src[0], click_src[1]
    # HWC -> CHW, then add a batch dim so indices come out as (n, c, h, w)
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
    # indices of the non-zero mask pixels, shifted to the click position
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:,2] += y
    idx_src[:,3] += x
    # backup current maps
    self.backup_current_state()
    # update label map
    self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # clear all one-hot label channels in the region, then set the target one
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
    self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
    # update instance map
    self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # last input channel holds the instance edge map
    self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
    # update feature map
    self.set_features(idx_src, self.feat, style_id)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #6
Source File: test_model.py From non-stationary_texture_syn with MIT License | 6 votes |
def recurrent_test_l2_searching(self, step=5):
    """Like recurrent_test, but each round picks the crop offset via
    `self.l2_searching` instead of a fixed center crop.

    Returns an OrderedDict of 'l2_search_real_*' / 'l2_search_fake_*' images.
    """
    input_size = self.input_A.cpu().shape
    width,height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    results.append(('l2_search_real_{}_A'.format(0), real_A))
    results.append(('l2_search_fake_{}_B'.format(0), fake_B))
    for i in range(1, step):
        # rw = random.randint(0, width)
        # rh = random.randint(0, height)
        # choose the crop offset by L2 search over the current pair
        rw, rh = self.l2_searching(self.real_A.clone(), self.fake_B.clone())
        print("end selection: ", rw, rh)
        self.real_A = Variable(self.fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('l2_search_real_{}_{}_{}_A'.format(i, rw, rh), real_A))
        results.append(('l2_search_fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #7
Source File: test_model.py From non-stationary_texture_syn with MIT License | 6 votes |
def random_crop(self, crop_patch=6):
    """Generate `crop_patch` outputs by feeding random crops of the first
    fake output back through the generator.

    Returns an OrderedDict of named real/fake images for each crop.
    """
    input_size = self.input_A.cpu().shape
    width, height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    self.fake_B = self.netG.forward(self.real_A)
    # keep the first generation; all crops are taken from it
    src_fake_B = self.fake_B.clone()
    real_A = util.tensor2im(self.real_A.data)
    fake_B = util.tensor2im(self.fake_B.data)
    results.append(('real_A', real_A))
    results.append(('fake_{}_B'.format('src'), fake_B))
    for i in range(0, crop_patch):
        # random top-left corner of an (height, width) crop
        rw = random.randint(0, width)
        rh = random.randint(0, height)
        self.real_A = Variable(src_fake_B.data[:, :, rh:rh + height, rw:rw + width], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('real_{}_{}_{}_A'.format(i, rw, rh), real_A))
        results.append(('fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #8
Source File: test_model.py From non-stationary_texture_syn with MIT License | 6 votes |
def random_crop_256x256(self, crop_patch=6):
    """Generate outputs from `crop_patch` random 256x256 crops of the
    (real) input image, one generator pass per crop.

    Returns an OrderedDict of named real/fake images for each crop.
    """
    input_size = self.input_A.cpu().shape
    width, height = input_size[3], input_size[2]
    results = []
    self.real_A = Variable(self.input_A, volatile=True)
    # crops are taken from the real input, not from a generated image
    real_A_src = self.real_A.clone()
    # self.fake_B = self.netG.forward(self.real_A)
    # src_fake_B = self.fake_B.clone()
    real_A = util.tensor2im(self.real_A.data)
    # fake_B = util.tensor2im(self.fake_B.data)
    results.append(('real_A', real_A))
    # results.append(('fake_{}_B'.format('src'), fake_B))
    for i in range(0, crop_patch):
        # random top-left corner keeping the 256x256 crop inside the image
        rw = random.randint(0, width - 256)
        rh = random.randint(0, height - 256)
        self.real_A = Variable(real_A_src.data[:, :, rh:rh + 256, rw:rw + 256], volatile=True)
        self.fake_B = self.netG.forward(self.real_A)
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        results.append(('256_real_{}_{}_{}_A'.format(i, rw, rh), real_A))
        results.append(('512_fake_{}_B'.format(i), fake_B))
    return OrderedDict(results)
Example #9
Source File: sparse_wgangp_pix2pix_model.py From iSketchNFill with GNU General Public License v3.0 | 6 votes |
def get_latent_space_visualization(self,num_interpolate=20,label_1=-1,label_2=-1):
    """Interpolate the generator's gate outputs between two class labels
    and render `num_interpolate` intermediate fakes.

    label_1/label_2 default (-1) to the current label and the target label.
    Returns an OrderedDict with the real A/B images plus one fake per step.
    """
    rand_perm = np.random.permutation( self.opt.n_classes )
    if label_1 == -1:
        label_1 = self.label[0] #rand_perm[0]
    if label_2 == -1:
        label_2 = self.opt.target_label #rand_perm[1]
    # blending coefficients from 0 to 1 inclusive
    alpha_blends = np.linspace(0,1,num_interpolate)
    # gate activations for each endpoint label
    self.label[0] = label_1
    output_gate_1 = self.netG.forward_gate(self.label)
    self.label[0] = label_2
    output_gate_2 = self.netG.forward_gate(self.label)
    results={}
    results['latent_real_A']=util.tensor2im(self.real_A.data)
    results['latent_real_B']=util.tensor2im(self.real_B.data)
    for i in range(num_interpolate):
        alpha_blend = alpha_blends[i]
        # linear blend of the two gates, then run the main generator
        output_gate = output_gate_1*alpha_blend + output_gate_2*(1-alpha_blend)
        self.fake_B = self.netG.forward_main( self.real_A,output_gate)
        results['%d_L_fake_B_inter'%(i)]=util.tensor2im(self.fake_B.data)
    return OrderedDict(results)
Example #10
Source File: InnerFaceShiftTriple.py From Shift-Net_pytorch with MIT License | 6 votes |
def forward(self, input, flip_feat=None):
    """Face-shift forward pass using both the mask and its horizontal flip.

    Returns (final_out, innerFeat) where innerFeat is the second channel
    half of the input, kept for later use by the caller.
    """
    self.bz, self.c, self.h, self.w = input.size()
    if self.device != 'cpu':
        # split the batched mask across devices/batch entries
        self._split_mask(self.bz)
    else:
        self.cur_mask = self.mask_all
    self.mask = self.cur_mask
    # horizontal flip (width axis) of the mask for the symmetric shift
    self.mask_flip = torch.flip(self.mask, [3])
    self.flag = util.cal_flag_given_mask_thred(self.mask, self.shift_sz, self.stride, self.mask_thred)
    self.flag_flip = util.cal_flag_given_mask_thred(self.mask_flip, self.shift_sz, self.stride, self.mask_thred)
    final_out = InnerFaceShiftTripleFunction.apply(input, self.shift_sz, self.stride, self.triple_weight, self.flag, self.flag_flip, self.show_flow, flip_feat)
    if self.show_flow:
        self.flow_srcs = InnerFaceShiftTripleFunction.get_flow_src()
    # second half of the channels, detached as a fresh tensor via clone()
    innerFeat = input.clone().narrow(1, self.c//2, self.c//2)
    return final_out, innerFeat
Example #11
Source File: ui_model.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License | 6 votes |
def add_objects(self, click_src, label_tgt, mask, style_id=0):
    """Paste an object of class `label_tgt` at the clicked position.

    Updates the label map, the one-hot network input, the instance map and
    the feature map, then re-runs the generator to refresh `self.fake_image`.
    # assumes mask arrives as an HWC numpy array — TODO confirm against caller
    """
    y, x = click_src[0], click_src[1]
    # HWC -> CHW, then add a batch dim so indices come out as (n, c, h, w)
    mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
    # indices of the non-zero mask pixels, shifted to the click position
    idx_src = torch.from_numpy(mask).cuda().nonzero()
    idx_src[:,2] += y
    idx_src[:,3] += x
    # backup current maps
    self.backup_current_state()
    # update label map
    self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # clear all one-hot label channels in the region, then set the target one
    for k in range(self.opt.label_nc):
        self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
    self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
    # update instance map
    self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
    # last input channel holds the instance edge map
    self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
    # update feature map
    self.set_features(idx_src, self.feat, style_id)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
Example #12
Source File: CycleGAN.py From Bayesian-CycleGAN with MIT License | 6 votes |
def get_current_visuals(self):
    """Convert the six cycle tensors to displayable images.

    Returns an OrderedDict keyed real_A, fake_B, rec_A, real_B, fake_A, rec_B.
    """
    names = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']
    tensors = [self.input_A, self.fake_B, self.rec_A,
               self.input_B, self.fake_A, self.rec_B]
    return OrderedDict(
        (name, util.tensor2im(tensor)) for name, tensor in zip(names, tensors)
    )
Example #13
Source File: label_channel_gated_pix2pix_model.py From iSketchNFill with GNU General Public License v3.0 | 6 votes |
def get_latent_space_visualization(self,num_interpolate=20,label_1=-1,label_2=-1):
    """Interpolate the generator's gate outputs between two class labels
    and render `num_interpolate` intermediate fakes.

    label_1/label_2 default (-1) to the current label and the target label.
    Returns an OrderedDict with the real A/B images plus one fake per step.
    """
    rand_perm = np.random.permutation( self.opt.n_classes )
    if label_1 == -1:
        label_1 = self.label[0] #rand_perm[0]
    if label_2 == -1:
        label_2 = self.opt.target_label #rand_perm[1]
    # blending coefficients from 0 to 1 inclusive
    alpha_blends = np.linspace(0,1,num_interpolate)
    # gate activations for each endpoint label
    self.label[0] = label_1
    output_gate_1 = self.netG.forward_gate(self.label)
    self.label[0] = label_2
    output_gate_2 = self.netG.forward_gate(self.label)
    results={}
    results['latent_real_A']=util.tensor2im(self.real_A.data)
    results['latent_real_B']=util.tensor2im(self.real_B.data)
    for i in range(num_interpolate):
        alpha_blend = alpha_blends[i]
        # linear blend of the two gates, then run the main generator
        output_gate = output_gate_1*alpha_blend + output_gate_2*(1-alpha_blend)
        self.fake_B = self.netG.forward_main( self.real_A,output_gate)
        results['%d_L_fake_B_inter'%(i)]=util.tensor2im(self.fake_B.data)
    return OrderedDict(results)
Example #14
Source File: CycleGAN_bayes_z.py From Bayesian-CycleGAN with MIT License | 6 votes |
def get_current_visuals(self):
    """Convert the six cycle tensors to displayable images.

    Returns an OrderedDict keyed real_A, fake_B, rec_A, real_B, fake_A, rec_B.
    """
    names = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']
    tensors = [self.input_A, self.fake_B, self.rec_A,
               self.input_B, self.fake_A, self.rec_B]
    return OrderedDict(
        (name, util.tensor2im(tensor)) for name, tensor in zip(names, tensors)
    )
Example #15
Source File: CycleGAN_bayes.py From Bayesian-CycleGAN with MIT License | 6 votes |
def get_current_visuals(self):
    """Convert the six cycle tensors to displayable images.

    Returns an OrderedDict keyed real_A, fake_B, rec_A, real_B, fake_A, rec_B.
    """
    names = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']
    tensors = [self.input_A, self.fake_B, self.rec_A,
               self.input_B, self.fake_A, self.rec_B]
    return OrderedDict(
        (name, util.tensor2im(tensor)) for name, tensor in zip(names, tensors)
    )
Example #16
Source File: Gen_final_v1.py From Talking-Face-Generation-DAVS with MIT License | 6 votes |
def get_current_visuals(self):
    """Collect the real input, and per-frame real/audio-fake/image-fake
    images for the whole sequence, in display order.

    Returns an OrderedDict: real_A first, then real_B_i, fake_audio_B_i,
    fake_image_B_i for each frame i of the sequence.
    """
    # reshape flat generator outputs to (batch, seq, C, H, W)
    fake_B_audio = self.audio_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size, self.opt.image_size, self.opt.image_size)
    fake_B_image = self.image_gen_fakes.view(-1, self.opt.sequence_length, self.opt.image_channel_size, self.opt.image_size, self.opt.image_size)
    real_A = util.tensor2im(self.real_A.data)
    oderdict = OrderedDict([('real_A', real_A)])
    fake_audio_B = {}
    fake_image_B = {}
    real_B = {}
    for i in range(self.opt.sequence_length):
        # convert frame i of each stream to an image
        fake_audio_B[i] = util.tensor2im(fake_B_audio[:, i, :, :, :].data)
        fake_image_B[i] = util.tensor2im(fake_B_image[:, i, :, :, :].data)
        real_B[i] = util.tensor2im(self.real_videos[:, i, :, :, :].data)
        oderdict['real_B_' + str(i)] = real_B[i]
        oderdict['fake_audio_B_' + str(i)] = fake_audio_B[i]
        oderdict['fake_image_B_' + str(i)] = fake_image_B[i]
    return oderdict
Example #17
Source File: pix2pixHD_condImg_model.py From neurips18_hierchical_image_manipulation with MIT License | 5 votes |
def get_current_visuals(self):
    """Return the label map plus input/real/synthesized images for display."""
    visuals = OrderedDict()
    visuals['input_label'] = util.tensor2label(self.input_label, self.opt.label_nc)
    visuals['input_image'] = util.tensor2im(self.input_image)
    visuals['real_image'] = util.tensor2im(self.real_image)
    visuals['synthesized_image'] = util.tensor2im(self.fake_image)
    return visuals
Example #18
Source File: pix2pixHD_condImgColor_model.py From neurips18_hierchical_image_manipulation with MIT License | 5 votes |
def get_current_visuals(self):
    """Return the label map plus input/real/synthesized images for display."""
    visuals = OrderedDict()
    visuals['input_label'] = util.tensor2label(self.input_label, self.opt.label_nc)
    visuals['input_image'] = util.tensor2im(self.input_image)
    visuals['real_image'] = util.tensor2im(self.real_image)
    visuals['synthesized_image'] = util.tensor2im(self.fake_image)
    return visuals
Example #19
Source File: innerPatchSoftShiftTriple.py From Shift-Net_pytorch with MIT License | 5 votes |
def set_mask(self, mask_global):
    """Derive this layer's feature-level mask from the global mask, cache
    it on the instance, and return it."""
    self.mask = util.cal_feat_mask(mask_global, self.layer_to_last)
    return self.mask
# If mask changes, then need to set cal_fix_flag true each iteration.
Example #20
Source File: combogan_model.py From ToDayGAN with BSD 2-Clause "Simplified" License | 5 votes |
def get_current_visuals(self, testing=False):
    """Convert the cached visual tensors to images for display.

    When not testing, first refresh the cached tensor/label lists from the
    current cycle state; when testing, reuse whatever was cached before.
    """
    if not testing:
        self.labels = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']
        self.visuals = [self.real_A, self.fake_B, self.rec_A,
                        self.real_B, self.fake_A, self.rec_B]
    return OrderedDict(
        (label, util.tensor2im(visual.data))
        for label, visual in zip(self.labels, self.visuals)
    )
Example #21
Source File: face_shiftnet_model.py From Shift-Net_pytorch with MIT License | 5 votes |
def backward_D(self): fake_B = self.fake_B # Real real_B = self.real_B # GroundTruth # Has been verfied, for square mask, let D discrinate masked patch, improves the results. if self.opt.mask_type == 'center' or self.opt.mask_sub_type == 'rect': # Using the cropped fake_B as the input of D. fake_B = self.fake_B[:, :, self.rand_t:self.rand_t+self.opt.fineSize//2-2*self.opt.overlap, \ self.rand_l:self.rand_l+self.opt.fineSize//2-2*self.opt.overlap] real_B = self.real_B[:, :, self.rand_t:self.rand_t+self.opt.fineSize//2-2*self.opt.overlap, \ self.rand_l:self.rand_l+self.opt.fineSize//2-2*self.opt.overlap] self.pred_fake = self.netD(fake_B.detach()) self.pred_real = self.netD(real_B) if self.opt.gan_type == 'wgan_gp': gradient_penalty, _ = util.cal_gradient_penalty(self.netD, real_B, fake_B.detach(), self.device, constant=1, lambda_gp=self.opt.gp_lambda) self.loss_D_fake = torch.mean(self.pred_fake) self.loss_D_real = -torch.mean(self.pred_real) self.loss_D = self.loss_D_fake + self.loss_D_real + gradient_penalty else: if self.opt.gan_type in ['vanilla', 'lsgan']: self.loss_D_fake = self.criterionGAN(self.pred_fake, False) self.loss_D_real = self.criterionGAN (self.pred_real, True) self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 elif self.opt.gan_type == 're_s_gan': self.loss_D = self.criterionGAN(self.pred_real - self.pred_fake, True) elif self.opt.gan_type == 're_avg_gan': self.loss_D = (self.criterionGAN (self.pred_real - torch.mean(self.pred_fake), True) \ + self.criterionGAN (self.pred_fake - torch.mean(self.pred_real), False)) / 2. # for `re_avg_gan`, need to retain graph of D. if self.opt.gan_type == 're_avg_gan': self.loss_D.backward(retain_graph=True) else: self.loss_D.backward()
Example #22
Source File: InnerFaceShiftTriple.py From Shift-Net_pytorch with MIT License | 5 votes |
def set_mask(self, mask_global):
    """Cache the feature-level mask derived from the global image mask."""
    self.mask_all = util.cal_feat_mask(mask_global, self.layer_to_last)
Example #23
Source File: combogan_model.py From ComboGAN with BSD 2-Clause "Simplified" License | 5 votes |
def get_current_visuals(self, testing=False):
    """Convert the cached visual tensors to images for display.

    When not testing, first refresh the cached tensor/label lists from the
    current cycle state; when testing, reuse whatever was cached before.
    """
    if not testing:
        self.labels = ['real_A', 'fake_B', 'rec_A', 'real_B', 'fake_A', 'rec_B']
        self.visuals = [self.real_A, self.fake_B, self.rec_A,
                        self.real_B, self.fake_A, self.rec_B]
    return OrderedDict(
        (label, util.tensor2im(visual.data))
        for label, visual in zip(self.labels, self.visuals)
    )
Example #24
Source File: InnerCos.py From Shift-Net_pytorch with MIT License | 5 votes |
def set_mask(self, mask_global):
    """Derive the feature-level mask from the global mask and cache it as float."""
    self.mask_all = util.cal_feat_mask(mask_global, self.layer_to_last).float()
Example #25
Source File: ui_model.py From deep-learning-for-document-dewarping with MIT License | 5 votes |
def add_strokes(self, click_src, label_tgt, bw, save):
    """Paint a square brush stroke of class `label_tgt` at the click.

    Updates the label map, one-hot input, instance map (and features when
    instance features are enabled), then re-runs the generator.
    save: when True, back up the current maps before editing (undo support).
    """
    # get the region of the new strokes (bw is the brush width)
    size = self.net_input.size()
    h, w = size[2], size[3]
    # bw*bw rows of (n, c, h, w) indices covering the brush square
    idx_src = torch.LongTensor(bw**2, 4).fill_(0)
    for i in range(bw):
        # clamp rows/cols to the image bounds
        idx_src[i*bw:(i+1)*bw, 2] = min(h-1, max(0, click_src[0]-bw//2 + i))
        for j in range(bw):
            idx_src[i*bw+j, 3] = min(w-1, max(0, click_src[1]-bw//2 + j))
    idx_src = idx_src.cuda()
    # again, need to update 3 things
    if idx_src.shape:
        # backup current maps
        if save:
            self.backup_current_state()
        # update the label map (and the network input) in the stroke region
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
        # update the instance map (and the network input)
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
        # also update the features if available
        if self.opt.instance_feat:
            feat = self.features_clustered[label_tgt]
            #np.random.seed(label_tgt+1)
            #cluster_idx = np.random.randint(0, feat.shape[0])
            cluster_idx = self.cluster_indices[label_tgt]
            self.set_features(idx_src, feat, cluster_idx)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
# add an object to the clicked position with selected style
Example #26
Source File: ui_model.py From deep-learning-for-document-dewarping with MIT License | 5 votes |
def change_labels(self, click_src, click_tgt):
    """Relabel the whole instance under `click_src` with the label of the
    pixel under `click_tgt`, then re-run the generator.

    Updates the label map, one-hot input, instance map (allocating a new
    instance id when ids encode label*1000+idx) and copies features.
    """
    y_src, x_src = click_src[0], click_src[1]
    y_tgt, x_tgt = click_tgt[0], click_tgt[1]
    label_src = int(self.label_map[0, 0, y_src, x_src])
    inst_src = self.inst_map[0, 0, y_src, x_src]
    label_tgt = int(self.label_map[0, 0, y_tgt, x_tgt])
    inst_tgt = self.inst_map[0, 0, y_tgt, x_tgt]
    # all pixels belonging to the clicked source instance
    idx_src = (self.inst_map == inst_src).nonzero()
    # need to change 3 things: label map, instance map, and feature map
    if idx_src.shape:
        # backup current maps
        self.backup_current_state()
        # change both the label map and the network input
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[idx_src[:,0], idx_src[:,1] + label_src, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
        # update the instance map (and the network input)
        if inst_tgt > 1000:
            # if different instances have different ids, give the new object a new id
            tgt_indices = (self.inst_map > label_tgt * 1000) & (self.inst_map < (label_tgt+1) * 1000)
            inst_tgt = self.inst_map[tgt_indices].max() + 1
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = inst_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
        # also copy the source features to the target position
        idx_tgt = (self.inst_map == inst_tgt).nonzero()
        if idx_tgt.shape:
            self.copy_features(idx_src, idx_tgt[0,:])
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
# add strokes of target label in the image
Example #27
Source File: ui_model.py From EverybodyDanceNow_reproduce_pytorch with MIT License | 5 votes |
def get_current_visuals(self, getLabel=False):
    """Return the current fake image and mask (and optionally the label
    map) as an OrderedDict for the UI."""
    mask = self.mask
    if self.mask is not None:
        # CHW tensor -> HWC uint8 numpy image
        mask = np.transpose(self.mask[0].cpu().float().numpy(), (1,2,0)).astype(np.uint8)
    dict_list = [('fake_image', self.fake_image), ('mask', mask)]
    if getLabel: # only output label map if needed to save bandwidth
        label = util.tensor2label(self.net_input.data[0], self.opt.label_nc)
        dict_list += [('label', label)]
    return OrderedDict(dict_list)
Example #28
Source File: ui_model.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License | 5 votes |
def change_labels(self, click_src, click_tgt):
    """Relabel the whole instance under `click_src` with the label of the
    pixel under `click_tgt`, then re-run the generator.

    Updates the label map, one-hot input, instance map (allocating a new
    instance id when ids encode label*1000+idx) and copies features.
    """
    y_src, x_src = click_src[0], click_src[1]
    y_tgt, x_tgt = click_tgt[0], click_tgt[1]
    label_src = int(self.label_map[0, 0, y_src, x_src])
    inst_src = self.inst_map[0, 0, y_src, x_src]
    label_tgt = int(self.label_map[0, 0, y_tgt, x_tgt])
    inst_tgt = self.inst_map[0, 0, y_tgt, x_tgt]
    # all pixels belonging to the clicked source instance
    idx_src = (self.inst_map == inst_src).nonzero()
    # need to change 3 things: label map, instance map, and feature map
    if idx_src.shape:
        # backup current maps
        self.backup_current_state()
        # change both the label map and the network input
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[idx_src[:,0], idx_src[:,1] + label_src, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
        # update the instance map (and the network input)
        if inst_tgt > 1000:
            # if different instances have different ids, give the new object a new id
            tgt_indices = (self.inst_map > label_tgt * 1000) & (self.inst_map < (label_tgt+1) * 1000)
            inst_tgt = self.inst_map[tgt_indices].max() + 1
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = inst_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
        # also copy the source features to the target position
        idx_tgt = (self.inst_map == inst_tgt).nonzero()
        if idx_tgt.shape:
            self.copy_features(idx_src, idx_tgt[0,:])
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
# add strokes of target label in the image
Example #29
Source File: ui_model.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License | 5 votes |
def add_strokes(self, click_src, label_tgt, bw, save):
    """Paint a square brush stroke of class `label_tgt` at the click.

    Updates the label map, one-hot input, instance map (and features when
    instance features are enabled), then re-runs the generator.
    save: when True, back up the current maps before editing (undo support).
    """
    # get the region of the new strokes (bw is the brush width)
    size = self.net_input.size()
    h, w = size[2], size[3]
    # bw*bw rows of (n, c, h, w) indices covering the brush square
    idx_src = torch.LongTensor(bw**2, 4).fill_(0)
    for i in range(bw):
        # clamp rows/cols to the image bounds
        idx_src[i*bw:(i+1)*bw, 2] = min(h-1, max(0, click_src[0]-bw//2 + i))
        for j in range(bw):
            idx_src[i*bw+j, 3] = min(w-1, max(0, click_src[1]-bw//2 + j))
    idx_src = idx_src.cuda()
    # again, need to update 3 things
    if idx_src.shape:
        # backup current maps
        if save:
            self.backup_current_state()
        # update the label map (and the network input) in the stroke region
        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        for k in range(self.opt.label_nc):
            self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
        # update the instance map (and the network input)
        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
        # also update the features if available
        if self.opt.instance_feat:
            feat = self.features_clustered[label_tgt]
            #np.random.seed(label_tgt+1)
            #cluster_idx = np.random.randint(0, feat.shape[0])
            cluster_idx = self.cluster_indices[label_tgt]
            self.set_features(idx_src, feat, cluster_idx)
    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
# add an object to the clicked position with selected style
Example #30
Source File: ui_model.py From EverybodyDanceNow-Temporal-FaceGAN with MIT License | 5 votes |
def get_current_visuals(self, getLabel=False):
    """Return the current fake image and mask (and optionally the label
    map) as an OrderedDict for the UI."""
    mask = self.mask
    if self.mask is not None:
        # CHW tensor -> HWC uint8 numpy image
        mask = np.transpose(self.mask[0].cpu().float().numpy(), (1,2,0)).astype(np.uint8)
    dict_list = [('fake_image', self.fake_image), ('mask', mask)]
    if getLabel: # only output label map if needed to save bandwidth
        label = util.tensor2label(self.net_input.data[0], self.opt.label_nc)
        dict_list += [('label', label)]
    return OrderedDict(dict_list)