Python chainer.functions.mean_absolute_error() Examples

The following are 23 code examples of chainer.functions.mean_absolute_error(). The original project and source file are noted above each example. You may also want to check out all available functions and classes of the module chainer.functions.
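Before the project examples, here is a minimal usage sketch (the arrays and values are illustrative, not taken from any project below). mean_absolute_error(x0, x1) returns the mean of |x0 - x1| over all elements as a scalar Variable; both inputs must have the same shape and the same floating-point dtype, and, as the tests in Examples #14 and #15 show, mixed or integer dtypes raise type_check.InvalidType.

import numpy as np
import chainer
import chainer.functions as F

x0 = chainer.Variable(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
x1 = chainer.Variable(np.array([[1.5, 2.0], [2.0, 4.0]], dtype=np.float32))

loss = F.mean_absolute_error(x0, x1)  # mean of |x0 - x1| over all elements
print(loss.data)  # 0.375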
Example #1
Source File: updater.py    From chainer-partial_convolution_image_inpainting with MIT License
def calc_loss_style(hout_dict,hcomp_dict,hgt_dict):
    layers = hgt_dict.keys()
    for i,layer_name in enumerate(layers):
        B,C,H,W = hout_dict[layer_name].shape
        hout = F.reshape(hout_dict[layer_name],(B,C,H*W))
        hcomp = F.reshape(hcomp_dict[layer_name],(B,C,H*W))
        hgt = F.reshape(hgt_dict[layer_name],(B,C,H*W))
        
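    # Gram matrices of shape (B, C, C): channel-wise feature correlations used for the style comparison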
        hout_gram = F.batch_matmul(hout,hout,transb=True)
        hcomp_gram = F.batch_matmul(hcomp,hcomp,transb=True)
        hgt_gram = F.batch_matmul(hgt,hgt,transb=True)
        
        if i==0: 
            L_style_out = F.mean_absolute_error(hout_gram,hgt_gram)/(C*H*W)
            L_style_comp = F.mean_absolute_error(hcomp_gram,hgt_gram)/(C*H*W)
        else:
            L_style_out += F.mean_absolute_error(hout_gram,hgt_gram)/(C*H*W)
            L_style_comp += F.mean_absolute_error(hcomp_gram,hgt_gram)/(C*H*W)        

    return L_style_out + L_style_comp 
Example #2
Source File: updater.py    From pfio with MIT License
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize, _, w, h = y_out.data.shape
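        # Weighted L1 reconstruction term plus a softplus adversarial term averaged over the discriminator output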
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, enc)
        return loss 
Example #3
Source File: updater.py    From yukarin with MIT License
def _loss_predictor_cg(predictor, reconstruct, output, target, d_fake, loss_config: LossConfig):
    b, _, t = d_fake.data.shape

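    # Note: despite the 'mse' key, both terms below are computed with mean_absolute_error (L1)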
    loss_mse = (F.mean_absolute_error(reconstruct, target))
    chainer.report({'mse': loss_mse}, predictor)

    loss_identity = (F.mean_absolute_error(output, target))
    chainer.report({'identity': loss_identity}, predictor)

    loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
    chainer.report({'adversarial': loss_adv}, predictor)

    loss = loss_config.mse * loss_mse + loss_config.mse / 100 * loss_identity + loss_config.adversarial * loss_adv
    chainer.report({'loss': loss}, predictor)
    return loss 
Example #4
Source File: updater.py    From yukarin with MIT License
def _loss_predictor(predictor, output, target, d_fake, loss_config: LossConfig):
    b, _, t = d_fake.data.shape

    loss_mse = (F.mean_absolute_error(output, target))
    chainer.report({'mse': loss_mse}, predictor)

    loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
    chainer.report({'adversarial': loss_adv}, predictor)

    loss = loss_config.mse * loss_mse + loss_config.adversarial * loss_adv
    chainer.report({'loss': loss}, predictor)
    return loss 
Example #5
Source File: updater.py    From pixcaler with MIT License
def loss_func_rec_gen(self, x_in, x_out):
        return F.mean_absolute_error(x_out, x_in) 
Example #6
Source File: updater.py    From pixcaler with MIT License
def loss_func_rec_gen(self, x_in, x_out):
        return F.mean_absolute_error(x_out, x_in) 
Example #7
Source File: network.py    From Looking-to-Listen with MIT License
def __call__(self, num):
        # ===== Initialize variables ===== #
        audio_spec, face1, face2, true_spec = self.loadData(num=num)

        # ===== Compute mask ===== #
        y = self.separateSpectrogram(spec=audio_spec, face1=face1, face2=face2)
        
        # ===== Evaluate loss ===== #
        loss = F.mean_absolute_error(y, true_spec)
        #assert xp.isnan(loss.data).any()==False, "assert np.isnan(loss) {}".format(num)
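        # Guard: if the loss is NaN, replace it with a zero loss so this batch produces no gradient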
        if xp.isnan(loss.data).any():
            loss = chainer.Variable(xp.zeros(loss.shape, dtype=loss.dtype))
        
        reporter.report({"loss": loss.data}, self)
        return loss 
Example #8
Source File: sr_updater.py    From become-yukarin with MIT License
def _loss_predictor(self, predictor, output, target, d_fake):
        b, _, w, h = d_fake.data.shape

        loss_mse = (F.mean_absolute_error(output, target))
        chainer.report({'mse': loss_mse}, predictor)

        loss_adv = F.sum(F.softplus(-d_fake)) / (b * w * h)
        chainer.report({'adversarial': loss_adv}, predictor)

        loss = self.loss_config.mse * loss_mse + self.loss_config.adversarial * loss_adv
        chainer.report({'loss': loss}, predictor)
        return loss 
Example #9
Source File: updater.py    From become-yukarin with MIT License
def _loss_predictor(self, predictor, output, target, d_fake):
        b, _, t = d_fake.data.shape

        loss_mse = (F.mean_absolute_error(output, target))
        chainer.report({'mse': loss_mse}, predictor)

        loss_adv = F.sum(F.softplus(-d_fake)) / (b * t)
        chainer.report({'adversarial': loss_adv}, predictor)

        loss = self.loss_config.mse * loss_mse + self.loss_config.adversarial * loss_adv
        chainer.report({'loss': loss}, predictor)
        return loss 
Example #10
Source File: net.py    From chainer-gan-lib with MIT License
def __call__(self, x):
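        # Autoencoder: downsample with convolutions, reconstruct with deconvolutions, and return the reconstruction error against the input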
        h = x
        h = F.leaky_relu(self.c0(h))
        h = F.leaky_relu(self.c1(h))
        h = F.leaky_relu(self.c2(h))
        h = F.leaky_relu(self.c3(h))
        h = F.leaky_relu(self.l4(h))
        h = F.reshape(F.leaky_relu(self.l5(h)), (x.data.shape[0], self.ch, 4, 4))
        h = F.leaky_relu(self.dc3(h))
        h = F.leaky_relu(self.dc2(h))
        h = F.leaky_relu(self.dc1(h))
        h = F.tanh(self.dc0(h))
        return F.mean_absolute_error(h, x) 
Example #11
Source File: updater.py    From chainer-cyclegan with MIT License
def loss_func_rec_l1(x_out, t):
    return F.mean_absolute_error(x_out, t) 
Example #12
Source File: updater.py    From pfio with MIT License
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize, _, w, h = y_out.data.shape
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, dec)
        return loss 
Example #13
Source File: updater.py    From Deep_VoiceChanger with MIT License
def gene_update_half(self, a):
        if a:
            itr_x = self.itr_a
            itr_y = self.itr_b
            gene_xy = self.generator_ab
            gene_yx = self.generator_ba
            disc = self.discriminator_b
            opt = self.opt_g_a
        else:
            itr_x = self.itr_b
            itr_y = self.itr_a
            gene_xy = self.generator_ba
            gene_yx = self.generator_ab
            disc = self.discriminator_a
            opt = self.opt_g_b

        x = Variable(self.converter(itr_x.next(), self.device))
        y = Variable(self.converter(itr_y.next(), self.device))

        xy  = gene_xy(x)
        xyx = gene_yx(xy)
        yy  = gene_xy(y)

        xy_disc = disc(xy)

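        # Losses: cycle-consistency (x -> xy -> xyx), hinge adversarial, and identity mapping (y -> yy)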
        recon_loss = F.mean_absolute_error(x, xyx)
        gan_loss   = self.loss_hinge_gene(xy_disc)
        ident_loss = F.mean_absolute_error(y, yy)

        loss_gene = recon_loss*3.0 + gan_loss + ident_loss*0.5

        gene_xy.cleargrads()
        loss_gene.backward()
        opt.update()

        chainer.reporter.report({
            'loss/g/recon': recon_loss,
            'loss/g/ident': ident_loss,
            'loss/g/gene':  gan_loss}) 
Example #14
Source File: test_mean_absolute_error.py    From chainer with MIT License
def test_invalid_dtype2(self):
        x0 = chainer.Variable(
            numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32))
        x1 = chainer.Variable(
            numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float16))
        with self.assertRaises(type_check.InvalidType):
            functions.mean_absolute_error(x0, x1)


# See chainer#6702. 
Example #15
Source File: test_mean_absolute_error.py    From chainer with MIT License
def test_invalid_dtype1(self):
        x0 = chainer.Variable(
            numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
        x1 = chainer.Variable(
            numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.int32))
        with self.assertRaises(type_check.InvalidType):
            functions.mean_absolute_error(x0, x1) 
Example #16
Source File: test_mean_absolute_error.py    From chainer with MIT License
def forward(self, inputs, device):
        x0, x1 = inputs
        loss = functions.mean_absolute_error(x0, x1)
        return loss, 
Example #17
Source File: updater.py    From chainer with MIT License
def loss_dec(self, dec, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize, _, w, h = y_out.shape
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, dec)
        return loss 
Example #18
Source File: updater.py    From chainer with MIT License
def loss_enc(self, enc, x_out, t_out, y_out, lam1=100, lam2=1):
        batchsize, _, w, h = y_out.shape
        loss_rec = lam1*(F.mean_absolute_error(x_out, t_out))
        loss_adv = lam2*F.sum(F.softplus(-y_out)) / batchsize / w / h
        loss = loss_rec + loss_adv
        chainer.report({'loss': loss}, enc)
        return loss 
Example #19
Source File: updater.py    From chainer-partial_convolution_image_inpainting with MIT License
def calc_loss_tv(Icomp, mask, xp=np):
    canvas = mask.data
    canvas[:,:,:,:-1] += mask.data[:,:,:,1:] #mask left overlap
    canvas[:,:,:,1:] += mask.data[:,:,:,:-1] #mask right overlap
    canvas[:,:,:-1,:] += mask.data[:,:,1:,:] #mask up overlap
    canvas[:,:,1:,:] += mask.data[:,:,:-1,:] #mask bottom overlap
    
    P = Variable((xp.sign(canvas-0.5)+1.0)*0.5) #P region (hole mask: 1 pixel dilated region from hole)
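    # Total variation term: L1 differences between horizontally and vertically adjacent pixels of Icomp inside the dilated-hole region P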
    return (F.mean_absolute_error(P[:, :, :, 1:] * Icomp[:, :, :, 1:], P[:, :, :, :-1] * Icomp[:, :, :, :-1])
            + F.mean_absolute_error(P[:, :, 1:, :] * Icomp[:, :, 1:, :], P[:, :, :-1, :] * Icomp[:, :, :-1, :]))
Example #20
Source File: updater.py    From chainer-partial_convolution_image_inpainting with MIT License
def calc_loss_perceptual(hout_dict,hcomp_dict,hgt_dict):
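    # Perceptual loss: L1 distance between the feature maps of the output (hout) and composited (hcomp) images and those of the ground truth (hgt), accumulated over all layers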
    layers = list(hout_dict.keys())
    layer_name =  layers[0]
    loss = F.mean_absolute_error(hout_dict[layer_name],hgt_dict[layer_name])
    loss += F.mean_absolute_error(hcomp_dict[layer_name],hgt_dict[layer_name])
    for layer_name in layers[1:]: 
        loss += F.mean_absolute_error(hout_dict[layer_name],hgt_dict[layer_name])
        loss += F.mean_absolute_error(hcomp_dict[layer_name],hgt_dict[layer_name])
    return loss 
Example #21
Source File: updater.py    From Deep_VoiceChanger with MIT License
def gene_update_full(self):
        a = Variable(self.converter(self.itr_a.next(), self.device))
        b = Variable(self.converter(self.itr_b.next(), self.device))

        ab  = self.generator_ab(a)
        ba  = self.generator_ba(b)
        aba = self.generator_ba(ab)
        bab = self.generator_ab(ba)
        aa  = self.generator_ba(a)
        bb  = self.generator_ab(b)

        ab_disc = self.discriminator_b(ab)
        ba_disc = self.discriminator_a(ba)

        recon_loss = F.mean_absolute_error(a, aba) + F.mean_absolute_error(b, bab)
        gan_loss   = self.loss_hinge_gene(ab_disc) + self.loss_hinge_gene(ba_disc)
        ident_loss = F.mean_absolute_error(a, aa)  + F.mean_absolute_error(b, bb)

        loss_gene = recon_loss*3.0 + gan_loss + ident_loss*0.5

        self.generator_ab.cleargrads()
        self.generator_ba.cleargrads()
        loss_gene.backward()
        self.opt_g_a.update()
        self.opt_g_b.update()

        chainer.reporter.report({
            'loss/g/recon': recon_loss,
            'loss/g/ident': ident_loss,
            'loss/g/gene':  gan_loss}) 
Example #22
Source File: train_own_dataset.py    From chainer-chemistry with MIT License
def main():
    # Parse the arguments.
    args = parse_arguments()

    if args.label:
        labels = args.label
        class_num = len(labels) if isinstance(labels, list) else 1
    else:
        raise ValueError('No target label was specified.')

    # Dataset preparation. Postprocessing is required for the regression task.
    def postprocess_label(label_list):
        return numpy.asarray(label_list, dtype=numpy.float32)

    # Apply a preprocessor to the dataset.
    print('Preprocessing dataset...')
    preprocessor = preprocess_method_dict[args.method]()
    parser = CSVFileParser(preprocessor, postprocess_label=postprocess_label,
                           labels=labels, smiles_col='SMILES')
    dataset = parser.parse(args.datafile)['dataset']

    # Scale the label values, if necessary.
    if args.scale == 'standardize':
        scaler = StandardScaler()
        scaler.fit(dataset.get_datasets()[-1])
    else:
        scaler = None

    # Split the dataset into training and validation.
    train_data_size = int(len(dataset) * args.train_data_ratio)
    train, _ = split_dataset_random(dataset, train_data_size, args.seed)

    # Set up the predictor.
    predictor = set_up_predictor(
        args.method, args.unit_num,
        args.conv_layers, class_num, label_scaler=scaler)

    # Set up the regressor.
    device = chainer.get_device(args.device)
    metrics_fun = {'mae': F.mean_absolute_error, 'rmse': rmse}
    regressor = Regressor(predictor, lossfun=F.mean_squared_error,
                          metrics_fun=metrics_fun, device=device)

    print('Training...')
    converter = converter_method_dict[args.method]
    run_train(regressor, train, valid=None,
              batch_size=args.batchsize, epoch=args.epoch,
              out=args.out, extensions_list=None,
              device=device, converter=converter,
              resume_path=None)

    # Save the regressor's parameters.
    model_path = os.path.join(args.out, args.model_filename)
    print('Saving the trained model to {}...'.format(model_path))

    # TODO(nakago): ChainerX array cannot be sent to numpy array when internal
    # state has gradients.
    if hasattr(regressor.predictor.graph_conv, 'reset_state'):
        regressor.predictor.graph_conv.reset_state()

    regressor.save_pickle(model_path, protocol=args.protocol) 
Example #23
Source File: updater.py    From chainer-partial_convolution_image_inpainting with MIT License
def update_core(self):
        xp = self.model.xp
        self._iter += 1
        batch = self.get_iterator('main').next() #img_processed (B,4,H,W), origin (B,3,H,W), mask (B,1,H,W)
        batchsize = len(batch)

        w_in = self._image_size

        zero_f = Variable(xp.zeros((batchsize, 3, w_in, w_in)).astype("f"))
        
        x_train = np.zeros((batchsize, 3, w_in, w_in)).astype("f")
        mask_train = np.zeros((batchsize, 3, w_in, w_in)).astype("f")
         
        for i in range(batchsize):
            x_train[i, :] = batch[i][0] #original image
            mask_train[i, :] = batch[i][1] #0-1 mask of c 
        
        x_train = xp.array(x_train)
        mask_train = xp.array(mask_train)
        mask_b = xp.array(mask_train.astype("bool"))
        
        I_gt = Variable(x_train)
        M = Variable(mask_train)
        M_b = Variable(mask_b)

        I_out = self.model(I_gt,M)
        I_comp = F.where(M_b, I_gt, I_out) # where an element of M_b is True, take the corresponding element of I_gt; otherwise that of I_out

        fs_I_gt = vgg_extract(self.vgg,I_gt) #feature dict
        fs_I_out = vgg_extract(self.vgg,I_out) #feature dict
        fs_I_comp = vgg_extract(self.vgg,I_comp) #feature dict

        opt_model = self.get_optimizer('model')

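        # L1 reconstruction losses over the valid (unmasked) region and the hole region, weighted separately below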
        L_valid = F.mean_absolute_error(M*I_out,M*I_gt)
        L_hole = F.mean_absolute_error((1-M)*I_out,(1-M)*I_gt) 
        
        L_perceptual = calc_loss_perceptual(fs_I_gt,fs_I_out,fs_I_comp)
        
        L_style = calc_loss_style(fs_I_out,fs_I_comp,fs_I_gt) #Loss style out and comp 
        L_tv = calc_loss_tv(I_comp, M, xp=xp)

        L_total = L_valid + self._lambda1 * L_hole + self._lambda2 * L_perceptual + \
                  self._lambda3 * L_style + self._lambda4 * L_tv
        
        self.vgg.cleargrads()
        self.model.cleargrads()
        L_total.backward()
        opt_model.update()

        chainer.report({'L_valid': L_valid})
        chainer.report({'L_hole': L_hole})
        chainer.report({'L_perceptual': L_perceptual})
        chainer.report({'L_style': L_style})
        chainer.report({'L_tv': L_tv})