Python keras.backend.log() Examples

The following are 30 code examples showing how to use keras.backend.log(). These examples are extracted from open-source projects; you can go to the original project or source file by following the reference line above each example.


You may also want to check out all available functions and classes of the module keras.backend.
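Most of the examples below share a single pattern: keep the argument of keras.backend.log() strictly positive (usually with K.clip or a small epsilon added) so the loss never becomes NaN or infinite. Here is a minimal, self-contained sketch of that pattern; the helper name and the sample values are illustrative and do not come from any of the projects below.

from keras import backend as K
import numpy as np

def stable_categorical_crossentropy(y_true, y_pred):
    # Clip predicted probabilities into [epsilon, 1] so K.log never sees 0.
    y_pred = K.clip(y_pred, K.epsilon(), 1.0)
    # Categorical cross-entropy written out explicitly with K.log.
    return -K.sum(y_true * K.log(y_pred), axis=-1)

# Tiny usage example with constant tensors.
y_true = K.constant(np.array([[0., 1., 0.]]))
y_pred = K.constant(np.array([[0.2, 0.7, 0.1]]))
print(K.eval(stable_categorical_crossentropy(y_true, y_pred)))  # approx. [0.3567] = -log(0.7)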

Example 1
Project: reinforcement-learning-kr   Author: rlcode   File: breakout_a3c.py    License: MIT License
def actor_optimizer(self):
        action = K.placeholder(shape=[None, self.action_size])
        advantages = K.placeholder(shape=[None, ])

        policy = self.actor.output

        # policy cross-entropy loss
        action_prob = K.sum(action * policy, axis=1)
        cross_entropy = K.log(action_prob + 1e-10) * advantages
        cross_entropy = -K.sum(cross_entropy)

        # entropy term to keep exploration going
        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
        entropy = K.sum(entropy)

        # add the two terms to form the final loss
        loss = cross_entropy + 0.01 * entropy

        optimizer = RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
        train = K.function([self.actor.input, action, advantages],
                           [loss], updates=updates)
        return train

    # function that updates the value network
Example 2
Project: reinforcement-learning-kr   Author: rlcode   File: reinforce_agent.py    License: MIT License
def optimizer(self):
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])
        
        # compute the cross-entropy loss
        action_prob = K.sum(action * self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)
        
        # create the training function that updates the policy network
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights, [],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train

    # select an action with the policy network
Example 3
Project: reinforcement-learning   Author: rlcode   File: breakout_a3c.py    License: MIT License
def build_model(self):
        input = Input(shape=self.state_size)
        conv = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(input)
        conv = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(conv)
        conv = Flatten()(conv)
        fc = Dense(256, activation='relu')(conv)
        policy = Dense(self.action_size, activation='softmax')(fc)
        value = Dense(1, activation='linear')(fc)

        actor = Model(inputs=input, outputs=policy)
        critic = Model(inputs=input, outputs=value)

        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic

    # make loss function for Policy Gradient
    # [log(action probability) * advantages] will be input for the back prop
    # we add entropy of action probability to loss 
Example 4
Project: reinforcement-learning   Author: rlcode   File: breakout_a3c.py    License: MIT License
def actor_optimizer(self):
        action = K.placeholder(shape=[None, self.action_size])
        advantages = K.placeholder(shape=[None, ])

        policy = self.actor.output

        good_prob = K.sum(action * policy, axis=1)
        eligibility = K.log(good_prob + 1e-10) * advantages
        actor_loss = -K.sum(eligibility)

        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
        entropy = K.sum(entropy)

        loss = actor_loss + 0.01*entropy
        optimizer = RMSprop(lr=self.actor_lr, rho=0.99, epsilon=0.01)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
        train = K.function([self.actor.input, action, advantages], [loss], updates=updates)

        return train

    # make loss function for Value approximation 
Example 5
Project: reinforcement-learning   Author: rlcode   File: reinforce_agent.py    License: MIT License
def optimizer(self):
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])

        # Calculate cross entropy error function
        action_prob = K.sum(action * self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)

        # create training function
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights, [],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train

    # get action from policy network 
Example 6
Project: reinforcement-learning   Author: rlcode   File: cartpole_a3c.py    License: MIT License
def build_model(self):
        state = Input(batch_shape=(None,  self.state_size))
        shared = Dense(self.hidden1, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform')(state)

        actor_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='glorot_uniform')(shared)
        action_prob = Dense(self.action_size, activation='softmax', kernel_initializer='glorot_uniform')(actor_hidden)

        value_hidden = Dense(self.hidden2, activation='relu', kernel_initializer='he_uniform')(shared)
        state_value = Dense(1, activation='linear', kernel_initializer='he_uniform')(value_hidden)

        actor = Model(inputs=state, outputs=action_prob)
        critic = Model(inputs=state, outputs=state_value)

        actor._make_predict_function()
        critic._make_predict_function()

        actor.summary()
        critic.summary()

        return actor, critic

    # make loss function for Policy Gradient
    # [log(action probability) * advantages] will be input for the back prop
    # we add entropy of action probability to loss 
Example 7
Project: reinforcement-learning   Author: rlcode   File: cartpole_a3c.py    License: MIT License
def actor_optimizer(self):
        action = K.placeholder(shape=(None, self.action_size))
        advantages = K.placeholder(shape=(None, ))

        policy = self.actor.output

        good_prob = K.sum(action * policy, axis=1)
        eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
        loss = -K.sum(eligibility)

        entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

        actor_loss = loss + 0.01*entropy

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
        train = K.function([self.actor.input, action, advantages], [], updates=updates)
        return train

    # make loss function for Value approximation 
Example 8
Project: 2019-OSS-Summer-RL   Author: utilForever   File: reinforce_agent.py    License: MIT License
def optimizer(self):
        action = K.placeholder(shape=[None, 5])
        discounted_rewards = K.placeholder(shape=[None, ])
        
        # compute the cross-entropy loss
        action_prob = K.sum(action * self.model.output, axis=1)
        cross_entropy = K.log(action_prob) * discounted_rewards
        loss = -K.sum(cross_entropy)
        
        # create the training function that updates the policy network
        optimizer = Adam(lr=self.learning_rate)
        updates = optimizer.get_updates(self.model.trainable_weights, [],
                                        loss)
        train = K.function([self.model.input, action, discounted_rewards], [],
                           updates=updates)

        return train

    # select an action with the policy network
Example 9
Project: wtte-rnn   Author: ragulpr   File: wtte.py    License: MIT License
def __init__(self,
                 kind,
                 reduce_loss=True,
                 clip_prob=1e-6,
                 regularize=False,
                 location=None,
                 growth=None):

        self.kind = kind
        self.reduce_loss = reduce_loss
        self.clip_prob = clip_prob

        if regularize == True or location is not None or growth is not None:
            raise DeprecationWarning('Directly penalizing beta has been found \
                                      to be unneccessary when using bounded activation \
                                      and clipping of log-likelihood.\
                                      Use this method instead.') 
Example 10
Project: wtte-rnn   Author: ragulpr   File: wtte.py    License: MIT License
def loss_function(self, y_true, y_pred):

        y, u, a, b = _keras_split(y_true, y_pred)
        if self.kind == 'discrete':
            loglikelihoods = loglik_discrete(y, u, a, b)
        elif self.kind == 'continuous':
            loglikelihoods = loglik_continuous(y, u, a, b)

        if self.clip_prob is not None:
            loglikelihoods = K.clip(loglikelihoods, 
                log(self.clip_prob), log(1 - self.clip_prob))
        if self.reduce_loss:
            loss = -1.0 * K.mean(loglikelihoods, axis=-1)
        else:
            loss = -loglikelihoods

        return loss

# For backwards-compatibility 
Example 11
Project: 4Dsurvival   Author: UK-Digital-Heart-Project   File: trainDL.py    License: GNU General Public License v3.0
def sort4minibatches(xvals, evals, tvals, batchsize):
    ntot = len(xvals)
    indices = np.arange(ntot)
    np.random.shuffle(indices)
    start_idx=0
    esall = []
    for end_idx in list(range(batchsize, batchsize*(ntot//batchsize)+1, batchsize))+[ntot]:
        excerpt = indices[start_idx:end_idx]
        sort_idx = np.argsort(tvals[excerpt])[::-1]
        es = excerpt[sort_idx]
        esall += list(es)
        start_idx = end_idx
    return (xvals[esall], evals[esall], tvals[esall], esall)


#Define Cox PH partial likelihood function loss.
#Arguments: E (censoring status), risk (risk [log hazard ratio] predicted by network) for batch of input subjects
#As defined, this function requires that all subjects in input batch must be sorted in descending order of survival/censoring time (i.e. arguments E and risk will be in this order) 
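The partial-likelihood loss itself is cut off in this excerpt, so here is a minimal sketch of a Cox proportional-hazards loss matching the comment above, assuming the batch has already been sorted in descending order of survival/censoring time (as sort4minibatches arranges). The function name and exact formulation are illustrative, not copied from the 4Dsurvival source.

from keras import backend as K

def negative_log_partial_likelihood(E, risk):
    # risk: predicted log hazard ratios for a batch sorted by descending
    # survival/censoring time, so the running cumulative sum below covers
    # each subject's risk set.
    hazard_ratio = K.exp(risk)
    log_risk = K.log(K.cumsum(hazard_ratio))
    uncensored_likelihood = risk - log_risk
    # only observed events (E == 1) contribute to the partial likelihood
    censored_likelihood = uncensored_likelihood * E
    return -K.sum(censored_likelihood)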
Example 12
Project: icassp19   Author: edufonseca   File: losses.py    License: MIT License
def crossentropy_reed_wrap(_beta):
    def crossentropy_reed_core(y_true, y_pred):
        """
        This loss function is proposed in:
        Reed et al. "Training Deep Neural Networks on Noisy Labels with Bootstrapping", 2014

        :param y_true:
        :param y_pred:
        :return:
        """

        # hyper param
        print(_beta)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # (1) dynamically update the targets based on the current state of the model: bootstrapped target tensor
        # use predicted class proba directly to generate regression targets
        y_true_update = _beta * y_true + (1 - _beta) * y_pred

        # (2) compute loss as always
        _loss = -K.sum(y_true_update * K.log(y_pred), axis=-1)

        return _loss
    return crossentropy_reed_core 
Example 13
Project: aiexamples   Author: mogoweb   File: training.py    License: Apache License 2.0
def softmax_loss(y_true, y_pred):
    """Compute cross entropy loss aka softmax loss.

    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted logits,
            tensor of shape (?, num_boxes, num_classes).

    # Returns
        softmax_loss: Softmax loss, tensor of shape (?, num_boxes).
    """
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    softmax_loss = -tf.reduce_sum(y_true * tf.log(y_pred), axis=-1)
    return softmax_loss 
Example 14
Project: aiexamples   Author: mogoweb   File: training.py    License: Apache License 2.0
def focal_loss(y_true, y_pred, gamma=2, alpha=0.25):
    """Compute focal loss.
    
    # Arguments
        y_true: Ground truth targets,
            tensor of shape (?, num_boxes, num_classes).
        y_pred: Predicted logits,
            tensor of shape (?, num_boxes, num_classes).
    
    # Returns
        focal_loss: Focal loss, tensor of shape (?, num_boxes).

    # References
        https://arxiv.org/abs/1708.02002
    """
    #y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    eps = K.epsilon()
    y_pred = K.clip(y_pred, eps, 1. - eps)
    
    pt = tf.where(tf.equal(y_true, 1), y_pred, 1 - y_pred)
    focal_loss = -tf.reduce_sum(alpha * K.pow(1. - pt, gamma) * K.log(pt), axis=-1)
    return focal_loss 
Example 15
Project: sam   Author: marcellacornia   File: models.py    License: MIT License
def kl_divergence(y_true, y_pred):
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred

    sum_y_true = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_true, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    sum_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.sum(K.sum(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_true /= (sum_y_true + K.epsilon())
    y_pred /= (sum_y_pred + K.epsilon())

    return 10 * K.sum(K.sum(y_true * K.log((y_true / (y_pred + K.epsilon())) + K.epsilon()), axis=-1), axis=-1)


# Correlation Coefficient Loss 
Example 16
Project: maskrcnn   Author: shtamura   File: loss.py    License: MIT License
def sparse_categorical_crossentropy(gt_ids, pred_one_hot_post_softmax):
    """
    K.sparse_categorical_crossentropy returned NaN results, possibly due to
    a division by zero.
    https://qiita.com/4Ui_iUrz1/items/35a8089ab0ebc98061c1
    As a workaround, this function adds a small epsilon so that log(0) never occurs.
    """
    gt_ids = log.tfprint(gt_ids, "cross:gt_ids:")
    pred_one_hot_post_softmax = log.tfprint(pred_one_hot_post_softmax,
                                            "cross:pred_one_hot_post_softmax:")

    gt_one_hot = K.one_hot(gt_ids, K.shape(pred_one_hot_post_softmax)[-1])
    gt_one_hot = log.tfprint(gt_one_hot, "cross:gt_one_hot:")

    epsilon = K.epsilon()  # 1e-07
    loss = -K.sum(
        gt_one_hot * K.log(
            tf.clip_by_value(pred_one_hot_post_softmax, epsilon, 1 - epsilon)),
        axis=-1)
    loss = log.tfprint(loss, "cross:loss:")
    return loss 
Example 17
Project: maskrcnn   Author: shtamura   File: loss.py    License: MIT License
def rpn_offsets_loss(gt_offsets, gt_fg, pred_offsets):
    """RPNのオフセット回帰の損失関数
    positive(gt_fg > 0)データのみ評価対象とする

    gt_offsets: 正解オフセット
        [N, R, 4]
        3軸目は領域提案とアンカーのオフセット(中心、幅、高さ)。
            (tx, ty, th, tw)
    gt_fg: 正解データの前景/背景
        [N, R]
    pred_offsets: 予測値
        [N, R, 4].
    """
    pos_idx = tf.where(gt_fg > 0)
    gt_offsets = tf.gather_nd(gt_offsets, pos_idx)
    pred_offsets = tf.gather_nd(pred_offsets, pos_idx)
    # In the Faster R-CNN paper, the RPN offset regression is scaled by a factor of 10 to balance it against the object classification loss.
    # If the RPN loss takes up too large a share of the total loss, adjust this coefficient.
    p = 1.
    loss = p * offsets_loss(gt_offsets, pred_offsets)
    loss = log.tfprint(loss, "rpn_offsets_loss")
    return loss 
Example 18
Project: MIDI-VAE   Author: brunnergino   File: vae_definition.py    License: MIT License
def sample_vector(vector, sample_method):
    if np.sum(vector) > 0:
        if sample_method == 'argmax':
            max_index = np.argmax(vector)

        if sample_method == 'choice':
            vector = vector/(np.sum(vector)*1.0)

            vector = np.log(vector) / temperature
            vector = np.exp(vector) / np.sum(np.exp(vector))

            #give it number_of_tries to find a note that is above the cutoff_sample_threshold
            for _ in range(number_of_tries):
                max_index = np.random.choice(len(vector), p=vector)

                if vector[max_index] > cutoff_sample_threshold:
                    break
    else:
        max_index = 0
    return max_index 
Example 19
Project: Keras-GAN   Author: eriklindernoren   File: bgan.py    License: MIT License
def boundary_loss(self, y_true, y_pred):
        """
        Boundary seeking loss.
        Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/
        """
        return 0.5 * K.mean((K.log(y_pred) - K.log(1 - y_pred))**2) 
Example 20
Project: focal-loss-keras   Author: mkocabas   File: focal_loss.py    License: MIT License
def focal_loss(gamma=2., alpha=.25):
	def focal_loss_fixed(y_true, y_pred):
		pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
		pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
		return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
	return focal_loss_fixed 
Example 21
Project: reinforcement-learning-kr   Author: rlcode   File: cartpole_a2c.py    License: MIT License
def actor_optimizer(self):
        action = K.placeholder(shape=[None, self.action_size])
        advantage = K.placeholder(shape=[None, ])

        action_prob = K.sum(action * self.actor.output, axis=1)
        cross_entropy = K.log(action_prob) * advantage
        loss = -K.sum(cross_entropy)

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
        train = K.function([self.actor.input, action, advantage], [],
                           updates=updates)
        return train

    # function that updates the value network
Example 22
Project: 2019-OSS-Summer-RL   Author: utilForever   File: cartpole_a2c.py    License: MIT License
def actor_optimizer(self):
        action = K.placeholder(shape=[None, self.action_size])
        advantage = K.placeholder(shape=[None, ])

        action_prob = K.sum(action * self.actor.output, axis=1)
        cross_entropy = K.log(action_prob) * advantage
        loss = -K.sum(cross_entropy)

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(self.actor.trainable_weights, [], loss)
        train = K.function([self.actor.input, action, advantage], [],
                           updates=updates)
        return train

    # function that updates the value network
Example 23
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    License: MIT License
def test_MC_dropout(model, X, Y):
    pred = model.predict(X)  # N x K x D
    pred = np.mean(pred, 1)
    acc = np.mean(np.argmax(pred, axis=-1) == np.argmax(Y, axis=-1))
    ll = np.sum(np.log(np.sum(pred * Y, -1)))
    return acc, ll 
Example 24
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    License: MIT License
def logsumexp(x, axis=None):
    x_max = K.max(x, axis=axis, keepdims=True)
    return K.log(K.sum(K.exp(x - x_max), axis=axis, keepdims=True)) + x_max 
Example 25
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    License: MIT License
def bbalpha_softmax_cross_entropy_with_mc_logits(alpha):
    alpha = K.cast_to_floatx(alpha)
    def loss(y_true, mc_logits):
        # log(p_ij), p_ij = softmax(logit_ij)
        #assert mc_logits.ndim == 3
        mc_log_softmax = mc_logits - K.max(mc_logits, axis=2, keepdims=True)
        mc_log_softmax = mc_log_softmax - K.log(K.sum(K.exp(mc_log_softmax), axis=2, keepdims=True))
        mc_ll = K.sum(y_true * mc_log_softmax, -1)  # N x K
        K_mc = mc_ll.get_shape().as_list()[1]	# only for tensorflow
        return - 1. / alpha * (logsumexp(alpha * mc_ll, 1) + K.log(1.0 / K_mc))
    return loss


###################################################################
# the model 
Example 26
Project: MBLLEN   Author: Lvfeifan   File: utls.py    License: Apache License 2.0
def log10(x):
    numerator = K.log(x)
    denominator = K.log(K.constant(10, dtype=numerator.dtype))
    return numerator / denominator 
Example 27
Project: A3C_Keras_FlappyBird   Author: shalabhsingh   File: test.py    License: MIT License
def logloss(y_true, y_pred):     #policy loss
	return -K.sum( K.log(y_true*y_pred + (1-y_true)*(1-y_pred) + const), axis=-1) 
	# BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const))   #regularisation term

#loss function for critic output 
Example 28
Project: A3C_Keras_FlappyBird   Author: shalabhsingh   File: train_network.py    License: MIT License
def logloss(y_true, y_pred):     #policy loss
	return -K.sum( K.log(y_true*y_pred + (1-y_true)*(1-y_pred) + const), axis=-1) 
	# BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const))   #regularisation term

#loss function for critic output 
Example 29
Project: keras_bn_library   Author: bnsnapper   File: rbm.py    License: MIT License
def free_energy(self, x):
		wx_b = K.dot(x, self.Wrbm) + self.bh

		if(self.visible_unit_type == 'gaussian'):
			vbias_term = 0.5*K.sum((x - self.bx)**2, axis=1)
			hidden_term = K.sum(K.log(1 + K.exp(wx_b)), axis=1)
			return -hidden_term + vbias_term
		else:
			hidden_term = K.sum(K.log(1 + K.exp(wx_b)), axis=1)
			vbias_term = K.dot(x, self.bx)
			return -hidden_term - vbias_term 
Example 30
Project: keras_bn_library   Author: bnsnapper   File: rbm.py    License: MIT License
def reconstruction_loss(self, y_true, y_pred):

		x = y_pred

		def loss(x):
			if(self.visible_unit_type == 'gaussian'):
				x_rec, _, _ = self.mcmc_chain(x, self.nb_gibbs_steps)
				return K.mean(K.sqrt(x - x_rec))
			else:
				_, pre, _ = self.mcmc_chain(x, self.nb_gibbs_steps)
				cross_entropy_loss = -K.mean(K.sum(x*K.log(K.sigmoid(pre)) + 
										(1 - x)*K.log(1 - K.sigmoid(pre)), axis=1))
				return cross_entropy_loss

		return loss(x)