Python keras.backend.logsumexp() Examples

The following code examples show how keras.backend.logsumexp() is used in open-source projects. You may also want to check out the other functions and classes available in the keras.backend module.
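Before diving into the project examples, here is a minimal sketch (not taken from any of the projects below) of what K.logsumexp computes: log(sum(exp(x), axis)), evaluated so that large inputs do not overflow. It assumes the TensorFlow backend and the usual Keras 2.x imports.

import numpy as np
from keras import backend as K

x_np = np.array([[1.0, 2.0, 3.0],
                 [1000.0, 1000.0, 1000.0]])

stable = K.eval(K.logsumexp(K.variable(x_np), axis=1))  # stays finite for the 1000s row
naive = np.log(np.sum(np.exp(x_np), axis=1))            # np.exp(1000.0) overflows to inf

print(stable)  # approximately [3.4076, 1001.0986]
print(naive)   # [3.4076..., inf]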
Example #1
Source File: layers.py    From indic_tagger with Apache License 2.0
def step(self, input_energy_t, states, return_logZ=True):
        # note: in the following, `prev_target_val` has shape (B, F)
        # where B = batch_size, F = output feature dim
        # Note: `i` is of float32, due to the behavior of `K.rnn`
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.tf.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            chain_energy = chain_energy * K.expand_dims(K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
        if return_logZ:
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)  # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
            new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
            return argmin_table, [min_energy, i + 1] 
Example #2
Source File: layers.py    From anago with MIT License
def step(self, input_energy_t, states, return_logZ=True):
        # note: in the following, `prev_target_val` has shape (B, F)
        # where B = batch_size, F = output feature dim
        # Note: `i` is of float32, due to the behavior of `K.rnn`
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.tf.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            chain_energy = chain_energy * K.expand_dims(K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
        if return_logZ:
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)  # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
            new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
            return argmin_table, [min_energy, i + 1] 
Example #3
Source File: layers.py    From sequence-tagging-ner with Apache License 2.0
def step(self, input_energy_t, states, return_logZ=True):
        # note: in the following, `prev_target_val` has shape (B, F)
        # where B = batch_size, F = output feature dim
        # Note: `i` is of float32, due to the behavior of `K.rnn`
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.tf.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            chain_energy = chain_energy * K.expand_dims(K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
        if return_logZ:
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)  # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
            new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
            return argmin_table, [min_energy, i + 1] 
Example #4
Source File: backend_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_logsumexp(self, x_np, axis, keepdims):
        '''
        Check if K.logsumexp works properly for values close to one.
        '''
        for k in BACKENDS:
            x = k.variable(x_np)
            assert_allclose(k.eval(k.logsumexp(x, axis=axis, keepdims=keepdims)),
                            np.log(np.sum(np.exp(x_np), axis=axis, keepdims=keepdims)),
                            rtol=1e-5) 
Example #5
Source File: keras_bert_layer.py    From nlp_xiaojiang with MIT License
def step(self, input_energy_t, states, return_logZ=True):
        # note: in the following, `prev_target_val` has shape (B, F)
        # where B = batch_size, F = output feature dim
        # Note: `i` is of float32, due to the behavior of `K.rnn`
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.tf.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            # (1, F, F)*(B, 1, 1) -> (B, F, F)
            chain_energy = chain_energy * K.expand_dims(
                K.expand_dims(m[:, 0] * m[:, 1]))
        if return_logZ:
            # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)
            new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            # cast for tf-version `K.rnn`
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
            return argmin_table, [min_energy, i + 1] 
Example #6
Source File: layer_crf_bojone.py    From nlp_xiaojiang with MIT License
def loss(self, y_true, y_pred):  # the target y_pred needs to be in one-hot form
        mask = 1 - y_true[:, 1:, -1] if self.ignore_last_label else None
        y_true, y_pred = y_true[:, :, :self.num_labels], y_pred[:, :, :self.num_labels]
        init_states = [y_pred[:, 0]]  # initial state
        log_norm, _, _ = K.rnn(self.log_norm_step, y_pred[:, 1:], init_states, mask=mask)  # compute the Z vector (in log space)
        log_norm = K.logsumexp(log_norm, 1, keepdims=True)  # compute Z (in log space)
        path_score = self.path_score(y_pred, y_true)  # compute the numerator (in log space)
        return log_norm - path_score  # i.e. log(numerator / denominator)
Example #7
Source File: layer_crf_bojone.py    From nlp_xiaojiang with MIT License
def log_norm_step(self, inputs, states):
        """递归计算归一化因子
        要点:1、递归计算;2、用logsumexp避免溢出。
        技巧:通过expand_dims来对齐张量。
        """
        states = K.expand_dims(states[0], 2)  # (batch_size, output_dim, 1)
        trans = K.expand_dims(self.trans, 0)  # (1, output_dim, output_dim)
        output = K.logsumexp(states + trans, 1)  # (batch_size, output_dim)
        return output + inputs, [output + inputs] 
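The docstring above describes the forward recursion the CRF uses to accumulate the log-normalizer. As a rough NumPy sketch of one recursion step, with made-up shapes and random scores standing in for the layer's tensors:

import numpy as np
from scipy.special import logsumexp  # NumPy counterpart of K.logsumexp

B, F = 2, 4                                    # assumed batch size and number of labels
states = np.random.randn(B, F)                 # previous log-scores, shape (B, F)
trans = np.random.randn(F, F)                  # transition scores between labels
inputs = np.random.randn(B, F)                 # emission scores at the current step

# align the tensors exactly as log_norm_step does: (B, F, 1) + (1, F, F) -> (B, F, F)
scores = states[:, :, None] + trans[None, :, :]
output = logsumexp(scores, axis=1) + inputs    # new log-scores, shape (B, F)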
Example #8
Source File: ChainCRF.py    From emnlp2017-bilstm-cnn-crf with Apache License 2.0
def free_energy0(x, U, mask=None):
    '''Free energy without boundary potential handling.'''
    initial_states = [x[:, 0, :]]
    last_alpha, _ = _forward(x,
                             lambda B: [K.logsumexp(B, axis=1)],
                             initial_states,
                             U,
                             mask)
    return last_alpha[:, 0] 
Example #9
Source File: layers.py    From delft with Apache License 2.0
def free_energy0(x, U, mask=None):
    """Free energy without boundary potential handling."""
    initial_states = [x[:, 0, :]]
    last_alpha, _ = _forward(x,
                             lambda B: [K.logsumexp(B, axis=1)],
                             initial_states,
                             U,
                             mask)
    return last_alpha[:, 0] 
Example #10
Source File: backend_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_logsumexp_optim(self):
        '''
        Check that the overflow-avoiding implementation works for large inputs.
        '''
        for k in [KTF]:
            x_np = np.array([1e+4, 1e-4])
            assert_allclose(k.eval(k.logsumexp(k.variable(x_np), axis=0)),
                            1e4,
                            rtol=1e-5) 
Example #11
Source File: ChainCRF.py    From elmo-bilstm-cnn-crf with Apache License 2.0
def free_energy0(x, U, mask=None):
    '''Free energy without boundary potential handling.'''
    initial_states = [x[:, 0, :]]
    last_alpha, _ = _forward(x,
                             lambda B: [K.logsumexp(B, axis=1)],
                             initial_states,
                             U,
                             mask)
    return last_alpha[:, 0] 
Example #12
Source File: interpolation_layer.py    From interp-net with MIT License
def call(self, x, reconstruction=False):
        self.reconstruction = reconstruction
        self.output_dim = K.int_shape(x)[-1]
        cross_channel_interp = self.cross_channel_interp
        y = x[:, :self.d_dim, :]
        w = x[:, self.d_dim:2*self.d_dim, :]
        intensity = K.exp(w)
        y = tf.transpose(y, perm=[0, 2, 1])
        w = tf.transpose(w, perm=[0, 2, 1])
        w2 = w
        w = K.tile(w[:, :, :, None], (1, 1, 1, self.d_dim))
        den = K.logsumexp(w, axis=2)
        w = K.exp(w2 - den)
        mean = K.mean(y, axis=1)
        mean = K.tile(mean[:, None, :], (1, self.output_dim, 1))
        w2 = K.dot(w*(y - mean), cross_channel_interp) + mean
        rep1 = tf.transpose(w2, perm=[0, 2, 1])
        if reconstruction is False:
            y_trans = x[:, 2*self.d_dim:3*self.d_dim, :]
            y_trans = y_trans - rep1  # subtracting smooth from transient part
            rep1 = tf.concat([rep1, intensity, y_trans], 1)
        return rep1 
Example #13
Source File: interpolation_layer.py    From interp-net with MIT License
def call(self, x, reconstruction=False):
        self.reconstruction = reconstruction
        x_t = x[:, :self.d_dim, :]
        d = x[:, 2*self.d_dim:3*self.d_dim, :]
        if reconstruction:
            output_dim = self.time_stamp
            m = x[:, 3*self.d_dim:, :]
            ref_t = K.tile(d[:, :, None, :], (1, 1, output_dim, 1))
        else:
            m = x[:, self.d_dim: 2*self.d_dim, :]
            ref_t = np.linspace(0, self.hours_look_ahead, self.ref_points)
            output_dim = self.ref_points
            ref_t.shape = (1, ref_t.shape[0])
        #x_t = x_t*m
        d = K.tile(d[:, :, :, None], (1, 1, 1, output_dim))
        mask = K.tile(m[:, :, :, None], (1, 1, 1, output_dim))
        x_t = K.tile(x_t[:, :, :, None], (1, 1, 1, output_dim))
        norm = (d - ref_t)*(d - ref_t)
        a = K.ones((self.d_dim, self.time_stamp, output_dim))
        pos_kernel = K.log(1 + K.exp(self.kernel))
        alpha = a*pos_kernel[:, np.newaxis, np.newaxis]
        w = K.logsumexp(-alpha*norm + K.log(mask), axis=2)
        w1 = K.tile(w[:, :, None, :], (1, 1, self.time_stamp, 1))
        w1 = K.exp(-alpha*norm + K.log(mask) - w1)
        y = K.sum(w1*x_t, axis=2)
        if reconstruction:
            rep1 = tf.concat([y, w], 1)
        else:
            w_t = K.logsumexp(-10.0*alpha*norm + K.log(mask),
                              axis=2)  # kappa = 10
            w_t = K.tile(w_t[:, :, None, :], (1, 1, self.time_stamp, 1))
            w_t = K.exp(-10.0*alpha*norm + K.log(mask) - w_t)
            y_trans = K.sum(w_t*x_t, axis=2)
            rep1 = tf.concat([y, w, y_trans], 1)
        return rep1 
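Both interpolation-layer examples use the same pattern: K.logsumexp provides the log of the softmax denominator, and the normalized weights are then recovered as exp(z - logsumexp(z)). A small NumPy sketch of that pattern, with made-up scores and assuming SciPy is available:

import numpy as np
from scipy.special import logsumexp

z = np.array([500.0, 501.0, 499.0])   # made-up unnormalized log-weights
w = np.exp(z - logsumexp(z))          # stable softmax; a naive np.exp(z) would overflow
print(w, w.sum())                     # approximately [0.2447 0.6652 0.0900] 1.0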
Example #14
Source File: crf.py    From keras-contrib with MIT License
def step(self, input_energy_t, states, return_logZ=True):
        # note: in the following, `prev_target_val` has shape (B, F)
        # where B = batch_size, F = output feature dim
        # Note: `i` is of float32, due to the behavior of `K.rnn`
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            # (1, F, F)*(B, 1, 1) -> (B, F, F)
            chain_energy = chain_energy * K.expand_dims(
                K.expand_dims(m[:, 0] * m[:, 1]))
        if return_logZ:
            # shapes: (1, F, F) + (B, F, 1) -> (B, F, F)
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)
            new_target_val = K.logsumexp(-energy, 1)  # shape: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            # cast for tf-version `K.rnn`
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())
            return argmin_table, [min_energy, i + 1]