Python keras.backend.exp() Examples

The following are 30 code examples showing how to use keras.backend.exp(). They are extracted from open-source projects; the project, author, source file, and license are noted above each example.

You may also want to check out all available functions and classes of the module keras.backend.
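
As a quick orientation, K.exp applies the element-wise natural exponential to a tensor. A minimal sketch, assuming a TensorFlow-backed Keras installation:

from keras import backend as K

# e**x for every entry of the input tensor
x = K.constant([0.0, 1.0, -1.0])
print(K.eval(K.exp(x)))  # ~[1.0, 2.71828, 0.36788]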

Example 1
Project: steppy-toolkit   Author: minerva-ml   File: contrib.py    License: MIT License
def call(self, x, mask=None):
        # computes a probability distribution over the timesteps
        # uses 'max trick' for numerical stability
        # reshape is done to avoid an issue with TensorFlow
        # and 1-dimensional weights
        logits = K.dot(x, self.W)
        x_shape = K.shape(x)
        logits = K.reshape(logits, (x_shape[0], x_shape[1]))
        ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

        # masked timesteps have zero weight
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ai = ai * mask
        att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
        weighted_input = x * K.expand_dims(att_weights)
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, att_weights]
        return result 
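
The 'max trick' in this example deserves a note: subtracting the row maximum before K.exp leaves the normalized attention weights unchanged (the constant cancels in the ratio) but prevents float overflow. A small illustration, assuming a TensorFlow backend:

logits = K.constant([1000.0, 1000.0])
print(K.eval(K.exp(logits)))                  # [inf, inf] -- overflow
print(K.eval(K.exp(logits - K.max(logits))))  # [1.0, 1.0] -- finite, same softmax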
Example 2
Project: Keras-GAN   Author: eriklindernoren   File: aae.py    License: MIT License
def build_encoder(self):
        # Encoder

        img = Input(shape=self.img_shape)

        h = Flatten()(img)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        latent_repr = merge([mu, log_var],
                mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                output_shape=lambda p: p[0])

        return Model(img, latent_repr) 
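
A note on API versions: merge with a mode lambda is Keras 1.x and was removed in Keras 2. The snippet below is a sketch of the same reparameterization using a Lambda layer, assuming from keras.layers import Lambda; it is an illustration, not the project's own code:

def reparameterize(p):
    # z = mu + eps * exp(log_var / 2), with eps drawn from N(0, I)
    mu, log_var = p
    return mu + K.random_normal(K.shape(mu)) * K.exp(log_var / 2)

latent_repr = Lambda(reparameterize, output_shape=lambda p: p[0])([mu, log_var])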
Example 3
Project: Python-Deep-Learning-SE   Author: ivan-vasilev   File: chapter_06_001.py    License: MIT License
def sampling(args: tuple):
    """
    Reparameterization trick: sample z via epsilon drawn from a unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """

    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
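
In a full model this function is typically attached with a Lambda layer; a minimal usage sketch, where z_mean and z_log_var are assumed to be Dense(latent_dim) outputs of the encoder:

from keras.layers import Lambda

# wrap sampling as a graph node producing z = z_mean + exp(0.5 * z_log_var) * eps
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])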
Example 4
Project: DeepResearch   Author: Hsankesara   File: attention_with_context.py    License: MIT License
def call(self, x, mask=None):
        uit = dot_product(x, self.W)

        if self.bias:
            uit += self.b

        uit = K.tanh(uit)
        ait = dot_product(uit, self.u)

        a = K.exp(ait)

        # apply mask after the exp; weights will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases, especially in the early stages of training, the sum
        # may be almost zero and this results in NaNs. A workaround is to add
        # a very small positive number ε to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1) 
Example 5
Project: keras_bn_library   Author: bnsnapper   File: test_vae_lstm.py    License: MIT License
def gen_cosine_amp(amp=100, period=1000, x0=0, xn=50000, step=1, k=0.0001):
	"""Generates an absolute cosine time series with the amplitude
	exponentially decreasing

	Arguments:
	    amp: amplitude of the cosine function
	    period: period of the cosine function
	    x0: initial x of the time series
	    xn: final x of the time series
	    step: step of the time series discretization
	    k: exponential rate
	"""
	cos = np.zeros(((xn - x0) * step, 1, 1))
	for i in range(len(cos)):
		idx = x0 + i * step
		cos[i, 0, 0] = amp * np.cos(2 * np.pi * idx / period)
		cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
	return cos 
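
A quick usage note: the returned array has shape ((xn - x0) * step, 1, 1), i.e. one scalar feature per timestep in the (samples, timesteps, features) layout Keras LSTMs expect. For example:

signal = gen_cosine_amp(xn=1000)
print(signal.shape)  # (1000, 1, 1)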
Example 6
Project: pyod   Author: yzhao062   File: vae.py    License: BSD 2-Clause "Simplified" License
def sampling(self, args):
        """Reparametrisation by sampling from Gaussian, N(0,I)
        To sample from epsilon = Norm(0,I) instead of from likelihood Q(z|X)
        with latent variables z: z = z_mean + sqrt(var) * epsilon

        Parameters
        ----------
        args : tensor
            Mean and log of variance of Q(z|X).
    
        Returns
        -------
        z : tensor
            Sampled latent variable.
        """

        z_mean, z_log = args
        batch = K.shape(z_mean)[0]  # batch size
        dim = K.int_shape(z_mean)[1]  # latent dimension
        epsilon = K.random_normal(shape=(batch, dim))  # mean=0, std=1.0

        return z_mean + K.exp(0.5 * z_log) * epsilon 
Example 7
Project: deephlapan   Author: jiujiezz   File: attention.py    License: GNU General Public License v2.0
def call(self, x, mask=None):
        eij = dot_product(x, self.W)

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)

        a = K.exp(eij)

        if mask is not None:
            a *= K.cast(mask, K.floatx())

        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        weighted_input = x * K.expand_dims(a)

        result = K.sum(weighted_input, axis=1)

        if self.return_attention:
            return [result, a]
        return result 
Example 8
Project: text-classifier   Author: shibing624   File: attention_layer.py    License: Apache License 2.0
def call(self, x, mask=None):
        # size of x :[batch_size, sel_len, attention_dim]
        # size of u :[batch_size, attention_dim]
        # uit = tanh(xW+b)
        uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
        ait = K.dot(uit, self.u)
        ait = K.squeeze(ait, -1)

        ait = K.exp(ait)

        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            ait *= K.cast(mask, K.floatx())
        ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        ait = K.expand_dims(ait)
        weighted_input = x * ait
        output = K.sum(weighted_input, axis=1)

        return output 
Example 9
Project: gandlf   Author: codekansas   File: losses.py    License: MIT License
def rbf_moment_matching(y_true, y_pred, sigmas=[2, 5, 10, 20, 40, 80]):
    """Generative moment matching loss with RBF kernel.

    Reference: https://arxiv.org/abs/1502.02761
    """

    warnings.warn('Moment matching loss is still in development.')

    if len(K.int_shape(y_pred)) != 2 or len(K.int_shape(y_true)) != 2:
        raise ValueError('RBF Moment Matching function currently only works '
                         'for outputs with shape (batch_size, num_features). '
                         'Got y_true="%s" and y_pred="%s".' %
                         (str(K.int_shape(y_true)), str(K.int_shape(y_pred))))

    sigmas = list(sigmas) if isinstance(sigmas, (list, tuple)) else [sigmas]

    x = K.concatenate([y_pred, y_true], 0)

    # Performs dot product between all combinations of rows in X.
    xx = K.dot(x, K.transpose(x))  # (batch_size, batch_size)

    # Performs dot product of all rows with themselves.
    x2 = K.sum(x * x, 1, keepdims=True)  # (batch_size, 1)

    # Gets exponent entries of the RBF kernel (without sigmas).
    exponent = xx - 0.5 * x2 - 0.5 * K.transpose(x2)

    # Applies all the sigmas.
    total_loss = None
    for sigma in sigmas:
        kernel_val = K.exp(exponent / sigma)
        loss = K.sum(kernel_val)
        total_loss = loss if total_loss is None else loss + total_loss

    return total_loss 
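
The exponent line uses the identity ||xi - xj||^2 = xi·xi - 2·xi·xj + xj·xj, so exponent[i, j] equals -0.5 * ||xi - xj||^2 for every pair of rows. A small NumPy check of that identity (illustration only):

import numpy as np

x = np.random.randn(4, 3)
xx = x @ x.T                          # pairwise dot products
x2 = np.sum(x * x, 1, keepdims=True)  # squared row norms
exponent = xx - 0.5 * x2 - 0.5 * x2.T
pairwise = -0.5 * np.square(x[:, None, :] - x[None, :, :]).sum(-1)
assert np.allclose(exponent, pairwise)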
Example 10
Project: gandlf   Author: codekansas   File: similarities.py    License: MIT License
def exp_l1(a, b):
    """Exponential of L1 similarity. Maximum is 1 (a == b), minimum is 0."""

    return K.exp(l1(a, b)) 
Example 11
Project: gandlf   Author: codekansas   File: similarities.py    License: MIT License
def exp_l2(a, b):
    """Exponential of L2 similarity. Maximum is 1 (a == b), minimum is 0."""

    return K.exp(l2(a, b)) 
Example 12
Project: Quora-Question-Pairs   Author: rupak-118   File: MaLSTM_train.py    License: MIT License
def exponent_neg_manhattan_distance(left, right):
    ''' 
    Purpose : Helper function for the similarity estimate of the LSTMs' outputs
    Inputs : Two n-dimensional vectors
    Output : exp(-Manhattan distance), a similarity score in (0, 1]
    
    '''
    return K.exp(-K.sum(K.abs(left-right), axis=1, keepdims=True)) 
Example 13
Project: Quora-Question-Pairs   Author: rupak-118   File: test.py    License: MIT License
def exponent_neg_manhattan_distance(left, right):
    ''' 
    Purpose : Helper function for the similarity estimate of the LSTMs' outputs
    Inputs : Two n-dimensional vectors
    Output : exp(-Manhattan distance), a similarity score in (0, 1]
    
    '''
    return K.exp(-K.sum(K.abs(left-right), axis=1, keepdims=True)) 
Example 14
Project: CapsNet   Author: l11x0m7   File: capsule.py    License: MIT License
def softmax(x, axis=-1):
    """
    Self-defined softmax function
    """
    x = K.exp(x - K.max(x, axis=axis, keepdims=True))
    x /= K.sum(x, axis=axis, keepdims=True)
    return x 
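
Because the row maximum is subtracted before K.exp, this matches the standard softmax while avoiding overflow; a quick sanity check against the built-in:

x = K.constant([[1.0, 2.0, 3.0]])
print(K.eval(softmax(x)))    # ~[[0.090, 0.245, 0.665]]
print(K.eval(K.softmax(x)))  # same values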
Example 15
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    License: GNU General Public License v3.0
def rbf(x):
    # This is not really a radial basis function, but it is similar
    return K.exp(-K.square(x)) 
Example 16
Project: CalibrationNN   Author: Andres-Hernandez   File: neural_network.py    License: GNU General Public License v3.0
def test_fnn(func):
    parameters = [(6, 4, 0.25, 0.25, 0.25, 0.001, 1.0)]
    results = Parallel(n_jobs=n_jobs)(delayed(test_helper)(func, exp, layer, lr, 
                       dof, dom, dol, alpha)
                              for exp, layer, dof, dom, dol, lr, alpha in parameters)

    results = sorted(results, key = lambda x: x[0], reverse=True)
    for result in results:
        print(result) 
Example 17
Project: keras-yolo3   Author: bing0037   File: model.py    License: MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
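
The K.exp in the box_wh line undoes YOLO's log-space encoding of box sizes: the network predicts t_w, and the decoded width is exp(t_w) * anchor_w / input_w. With hypothetical numbers, a raw prediction t_w = 0.2 against a 116-px anchor on a 416-px input decodes to:

import numpy as np
print(np.exp(0.2) * 116 / 416)  # ~0.34, i.e. about 34% of the image width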
Example 18
Project: deepchem   Author: deepchem   File: model.py    License: MIT License
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
      z_mean_, z_log_var_ = args
      batch_size = K.shape(z_mean_)[0]
      epsilon = K.random_normal(
          shape=(batch_size, latent_rep_size), mean=0., stddev=epsilon_std)
      return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
      x = K.flatten(x)
      x_decoded_mean = K.flatten(x_decoded_mean)
      xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
      kl_loss = -0.5 * K.mean(
          1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
      return xent_loss + kl_loss

    return (vae_loss, Lambda(
        sampling, output_shape=(latent_rep_size,),
        name='lambda')([z_mean, z_log_var])) 
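
For reference, the kl_loss term is the closed-form KL divergence between the approximate posterior N(mu, sigma^2) and the unit-Gaussian prior, KL = -0.5 * (1 + log(sigma^2) - mu^2 - sigma^2) summed over latent dimensions, with z_log_var standing in for log(sigma^2); the code averages over the latent axis instead of summing, which only rescales the term.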
Example 19
Project: multi-object-tracking   Author: jguoaj   File: model.py    License: GNU General Public License v3.0
def yolo_head(feats, anchors, num_classes, input_shape):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (box_xy + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = box_wh * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))

    return box_xy, box_wh, box_confidence, box_class_probs 
Example 20
Project: vision-web-service   Author: sherlockchou86   File: model.py    License: MIT License
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3] # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
        [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
        [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs 
Example 21
Project: DeepLearn   Author: GauravBh1010tt   File: layers.py    License: MIT License
def call(self, x, mask=None):
        h1 = x[0]
        h2 = x[1]
        # exp(-L1 distance) similarity, clipped away from exactly 0 and 1
        dif = K.sum(K.abs(h1 - h2), axis=1)
        h = K.exp(-dif)
        h = K.clip(h, 1e-7, 1.0 - 1e-7)
        h = K.reshape(h, (h.shape[0], 1))
        return h 
Example 22
Project: navbot   Author: marooncn   File: VAE2.py    License: MIT License
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], Z_DIM), mean=0., stddev=1.)
    return z_mean + K.exp(z_log_var / 2) * epsilon 
Example 23
Project: autopool   Author: marl   File: autopool.py    License: MIT License
def call(self, x, mask=None):
        scaled = self.kernel * x
        max_val = K.max(scaled, axis=self.axis, keepdims=True)
        softmax = K.exp(scaled - max_val)
        weights = softmax / K.sum(softmax, axis=self.axis, keepdims=True)
        return K.sum(x * weights, axis=self.axis, keepdims=False) 
Example 24
Project: autopool   Author: marl   File: autopool.py    License: MIT License
def call(self, x, mask=None):
        max_val = K.max(x, axis=self.axis, keepdims=True)
        softmax = K.exp((x - max_val))
        weights = softmax / K.sum(softmax, axis=self.axis, keepdims=True)
        return K.sum(x * weights, axis=self.axis, keepdims=False) 
Example 25
Project: Projects   Author: iamshang1   File: conv_vae.py    License: MIT License
def _sampling(self,args):
        '''
        sampling function for embedding layer
        '''
        z_mean,z_log_var = args
        epsilon = K.random_normal(shape=K.shape(z_mean),mean=self.eps_mean,
                                  stddev=self.eps_std)
        return z_mean + K.exp(0.5 * z_log_var) * epsilon  # exp(0.5 * log var) = std 
Example 26
Project: Projects   Author: iamshang1   File: conv_vae.py    License: MIT License
def _vae_loss(self,input,output):
        '''
        loss function for variational autoencoder
        '''
        input_flat = K.flatten(input)
        output_flat = K.flatten(output)
        xent_loss = self.image_size[0] * self.image_size[1] \
                    * objectives.binary_crossentropy(input_flat,output_flat)
        kl_loss = - 0.5 * K.mean(1 + self.z_log_var - K.square(self.z_mean) 
                  - K.exp(self.z_log_var), axis=-1)
        return xent_loss + kl_loss 
Example 27
Project: object-detection   Author: kaka-lin   File: model.py    License: MIT License
def yolo_head(feats, anchors, num_classes, n):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the innermost iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    conv_width_index = K.tile(K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    # TODO: It works with +1, don't know why.
    box_xy = (box_xy + conv_index + 1) / conv_dims
    # TODO: Input layer size
    box_wh = box_wh * anchors_tensor / conv_dims / {0:32, 1:16, 2:8}[n]

    return [box_xy, box_wh, box_confidence, box_class_probs] 
Example 28
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    License: MIT License
def logsumexp(x, axis=None):
    x_max = K.max(x, axis=axis, keepdims=True)
    return K.log(K.sum(K.exp(x - x_max), axis=axis, keepdims=True)) + x_max 
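
This is the standard numerically stable log-sum-exp: factoring out the maximum keeps every exponent argument at or below zero. A quick check with values that overflow the naive form:

x = K.constant([1000.0, 1000.0])
print(K.eval(logsumexp(x)))  # ~[1000.6931] (= 1000 + log 2)
# the naive K.log(K.sum(K.exp(x))) would return inf in float32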
Example 29
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    License: MIT License
def bbalpha_softmax_cross_entropy_with_mc_logits(alpha):
    alpha = K.cast_to_floatx(alpha)
    def loss(y_true, mc_logits):
        # log(p_ij), p_ij = softmax(logit_ij)
        #assert mc_logits.ndim == 3
        mc_log_softmax = mc_logits - K.max(mc_logits, axis=2, keepdims=True)
        mc_log_softmax = mc_log_softmax - K.log(K.sum(K.exp(mc_log_softmax), axis=2, keepdims=True))
        mc_ll = K.sum(y_true * mc_log_softmax, -1)  # N x K
        K_mc = mc_ll.get_shape().as_list()[1]	# only for tensorflow
        return - 1. / alpha * (logsumexp(alpha * mc_ll, 1) + K.log(1.0 / K_mc))
    return loss
Example 30
Project: convnets-keras   Author: heuritech   File: customlayers.py    License: MIT License
def call(self, x, mask=None):
        e = K.exp(x - K.max(x, axis=self.axis, keepdims=True))
        s = K.sum(e, axis=self.axis, keepdims=True)
        return e / s