Python keras.activations() Examples

The following are code examples showing how to use functions from the keras.activations module. They are drawn from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    MIT License 6 votes vote down vote up
def test_softmax():
    """Compare keras softmax against a NumPy reference implementation."""

    from keras.activations import softmax as s

    def reference_softmax(vals):
        # Numerically-stable reference: shift by the max before exponentiating.
        shifted = numpy.array(vals) - max(vals)
        exps = numpy.exp(shifted)
        return list(exps / numpy.sum(exps))

    inp = T.vector()
    compiled = theano.function([inp], s(inp))
    samples = get_standard_values()

    actual = compiled(samples)
    wanted = reference_softmax(samples)

    print(str(actual))
    print(str(wanted))

    list_assert_equal(actual, wanted) 
Example 2
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    MIT License 6 votes vote down vote up
def test_relu():
    '''
    Relu implementation doesn't depend on the value being
    a theano variable. Testing ints, floats and theano tensors.
    '''

    from keras.activations import relu as r

    # Plain Python scalars obey the same rule as tensors: max(x, 0).
    for value, clipped in [(5, 5), (-5, 0), (-0.1, 0), (0.1, 0.1)]:
        assert r(value) == clipped

    inp = T.vector()
    compiled = theano.function([inp], r(inp))

    samples = get_standard_values()
    outputs = compiled(samples)

    list_assert_equal(outputs, samples) # because no negatives in test values 
Example 3
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    MIT License 6 votes vote down vote up
def test_tanh():
    """Compare keras tanh against math.tanh applied element-wise."""

    from keras.activations import tanh as t

    samples = get_standard_values()

    inp = T.vector()
    compiled = theano.function([inp], t(inp))

    actual = compiled(samples)
    wanted = [math.tanh(v) for v in samples]

    print(actual)
    print(wanted)

    list_assert_equal(actual, wanted) 
Example 4
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 6 votes vote down vote up
def __init__(self, out_dim, core_dim,
                 time_window_size, stride=2, trainable=False, 
                 reduce_dim=False, n_recur=1,
                 **kwargs):
        """Store the pooling hyper-parameters on the layer instance.

        The activation is hard-wired to tanh; remaining keyword
        arguments are forwarded to the Keras base-layer constructor.
        """
        self.out_dim = out_dim
        self.core_dim = core_dim
        self.reduce_dim = reduce_dim
        self.n_recur = n_recur
        self.time_window_size = time_window_size
        self.stride = stride
        self.trainable = trainable
        # Fixed activation for the decomposition core.
        self.act_fun = K.tanh
        super(TensorDecomposePooling, self).__init__(**kwargs) 
Example 5
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 6 votes vote down vote up
def __init__(self,
                 in_dim,
                 out_dim,
                 n_factor=20,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Record factorized-bilinear-module hyper-parameters.

        The activation is fixed to the identity (linear) function;
        extra keyword arguments go to the Keras base layer.
        """
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.n_factor = n_factor
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        # Identity activation — the raw pooled features are used as-is.
        self.act_fun = tf.keras.activations.linear
        super(FBM, self).__init__(**kwargs) 
Example 6
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    MIT License 5 votes vote down vote up
def test_linear():
    '''
    This function does no input validation, it just returns the thing
    that was passed in.
    '''

    from keras.activations import linear as l

    # Identity for every kind of object, not just numbers.
    for item in (1, 5, True, None, 'foo'):
        assert l(item) == item 
Example 7
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self, n_recur=1, same_mat=False, out_fusion_type='mean',
                 act_fun_in='linear', act_fun_out='linear', 
                 stride=2, trainable=True, 
                 **kwargs):
        """Store hyper-parameters and resolve activation names to callables.

        Only 'linear' and 'tanh' are recognized; any other name prints an
        error and terminates the process (behavior kept from the original).
        """
        self.out_fusion_type=out_fusion_type
        self.n_recur = n_recur
        self.stride = stride
        self.trainable = trainable
        self.same_mat=same_mat

        # Name -> callable lookup shared by both activation arguments.
        known = {'linear': keras.activations.linear, 'tanh': K.tanh}

        if act_fun_in in known:
            self.act_fun_in = known[act_fun_in]
        else:
            print('[ERROR]: no such activation function for input. Program terminates')
            sys.exit()

        if act_fun_out in known:
            self.act_fun_out = known[act_fun_out]
        else:
            print('[ERROR]: no such activation function for output. Program terminates')
            sys.exit()

        super(TensorChainDecomposePooling, self).__init__(**kwargs) 
Example 8
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self, n_recur=1, 
                 same_mat=False, 
                 use_bias=True,
                 out_fusion_type='sum',
                 act_fun_in='linear', 
                 act_fun_out='linear',
                 stride=2, 
                 trainable=True, 
                 **kwargs):
        """Store hyper-parameters and resolve activation names to callables.

        Only 'linear' and 'tanh' are recognized; any other name prints an
        error and terminates the process (behavior kept from the original).
        """
        self.out_fusion_type=out_fusion_type
        self.n_recur = n_recur
        self.stride = stride
        self.trainable = trainable
        self.same_mat=same_mat
        self.use_bias = use_bias

        # Name -> callable lookup shared by both activation arguments.
        known = {'linear': keras.activations.linear, 'tanh': K.tanh}

        if act_fun_in in known:
            self.act_fun_in = known[act_fun_in]
        else:
            print('[ERROR]: no such activation function for input. Program terminates')
            sys.exit()

        if act_fun_out in known:
            self.act_fun_out = known[act_fun_out]
        else:
            print('[ERROR]: no such activation function for output. Program terminates')
            sys.exit()

        super(TensorStarDecomposePooling, self).__init__(**kwargs) 
Example 9
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1,
                 use_normalization=False,
                 activation=None,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure random-projection binary pooling.

        activation: None (identity), 'tanh', or 'relu'; anything else
        raises ValueError. Output dimension is fixed to n_basis ** 2.
        Remaining keyword arguments go to the Keras base layer.
        """
        self.n_basis = n_basis
        self.out_dim = n_basis**2
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size

        # PEP 8: compare to None with `is`, not `==`.
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        else:
            # Previously an unknown name silently left act_fun unset,
            # deferring the failure to first use; fail fast instead.
            raise ValueError('unsupported activation: {}'.format(activation))

        super(RPBinaryPooling, self).__init__(**kwargs) 
Example 10
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1,
                 use_normalization=False,
                 activation=None,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure random-projection binary pooling (variant 2).

        activation: None (identity), 'tanh', or 'relu'; anything else
        raises ValueError. Output dimension is fixed to n_basis ** 2.
        Remaining keyword arguments go to the Keras base layer.
        """
        self.n_basis = n_basis
        self.out_dim = n_basis**2
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size

        # PEP 8: compare to None with `is`, not `==`.
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        else:
            # Previously an unknown name silently left act_fun unset,
            # deferring the failure to first use; fail fast instead.
            raise ValueError('unsupported activation: {}'.format(activation))

        super(RPBinaryPooling2, self).__init__(**kwargs) 
Example 11
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1, 
                 init_sigma=None,
                 use_normalization=False,
                 activation=None,
                 learnable_radius=True,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure random-projection Gaussian pooling (variant 2).

        activation: None (identity), 'tanh', or 'relu'; anything else
        raises ValueError. Output dimension is n_components * n_basis ** 2.
        Remaining keyword arguments go to the Keras base layer.
        """
        self.n_basis = n_basis
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size
        # BUG FIX: was hard-coded to True, silently ignoring the
        # `learnable_radius` argument (sibling classes assign it through).
        self.learnable_radius = learnable_radius
        self.init_sigma = init_sigma
        self.out_dim = n_components*(n_basis)**2
        # print('-----------init_sigma={}-------------'.format(init_sigma))

        # PEP 8: compare to None with `is`, not `==`.
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        else:
            # Previously an unknown name silently left act_fun unset,
            # deferring the failure to first use; fail fast instead.
            raise ValueError('unsupported activation: {}'.format(activation))
        super(RPGaussianPooling2, self).__init__(**kwargs) 
Example 12
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1, 
                 use_normalization=False,
                 activation=None,
                 learnable_radius=True,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure learnable random-projection pooling.

        activation: None (identity), 'tanh', or 'relu'; anything else
        raises ValueError. Output dimension is n_components * n_basis ** 2.
        Remaining keyword arguments go to the Keras base layer.
        """
        self.n_basis = n_basis
        self.n_components=n_components
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size
        self.learnable_radius = learnable_radius
        self.out_dim = n_components*(n_basis)**2

        # PEP 8: compare to None with `is`, not `==`.
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        else:
            # Previously an unknown name silently left act_fun unset,
            # deferring the failure to first use; fail fast instead.
            raise ValueError('unsupported activation: {}'.format(activation))

        super(RPLearnable, self).__init__(**kwargs) 
Example 13
Project: TemporalActionParsing-FineGrained   Author: yz-cnsdqz   File: RP_Bilinear_Pooling.py    MIT License 5 votes vote down vote up
def __init__(self,
                 n_basis=8,
                 n_components=1,
                 use_normalization=False,
                 activation=None,
                 learnable_radius=True,
                 out_fusion_type='avg', # or max or w-sum
                 stride=2, 
                 time_window_size=5,
                 **kwargs):
        """Configure multi-modal low-rank pooling.

        activation: None (identity), 'tanh', or 'relu'; anything else
        raises ValueError. NOTE(review): out_dim here is n_basis (not
        squared as in the sibling classes) — presumed intentional.
        Remaining keyword arguments go to the Keras base layer.
        """
        self.n_basis = n_basis
        self.out_dim = n_basis
        self.out_fusion_type = out_fusion_type
        self.stride = stride
        self.use_normalization = use_normalization
        self.time_window_size = time_window_size
        self.learnable_radius = learnable_radius
        self.n_components = n_components

        # PEP 8: compare to None with `is`, not `==`.
        if activation is None:
            self.act_fun = tf.keras.activations.linear
        elif activation == 'tanh':
            self.act_fun = tf.keras.activations.tanh
        elif activation == 'relu':
            self.act_fun = tf.keras.activations.relu
        else:
            # Previously an unknown name silently left act_fun unset,
            # deferring the failure to first use; fail fast instead.
            raise ValueError('unsupported activation: {}'.format(activation))

        super(MultiModalLowRankPooling, self).__init__(**kwargs)