Python cntk.relu() Examples

The following are 22 code examples of cntk.relu(). You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the cntk module, or try the search function.
Example #1
Source Project: ngraph-python   Author: NervanaSystems   File: cifar_training.py    License: Apache License 2.0 6 votes vote down vote up
def create_basic_model(input, out_dims):
    """Build a small CIFAR conv net: three conv/pool stages then two dense layers.

    Args:
        input: CNTK input node carrying the image tensor.
        out_dims: number of output classes.

    Returns:
        The network's output node (linear — softmax is left to the loss).
    """
    def conv_pool(feats, num_filters):
        # 5x5 same-padded ReLU convolution with Glorot init, then an
        # overlapping 3x3/stride-2 max-pool.
        conv = C.layers.Convolution(
            (5, 5), num_filters,
            init=C.initializer.glorot_uniform(), activation=C.relu, pad=True,
        )(feats)
        return C.layers.MaxPooling((3, 3), strides=(2, 2))(conv)

    net = input
    for num_filters in (32, 32, 64):
        net = conv_pool(net, num_filters)

    net = C.layers.Dense(64, init=C.initializer.glorot_uniform())(net)
    return C.layers.Dense(
        out_dims, init=C.initializer.glorot_uniform(), activation=None
    )(net)
Example #2
Source Project: ngraph-python   Author: NervanaSystems   File: cifar_training.py    License: Apache License 2.0 6 votes vote down vote up
def create_vgg9_model(input, out_dims):
    """Build a VGG-9-style net: three double-conv/pool stages, two dense, linear head.

    Args:
        input: CNTK input node carrying the image tensor.
        out_dims: number of output classes.

    Returns:
        The network applied to ``input`` (linear output — softmax is in the loss).
    """
    glorot = C.initializer.glorot_uniform
    stage_filters = [64, 96, 128]
    with C.layers.default_options(activation=C.relu):
        model = C.layers.Sequential([
            # Three stages of two same-padded 3x3 convolutions + max-pool.
            C.layers.For(range(3), lambda stage: [
                C.layers.Convolution((3, 3), stage_filters[stage], init=glorot(), pad=True),
                C.layers.Convolution((3, 3), stage_filters[stage], init=glorot(), pad=True),
                C.layers.MaxPooling((3, 3), strides=(2, 2)),
            ]),
            # Two fully-connected ReLU layers of 1024 units each.
            C.layers.For(range(2), lambda: [
                C.layers.Dense(1024, init=glorot()),
            ]),
            C.layers.Dense(out_dims, init=glorot(), activation=None),
        ])
    return model(input)
Example #3
Source Project: GraphicDesignPatternByPython   Author: Relph1119   File: cntk_backend.py    License: MIT License 6 votes vote down vote up
def relu(x, alpha=0., max_value=None, threshold=0.):
    """Keras-style rectified linear unit on a CNTK tensor.

    Values above ``threshold`` pass through; values at or below it fall to
    zero, minus ``alpha`` times how far below the threshold they are
    (leaky slope). ``max_value``, if given, caps the positive side.
    """
    leaky = alpha != 0.
    thresholded = threshold != 0.

    if leaky:
        # Magnitude of the sub-threshold part, captured before rectification.
        if thresholded:
            negative_part = C.relu(-x + threshold)
        else:
            negative_part = C.relu(-x)

    if thresholded:
        # Keep values strictly above the threshold unchanged, zero the rest.
        out = x * C.greater(x, threshold)
    else:
        out = C.relu(x)

    if max_value is not None:
        out = C.clip(out, 0.0, max_value)

    if leaky:
        out -= alpha * negative_part

    return out
Example #4
Source Project: CNTK-World   Author: astorfi   File: autoencoders.py    License: MIT License 6 votes vote down vote up
def create_model(features):
    """Build a symmetric dense autoencoder over ``features``.

    The encoder compresses to hidden_dim and then hidden_dim/2 ReLU units;
    the decoder mirrors those layers and reconstructs ``feature_dim``
    (module-level constant) values through a sigmoid, so outputs lie in [0, 1].

    :param features: The input features node.
    :return: The output of the network, whose dimensionality is ``feature_dim``.
    """
    hidden_dim = 64
    with C.layers.default_options(init=C.layers.glorot_uniform(), activation=C.ops.relu):
        # Encoder: 64 -> 32 units.
        h = C.layers.Dense(hidden_dim, activation=C.relu)(features)
        h = C.layers.Dense(int(hidden_dim / 2.0), activation=C.relu)(h)

        # Decoder: mirror of the encoder, then sigmoid reconstruction layer.
        h = C.layers.Dense(int(hidden_dim / 2.0), activation=C.relu)(h)
        h = C.layers.Dense(hidden_dim, activation=C.relu)(h)
        return C.layers.Dense(feature_dim, activation=C.sigmoid)(h)

# Initializing the model with normalized input. 
Example #5
Source Project: ngraph-python   Author: NervanaSystems   File: cifar_training.py    License: Apache License 2.0 5 votes vote down vote up
def create_terse_model(input, out_dims):
    """Compact conv net: three conv/pool stages, one hidden dense layer, linear head."""
    glorot = C.initializer.glorot_uniform
    stage_filters = [32, 32, 64]
    with C.layers.default_options(activation=C.relu):
        model = C.layers.Sequential([
            C.layers.For(range(3), lambda stage: [
                C.layers.Convolution(
                    (5, 5), stage_filters[stage], init=glorot(), pad=True
                ),
                C.layers.MaxPooling((3, 3), strides=(2, 2)),
            ]),
            C.layers.Dense(64, init=glorot()),
            # Linear output layer: softmax is applied by the loss.
            C.layers.Dense(out_dims, init=glorot(), activation=None),
        ])

    return model(input)
Example #6
Source Project: ngraph-python   Author: NervanaSystems   File: cifar_training.py    License: Apache License 2.0 5 votes vote down vote up
def create_dropout_model(input, out_dims):
    """Same topology as the terse model plus 25% dropout before the output layer."""
    glorot = C.initializer.glorot_uniform
    stage_filters = [32, 32, 64]
    with C.layers.default_options(activation=C.relu):
        model = C.layers.Sequential([
            C.layers.For(range(3), lambda stage: [
                C.layers.Convolution(
                    (5, 5), stage_filters[stage], init=glorot(), pad=True
                ),
                C.layers.MaxPooling((3, 3), strides=(2, 2)),
            ]),
            C.layers.Dense(64, init=glorot()),
            # Regularize the classifier head only.
            C.layers.Dropout(0.25),
            C.layers.Dense(out_dims, init=glorot(), activation=None),
        ])

    return model(input)
Example #7
Source Project: ngraph-python   Author: NervanaSystems   File: cifar_training.py    License: Apache License 2.0 5 votes vote down vote up
def convolution_bn(input, filter_size, num_filters, strides=(1, 1),
                   init=C.he_normal(), activation=C.relu):
    """Same-padded, bias-free convolution followed by ``activation``.

    ``activation=None`` means identity. Despite the name, batch
    normalization is disabled here (see the commented-out line).
    """
    act = activation if activation is not None else (lambda t: t)

    conv = C.layers.Convolution(
        filter_size, num_filters,
        strides=strides, init=init,
        activation=None, pad=True, bias=False,
    )(input)
    # conv = C.layers.BatchNormalization(map_rank=1)(conv)  # intentionally disabled
    return act(conv)
Example #8
Source Project: ngraph-python   Author: NervanaSystems   File: cifar_training.py    License: Apache License 2.0 5 votes vote down vote up
def resnet_basic_inc(input, num_filters):
    """Downsampling ResNet block: two 3x3 convs plus a strided 1x1 projection shortcut."""
    branch = convolution_bn(input, (3, 3), num_filters, strides=(2, 2))
    branch = convolution_bn(branch, (3, 3), num_filters, activation=None)
    # The 1x1/stride-2 projection matches the halved spatial size and
    # the new channel count so the residual addition is well-formed.
    shortcut = convolution_bn(input, (1, 1), num_filters, strides=(2, 2), activation=None)
    return C.relu(branch + shortcut)
Example #9
Source Project: ngraph-python   Author: NervanaSystems   File: test_ops_unary.py    License: Apache License 2.0 5 votes vote down vote up
def test_relu():
    """Check C.relu against the ngraph reference on 1-D and 2-D inputs."""
    cases = [
        [-2, -1., 0., 1., 2.],
        [0.],
        [-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1],
        [[1, 2, 3], [4, 5, 6]],
        [[-3, -2, -1], [1, 2, 3]],
    ]
    for case in cases:
        assert_cntk_ngraph_array_equal(C.relu(case))
Example #10
Source Project: end2end_AU_speech   Author: haixpham   File: LayerUtils.py    License: MIT License 5 votes vote down vote up
def bn_relu(input, name=""):
    """Apply batch normalization followed by ReLU (delegates to the module-level `bn` helper)."""
    return bn(input, activation=C.relu, name=name)
Example #11
Source Project: end2end_AU_speech   Author: haixpham   File: LayerUtils.py    License: MIT License 5 votes vote down vote up
def conv_bn_relu(input, filter_shape, num_filters, strides=(1,1), init=C.he_normal(), name=""):
    """Convolution + batch norm + ReLU; thin wrapper over the module-level `conv_bn` helper."""
    return conv_bn(input, filter_shape, num_filters, strides, init, activation=C.relu, name=name)
Example #12
Source Project: end2end_AU_speech   Author: haixpham   File: LayerUtils.py    License: MIT License 5 votes vote down vote up
def conv_bn_relu_nopad(input, filter_shape, num_filters, strides=(1,1), init=C.he_normal(), name=""):
    """Unpadded convolution + batch norm + ReLU; thin wrapper over `conv_bn_nopad`."""
    return conv_bn_nopad(input, filter_shape, num_filters, strides, init, activation=C.relu, name=name)
Example #13
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #14
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #15
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #16
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #17
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #18
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #19
Source Project: DeepLearning_Wavelet-LSTM   Author: hello-sea   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #20
Source Project: deepQuest   Author: sheffieldnlp   File: cntk_backend.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out
Example #21
Source Project: CNTK-World   Author: astorfi   File: conditional-DCGAN.py    License: MIT License 5 votes vote down vote up
def D(x_img, x_code):
    '''
    Discriminator network architecture for the conditional GAN.

    Two strided convolutions (each followed by batch norm and a fixed
    leaky ReLU), a dense layer, concatenation with the conditional code,
    a ReLU dense layer, and a final sigmoid unit scoring the
    (image, code) pair as real/fake.

    Args:
        x_img: cntk.input_variable representing images fed to the network
        x_code: cntk.input_variable representing the conditional code

    Returns:
        The sigmoid output node, named 'D_out'.
    '''
    def bn_with_leaky_relu(x, leak=0.2):
        # Batch norm followed by param_relu with a *constant* negative
        # slope `leak` — i.e. a fixed leaky ReLU, not a learned PReLU.
        h = C.layers.BatchNormalization(map_rank=1)(x)
        r = C.param_relu(C.constant((np.ones(h.shape) * leak).astype(np.float32)), h)
        return r

    # dkernel / dstride are module-level hyperparameters.
    with C.layers.default_options(init=C.normal(scale=0.02)):

        h0 = C.layers.Convolution2D(dkernel, 1, strides=dstride)(x_img)
        h0 = bn_with_leaky_relu(h0, leak=0.2)
        print('h0 shape :', h0.shape)

        h1 = C.layers.Convolution2D(dkernel, 64, strides=dstride)(h0)
        h1 = bn_with_leaky_relu(h1, leak=0.2)
        print('h1 shape :', h1.shape)

        h2 = C.layers.Dense(256, activation=None)(h1)
        h2 = bn_with_leaky_relu(h2, leak=0.2)
        print('h2 shape :', h2.shape)

        # Condition the discriminator by appending the code to the features.
        h2_aug = C.splice(h2, x_code)

        h3 = C.layers.Dense(256, activation=C.relu)(h2_aug)

        h4 = C.layers.Dense(1, activation=C.sigmoid, name='D_out')(h3)
        # Bug fix: the original printed this tensor with the label 'h3 shape :'.
        print('h4 shape :', h4.shape)

        return h4
Example #22
Source Project: keras-lambda   Author: sunilmallya   File: cntk_backend.py    License: MIT License 5 votes vote down vote up
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leaky slope and ceiling.

    Computes max(x, 0); a non-zero ``alpha`` scales the negative side
    (leaky ReLU), and ``max_value``, if given, caps the output.
    """
    leaky = alpha != 0.
    if leaky:
        # Capture the magnitude of negative inputs before rectifying.
        negative_part = C.relu(-x)
    out = C.relu(x)
    if max_value is not None:
        out = C.clip(out, 0.0, max_value)
    if leaky:
        out -= alpha * negative_part
    return out