Python keras.activations.linear() Examples

The following are code examples showing how to use keras.activations.linear(), collected from open source Python projects.
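Before the examples, a quick note on what the function actually is: keras.activations.linear is the identity activation, f(x) = x, and it performs no input validation (as the tests below verify). A minimal sketch, assuming a Keras 2.x install:

from keras import activations

x = 3.14
assert activations.linear(x) == x  # linear is the identity: f(x) = x

Several of the examples below exploit exactly this property, swapping a model's final softmax for linear so that visualizations are computed on raw class scores.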

Example 1
Project: keras-mnist-workshop   Author: drschilling   File: keras_mnist_vis.py    Apache License 2.0
def keras_digits_vis(model, X_test, y_test):
    # Swap softmax with linear on the 'preds' layer
    layer_idx = utils.find_layer_idx(model, 'preds')
    model.layers[layer_idx].activation = activations.linear
    model = utils.apply_modifications(model)

    for class_idx in np.arange(10):    
        indices = np.where(y_test[:, class_idx] == 1.)[0]
        idx = indices[0]

        f, ax = plt.subplots(1, 4)
        ax[0].imshow(X_test[idx][..., 0])
        
        for i, modifier in enumerate([None, 'guided', 'relu']):
            heatmap = visualize_saliency(model, layer_idx, filter_indices=class_idx, 
                                        seed_input=X_test[idx], backprop_modifier=modifier)
            if modifier is None:
                modifier = 'vanilla'
            ax[i+1].set_title(modifier)    
            ax[i+1].imshow(heatmap)
    plt.show()
Example 2
Project: DSP_EMGDL_Chapter   Author: DSIP-UPatras   File: visualizations.py    MIT License
def saliency(model, input_images, input_labels):
    """Function that computes the attention map visualization.
    Args:
        model: A keras.model
        input_images: Array of 3D images (height, width, 3) of which the attention is computed
        input_labels: The class label for each input image
    Returns:
        A list of attention maps
    """
    layer_idx = -1

    # Swap softmax with linear
    model.layers[-2].activation = activations.linear
    model = utils.apply_modifications(model)

    vis_images = []
    for img, label in zip(input_images, input_labels):
        # Maximize the output node corresponding to this image's class label
        grads = visualize_saliency(model, layer_idx, filter_indices=label,
                                   seed_input=img)
        vis_images.append(grads)
    return vis_images
    return vis_images 
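Examples 1, 2, 10, 11 and 15 all rely on the same idiom: before running keras-vis visualizations, the output layer's softmax is swapped for a linear activation, since gradients taken through a softmax tend to be less informative than gradients of the raw class scores. A condensed sketch of that idiom, assuming the keras-vis package is installed, a built model in scope, and an output layer named 'preds':

from keras import activations
from vis.utils import utils

layer_idx = utils.find_layer_idx(model, 'preds')  # 'preds' is an assumed layer name
model.layers[layer_idx].activation = activations.linear
# apply_modifications rebuilds the graph (via a save/load round trip)
# so the changed activation actually takes effect
model = utils.apply_modifications(model)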
Example 3
Project: applications   Author: geomstats   File: activations_test.py    MIT License
def test_get_fn():
    """Activations has a convenience "get" function. All paths of this
    function are tested here, although the behaviour in some instances
    seems potentially surprising (e.g. situation 3)
    """

    # 1. Default returns linear
    a = activations.get(None)
    assert a == activations.linear

    # 2. Passing in a layer raises a warning
    layer = Dense(32)
    with pytest.warns(UserWarning):
        a = activations.get(layer)

    # 3. Callables return themselves for some reason
    a = activations.get(lambda x: 5)
    assert a(None) == 5

    # 4. Anything else is not a valid argument
    with pytest.raises(ValueError):
        a = activations.get(6) 
Example 4
Project: CAPTCHA-breaking   Author: lllcho   File: test_activations.py    MIT License
def test_linear():
    '''
    This function does no input validation, it just returns the thing
    that was passed in.
    '''

    from keras.activations import linear as l

    xs = [1, 5, True, None, 'foo']

    for x in xs:
        assert x == l(x) 
Example 5
Project: workspace_2017   Author: nwiizo   File: test_activations.py    MIT License
def test_linear():
    '''
    This function does no input validation, it just returns the thing
    that was passed in.
    '''
    xs = [1, 5, True, None, 'foo']
    for x in xs:
        assert x == activations.linear(x)
Example 6
Project: applications   Author: geomstats   File: activations_test.py    MIT License
def test_serialization():
    all_activations = ['softmax', 'relu', 'elu', 'tanh',
                       'sigmoid', 'hard_sigmoid', 'linear',
                       'softplus', 'softsign', 'selu']
    for name in all_activations:
        fn = activations.get(name)
        ref_fn = getattr(activations, name)
        assert fn == ref_fn
        config = activations.serialize(fn)
        fn = activations.deserialize(config)
        assert fn == ref_fn 
Example 7
Project: applications   Author: geomstats   File: activations_test.py    MIT License
def test_linear():
    xs = [1, 5, True, None]
    for x in xs:
        assert x == activations.linear(x)
Example 8
Project: experiments   Author: Octavian-ai   File: adjacency_layer.py    MIT License
def __init__(self, person_count, product_count, style_width, **kwargs):
    self.person_count = person_count
    self.product_count = product_count
    self.style_width = style_width
    self.dense1 = layers.Dense(units=style_width, activation=activations.softplus, use_bias=False, kernel_regularizer=Clip)
    # self.dense2 = layers.Dense(units=1, activation=activations.linear)
    self.dense3 = layers.Dense(units=1, activation=partial(activations.relu, alpha=0.1), use_bias=False, kernel_regularizer=Clip)
    super(Adjacency, self).__init__(**kwargs)
Example 9
Project: bidaf-keras   Author: ParikhKadam   File: similarity_layer.py    GNU General Public License v3.0
def compute_similarity(self, repeated_context_vectors, repeated_query_vectors):
        element_wise_multiply = repeated_context_vectors * repeated_query_vectors
        concatenated_tensor = K.concatenate(
            [repeated_context_vectors, repeated_query_vectors, element_wise_multiply], axis=-1)
        dot_product = K.squeeze(K.dot(concatenated_tensor, self.kernel), axis=-1)
        # linear is the identity activation, so this simply returns the biased dot product
        return linear(dot_product + self.bias)
Example 10
Project: cnn-number-detection   Author: FabianGroeger96   File: model.py    MIT License
def visualize_dense_layer(self):
        self.logger.info('Visualizing dense layers')

        # create folder for saving visualization
        save_path = os.path.join(constants.MODEL_DIR, 'Visualization', self.model_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        # search the last dense layer with the name 'preds'
        layer_idx = utils.find_layer_idx(self.model, 'preds')

        # Swap softmax with linear
        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)

        # output node we want to maximize
        for class_idx in np.arange(len(constants.CATEGORIES)):
            # Generate an input image that maximizes this class output
            img = visualize_activation(model, layer_idx, filter_indices=class_idx, input_range=(0., 1.))
            plt.figure()
            plt.title("Network's perception of {}".format(class_idx))
            plt.imshow(img[..., 0])

            # save the plot
            plot_name = 'dense-layer-{}.png'.format(constants.CATEGORIES[class_idx])
            plt.savefig(os.path.join(save_path, plot_name))
            plt.show() 
Example 11
Project: cnn-number-detection   Author: FabianGroeger96   File: model.py    MIT License
def visualize_feature_map(self):
        self.logger.info('Visualizing feature map')

        # create folder for saving visualization
        save_path = os.path.join(constants.MODEL_DIR, 'Visualization', self.model_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        # search the last dense layer with the name 'preds'
        layer_idx = utils.find_layer_idx(self.model, 'preds')

        # Swap softmax with linear
        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)

        # visualize saliency against the (now linear) dense layer, one class at a time
        for class_idx in np.arange(len(constants.CATEGORIES)):
            # choose a random image from test data
            indices = np.where(self.testY[:, class_idx] == 1.)[0]
            idx = random.choice(indices)

            f, ax = plt.subplots(1, 4)
            ax[0].imshow(self.testX[idx][..., 0])

            for i, modifier in enumerate([None, 'guided', 'relu']):
                grads = visualize_saliency(model, layer_idx,
                                           filter_indices=class_idx,
                                           seed_input=self.testX[idx],
                                           backprop_modifier=modifier,
                                           grad_modifier='negate')
                if modifier is None:
                    modifier = 'vanilla'

                ax[i + 1].set_title(modifier)
                ax[i + 1].imshow(grads, cmap='jet')

            # save the plot
            plot_name = 'feature-map-{}.png'.format(constants.CATEGORIES[class_idx])
            plt.savefig(os.path.join(save_path, plot_name))
            plt.show() 
Example 12
Project: lucid4keras   Author: totti0223   File: utils.py    Apache License 2.0
def prepare_model(model, layer_name="conv2d_5", linearize=True):
    '''
    input:
        model : a model built with keras.
        layer_name : a valid layer name within the model.
        linearize : if True, change the activation of the specified layer
            (the final layer of the generated intermediate model) from relu to linear.
    return:
        modified keras model.
    '''
    def linearize_activations(input_model, layer_name):
        def search_inbound(layer_name):
            if "merge" in str(input_model.get_layer(layer_name)):
                # merge layer: recurse into each connected inbound layer
                for layer in input_model.get_layer(layer_name)._inbound_nodes[0].inbound_layers:
                    search_inbound(layer.name)
            elif "pool" in str(input_model.get_layer(layer_name)):
                pass
            else:
                if input_model.get_layer(layer_name).activation == activations.linear:
                    print("already a linear layer")
                else:
                    print("\tlinearizing layer:", layer_name)
                    input_model.get_layer(layer_name).activation = activations.linear
            return 0

        if "merge" in str(input_model.get_layer(layer_name)) or "GlobalAveragePooling2D" in str(input_model.get_layer(layer_name)):
            print(layer_name, input_model.get_layer(layer_name), "is a merge layer. will linearize connected relu containing layers")
            # recursively search the layers connected to this one until hitting
            # an activation layer or a conv2d layer that has an activation
            _ = search_inbound(layer_name)
            # save and reload so the activation change is applied to the graph
            model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + ".hdf5")
            input_model.save(model_path)
            input_model = load_model(model_path, compile=False)
            os.remove(model_path)
            return input_model
        else:
            if input_model.get_layer(layer_name).activation == activations.linear:
                print("already a linear layer, return unmodified model")
                return input_model
            else:
                print("linearizing layer:", layer_name)
                input_model.get_layer(layer_name).activation = activations.linear
                model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + ".hdf5")
                input_model.save(model_path)
                input_model = load_model(model_path, compile=False)
                os.remove(model_path)
                return input_model

    model_intout = model.get_layer(layer_name).output
    int_model = Model(inputs=model.input, outputs=model_intout)

    if linearize:
        int_model = linearize_activations(int_model, layer_name)
    return int_model
Example 13
Project: TF_PG_GANS   Author: naykun   File: model.py    MIT License
def Generator(
    num_channels        =1,
    resolution          =32,
    label_size          =0,
    fmap_base           =4096,
    fmap_decay          =1.0,
    fmap_max            =256,
    latent_size         =None,
    normalize_latents   =True,
    use_wscale          =True,
    use_pixelnorm       =True,
    use_leakyrelu       =True,
    use_batchnorm       =False,
    tanh_at_end         =None,
    **kwargs):
    R = int(np.log2(resolution))
    assert resolution == 2 ** R and resolution >= 4
    cur_lod = K.variable(np.float32(0.0), dtype='float32', name='cur_lod')

    def numf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
    if latent_size is None:
        latent_size = numf(0)
    (act, act_init) = (lrelu, lrelu_init) if use_leakyrelu else (relu, relu_init)

    inputs = [Input(shape=[latent_size], name='Glatents')]
    net = inputs[-1]

    #print("DEEEEEEEE")

    if normalize_latents:
        net = PixelNormLayer(name='Gnorm')(net)
    if label_size:
        inputs += [Input(shape=[label_size], name='Glabels')]
        net = Concatenate(name='G1na')([net, inputs[-1]])
    net = Reshape((1, 1, K.int_shape(net)[1]), name='G1nb')(net)

    net = G_convblock(net, numf(1), 4, act, act_init, pad='full', use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1a')
    net = G_convblock(net, numf(1), 3, act, act_init, pad=1, use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1b')
    lods = [net]
    for level in range(2, R):
        net = UpSampling2D(2, name='G%dup' % level)(net)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%da' % level)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%db' % level)
        lods += [net]

    lods = [NINblock(l, num_channels, linear, linear_init, use_wscale=use_wscale,
                     name='Glod%d' % i) for i, l in enumerate(reversed(lods))]
    output = LODSelectLayer(cur_lod, name='Glod')(lods)
    if tanh_at_end is not None:
        output = Activation('tanh', name='Gtanh')(output)
        if tanh_at_end != 1.0:
            output = Lambda(lambda x: x * tanh_at_end, name='Gtanhs')(output)

    model = Model(inputs=inputs, outputs=[output])
    model.cur_lod = cur_lod
    return model 
Example 14
Project: Keras-progressive_growing_of_gans   Author: MSC-BUAA   File: model.py    MIT License
def Generator(
    num_channels        =1,
    resolution          =32,
    label_size          =0,
    fmap_base           =4096,
    fmap_decay          =1.0,
    fmap_max            =256,
    latent_size         =None,
    normalize_latents   =True,
    use_wscale          =True,
    use_pixelnorm       =True,
    use_leakyrelu       =True,
    use_batchnorm       =False,
    tanh_at_end         =None,
    **kwargs):
    R = int(np.log2(resolution))
    assert resolution == 2 ** R and resolution >= 4
    cur_lod = K.variable(np.float32(0.0), dtype='float32', name='cur_lod')

    def numf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max)
    if latent_size is None:
        latent_size = numf(0)
    (act, act_init) = (lrelu, lrelu_init) if use_leakyrelu else (relu, relu_init)

    inputs = [Input(shape=[latent_size], name='Glatents')]
    net = inputs[-1]

    #print("DEEEEEEEE")

    if normalize_latents:
        net = PixelNormLayer(name='Gnorm')(net)
    if label_size:
        inputs += [Input(shape=[label_size], name='Glabels')]
        net = Concatenate(name='G1na')([net, inputs[-1]])
    net = Reshape((1, 1, K.int_shape(net)[1]), name='G1nb')(net)

    net = G_convblock(net, numf(1), 4, act, act_init, pad='full', use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1a')
    net = G_convblock(net, numf(1), 3, act, act_init, pad=1, use_wscale=use_wscale,
                      use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G1b')
    lods = [net]
    for level in range(2, R):
        net = UpSampling2D(2, name='G%dup' % level)(net)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%da' % level)
        net = G_convblock(net, numf(level), 3, act, act_init, pad=1, use_wscale=use_wscale,
                          use_batchnorm=use_batchnorm, use_pixelnorm=use_pixelnorm, name='G%db' % level)
        lods += [net]

    lods = [NINblock(l, num_channels, linear, linear_init, use_wscale=use_wscale,
                     name='Glod%d' % i) for i, l in enumerate(reversed(lods))]
    output = LODSelectLayer(cur_lod, name='Glod')(lods)
    if tanh_at_end is not None:
        output = Activation('tanh', name='Gtanh')(output)
        if tanh_at_end != 1.0:
            output = Lambda(lambda x: x * tanh_at_end, name='Gtanhs')(output)

    model = Model(inputs=inputs, outputs=[output])
    model.cur_lod = cur_lod
    return model 
Example 15
Project: cnn-number-detection   Author: FabianGroeger96   File: model.py    MIT License
def visualize_heat_map(self):
        self.logger.info('Visualizing heat map')

        # create folder for saving visualization
        save_path = os.path.join(constants.MODEL_DIR, 'Visualization', self.model_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        # search the last dense layer with the name 'preds'
        layer_idx = utils.find_layer_idx(self.model, 'preds')

        # Swap softmax with linear
        self.model.layers[layer_idx].activation = activations.linear
        model = utils.apply_modifications(self.model)

        for class_idx in np.arange(len(constants.CATEGORIES)):
            # choose a random image from test data
            indices = np.where(self.testY[:, class_idx] == 1.)[0]
            idx = random.choice(indices)

            f, ax = plt.subplots(1, 4)
            ax[0].imshow(self.testX[idx][..., 0])

            for i, modifier in enumerate([None, 'guided', 'relu']):
                grads = visualize_cam(model, layer_idx,
                                      filter_indices=None,
                                      seed_input=self.testX[idx],
                                      backprop_modifier=modifier)

                # create heat map to overlay on image
                jet_heat_map = np.uint8(cm.jet(grads)[..., :3] * 255)
                image = np.asarray(self.testX[idx] * 255, np.uint8)
                if constants.USE_GRAY_SCALE:
                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

                if modifier is None:
                    modifier = 'vanilla'

                ax[i + 1].set_title(modifier)
                ax[i + 1].imshow(overlay(jet_heat_map, image))

            # save the plot
            plot_name = 'heat-map-{}.png'.format(constants.CATEGORIES[class_idx])
            plt.savefig(os.path.join(save_path, plot_name))
            plt.show()