Python keras.backend.learning_phase() Examples

The following are 30 code examples for showing how to use keras.backend.learning_phase(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may check out the related API usage on the sidebar.

You may also want to check out all available functions/classes of the module keras.backend, or try the search function.

Example 1
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_deep_representations(model, X, batch_size=256):
    """
    Compute the last-hidden-layer activations of `model` for every row of X.

    :param model: Keras model; the last hidden layer is assumed at index -4.
    :param X: input array, sample axis first.
    :param batch_size: number of samples per forward pass.
    :return: array of shape (len(X), hidden_dim) with the representations.
    """
    # The last hidden layer is always at index -4 in these models.
    hidden_layer = model.layers[-4]
    output_dim = hidden_layer.output.shape[-1].value
    # Backend function: (input batch, learning phase) -> hidden activations.
    get_encoding = K.function(
        [model.layers[0].input, K.learning_phase()],
        [hidden_layer.output]
    )

    output = np.zeros(shape=(len(X), output_dim))
    n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
    for batch_idx in range(n_batches):
        lo = batch_idx * batch_size
        hi = lo + batch_size
        # Learning phase 0 => test mode (dropout/BN use inference behaviour).
        output[lo:hi] = get_encoding([X[lo:hi], 0])[0]

    return output
Example 2
Project: blackbox-attacks   Author: sunblaze-ucb   File: cifar10_query_based.py    License: MIT License 6 votes vote down vote up
def one_shot_method(prediction, x, curr_sample, curr_target, p_t):
    """
    One-shot finite-difference estimate of the loss gradient for a
    query-based black-box attack.

    :param prediction: model output tensor to evaluate.
    :param x: input placeholder fed with the perturbed samples.
    :param curr_sample: current batch of samples being attacked.
    :param curr_target: target class index per sample.
    :param p_t: probability of the target class per sample (for the
        cross-entropy gradient).
    :return: estimated gradient of the loss w.r.t. the input.
    :raises NotImplementedError: when args.CW_loss != 0 (only the
        cross-entropy path is implemented).
    """
    # Random +/-1 perturbation directions, one per pixel.
    DELTA = np.random.randint(2, size=(BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    np.place(DELTA, DELTA==0, -1)

    y_plus = np.clip(curr_sample + args.delta * DELTA, CLIP_MIN, CLIP_MAX)
    y_minus = np.clip(curr_sample - args.delta * DELTA, CLIP_MIN, CLIP_MAX)

    if args.CW_loss != 0:
        # BUG FIX: the original fell through with `num_est`/`loss_grad`
        # undefined and died with a NameError; fail explicitly instead.
        raise NotImplementedError("one_shot_method only supports args.CW_loss == 0")

    # Evaluate predictions at both perturbed points (learning phase 0 = test mode).
    pred_plus = K.get_session().run([prediction], feed_dict={x: y_plus, K.learning_phase(): 0})[0]
    pred_plus_t = pred_plus[np.arange(BATCH_SIZE), list(curr_target)]

    pred_minus = K.get_session().run([prediction], feed_dict={x: y_minus, K.learning_phase(): 0})[0]
    pred_minus_t = pred_minus[np.arange(BATCH_SIZE), list(curr_target)]

    # Two-sided finite-difference numerator for d pred_t / d input.
    num_est = (pred_plus_t - pred_minus_t)
    grad_est = num_est[:, None, None, None]/(args.delta * DELTA)

    # Gradient of the cross-entropy loss w.r.t. the input.
    loss_grad = -1.0 * grad_est/p_t[:, None, None, None]

    return loss_grad
Example 3
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    License: MIT License 6 votes vote down vote up
def image_detection(sess, image_path, image_file, colors):
    """Run tiny-YOLO detection on one image file, save the annotated copy,
    and return the (scores, boxes, classes) found."""
    # Load and resize the image to the network's expected input size.
    image, image_data = preprocess_image(image_path + image_file, model_image_size=(416, 416))

    # Evaluate the detection tensors; learning phase 0 selects test mode.
    feed = {yolo_model.input: image_data, K.learning_phase(): 0}
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict=feed)

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Render the detections onto the original image.
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Persist the annotated image as JPEG (quality 90).
    cv2.imwrite(os.path.join("out", "tiny_yolo_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])

    return out_scores, out_boxes, out_classes
Example 4
Project: object-detection   Author: kaka-lin   File: test_yolov3.py    License: MIT License 6 votes vote down vote up
def image_detection(sess, image_path, image_file, colors):
    """Run YOLOv3 detection on one image file, save the annotated copy,
    and return the (scores, boxes, classes) found."""
    # Load and resize the image to the network's expected input size.
    image, image_data = preprocess_image(image_path + image_file, model_image_size=(416, 416))

    # Evaluate the detection tensors; learning phase 0 selects test mode.
    feed = {yolov3.input: image_data, K.learning_phase(): 0}
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict=feed)

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Render the detections onto the original image.
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Persist the annotated image as JPEG (quality 90).
    cv2.imwrite(os.path.join("out", "yolov3_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])

    return out_scores, out_boxes, out_classes
Example 5
Project: detection-2016-nipsws   Author: imatge-upc   File: features.py    License: MIT License 6 votes vote down vote up
def get_feature_map_4(model, im):
    """Return the layer-23 feature map of `model` for image `im`."""
    im = im.astype(np.float32)
    # VGG-style preprocessing: BGR channel order plus mean-pixel subtraction.
    if K.image_dim_ordering() == 'th':
        # Channels-first layout: flip the channel axis 'RGB'->'BGR'.
        im = im[::-1, :, :]
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # Channels-last layout: flip the channel axis 'RGB'->'BGR'.
        im = im[:, :, ::-1]
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    # NOTE(review): this transpose assumes a channels-last array but is also
    # applied after the 'th' branch above — confirm that is intended.
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    # Backend function: (learning phase, image batch) -> layer-23 output.
    run_convout = K.function([K.learning_phase()] + model.inputs,
                             [model.layers[23].output])
    # Phase 0 => test mode.
    feature_map = np.array([run_convout([0] + [im])])
    return feature_map[0, 0, 0, :, :, :]
Example 6
Project: detection-2016-nipsws   Author: imatge-upc   File: features.py    License: MIT License 6 votes vote down vote up
def get_image_descriptor_for_image(image, model):
    """Return the layer-33 descriptor of `model` for `image` (resized to 224x224)."""
    im = cv2.resize(image, (224, 224)).astype(np.float32)
    # VGG-style preprocessing: BGR channel order plus mean-pixel subtraction.
    if K.image_dim_ordering() == 'th':
        # Channels-first layout: flip the channel axis 'RGB'->'BGR'.
        im = im[::-1, :, :]
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # Channels-last layout: flip the channel axis 'RGB'->'BGR'.
        im = im[:, :, ::-1]
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    # NOTE(review): transpose assumes channels-last input but runs on both
    # branches — confirm intent.
    im = np.expand_dims(im.transpose((2, 0, 1)), axis=0)
    # Backend function evaluated in test mode (phase 0).
    run_descriptor = K.function([K.learning_phase()] + model.inputs,
                                [model.layers[33].output])
    return run_descriptor([0] + [im])
Example 7
Project: detection-2016-nipsws   Author: imatge-upc   File: features.py    License: MIT License 6 votes vote down vote up
def get_conv_image_descriptor_for_image(image, model):
    """Return the layer-31 (conv) descriptor of `model` for `image`."""
    im = cv2.resize(image, (224, 224)).astype(np.float32)
    # VGG-style preprocessing: BGR channel order plus mean-pixel subtraction.
    if K.image_dim_ordering() == 'th':
        # Channels-first layout: flip the channel axis 'RGB'->'BGR'.
        im = im[::-1, :, :]
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # Channels-last layout: flip the channel axis 'RGB'->'BGR'.
        im = im[:, :, ::-1]
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    # NOTE(review): transpose assumes channels-last input but runs on both
    # branches — confirm intent.
    im = np.expand_dims(im.transpose((2, 0, 1)), axis=0)
    # Backend function evaluated in test mode (phase 0).
    run_descriptor = K.function([K.learning_phase()] + model.inputs,
                                [model.layers[31].output])
    return run_descriptor([0] + [im])
Example 8
Project: lid_adversarial_subspace_detection   Author: xingjunm   File: util.py    License: MIT License 6 votes vote down vote up
def get_deep_representations(model, X, batch_size=256):
    """
    Collect the last-hidden-layer activations of `model` for every row of X.

    :param model: Keras model whose last hidden layer sits at index -4.
    :param X: input array (sample axis first).
    :param batch_size: samples per forward pass.
    :return: (len(X), hidden_dim) array of activations.
    """
    # Last hidden layer is always at index -4 in these architectures.
    feature_dim = model.layers[-4].output.shape[-1].value
    encode = K.function([model.layers[0].input, K.learning_phase()],
                        [model.layers[-4].output])

    reps = np.zeros(shape=(len(X), feature_dim))
    for start in range(0, X.shape[0], batch_size):
        stop = start + batch_size
        # Phase 0 => inference behaviour for dropout/batch-norm.
        reps[start:stop] = encode([X[start:stop], 0])[0]

    return reps
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_mc_predictions(model, X, nb_iter=50, batch_size=256):
    """
    Monte-Carlo predictions: run `model` on X `nb_iter` times in *training*
    mode (learning phase 1) so stochastic layers such as dropout stay active.

    :param model: Keras model.
    :param X: input array (sample axis first).
    :param nb_iter: number of stochastic forward passes.
    :param batch_size: samples per forward pass.
    :return: array of shape (nb_iter, len(X), output_dim).
    """
    output_dim = model.layers[-1].output.shape[-1].value
    get_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-1].output]
    )

    def predict():
        # One stochastic forward pass over all of X, batch by batch.
        preds = np.zeros(shape=(len(X), output_dim))
        for start in range(0, X.shape[0], batch_size):
            stop = start + batch_size
            # Learning phase 1 => training mode (dropout enabled).
            preds[start:stop] = get_output([X[start:stop], 1])[0]
        return preds

    return np.asarray([predict() for _ in tqdm(range(nb_iter))])
Example 10
Project: gandlf   Author: codekansas   File: models.py    License: MIT License 5 votes vote down vote up
def _get_learning_phase(self):
        """Return the learning-phase tensor as a one-element list, or an
        empty list when the phase is a constant int (nothing to feed)."""
        phase = K.learning_phase()
        if self.uses_learning_phase and not isinstance(phase, int):
            return [phase]
        return []
Example 11
Project: tartarus   Author: sergiooramas   File: predict.py    License: MIT License 5 votes vote down vote up
def get_activations(model, layer_idx, X_batch):
    """Return the activations of layer `layer_idx` for `X_batch`, in test mode.

    :param model: Keras model.
    :param layer_idx: index into model.layers of the layer to read.
    :param X_batch: batch of inputs for the model's first layer.
    :return: list containing one array of activations.
    """
    # FIX: the original named this local `get_activations`, shadowing the
    # function itself; use a distinct name.
    activations_fn = K.function([model.layers[0].input, K.learning_phase()],
                                [model.layers[layer_idx].output,])
    # Learning phase 0 => test mode.
    activations = activations_fn([X_batch, 0])
    return activations
Example 12
Project: blackbox-attacks   Author: sunblaze-ucb   File: particle_swarm_attack.py    License: MIT License 5 votes vote down vote up
def loss(X):
    """Confidence-based objective for the particle-swarm attack: positive
    top confidence when the model already predicts `curr_target`, negative
    otherwise."""
    X = X.reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
    # Forward pass in test mode (learning phase 0).
    confidence = K.get_session().run([prediction], feed_dict={x: X, K.learning_phase(): 0})[0]
    max_conf_i = np.argmax(confidence, 1)
    max_conf = np.max(confidence, 1)[0]
    # Reward hitting the target class, penalise anything else.
    if max_conf_i == curr_target:
        return max_conf
    return -1.0 * max_conf
Example 13
Project: blackbox-attacks   Author: sunblaze-ucb   File: particle_swarm_attack.py    License: MIT License 5 votes vote down vote up
def logit_loss(X):
    """Return (target logit) - (best non-target logit) for sample X."""
    X = X.reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
    # Forward pass in test mode; recover logits from the softmax output.
    confidence = K.get_session().run([prediction], feed_dict={x: X, K.learning_phase(): 0})[0]
    logits = np.log(confidence)

    # Remember the target logit, then mask it out so argmax finds the runner-up.
    logit_t = logits[:, curr_target]
    logits[:, curr_target] = 1e-4
    runner_up = np.argmax(logits, 1)
    return logit_t - logits[:, runner_up]
Example 14
Project: Document-Classifier-LSTM   Author: AlexGidiotis   File: utils.py    License: MIT License 5 votes vote down vote up
def visualize_attention(test_seq,
    model,
    id2wrd,
    n):
    """
    Visualize the top n words that the model pays attention to. 
    We first do a forward pass and get the output of the LSTM layer.
    THen we apply the function of the Attention layer and get the weights.
    Finally we obtain and print the words of the input sequence 
    that have these weights.


    """

    get_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[4].output])
    out = get_layer_output([test_seq, ])[0]  # test mode

    att_w = model.layers[5].get_weights()

    eij = np.tanh(np.dot(out[0], att_w[0]))
    ai = np.exp(eij)
    weights = ai/np.sum(ai)
    weights = np.sum(weights,axis=1)

    topKeys = np.argpartition(weights,-n)[-n:]

    print ' '.join([id2wrd[wrd_id] for wrd_id in test_seq[0] if wrd_id != 0.]) 
    
    for k in test_seq[0][topKeys]:
        if k != 0.:
            print id2wrd[k]
    
    return 
Example 15
Project: keras-vgg-buddy   Author: awentzonline   File: models.py    License: MIT License 5 votes vote down vote up
def __init__(self, num_rows, num_cols, weights_path='vgg16_weights.h5',
            pool_mode='avg', last_layer='conv5_1', learning_phase=None):
        """Build a VGG16 wrapper truncated at `last_layer`.

        learning_phase: value used when compiling layer-output functions, or
        None to build them without a learning-phase input.
        """
        self.learning_phase = learning_phase
        self.last_layer = last_layer
        # Underlying VGG model, built up to `last_layer`.
        self.net = get_model(num_rows, num_cols,
                             weights_path=weights_path,
                             pool_mode=pool_mode,
                             last_layer=last_layer)
        self.net_input = self.net.get_layer('vgg_input')
        # Cache of compiled per-layer output functions, keyed by layer name.
        self._f_layer_outputs = {}
Example 16
Project: keras-vgg-buddy   Author: awentzonline   File: models.py    License: MIT License 5 votes vote down vote up
def get_f_layer(self, layer_name):
        '''Create a backend function for the response of `layer_name`.

        The learning-phase tensor is added as an extra input only when this
        wrapper was built with an explicit learning phase.
        '''
        inputs = [self.net_input]
        if self.learning_phase is not None:
            inputs += [K.learning_phase()]
        return K.function(inputs, [self.get_layer_output(layer_name)])
Example 17
Project: keras-vgg-buddy   Author: awentzonline   File: models.py    License: MIT License 5 votes vote down vote up
def get_features(self, x, layers):
        '''Evaluate layer outputs for `x`.

        :param x: input batch for the network.
        :param layers: iterable of layer names to evaluate.
        :return: dict mapping layer name -> output array, or None when
            `layers` is empty.
        '''
        if not layers:
            return None
        inputs = [self.net.input]
        if self.learning_phase is not None:
            # NOTE(review): self.learning_phase is appended to the compiled
            # function's *inputs*, yet the call below feeds only [x]. If the
            # phase is a placeholder tensor this should raise a missing-input
            # error; also note get_f_layer appends K.learning_phase() here
            # instead. Confirm which is intended.
            inputs.append(self.learning_phase)
        f = K.function(inputs, [self.get_layer_output(layer_name) for layer_name in layers])
        feature_outputs = f([x])
        features = dict(zip(layers, feature_outputs))
        return features
Example 18
Project: face_classification   Author: oarriaga   File: grad_cam.py    License: MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Compile a backend function mapping (image, learning_phase) to the
    saliency: the gradient of the strongest `activation_layer` response
    with respect to the input image."""
    image_tensor = model.input
    activations = model.get_layer(activation_layer).output
    # Strongest per-location response across the channel axis (axis 3).
    channel_max = K.max(activations, axis=3)
    # Gradient of the summed maxima w.r.t. the input image.
    grad = K.gradients(K.sum(channel_max), image_tensor)[0]
    return K.function([image_tensor, K.learning_phase()], [grad])
Example 19
Project: face_classification   Author: oarriaga   File: grad_cam.py    License: MIT License 5 votes vote down vote up
def compile_gradient_function(input_model, category_index, layer_name):
    """Grad-CAM helper: compile a function returning the activations of
    `layer_name` and the normalized gradient of the target-category score
    with respect to them."""
    model = Sequential()
    model.add(input_model)

    num_classes = model.output_shape[1]
    # Append a layer that zeroes out every logit except `category_index`.
    model.add(Lambda(lambda t: target_category_loss(t, category_index, num_classes),
                     output_shape=target_category_loss_output_shape))

    loss = K.sum(model.layers[-1].output)
    conv_output = model.layers[0].get_layer(layer_name).output
    gradients = normalize(K.gradients(loss, conv_output)[0])
    return K.function([model.layers[0].input, K.learning_phase()],
                      [conv_output, gradients])
Example 20
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    License: MIT License 5 votes vote down vote up
def video_detection(sess, image, colors):
    """Run tiny-YOLO on one BGR video frame and return the annotated frame."""
    # Resize to the network input size and convert BGR (OpenCV) -> RGB.
    frame = cv2.resize(image, (416, 416), interpolation=cv2.INTER_AREA)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Normalise to [0, 1] and add the batch dimension.
    image_data = np.expand_dims(np.array(frame, dtype='float32') / 255., 0)

    # Learning phase 0 => test mode.
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})

    return draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
Example 21
Project: object-detection   Author: kaka-lin   File: test_yolov3.py    License: MIT License 5 votes vote down vote up
def video_detection(sess, image, colors):
    """Run YOLOv3 on one BGR video frame and return the annotated frame."""
    # Resize to the network input size and convert BGR (OpenCV) -> RGB.
    frame = cv2.resize(image, (416, 416), interpolation=cv2.INTER_AREA)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Normalise to [0, 1] and add the batch dimension.
    image_data = np.expand_dims(np.array(frame, dtype='float32') / 255., 0)

    # Learning phase 0 => test mode.
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={yolov3.input: image_data, K.learning_phase(): 0})

    return draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
Example 22
Project: Emotion   Author: petercunha   File: grad_cam.py    License: MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Backend function: (image, phase) -> gradient of the strongest
    `activation_layer` response with respect to the input image."""
    inp = model.input
    out = model.get_layer(activation_layer).output
    # Collapse the channel axis (3) to the per-pixel maximum activation.
    peak = K.max(out, axis=3)
    sal = K.gradients(K.sum(peak), inp)[0]
    return K.function([inp, K.learning_phase()], [sal])
Example 23
Project: Emotion   Author: petercunha   File: grad_cam.py    License: MIT License 5 votes vote down vote up
def compile_gradient_function(input_model, category_index, layer_name):
    """Grad-CAM helper: returns a function producing the activations of
    `layer_name` together with the normalized gradient of the target-class
    score with respect to them."""
    wrapped = Sequential()
    wrapped.add(input_model)

    num_classes = wrapped.output_shape[1]
    # Mask out every class except `category_index`.
    wrapped.add(Lambda(lambda t: target_category_loss(t, category_index, num_classes),
                       output_shape=target_category_loss_output_shape))

    score = K.sum(wrapped.layers[-1].output)
    conv_out = wrapped.layers[0].get_layer(layer_name).output
    grads = normalize(K.gradients(score, conv_out)[0])
    return K.function([wrapped.layers[0].input, K.learning_phase()],
                      [conv_out, grads])
Example 24
Project: deeplift   Author: kundajelab   File: test_conv2d_model_channels_first.py    License: MIT License 5 votes vote down vote up
def setUp(self):
        """Build a small channels-first Conv2D/pooling model, compile forward
        and gradient functions for it, and save it to disk for the test."""
        # Random input batch: 10 samples, 10 channels, 51x51 spatial.
        self.inp = np.random.randn(10 * 10 * 51 * 51).reshape(10, 10, 51, 51)
        model = keras.models.Sequential()
        model.add(keras.layers.convolutional.Conv2D(
            filters=2, kernel_size=(4, 4), strides=(2, 2),
            activation="relu", input_shape=(10, 51, 51),
            data_format="channels_first"))
        model.add(keras.layers.pooling.MaxPooling2D(
            pool_size=(4, 4), strides=(2, 2), data_format="channels_first"))
        model.add(keras.layers.pooling.AveragePooling2D(
            pool_size=(4, 4), strides=(2, 2), data_format="channels_first"))
        model.add(keras.layers.Flatten())
        model.add(keras.layers.Dense(output_dim=1))
        model.add(keras.layers.core.Activation("sigmoid"))
        model.compile(loss="mse", optimizer="sgd")
        self.keras_model = model

        # Forward pass: (input, learning phase) -> sigmoid output.
        self.keras_output_fprop_func = compile_func(
            [model.layers[0].input, K.learning_phase()],
            model.layers[-1].output)

        # Gradient of the summed pre-sigmoid output w.r.t. the input.
        grad = tf.gradients(
            tf.reduce_sum(model.layers[-2].output[:, 0]),
            [model.layers[0].input])[0]
        self.grad_func = compile_func(
            [model.layers[0].input, K.learning_phase()], grad)

        # Save to a fresh file for the round-trip test.
        self.saved_file_path = "conv2model_channelsfirst.h5"
        if os.path.isfile(self.saved_file_path):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 25
Project: deeplift   Author: kundajelab   File: test_conv2d_model_same_padding.py    License: MIT License 5 votes vote down vote up
def setUp(self):
        """Build a two-conv same-padding channels-last model, compile forward
        and gradient functions for it, and save it to disk for the test."""
        # Random input batch: 10 samples, 51x51 spatial, 10 channels.
        self.inp = (np.random.randn(10 * 10 * 51 * 51)
                    .reshape(10, 10, 51, 51)).transpose(0, 2, 3, 1)
        self.keras_model = keras.models.Sequential()
        self.keras_model.add(keras.layers.convolutional.Convolution2D(
            nb_filter=20, nb_row=4, nb_col=4, subsample=(2, 2),
            activation="relu", padding='same', input_shape=(51, 51, 10)))
        self.keras_model.add(keras.layers.convolutional.Convolution2D(
            nb_filter=10, nb_row=4, nb_col=4, subsample=(2, 2),
            activation="relu", padding='same'))
        self.keras_model.add(keras.layers.pooling.MaxPooling2D(
            pool_size=(4, 4), strides=(2, 2), padding='same'))
        self.keras_model.add(keras.layers.pooling.AveragePooling2D(
            pool_size=(4, 4), strides=(2, 2), padding='same'))
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")

        # Forward pass: (input, learning phase) -> sigmoid output.
        self.keras_output_fprop_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()],
            self.keras_model.layers[-1].output)

        # Gradient of the summed pre-sigmoid output w.r.t. the input.
        grad = tf.gradients(
            tf.reduce_sum(self.keras_model.layers[-2].output[:, 0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()], grad)

        # Save to a fresh file for the round-trip test.
        self.saved_file_path = "conv2model_samepadding.h5"
        if os.path.isfile(self.saved_file_path):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 26
Project: deeplift   Author: kundajelab   File: test_conv1d_model_valid_padding.py    License: MIT License 5 votes vote down vote up
def setUp(self):
        """Build a small valid-padding Conv1D/pooling model, compile forward
        and gradient functions for it, and save it to disk for the test."""
        # Random input batch: 10 samples, 51 timesteps, 10 channels.
        self.inp = (np.random.randn(10 * 10 * 51)
                    .reshape(10, 10, 51)).transpose(0, 2, 1)
        self.keras_model = keras.models.Sequential()
        self.keras_model.add(keras.layers.InputLayer((51, 10)))
        self.keras_model.add(keras.layers.convolutional.Convolution1D(
            nb_filter=2, filter_length=4, subsample_length=2,
            activation="relu", input_shape=(51, 10)))
        self.keras_model.add(keras.layers.pooling.MaxPooling1D(
            pool_length=4, stride=2))
        self.keras_model.add(keras.layers.pooling.AveragePooling1D(
            pool_length=4, stride=2))
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")

        # Forward pass: (input, learning phase) -> sigmoid output.
        self.keras_output_fprop_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()],
            self.keras_model.layers[-1].output)

        # Gradient of the summed pre-sigmoid output w.r.t. the input.
        grad = tf.gradients(
            tf.reduce_sum(self.keras_model.layers[-2].output[:, 0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()], grad)

        # Save to a fresh file for the round-trip test.
        self.saved_file_path = "conv1model_validpadding.h5"
        if os.path.isfile(self.saved_file_path):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 27
Project: deeplift   Author: kundajelab   File: test_conv2d_model_valid_padding.py    License: MIT License 5 votes vote down vote up
def setUp(self):
        """Build a valid-padding channels-last Conv2D/pooling model, compile
        forward and gradient functions for it, and save it for the test."""
        # Random input batch: 10 samples, 51x51 spatial, 10 channels.
        self.inp = (np.random.randn(10 * 10 * 51 * 51)
                    .reshape(10, 10, 51, 51)).transpose(0, 2, 3, 1)
        self.keras_model = keras.models.Sequential()
        self.keras_model.add(keras.layers.convolutional.Convolution2D(
            nb_filter=2, nb_row=4, nb_col=4, subsample=(2, 2),
            activation="relu", input_shape=(51, 51, 10)))
        self.keras_model.add(keras.layers.pooling.MaxPooling2D(
            pool_size=(4, 4), strides=(2, 2)))
        self.keras_model.add(keras.layers.pooling.AveragePooling2D(
            pool_size=(4, 4), strides=(2, 2)))
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")

        # Forward pass: (input, learning phase) -> sigmoid output.
        self.keras_output_fprop_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()],
            self.keras_model.layers[-1].output)

        # Gradient of the summed pre-sigmoid output w.r.t. the input.
        grad = tf.gradients(
            tf.reduce_sum(self.keras_model.layers[-2].output[:, 0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()], grad)

        # Save to a fresh file for the round-trip test.
        self.saved_file_path = "conv2model_validpadding.h5"
        if os.path.isfile(self.saved_file_path):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 28
def _get_embeddings_model(self):
        """Return a callable mapping a batch x to the activations of the
        third-from-last layer, evaluated in test mode (phase 0)."""
        embed_fn = K.function(
            [self.model.layers[0].input, K.learning_phase()],
            [self.model.layers[-3].output])
        return lambda x: embed_fn([x, 0])[0]
Example 29
Project: Face-and-Emotion-Recognition   Author: vjgpt   File: grad_cam.py    License: MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Compile (image, learning_phase) -> saliency map: the gradient of the
    per-pixel maximum activation of `activation_layer` w.r.t. the input."""
    img = model.input
    layer_out = model.get_layer(activation_layer).output
    # Per-pixel maximum across channels (axis 3).
    strongest = K.max(layer_out, axis=3)
    saliency = K.gradients(K.sum(strongest), img)[0]
    return K.function([img, K.learning_phase()], [saliency])
Example 30
Project: Face-and-Emotion-Recognition   Author: vjgpt   File: grad_cam.py    License: MIT License 5 votes vote down vote up
def compile_gradient_function(input_model, category_index, layer_name):
    """Compile the Grad-CAM gradient function: maps (image, learning_phase)
    to the activations of `layer_name` and the normalized gradient of the
    target-category score with respect to those activations."""
    model = Sequential()
    model.add(input_model)

    n_classes = model.output_shape[1]
    # Keep only the `category_index` logit; zero out the rest.
    masked = lambda t: target_category_loss(t, category_index, n_classes)
    model.add(Lambda(masked, output_shape=target_category_loss_output_shape))

    target_score = K.sum(model.layers[-1].output)
    conv_output = model.layers[0].get_layer(layer_name).output
    gradients = normalize(K.gradients(target_score, conv_output)[0])
    return K.function([model.layers[0].input, K.learning_phase()],
                      [conv_output, gradients])