Python keras.backend.learning_phase() Examples

The following are code examples for showing how to use keras.backend.learning_phase(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_deep_representations(model, X, batch_size=256):
    """Extract activations of the last hidden layer for every sample in X.

    :param model: compiled Keras model.
    :param X: input array; the first axis indexes samples.
    :param batch_size: number of samples per forward pass.
    :return: np.ndarray of shape (len(X), hidden_dim).
    """
    # In this project the last hidden layer always sits at index -4.
    hidden_layer = model.layers[-4]
    hidden_dim = hidden_layer.output.shape[-1].value
    encode = K.function(
        [model.layers[0].input, K.learning_phase()],
        [hidden_layer.output]
    )

    representations = np.zeros(shape=(len(X), hidden_dim))
    num_batches = int(np.ceil(X.shape[0] / float(batch_size)))
    for batch_idx in range(num_batches):
        lo = batch_idx * batch_size
        hi = lo + batch_size
        # Learning phase 0 -> inference mode (dropout/batchnorm frozen).
        representations[lo:hi] = encode([X[lo:hi], 0])[0]

    return representations
Example 2
Project: blackbox-attacks   Author: sunblaze-ucb   File: cifar10_query_based.py    MIT License 6 votes vote down vote up
def one_shot_method(prediction, x, curr_sample, curr_target, p_t):
    """Single-query finite-difference estimate of the loss gradient.

    Perturbs the current sample once with a random +/-1 mask ``DELTA``
    and uses the two-sided difference of target-class probabilities to
    estimate the gradient (a random directional derivative).

    NOTE(review): relies on module globals (args, BATCH_SIZE, IMAGE_ROWS,
    IMAGE_COLS, NUM_CHANNELS, CLIP_MIN, CLIP_MAX, K, np) defined
    elsewhere in the file. If ``args.CW_loss != 0`` neither ``num_est``
    nor ``loss_grad`` is ever assigned and the function raises
    NameError -- presumably only CW_loss == 0 is exercised here.
    """
    grad_est = np.zeros((BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    # Random Rademacher (+1/-1) perturbation direction.
    DELTA = np.random.randint(2, size=(BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    np.place(DELTA, DELTA==0, -1)

    # Two-sided probes, clipped back into the valid input range.
    y_plus = np.clip(curr_sample + args.delta * DELTA, CLIP_MIN, CLIP_MAX)
    y_minus = np.clip(curr_sample - args.delta * DELTA, CLIP_MIN, CLIP_MAX)

    if args.CW_loss == 0:
        # Learning phase 0 -> inference mode for both probe evaluations.
        pred_plus = K.get_session().run([prediction], feed_dict={x: y_plus, K.learning_phase(): 0})[0]
        pred_plus_t = pred_plus[np.arange(BATCH_SIZE), list(curr_target)]

        pred_minus = K.get_session().run([prediction], feed_dict={x: y_minus, K.learning_phase(): 0})[0]
        pred_minus_t = pred_minus[np.arange(BATCH_SIZE), list(curr_target)]

        # Numerator of the finite-difference quotient.
        num_est = (pred_plus_t - pred_minus_t)

    # Broadcast the per-sample difference over the image dimensions.
    grad_est = num_est[:, None, None, None]/(args.delta * DELTA)

    # Getting gradient of the loss
    if args.CW_loss == 0:
        # Cross-entropy gradient: -(d p_t / dx) / p_t.
        loss_grad = -1.0 * grad_est/p_t[:, None, None, None]

    return loss_grad
Example 3
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    MIT License 6 votes vote down vote up
def image_detection(sess, image_path, image_file, colors):
    """Run tiny-YOLO detection on one image file and save the annotated copy."""
    # Load and resize the image to the network's 416x416 input size.
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))

    # Inference only: feed the image and force the learning phase to 0.
    feed = {yolo_model.input: image_data, K.learning_phase(): 0}
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict=feed)

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Overlay the detections on the original image.
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Persist the annotated frame as a JPEG at quality 90.
    cv2.imwrite(os.path.join("out", "tiny_yolo_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])

    return out_scores, out_boxes, out_classes
Example 4
Project: object-detection   Author: kaka-lin   File: test_yolov3.py    MIT License 6 votes vote down vote up
def image_detection(sess, image_path, image_file, colors):
    """Run YOLOv3 detection on one image file and save the annotated copy."""
    # Resize to the 416x416 input expected by the network.
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))

    # Evaluate the detection tensors in inference mode (phase 0).
    fetches = [scores, boxes, classes]
    feed = {yolov3.input: image_data, K.learning_phase(): 0}
    out_scores, out_boxes, out_classes = sess.run(fetches, feed_dict=feed)

    print('Found {} boxes for {}'.format(len(out_boxes), image_file))

    # Draw the detections onto the image.
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Write the result as JPEG, quality 90.
    cv2.imwrite(os.path.join("out", "yolov3_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])

    return out_scores, out_boxes, out_classes
Example 5
Project: MCTN   Author: hainow   File: utils.py    GNU General Public License v2.0 6 votes vote down vote up
def on_epoch_end(self, epoch, logs=None):
    """Every 10 epochs, dump the encoder layer's activations to disk.

    Keras callback hook. Builds a backend function from the model input
    to the output of layer ``self.idx`` and evaluates it in test mode
    (learning phase 0) on the train and test splits.

    :param epoch: 0-based epoch index supplied by Keras.
    :param logs: metric dict supplied by Keras (unused here).
    """
    # Fix: the original signature used the mutable default ``logs={}``;
    # ``None`` avoids the shared-mutable-default pitfall (logs is unused).
    if epoch % 10 == 0 and epoch > 0:
        input_train, output_train = self.train_data
        input_test, output_test = self.test_data

        get_encoder_output = K.function(
            [self.model.layers[0].input, K.learning_phase()],
            [self.model.layers[self.idx].output])

        # Learning phase 0 -> inference mode.
        train_encoder_output = get_encoder_output([input_train, 0])[0]
        test_encoder_output = get_encoder_output([input_test, 0])[0]
        print("Train encoder output: {}".format(train_encoder_output.shape))
        print("Test encoder output: {}".format(test_encoder_output.shape))

        # Dump activations; filenames encode the run description and epoch.
        dump_obj(train_encoder_output, "{}_train_encoder_{}_{}_{}".
                 format(self.description, self.infeat, self.outfeat, epoch))
        dump_obj(test_encoder_output, "{}_test_encoder_{}_{}_{}".
                 format(self.description, self.infeat, self.outfeat, epoch))
        if epoch == 10:
            # Targets never change across epochs; save them once.
            dump_obj(output_train, "y_train")
            dump_obj(output_test, "y_test")
        print("\tALL SAVED TO DISK")
Example 6
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: visualizations.py    MIT License 6 votes vote down vote up
def get_gradcam(image, model, layer_name, mode):
    """Compute a Grad-CAM heatmap for a single image.

    ``mode`` 'abnormal' weights the raw prediction, 'normal' weights its
    complement. The heatmap is evaluated in test mode (phase 0).
    """
    layer = model.get_layer(layer_name)
    batch = np.expand_dims(image, 0)
    # Scalar loss whose gradient w.r.t. the chosen conv layer we need.
    loss = K.variable(0.)
    if mode == "abnormal":
        loss = loss + K.sum(model.output)
    elif mode == "normal":
        loss = loss + K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")
    # Channel weights = spatial global average of the upstream gradients.
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])
    # Weighted combination of feature maps, rectified.
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([batch, 0])[0]
Example 7
Project: diktya   Author: BioroboticsLab   File: test_regularizers.py    Apache License 2.0 6 votes vote down vote up
def test_weight_orth_regularizer():
    """Normally-distributed filters should incur a smaller orthogonality
    penalty than uniformly-distributed ones."""
    reg = WeightOrthRegularizer(weight=1.)
    base_loss = K.variable(0.)

    gaussian_filters = K.random_normal((32, 3, 3))
    uniform_filters = K.random_uniform((32, 3, 3))

    def evaluate(filters):
        # Attach the filters, compile the regularized loss and run it
        # in training mode (learning phase 1).
        reg.set_param(filters)
        fn = K.function([K.learning_phase()], reg(base_loss))
        return fn((1,))

    assert(evaluate(gaussian_filters) < evaluate(uniform_filters))
Example 8
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_mc_predictions(model, X, nb_iter=50, batch_size=256):
    """Monte-Carlo predictions: run the model ``nb_iter`` times in
    training mode (learning phase 1) so that dropout stays active.

    :param model: compiled Keras model.
    :param X: input samples; the first axis is the batch axis.
    :param nb_iter: number of stochastic forward passes.
    :param batch_size: samples per forward pass.
    :return: array of shape (nb_iter, len(X), output_dim).
    """
    final_layer = model.layers[-1]
    output_dim = final_layer.output.shape[-1].value
    get_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [final_layer.output]
    )

    def predict():
        # One full stochastic pass over X, batch by batch.
        preds = np.zeros(shape=(len(X), output_dim))
        num_batches = int(np.ceil(X.shape[0] / float(batch_size)))
        for batch_idx in range(num_batches):
            lo = batch_idx * batch_size
            hi = lo + batch_size
            preds[lo:hi] = get_output([X[lo:hi], 1])[0]
        return preds

    return np.asarray([predict() for _ in tqdm(range(nb_iter))])
Example 9
Project: cdc   Author: ckbjimmy   File: EmbCRNN.py    MIT License 5 votes vote down vote up
def get_activations(model, layer, X_batch):
    """Return the activations of layer ``layer`` for X_batch (test mode)."""
    fetch = K.function([model.layers[0].input, K.learning_phase()],
                       [model.layers[layer].output])
    # Learning phase 0 -> inference mode.
    return fetch([X_batch, 0])
Example 10
Project: cdc   Author: ckbjimmy   File: EmbGRUattention.py    MIT License 5 votes vote down vote up
def get_activations(model, layer, X_batch):
    """Evaluate the output of the given layer index on X_batch in test mode."""
    layer_fn = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[layer].output],
    )
    activations = layer_fn([X_batch, 0])  # 0 = inference phase
    return activations
Example 11
Project: gandlf   Author: codekansas   File: models.py    MIT License 5 votes vote down vote up
def _get_learning_phase(self):
        """Return ``[K.learning_phase()]`` when the model needs the phase
        tensor fed at run time, otherwise an empty list."""
        phase = K.learning_phase()
        # An int phase means it is fixed globally and need not be fed.
        if self.uses_learning_phase and not isinstance(phase, int):
            return [phase]
        return []
Example 12
Project: design_embeddings_jmd_2016   Author: IDEALLab   File: stacked_ae.py    MIT License 5 votes vote down vote up
def train_ae(data, feature_dim, hidden_sizes, l, p=0, batch_size=100, activation='tanh', 
             activity_regularizer=None, weights=None, nb_epoch=1000, loss='mse', verbose=False):
    """Train a symmetric autoencoder and return its code-layer activations.

    :param data: 2-D array, samples x features; used as both input and target.
    :param feature_dim: size of the bottleneck (code) layer.
    :param hidden_sizes: list of intermediate hidden-layer sizes.
    :param l: L2 weight-regularization coefficient.
    :param p: GaussianDropout rate applied to the input (0 disables noise).
    :param batch_size: minibatch size.
    :param activation: activation used by every Dense layer.
    :param activity_regularizer: unused in this body -- TODO confirm intent.
    :param weights: optional initial weights for the whole model.
    :param nb_epoch: number of training epochs.
    :param loss: reconstruction loss name.
    :param verbose: Keras verbosity flag.
    :return: (code activations, encoder weights, decoder weights, model);
        the weight pair is None unless the autoencoder has a single layer.

    NOTE(review): uses Keras 1.x APIs (W_regularizer, input=/output=,
    nb_epoch) -- this code predates Keras 2.
    """

    data_dim = data.shape[1]
    inputs = Input(shape=(data_dim,))
    
    # Layer sizes from input dim down to the bottleneck, mirrored back up.
    sizes = [data_dim] + hidden_sizes + [feature_dim]
    n_layers = len(sizes) - 1
    
    # Encoder
    x = noise.GaussianDropout(p)(inputs)
    for i in range(n_layers):
        x = Dense(sizes[i+1], activation=activation, W_regularizer=l2(l))(x)
    
    # Decoder
    for i in range(n_layers):
        x = Dense(sizes[-i-2], activation=activation, W_regularizer=l2(l))(x)
    decoded = x
    
    model = Model(input=inputs, output=decoded)
    
    if weights is not None:
        model.set_weights(weights)
        
#    optimizer = Adagrad(lr=lr, epsilon=epsilon)
    optimizer = Adam()
    model.compile(loss=loss, optimizer=optimizer)
#    early_stopping = MyEarlyStopping(monitor='loss', patience=10, verbose=verbose, tol=1e-6)
    model.fit(data, data, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose)#, callbacks=[early_stopping])
    
    # Only a one-layer AE has its encoder/decoder weights at fixed indices.
    if n_layers == 1:
        W_en = model.layers[-2].get_weights()
        W_de = model.layers[-1].get_weights()
    else:
        W_en = None
        W_de = None
        
    # Evaluate the code layer (index -2) in test mode (phase 0).
    encode = K.function([model.layers[0].input, K.learning_phase()], [model.layers[-2].output])
    a = encode([data, 0])[0] # hidden layer's activation
    
    return a, W_en, W_de, model
Example 13
Project: isl-gaze-demo   Author: djpetti   File: keras_utils.py    MIT License 5 votes vote down vote up
def fuse_loaders(train_outputs, test_outputs):
  """ Select between training and testing loader tensors.

  Keras can load data and labels from custom tensors, but it cannot
  toggle between training and testing inputs by itself, so this method
  branches on the learning phase with a conditional op.
  Args:
    train_outputs: The output tensors from the training data loader.
    test_outputs: The output tensors from the testing data loader.
  Returns:
    The output tensors for the current learning phase. """
  return tf.cond(K.learning_phase(),
                 lambda: train_outputs,
                 lambda: test_outputs)
Example 14
Project: blackbox-attacks   Author: sunblaze-ucb   File: particle_swarm_attack.py    MIT License 5 votes vote down vote up
def loss(X):
    """Particle-swarm attack objective: top-class confidence, signed.

    Returns the maximum class confidence, positive when the classifier
    already predicts ``curr_target`` and negative otherwise.

    NOTE(review): depends on module globals FLAGS, prediction, x,
    curr_target and the Keras session defined elsewhere in the file.

    :param X: flat input array, reshaped to one NHWC sample.
    :return: float objective value.
    """
    X = X.reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
    # Inference mode (learning phase 0).
    confidence = K.get_session().run([prediction], feed_dict={x: X, K.learning_phase(): 0})[0]
    max_conf_i = np.argmax(confidence, 1)
    max_conf = np.max(confidence, 1)[0]
    if max_conf_i == curr_target:
        return max_conf
    # Fix: the original 'elif max_conf_i != curr_target' was the exact
    # negation of the branch above; a plain return cannot change behavior
    # and removes the unreachable implicit 'return None' path.
    return -1.0 * max_conf
Example 15
Project: blackbox-attacks   Author: sunblaze-ucb   File: particle_swarm_attack.py    MIT License 5 votes vote down vote up
def logit_loss(X):
    """Margin between the target-class logit and the best other logit.

    NOTE(review): relies on module globals FLAGS, prediction, x and
    curr_target; logits are recovered as log-probabilities.
    """
    X = X.reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
    # Inference mode (learning phase 0).
    probs = K.get_session().run([prediction], feed_dict={x: X, K.learning_phase(): 0})[0]
    logits = np.log(probs)

    logit_t = logits[:, curr_target]
    # Mask the target entry so argmax finds the runner-up class.
    logits[:, curr_target] = 1e-4
    runner_up = np.argmax(logits, 1)
    logit_max = logits[:, runner_up]
    return logit_t - logit_max
Example 16
Project: Keras_MedicalImgAI   Author: taoyilee   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='block5_conv3'):
    """Compile a function mapping an input image to the gradient of the
    summed channel-max activation of ``activation_layer``."""
    input_img = model.input
    layers_by_name = {layer.name: layer for layer in model.layers[1:]}
    layer_output = layers_by_name[activation_layer].output
    # Max over channels (axis 3), summed, differentiated w.r.t. the input.
    channel_max = K.max(layer_output, axis=3)
    saliency = K.gradients(K.sum(channel_max), input_img)[0]
    return K.function([input_img, K.learning_phase()], [saliency])
Example 17
Project: Keras_MedicalImgAI   Author: taoyilee   File: grad_cam.py    MIT License 5 votes vote down vote up
def grad_cam(input_model, model_x, orig_x, category_index, layer_name, class_names):
    """Compute a Grad-CAM overlay for one input sample.

    :param input_model: trained Keras model.
    :param model_x: preprocessed, batched network input.
    :param orig_x: original image the heatmap is blended onto.
    :param category_index: class index whose evidence is visualized.
    :param layer_name: conv layer the gradients are taken against.
    :param class_names: class-name list (only its length is used).
    :return: uint8 color heatmap blended with the original image.
    """
    output = input_model.output

    # Append a Lambda that masks the output down to the target category.
    final_layer = Lambda(lambda x: target_category_loss(x, category_index, len(class_names)))
    output = final_layer(output)
    model = Model(inputs=input_model.input, outputs=output)
    loss = K.sum(model.layers[-1].output)
    conv_output = model.get_layer(layer_name).output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input, K.learning_phase()], [conv_output, grads])

    # Test mode (phase 0); then strip the batch dimension.
    output, grads_val = gradient_function([model_x, 0])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # Grad-CAM channel weights: spatial mean of the gradients.
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.zeros(output.shape[0: 2], dtype=np.float32)

    # Weighted sum of the feature maps.
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    # ReLU, scale to [0, 255], colorize, resize and blend 40/60.
    cam = np.maximum(cam, np.zeros(output.shape[0: 2], dtype=np.float32))
    cam = cam.squeeze()
    # NOTE(review): divides by np.max(cam) -- NaNs if the map is all zeros.
    cam = cv2.applyColorMap(np.uint8(255 * cam / np.max(cam)), cv2.COLORMAP_JET)
    cam = cv2.resize(cam, (np.shape(orig_x)[0], np.shape(orig_x)[1]))
    cam = 0.4 * cam + 0.6 * orig_x
    return np.uint8(cam)
Example 18
Project: lipnet   Author: osalinasv   File: lipnet.py    Apache License 2.0 5 votes vote down vote up
def capture_softmax_output(self):
    """Build a backend function returning the softmax predictions (y_pred)
    for a given input batch and learning-phase flag."""
    softmax_fn = k.function([self.input_layer, k.learning_phase()], [self.y_pred])
    return softmax_fn
Example 19
Project: Visual-Question-Answering   Author: Hynix07   File: question_answer.py    MIT License 5 votes vote down vote up
def extract_image_features(img_path):
    """Load an image, preprocess it for VGG16 and return the activations
    feeding the final layer (i.e. the penultimate representation)."""
    model = models.VGG_16('weights/vgg16_weights_th_dim_ordering_th_kernels.h5')
    img = image.load_img(img_path, target_size=(224, 224))
    arr = image.img_to_array(img)
    arr = preprocess_input(np.expand_dims(arr, axis=0))
    # Note: this fetches the *input* of the last layer, not its output.
    penultimate = K.function([model.layers[0].input, K.learning_phase()],
                             [model.layers[-1].input])
    # Learning phase 0 -> inference mode.
    return penultimate([arr, 0])[0]
Example 20
Project: Document-Classifier-LSTM   Author: AlexGidiotis   File: utils.py    MIT License 5 votes vote down vote up
def visualize_attention(test_seq,
    model,
    id2wrd,
    n):
    """
    Visualize the top n words that the model pays attention to.
    We first do a forward pass and get the output of the LSTM layer.
    Then we apply the function of the Attention layer and get the weights.
    Finally we obtain and print the words of the input sequence
    that have these weights.

    NOTE(review): Python 2 code (print statements). The layer-output
    function is built with two inputs (data + learning phase) but is
    invoked with only the data -- confirm this works with the Keras
    version in use.
    """

    # Output of layer 4 (the LSTM) for the test sequence.
    get_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[4].output])
    out = get_layer_output([test_seq, ])[0]  # test mode

    # Weights of the attention layer (layer 5).
    att_w = model.layers[5].get_weights()

    # Recompute the attention scores the same way the layer does:
    # tanh projection, exponentiate, normalize, sum over features.
    eij = np.tanh(np.dot(out[0], att_w[0]))
    ai = np.exp(eij)
    weights = ai/np.sum(ai)
    weights = np.sum(weights,axis=1)

    # Indices of the n largest attention weights.
    topKeys = np.argpartition(weights,-n)[-n:]

    print ' '.join([id2wrd[wrd_id] for wrd_id in test_seq[0] if wrd_id != 0.]) 
    
    for k in test_seq[0][topKeys]:
        # Skip id 0 (presumably the padding token).
        if k != 0.:
            print id2wrd[k]
    
    return 
Example 21
Project: dialectal_arabic_segmenter   Author: qcri   File: callbacks.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def on_epoch_end(self, epoch, logs=None):
        """Write TensorBoard histogram and scalar summaries for this epoch.

        Every ``histogram_freq`` epochs the merged summaries are evaluated
        on the model's validation data; every epoch the logged metrics are
        written as scalar summaries.

        :param epoch: 0-based epoch index supplied by Keras.
        :param logs: dict of metric name -> value supplied by Keras.
        """
        logs = logs or {}

        if self.model.validation_data and self.histogram_freq:
            if epoch % self.histogram_freq == 0:
                # TODO: implement batched calls to sess.run
                # (current call will likely go OOM on GPU)
                if self.model.uses_learning_phase:
                    # Trim validation_data to the model inputs and append
                    # learning phase 0 so it matches the tensor list below.
                    cut_v_data = len(self.model.inputs)
                    val_data = self.model.validation_data[:cut_v_data] + [0]
                    tensors = self.model.inputs + [K.learning_phase()]
                else:
                    val_data = self.model.validation_data
                    tensors = self.model.inputs
                feed_dict = dict(zip(tensors, val_data))
                result = self.sess.run([self.merged], feed_dict=feed_dict)
                summary_str = result[0]
                self.writer.add_summary(summary_str, epoch)

        for name, value in logs.items():
            # 'batch' and 'size' are bookkeeping entries, not metrics.
            if name in ['batch', 'size']:
                continue
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.writer.add_summary(summary, epoch)
        self.writer.flush()
Example 22
Project: gccaps   Author: tqbl   File: capsnet.py    MIT License 5 votes vote down vote up
def gccaps_predict(x, model, batch_size=32):
    """Generate output predictions for the given input examples.

    Args:
        x (np.ndarray): Array of input examples.
        model: Keras model of GCCaps architecture.
        batch_size (int): Number of examples in a mini-batch.

    Returns:
        tuple: A tuple containing the audio tagging predictions and
        SED predictions.
    """

    # Compute audio tagging predictions
    at_preds = model.predict(x, batch_size=batch_size)

    # Prepare for sound event detection
    input_tensor = model.get_layer('input_tensor').input
    capsule_output = model.get_layer('capsule_length').output
    func = K.function([input_tensor, K.learning_phase()], [capsule_output])

    # Compute sound event detection predictions.
    # Fix: ``func`` is compiled with two inputs (data + learning phase)
    # but was originally invoked with only the data batch; pass phase 0
    # explicitly so the call matches the compiled signature and runs in
    # inference mode.
    n_steps = int(np.ceil(len(x) / batch_size))
    sed_preds = [func([x[batch_size*i:batch_size*(i+1)], 0])[0]
                 for i in range(n_steps)]
    # Transpose so that final dimension is the time axis
    sed_preds = np.transpose(np.concatenate(sed_preds), (0, 2, 1))

    return at_preds, sed_preds
Example 23
Project: face_classification   Author: oarriaga   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Compile a saliency function: gradient of the summed channel-max
    activation of ``activation_layer`` w.r.t. the input image."""
    input_image = model.input
    activation = model.get_layer(activation_layer).output
    channel_max = K.max(activation, axis=3)
    saliency = K.gradients(K.sum(channel_max), input_image)[0]
    return K.function([input_image, K.learning_phase()], [saliency])
Example 24
Project: face_classification   Author: oarriaga   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_gradient_function(input_model, category_index, layer_name):
    """Build a function returning (conv activations, gradients) for Grad-CAM.

    Wraps the model with a Lambda computing the target-category loss and
    differentiates its sum against the named conv layer's output.
    """
    model = Sequential()
    model.add(input_model)

    num_classes = model.output_shape[1]
    model.add(Lambda(lambda x: target_category_loss(x, category_index, num_classes),
                     output_shape=target_category_loss_output_shape))

    loss = K.sum(model.layers[-1].output)
    conv_output = model.layers[0].get_layer(layer_name).output
    gradients = normalize(K.gradients(loss, conv_output)[0])
    return K.function([model.layers[0].input, K.learning_phase()],
                      [conv_output, gradients])
Example 25
Project: satellite-imagery-change-detection   Author: soroushhashemifar   File: feat.py    GNU General Public License v3.0 5 votes vote down vote up
def get_activations(model, layer_idx, X_batch):
    """Evaluate the output of layer ``layer_idx`` on X_batch in test mode."""
    run_layer = K.function([model.layers[0].input, K.learning_phase()],
                           [model.layers[layer_idx].output,])
    # Phase 0 -> inference mode.
    return run_layer([X_batch, 0])

#Function to extract features from intermediate layers 
Example 26
Project: GM-DGM   Author: MatthewWilletts   File: model.py    MIT License 5 votes vote down vote up
def training_fd(self, x_l, y_l, x_u):
        """Build the training feed_dict for the current learning paradigm.

        All paradigms feed the labelled batch as (x, y), the regularizer
        scale and learning phase 1; the paradigm decides which extra
        placeholders (labelled/unlabelled) also get fed.
        """
        fd = {self.x: x_l, self.y: y_l, self.reg_term: self.n_train,
              K.learning_phase(): 1}
        paradigm = self.learning_paradigm
        if paradigm == 'semisupervised' or paradigm == 'semi-unsupervised':
            fd.update({self.x_l: x_l, self.y_l: y_l, self.x_u: x_u})
            return fd
        if paradigm == 'supervised':
            fd.update({self.x_l: x_l, self.y_l: y_l})
            return fd
        if paradigm == 'unsupervised':
            fd.update({self.x_u: x_u})
            return fd
Example 27
Project: GM-DGM   Author: MatthewWilletts   File: model.py    MIT License 5 votes vote down vote up
def _printing_feed_dict(self, Data, x_l, x_u, y, eval_samps, binarize):
        """Feed dict for evaluation printing: freshly sampled train/test
        splits plus the current labelled/unlabelled batches, phase 0."""
        x_train, y_train = Data.sample_train(eval_samps)
        x_test, y_test = Data.sample_test(eval_samps)
        fd = {self.x_train: x_train, self.y_train: y_train,
              self.x_test: x_test, self.y_test: y_test,
              self.x_l: x_l, self.y_l: y, self.x_u: x_u}
        # Inference mode for all evaluation passes.
        fd[K.learning_phase()] = 0
        return fd
Example 28
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    MIT License 5 votes vote down vote up
def video_detection(sess, image, colors):
    """Detect objects in one BGR video frame with tiny-YOLO and draw boxes."""
    # Resize to the network input size and convert BGR -> RGB.
    rgb = cv2.cvtColor(cv2.resize(image, (416, 416), interpolation=cv2.INTER_AREA),
                       cv2.COLOR_BGR2RGB)
    # Scale pixels to [0, 1] and add the batch dimension.
    frame = np.expand_dims(np.array(rgb, dtype='float32') / 255., 0)

    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={yolo_model.input: frame, K.learning_phase(): 0})

    # Annotate the original (unresized) frame.
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    return image
Example 29
Project: object-detection   Author: kaka-lin   File: test_yolov3.py    MIT License 5 votes vote down vote up
def video_detection(sess, image, colors):
    """Run YOLOv3 on one BGR video frame and return the annotated frame."""
    # Network expects 416x416 RGB input scaled to [0, 1].
    resized = cv2.resize(image, (416, 416), interpolation=cv2.INTER_AREA)
    resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    batch = np.expand_dims(np.array(resized, dtype='float32') / 255., 0)

    # Inference mode (learning phase 0).
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={yolov3.input: batch, K.learning_phase(): 0})

    return draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
Example 30
Project: Emotion   Author: petercunha   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Return a backend function computing input-space saliency.

    Saliency is the gradient of the summed per-pixel channel maximum of
    ``activation_layer`` with respect to the input image.
    """
    img_tensor = model.input
    layer_out = model.get_layer(activation_layer).output
    pooled = K.sum(K.max(layer_out, axis=3))
    grad = K.gradients(pooled, img_tensor)[0]
    return K.function([img_tensor, K.learning_phase()], [grad])
Example 31
Project: Emotion   Author: petercunha   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_gradient_function(input_model, category_index, layer_name):
    """Compile a Grad-CAM helper returning conv activations and their
    gradients w.r.t. the target-category loss."""
    model = Sequential()
    model.add(input_model)

    num_classes = model.output_shape[1]
    # Mask the output down to the requested category.
    category_loss = lambda x: target_category_loss(x, category_index, num_classes)
    model.add(Lambda(category_loss,
                     output_shape = target_category_loss_output_shape))

    loss = K.sum(model.layers[-1].output)
    conv_output = model.layers[0].get_layer(layer_name).output
    grad_tensor = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input, K.learning_phase()],
                                   [conv_output, grad_tensor])
    return gradient_function
Example 32
Project: abstention   Author: kundajelab   File: util.py    MIT License 5 votes vote down vote up
def get_preact_func(model, task_idx):
    """Return a batched function yielding the pre-activation outputs
    (layer index -2) for one task column."""
    from keras import backend as K
    preact_func = K.function([model.layers[0].input, K.learning_phase()],
                             [model.layers[-2].output])

    def batched_func(data, learning_phase, batch_size):
        # Collect the requested task column batch by batch.
        collected = []
        for start in range(0, len(data), batch_size):
            batch = data[start:start + batch_size]
            collected.extend(preact_func([batch, learning_phase])[0][:, task_idx])
        return np.array(collected)

    return batched_func
Example 33
Project: abstention   Author: kundajelab   File: util.py    MIT License 5 votes vote down vote up
def get_embed_func(model, task_idx):
    """Return a batched function yielding the embedding-layer outputs
    (layer index -3) for one task column."""
    from keras import backend as K
    embed_func = K.function([model.layers[0].input, K.learning_phase()],
                            [model.layers[-3].output])

    def batched_func(data, learning_phase, batch_size):
        # Walk the data in fixed-size windows, gathering the task column.
        results = []
        start_idx = 0
        while start_idx < len(data):
            window = data[start_idx:start_idx + batch_size]
            results.extend(embed_func([window, learning_phase])[0][:, task_idx])
            start_idx += batch_size
        return np.array(results)

    return batched_func
Example 34
Project: abstention   Author: kundajelab   File: util.py    MIT License 5 votes vote down vote up
def obtain_raw_data(preact_func, data, num_dropout_runs, batch_size=50):
    """Collect one deterministic pass and several dropout passes.

    :param preact_func: batched function taking (data, learning_phase,
        batch_size) and returning pre-activations.
    :param data: input samples.
    :param num_dropout_runs: number of stochastic (phase 1) passes.
    :param batch_size: forwarded to ``preact_func``.
    :return: (deterministic activations, array of dropout-run activations).
    """
    print("Computing deterministic activations")
    deterministic_preacts = np.array(
        preact_func(data=data, learning_phase=0,
                    batch_size=batch_size))

    print("Computing nondeterministic activations")
    dropout_run_results = []
    for run_idx in range(num_dropout_runs):
        # Progress ping every 10 runs.
        if ((run_idx + 1) % 10 == 0):
            print("Done", run_idx + 1, "runs")
        stochastic = preact_func(data=data, learning_phase=1,
                                 batch_size=batch_size)
        dropout_run_results.append(np.array(stochastic).squeeze())
    return deterministic_preacts, np.array(dropout_run_results)
Example 35
Project: abstention   Author: kundajelab   File: util.py    MIT License 5 votes vote down vote up
def obtain_embeddings(embed_func, data, batch_size=50):
    """Run one deterministic (learning phase 0) pass of ``embed_func``
    over ``data`` and return the squeezed result array."""
    print("Computing embeddings")
    raw = embed_func(data=data, learning_phase=0, batch_size=batch_size)
    return np.array(raw).squeeze()
Example 36
Project: iMIMIC-RCVs   Author: medgift   File: optimizer.py    MIT License 5 votes vote down vote up
def __init__(self, input_tensor, losses, input_range=(0, 255), wrt_tensor=None, norm_grads=True):
        """Creates an optimizer that minimizes weighted loss function.

        Args:
            input_tensor: An input tensor of shape: `(samples, channels, image_dims...)` if `image_data_format=
                channels_first` or `(samples, image_dims..., channels)` if `image_data_format=channels_last`.
            losses: List of ([Loss](vis.losses#Loss), weight) tuples.
            input_range: Specifies the input range as a `(min, max)` tuple. This is used to rescale the
                final optimized input to the given range. (Default value=(0, 255))
            wrt_tensor: Short for, with respect to. This instructs the optimizer that the aggregate loss from `losses`
                should be minimized with respect to `wrt_tensor`.
                `wrt_tensor` can be any tensor that is part of the model graph. Default value is set to None
                which means that loss will simply be minimized with respect to `input_tensor`.
            norm_grads: True to normalize gradients. Normalization avoids very small or large gradients and ensures
                a smooth gradient gradient descent process. If you want the actual gradient
                (for example, visualizing attention), set this to false.
        """
        self.input_tensor = input_tensor
        self.input_range = input_range
        self.loss_names = []
        self.loss_functions = []
        # Default: optimize with respect to the input itself.
        self.wrt_tensor = self.input_tensor if wrt_tensor is None else wrt_tensor

        overall_loss = None
        for loss, weight in losses:
            # Perf optimization. Don't build loss function with 0 weight.
            if weight != 0:
                loss_fn = weight * loss.build_loss()
                overall_loss = loss_fn if overall_loss is None else overall_loss + loss_fn
                # Track names and individual terms for per-loss reporting.
                self.loss_names.append(loss.name)
                self.loss_functions.append(loss_fn)

        # Compute gradient of overall with respect to `wrt` tensor.
        grads = K.gradients(overall_loss, self.wrt_tensor)[0]
        if norm_grads:
            # L2-normalize to keep step sizes well conditioned.
            grads = K.l2_normalize(grads)

        # The main function to compute various quantities in optimization loop.
        self.compute_fn = K.function([self.input_tensor, K.learning_phase()],
                                     self.loss_functions + [overall_loss, grads, self.wrt_tensor])
Example 37
Project: iMIMIC-RCVs   Author: medgift   File: test_backend.py    MIT License 5 votes vote down vote up
def _compute_grads(model, input_array):
    """Gradient of the model output w.r.t. a single input sample."""
    grads_tensor = K.gradients(model.output, model.input)[0]
    run = K.function([model.input, K.learning_phase()], [grads_tensor])
    # Batch of one, test mode; strip the output-list and batch dims.
    return run([np.array([input_array]), 0])[0][0]
Example 38
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: visualizations.py    MIT License 5 votes vote down vote up
def get_saliency(image,model):
    """Returns a saliency map with same shape as image. """
    # Force inference mode globally; K._LEARNING_PHASE is a *private*
    # backend attribute -- NOTE(review): fragile across Keras versions.
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image,0)
    # Saliency = channel-wise max of |d sum(output^2) / d input|.
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss,model.input)[0])
    saliency = K.max(grads,axis=3)
    fetch_saliency = K.function([model.input,K.learning_phase()],[loss,saliency])
    outputs, saliency = fetch_saliency([image,0])
    # Restore training mode; True coerces to phase 1 -- TODO confirm the
    # caller actually expects training mode to be re-enabled here.
    K.set_learning_phase(True)
    return saliency
Example 39
Project: deeplift   Author: kundajelab   File: test_conv2d_model_channels_first.py    MIT License 5 votes vote down vote up
def setUp(self):
        """Build a small channels-first conv/pool Keras model plus the
        forward-prop and gradient functions deeplift is tested against,
        and save the model to disk for the loader tests."""
        # Random input batch: 10 samples, 10 channels, 51x51.
        self.inp = (np.random.randn(10*10*51*51)
                    .reshape(10,10,51,51))
        self.keras_model = keras.models.Sequential()
        conv_layer = keras.layers.convolutional.Conv2D(
                        filters=2, kernel_size=(4,4), strides=(2,2),
                        activation="relu", input_shape=(10,51,51),
                        data_format="channels_first")
        self.keras_model.add(conv_layer)
        self.keras_model.add(keras.layers.pooling.MaxPooling2D(
                             pool_size=(4,4), strides=(2,2),
                             data_format="channels_first")) 
        self.keras_model.add(keras.layers.pooling.AveragePooling2D(
                             pool_size=(4,4), strides=(2,2),
                             data_format="channels_first")) 
        self.keras_model.add(keras.layers.Flatten())
        self.keras_model.add(keras.layers.Dense(output_dim=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        # Reference forward pass: input (+ learning phase) -> final output.
        self.keras_output_fprop_func = compile_func(
                        [self.keras_model.layers[0].input,
                         K.learning_phase()],
                        self.keras_model.layers[-1].output)

        # Reference gradient of the pre-sigmoid output w.r.t. the input.
        grad = tf.gradients(tf.reduce_sum(
            self.keras_model.layers[-2].output[:,0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        # Save the model, replacing any leftover file from a prior run.
        self.saved_file_path = "conv2model_channelsfirst.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 40
Project: deeplift   Author: kundajelab   File: test_conv2d_model_same_padding.py    MIT License 5 votes vote down vote up
def setUp(self):
        """Build a small same-padding Conv2D model (channels-last) and
        compile forward-prop and gradient functions for the tests.

        Side effect: saves the model to self.saved_file_path, removing any
        stale copy first.
        """
        # 10 examples of shape (51, 51, 10): generated NCHW then moved to NHWC.
        self.inp = (np.random.randn(10*10*51*51)
                    .reshape(10,10,51,51)).transpose(0,2,3,1)
        self.keras_model = keras.models.Sequential()
        # Keras 2 kwargs (filters/kernel_size/strides) replace the removed
        # Keras 1 names nb_filter/nb_row/nb_col/subsample used originally.
        conv_layer1 = keras.layers.convolutional.Conv2D(
                        filters=20, kernel_size=(4,4), strides=(2,2),
                        activation="relu",
                        padding='same',
                        input_shape=(51,51,10))
        self.keras_model.add(conv_layer1)
        conv_layer2 = keras.layers.convolutional.Conv2D(
                        filters=10, kernel_size=(4,4), strides=(2,2),
                        activation="relu",
                        padding='same')
        self.keras_model.add(conv_layer2)
        self.keras_model.add(keras.layers.pooling.MaxPooling2D(
                             pool_size=(4,4), strides=(2,2),
                             padding='same'))
        self.keras_model.add(keras.layers.pooling.AveragePooling2D(
                             pool_size=(4,4), strides=(2,2),
                             padding='same'))
        self.keras_model.add(keras.layers.Flatten())
        # units=1 is the Keras 2 spelling of the removed output_dim=1.
        self.keras_model.add(keras.layers.Dense(units=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        # Forward-prop: (input batch, learning_phase) -> sigmoid output.
        self.keras_output_fprop_func = compile_func(
                        [self.keras_model.layers[0].input,
                         K.learning_phase()],
                        self.keras_model.layers[-1].output)

        # Gradient of the pre-sigmoid (Dense) output w.r.t. the input.
        grad = tf.gradients(tf.reduce_sum(
            self.keras_model.layers[-2].output[:,0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        self.saved_file_path = "conv2model_samepadding.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 41
Project: deeplift   Author: kundajelab   File: test_conv1d_model_valid_padding.py    MIT License 5 votes vote down vote up
def setUp(self):
        """Build a small valid-padding Conv1D model and compile test helpers.

        Side effect: saves the model to self.saved_file_path, removing any
        stale copy first.
        """
        # 10 examples of shape (51, 10): generated (N, C, L) then moved to (N, L, C).
        self.inp = (np.random.randn(10*10*51)
                    .reshape(10,10,51)).transpose(0,2,1)
        self.keras_model = keras.models.Sequential()
        self.keras_model.add(keras.layers.InputLayer((51,10)))
        # Keras 2 kwargs (filters/kernel_size/strides) replace the removed
        # Keras 1 names nb_filter/filter_length/subsample_length.
        conv_layer = keras.layers.convolutional.Conv1D(
                        filters=2, kernel_size=4, strides=2,
                        activation="relu", input_shape=(51,10))
        self.keras_model.add(conv_layer)
        # pool_size/strides replace the Keras 1 pool_length/stride kwargs.
        self.keras_model.add(keras.layers.pooling.MaxPooling1D(
                             pool_size=4, strides=2))
        self.keras_model.add(keras.layers.pooling.AveragePooling1D(
                             pool_size=4, strides=2))
        self.keras_model.add(keras.layers.Flatten())
        # units=1 is the Keras 2 spelling of the removed output_dim=1.
        self.keras_model.add(keras.layers.Dense(units=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        # Forward-prop: (input batch, learning_phase) -> sigmoid output.
        self.keras_output_fprop_func = compile_func(
                        [self.keras_model.layers[0].input,
                         K.learning_phase()],
                        self.keras_model.layers[-1].output)

        # Gradient of the pre-sigmoid (Dense) output w.r.t. the input.
        grad = tf.gradients(tf.reduce_sum(
            self.keras_model.layers[-2].output[:,0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        self.saved_file_path = "conv1model_validpadding.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 42
Project: deeplift   Author: kundajelab   File: test_conv2d_model_valid_padding.py    MIT License 5 votes vote down vote up
def setUp(self):
        """Build a small valid-padding Conv2D model (channels-last) and
        compile forward-prop and gradient functions for the tests.

        Side effect: saves the model to self.saved_file_path, removing any
        stale copy first.
        """
        # 10 examples of shape (51, 51, 10): generated NCHW then moved to NHWC.
        self.inp = (np.random.randn(10*10*51*51)
                    .reshape(10,10,51,51)).transpose(0,2,3,1)
        self.keras_model = keras.models.Sequential()
        # Keras 2 kwargs (filters/kernel_size/strides) replace the removed
        # Keras 1 names nb_filter/nb_row/nb_col/subsample used originally.
        conv_layer = keras.layers.convolutional.Conv2D(
                        filters=2, kernel_size=(4,4), strides=(2,2),
                        activation="relu", input_shape=(51,51,10))
        self.keras_model.add(conv_layer)
        self.keras_model.add(keras.layers.pooling.MaxPooling2D(
                             pool_size=(4,4), strides=(2,2)))
        self.keras_model.add(keras.layers.pooling.AveragePooling2D(
                             pool_size=(4,4), strides=(2,2)))
        self.keras_model.add(keras.layers.Flatten())
        # units=1 is the Keras 2 spelling of the removed output_dim=1.
        self.keras_model.add(keras.layers.Dense(units=1))
        self.keras_model.add(keras.layers.core.Activation("sigmoid"))
        self.keras_model.compile(loss="mse", optimizer="sgd")
        # Forward-prop: (input batch, learning_phase) -> sigmoid output.
        self.keras_output_fprop_func = compile_func(
                        [self.keras_model.layers[0].input,
                         K.learning_phase()],
                        self.keras_model.layers[-1].output)

        # Gradient of the pre-sigmoid (Dense) output w.r.t. the input.
        grad = tf.gradients(tf.reduce_sum(
            self.keras_model.layers[-2].output[:,0]),
            [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input,
             K.learning_phase()], grad)

        self.saved_file_path = "conv2model_validpadding.h5"
        if (os.path.isfile(self.saved_file_path)):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 43
Project: GewitterGefahr   Author: thunderhoser   File: gradcam.py    MIT License 5 votes vote down vote up
def _make_saliency_function(model_object, target_layer_name,
                            input_layer_indices):
    """Creates saliency function.

    The function computes the gradient of the target layer's
    channel-maxed activations with respect to each value of the selected
    input layers.

    :param model_object: Instance of `keras.models.Model` or
        `keras.models.Sequential`.
    :param target_layer_name: Target layer (the gradient numerator is based
        on activations in this layer).
    :param input_layer_indices: 1-D numpy array of indices.  If the array
        contains j, the gradient is taken with respect to every value in
        the [j]th input layer.
    :return: saliency_function: Instance of `keras.backend.function`.
    """

    target_output = model_object.get_layer(name=target_layer_name).output
    # Collapse the channel axis so the numerator is the strongest filter
    # response at each position.
    channel_max = K.max(target_output, axis=-1)

    input_tensors = model_object.input
    if not isinstance(input_tensors, list):
        input_tensors = [input_tensors]

    wrt_tensors = [input_tensors[j] for j in input_layer_indices]
    saliency_tensors = K.gradients(K.sum(channel_max), wrt_tensors)

    # The trailing learning-phase input lets callers force inference mode.
    return K.function(
        input_tensors + [K.learning_phase()], saliency_tensors)
Example 44
Project: GewitterGefahr   Author: thunderhoser   File: model_activation.py    MIT License 5 votes vote down vote up
def get_neuron_activation_for_examples(
        model_object, layer_name, neuron_indices, list_of_input_matrices):
    """For each input example, return the activation of one neuron.

    :param model_object: Instance of `keras.models.Model`.
    :param layer_name: Name of layer containing the relevant neuron.
    :param neuron_indices: 1-D numpy array with indices of the relevant neuron.
        Must have length K - 1, where K = number of dimensions in layer output.
        The first dimension of the layer output is the example dimension, for
        which all indices from 0...(E - 1) are used.
    :param list_of_input_matrices: See doc for
        `get_class_activation_for_examples`.
    :return: activation_values: length-E numpy array, where activation_values[i]
        is the activation of the given neuron by the [i]th example.
    """

    check_metadata(
        component_type_string=model_interpretation.NEURON_COMPONENT_TYPE_STRING,
        layer_name=layer_name,
        neuron_index_matrix=numpy.expand_dims(neuron_indices, axis=0)
    )

    input_tensors = model_object.input
    if not isinstance(input_tensors, list):
        input_tensors = [input_tensors]

    neuron_tensor = model_object.get_layer(name=layer_name).output[
        ..., neuron_indices]
    activation_function = K.function(
        input_tensors + [K.learning_phase()], [neuron_tensor])

    # Trailing 0 feeds learning_phase = 0 (inference mode).
    return activation_function(list_of_input_matrices + [0])[0]
Example 45
Project: synthesis-database-public   Author: olivettigroup   File: token_classifier.py    MIT License 5 votes vote down vote up
def load(self, filepath='bin/token_classifier_nn'):
    """Load a saved token-classifier model and build its fast-predict fn.

    The model-type suffix is appended to *filepath* before loading.
    Returns -1 on any load failure (original contract); returns None on
    success.
    """
    try:
      filepath += '.' + str(self.model_type) + '.model'
      self.model = load_model(filepath, custom_objects={'_non_null_accuracy': self._non_null_accuracy})
      # Inference helper: feeds learning_phase explicitly, returns the
      # first hidden layer's output.
      self.fast_predict = K.function(
        self.model.inputs + [K.learning_phase()],
        [self.model.layers[1].output]
      )
    except Exception:
      # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit are
      # no longer swallowed; any ordinary load failure still signals -1.
      return -1
Example 46
Project: twitter-emotion-recognition   Author: nikicc   File: emotion_predictor.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def _get_embeddings_model(self):
        """Return a callable mapping an input batch to the activations of
        the third-from-last layer, evaluated in inference mode."""
        extract = K.function(
            [self.model.layers[0].input, K.learning_phase()],
            [self.model.layers[-3].output])

        def embed(batch):
            # 0 -> test phase (dropout etc. disabled).
            return extract([batch, 0])[0]

        return embed
Example 47
Project: Face-and-Emotion-Recognition   Author: vjgpt   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_saliency_function(model, activation_layer='conv2d_7'):
    """Compile a function mapping (image batch, learning_phase) to the
    gradient of the named layer's channel-maxed activations w.r.t. the
    input image."""
    inp = model.input
    feature_maps = model.get_layer(activation_layer).output
    # Strongest filter response at each spatial position (axis 3 = channels).
    strongest = K.max(feature_maps, axis=3)
    grads = K.gradients(K.sum(strongest), inp)[0]
    return K.function([inp, K.learning_phase()], [grads])
Example 48
Project: Face-and-Emotion-Recognition   Author: vjgpt   File: grad_cam.py    MIT License 5 votes vote down vote up
def compile_gradient_function(input_model, category_index, layer_name):
    """Compile a Grad-CAM helper returning, for an input batch, the named
    conv layer's output and the (normalized) gradient of the target
    category's loss with respect to that output."""
    wrapped = Sequential()
    wrapped.add(input_model)

    num_classes = wrapped.output_shape[1]
    # Mask the model output down to the requested category.
    wrapped.add(Lambda(
        lambda x: target_category_loss(x, category_index, num_classes),
        output_shape=target_category_loss_output_shape))

    loss = K.sum(wrapped.layers[-1].output)
    conv_output = wrapped.layers[0].get_layer(layer_name).output
    grads = normalize(K.gradients(loss, conv_output)[0])
    return K.function([wrapped.layers[0].input, K.learning_phase()],
                      [conv_output, grads])
Example 49
Project: poker   Author: surgebiswas   File: callbacks.py    MIT License 5 votes vote down vote up
def on_epoch_end(self, epoch, logs=None):
        """Write TensorBoard summaries at the end of each epoch.

        Runs the merged histogram summaries over the model's validation
        data every `histogram_freq` epochs, then emits one scalar summary
        per entry in `logs`.
        """
        logs = logs or {}

        if self.model.validation_data and self.histogram_freq:
            if epoch % self.histogram_freq == 0:
                # TODO: implement batched calls to sess.run
                # (current call will likely go OOM on GPU)
                if self.model.uses_learning_phase:
                    # Keep only the leading input arrays of validation_data
                    # (it also carries targets/sample weights past this cut)
                    # and feed learning_phase = 0 (inference).
                    cut_v_data = len(self.model.inputs)
                    val_data = self.model.validation_data[:cut_v_data] + [0]
                    tensors = self.model.inputs + [K.learning_phase()]
                else:
                    val_data = self.model.validation_data
                    tensors = self.model.inputs
                feed_dict = dict(zip(tensors, val_data))
                result = self.sess.run([self.merged], feed_dict=feed_dict)
                summary_str = result[0]
                self.writer.add_summary(summary_str, epoch)

        for name, value in logs.items():
            # 'batch' and 'size' are bookkeeping entries, not metrics.
            if name in ['batch', 'size']:
                continue
            summary = tf.Summary()
            summary_value = summary.value.add()
            # .item() assumes value is a numpy scalar — TODO confirm callers.
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.writer.add_summary(summary, epoch)
        self.writer.flush()
Example 50
Project: keras-fcn   Author: JihongJu   File: callbacks.py    MIT License 5 votes vote down vote up
def on_batch_end(self, batch, logs=None):
        """Every `histogram_freq` batches, evaluate each layer's output on
        the validation data and halt training if any NaN/Inf appears."""
        if self.validation_data and self.histogram_freq:
            if batch % self.histogram_freq == 0:
                for layer in self.model.layers:
                    # NOTE(review): a fresh K.function is compiled for every
                    # layer on every checked batch — expensive; consider
                    # building these once.  Also assumes validation_data
                    # lines up with [model.input, learning_phase] — TODO
                    # confirm against the callback's setup.
                    functor = K.function([self.model.input, K.learning_phase()], [layer.output])
                    layer_out = functor(self.validation_data)
                    if np.any(np.isnan(layer_out)) or np.any(np.isinf(layer_out)):
                        print('The output of {} becomes nan'.format(layer.name))
                        self.model.stop_training = True
Example 51
Project: DGMs_for_semi-unsupervised_learning   Author: SemiUnsupervisedLearning   File: model.py    MIT License 5 votes vote down vote up
def training_fd(self, x_l, y_l, x_u):
        """Assemble the feed dict for one training step under the current
        learning paradigm.

        Always feeds the shared placeholders (x, y, reg_term) and sets
        learning_phase = 1 (train mode); labelled/unlabelled placeholders
        are added per paradigm.  Returns None for an unrecognized
        paradigm, matching the original fall-through behaviour.
        """
        fd = {self.x: x_l, self.y: y_l,
              self.reg_term: self.n_train, K.learning_phase(): 1}
        paradigm = self.learning_paradigm
        if paradigm == 'semisupervised' or paradigm == 'semi-unsupervised':
            fd.update({self.x_l: x_l, self.y_l: y_l, self.x_u: x_u})
            return fd
        if paradigm == 'supervised':
            fd.update({self.x_l: x_l, self.y_l: y_l})
            return fd
        if paradigm == 'unsupervised':
            fd.update({self.x_u: x_u})
            return fd
Example 52
Project: DGMs_for_semi-unsupervised_learning   Author: SemiUnsupervisedLearning   File: model.py    MIT License 5 votes vote down vote up
def _printing_feed_dict(self, Data, x_l, x_u, y, eval_samps, binarize):
        """Build the evaluation feed dict used when printing metrics.

        Samples `eval_samps` examples from the train and test splits and
        feeds them alongside the current labelled/unlabelled batches, with
        learning_phase = 0 (inference).  `binarize` is accepted but unused,
        mirroring the original signature.
        """
        tr_x, tr_y = Data.sample_train(eval_samps)
        te_x, te_y = Data.sample_test(eval_samps)
        feed = {K.learning_phase(): 0}
        feed[self.x_train] = tr_x
        feed[self.y_train] = tr_y
        feed[self.x_test] = te_x
        feed[self.y_test] = te_y
        feed[self.x_l] = x_l
        feed[self.y_l] = y
        feed[self.x_u] = x_u
        return feed
Example 53
Project: deep-pmsm   Author: wkirgsn   File: custom_layers.py    MIT License 5 votes vote down vote up
def data_based_init(model, input):
    """Data-dependent initialization: rescale each layer's W and b so that
    the layer's output has ~zero mean / unit variance on `input`.

    :param model: compiled Keras model whose layers expose `W` and `b`
        attributes (Keras 1 style).
    :param input: feed dict, numpy array, or list of numpy arrays matching
        model.inputs.  (NOTE(review): parameter shadows the builtin
        `input`; renaming would change the keyword interface.)
    """

    # input can be dict, numpy array, or list of numpy arrays
    if type(input) is dict:
        feed_dict = input
    elif type(input) is list:
        feed_dict = {tf_inp: np_inp for tf_inp,np_inp in zip(model.inputs,input)}
    else:
        feed_dict = {model.inputs[0]: input}

    # add learning phase if required (1 = training mode)
    if model.uses_learning_phase and K.learning_phase() not in feed_dict:
        feed_dict.update({K.learning_phase(): 1})

    # get all layer name, output, weight, bias tuples
    layer_output_weight_bias = []
    for l in model.layers:
        # only layers exposing Keras-1-style W/b attributes participate
        if hasattr(l, 'W') and hasattr(l, 'b'):
            assert(l.built)
            layer_output_weight_bias.append( (l.name,l.get_output_at(0),l.W,l.b) ) # if more than one node, only use the first

    # iterate over our list and do data dependent init
    sess = K.get_session()
    for l,o,W,b in layer_output_weight_bias:
        print('Performing data dependent initialization for layer ' + l)
        # mean/variance over all axes except the last (the units axis)
        m,v = tf.nn.moments(o, [i for i in range(len(o.get_shape())-1)])
        s = tf.sqrt(v + 1e-10)
        # divide W by s (broadcast over the last axis) and shift/scale b so
        # the layer output becomes standardized on this batch
        updates = tf.group(W.assign(W/tf.reshape(s,[1]*(len(W.get_shape())-1)+[-1])), b.assign((b-m)/s))
        sess.run(updates, feed_dict)
Example 54
Project: TwitterCityMood   Author: anthonymirand   File: emotion_predictor.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def _get_embeddings_model(self):
    """Return a callable that maps an input batch to the activations of the
    third-from-last layer, evaluated with learning_phase = 0 (inference)."""
    extract = K.function(
        [self.model.layers[0].input, K.learning_phase()],
        [self.model.layers[-3].output])
    return lambda batch: extract([batch, 0])[0]
Example 55
Project: neural-fingerprinting   Author: StephanZheng   File: attacks.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def basic_iterative_method(sess, model, X, Y, eps, eps_iter, nb_iter=50,
                           clip_min=None, clip_max=None, batch_size=256):
    """Run the Basic Iterative Method (iterated FGSM) attack.

    :param sess: TF session used for batched graph evaluation.
    :param model: callable Keras model (predictions, i.e. post-softmax).
    :param X: clean inputs, shape (n_samples, ...).
    :param Y: one-hot labels for X.
    :param eps: maximum total L-inf perturbation around X.
    :param eps_iter: per-iteration FGSM step size.
    :param nb_iter: number of attack iterations.
    :param clip_min: minimum pixel value (optional).
    :param clip_max: maximum pixel value (optional).
    :param batch_size: evaluation batch size.
    :return: (its, results) where its maps sample index -> first iteration
        at which that sample was misclassified (default nb_iter-1), and
        results has shape (nb_iter,) + X.shape, holding the adversarial
        batch after each iteration.
    """
    print("nb_iter",nb_iter)
    # Define TF placeholders for the input and output
    x = tf.placeholder(tf.float32, shape=(None,)+X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,)+Y.shape[1:])
    # results will hold the adversarial inputs at each iteration of BIM;
    # thus it will have shape (nb_iter, n_samples, n_rows, n_cols, n_channels)
    results = np.zeros((nb_iter, X.shape[0],) + X.shape[1:])
    # Initialize adversarial samples as the original samples, set upper and
    # lower bounds
    X_adv = X
    X_min = X_adv - eps
    X_max = X_adv + eps
    print('Running BIM iterations...')
    # "its" is a dictionary that keeps track of the iteration at which each
    # sample becomes misclassified. The default value will be (nb_iter-1), the
    # very last iteration.
    def f(val):
        return lambda: val
    its = defaultdict(f(nb_iter-1))
    # Out keeps track of which samples have already been misclassified
    out = set()
    for i in tqdm(range(nb_iter)):
        # NOTE(review): fgsm() re-creates the attack graph on every
        # iteration, growing the TF graph; consider hoisting it out of
        # the loop — TODO confirm fgsm's graph-construction semantics.
        adv_x = fgsm(
            x, model(x), eps=eps_iter,
            clip_min=clip_min, clip_max=clip_max, y=y
        )
        X_adv, = batch_eval(
            sess, [x, y], [adv_x],
            [X_adv, Y], feed={K.learning_phase(): 0},
            args={'batch_size': batch_size}
        )
        # project back into the eps-ball around the clean inputs
        X_adv = np.maximum(np.minimum(X_adv, X_max), X_min)
        results[i] = X_adv
        # check misclassifieds
        predictions = model.predict_classes(X_adv, batch_size=512, verbose=0)
        misclassifieds = np.where(predictions != Y.argmax(axis=1))[0]
        for elt in misclassifieds:
            if elt not in out:
                its[elt] = i
                out.add(elt)

    return its, results
Example 56
Project: Logo-Retrieval-in-Commercial-Plaza   Author: zhang-rongchen   File: yolo.py    MIT License 4 votes vote down vote up
def detect_image(self, image):
        """Run YOLO detection on one image; return the annotated image and
        the list of cropped logo regions.

        NOTE(review): the resize branch reads image.width/image.height
        (PIL-style) while the feed dict and cropping read image.shape and
        slice it (numpy/cv2-style) — these are mutually exclusive APIs;
        confirm the caller's image type.
        """

        start = timer()
        if self.model_image_size != (None, None):
            # model input must be a multiple of 32 in both dimensions
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')
        print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        # learning_phase 0 = inference mode
        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.shape[0], image.shape[1]],
                K.learning_phase(): 0
            })

        print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        logos = []
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]
            label = '{} {:.2f}'.format(predicted_class, score)
            top, left, bottom, right = box
            # round to pixel coordinates and clamp to the image bounds
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))
            print(label, (left, top), (right, bottom))
            roi = image[top:bottom, left:right]
            logos.append(roi)
            #cv2.rectangle(image, (left, top), (right, bottom), color=(255,255,0),thickness=8)
            text_origin = np.array([left, top + 1])
            cv2.putText(image, label, tuple(text_origin), cv2.FONT_HERSHEY_COMPLEX, 2, (0,255,0), 3)
        end = timer()
        print(end - start)
        return image, logos
Example 57
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 4 votes vote down vote up
def run_graph(self, images, outputs, image_metas=None):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        image_metas: If provided, the images are assumed to be already
            molded (i.e. resized, padded, and normalized)

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Copy the list so appending the learning-phase tensor cannot
        # mutate model.inputs in place (the original aliased it via +=).
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        # Fix: build the function over `inputs` (not model.inputs) so the
        # learning-phase value appended to model_in below is actually fed.
        kf = K.function(inputs, list(outputs.values()))

        # Prepare inputs
        if image_metas is None:
            molded_images, image_metas, _ = self.mold_inputs(images)
        else:
            molded_images = images
        image_shape = molded_images[0].shape
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        model_in = [molded_images, image_metas, anchors]

        # Run inference (learning phase 0. = test mode)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 58
Project: blackbox-attacks   Author: sunblaze-ucb   File: tf_utils.py    MIT License 4 votes vote down vote up
def batch_eval(tf_inputs, tf_outputs, numpy_inputs):
    """
    A helper function that computes a tensor on numpy inputs by batches.
    From: https://github.com/openai/cleverhans/blob/master/cleverhans/utils_tf.py

    tf_inputs/numpy_inputs are parallel lists (one numpy array per
    placeholder); tf_outputs are evaluated with learning_phase = 0 and the
    per-batch results concatenated along axis 0.
    """

    n = len(numpy_inputs)
    assert n > 0
    assert n == len(tf_inputs)
    m = numpy_inputs[0].shape[0]
    # every input must carry the same number of examples
    for i in range(1, n):
        assert numpy_inputs[i].shape[0] == m

    out = [[] for _ in tf_outputs]

    for start in range(0, m, BATCH_SIZE):
        # `start` already steps in multiples of BATCH_SIZE; the original
        # recomputed it via batch = start // BATCH_SIZE (a no-op).
        end = start + BATCH_SIZE
        numpy_input_batches = [numpy_input[start:end]
                               for numpy_input in numpy_inputs]
        cur_batch_size = numpy_input_batches[0].shape[0]
        assert cur_batch_size <= BATCH_SIZE
        for e in numpy_input_batches:
            assert e.shape[0] == cur_batch_size

        feed_dict = dict(zip(tf_inputs, numpy_input_batches))
        feed_dict[K.learning_phase()] = 0  # inference mode
        numpy_output_batches = K.get_session().run(tf_outputs,
                                                   feed_dict=feed_dict)
        for e in numpy_output_batches:
            assert e.shape[0] == cur_batch_size, e.shape
        for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
            out_elem.append(numpy_output_batch)

    out = [np.concatenate(x, axis=0) for x in out]
    for e in out:
        assert e.shape[0] == m, e.shape
    return out
Example 59
Project: blackbox-attacks   Author: sunblaze-ucb   File: tf_utils.py    MIT License 4 votes vote down vote up
def batch_eval(tf_inputs, tf_outputs, numpy_inputs):
    """
    A helper function that computes a tensor on numpy inputs by batches.
    From: https://github.com/openai/cleverhans/blob/master/cleverhans/utils_tf.py

    tf_inputs/numpy_inputs are parallel lists (one numpy array per
    placeholder); tf_outputs are evaluated with learning_phase = 0 and the
    per-batch results concatenated along axis 0.
    """

    n = len(numpy_inputs)
    assert n > 0
    assert n == len(tf_inputs)
    m = numpy_inputs[0].shape[0]
    # every input must carry the same number of examples
    for i in range(1, n):
        assert numpy_inputs[i].shape[0] == m

    out = [[] for _ in tf_outputs]

    for start in range(0, m, BATCH_SIZE):
        # `start` already steps in multiples of BATCH_SIZE; the original
        # recomputed it via batch = start // BATCH_SIZE (a no-op).
        end = start + BATCH_SIZE
        numpy_input_batches = [numpy_input[start:end]
                               for numpy_input in numpy_inputs]
        cur_batch_size = numpy_input_batches[0].shape[0]
        assert cur_batch_size <= BATCH_SIZE
        for e in numpy_input_batches:
            assert e.shape[0] == cur_batch_size

        feed_dict = dict(zip(tf_inputs, numpy_input_batches))
        feed_dict[K.learning_phase()] = 0  # inference mode
        numpy_output_batches = K.get_session().run(tf_outputs,
                                                   feed_dict=feed_dict)
        for e in numpy_output_batches:
            assert e.shape[0] == cur_batch_size, e.shape
        for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
            out_elem.append(numpy_output_batch)

    out = [np.concatenate(x, axis=0) for x in out]
    for e in out:
        assert e.shape[0] == m, e.shape
    return out
Example 60
Project: multi-object-tracking   Author: jguoaj   File: yolo.py    GNU General Public License v3.0 4 votes vote down vote up
def detect_image(self, image):
        """Run YOLO on one PIL image and return person boxes as
        [x, y, w, h] lists (top-left corner plus width/height), clamped to
        the image bounds.  Non-person detections are discarded."""
        if self.is_fixed_size:
            # model input must be a multiple of 32 in both dimensions
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        #print(image_data.shape)
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        
        # learning_phase 0 = inference; image.size is (width, height), the
        # placeholder expects (height, width)
        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        return_boxs = []
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            if predicted_class != 'person' :
                continue
            # box layout is (top, left, bottom, right)
            box = out_boxes[i]
           # score = out_scores[i]  
            x = int(box[1])  
            y = int(box[0])  
            w = int(box[3]-box[1])
            h = int(box[2]-box[0])
            # clamp boxes that extend past the left/top edges
            if x < 0 :
                w = w + x
                x = 0
            if y < 0 :
                h = h + y
                y = 0 
            return_boxs.append([x,y,w,h])

        return return_boxs
Example 61
Project: labelImg   Author: keyuncheng   File: model.py    MIT License 4 votes vote down vote up
def run_graph(self, images, outputs):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Copy the list so appending the learning-phase tensor cannot
        # mutate model.inputs in place (the original aliased it via +=).
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        # Fix: build the function over `inputs` (not model.inputs) so the
        # learning-phase value appended to model_in below is actually fed.
        kf = K.function(inputs, list(outputs.values()))

        # Run inference
        molded_images, image_metas, windows = self.mold_inputs(images)
        # TODO: support training mode?
        # if TEST_MODE == "training":
        #     model_in = [molded_images, image_metas,
        #                 target_rpn_match, target_rpn_bbox,
        #                 gt_boxes, gt_masks]
        #     if not config.USE_RPN_ROIS:
        #         model_in.append(target_rois)
        #     if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        #         model_in.append(1.)
        #     outputs_np = kf(model_in)
        # else:

        model_in = [molded_images, image_metas]
        # learning phase 0. = test mode
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 62
Project: Mask-R-CNN-sports-action-fine-tuing   Author: adelmassimo   File: model.py    MIT License 4 votes vote down vote up
def run_graph(self, images, outputs):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Copy the list so appending the learning-phase tensor cannot
        # mutate model.inputs in place (the original aliased it via +=).
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        # Fix: build the function over `inputs` (not model.inputs) so the
        # learning-phase value appended to model_in below is actually fed.
        kf = K.function(inputs, list(outputs.values()))

        # Run inference
        molded_images, image_metas, windows = self.mold_inputs(images)
        # TODO: support training mode?
        # if TEST_MODE == "training":
        #     model_in = [molded_images, image_metas,
        #                 target_rpn_match, target_rpn_bbox,
        #                 gt_boxes, gt_masks]
        #     if not config.USE_RPN_ROIS:
        #         model_in.append(target_rois)
        #     if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
        #         model_in.append(1.)
        #     outputs_np = kf(model_in)
        # else:

        model_in = [molded_images, image_metas]
        # learning phase 0. = test mode
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 63
Project: PanopticSegmentation   Author: dmechea   File: model.py    MIT License 4 votes vote down vote up
def run_graph(self, images, outputs, image_metas=None):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        image_metas: If provided, the images are assumed to be already
            molded (i.e. resized, padded, and normalized)

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Fix: copy model.inputs before appending the learning-phase tensor.
        # The previous `inputs = model.inputs; inputs += [...]` extended
        # model.inputs in place (list aliasing), permanently corrupting the
        # model's input list; K.function only worked because of that mutation.
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))

        # Prepare inputs
        if image_metas is None:
            molded_images, image_metas, _ = self.mold_inputs(images)
        else:
            molded_images = images
        image_shape = molded_images[0].shape
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        model_in = [molded_images, image_metas, anchors]

        # Run inference. Feed 0. (= test phase) when the model expects a
        # learning-phase value.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 64
Project: lucid4keras   Author: totti0223   File: render.py    Apache License 2.0 4 votes vote down vote up
def keras_make_vis_T(input_model, objective_f, param_f=None, optimizer=None,
               transforms=None):
    """Assemble the tensors needed for feature visualization.

    :param input_model: Keras model whose activations are being visualized.
    :param objective_f: objective spec; resolved through `as_objective`.
    :param param_f: image parameterization passed to `make_t_image`.
    :param optimizer: unused here (kept for interface compatibility).
    :param transforms: transform spec passed to `make_transform_f`.
    :return: (t_image, loss, train) where t_image is the initial image batch
        (numpy array), loss is the objective tensor, and train is a Keras
        function mapping [image, learning_phase] -> [loss, grads].
    """
    def connect_model(bottom, input_size, transform_f):
        """Prepend transform_f to `bottom` via a Lambda layer."""
        input_tensor = Input(shape=input_size)
        transformed = Lambda(lambda inputs: transform_f(inputs),
                             name="transform_layer")(input_tensor)
        top = Model(inputs=input_tensor, outputs=transformed)
        return Model(inputs=top.input, outputs=bottom(top.output))

    _t_image = make_t_image(param_f)
    # Evaluate to a concrete numpy array so the image is optimized host-side;
    # keeps the per-step graph small (original author's note).
    _t_image = K.eval(_t_image)

    input_size = _t_image.shape[1:]

    transform_f = make_transform_f(transforms)
    target_model = connect_model(input_model, input_size, transform_f)

    objective_f = as_objective(objective_f)

    # Some objectives combine several activations (e.g. by addition or
    # subtraction) and declare how many image copies they need via batch_n.
    # Fix: the original bare `except:` swallowed every exception (including
    # KeyboardInterrupt); catch only the failures a missing or invalid
    # batch_n can produce.
    try:
        batch = int(objective_f.batch_n)
    except (AttributeError, TypeError, ValueError):
        batch = 1

    if batch > 1:
        # Replicate the single generated image across the batch axis
        # (vectorized replacement of the original element-wise fill loop;
        # keeps the float64 dtype np.zeros produced before).
        t_image = np.zeros((batch,) + _t_image.shape[1:])
        t_image[:] = _t_image[0]
    else:
        t_image = _t_image

    loss = objective_f(target_model)
    grads = K.gradients(loss, target_model.input)[0]
    train = K.function([target_model.input, K.learning_phase()],
                       [loss, grads])
    return t_image, loss, train
Example 65
Project: models   Author: IntelAI   File: model.py    Apache License 2.0 4 votes vote down vote up
def run_graph(self, images, outputs):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Fix: copy model.inputs before appending the learning-phase tensor.
        # The previous `inputs = model.inputs; inputs += [...]` extended
        # model.inputs in place (list aliasing), permanently corrupting the
        # model's input list; K.function only worked because of that mutation.
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))

        # Run inference
        molded_images, image_metas, windows = self.mold_inputs(images)
        # TODO: support training mode (feed RPN/GT targets and learning
        # phase 1. instead of the inference inputs below).
        model_in = [molded_images, image_metas]
        # Feed 0. (= test phase) when the model expects a learning-phase value.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 66
Project: deeplift   Author: kundajelab   File: test_load_batchnorm.py    MIT License 4 votes vote down vote up
def setUp(self):
        """Build a small BatchNorm -> Flatten -> Dense model with fixed
        weights, compile forward/gradient reference functions for it, and
        save it to disk so the load tests can read it back."""
        self.axis = 3
        self.inp = (np.arange(16).reshape(2, 2, 2, 2)
                    .transpose(0, 2, 3, 1).astype("float32"))
        self.keras_model = keras.models.Sequential()
        self.epsilon = 10**(-3)
        self.gamma = np.array([2.0, 3.0]).astype("float32")
        self.beta = np.array([4.0, 5.0]).astype("float32")
        self.mean = np.array([3.0, 3.0]).astype("float32")
        self.var = np.array([4.0, 9.0]).astype("float32")
        batch_norm_layer = keras.layers.normalization.BatchNormalization(
                           axis=self.axis, input_shape=(2, 2, 2))
        self.keras_model.add(batch_norm_layer)
        batch_norm_layer.set_weights(np.array([
                                      self.gamma,  # gamma (scaling)
                                      self.beta,  # beta (shift)
                                      self.mean,  # moving mean
                                      self.var]))  # moving variance (not std)
        self.keras_model.add(keras.layers.Flatten())
        dense_layer = keras.layers.Dense(output_dim=1)
        self.keras_model.add(dense_layer)
        dense_layer.set_weights([np.ones((1, 8)).astype("float32").T,
                                 np.zeros(1).astype("float32")])
        self.keras_model.compile(loss="mse", optimizer="sgd")

        # Fix: the original compiled this function twice and discarded the
        # first result (a dead local variable); compile it once.
        self.keras_batchnorm_fprop_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()],
            self.keras_model.layers[0].output)
        self.keras_output_fprop_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()],
            self.keras_model.layers[-1].output)

        # Gradient of the (scalar) summed output w.r.t. the model input,
        # used as the reference for deeplift's gradient computation.
        grad = tf.gradients(tf.reduce_sum(
                   self.keras_model.layers[-1].output[:, 0]),
                   [self.keras_model.layers[0].input])[0]
        self.grad_func = compile_func(
            [self.keras_model.layers[0].input, K.learning_phase()], grad)

        # Remove any stale saved model before writing a fresh one.
        self.saved_file_path = "batchnorm_model.h5"
        if os.path.isfile(self.saved_file_path):
            os.remove(self.saved_file_path)
        self.keras_model.save(self.saved_file_path)
Example 67
Project: HandyNet   Author: arangesh   File: model.py    MIT License 4 votes vote down vote up
def run_graph(self, images, outputs):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Fix: copy model.inputs before appending the learning-phase tensor.
        # The previous `inputs = model.inputs; inputs += [...]` extended
        # model.inputs in place (list aliasing), permanently corrupting the
        # model's input list; K.function only worked because of that mutation.
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))

        # Run inference
        molded_images, image_metas, windows = self.mold_inputs(images)
        # TODO: support training mode (feed RPN/GT targets and learning
        # phase 1. instead of the inference inputs below).
        model_in = [molded_images, image_metas]
        # Feed 0. (= test phase) when the model expects a learning-phase value.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 68
Project: GewitterGefahr   Author: thunderhoser   File: saliency_maps.py    MIT License 4 votes vote down vote up
def _do_saliency_calculations(
        model_object, loss_tensor, list_of_input_matrices):
    """Computes saliency maps for the given examples.

    T = number of input tensors to the model
    E = number of examples (storm objects)

    :param model_object: Instance of `keras.models.Model`.
    :param loss_tensor: Keras tensor defining the loss function.
    :param list_of_input_matrices: length-T list of numpy arrays, comprising one
        or more examples (storm objects).  list_of_input_matrices[i] must have
        the same dimensions as the [i]th input tensor to the model.
    :return: list_of_saliency_matrices: length-T list of numpy arrays,
        comprising the saliency map for each example.
        list_of_saliency_matrices[i] has the same dimensions as
        list_of_input_matrices[i] and defines the "saliency" of each value x,
        which is the gradient of the loss function with respect to x.
    """

    model_input = model_object.input
    if isinstance(model_input, list):
        list_of_input_tensors = model_input
    else:
        list_of_input_tensors = [model_input]

    list_of_gradient_tensors = K.gradients(loss_tensor, list_of_input_tensors)
    num_input_tensors = len(list_of_input_tensors)

    # Normalize each gradient tensor by its standard deviation (epsilon floor
    # guards against division by ~zero).
    for k in range(num_input_tensors):
        this_denom = K.maximum(
            K.std(list_of_gradient_tensors[k]), K.epsilon())
        list_of_gradient_tensors[k] = list_of_gradient_tensors[k] / this_denom

    inputs_to_gradients_function = K.function(
        list_of_input_tensors + [K.learning_phase()],
        list_of_gradient_tensors)

    # Learning phase 0 = inference mode.
    list_of_saliency_matrices = inputs_to_gradients_function(
        list_of_input_matrices + [0])

    # Saliency is the *negative* gradient of the loss.
    for k in range(num_input_tensors):
        list_of_saliency_matrices[k] = list_of_saliency_matrices[k] * -1

    return list_of_saliency_matrices
Example 69
Project: GewitterGefahr   Author: thunderhoser   File: model_activation.py    MIT License 4 votes vote down vote up
def get_class_activation_for_examples(
        model_object, target_class, list_of_input_matrices):
    """For each input example, returns predicted probability of target class.

    :param model_object: Instance of `keras.models.Model`.
    :param target_class: Predictions will be returned for this class.  Must be
        an integer in 0...(K - 1), where K = number of classes.
    :param list_of_input_matrices: length-T list of numpy arrays, comprising
        one or more examples (storm objects).  list_of_input_matrices[i] must
        have the same dimensions as the [i]th input tensor to the model.
    :return: activation_values: length-E numpy array, where activation_values[i]
        is the activation (predicted probability or logit) of the target class
        for the [i]th example.
    """

    check_metadata(
        component_type_string=model_interpretation.CLASS_COMPONENT_TYPE_STRING,
        target_class=target_class
    )

    model_input = model_object.input
    list_of_input_tensors = (
        model_input if isinstance(model_input, list) else [model_input]
    )

    last_layer_output = model_object.layers[-1].output
    num_output_neurons = last_layer_output.get_shape().as_list()[-1]

    if num_output_neurons == 1:
        # Binary model with one sigmoid neuron: class 1 is the neuron itself,
        # class 0 is its complement.
        error_checking.assert_is_leq(target_class, 1)
        if target_class == 1:
            output_tensor = last_layer_output[..., 0]
        else:
            output_tensor = 1. - last_layer_output[..., 0]
    else:
        error_checking.assert_is_less_than(target_class, num_output_neurons)
        output_tensor = last_layer_output[..., target_class]

    activation_function = K.function(
        list_of_input_tensors + [K.learning_phase()], [output_tensor]
    )

    # Learning phase 0 = inference mode.
    return activation_function(list_of_input_matrices + [0])[0]
Example 70
Project: GewitterGefahr   Author: thunderhoser   File: model_activation.py    MIT License 4 votes vote down vote up
def get_channel_activation_for_examples(
        model_object, layer_name, channel_index, list_of_input_matrices,
        stat_function_for_neuron_activations):
    """For each input example, returns the activation of one channel.

    :param model_object: Instance of `keras.models.Model`.
    :param layer_name: Name of layer containing the relevant channel.
    :param channel_index: Index of the relevant channel.  This method computes
        activations for the [j]th output channel of `layer_name`, where
        j = `channel_index`.
    :param list_of_input_matrices: See doc for
        `get_class_activation_for_examples`.
    :param stat_function_for_neuron_activations: Function used to process neuron
        activations (needed because a channel generally has many neurons).  This
        function must take a Keras tensor (containing neuron activations) and
        return a single number.  Some examples are `keras.backend.max` and
        `keras.backend.mean`.
    :return: activation_values: length-E numpy array, where activation_values[i]
        is stat_function_for_neuron_activations(channel_activations) for the
        [i]th example.
    """

    check_metadata(
        component_type_string=
        model_interpretation.CHANNEL_COMPONENT_TYPE_STRING,
        layer_name=layer_name,
        channel_indices=numpy.array([channel_index], dtype=int)
    )

    model_input = model_object.input
    list_of_input_tensors = (
        model_input if isinstance(model_input, list) else [model_input]
    )

    # Reduce the channel's many neuron activations to one number per example.
    channel_tensor = model_object.get_layer(
        name=layer_name).output[..., channel_index]
    activation_function = K.function(
        list_of_input_tensors + [K.learning_phase()],
        [stat_function_for_neuron_activations(channel_tensor)]
    )

    # Learning phase 0 = inference mode.
    return activation_function(list_of_input_matrices + [0])[0]
Example 71
Project: EasyPR-python   Author: SunskyF   File: model.py    Apache License 2.0 4 votes vote down vote up
def run_graph(self, images, outputs):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Fix: copy model.inputs before appending the learning-phase tensor.
        # The previous `inputs = model.inputs; inputs += [...]` extended
        # model.inputs in place (list aliasing), permanently corrupting the
        # model's input list; K.function only worked because of that mutation.
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))

        # Run inference
        molded_images, image_metas, windows = self.mold_inputs(images)
        # TODO: support training mode (feed RPN/GT targets and learning
        # phase 1. instead of the inference inputs below).
        model_in = [molded_images, image_metas]
        # Feed 0. (= test phase) when the model expects a learning-phase value.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v) for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 72
Project: deep_sort_yolov3   Author: Qidian213   File: yolo.py    GNU General Public License v3.0 4 votes vote down vote up
def detect_image(self, image):
        """Run YOLO on one PIL image and return person boxes as [x, y, w, h]."""
        if self.is_fixed_size:
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(
                image, tuple(reversed(self.model_image_size)))
        else:
            # Round the size down to the nearest multiple of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)

        image_data = np.array(boxed_image, dtype='float32')
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        return_boxs = []
        for det_index, class_id in reversed(list(enumerate(out_classes))):
            # Keep only 'person' detections.
            if self.class_names[class_id] != 'person':
                continue
            box = out_boxes[det_index]
            # Boxes come back as (top, left, bottom, right); convert to
            # (x, y, w, h).
            x = int(box[1])
            y = int(box[0])
            w = int(box[3] - box[1])
            h = int(box[2] - box[0])
            # Clip boxes that extend past the top-left image corner.
            if x < 0:
                w += x
                x = 0
            if y < 0:
                h += y
                y = 0
            return_boxs.append([x, y, w, h])

        return return_boxs
Example 73
Project: deep_sort_tiny_yolo3   Author: scofield1991   File: yolo.py    MIT License 4 votes vote down vote up
def detect_image(self, image):
        """Detect people in one PIL image; return boxes as [x, y, w, h]."""
        if self.is_fixed_size:
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            resized = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            # Fall back to the image size rounded down to a multiple of 32.
            fallback_size = (image.width - (image.width % 32),
                             image.height - (image.height % 32))
            resized = letterbox_image(image, fallback_size)

        pixels = np.array(resized, dtype='float32')
        pixels /= 255.
        pixels = np.expand_dims(pixels, 0)  # batch axis

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: pixels,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        person_boxes = []
        for idx, cls in reversed(list(enumerate(out_classes))):
            if self.class_names[cls] != 'person':
                continue
            box = out_boxes[idx]
            # (top, left, bottom, right) -> (x, y, w, h), clipped to the
            # top-left image border.
            x = int(box[1])
            y = int(box[0])
            w = int(box[3] - box[1])
            h = int(box[2] - box[0])
            if x < 0:
                w += x
                x = 0
            if y < 0:
                h += y
                y = 0
            person_boxes.append([x, y, w, h])

        return person_boxes
Example 74
Project: diktya   Author: BioroboticsLab   File: gan.py    Apache License 2.0 4 votes vote down vote up
def _build(self):
        """Build the combined GAN train function.

        Wires the generator (self.g) and discriminator (self.d) into one
        Keras function that computes both losses and applies both
        optimizers' updates in a single call.  Stores the result in
        self._train_function and the output names in self.metrics_names.
        """
        # Run the generator's internal graph to get the fake batch plus any
        # regularization losses its layers registered.
        fake, _, _, g_additional_losses = self.g.run_internal_graph(self.g.inputs)

        real = self.d.inputs[0]
        # The discriminator sees fakes and reals stacked along the batch axis.
        data = concat([fake, real], axis=0)

        realness, _, _, d_additional_losses = self.d.run_internal_graph(
            [data] + self.d.inputs[1:])

        # Split the discriminator scores back into fake/real halves; the
        # first 2/3 of the fake scores train the generator, the remaining
        # 1/3 train the discriminator.
        nb_fakes = fake.shape[0]
        fake_realness = realness[:nb_fakes]
        real_realness = realness[nb_fakes:]
        split = 2*nb_fakes // 3
        g_fake_realness = fake_realness[:split]
        d_fake_realness = fake_realness[split:]

        outputs = OrderedDict()
        # Generator loss: wants its fakes classified as real (label 1).
        g_loss = K.mean(K.binary_crossentropy(g_fake_realness, K.ones_like(real_realness)))
        outputs['g_loss'] = g_loss
        g_reg_loss = sum([v for v in g_additional_losses.values()])
        if g_reg_loss != 0:
            outputs['g_reg_loss'] = g_reg_loss
        g_total_loss = g_loss + g_reg_loss

        # Discriminator loss: reals -> 1, fakes -> 0.
        d_loss = K.mean(K.binary_crossentropy(real_realness, K.ones_like(real_realness)))
        d_loss += K.mean(K.binary_crossentropy(d_fake_realness, K.zeros_like(real_realness)))
        outputs['d_loss'] = d_loss
        d_reg_loss = sum([v for v in d_additional_losses.values()])
        if d_reg_loss != 0:
            outputs['d_reg_loss'] = d_reg_loss
        d_total_loss = d_loss + d_reg_loss

        # Order the concrete input tensors to match self.input_names.
        inputs = {i.name: i for i in self.g.inputs + self.d.inputs}
        inputs_list = []
        for name in self.input_names:
            inputs_list.append(inputs[name])

        # Each optimizer updates only its own network's trainable weights,
        # driven by that network's total (task + regularization) loss.
        g_updates = self.g_optimizer.get_updates(
            collect_trainable_weights(self.g), self.g.constraints, g_total_loss)
        d_updates = self.d_optimizer.get_updates(
            collect_trainable_weights(self.d), self.d.constraints, d_total_loss)

        if self.uses_learning_phase:
            lr_phase = [K.learning_phase()]
        else:
            lr_phase = []
        self.metrics_names = list(outputs.keys())
        # One function runs both optimizers' updates and returns all metrics.
        self._train_function = K.function(inputs_list + lr_phase, list(outputs.values()),
                                          updates=g_updates + d_updates)
Example 75
Project: segmentation-unet-maskrcnn   Author: olgaliak   File: model.py    MIT License 4 votes vote down vote up
def run_graph(self, images, outputs):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Fix: copy model.inputs before appending the learning-phase tensor.
        # The previous `inputs = model.inputs; inputs += [...]` extended
        # model.inputs in place (list aliasing), permanently corrupting the
        # model's input list; K.function only worked because of that mutation.
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))

        # Run inference
        molded_images, image_metas, windows = self.mold_inputs(images)
        # TODO: support training mode (feed RPN/GT targets and learning
        # phase 1. instead of the inference inputs below).
        model_in = [molded_images, image_metas]
        # Feed 0. (= test phase) when the model expects a learning-phase value.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 76
Project: Vehicle-Detection-and-Tracking-Usig-YOLO-and-Deep-Sort-with-Keras-and-Tensorflow   Author: Akhtar303   File: yolo.py    MIT License 4 votes vote down vote up
def detect_image(self, image):
        """Run YOLO on one PIL image and return vehicle bounding boxes.

        Keeps detections of class motorbike/car/bus/bicycle and returns them
        as [x, y, w, h] lists clipped to the top-left image border.
        """
        if self.is_fixed_size:
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            # Round the size down to the nearest multiple of 32.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        # Fix: replaced the convoluted inner `for pp in list3 ... continue`
        # loop (whose `continue` only advanced the class-name loop) with a
        # direct set-membership test; behavior is identical but clearer.
        vehicle_classes = {"motorbike", "car", "bus", "bicycle"}
        return_boxs = []
        for i, c in reversed(list(enumerate(out_classes))):
            if self.class_names[c] not in vehicle_classes:
                continue
            box = out_boxes[i]
            # (top, left, bottom, right) -> (x, y, w, h).
            x = int(box[1])
            y = int(box[0])
            w = int(box[3] - box[1])
            h = int(box[2] - box[0])
            # Clip boxes that extend past the top-left image corner.
            if x < 0:
                w = w + x
                x = 0
            if y < 0:
                h = h + y
                y = 0
            return_boxs.append([x, y, w, h])

        return return_boxs
Example 77
Project: Mask-RCNN   Author: yonghankim   File: model.py    MIT License 4 votes vote down vote up
def run_graph(self, images, outputs, image_metas=None):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        image_metas: If provided, the images are assumed to be already
            molded (i.e. resized, padded, and normalized)

        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # Fix: copy model.inputs before appending the learning-phase tensor.
        # The previous `inputs = model.inputs; inputs += [...]` extended
        # model.inputs in place (list aliasing), permanently corrupting the
        # model's input list; K.function only worked because of that mutation.
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))

        # Prepare inputs
        if image_metas is None:
            molded_images, image_metas, _ = self.mold_inputs(images)
        else:
            molded_images = images
        image_shape = molded_images[0].shape
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        model_in = [molded_images, image_metas, anchors]

        # Run inference. Feed 0. (= test phase) when the model expects a
        # learning-phase value.
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################ 
Example 78
Project: yoloface   Author: sthanhng   File: yolo.py    MIT License 4 votes vote down vote up
def detect_image(self, image):
    """Detect faces in a PIL image and draw a box around each detection.

    The image is letterboxed to the network's input resolution, run
    through the TF session in inference mode, and annotated in place.
    Returns the annotated image and the raw detection boxes.
    """
    start_time = timer()

    # Pick the target resolution: either the fixed model size (which must
    # be a multiple of 32) or the largest multiple-of-32 size that fits.
    if self.model_image_size != (None, None):
        assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
        assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
        target_size = tuple(reversed(self.model_image_size))
    else:
        target_size = (image.width - (image.width % 32),
                       image.height - (image.height % 32))
    boxed_image = letterbox_image(image, target_size)

    image_data = np.array(boxed_image, dtype='float32')
    print(image_data.shape)
    image_data /= 255.
    image_data = np.expand_dims(image_data, 0)  # add batch dimension

    # Run the graph with learning phase 0 (inference).
    out_boxes, out_scores, out_classes = self.sess.run(
        [self.boxes, self.scores, self.classes],
        feed_dict={
            self.yolo_model.input: image_data,
            self.input_image_shape: [image.size[1], image.size[0]],
            K.learning_phase(): 0
        })
    print('*** Found {} face(s) for this image'.format(len(out_boxes)))
    thickness = (image.size[0] + image.size[1]) // 400

    for idx, cls in reversed(list(enumerate(out_classes))):
        text = '{} {:.2f}'.format(self.class_names[cls], out_scores[idx])

        # Round box corners to ints and clamp them to the image bounds.
        top, left, bottom, right = out_boxes[idx]
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

        print(text, (left, top), (right, bottom))

        # Simulate line width by drawing nested one-pixel rectangles.
        draw = ImageDraw.Draw(image)
        for inset in range(thickness):
            draw.rectangle(
                [left + inset, top + inset, right - inset, bottom - inset],
                outline=(51, 178, 255))
        del draw

    end_time = timer()
    print('*** Processing time: {:.2f}ms'.format((end_time -
                                                      start_time) * 1000))
    return image, out_boxes
Example 79
Project: Object-and-facial-detection-in-python | Author: grebtsew | File: yolo.py | License: MIT
def detect_image_faster(self, image):
        """Detect objects in a PIL image and draw labeled boxes on it.

        Print-free variant of ``detect_image`` for speed. We are still
        slowed a lot by the numpy <-> Image transforms inside
        ``letterbox_image``; that may have to change in the future.

        Returns the annotated PIL image (annotated in place).
        """
        if self.model_image_size != (None, None):
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            # Fall back to the largest multiple-of-32 size that fits.
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')

        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.

        # Run the graph in inference mode (learning phase 0).
        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                    size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = (image.size[0] + image.size[1]) // 300

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)

            # Round box corners to ints and clamp to the image bounds.
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))

            # Keep the label inside the image when the box touches the top edge.
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])

            # FIX: the inner loop previously reused `i`, shadowing the outer
            # detection index from enumerate(); use a distinct name (matching
            # the sibling `detect_image`, which uses `thk`).
            for thk in range(thickness):
                draw.rectangle(
                    [left + thk, top + thk, right - thk, bottom - thk],
                    outline=self.colors[c])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw

        return image
Example 80
Project: Refined-Segmentation-R-CNN | Author: YalongLiu | File: model_enhancedrpn_enlargeroi_segnet_crf.py | License: MIT
def run_graph(self, images, outputs, image_metas=None):
        """Runs a sub-set of the computation graph that computes the given
        outputs.

        images: List of images to run through the graph.
        outputs: List of tuples (name, tensor) to compute. The tensors are
            symbolic TensorFlow tensors and the names are for easy tracking.
        image_metas: If provided, the images are assumed to be already
            molded (i.e. resized, padded, and normalized)

        Returns an ordered dict of results. Keys are the names received in the
        input and values are Numpy arrays.
        """
        model = self.keras_model

        # Organize desired outputs into an ordered dict
        outputs = OrderedDict(outputs)
        for o in outputs.values():
            assert o is not None

        # Build a Keras function to run parts of the computation graph.
        # FIX: copy model.inputs before extending it -- `inputs += [...]` on
        # the original list mutated model.inputs in place -- and build the
        # Keras function from the extended copy so the learning-phase
        # placeholder is part of the function's declared inputs.
        inputs = list(model.inputs)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            inputs += [K.learning_phase()]
        kf = K.function(inputs, list(outputs.values()))

        # Prepare inputs
        if image_metas is None:
            molded_images, image_metas, _ = self.mold_inputs(images)
        else:
            molded_images = images
        image_shape = molded_images[0].shape
        # Anchors
        anchors = self.get_anchors(image_shape)
        # Duplicate across the batch dimension because Keras requires it
        # TODO: can this be optimized to avoid duplicating the anchors?
        anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)
        model_in = [molded_images, image_metas, anchors]

        # Run inference (feed learning phase 0 = test mode when needed)
        if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
            model_in.append(0.)
        outputs_np = kf(model_in)

        # Pack the generated Numpy arrays into a dict and log the results.
        outputs_np = OrderedDict([(k, v)
                                  for k, v in zip(outputs.keys(), outputs_np)])
        for k, v in outputs_np.items():
            log(k, v)
        return outputs_np


############################################################
#  Data Formatting
############################################################