Python keras.backend.eval() Examples

The following code examples show how to use keras.backend.eval(). They are taken from open-source Python projects.
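As a minimal sketch of what K.eval() does (assuming a standard TensorFlow-backed Keras install): it evaluates a symbolic backend tensor and returns its value as a NumPy array.

import numpy as np
from keras import backend as K

# Wrap a NumPy array in a backend variable, build a symbolic op on it,
# then materialize the result with K.eval().
x = K.variable(np.array([[1.0, 2.0], [3.0, 4.0]]))
print(K.eval(K.sum(x, axis=1)))  # -> [3. 7.]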

Example 1
Project: phoneticSimilarity   Author: ronggong   File: regression_train_predict.py    GNU Affero General Public License v3.0
def evaluate_model(model, X, y, scaler):
    """
    eval the model with mse loss
    :param model:
    :param X:
    :param y:
    :param scaler:
    :return:
    """

    y_pred = model_prediction(model, X, scaler)

    # print(y.shape, y_pred.shape)
    y = K.variable(y)
    y_pred = K.variable(y_pred)

    loss = K.eval(mean_absolute_error(y, y_pred))

    return loss 
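The K.variable / K.eval round trip above is needed because mean_absolute_error is a symbolic backend function; purely as an illustration (with made-up arrays standing in for y and y_pred), the same per-sample values could be computed directly in NumPy:

import numpy as np

y = np.array([[0.1, 0.4], [0.3, 0.2]])       # hypothetical targets
y_pred = np.array([[0.2, 0.5], [0.1, 0.2]])  # hypothetical predictions

# Keras' mean_absolute_error averages over the last axis, one value per sample.
print(np.mean(np.abs(y - y_pred), axis=-1))  # -> [0.1 0.1]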
Example 2
Project: Keras_MedicalImgAI   Author: taoyilee   File: Visualizer.py    MIT License
def kernel_visualize(self):
        self.prepare_model()
        if self.MDConfig.show_model_summary:
            self.model.summary()
        layer: Conv2D = self.model.get_layer("conv1/conv")

        weights: tf.Variable = layer.weights[0]
        weights = np.array(K.eval(weights))
        weights -= np.min(weights)
        weights /= np.max(weights)
        weights *= 255
        weights_mosaic = np.zeros((56, 56, 3))
        for i in range(8):
            for j in range(8):
                weights_mosaic[i * 7:(i + 1) * 7, j * 7:(j + 1) * 7, :] = weights[:, :, :, i * 8 + j].squeeze()
        weights_mosaic = cv2.resize(weights_mosaic, (1024, 1024))
        cv2.imwrite("kernels.bmp", weights_mosaic) 
Example 3
Project: AI2-Reasoning-Challenge-ARC   Author: SebiSebi   File: keras_custom_layers.py    GNU General Public License v3.0
def main():
    input = Input(shape=(7, 2))
    output_1, state1_h, state1_c = LSTM(4, return_sequences=True,
                                        return_state=True)(input)
    output_2 = LSTM(4)(output_1, initial_state=[state1_h, state1_c])
    output_3 = LinkedAttention(250)([output_1, output_2])

    # state_h and state_c are only for the last timestep.
    # output_1[-1] == state_h

    model = Model(inputs=[input], outputs=[output_1, output_3])
    model.compile(loss=categorical_crossentropy,
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    # y = model.predict(np.ones((3, 7, 2)), batch_size=3)

    '''
    x = K.variable(value=np.array([[[1, 4]], [[-3, 2]]]))
    y = K.variable(value=np.array([[[1, 2, 3], [-1, 5, 2]],
                                  [[3, 4, 1], [1, 6, 4]]]))
    z = K.batch_dot(x, y)
    print(x.shape)
    print(K.eval(z))
    ''' 
Example 4
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'beta_1': float(K.get_value(self.beta_1)),
            'beta_2': float(K.get_value(self.beta_2)),
            'decay': float(K.get_value(self.decay)),
            'batch_size': int(K.get_value(self.batch_size)),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': int(K.eval(self.eta_t)),
            'eta_min': int(K.get_value(self.eta_min)),
            'eta_max': int(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose,
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad
        }
        base_config = super(AdamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 5
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'beta_1': float(K.get_value(self.beta_1)),
            'beta_2': float(K.get_value(self.beta_2)),
            'epsilon': self.epsilon,
            'schedule_decay': self.schedule_decay,
            'batch_size': int(K.get_value(self.batch_size)),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': int(K.eval(self.eta_t)),
            'eta_min': int(K.get_value(self.eta_min)),
            'eta_max': int(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose
        }
        base_config = super(NadamW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 6
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License
def get_config(self):
        config = {
            'lr': float(K.get_value(self.lr)),
            'momentum': float(K.get_value(self.momentum)),
            'decay': float(K.get_value(self.decay)),
            'nesterov': self.nesterov,
            'batch_size': int(K.get_value(self.batch_size)),
            'total_iterations': int(self.total_iterations),
            'weight_decays': self.weight_decays,
            'lr_multipliers': self.lr_multipliers,
            'use_cosine_annealing': self.use_cosine_annealing,
            't_cur': int(K.get_value(self.t_cur)),
            'eta_t': int(K.eval(self.eta_t)),
            'eta_min': int(K.get_value(self.eta_min)),
            'eta_max': int(K.get_value(self.eta_max)),
            'init_verbose': self.init_verbose
        }
        base_config = super(SGDW, self).get_config()
        return dict(list(base_config.items()) + list(config.items())) 
Example 7
Project: deep-learning-keras   Author: arnaudvl   File: nn.py    MIT License
def _calc_metric(self,y_true,y_pred):
        """
        Calculate evaluation metric.
        
        Supports: "roc-auc","norm-gini","mean_squared_error","mean_absolute_error",
                  "categorical_crossentropy","binary_crossentropy".
        """
        if self._val_loss=='roc-auc':
            metric = roc_auc_score(y_true, y_pred)
        elif self._val_loss=='norm-gini':
            metric = (2 * roc_auc_score(y_true, y_pred)) - 1
        elif self._val_loss=='mean_squared_error':
            metric = K.eval(mean_squared_error(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='mean_absolute_error':
            metric = K.eval(mean_absolute_error(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='categorical_crossentropy':
            metric = K.eval(categorical_crossentropy(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='binary_crossentropy':
            metric = K.eval(binary_crossentropy(K.variable(y_true), K.variable(y_pred)))
        else:
            raise ValueError('Invalid value for custom_eval_stopping["name"]: "roc-auc", "norm-gini", '
                             '"mean_squared_error", "mean_absolute_error", "categorical_crossentropy" '
                             'and "binary_crossentropy" are supported.')
        return metric 
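The 'norm-gini' branch uses the identity that the normalized Gini coefficient of a binary classifier equals 2 * AUC - 1. A quick illustrative check with scikit-learn:

import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

auc = roc_auc_score(y_true, y_score)  # 0.75
print(auc, 2 * auc - 1)               # 0.75 0.5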
Example 8
Project: deep-learning-keras   Author: arnaudvl   File: nn.py    MIT License
def _get_metric(self,y_true,y_pred):
        """
        Calculate metric being logged.
        
        Supports: "roc-auc","norm-gini","mean_squared_error","mean_absolute_error",
                  "categorical_crossentropy","binary_crossentropy".
        """
        if self._metric=='roc-auc':
            metric = roc_auc_score(y_true, y_pred)
        elif self._metric=='norm-gini':
            metric = (2 * roc_auc_score(y_true, y_pred)) - 1
        elif self._metric=='mean_squared_error':
            metric = K.eval(mean_squared_error(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='mean_absolute_error':
            metric = K.eval(mean_absolute_error(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='categorical_crossentropy':
            metric = K.eval(categorical_crossentropy(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='binary_crossentropy':
            metric = K.eval(binary_crossentropy(K.variable(y_true), K.variable(y_pred)))
        else:
            raise ValueError('Invalid value for custom_eval_stopping["name"]: "roc-auc", "norm-gini", '
                             '"mean_squared_error", "mean_absolute_error", "categorical_crossentropy" '
                             'and "binary_crossentropy" are supported.')
        return metric 
Example 9
Project: backdoor   Author: bolunwang   File: visualizer.py    MIT License
def save_tmp_func(self, step):

        cur_mask = K.eval(self.mask_upsample_tensor)
        cur_mask = cur_mask[0, ..., 0]
        img_filename = (
            '%s/%s' % (self.tmp_dir, 'tmp_mask_step_%d.png' % step))
        utils_backdoor.dump_image(np.expand_dims(cur_mask, axis=2) * 255,
                                  img_filename,
                                  'png')

        cur_fusion = K.eval(self.mask_upsample_tensor *
                            self.pattern_raw_tensor)
        cur_fusion = cur_fusion[0, ...]
        img_filename = (
            '%s/%s' % (self.tmp_dir, 'tmp_fusion_step_%d.png' % step))
        utils_backdoor.dump_image(cur_fusion, img_filename, 'png')

Example 10
Project: RPGOne   Author: RTHMaK   File: backend_test.py    Apache License 2.0
def test_hardmax(self):
        batch_size = 3
        knowledge_length = 5
        unnormalized_attention = K.variable(numpy.random.rand(batch_size, knowledge_length))
        hardmax_output = hardmax(unnormalized_attention, knowledge_length)
        input_value = K.eval(unnormalized_attention)
        output_value = K.eval(hardmax_output)
        assert output_value.shape == (batch_size, knowledge_length)
        # Assert all elements other than the ones are zeros
        assert numpy.count_nonzero(output_value) == batch_size
        # Assert the max values in all rows are ones
        assert numpy.all(numpy.equal(numpy.max(output_value, axis=1),
                                     numpy.ones((batch_size,))))
        # Assert ones are in the right places
        assert numpy.all(numpy.equal(numpy.argmax(output_value, axis=1),
                                     numpy.argmax(input_value, axis=1))) 
Example 11
Project: RPGOne   Author: RTHMaK   File: test_option_attention_sum.py    Apache License 2.0
def test_mean_mode_mask(self):
        # Testing the general masked batched case.
        document_indices = K.variable(np.array([[1, 2, 3, 4, 1, 2]]))
        document_probabilities = K.variable(np.array([[.1, .2, .3, .4, 0.01, 0.03]]))
        options = K.variable(np.array([[[1, 2, 1], [3, 4, 2], [4, 1, 0]]]))
        option_attention_sum_mean = K.eval(OptionAttentionSum().call([document_indices,
                                                                      document_probabilities,
                                                                      options]))
        assert_array_almost_equal(option_attention_sum_mean,
                                  np.array([[0.14999999, 0.31000003, 0.255]]))

        options = K.variable(np.array([[[1, 2, 1], [3, 4, 2], [0, 0, 0]]]))
        option_attention_sum_mean = K.eval(OptionAttentionSum().call([document_indices,
                                                                      document_probabilities,
                                                                      options]))
        assert_array_almost_equal(option_attention_sum_mean,
                                  np.array([[0.14999999, 0.31000003, 0.0]]))

        # Testing the masked batched case where input is all 0s.
        options = K.variable(np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]))
        option_attention_sum_mean = K.eval(OptionAttentionSum().call([document_indices,
                                                                      document_probabilities,
                                                                      options]))
        assert_array_almost_equal(option_attention_sum_mean,
                                  np.array([[0, 0, 0]])) 
Example 12
Project: RPGOne   Author: RTHMaK   File: attention_test.py    Apache License 2.0
def test_batched_masked(self):
        # Testing general masked non-batched case.
        vector = K.variable(numpy.array([[0.3, 0.1, 0.5], [0.3, 0.1, 0.5]]))
        matrix = K.variable(numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
                                         [[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
        mask = K.variable(numpy.array([[1.0, 1.0, 0.0], [1.0, 0.0, 1.0]]))
        result = K.eval(Attention().call([vector, matrix], mask=["_", mask]))
        assert_almost_equal(result, numpy.array([[0.52871835, 0.47128162, 0.0],
                                                 [0.50749944, 0.0, 0.49250056]]))

        # Test the case where a mask is all 0s and an input is all 0s.
        vector = K.variable(numpy.array([[0.0, 0.0, 0.0], [0.3, 0.1, 0.5]]))
        matrix = K.variable(numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]],
                                         [[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.5, 0.3, 0.2]]]))
        mask = K.variable(numpy.array([[1.0, 1.0, 0.0], [0.0, 0.0, 0.0]]))
        result = K.eval(Attention().call([vector, matrix], mask=["_", mask]))
        assert_almost_equal(result, numpy.array([[0.5, 0.5, 0.0],
                                                 [0.0, 0.0, 0.0]])) 
Example 13
Project: RPGOne   Author: RTHMaK   File: threshold_tuple_matcher_test.py    Apache License 2.0
def test_general_case(self):
        match_layer = ThresholdTupleMatcher({"type": "cosine_similarity"},
                                            self.num_hidden_layers,
                                            self.hidden_layer_width,
                                            initialization=Constant(.999),
                                            hidden_layer_activation=self.hidden_layer_activation)
        output = match_layer([self.tuple1_input, self.tuple2_input])
        model = Model([self.tuple1_input, self.tuple2_input], output)

        # Get the initial weights for use in testing
        layer_nn = match_layer.hidden_layer_weights

        # Testing general unmasked case.
        desired_overlap = K.variable(numpy.asarray([[0, 1/5, 2/5]]))
        # Desired_overlap gets fed into the inner NN.
        neural_network_feed_forward = apply_feed_forward(desired_overlap, layer_nn,
                                                         activations.get(match_layer.hidden_layer_activation))
        # Apply the final activation function.
        desired_result = activations.get(match_layer.final_activation)(K.dot(neural_network_feed_forward,
                                                                             match_layer.score_layer))
        result = model.predict([self.tuple1, self.tuple2])
        assert_array_almost_equal(result, K.eval(desired_result)) 
Example 14
Project: RPGOne   Author: RTHMaK   File: slot_similarity_tuple_matcher_test.py    Apache License 2.0
def test_general_case(self):

        match_layer = SlotSimilarityTupleMatcher({"type": "cosine_similarity"},
                                                 self.num_hidden_layers,
                                                 self.hidden_layer_width,
                                                 hidden_layer_activation=self.hidden_layer_activation)
        output = match_layer([self.tuple1_input, self.tuple2_input])
        model = Model([self.tuple1_input, self.tuple2_input], output)

        # Get the initial weights for use in testing
        dense_hidden_weights = K.eval(model.trainable_weights[0])
        score_weights = K.eval(model.trainable_weights[1])

        # Testing general unmasked case.
        similarity_function = CosineSimilarity(name="cosine_similarity")
        cosine_similarities = similarity_function.compute_similarity(K.variable(self.tuple1),
                                                                     K.variable(self.tuple2))

        # Desired_overlap gets fed into the inner NN.
        dense1_activation = numpy.dot(K.eval(cosine_similarities), dense_hidden_weights)
        final_score = numpy.dot(dense1_activation, score_weights)
        # Apply the final sigmoid activation function.
        desired_result = logistic.cdf(final_score)
        result = model.predict([self.tuple1, self.tuple2])
        assert_array_almost_equal(result, desired_result) 
Example 15
Project: AnisotropicMultiStreamCNN   Author: AnnekeMeyer   File: train.py    MIT License
def on_epoch_end(self, epoch, logs={}):
        lr = K.eval(self.model.optimizer.lr)
        logs['LR'] = lr 
Example 16
Project: RFMLS-NEU   Author: neu-spiral   File: rf_util.py    MIT License
def on_epoch_end(self, epoch, logs={}):
        optimizer = self.model.optimizer
        lr = K.eval(optimizer.lr * (1. / (1. + optimizer.decay * optimizer.iterations)))
        print('\nLR: {:.6f}\n'.format(lr)) 
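The expression evaluated above is Keras' time-based decay schedule, effective_lr = lr / (1 + decay * iterations). With illustrative numbers only:

# Illustrative values only.
base_lr, decay, iterations = 0.001, 1e-4, 2000

effective_lr = base_lr * (1.0 / (1.0 + decay * iterations))
print(effective_lr)  # 0.001 / 1.2 ≈ 0.000833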
Example 17
Project: phoneticSimilarity   Author: ronggong   File: models_siamese_tripletloss.py    GNU Affero General Public License v3.0
def calculate_loss(triplet_model,
                   generator,
                   iter_time,
                   batch_size,
                   N_diff,
                   margin):
    """calculate the max loss during Ndiff iterations"""
    max_loss = -np.inf
    ii_Ndiff = 0
    list_loss = []
    ii_counter = 0
    for input_batch in generator:
        outputs_batch = triplet_model.predict_on_batch(input_batch)
        loss_batch = K.eval(K.mean(triplet_loss(outputs_batch, margin=margin)))
        # print('predict on iter', ii_counter, loss_batch)

        if loss_batch > max_loss:
            max_loss = loss_batch

        ii_Ndiff += 1
        if ii_Ndiff >= N_diff: # every Ndiff iterations append and reset max_loss
            # print(max_loss)
            list_loss.append(max_loss)
            max_loss = -np.inf
            ii_Ndiff = 0

        ii_counter += 1
        if ii_counter >= iter_time: # after iterating all samples, return mean loss
            return np.mean(list_loss) 
Example 18
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0
def evaluate_model(model, X, y, scaler):

    y_pred = np.zeros_like(y)
    for ii in range(len(X)):
        X_sample = np.expand_dims(scaler.transform(X[ii]), axis=0)
        y_pred[ii] = model.predict_on_batch(X_sample)

    print(y.shape, y_pred.shape)
    y = K.variable(y)
    y_pred = K.variable(y_pred)

    loss = K.eval(categorical_crossentropy(y, y_pred))

    return np.mean(loss) 
Example 19
Project: 3DGCN   Author: blackmints   File: callback.py    MIT License
def on_epoch_end(self, epoch, logs=None):
        logs.update({'lr': K.eval(self.model.optimizer.lr)})
        super().on_epoch_end(epoch, logs) 
Example 20
Project: trVAE   Author: theislab   File: _utils.py    MIT License
def compute_kernel(x, y, kernel='rbf', **kwargs):
    """
        Computes RBF kernel between x and y.
        # Parameters
            x: Tensor
                Tensor with shape [batch_size, z_dim]
            y: Tensor
                Tensor with shape [batch_size, z_dim]
        # Returns
            returns the computed RBF kernel between x and y
    """
    scales = kwargs.get("scales", [])
    if kernel == "rbf":
        x_size = K.shape(x)[0]
        y_size = K.shape(y)[0]
        dim = K.shape(x)[1]
        tiled_x = K.tile(K.reshape(x, K.stack([x_size, 1, dim])), K.stack([1, y_size, 1]))
        tiled_y = K.tile(K.reshape(y, K.stack([1, y_size, dim])), K.stack([x_size, 1, 1]))
        return K.exp(-K.mean(K.square(tiled_x - tiled_y), axis=2) / K.cast(dim, tf.float32))
    elif kernel == 'raphy':
        scales = K.variable(value=np.asarray(scales))
        squared_dist = K.expand_dims(squared_distance(x, y), 0)
        scales = K.expand_dims(K.expand_dims(scales, -1), -1)
        weights = K.eval(K.shape(scales)[0])
        weights = K.variable(value=np.asarray(weights))
        weights = K.expand_dims(K.expand_dims(weights, -1), -1)
        return K.sum(weights * K.exp(-squared_dist / (K.pow(scales, 2))), 0)
    elif kernel == "multi-scale-rbf":
        sigmas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6]

        beta = 1. / (2. * (K.expand_dims(sigmas, 1)))
        distances = squared_distance(x, y)
        s = K.dot(beta, K.reshape(distances, (1, -1)))

        return K.reshape(tf.reduce_sum(tf.exp(-s), 0), K.shape(distances)) / len(sigmas) 
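As a standalone sketch of the 'rbf' branch (not the project's own helper, and using random toy inputs), the kernel matrix can be built with the same backend ops and inspected via K.eval():

import numpy as np
import tensorflow as tf
from keras import backend as K

x = K.variable(np.random.rand(4, 3).astype("float32"))  # [batch_size, z_dim]
y = K.variable(np.random.rand(5, 3).astype("float32"))

x_size, y_size, dim = K.shape(x)[0], K.shape(y)[0], K.shape(x)[1]
tiled_x = K.tile(K.reshape(x, K.stack([x_size, 1, dim])), K.stack([1, y_size, 1]))
tiled_y = K.tile(K.reshape(y, K.stack([1, y_size, dim])), K.stack([x_size, 1, 1]))
kernel = K.exp(-K.mean(K.square(tiled_x - tiled_y), axis=2) / K.cast(dim, tf.float32))

print(K.eval(kernel).shape)  # (4, 5)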
Example 21
Project: Keras_MedicalImgAI   Author: taoyilee   File: callback.py    MIT License
def on_epoch_end(self, epoch, logs={}):
        """
        Calculate the average AUROC and save the best model weights according
        to this metric.

        """
        self.stats["lr"] = float(kb.eval(self.model.optimizer.lr))
        print(f"current learning rate: {self.stats['lr']}")

        """
        y_hat shape: (#samples, len(class_names))
        y: [(#samples, 1), (#samples, 1) ... (#samples, 1)]
        """
        print(f"*** epoch#{epoch + 1} dev auroc ***")
        _, mean_auroc, _, _ = metrics.compute_auroc(self.model, self.generator, self.class_mode, self.class_names)

        if mean_auroc > self.stats["best_mean_auroc"]:
            print(f"update best auroc from {self.stats['best_mean_auroc']} to {mean_auroc}")

            # 1. copy best model
            shutil.copy(self.weights_path, self.best_weights_path)

            # 2. update log file
            print(f"update log file: {self.best_auroc_log_path}")
            with open(self.best_auroc_log_path, "a") as f:
                f.write(f"(epoch#{epoch + 1}) auroc: {mean_auroc}, lr: {self.stats['lr']}\n")

            # 3. write stats output, this is used for resuming the training
            with open(self.stats_output_path, 'w') as f:
                json.dump(self.stats, f)

            print(f"update model file: {self.weights_path} -> {self.best_weights_path}")
            self.stats["best_mean_auroc"] = mean_auroc
        return 
Example 22
Project: Keras_MedicalImgAI   Author: taoyilee   File: test_DataSet.py    MIT License
def main(config_file):
    tr = Trainer(config_file)
    tr.prepare_datasets()
    tr.prepare_loss_function()
    targets = np.array(tr.train_generator.targets())
    targets = targets[0:3]
    # pred = np.random.rand(14)
    pred = 0.3 * np.ones((3, 14), dtype=K.floatx())
    loss = tr.loss_function(targets, pred)
    print(K.eval(loss)) 
Example 23
Project: ICASSP2019_TCN   Author: DSIP-UPatras   File: utils.py    MIT License
def on_epoch_end(self, epoch, logs=None):
        lr = K.eval(self.model.optimizer.lr)
        lr_summary = tf.Summary(
            value=[tf.Summary.Value(tag='lr', simple_value=lr)])
        self.writer.add_summary(lr_summary, epoch)
        self.writer.flush()
        super(MyTensorboard, self).on_epoch_end(epoch, logs) 
Example 24
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0
def test_FScore2():
    '''test for FScore2'''
    # Test 1:
    y_true = [[1, 0, 0, 1],
              [0, 1, 0, 1]]
    y_pred = [[1, 1, 1, 1],
              [1, 0, 1, 1]]

    score_python = FScore2_python(y_true, y_pred)

    y_true = K.constant(y_true)
    y_pred = K.constant(y_pred)
    score_keras = K.eval(FScore2(y_true, y_pred))
    print('python:', score_python)
    print('keras:', score_keras)
    assert(abs(score_keras-score_python) < 0.0001)
    print('Test 1 passed!') 
Example 25
Project: MS-CMR2019   Author: Suiiyu   File: train.py    MIT License
def lr_poly_decay(model, base_lr, curr_iter, max_iter, power=0.5):
    lrate = base_lr * (1.0 - (curr_iter / float(max_iter))) ** power
    K.set_value(model.optimizer.lr, lrate)

    return K.eval(model.optimizer.lr) 
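For context, the polynomial schedule above shrinks the rate as curr_iter approaches max_iter; with illustrative numbers (base_lr=0.01, power=0.5, halfway through training) the value returned by K.eval would be about 0.00707:

# Illustrative values only.
base_lr, power = 0.01, 0.5
curr_iter, max_iter = 500, 1000

lrate = base_lr * (1.0 - curr_iter / float(max_iter)) ** power
print(lrate)  # 0.01 * 0.5 ** 0.5 ≈ 0.00707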
Example 26
Project: MS-CMR2019   Author: Suiiyu   File: train.py    MIT License
def lr_ep_decay(model, base_lr, curr_ep, step=0.1):
    
    lrate = base_lr * step**(curr_ep/40)
    K.set_value(model.optimizer.lr, lrate)
    return K.eval(model.optimizer.lr) 
Example 27
Project: BERT   Author: yyht   File: test_transformer.py    Apache License 2.0
def compare_two_models(model_a, model_b):
        assert len(model_a.weights) == len(model_b.weights)
        for x, y in zip(model_a.weights, model_b.weights):
            assert (K.eval(x) == K.eval(y)).all() 
Example 28
Project: rpsai   Author: qwertpi   File: train.py    GNU General Public License v3.0
def on_epoch_end(self,epoch,logs=None):
        print(K.eval(self.model.optimizer.lr)) 
Example 29
Project: wtte-rnn   Author: ragulpr   File: test_keras.py    MIT License
def test_keras_unstack_hack():
    y_true_np = np.random.random([1, 3, 2])
    y_true_np[:, :, 0] = 0
    y_true_np[:, :, 1] = 1

    y_true_keras = K.variable(y_true_np)

    y, u = wtte._keras_unstack_hack(y_true_keras)
    y_true_keras_new = K.stack([y, u], axis=-1)

    np.testing.assert_array_equal(K.eval(y_true_keras_new), y_true_np)

# SANITY CHECK: Use pure Weibull data censored at C(ensoring point).
# Should converge to the generating A(alpha) and B(eta) for each timestep 
Example 30
Project: Variational-AutoEncoder-For-Novelty-Detection   Author: LordAlucard90   File: helper.py    GNU General Public License v3.0
def _test(self, vae, hidden, reg_val=None, drp_val=None):
        keras.backend.clear_session()

        self.Trn_an_ts = np.append(self.Trn, self.Val).reshape((len(self.Trn) + len(self.Val), 28 * 28))
        self.Trn_an_ts = tf.convert_to_tensor(self.Trn_an_ts, np.float32)
        self.Tst_an_ts = copy(self.Tst).reshape((len(self.Tst), 28 * 28))
        self.Tst_an_ts = tf.convert_to_tensor(self.Tst_an_ts, np.float32)

        m_generator = ModelGenerator(vae=vae, hidden=hidden, reg_val=reg_val, drp_val=drp_val)
        name = m_generator.get_name()
        m_generator.load_best_w(self.models_dir)
        model = m_generator.get_model()

        loss = model.evaluate(self.Tst, self.Tst, verbose=0)

        trn_pred = model.predict(self.Trn_an).reshape((len(self.Trn_an), 28 * 28))
        trn_pred = tf.convert_to_tensor(trn_pred, np.float32)

        trn_mse = K.eval(mean_squared_error(self.Trn_an_ts, trn_pred))
        th = trn_mse[np.argsort(trn_mse)[-len(self.Trn_an_idx)]]

        tst_pred = model.predict(self.Tst_an).reshape((len(self.Tst_an), 28 * 28))
        tst_pred = tf.convert_to_tensor(tst_pred, np.float32)

        tst_mse = K.eval(mean_squared_error(self.Tst_an_ts, tst_pred))

        [prc, _], [rcl, _], [f1, _], _ = precision_recall_fscore_support(self.Tst_lbls[self.Tst_idx] > 0, tst_mse > th)

        return [name, loss, th, prc, rcl, f1] 
Example 31
Project: keras-fcn   Author: JihongJu   File: test_blocks.py    MIT License
def test_vgg_deconv():
    if K.image_data_format() == 'channels_first':
        x1 = K.variable(np.random.random((1, 512, 8, 8)))
        y1_shape = (1, 21, 18, 18)
        x2 = K.variable(np.random.random((1, 512, 27, 27)))
        y2_shape = (1, 21, 38, 38)
        x3 = K.variable(np.random.random((1, 256, 53, 53)))
        y3_shape = (1, 21, 312, 312)
    else:
        x1 = K.variable(np.random.random((1, 8, 8, 512)))
        y1_shape = (1, 18, 18, 21)
        x2 = K.variable(np.random.random((1, 27, 27, 512)))
        y2_shape = (1, 38, 38, 21)
        x3 = K.variable(np.random.random((1, 53, 53, 256)))
        y3_shape = (1, 312, 312, 21)

    upscore1 = vgg_deconv(classes=21)(x1, None)
    assert K.int_shape(upscore1) == y1_shape
    assert not np.any(np.isnan(K.eval(upscore1)))

    upscore2 = vgg_deconv(classes=21)(x2, upscore1)
    assert K.int_shape(upscore2) == y2_shape
    assert not np.any(np.isnan(K.eval(upscore2)))

    upscore3 = vgg_deconv(classes=21, kernel_size=(16, 16),
                          strides=(8, 8))(x3, upscore2)
    assert K.int_shape(upscore3) == y3_shape
    assert not np.any(np.isnan(K.eval(upscore3))) 
Example 32
Project: keras-fcn   Author: JihongJu   File: test_losses.py    MIT License
def test_categorical_crossentropy():

    y_true = np.reshape([1, 1, 0, 0], [1, 2, 2]).astype('int')
    y_true = np.eye(2)[y_true]
    y_pred = np.ones((1, 2, 2, 2)) * 0.5

    y_true, y_pred = K.variable(y_true), K.variable(y_pred)

    loss = mean_categorical_crossentropy(y_true, y_pred)
    loss = K.eval(loss)
    assert np.allclose(loss, 0.69314718) 
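The expected value follows from the setup: every pixel's true class is predicted with probability 0.5, so the per-pixel (and hence mean) categorical cross-entropy is -ln(0.5) ≈ 0.69314718:

import numpy as np
print(-np.log(0.5))  # 0.6931471805599453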
Example 33
Project: deep-pmsm   Author: wkirgsn   File: rnn_model_utils.py    MIT License
def score(self, x, y, **kwargs):
        """This score func will return the loss"""
        # sample weight needed

        if kwargs.pop('score_directly', False):
            #  x = actual, y = prediction
            if np.any(np.isnan(y)):
                loss = 9999  # NaN -> const.
            else:
                loss = np.mean(K.eval(
                    self.model.loss_functions[0](K.cast(x, np.float32),
                                                 K.cast(y, np.float32))))
            print(f'Loss: {loss:.6} K²')
            return loss
        else:
            p_id_col = kwargs.pop('p_id_col', 'p_id_col_not_found')
            downsample_rate = kwargs.pop('downsample_rate', None)
            tbptt_len = kwargs.pop('tbptt_len', None)
            batch_size = kwargs['batch_size']
            batch_generation_cfg = {'p_id_col': p_id_col,
                                    'batch_size': batch_size,
                                    'downsample_rate': downsample_rate,
                                    'tbptt_len': tbptt_len}
            x, sample_weights = \
                self._generate_batches(x, **batch_generation_cfg)
            y, _ = \
                self._generate_batches(y, **batch_generation_cfg)
            kwargs['sample_weight'] = sample_weights

            return super().score(x, y, **kwargs) 
Example 34
Project: deep-pmsm   Author: wkirgsn   File: data.py    MIT License
def score(self, y_hat, y_true=None):
        """Prints score by comparing given y_hat with dataset's target,
        which is in the testset. Returns the actual target data as well"""
        if y_true is not None:
            act = y_true
        else:
            # compare with actual
            target_col = self.y_cols if y_hat.shape[1] > 1 else [self.y_cols[-1]]
            act = pd.concat(self.actual, axis=0, ignore_index=True)[target_col]
            assert set(act.columns.tolist()) == set(y_hat.columns.tolist()), 'ping!'
        score = np.mean(K.eval(self.loss_func(act.values, y_hat.values)))
        print('{:}: {:.6} K²'.format(cfg.data_cfg['loss'], score))
        return score, act 
Example 35
Project: deep-pmsm   Author: wkirgsn   File: cnn_model_utils.py    MIT License
def score(self, x, y, **kwargs):
        """This score func will return the loss"""

        if kwargs.pop('score_directly', False):
            #  x = actual, y = prediction
            if np.any(np.isnan(y)):
                loss = 9999  # NaN -> const.
            else:
                loss = np.mean(K.eval(
                    self.model.loss_functions[0](K.cast(x, np.float32),
                                                 K.cast(y, np.float32))))
            print(f'Loss: {loss:.6} K²')
            return loss
        else:
            raise NotImplementedError()
            # todo: make the below code work
            # sample weight needed
            p_id_col = kwargs.pop('p_id_col', 'p_id_col_not_found')
            batch_size = kwargs.get('batch_size', None)
            x, sample_weights = self._generate_batches(x, p_id_col=p_id_col,
                                                       batch_size=batch_size)
            kwargs['sample_weight'] = sample_weights
            n_dummy = batch_size - np.count_nonzero(sample_weights[-batch_size:, 0])
            y = np.vstack((y.values, np.zeros((n_dummy, y.shape[1]))))

            return super().score(x, y, **kwargs) 
Example 36
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelorient.py    MIT License
def orientation_loss2(y_true, y_pred, obj_mask, mf):
    # Count anchors that carry a ground-truth orientation (computed but unused below,
    # kept from the original implementation).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # cos^2 + sin^2 = 1, so the dot product of the (cos, sin) pairs lies in [-1, 1];
    # 1 - dot is the per-bin orientation error.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over the objects present; return 0 when the mask is empty.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 37
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modeltree.py    MIT License
def orientation_loss(y_true, y_pred, obj_mask, mf):
    # Zero out anchors without objects and split predictions into (cos, sin) pairs per bin.
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = K.reshape(y_pred * obj_mask, [-1, BIN, 2])

    # Count anchors that carry a ground-truth orientation (computed but unused below,
    # kept from the original implementation).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # cos^2 + sin^2 = 1, so the dot product of the (cos, sin) pairs lies in [-1, 1].
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Scaled (1 - mean dot product) over the objects present; 0 when the mask is empty.
    loss = tf.cond(allobj > 0, lambda: 10.0 * (1 - losssum / allobj), lambda: 0.0)
    return loss
Example 38
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmerge.py    MIT License
def orientation_loss(y_true, y_pred, obj_mask, mf):
    # Zero out anchors without objects and split predictions into (cos, sin) pairs per bin.
    y_true = K.reshape(y_true * obj_mask, [-1, BIN, 2])
    y_pred = K.reshape(y_pred * obj_mask, [-1, BIN, 2])

    # Count anchors that carry a ground-truth orientation (computed but unused below,
    # kept from the original implementation).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # cos^2 + sin^2 = 1, so the dot product of the (cos, sin) pairs lies in [-1, 1].
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Scaled (1 - mean dot product) over the objects present; 0 when the mask is empty.
    loss = tf.cond(allobj > 0, lambda: 3.0 * (1 - losssum / allobj), lambda: 0.0)
    return loss
Example 39
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License
def orientation_loss2(y_true, y_pred, obj_mask, mf):
    # Count anchors that carry a ground-truth orientation (computed but unused below,
    # kept from the original implementation).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # cos^2 + sin^2 = 1, so the dot product of the (cos, sin) pairs lies in [-1, 1];
    # 1 - dot is the per-bin orientation error.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over the objects present; return 0 when the mask is empty.
    loss = tf.cond(allobj > 0, lambda: losssum / allobj, lambda: 0.0)
    return loss
Example 40
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License
def orientation_loss2(y_true, y_pred, obj_mask, mf):
    # Count anchors that carry a ground-truth orientation (computed but unused below,
    # kept from the original implementation).
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, tf.constant(0.5))
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # cos^2 + sin^2 = 1, so the dot product of the (cos, sin) pairs lies in [-1, 1];
    # 1 - dot is the per-bin orientation error.
    loss = y_true[:, :, 0] * y_pred[:, :, 0] + y_true[:, :, 1] * y_pred[:, :, 1]
    loss = 1 - loss

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Average over the objects present; return 0 when the mask is empty.
    loss = K.switch(allobj > 0, losssum / allobj, 0.0)
    return loss
Example 41
Project: keras-centernet   Author: see--   File: decode_test.py    MIT License
def test_ctdet_decode():
  np.random.seed(32)
  hm = np.random.randn(2, 64, 64, 80)
  reg = np.random.randn(2, 64, 64, 2) * 10.0
  wh = np.random.randn(2, 64, 64, 2) * 20.0

  keras_hm = K.constant(hm)
  keras_reg = K.constant(reg)
  keras_wh = K.constant(wh)

  keras_detections = K.eval(_ctdet_decode(keras_hm, keras_reg, keras_wh, output_stride=1))

  gold_fn = 'tests/data/ctdet_decode_gold.p'
  if not os.path.exists(gold_fn):
    import torch as th
    import sys
    sys.path.append(os.path.expanduser('~/Pytorch/CenterNet/src'))
    from lib.models.decode import ctdet_decode  # noqa
    py_hm = th.from_numpy(hm.transpose(0, 3, 1, 2)).float()
    py_hm.sigmoid_()
    py_reg = th.from_numpy(reg.transpose(0, 3, 1, 2)).float()
    py_wh = th.from_numpy(wh.transpose(0, 3, 1, 2)).float()
    py_detections = ctdet_decode(py_hm, py_reg, py_wh).detach().numpy()
    with open(gold_fn, 'wb') as f:
      pickle.dump(py_detections, f)
  else:
    with open(gold_fn, 'rb') as f:
      py_detections = pickle.load(f)
  assert np.allclose(keras_detections, py_detections) 
Example 42
Project: DeepCAGE   Author: kimmo1019   File: 4.Classification.py    MIT License
def on_epoch_end(self,epoch,logs={}):
        lr = self.model.optimizer.lr
        decay = self.model.optimizer.decay
        iterations = self.model.optimizer.iterations
        lr_with_decay = lr / (1. + decay * K.cast(iterations, K.dtype(decay)))
        print(K.eval(lr_with_decay))
        self.single_model.save_weights('%s/checkpoint/model_weights_at_epoch_%d.h5'%(DPATH,epoch)) 
Example 43
Project: DeepCAGE   Author: kimmo1019   File: 5.Regression.py    MIT License
def on_epoch_end(self,epoch,logs={}):
        lr = self.model.optimizer.lr
        decay = self.model.optimizer.decay
        iterations = self.model.optimizer.iterations
        lr_with_decay = lr / (1. + decay * K.cast(iterations, K.dtype(decay)))
        print(K.eval(lr_with_decay))
        self.single_model.save_weights('%s/checkpoint/regression_multi_gpu_model_weights_at_epoch_%d.h5'%(DPATH,epoch)) 
Example 44
Project: RPGOne   Author: RTHMaK   File: backend_test.py    Apache License 2.0
def test_cumulative_sum(self):
        vector = numpy.asarray([1, 2, 3, 4, 5])
        result = K.eval(cumulative_sum(K.variable(vector)))
        assert_allclose(result, [1, 3, 6, 10, 15])

        vector = numpy.asarray([[1, 2, 3, 4, 5],
                                [1, 1, 1, 1, 1],
                                [3, 5, 0, 0, 0]])
        result = K.eval(cumulative_sum(K.variable(vector)))
        assert_allclose(result, [[1, 3, 6, 10, 15],
                                 [1, 2, 3, 4, 5],
                                 [3, 8, 8, 8, 8]]) 
Example 45
Project: RPGOne   Author: RTHMaK   File: cosine_similarity_test.py    Apache License 2.0
def test_compute_similarity_does_a_cosine_similarity(self):
        a_vectors = numpy.asarray([[numpy.random.random(3) for _ in range(2)]], dtype="float32")
        b_vectors = numpy.asarray([[numpy.random.random(3) for _ in range(2)]], dtype="float32")
        normed_a = K.l2_normalize(K.variable(a_vectors), axis=-1)
        normed_b = K.l2_normalize(K.variable(b_vectors), axis=-1)
        desired_result = K.eval(self.dot_product.compute_similarity(normed_a, normed_b))
        result = K.eval(self.cosine_similarity.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (1, 2)    # batch_size = 1
        assert numpy.all(result == desired_result) 
Example 46
Project: RPGOne   Author: RTHMaK   File: cosine_similarity_test.py    Apache License 2.0
def test_compute_similarity_works_with_higher_order_tensors(self):
        a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
        b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
        normed_a = K.eval(K.l2_normalize(K.variable(a_vectors), axis=-1))
        normed_b = K.eval(K.l2_normalize(K.variable(b_vectors), axis=-1))
        result = K.eval(self.cosine_similarity.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (5, 4, 3, 6)
        assert_almost_equal(result[3, 2, 1, 3],
                            numpy.dot(normed_a[3, 2, 1, 3], normed_b[3, 2, 1, 3]),
                            decimal=6) 
Example 47
Project: RPGOne   Author: RTHMaK   File: linear_test.py    Apache License 2.0
def test_compute_similarity_does_a_weighted_product(self):
        linear = Linear(name='linear', combination='x,y')
        linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5], [2.0], [-1.0]]))
        linear.bias = K.variable(numpy.asarray([.1]))
        a_vectors = numpy.asarray([[[1, 1, 1], [-1, -1, 0]]])
        b_vectors = numpy.asarray([[[0], [1]]])
        result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (1, 2,)
        assert_almost_equal(result, [[2.3, -1.1]]) 
Example 48
Project: RPGOne   Author: RTHMaK   File: linear_test.py    Apache License 2.0
def test_compute_similarity_works_with_multiply_combinations(self):
        linear = Linear(name='linear', combination='x*y')
        linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
        linear.bias = K.variable(numpy.asarray([0]))
        a_vectors = numpy.asarray([[1, 1], [-1, -1]])
        b_vectors = numpy.asarray([[1, 0], [0, 1]])
        result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (2,)
        assert_almost_equal(result, [-.3, -.5]) 
Example 49
Project: RPGOne   Author: RTHMaK   File: linear_test.py    Apache License 2.0
def test_compute_similarity_works_with_divide_combinations(self):
        linear = Linear(name='linear', combination='x/y')
        linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
        linear.bias = K.variable(numpy.asarray([0]))
        a_vectors = numpy.asarray([[1, 1], [-1, -1]])
        b_vectors = numpy.asarray([[1, 2], [2, 1]])
        result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (2,)
        assert_almost_equal(result, [-.05, -.35]) 
Example 50
Project: RPGOne   Author: RTHMaK   File: linear_test.py    Apache License 2.0
def test_compute_similarity_works_with_add_combinations(self):
        linear = Linear(name='linear', combination='x+y')
        linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
        linear.bias = K.variable(numpy.asarray([0]))
        a_vectors = numpy.asarray([[1, 1], [-1, -1]])
        b_vectors = numpy.asarray([[1, 0], [0, 1]])
        result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (2,)
        assert_almost_equal(result, [-.1, .3]) 
Example 51
Project: RPGOne   Author: RTHMaK   File: linear_test.py    Apache License 2.0
def test_compute_similarity_works_with_subtract_combinations(self):
        linear = Linear(name='linear', combination='x-y')
        linear.weight_vector = K.variable(numpy.asarray([[-.3], [.5]]))
        linear.bias = K.variable(numpy.asarray([0]))
        a_vectors = numpy.asarray([[1, 1], [-1, -1]])
        b_vectors = numpy.asarray([[1, 0], [0, 1]])
        result = K.eval(linear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (2,)
        assert_almost_equal(result, [.5, -.7]) 
Example 52
Project: RPGOne   Author: RTHMaK   File: bilinear_test.py    Apache License 2.0
def test_compute_similarity_does_a_bilinear_product(self):
        bilinear = Bilinear(name='bilinear')
        weights = numpy.asarray([[-.3, .5], [2.0, -1.0]])
        bilinear.weight_matrix = K.variable(weights)
        bilinear.bias = K.variable(numpy.asarray([.1]))
        a_vectors = numpy.asarray([[1, 1], [-1, -1]])
        b_vectors = numpy.asarray([[1, 0], [0, 1]])
        result = K.eval(bilinear.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (2,)
        assert_almost_equal(result, [1.8, .6]) 
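The expected scores follow from the bilinear form aᵀ W b + bias; a quick NumPy re-check of the two pairs above (illustrative only):

import numpy as np

W = np.array([[-0.3, 0.5], [2.0, -1.0]])
a = np.array([[1, 1], [-1, -1]])
b = np.array([[1, 0], [0, 1]])

# Row-wise bilinear products a_i . (W @ b_i) plus the bias 0.1.
print(np.einsum('ij,jk,ik->i', a, W, b) + 0.1)  # -> [1.8 0.6]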
Example 53
Project: RPGOne   Author: RTHMaK   File: dot_product_test.py    Apache License 2.0
def test_compute_similarity_does_a_dot_product(self):
        a_vectors = numpy.asarray([[1, 1, 1], [-1, -1, -1]])
        b_vectors = numpy.asarray([[1, 0, 1], [1, 0, 0]])
        result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (2,)
        assert numpy.all(result == [2, -1]) 
Example 54
Project: RPGOne   Author: RTHMaK   File: dot_product_test.py    Apache License 2.0
def test_compute_similarity_works_with_higher_order_tensors(self):
        a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
        b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
        result = K.eval(self.dot_product.compute_similarity(K.variable(a_vectors), K.variable(b_vectors)))
        assert result.shape == (5, 4, 3, 6)
        assert_almost_equal(result[3, 2, 1, 3],
                            numpy.dot(a_vectors[3, 2, 1, 3], b_vectors[3, 2, 1, 3]),
                            decimal=6) 
Example 55
Project: RPGOne   Author: RTHMaK   File: masked_operations_test.py    Apache License 2.0
def test_masked_batch_dot_handles_uneven_tensors(self):
        # We're going to test masked_batch_dot with tensors of shape (batch_size, a_length,
        # embedding_dim) and (batch_size, embedding_dim).  The result should have shape
        # (batch_size, a_length).
        embedding_dim = 3
        a_length = 5
        batch_size = 2

        tensor_a = numpy.random.rand(batch_size, a_length, embedding_dim)
        tensor_b = numpy.random.rand(batch_size, embedding_dim)
        mask_a = numpy.ones((batch_size, a_length))
        mask_a[0, 3] = 0
        mask_b = numpy.ones((batch_size,))
        mask_b[1] = 0
        result = K.eval(masked_batch_dot(K.variable(tensor_a),
                                         K.variable(tensor_b),
                                         K.variable(mask_a),
                                         K.variable(mask_b)))
        assert result[0, 0] != 0
        assert result[0, 1] != 0
        assert result[0, 2] != 0
        assert result[0, 3] == 0
        assert result[0, 4] != 0
        assert numpy.all(result[1, :] == numpy.zeros((a_length)))

        # We should get the same result if we flip the order of the tensors.
        flipped_result = K.eval(masked_batch_dot(K.variable(tensor_b),
                                                 K.variable(tensor_a),
                                                 K.variable(mask_b),
                                                 K.variable(mask_a)))
        assert numpy.all(result == flipped_result) 
Example 56
Project: RPGOne   Author: RTHMaK   File: masked_operations_test.py    Apache License 2.0
def test_masked_batch_dot_handles_uneven_higher_order_tensors(self):
        # We're going to test masked_batch_dot with tensors of shape (batch_size, common,
        # a_length, embedding_dim) and (batch_size, common, embedding_dim).  The result should have
        # shape (batch_size, common, a_length).  This currently doesn't work with the theano
        # backend, because of an inconsistency in K.batch_dot for higher-order tensors.  The code
        # will crash if you try to run this in Theano, so we require tensorflow for this test.
        embedding_dim = 3
        common_length = 4
        a_length = 5
        batch_size = 2

        tensor_a = numpy.random.rand(batch_size, common_length, a_length, embedding_dim)
        tensor_b = numpy.random.rand(batch_size, common_length, embedding_dim)
        mask_a = numpy.ones((batch_size, common_length, a_length))
        mask_a[1, 1, 3] = 0
        mask_b = numpy.ones((batch_size, common_length))
        mask_b[1, 2] = 0
        result = K.eval(masked_batch_dot(K.variable(tensor_a),
                                         K.variable(tensor_b),
                                         K.variable(mask_a),
                                         K.variable(mask_b)))
        assert numpy.all(result[0, :, :] != numpy.zeros((common_length, a_length)))
        assert numpy.all(result[1, 0, :] != numpy.zeros((a_length)))
        assert result[1, 1, 0] != 0
        assert result[1, 1, 1] != 0
        assert result[1, 1, 2] != 0
        assert result[1, 1, 3] == 0
        assert result[1, 1, 4] != 0
        assert numpy.all(result[1, 2, :] == numpy.zeros((a_length)))
        assert numpy.all(result[1, 3, :] != numpy.zeros((a_length)))

        # We should get the same result if we pass the smaller tensor in first.
        flipped_result = K.eval(masked_batch_dot(K.variable(tensor_b),
                                                 K.variable(tensor_a),
                                                 K.variable(mask_b),
                                                 K.variable(mask_a)))
        assert numpy.all(result == flipped_result) 
Example 57
Project: RPGOne   Author: RTHMaK   File: masked_operations_test.py    Apache License 2.0
def test_l1_normalize_no_mask(self):
        # Testing the general unmasked 1D case.
        vector_1d = K.variable(numpy.array([[2, 1, 5, 7]]))
        vector_1d_normalized = K.eval(l1_normalize(vector_1d))
        assert_almost_equal(vector_1d_normalized,
                            numpy.array([[0.13333333, 0.06666666,
                                          0.33333333, 0.46666666]]))
        assert_almost_equal(1.0, numpy.sum(vector_1d_normalized), decimal=6)

        # Testing the unmasked 1D case with all 0s.
        vector_1d_zeros = K.variable(numpy.array([[0, 0, 0, 0]]))
        vector_1d_zeros_normalized = K.eval(l1_normalize(vector_1d_zeros))
        assert_array_almost_equal(vector_1d_zeros_normalized,
                                  numpy.array([[0.25, 0.25, 0.25, 0.25]]))

        # Testing the general unmasked batched case when
        # inputs are not all 0's
        matrix = K.variable(numpy.array([[2, 1, 5, 7], [2, 2, 2, 2]]))
        matrix_normalized = K.eval(l1_normalize(matrix))
        assert_array_almost_equal(matrix_normalized,
                                  numpy.array([[0.13333333, 0.06666666,
                                                0.33333333, 0.46666666],
                                               [0.25, 0.25,
                                                0.25, 0.25]]))
        assert_almost_equal(numpy.array([1.0, 1.0]),
                            numpy.sum(matrix_normalized, axis=1), decimal=6)

        # Testing the general unmasked batched case when
        # one row is all 0's
        matrix = K.variable(numpy.array([[2, 1, 5, 7], [0, 0, 0, 0]]))
        matrix_normalized = K.eval(l1_normalize(matrix))
        assert_array_almost_equal(matrix_normalized,
                                  numpy.array([[0.13333333, 0.06666666,
                                                0.33333333, 0.46666666],
                                               [0.25, 0.25,
                                                0.25, 0.25]]))
        assert_almost_equal(numpy.array([1.0, 1.0]),
                            numpy.sum(matrix_normalized, axis=1), decimal=6) 
Example 58
Project: RPGOne   Author: RTHMaK   File: masked_operations_test.py    Apache License 2.0
def test_masked_softmax_no_mask(self):
        # Testing the general unmasked 1D case.
        vector_1d = K.variable(numpy.array([[1.0, 2.0, 3.0]]))
        vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, None))
        assert_array_almost_equal(vector_1d_softmaxed,
                                  numpy.array([[0.090031, 0.244728, 0.665241]]))
        assert_almost_equal(1.0, numpy.sum(vector_1d_softmaxed), decimal=6)

        vector_1d = K.variable(numpy.array([[1.0, 2.0, 5.0]]))
        vector_1d_softmaxed = K.eval(masked_softmax(vector_1d, None))
        assert_array_almost_equal(vector_1d_softmaxed,
                                  numpy.array([[0.017148, 0.046613, 0.93624]]))

        # Testing the unmasked 1D case where the input is all 0s.
        vector_zero = K.variable(numpy.array([[0.0, 0.0, 0.0]]))
        vector_zero_softmaxed = K.eval(masked_softmax(vector_zero, None))
        assert_array_almost_equal(vector_zero_softmaxed,
                                  numpy.array([[0.33333334, 0.33333334, 0.33333334]]))

        # Testing the general unmasked batched case.
        matrix = K.variable(numpy.array([[1.0, 2.0, 5.0], [1.0, 2.0, 3.0]]))
        masked_matrix_softmaxed = K.eval(masked_softmax(matrix, None))
        assert_array_almost_equal(masked_matrix_softmaxed,
                                  numpy.array([[0.01714783, 0.04661262, 0.93623955],
                                               [0.09003057, 0.24472847, 0.66524096]]))

        # Testing the unmasked batched case where one of the inputs are all 0s.
        matrix = K.variable(numpy.array([[1.0, 2.0, 5.0], [0.0, 0.0, 0.0]]))
        masked_matrix_softmaxed = K.eval(masked_softmax(matrix, None))
        assert_array_almost_equal(masked_matrix_softmaxed,
                                  numpy.array([[0.01714783, 0.04661262, 0.93623955],
                                               [0.33333334, 0.33333334, 0.33333334]])) 
Example 59
Project: RPGOne   Author: RTHMaK   File: test_option_attention_sum.py    Apache License 2.0 5 votes vote down vote up
def test_compute_mask(self):
        option_attention_sum = OptionAttentionSum()
        result = option_attention_sum.compute_mask(["_", "_",
                                                    K.variable(np.array([[[1, 2, 0], [2, 3, 3],
                                                                          [0, 0, 0], [0, 0, 0]]],
                                                                        dtype="int32"))])
        assert_array_equal(K.eval(result), np.array([[1, 1, 0, 0]]))
        result = option_attention_sum.compute_mask(["_", "_",
                                                    K.variable(np.array([[[1, 2, 0], [1, 0, 0],
                                                                          [0, 0, 0], [0, 0, 0]]],
                                                                        dtype="int32"))])
        assert_array_equal(K.eval(result), np.array([[1, 1, 0, 0]]))
        result = option_attention_sum.compute_mask(["_", "_",
                                                    K.variable(np.array([[[1, 2, 0], [0, 0, 0],
                                                                          [0, 0, 0], [0, 0, 0]]],
                                                                        dtype="int32"))])
        assert_array_equal(K.eval(result), np.array([[1, 0, 0, 0]]))

        # test batch case
        result = option_attention_sum.compute_mask(["_", "_",
                                                    K.variable(np.array([[[1, 2, 0], [2, 3, 3],
                                                                          [0, 0, 0], [0, 0, 0]],
                                                                         [[1, 1, 0], [3, 3, 3],
                                                                          [0, 0, 3], [0, 0, 0]]],
                                                                        dtype="int32"))])
        assert_array_equal(K.eval(result), np.array([[1, 1, 0, 0], [1, 1, 1, 0]])) 
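Reading off these assertions, compute_mask keeps an answer option (mask value 1) exactly when at least one of its word indices is non-zero, and treats all-zero rows as padding; the first two list entries are ignored placeholders here. A minimal backend sketch of that rule, taking only the options tensor (the layer's real implementation may differ):

def options_mask_sketch(options):
    # options: (batch, num_options, num_words) integer word indices, 0 = padding.
    # An option survives if any of its word indices is non-zero.
    return K.cast(K.any(K.not_equal(options, 0), axis=-1), 'int32')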
Example 60
Project: RPGOne   Author: RTHMaK   File: overlap_test.py    Apache License 2.0 5 votes vote down vote up
def test_masked_batched_case(self):
        tensor_a = K.variable(numpy.array([[1, 3, 4, 8, 2], [2, 8, 1, 2, 3]]),
                              dtype="int32")
        tensor_b = K.variable(numpy.array([[9, 4, 2, 5], [6, 1, 2, 2]]),
                              dtype="int32")
        mask_a = K.variable(numpy.array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0]]))
        mask_b = K.variable(numpy.array([[1, 1, 0, 0], [1, 1, 0, 0]]))
        expected_output = numpy.array([[[1.0, 0.0], [1.0, 0.0],
                                        [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]],
                                       [[1.0, 0.0], [1.0, 0.0],
                                        [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]]])

        # Testing the masked general batched case
        result = K.eval(Overlap()([tensor_a, tensor_b], mask=[mask_a, mask_b]))
        assert_almost_equal(result, expected_output) 
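The expected_output can be reproduced with plain numpy: for each position in tensor_a, Overlap emits a one-hot pair [no-overlap, overlap] indicating whether that token occurs among the unmasked tokens of tensor_b. Whether mask_a also suppresses matches cannot be told from this test (the masked positions happen to have no overlap anyway); the sketch below applies both masks:

import numpy

def overlap_sketch(a, b, mask_a, mask_b):
    # a: (batch, len_a), b: (batch, len_b) integer token ids
    batch, len_a = a.shape
    out = numpy.zeros((batch, len_a, 2))
    for i in range(batch):
        valid_b = set(b[i][mask_b[i] > 0])
        for j in range(len_a):
            hit = mask_a[i, j] > 0 and a[i, j] in valid_b
            out[i, j] = [0.0, 1.0] if hit else [1.0, 0.0]
    return out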
Example 61
Project: RPGOne   Author: RTHMaK   File: gated_attention_test.py    Apache License 2.0 5 votes vote down vote up
def test_masked_multiplication(self):
        # test masked batch case
        document = K.variable(numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]]))
        document_mask = K.variable(numpy.array([[1, 1, 0]]))
        question = K.variable(numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7],
                                            [0.1, .6]]]))
        attention = K.variable(numpy.array([[[0.3, 0.1, 0.5, 0.2],
                                             [0.4, 0.2, 0.8, 0.7],
                                             [0.8, 0.1, 0.6, 0.4]]]))
        gated_attention = GatedAttention(gating_function="*")
        result = K.eval(gated_attention([document, question, attention],
                                        mask=[document_mask]))
        assert_almost_equal(result, numpy.array([[[0.111, 0.068],
                                                  [0.252, 0.256],
                                                  [0.0, 0.0]]])) 
Example 62
Project: RPGOne   Author: RTHMaK   File: gated_attention_test.py    Apache License 2.0 5 votes vote down vote up
def test_masked_concatenation(self):
        # test masked batch case
        document = K.variable(numpy.array([[[0.3, 0.1], [0.4, 0.2], [0.8, 0.1]]]))
        document_mask = K.variable(numpy.array([[1, 1, 0]]))
        question = K.variable(numpy.array([[[0.2, 0.6], [0.4, 0.3], [0.5, 0.7],
                                            [0.1, .6]]]))
        attention = K.variable(numpy.array([[[0.3, 0.1, 0.5, 0.2],
                                             [0.4, 0.2, 0.8, 0.7],
                                             [0.8, 0.1, 0.6, 0.4]]]))
        gated_attention = GatedAttention(gating_function="||")
        result = K.eval(gated_attention([document, question, attention],
                                        mask=[document_mask]))
        assert_almost_equal(result, numpy.array([[[0.37, 0.68, 0.3, 0.1],
                                                  [0.63, 1.28, 0.4, 0.2],
                                                  [0.0, 0.0, 0.0, 0.0]]])) 
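In both gated-attention tests, each document position first summarizes the question with its attention row (a weighted sum of the question vectors), and the gating function then combines that summary with the document vector: "*" multiplies element-wise and "||" concatenates; masked document positions come out as zeros. This reading is inferred from the expected values; a quick numpy check of the first row:

import numpy

question = numpy.array([[0.2, 0.6], [0.4, 0.3], [0.5, 0.7], [0.1, 0.6]])
attention_row = numpy.array([0.3, 0.1, 0.5, 0.2])
document_row = numpy.array([0.3, 0.1])

q_summary = attention_row.dot(question)               # [0.37, 0.68]
print(q_summary * document_row)                       # "*"  gating -> [0.111, 0.068]
print(numpy.concatenate([q_summary, document_row]))   # "||" gating -> [0.37, 0.68, 0.3, 0.1]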
Example 63
Project: RPGOne   Author: RTHMaK   File: masked_softmax_test.py    Apache License 2.0 5 votes vote down vote up
def test_call_handles_masking_properly(self):
        options = K.variable(numpy.asarray([[2, 4, 0, 1]]))
        mask = K.variable(numpy.asarray([[1, 0, 1, 1]]))
        softmax = K.eval(MaskedSoftmax().call(options, mask=mask))
        assert softmax.shape == (1, 4)
        numpy.testing.assert_almost_equal(softmax, [[0.66524096, 0, 0.09003057, 0.24472847]]) 
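The masked result above (and the unmasked cases in the earlier masked_softmax test) is what exponentiating the scores, zeroing the masked entries, and renormalizing would give. A minimal backend sketch consistent with these tests; the project code may handle numerical stability or fully-masked rows differently:

def masked_softmax_sketch(scores, mask):
    # scores, mask: (batch, num_options); mask entries are 0 or 1.
    if mask is None:
        return K.softmax(scores)
    exp_scores = K.exp(scores) * mask
    return exp_scores / K.sum(exp_scores, axis=-1, keepdims=True)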
Example 64
Project: RPGOne   Author: RTHMaK   File: attention_test.py    Apache License 2.0 5 votes vote down vote up
def test_masked(self):
        # Testing general masked non-batched case.
        vector = K.variable(numpy.array([[0.3, 0.1, 0.5]]))
        matrix = K.variable(numpy.array([[[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.1, 0.4, 0.3]]]))
        mask = K.variable(numpy.array([[1.0, 0.0, 1.0]]))
        result = K.eval(Attention().call([vector, matrix], mask=["_", mask]))
        assert_almost_equal(result, numpy.array([[0.52248482, 0.0, 0.47751518]])) 
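The expected weights follow from dotting the query vector against each matrix row and taking a masked softmax over the three scores (the middle row is masked out). A numpy check:

import numpy

vector = numpy.array([0.3, 0.1, 0.5])
matrix = numpy.array([[0.6, 0.8, 0.1], [0.15, 0.5, 0.2], [0.1, 0.4, 0.3]])
mask = numpy.array([1.0, 0.0, 1.0])

scores = matrix.dot(vector)          # [0.31, 0.195, 0.22]
weights = numpy.exp(scores) * mask
weights /= weights.sum()
print(weights)                       # ~[0.5225, 0.0, 0.4775]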
Example 65
Project: RPGOne   Author: RTHMaK   File: batch_dot_test.py    Apache License 2.0 5 votes vote down vote up
def test_output_shapes(self):
        bd = BatchDot()
        a_shapes = [(5, 10), (1, 1, 1), (1, 5, 3), (1, 5, 4, 3), (1, 5, 3)]
        b_shapes = [(5, 10), (1, 1, 1), (1, 2, 3), (1, 5, 3), (1, 5, 4, 3)]
        expected_shapes = [(5, 1), (1, 1, 1), (1, 5, 2), (1, 5, 4), (1, 5, 4)]
        for a_shape, b_shape, expected_shape in zip(a_shapes, b_shapes, expected_shapes):
            if (len(a_shape) > 3 or len(b_shape) > 3) and K.backend() == "theano":
                # this breaks in theano, so check that an error is raised
                self.assertRaises(RuntimeError, bd.call,
                                  [K.ones(shape=a_shape), K.ones(shape=b_shape)])
            else:
                assert K.eval(bd([K.ones(shape=a_shape), K.ones(shape=b_shape)])).shape == expected_shape
            assert bd.compute_output_shape([a_shape, b_shape]) == expected_shape 
Example 66
Project: RPGOne   Author: RTHMaK   File: word_overlap_tuple_matcher_test.py    Apache License 2.0 5 votes vote down vote up
def test_masks_handled_correctly(self):
        # Test when one tuple is all padding.
        # Here, since tuple3 is all padding, we want to return a mask value of 0 for this pair
        tuple1 = K.variable(self.tuple1)
        tuple3 = K.variable(self.tuple3)
        calculated_mask_exclude = K.eval(self.match_layer.compute_mask([tuple1, tuple3], [None, None]))
        assert calculated_mask_exclude.shape == (1, 1)
        assert_array_almost_equal(calculated_mask_exclude, np.array([[0]], dtype='int32'))

        # Test when tuple2 is valid.
        # Here, since tuple2 is valid, we want to return a mask value of 1 for this pair
        tuple2 = K.variable(self.tuple2)
        calculated_mask_include = K.eval(self.match_layer.compute_mask([tuple1, tuple2], [None, None]))
        assert calculated_mask_include.shape == (1, 1)
        assert_array_almost_equal(calculated_mask_include, np.array([[1]], dtype='int32')) 
Example 67
Project: RPGOne   Author: RTHMaK   File: threshold_tuple_matcher_test.py    Apache License 2.0 5 votes vote down vote up
def test_returns_masks_correctly(self):
        # Test when one tuple is all padding.
        # Here, since tuple2 is all padding, we want to return a mask value of 0 for this pair
        # tuple1 shape: (batch size, num_slots, num_words, embed_dimension)
        tuple1 = K.variable(self.tuple1)
        mask1 = K.variable(numpy.ones((1, self.num_slots, self.num_words)))
        tuple2 = K.variable(self.tuple2)
        mask2 = K.variable(numpy.zeros((1, self.num_slots, self.num_words)))
        calculated_mask_exclude = K.eval(
                ThresholdTupleMatcher({"type": "cosine_similarity"},
                                      self.num_hidden_layers,
                                      self.hidden_layer_width,
                                      hidden_layer_activation=self.hidden_layer_activation)
                .compute_mask([tuple1, tuple2], [mask1, mask2]))
        assert_array_almost_equal(calculated_mask_exclude, numpy.array([[0]], dtype='int32'))
        assert calculated_mask_exclude.shape == (1, 1,)

        # Test when tuple2 is valid.
        # Here, since tuple2 is valid, we want to return a mask value of 1 for this pair
        new_mask = numpy.ones((1, self.num_slots, self.num_words))
        new_mask[:, :, 1] = 0
        mask2 = K.variable(new_mask)

        calculated_mask_include = K.eval(
                ThresholdTupleMatcher({"type": "cosine_similarity"},
                                      self.num_hidden_layers,
                                      self.hidden_layer_width,
                                      hidden_layer_activation=self.hidden_layer_activation)
                .compute_mask([tuple1, tuple2], [mask1, mask2]))
        assert_array_almost_equal(calculated_mask_include, numpy.array([[1]], dtype='int32'))
        assert calculated_mask_include.shape == (1, 1,) 
Example 68
Project: RPGOne   Author: RTHMaK   File: threshold_tuple_matcher_test.py    Apache License 2.0 5 votes vote down vote up
def test_handles_input_masks_correctly(self):
        num_slots = 3
        num_words = 5
        embed_dimension = 4
        tuple1_word_input = Input(shape=(num_slots, num_words), dtype='int32', name="input_tuple1")
        tuple2_word_input = Input(shape=(num_slots, num_words), dtype='int32', name="input_tuple2")

        embedding = TimeDistributedEmbedding(10, embed_dimension, mask_zero=True)
        embedded_masked_tuple1 = embedding(tuple1_word_input)
        embedded_masked_tuple2 = embedding(tuple2_word_input)

        match_layer = ThresholdTupleMatcher({"type": "cosine_similarity"},
                                            self.num_hidden_layers,
                                            self.hidden_layer_width,
                                            initialization=Constant(.999),
                                            hidden_layer_activation=self.hidden_layer_activation)
        output = match_layer([embedded_masked_tuple1, embedded_masked_tuple2])
        mask_model = Model([tuple1_word_input, tuple2_word_input], output)

        # Assign tuple1 to be all 4's and tuple2 to be all 3's so we can control lexical overlap
        tuple1_words = numpy.ones((1, num_slots, num_words)) * 4
        tuple2_words = numpy.ones((1, num_slots, num_words)) * 3
        # Add a set of matching zeros to slot 1 in each tuple1 -- but shouldn't "match" because it's padding
        tuple1_words[:, 1, :] = numpy.zeros(num_words)
        tuple2_words[:, 1, :] = numpy.zeros(num_words)

        # Get the initial weights for use in testing
        layer_nn = match_layer.hidden_layer_weights

        # Testing general unmasked case.
        desired_overlap = K.variable(numpy.asarray([[0, 0, 0]]))
        # Desired_overlap gets fed into the inner NN.
        neural_network_feed_forward = apply_feed_forward(desired_overlap, layer_nn,
                                                         activations.get(match_layer.hidden_layer_activation))
        # Apply the final activation function.
        desired_result = activations.get(match_layer.final_activation)(K.dot(neural_network_feed_forward,
                                                                             match_layer.score_layer))
        result = mask_model.predict([tuple1_words, tuple2_words])
        assert_array_almost_equal(result, K.eval(desired_result)) 
Example 69
Project: SkinLesionNeuralNetwork   Author: Neurality   File: visu.py    GNU General Public License v3.0 4 votes vote down vote up
def get_weights_mosaic(model, layer_id, n=64):
    """
    Build a mosaic image from the first `n` convolution kernels of a layer.
    """

    # Get the Keras layer
    layer = model.layers[layer_id]

    # Check that this layer has weight values
    if not hasattr(layer, "W"):
        raise Exception("The layer {} of type {} does not have weights.".format(
            layer.name, layer.__class__.__name__))
    weights = K.eval(layer.W.value())

    # Check the image ordering convention
    if K.image_dim_ordering() == 'tf':
        weights = np.reshape(weights, (weights.shape[3], weights.shape[2],
                                       weights.shape[0], weights.shape[1]))

    # For now we only handle Conv-like layers with 4-dimensional weights
    if weights.ndim != 4:
        raise Exception("The layer {} has {} dimensions which is not supported.".format(
            layer.name, weights.ndim))

    # n is the maximum number of kernels to display
    if weights.shape[0] < n:
        n = weights.shape[0]

    # Create the mosaic of weights
    nrows = int(np.round(np.sqrt(n)))
    ncols = int(nrows)
    if nrows ** 2 < n:
        ncols += 1

    mosaic = make_mosaic(weights[:n, 0], nrows, ncols, border=1)

    return mosaic 
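make_mosaic is a helper from the same project and is not included in this snippet; judging from the call make_mosaic(weights[:n, 0], nrows, ncols, border=1), it tiles n two-dimensional kernels into an nrows x ncols grid separated by a one-pixel border. A hypothetical numpy stand-in:

import numpy as np

def make_mosaic_sketch(kernels, nrows, ncols, border=1):
    # kernels: (n, h, w) array of 2-D filters
    n, h, w = kernels.shape
    mosaic = np.zeros((nrows * (h + border) - border,
                       ncols * (w + border) - border), dtype=kernels.dtype)
    for idx in range(n):
        r, c = divmod(idx, ncols)
        mosaic[r * (h + border): r * (h + border) + h,
               c * (w + border): c * (w + border) + w] = kernels[idx]
    return mosaic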
Example 70
Project: SkinLesionNeuralNetwork   Author: Neurality   File: visu.py    GNU General Public License v3.0 4 votes vote down vote up
def plot_all_weights(model, n=64, n_columns=3, **kwargs):
    """
    Plot a weight mosaic for every layer of the model that has 4-D (convolution) weights.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    # Set default matplotlib parameters
    if 'interpolation' not in kwargs:
        kwargs['interpolation'] = "none"

    if 'cmap' not in kwargs:
        kwargs['cmap'] = "gray"

    layers_to_show = []

    for i, layer in enumerate(model.layers):
        if hasattr(layer, "W"):
            weights = K.eval(layer.W.value())
            if weights.ndim == 4:
                layers_to_show.append((i, layer))

    n_mosaic = len(layers_to_show)
    ncols = n_columns
    # Round up so that every selected layer gets its own subplot slot
    nrows = n_mosaic // ncols
    if nrows * ncols < n_mosaic:
        nrows += 1

    fig_w = 15
    fig_h = nrows * fig_w / ncols

    fig = plt.figure(figsize=(fig_w, fig_h))

    for i, (layer_id, layer) in enumerate(layers_to_show):

        mosaic = get_weights_mosaic(model, layer_id=layer_id, n=n)

        ax = fig.add_subplot(nrows, ncols, i+1)

        im = ax.imshow(mosaic, **kwargs)
        ax.set_title("Layer #{} called '{}' \nof type {}".format(layer_id, layer.name, layer.__class__.__name__))

        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.1)
        plt.colorbar(im, cax=cax)

        ax.axis('off')

        for sp in ax.spines.values():
            sp.set_visible(False)
        if ax.is_first_row():
            ax.spines['top'].set_visible(True)
        if ax.is_last_row():
            ax.spines['bottom'].set_visible(True)
        if ax.is_first_col():
            ax.spines['left'].set_visible(True)
        if ax.is_last_col():
            ax.spines['right'].set_visible(True)

    #fig.tight_layout()
    return fig 
Example 71
Project: lucid4keras   Author: totti0223   File: render.py    Apache License 2.0 4 votes vote down vote up
def keras_make_vis_T(input_model, objective_f, param_f=None, optimizer=None,
               transforms=None):
    def connect_model(bottom, input_size, transform_f):
        '''
        Connect the Keras model `bottom` to the transformation function
        `transform_f` through a Lambda layer.
        '''
        input_tensor = Input(shape=input_size)
        transformed = Lambda(lambda inputs: transform_f(inputs), name="transform_layer")(input_tensor)
        top = Model(inputs=input_tensor, outputs=transformed)

        _model = Model(inputs=top.input,
                       outputs=bottom(top.output))
        return _model

    _t_image = make_t_image(param_f)
    # Evaluate to a numpy array: keeping the parameterized image off the GPU seems
    # to speed things up here; if the Adam-style update ran entirely on the GPU,
    # K.eval might no longer be needed.
    _t_image = K.eval(_t_image)

    input_size = _t_image.shape[1:]

    transform_f = make_transform_f(transforms)
    target_model = connect_model(input_model, input_size, transform_f)

    objective_f = as_objective(objective_f)

    # Create a (batch, size, size, channel) array in case the objective function
    # needs multiple inputs (e.g. addition or subtraction of objectives)
    try:
        batch = int(objective_f.batch_n)
    except:
        batch = 1

    if batch > 1:
        # Tile the single parameterized image across the batch dimension
        t_image = np.zeros((batch, _t_image.shape[1], _t_image.shape[2], _t_image.shape[3]))
        for k in range(batch):
            t_image[k] = _t_image[0]
    else:
        t_image = _t_image

    #optimizer = make_optimizer(optimizer, [])
    loss = objective_f(target_model)
    grads = K.gradients(loss,target_model.input)[0]
    train = K.function([target_model.input, K.learning_phase()], [loss, grads])    
    return t_image, loss, train 
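The returned train is a plain K.function, so a caller presumably drives it with an optimization loop over the numpy image. A hypothetical driver sketch (model and objective_f stand in for whatever the caller built; the actual lucid4keras render loop is not shown here and may instead use the commented-out make_optimizer; the step size and iteration count are arbitrary):

t_image, loss, train = keras_make_vis_T(model, objective_f)
step = 0.05
for _ in range(256):
    loss_value, grads_value = train([t_image, 0])  # 0 = test-phase flag for K.learning_phase()
    t_image = t_image + step * grads_value         # plain gradient ascent on the objective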
Example 72
Project: musical-onset-efficient   Author: ronggong   File: bock_crnn_basecode.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def loss_cal(fns, data_path, scaler, model, len_seq):
    """
    Calculate loss
    :param fns:
    :param data_path:
    :param scaler:
    :param model:
    :return:
    """
    y_pred_val_all = np.array([], dtype='float32')
    label_val_all = np.array([], dtype='int')

    for fn in fns:

        mfcc_line, label, sample_weights = featureLabelSampleWeightsLoad(data_path,
                                                                         fn,
                                                                         scaler)

        # pad sequence
        mfcc_line_pad, label_pad, sample_weights_pad, len_padded = \
            featureLabelSampleWeightsPad(mfcc_line, label, sample_weights, len_seq)

        # Integer division: iter_time feeds range() below, so it must be an int under Python 3
        iter_time = len(mfcc_line_pad) // len_seq
        for ii_iter in range(iter_time):

            # create tensor from the padded line
            mfcc_line_tensor, label_tensor, _ = \
                createInputTensor(mfcc_line_pad, label_pad, sample_weights_pad, len_seq, ii_iter)

            y_pred = model.predict_on_batch(mfcc_line_tensor)

            # remove the padded samples
            if ii_iter == iter_time - 1 and len_padded > 0:
                y_pred = y_pred[:, :len_seq - len_padded, :]
                label_tensor = label_tensor[:, :len_seq - len_padded, :]

            # reduce the label dimension
            y_pred = y_pred.reshape((y_pred.shape[1],))
            label_tensor = label_tensor.reshape((label_tensor.shape[1],))

            y_pred_val_all = np.append(y_pred_val_all, y_pred)
            label_val_all = np.append(label_val_all, label_tensor)

    y_true = K.variable(label_val_all)
    y_pred = K.variable(y_pred_val_all)

    loss = K.eval(binary_crossentropy(y_true, y_pred))

    return loss 
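Note that the shape of what K.eval returns here depends on which binary_crossentropy is in scope: the backend op K.binary_crossentropy is element-wise (one loss value per frame), while the keras.losses wrapper already averages over the last axis. Either way the result is a numpy value, so a caller can reduce it to a single number, for example (a hypothetical reduction, not part of the original code; val_fns and val_path are placeholder names):

val_loss = loss_cal(val_fns, val_path, scaler, model, len_seq)
mean_val_loss = float(np.mean(val_loss))  # scalar whether val_loss is per-frame or already reduced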
Example 73
Project: musical-onset-efficient   Author: ronggong   File: jingju_crnn_basecode.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def loss_cal(fns, data_path, scaler, model, len_seq):
    """
    Calculate loss
    :param fns:
    :param data_path:
    :param scaler:
    :param model:
    :return:
    """
    y_pred_val_all = np.array([], dtype='float32')
    label_val_all = np.array([], dtype='int')

    for fn in fns:

        mfcc_line, label, sample_weights = featureLabelSampleWeightsLoad(data_path,
                                                                         fn,
                                                                         scaler)

        # pad sequence
        mfcc_line_pad, label_pad, sample_weights_pad, len_padded = \
            featureLabelSampleWeightsPad(mfcc_line, label, sample_weights, len_seq)

        # Integer division: iter_time feeds range() below, so it must be an int under Python 3
        iter_time = len(mfcc_line_pad) // len_seq
        for ii_iter in range(iter_time):

            # create tensor from the padded line
            mfcc_line_tensor, label_tensor, _ = \
                createInputTensor(mfcc_line_pad, label_pad, sample_weights_pad, len_seq, ii_iter)

            y_pred = model.predict_on_batch(mfcc_line_tensor)

            # remove the padded samples
            if ii_iter == iter_time - 1 and len_padded > 0:
                y_pred = y_pred[:, :len_seq - len_padded, :]
                label_tensor = label_tensor[:, :len_seq - len_padded, :]

            # reduce the label dimension
            y_pred = y_pred.reshape((y_pred.shape[1],))
            label_tensor = label_tensor.reshape((label_tensor.shape[1],))

            y_pred_val_all = np.append(y_pred_val_all, y_pred)
            label_val_all = np.append(label_val_all, label_tensor)

    y_true = K.variable(label_val_all)
    y_pred = K.variable(y_pred_val_all)

    loss = K.eval(binary_crossentropy(y_true, y_pred))

    return loss 
Example 74
Project: keras-fcn   Author: JihongJu   File: test_encoders.py    MIT License 4 votes vote down vote up
def test_vgg16():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG16(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5

        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG16(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight) 
Example 75
Project: keras-fcn   Author: JihongJu   File: test_encoders.py    MIT License 4 votes vote down vote up
def test_vgg19():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG19(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5

        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG19(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight) 
Example 76
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelorient.py    MIT License 4 votes vote down vote up
def orientation_loss(y_true, y_pred, obj_mask, mf):
    # Reshape targets/predictions to (num_boxes, BIN, 2) and L2-normalize the
    # predicted (cos, sin) pairs so each bin lies on the unit circle.
    y_true = K.reshape(y_true*obj_mask, [-1, BIN, 2])
    y_pred = y_pred*obj_mask
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])

    # cos^2 + sin^2 = 1, so this dot product is cos(theta_true - theta_pred), in [-1, 1]
    loss = (y_true[:,:,0]*y_pred[:,:,0] + y_true[:,:,1]*y_pred[:,:,1])
    loss = 1 - loss
    loss = K.reshape(loss, [-1, 2])
    loss = loss*obj_mask

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Guard against division by zero inside the graph; an eager check like
    # `if K.eval(allobj) == 0` would force evaluation, so tf.cond is used instead.
    loss = tf.cond(allobj > 0, lambda: losssum/allobj, lambda: 0.0)

    return loss
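Since every target/prediction pair is a (cos theta, sin theta) unit vector, the dot product above equals cos(theta_true - theta_pred), so the per-bin term 1 - dot is a cosine-distance orientation loss: 0 when the angles agree and 2 when they are opposite. The squared-difference variant in the next example computes 2 - 2*cos(theta_true - theta_pred), i.e. the same quantity scaled by two. A quick numpy check of that identity:

import numpy as np

t, p = 0.8, 0.3  # two arbitrary angles in radians
dot = np.cos(t) * np.cos(p) + np.sin(t) * np.sin(p)
assert np.isclose(1 - dot, 1 - np.cos(t - p))
assert np.isclose((np.cos(t) - np.cos(p)) ** 2 + (np.sin(t) - np.sin(p)) ** 2,
                  2 - 2 * np.cos(t - p))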
Example 77
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelorient.py    MIT License 4 votes vote down vote up
def orientation_loss3(y_true, y_pred, obj_mask, mf):
    # Reshape targets/predictions to (num_boxes, BIN, 2) and L2-normalize the
    # predicted (cos, sin) pairs so each bin lies on the unit circle.
    y_true = K.reshape(y_true*obj_mask, [-1, BIN, 2])
    y_pred = y_pred*obj_mask
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])

    # Squared distance between the (cos, sin) unit vectors:
    # (cos_t - cos_p)^2 + (sin_t - sin_p)^2 = 2 - 2*cos(theta_t - theta_p)
    cosd = K.square(y_true[:,:,0] - y_pred[:,:,0])
    sind = K.square(y_true[:,:,1] - y_pred[:,:,1])
    loss = cosd + sind
    loss = loss*obj_mask

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Guard against division by zero inside the graph; an eager check like
    # `if K.eval(allobj) == 0` would force evaluation, so tf.cond is used instead.
    loss = tf.cond(allobj > 0, lambda: losssum/allobj, lambda: 0.0)

    return loss
Example 78
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License 4 votes vote down vote up
def orientation_loss3(y_true, y_pred, obj_mask, mf):
    # Reshape targets/predictions to (num_boxes, BIN, 2) and L2-normalize the
    # predicted (cos, sin) pairs so each bin lies on the unit circle.
    y_true = K.reshape(y_true*obj_mask, [-1, BIN, 2])
    y_pred = y_pred*obj_mask
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])

    # Squared distance between the (cos, sin) unit vectors:
    # (cos_t - cos_p)^2 + (sin_t - sin_p)^2 = 2 - 2*cos(theta_t - theta_p)
    cosd = K.square(y_true[:,:,0] - y_pred[:,:,0])
    sind = K.square(y_true[:,:,1] - y_pred[:,:,1])
    loss = cosd + sind
    loss = loss*obj_mask

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Guard against division by zero inside the graph; an eager check like
    # `if K.eval(allobj) == 0` would force evaluation, so tf.cond is used instead.
    loss = tf.cond(allobj > 0, lambda: losssum/allobj, lambda: 0.0)

    return loss
Example 79
Project: yolov3-3dcarbox   Author: zoujialong9012   File: modelmergetree.py    MIT License 4 votes vote down vote up
def orientation_loss(y_true, y_pred, obj_mask, mf):
    # Reshape targets/predictions to (num_boxes, BIN, 2) and L2-normalize the
    # predicted (cos, sin) pairs so each bin lies on the unit circle.
    y_true = K.reshape(y_true*obj_mask, [-1, BIN, 2])
    y_pred = y_pred*obj_mask
    y_pred = K.l2_normalize(K.reshape(y_pred, [-1, BIN, 2]), 2)
    obj_mask = K.reshape(obj_mask, [-1, 1])

    # Count how many bins of each target are "active" (norm of (cos, sin) above 0.5);
    # computed here but not used in the final loss below.
    anchors = K.sum(K.square(y_true), axis=2)
    anchors = K.greater(anchors, 0.5)
    anchors = K.sum(K.cast(anchors, dtype='float32'), 1)

    # cos^2 + sin^2 = 1, so this dot product is cos(theta_true - theta_pred), in [-1, 1]
    loss = (y_true[:,:,0]*y_pred[:,:,0] + y_true[:,:,1]*y_pred[:,:,1])
    loss = 1 - loss
    loss = K.reshape(loss, [-1, 2])
    loss = loss*obj_mask
    print(loss.shape)

    losssum = K.sum(K.sum(loss, axis=0))
    allobj = K.sum(obj_mask)
    # Guard against division by zero inside the graph; an eager check like
    # `if K.eval(allobj) == 0` would force evaluation, so tf.cond is used instead.
    loss = tf.cond(allobj > 0, lambda: losssum/allobj, lambda: 0.0)

    return loss
Example 80
Project: keras-centernet   Author: see--   File: decode_test.py    MIT License 4 votes vote down vote up
def test_hpdet_decode():
  np.random.seed(32)
  hm = np.random.randn(2, 64, 64, 1)
  hm_hp = np.random.randn(2, 64, 64, 17)
  hp_offset = np.random.randn(2, 64, 64, 2) * 10.0
  kps = np.random.randn(2, 64, 64, 34) * 5.0
  reg = np.random.randn(2, 64, 64, 2) * 10.0
  wh = np.random.randn(2, 64, 64, 2) * 20.0

  keras_hm = K.constant(hm)
  keras_hm_hp = K.constant(hm_hp)
  keras_hp_offset = K.constant(hp_offset)
  keras_kps = K.constant(kps)
  keras_reg = K.constant(reg)
  keras_wh = K.constant(wh)

  keras_detections = K.eval(_hpdet_decode(
    keras_hm, keras_wh, keras_kps, keras_reg, keras_hm_hp, keras_hp_offset, output_stride=1))
  gold_fn = 'tests/data/hpdet_decode_gold.p'
  if not os.path.exists(gold_fn):
    import torch as th
    import sys
    sys.path.append(os.path.expanduser('~/Pytorch/CenterNet/src'))
    from lib.models.decode import multi_pose_decode as hpdet_decode  # noqa
    py_hm = th.from_numpy(hm.transpose(0, 3, 1, 2)).float()
    py_hm.sigmoid_()
    py_hm_hp = th.from_numpy(hm_hp.transpose(0, 3, 1, 2)).float()
    py_hm_hp.sigmoid_()
    py_kps = th.from_numpy(kps.transpose(0, 3, 1, 2)).float()
    py_reg = th.from_numpy(reg.transpose(0, 3, 1, 2)).float()
    py_wh = th.from_numpy(wh.transpose(0, 3, 1, 2)).float()
    py_hp_offset = th.from_numpy(hp_offset.transpose(0, 3, 1, 2)).float()
    py_detections = hpdet_decode(py_hm, py_wh, py_kps, py_reg, py_hm_hp, py_hp_offset).detach().numpy()
    py_kps = py_detections[..., 5:-1].copy()
    py_kps_x = py_kps[..., ::2]
    py_kps_y = py_kps[..., 1::2]
    py_kps = np.concatenate([py_kps_x, py_kps_y], 2)
    py_detections[..., 5:-1] = py_kps
    with open(gold_fn, 'wb') as f:
      pickle.dump(py_detections, f)
  else:
    with open(gold_fn, 'rb') as f:
      py_detections = pickle.load(f)

  assert np.allclose(keras_detections, py_detections)