Python tensorflow.keras.backend.function() Examples

The following are 19 code examples of tensorflow.keras.backend.function(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.backend, or try the search function.
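For orientation: K.function compiles a callable that maps a list of input tensors to a list of output tensors, which is the pattern every example below relies on. A minimal sketch (our own, not taken from any project below; it assumes TF 2.x with eager execution disabled, since K.placeholder requires graph mode):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

tf.compat.v1.disable_eager_execution()  # K.placeholder/K.function need graph mode in TF 2.x

x = K.placeholder(ndim=2)           # symbolic 2-D input
f = K.function([x], [x * 2.0])      # callable: list of inputs -> list of outputs
print(f([np.ones((1, 3))])[0])      # [[2. 2. 2.]]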
Example #1
Source File: qactivation_test.py    From qkeras with Apache License 2.0
# Imports assumed from the source file (shared by the qkeras test snippets below):
import numpy as np
from numpy.testing import assert_allclose
from tensorflow.keras import backend as K
from qkeras import smooth_sigmoid  # plus the other quantizers exercised below

def test_smooth_sigmoid():
  """Test smooth_sigmoid function."""
  test_values = np.array(
      [[-3.0, -2.0, -1.0, -0.5, 0.005, 0.0, 0.005, 0.5, 1, 4, 10]],
      dtype=K.floatx())

  def ref_smooth_sigmoid(y):
    x = 0.1875 * y + 0.5
    z = 0.0 if x <= 0.0 else (1.0 if x >= 1.0 else x)
    return z

  sigmoid = np.vectorize(ref_smooth_sigmoid)
  x = K.placeholder(ndim=2)
  f = K.function([x], [smooth_sigmoid(x)])
  result = f([test_values])[0]
  expected = sigmoid(test_values)
  assert_allclose(result, expected, rtol=1e-05) 
Example #2
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_hard_sigmoid():
  """Test hard_sigmoid function."""
  test_values = np.array(
      [[-3.0, -2.0, -1.0, -0.5, 0.005, 0.0, 0.005, 0.5, 1, 4, 10]],
      dtype=K.floatx())

  def ref_hard_sigmoid(y):
    x = 0.5 * y + 0.5
    z = 0.0 if x <= 0.0 else (1.0 if x >= 1.0 else x)
    return z

  sigmoid = np.vectorize(ref_hard_sigmoid)

  x = K.placeholder(ndim=2)
  f = K.function([x], [hard_sigmoid(x)])
  result = f([test_values])[0]
  expected = sigmoid(test_values)
  assert_allclose(result, expected, rtol=1e-05) 
Example #3
Source File: qactivation_test.py    From qkeras with Apache License 2.0
# Arguments are supplied by a @pytest.mark.parametrize decorator in the source
# file (omitted here; the same applies to the other parameterized tests below).
def test_binary(use_01, alpha, test_values, expected_values):
  x = K.placeholder(ndim=2)
  f = K.function([x], [binary(use_01, alpha)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Example #4
Source File: asr_solver.py    From delta with Apache License 2.0
def get_metric_func(self):
    ''' build metric function '''
    _input_data = self.model.get_layer('inputs').input
    y_pred = self.model.get_layer('ctc').input[0]
    metric_func = K.function([_input_data], [y_pred])
    return metric_func

  #pylint: disable=too-many-locals 
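A hypothetical usage sketch for get_metric_func() (the solver instance and batch_features array are assumptions; the layer names 'inputs' and 'ctc' come from the snippet itself):

# Hypothetical: `solver` is an instance of the class defining get_metric_func(),
# and `batch_features` is one batch shaped like the model's 'inputs' layer.
metric_func = solver.get_metric_func()
y_pred = metric_func([batch_features])[0]  # predictions feeding the CTC layer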
Example #5
Source File: asr_solver.py    From delta with Apache License 2.0
def input_fn(self, mode):
    ''' input function for tf.data.Dataset'''
    super().input_fn(mode)
    assert self.task
    self.batch_input_shape = self.task.batch_input_shape()
    batch_size = self.config['solver']['optimizer']['batch_size']
    num_epoch = self.config['solver']['optimizer']['epochs']
    return self.task.input_fn(mode, batch_size, num_epoch), self.task 
Example #6
Source File: keract.py    From keract with MIT License
def _get_gradients(model, x, y, nodes):
    if model.optimizer is None:
        raise Exception('Please compile the model first. The loss function is required to compute the gradients.')

    nodes_names = nodes.keys()
    nodes_values = nodes.values()

    try:
        if not hasattr(model, 'total_loss'):
            raise Exception('Disable TF eager mode to use get_gradients.\n'
                            'Add this command at the beginning of your script:\n'
                            'tf.compat.v1.disable_eager_execution()')
        grads = model.optimizer.get_gradients(model.total_loss, nodes_values)
    except ValueError as e:
        if 'differentiable' in str(e):
            # Probably one of the gradients operations is not differentiable...
            grads = []
            differentiable_nodes = []
            for n in nodes_values:
                try:
                    grads.extend(model.optimizer.get_gradients(model.total_loss, n))
                    differentiable_nodes.append(n)
                except ValueError:
                    pass
            nodes_values = differentiable_nodes
        else:
            raise e

    gradients_values = _evaluate(model, grads, x, y)

    return OrderedDict(zip(nodes_names, gradients_values)) 
Example #7
Source File: keract.py    From keract with MIT License
def get_gradients_of_activations(model, x, y, layer_names=None, output_format='simple', nested=False):
    """
    Get gradients of the outputs of the activation functions, with respect to the loss.
    Intuitively, it shows how your activation maps change in response to a tiny modification of the loss.
    :param model: keras compiled model or one of ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2',
    'mobilenet_v2', 'mobilenetv2'].
    :param x: Model input (Numpy array). In the case of multi-inputs, x should be of type List.
    :param y: Model target (Numpy array). In the case of multi-outputs, y should be of type List.
    :param layer_names: (optional) Single name of a layer or list of layer names for which activations should be
    returned. It is useful in very big networks when it is computationally expensive to evaluate all the layers/nodes.
    :param output_format: Change the output dictionary key of the function.
    - 'simple': output key will match the names of the Keras layers. For example Dense(1, name='d1') will
    return {'d1': ...}.
    - 'full': output key will match the full name of the output layer name. In the example above, it will
    return {'d1/BiasAdd:0': ...}.
    - 'numbered': output key will be an index range, based on the order of definition of each layer within the model.
    :param nested: If True, moves recursively through the model definition to retrieve nested layers.
                Recursion ends at leaf layers of the model tree or at layers whose name is specified in layer_names.

                E.g., a model with the following structure

                -layer1
                    -conv1
                    ...
                    -fc1
                -layer2
                    -fc2

                ... yields a dictionary with keys 'layer1/conv1', ..., 'layer1/fc1', 'layer2/fc2'.
                If layer_names = ['layer2/fc2'] is specified, the dictionary will only hold one key 'layer2/fc2'.

                The layer names are generated by joining all layers from top level to leaf level with the separator '/'.
    :return: Dict {layer_names (specified by output_format) -> activation of the layer output/node (Numpy array)}.
    """
    nodes = _get_nodes(model, output_format, nested=nested, layer_names=layer_names)
    return _get_gradients(model, x, y, nodes) 
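A hypothetical usage sketch for get_gradients_of_activations() (the toy model and layer names are assumptions; per the gradient helper in Example #6, eager execution must be disabled first):

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import keract

tf.compat.v1.disable_eager_execution()  # required by the gradient helper (Example #6)

model = Sequential([Dense(4, activation='relu', input_shape=(8,), name='d1'),
                    Dense(1, name='out')])
model.compile(loss='mse', optimizer='adam')

x = np.random.rand(2, 8)
y = np.random.rand(2, 1)
grads = keract.get_gradients_of_activations(model, x, y, output_format='simple')
# e.g. {'d1': array of shape (2, 4), 'out': array of shape (2, 1)}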
Example #8
Source File: keract.py    From keract with MIT License
def _evaluate(model: Model, nodes_to_evaluate, x, y=None, auto_compile=False):
    if not model._is_compiled:
        if model.name in ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2', 'mobilenet_v2', 'mobilenetv2']:
            print('Transfer learning detected. Model will be compiled with ("categorical_crossentropy", "adam").')
            print('If you want to change the default behaviour, then do in python:')
            print('model.name = ""')
            print('Then compile your model with whatever loss you want: https://keras.io/models/model/#compile.')
            print('If you want to get rid of this message, add this line before calling keract:')
            print('model.compile(loss="categorical_crossentropy", optimizer="adam")')
            model.compile(loss='categorical_crossentropy', optimizer='adam')
        else:
            if auto_compile:
                model.compile(loss='mse', optimizer='adam')
            else:
                print('Please compile your model first! https://keras.io/models/model/#compile.')
                print('If you only care about the activations (outputs of the layers), '
                      'then just compile your model like that:')
                print('model.compile(loss="mse", optimizer="adam")')
                raise Exception('Compilation of the model required.')

    def eval_fn(k_inputs):
        try:
            return K.function(k_inputs, nodes_to_evaluate)(model._standardize_user_data(x, y))
        except AttributeError:  # _standardize_user_data is unavailable in newer (eager-mode) TF versions.
            return K.function(k_inputs, nodes_to_evaluate)((x, y))  # feeding (x, y) directly also works.

    try:
        return eval_fn(model._feed_inputs + model._feed_targets + model._feed_sample_weights)
    except Exception:
        return eval_fn(model._feed_inputs) 
Example #9
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_stochastic_ternary_inference_mode(alpha, threshold, test_values, expected_values):
  K.set_learning_phase(0)
  x = K.placeholder(ndim=2)
  q = stochastic_ternary(alpha, threshold)
  f = K.function([x],
                 [q(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Example #10
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_stochastic_round_quantized_relu_po2(test_values, expected_values):
  K.set_learning_phase(1)
  np.random.seed(666)
  x = K.placeholder(ndim=2)
  q = quantized_relu_po2(use_stochastic_rounding=True)
  f = K.function([x], [q(x)])
  res = f([test_values])[0]
  res = np.average(res)
  assert_allclose(res, expected_values, rtol=1e-01, atol=1e-6) 
Example #11
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_stochastic_round_quantized_po2(test_values, expected_values):
  K.set_learning_phase(1)
  np.random.seed(666)
  x = K.placeholder(ndim=2) 
  q = quantized_po2(use_stochastic_rounding=True)
  f = K.function([x], [q(x)])
  res = f([test_values])[0]
  res = np.average(res)
  assert_allclose(res, expected_values, rtol=1e-01, atol=1e-6) 
Example #12
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_ternary(alpha, threshold, test_values, expected_values):
  x = K.placeholder(ndim=2)
  f = K.function([x],
                 [ternary(alpha, threshold)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Example #13
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_quantized_bits(bits, integer, symmetric, keep_negative, test_values,
                        expected_values):
  x = K.placeholder(ndim=2)
  f = K.function([x],
                 [quantized_bits(bits, integer, symmetric, keep_negative)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Example #14
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_quantized_relu_po2(bits,
                            max_value,
                            use_stochastic_rounding,
                            quadratic_approximation,
                            test_values,
                            expected_values):
  """Test quantized_po2 function."""
  x = K.placeholder(ndim=2)
  f = K.function([x],
                 [quantized_relu_po2(bits, max_value, use_stochastic_rounding,
                                     quadratic_approximation)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05, atol=1e-05) 
Example #15
Source File: qactivation_test.py    From qkeras with Apache License 2.0
def test_quantized_po2(bits,
                       max_value,
                       use_stochastic_rounding,
                       quadratic_approximation,
                       test_values,
                       expected_values):
  """Test quantized_po2 function."""
  x = K.placeholder(ndim=2)
  f = K.function([x], [quantized_po2(bits, max_value, use_stochastic_rounding,
                                     quadratic_approximation)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05, atol=1e-05) 
Example #16
Source File: leakyrelu_test.py    From qkeras with Apache License 2.0
def test_quantized_relu(bits, integer, use_sigmoid, negative_slope, test_values,
                        expected_values):
  """Test quantized_relu function."""
  x = K.placeholder(ndim=2)
  f = K.function([x], [quantized_relu(bits, integer, use_sigmoid,
                                      negative_slope)(x)])
  result = f([test_values])[0]
  assert_allclose(result, expected_values, rtol=1e-05) 
Example #17
Source File: sentiment-analysis.py    From gradio-UI with Apache License 2.0
def saliency(input, output):
    with graph.as_default():
        with sess.as_default():
            processed_input = preprocessing(input)
            processed_output = output

            output = 0 if float(output["Positive review"]) > 0.5 else 1
            input_tensors = [model.layers[0].input, K.learning_phase()]
            saliency_input = model.layers[1].input
            saliency_output = model.layers[-1].output[:, output]
            gradients = model.optimizer.get_gradients(saliency_output, saliency_input)
            compute_gradients = K.function(inputs=input_tensors, outputs=gradients)
            saliency_graph = compute_gradients(processed_input.reshape(1, 500))[0]

            saliency_graph = saliency_graph.reshape(500, 32)

            saliency_graph = np.abs(saliency_graph).sum(axis=1)
            normalized_saliency = (saliency_graph - saliency_graph.min()) / (
                saliency_graph.max() - saliency_graph.min())

            start_idx = np.where(processed_input[0] == START_TOKEN)[0][0]
            heat_map = []
            counter = 0
            words = input.split(" ")
            for i in range(start_idx + 1, 500):
                heat_map.extend([normalized_saliency[i]] * len(words[counter]))
                heat_map.append(0)  # zero saliency value assigned to the spaces between words
                counter += 1
            return np.array(heat_map)


Example #18
Source File: callbacks.py    From delta with Apache License 2.0
def on_epoch_end(self, epoch, logs=None):
    '''compute per-class precision/recall on the eval data'''
    logs = logs if logs is not None else {}

    cur_session = tf.keras.backend.get_session()
    truth, predict = [], []

    is_py_sequence = True
    if isinstance(self.eval_task, (dataset_ops.DatasetV2, dataset_ops.DatasetV1)):
      eval_gen = self.eval_task.make_one_shot_iterator()
      self.next_batch_gen = eval_gen.get_next()
      is_py_sequence = False
    elif isinstance(self.eval_task,
                    (iterator_ops.IteratorV2, iterator_ops.Iterator)):
      self.next_batch_gen = self.ds.get_next()
      is_py_sequence = False

    for index in range(len(self.eval_task)):
      if is_py_sequence:
        batch_data, batch_truth = self.eval_task[index]
      else:
        # Assumes the dataset iterator yields (features, labels) pairs.
        batch_data, batch_truth = cur_session.run(self.next_batch_gen)
      batch_input = batch_data
      batch_truth = batch_truth.tolist()

      text = self.model.get_layer('text').input
      speech = self.model.get_layer('speech').input
      y_pred = self.model(batch_input)
      f = K.function([text, speech], y_pred)
      batch_predict = f([batch_input['inputs'], batch_input['texts']])
      truth.extend(batch_truth)
      predict.extend(batch_predict)
    y_true = np.argmax(np.asarray(truth), axis=1)
    y_pred = np.argmax(np.asarray(predict), axis=1)
    accuracy = metrics.accuracy_score(y_true, y_pred)
    unw_accuracy = metrics.precision_score(y_true, y_pred, average='macro')
    logs['ClassReport'] = accuracy
    logging.info("Epoch {}: on eval.".format(
        epoch + 1))
    logging.info("Weighted accuracy: {}".format(accuracy))
    logging.info("Unweighted accuracy: {}".format(unw_accuracy))
    logging.info("Specific results: {}".format('\n' + metrics.classification_report(
      y_true, y_pred, digits=4))) 
Example #19
Source File: keras_model.py    From code2vec with MIT License
def _create_keras_model(self):
        # Each input sample consists of a bag of `MAX_CONTEXTS` tuples (source_terminal, path, target_terminal).
        # The valid mask indicates, for each context, whether it actually exists or is just padding.
        path_source_token_input = Input((self.config.MAX_CONTEXTS,), dtype=tf.int32)
        path_input = Input((self.config.MAX_CONTEXTS,), dtype=tf.int32)
        path_target_token_input = Input((self.config.MAX_CONTEXTS,), dtype=tf.int32)
        context_valid_mask = Input((self.config.MAX_CONTEXTS,))

        # Input paths are indexes, we embed these here.
        paths_embedded = Embedding(
            self.vocabs.path_vocab.size, self.config.PATH_EMBEDDINGS_SIZE, name='path_embedding')(path_input)

        # Input terminals are indexes, we embed these here.
        token_embedding_shared_layer = Embedding(
            self.vocabs.token_vocab.size, self.config.TOKEN_EMBEDDINGS_SIZE, name='token_embedding')
        path_source_token_embedded = token_embedding_shared_layer(path_source_token_input)
        path_target_token_embedded = token_embedding_shared_layer(path_target_token_input)

        # `Context` is a concatenation of the 2 terminals & path embedding.
        # Each context is a vector of size 3 * EMBEDDINGS_SIZE.
        context_embedded = Concatenate()([path_source_token_embedded, paths_embedded, path_target_token_embedded])
        context_embedded = Dropout(1 - self.config.DROPOUT_KEEP_RATE)(context_embedded)

        # Let's get dense: apply a dense layer to each context vector (using the same weights for all contexts).
        context_after_dense = TimeDistributed(
            Dense(self.config.CODE_VECTOR_SIZE, use_bias=False, activation='tanh'))(context_embedded)

        # The final code vectors are obtained by applying attention to the "densed" context vectors.
        code_vectors, attention_weights = AttentionLayer(name='attention')(
            [context_after_dense, context_valid_mask])

        # "Decode": Now we use another dense layer to get the target word embedding from each code vector.
        target_index = Dense(
            self.vocabs.target_vocab.size, use_bias=False, activation='softmax', name='target_index')(code_vectors)

        # Wrap the layers into a Keras model, using our subtoken-metrics and the CE loss.
        inputs = [path_source_token_input, path_input, path_target_token_input, context_valid_mask]
        self.keras_train_model = keras.Model(inputs=inputs, outputs=target_index)

        # Actual target word predictions (as strings). Used as a second output layer.
        # Used for predict() and for the evaluation metrics calculations.
        topk_predicted_words, topk_predicted_words_scores = TopKWordPredictionsLayer(
            self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION,
            self.vocabs.target_vocab.get_index_to_word_lookup_table(),
            name='target_string')(target_index)

        # We use another dedicated Keras model for evaluation.
        # The evaluation model outputs the `topk_predicted_words` as a 2nd output.
        # The separation between train and eval models is for efficiency.
        self.keras_eval_model = keras.Model(
            inputs=inputs, outputs=[target_index, topk_predicted_words], name="code2vec-keras-model")

        # We use a dedicated Keras function to produce predictions.
        # It has additional outputs beyond those of the original model.
        # It is built from the trained layers of the original model and uses their weights.
        predict_outputs = tuple(KerasPredictionModelOutput(
            target_index=target_index, code_vectors=code_vectors, attention_weights=attention_weights,
            topk_predicted_words=topk_predicted_words, topk_predicted_words_scores=topk_predicted_words_scores))
        self.keras_model_predict_function = K.function(inputs=inputs, outputs=predict_outputs)
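A hypothetical usage sketch for the prediction function built above (the input array names are assumptions; each array is expected to have shape (batch_size, MAX_CONTEXTS), and the outputs come back in the order the tuple above was constructed):

# Hypothetical input arrays, each of shape (batch_size, MAX_CONTEXTS):
outputs = self.keras_model_predict_function(
    [source_token_ids, path_ids, target_token_ids, valid_context_mask])
target_probs, code_vectors, attention_weights, topk_words, topk_scores = outputs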