Python tensorflow.keras.backend.get_session() Examples

The following are 24 code examples of tensorflow.keras.backend.get_session(), collected from open-source projects; the originating source file, project, and license are listed above each example. get_session() returns the TensorFlow session that the Keras backend uses in TensorFlow 1.x graph mode (in TensorFlow 2.x it is only available through the compatibility layer as tf.compat.v1.keras.backend.get_session()). You may also want to check out all available functions/classes of the module tensorflow.keras.backend.
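As a quick orientation before the project examples, here is a minimal sketch (assuming TensorFlow 1.x in graph mode; the tiny model and shapes are made up for illustration) that grabs the backend session and evaluates a Keras model's output tensor with it:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import layers, models

# build a tiny Keras model; its variables live in the backend's default graph
model = models.Sequential([layers.Dense(4, activation='relu', input_shape=(3,))])

# K.get_session() returns the tf.Session the backend uses (initializing any
# still-uninitialized variables along the way)
sess = K.get_session()

# evaluate the model's output tensor directly instead of calling model.predict()
x = np.random.rand(2, 3).astype('float32')
y = sess.run(model.output, feed_dict={model.input: x})
print(y.shape)  # (2, 4)

The same session handle is what the examples below hand to graph-freezing, SavedModel export, and metric-initialization utilities.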
Example #1
Source File: save_tfs_model.py    From image-quality-assessment with Apache License 2.0
def main(base_model_name, weights_file, export_path):
    # Load model and weights
    nima = Nima(base_model_name, weights=None)
    nima.build()
    nima.nima_model.load_weights(weights_file)

    # Tell keras that this will be used for making predictions
    K.set_learning_phase(0)

    # CustomObject required by MobileNet
    with CustomObjectScope({'relu6': relu6, 'DepthwiseConv2D': DepthwiseConv2D}):
        builder = saved_model_builder.SavedModelBuilder(export_path)
        signature = predict_signature_def(
            inputs={'input_image': nima.nima_model.input},
            outputs={'quality_prediction': nima.nima_model.output}
        )

        builder.add_meta_graph_and_variables(
            sess=K.get_session(),
            tags=[tag_constants.SERVING],
            signature_def_map={'image_quality': signature}
        )
        builder.save()

    print(f'TF model exported to: {export_path}') 
Example #2
Source File: freeze_model.py    From EfficientDet with Apache License 2.0
def main():
    phi = 1
    weighted_bifpn = False
    model_path = 'checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5'
    image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
    image_size = image_sizes[phi]
    classes = [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair',
        'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
    ]
    num_classes = len(classes)
    score_threshold = 0.5
    model, prediction_model = efficientdet(phi=phi,
                                           weighted_bifpn=weighted_bifpn,
                                           num_classes=num_classes,
                                           score_threshold=score_threshold)
    prediction_model.load_weights(model_path, by_name=True)
    
    frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in prediction_model.outputs])
    tf.train.write_graph(frozen_graph, "./checkpoints/2019-12-03/", "pascal_05.pb", as_text=False) 
Example #3
Source File: asr_solver.py    From delta with Apache License 2.0
def infer(self, yield_single_examples=False):
    ''' only for infer '''
    #load data
    mode = utils.INFER
    # data must be init before model build
    infer_ds, infer_task = self.input_data(mode=mode)
    infer_gen = tf.data.make_one_shot_iterator(infer_ds)

    self.model_fn(mode=mode)
    assert self._built

    #load model
    infer_func = self.get_metric_func()

    for _ in range(len(infer_task)):
      batch_data = tf.keras.backend.get_session().run(infer_gen.get_next()[0])
      batch_input = batch_data['inputs']
      batch_uttid = batch_data['uttids'].tolist()
      batch_predict = infer_func(batch_input)[0]
      batch_decode = py_ctc.ctc_greedy_decode(batch_predict, 0, unique=True)
      for utt_index, uttid in enumerate(batch_uttid):
        logging.info("utt ID: {}".format(uttid))
        logging.info("infer result: {}".format(batch_decode[utt_index])) 
Example #4
Source File: model.py    From keras-mobile-detectnet with MIT License
def __init__(self, model, shape):
        shape = (None, shape[0], shape[1], shape[2])
        x_name = 'image_tensor_x'
        with K.get_session() as sess:
            x_tensor = tf.placeholder(tf.float32, shape, x_name)
            K.set_learning_phase(0)
            y_tensor = model(x_tensor)
            y_name = [y_tensor[-1].name[:-2], y_tensor[-2].name[:-2]]
            graph = sess.graph.as_graph_def()
            graph0 = tf.graph_util.convert_variables_to_constants(sess, graph, y_name)
            graph1 = tf.graph_util.remove_training_nodes(graph0)

        self.x_name = [x_name]
        self.y_name = y_name
        self.frozen = graph1
        self.model = model 
Example #5
Source File: private_model.py    From tf-encrypted with Apache License 2.0
def secure_model(model, **kwargs):
    """Secure a plaintext model from the current session."""
    session = K.get_session()
    min_graph = graph_util.convert_variables_to_constants(
        session, session.graph_def, [node.op.name for node in model.outputs]
    )
    graph_fname = "model.pb"
    tf.train.write_graph(min_graph, _TMPDIR, graph_fname, as_text=False)

    if "batch_size" in kwargs:
        batch_size = kwargs.pop("batch_size")
    else:
        batch_size = 1

    graph_def, inputs = load_graph(
        os.path.join(_TMPDIR, graph_fname), batch_size=batch_size
    )

    c = tfe.convert.convert.Converter(tfe.convert.registry(), **kwargs)
    y = c.convert(remove_training_nodes(graph_def), "input-provider", inputs)

    return PrivateModel(y) 
Example #6
Source File: model.py    From kryptoflow with GNU General Public License v3.0
def store(self, name: str='nn') -> None:
        os.makedirs(self.model_path, exist_ok=True)
        with K.get_session() as sess:
            self._store_keras(name)
            self._store_tf(name, sess) 
Example #7
Source File: predictor.py    From Deep-Channel with MIT License
def auc(y_true, y_pred):
    # tf.metrics.auc returns (value, update_op); returning the update op lets the
    # metric accumulate across batches (TF 1.x streaming metric)
    auc = tf.metrics.auc(y_true, y_pred)[1]
    # the streaming metric creates local variables that have to be initialized
    K.get_session().run(tf.local_variables_initializer())
    return auc 
Example #8
Source File: postprocess.py    From keras-YOLOv3-model-set with MIT License
def yolo2_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLOv2 model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo2_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo2_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    # max_boxes has to be a variable (not a constant) so it can be initialized below
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes 
Example #9
Source File: run_clinicnet.py    From CDSS with GNU General Public License v3.0
def auc(y_true, y_pred):
    # use the TF 1.x streaming metric: tf.metrics.auc returns (value, update_op)
    # (tf.keras.metrics.AUC is a class and cannot be called with (y_true, y_pred))
    auc = tf.metrics.auc(y_true, y_pred)[1]
    # initialize the local variables the streaming metric creates
    K.get_session().run(tf.local_variables_initializer())
    return auc 
Example #10
Source File: utils.py    From neuron with GNU General Public License v3.0
def reset_weights(model, session=None):
    """
    reset weights of model with the appropriate initializer.
    Note: only uses "kernel_initializer" and "bias_initializer"
    does not close session.

    Reference:
    https://www.codementor.io/nitinsurya/how-to-re-initialize-keras-model-weights-et41zre2g

    Parameters:
        model: keras model to reset
        session (optional): the current session
    """

    if session is None:
        session = K.get_session()

    for layer in model.layers: 
        reset = False
        if hasattr(layer, 'kernel_initializer'):
            layer.kernel.initializer.run(session=session)
            reset = True
        
        if hasattr(layer, 'bias_initializer'):
            layer.bias.initializer.run(session=session)
            reset = True
        
        if not reset:
            print('Could not find initializer for layer %s, skipping' % layer.name) 
Example #11
Source File: __init__.py    From BERT-keras with GNU General Public License v3.0
def tpu_compatible():
    '''Work around problems encountered when using the Keras TPU model.'''
    if not hasattr(tpu_compatible, 'once'):
        tpu_compatible.once = True
    else:
        return
    import tensorflow as tf
    import tensorflow.keras.backend as K
    _version = tf.__version__.split('.')
    is_correct_version = int(_version[0]) >= 1 and (int(_version[0]) >= 2 or int(_version[1]) >= 13)
    from tensorflow.contrib.tpu.python.tpu.keras_support import KerasTPUModel
    def initialize_uninitialized_variables():
        sess = K.get_session()
        uninitialized_variables = set([i.decode('ascii') for i in sess.run(tf.report_uninitialized_variables())])
        init_op = tf.variables_initializer(
            [v for v in tf.global_variables() if v.name.split(':')[0] in uninitialized_variables]
        )
        sess.run(init_op)

    _tpu_compile = KerasTPUModel.compile

    def tpu_compile(self,
                    optimizer,
                    loss=None,
                    metrics=None,
                    loss_weights=None,
                    sample_weight_mode=None,
                    weighted_metrics=None,
                    target_tensors=None,
                    **kwargs):
        if not is_correct_version:
            raise ValueError('You need tensorflow >= 1.13 for better keras tpu support!')
        _tpu_compile(self, optimizer, loss, metrics, loss_weights,
                     sample_weight_mode, weighted_metrics,
                     target_tensors, **kwargs)
        initialize_uninitialized_variables()  # for unknown reason, we should run this after compile sometimes

    KerasTPUModel.compile = tpu_compile 
Example #12
Source File: asr_solver.py    From delta with Apache License 2.0
def eval(self):
    ''' only eval'''
    #get eval dataset
    # data must be init before model build
    logging.info("make Task")
    eval_ds, eval_task = self.input_data(mode=utils.EVAL)
    eval_gen = tf.data.make_one_shot_iterator(eval_ds)

    logging.info("build Model")
    #get eval model
    self.model_fn(mode=utils.EVAL)
    assert self._built

    #load model
    eval_func = self.get_metric_func()

    target_seq_list, predict_seq_list = [], []
    for _ in range(len(eval_task)):
      batch_data = tf.keras.backend.get_session().run(eval_gen.get_next()[0])

      batch_input = batch_data['inputs']
      batch_target = batch_data['targets'].tolist()

      batch_predict = eval_func(batch_input)[0]

      batch_decode = py_ctc.ctc_greedy_decode(batch_predict, 0, unique=True)

      target_seq_list += batch_target
      predict_seq_list += batch_decode

    token_errors = metrics_lib.token_error(
        predict_seq_list=predict_seq_list,
        target_seq_list=target_seq_list,
        eos_id=0)
    logging.info("eval finish!")
    logging.info("Token Error: {}".format(token_errors)) 
Example #13
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_flatten(filename, input_shape):
    model = Sequential()
    model.add(Flatten(input_shape=input_shape[1:]))
    model.predict(np.random.uniform(size=input_shape))

    sess = K.get_session()
    output = model.get_layer("flatten").output

    return export(output, filename, sess=sess) 
Example #14
Source File: mnist_cifar_models.py    From CROWN-IBP with BSD 2-Clause "Simplified" License
def get_gradient(self, data, sess = None):
        if sess is None:
            sess = K.get_session()
        # initialize all un initialized variables
        # sess.run(tf.variables_initializer([v for v in tf.global_variables() if v.name.split(':')[0] in set(sess.run(tf.report_uninitialized_variables()))]))
        evaluated_gradients = []
        for g in self.gradients:
            evaluated_gradients.append(sess.run(g, feed_dict={self.model.input:data}))
        return evaluated_gradients 
Example #15
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_split_edge_case(filename, input_shape):
    model, _ = _keras_model_core(split_edge_case_builder, shape=input_shape)

    sess = K.get_session()
    output = model.output
    return export(output, filename, sess=sess) 
Example #16
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_keras_global_maxpool(filename, input_shape):
    model, _ = _keras_global_maxpool_core(shape=input_shape)

    sess = K.get_session()
    output = model.get_layer("global_max_pooling2d").output
    return export(output, filename, sess=sess) 
Example #17
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_keras_global_avgpool(filename, input_shape):
    model, _ = _keras_global_avgpool_core(shape=input_shape)

    sess = K.get_session()
    output = model.get_layer("global_average_pooling2d").output
    return export(output, filename, sess=sess) 
Example #18
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_keras_batchnorm(filename, input_shape):
    model, _ = _keras_batchnorm_core(shape=input_shape)

    sess = K.get_session()
    output = model.get_layer("batch_normalization").output
    return export(output, filename, sess=sess) 
Example #19
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_keras_dense(filename, input_shape):
    model, _ = _keras_dense_core(shape=input_shape)

    sess = K.get_session()
    output = model.get_layer("dense").output
    return export(output, filename, sess=sess) 
Example #20
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_keras_conv2d(filename, input_shape):
    model, _ = _keras_conv2d_core(shape=input_shape)

    sess = K.get_session()
    output = model.get_layer("conv2d").output
    return export(output, filename, sess=sess) 
Example #21
Source File: convert_test.py    From tf-encrypted with Apache License 2.0
def export_keras_multilayer(filename, input_shape):
    model, _ = _keras_model_core(keras_multilayer_builder, shape=input_shape)

    sess = K.get_session()
    output = model.output
    return export(output, filename, sess=sess) 
Example #22
Source File: main.py    From tf-encrypted with Apache License 2.0
def train(train_x_df, train_y_df):
    """Train a logistic regressor on the dataset."""
    x = list(train_x_df.columns.values)
    model = build_model(len(x))

    os.makedirs("./saved_models", exist_ok=True)

    cp_callback = keras.callbacks.ModelCheckpoint(
        checkpoint_path, save_weights_only=True, save_best_only=True, verbose=1
    )

    # first 80 percent for training
    train_x = train_x_df[1:246005]
    train_y = train_y_df[1:246005]

    # other 20 percent for evaluating
    eval_x = train_x_df[246006 : len(train_x_df) - 1]
    eval_y = train_y_df[246006 : len(train_y_df) - 1]

    # train model
    model.fit(
        train_x,
        train_y,
        epochs=epochs,
        validation_split=0.2,
        verbose=0,
        batch_size=batch_size,
        callbacks=[cp_callback],
    )

    print("done training")

    # get the default session and graph for exporting and calculating the AUC
    sess = K.get_session()
    graph = sess.graph

    # export the graph to a protobuf file for loading in tfe and secure enclave
    export_to_pb(
        sess,
        graph.get_tensor_by_name("dense/Sigmoid:0"),
        "house_credit_default.pb",
    )

    # evaluate the model using AUC, the metric used in the kaggle competition
    loss = model.evaluate(eval_x, eval_y, batch_size=batch_size)

    predictions = model.predict(eval_x, batch_size=batch_size)
    auc = tf.metrics.auc(eval_y, predictions)

    print("Evaluation Loss:", loss[0])
    print("Accuracy:", loss[1])

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    print("AUC: ", sess.run([auc])[0][1]) 
Example #23
Source File: train.py    From MultiPlanarUNet with MIT License
def run(project_dir, gpu_mon, logger, args):
    """
    Runs training of a model in a mpunet project directory.

    Args:
        project_dir: A path to a mpunet project
        gpu_mon: An initialized GPUMonitor object
        logger: A mpunet logging object
        args: argparse arguments
    """
    # Read in hyperparameters from YAML file
    from mpunet.hyperparameters import YAMLHParams
    hparams = YAMLHParams(project_dir + "/train_hparams.yaml", logger=logger)
    validate_hparams(hparams)

    # Wait for PID to terminate before continuing?
    if args.wait_for:
        from mpunet.utils import await_PIDs
        await_PIDs(args.wait_for)

    # Prepare sequence generators and potential model specific hparam changes
    train, val, hparams = get_data_sequences(project_dir=project_dir,
                                             hparams=hparams,
                                             logger=logger,
                                             args=args)

    # Set GPU visibility and create model with MirroredStrategy
    set_gpu(gpu_mon, args)
    import tensorflow as tf
    with tf.distribute.MirroredStrategy().scope():
        model = get_model(project_dir=project_dir, train_seq=train,
                          hparams=hparams, logger=logger, args=args)

        # Get trainer and compile model
        from mpunet.train import Trainer
        trainer = Trainer(model, logger=logger)
        trainer.compile_model(n_classes=hparams["build"].get("n_classes"),
                              reduction=tf.keras.losses.Reduction.NONE,
                              **hparams["fit"])

    # Debug mode?
    if args.debug:
        from tensorflow.python import debug as tfdbg
        from tensorflow.keras import backend as K
        K.set_session(tfdbg.LocalCLIDebugWrapperSession(K.get_session()))

    # Fit the model
    _ = trainer.fit(train=train, val=val,
                    train_im_per_epoch=args.train_images_per_epoch,
                    val_im_per_epoch=args.val_images_per_epoch,
                    hparams=hparams, no_im=args.no_images, **hparams["fit"])
    save_final_weights(model, project_dir, logger) 
Example #24
Source File: asr_solver.py    From delta with Apache License 2.0
def train_and_eval(self):
    ''' train and eval '''
    # data must be init before model build
    #backend_sess = K.get_session()
    train_ds, train_task = self.input_data(mode=utils.TRAIN)
    #train_gen = self.input_generator(tf.data.make_one_shot_iterator(train_ds), train_task, backend_sess, mode=utils.TRAIN)
    eval_ds, eval_task = self.input_data(mode=utils.EVAL)
    #eval_gen = self.input_generator(tf.data.make_one_shot_iterator(eval_ds), eval_task, backend_sess, mode=utils.EVAL)

    self.model_fn(mode=utils.TRAIN)
    assert self._built

    callbacks = self.get_callbacks(
        eval_ds, eval_task, monitor_used=self._monitor_used)

    try:
      # Run training
      self.active_model.fit_generator(
          train_task,
          steps_per_epoch=len(train_task),
          epochs=self._num_epochs,
          verbose=1,
          callbacks=callbacks,
          validation_data=eval_task,
          validation_steps=len(eval_task),
          validation_freq=1,
          class_weight=None,
          max_queue_size=100,
          workers=4,
          use_multiprocessing=False,
          shuffle=True,
          initial_epoch=self._init_epoch)
      #save model
      # not work for subclassed model, using tf.keras.experimental.export_saved_model
      #self.save_model()

    except (Exception, ArithmeticError) as err:  #pylint: disable=broad-except
      template = "An exception of type {0} occurred. Arguments:\n{1!r}"
      message = template.format(type(err).__name__, err.args)
      logging.error(message)
      raise err

    finally:
      # Clear memory
      K.clear_session()
      logging.info("Ending time: {}".format(
          datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
