Python keras.backend() Examples

The following are code examples for showing how to use keras.backend(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: keras_pickle_wrapper   Author: wwoods   File: __init__.py    MIT License 6 votes vote down vote up
def _reduce_ex(self, protocol=None):
    """Pickle support for Keras models: serialize through a temp HDF5 file.

    Saves the model with ``self.save`` into a temp file under ``_SHM_DIR``,
    optionally clears the Keras session, then returns a ``(callable, args)``
    pair that :mod:`pickle` uses to rebuild the model via ``_load_model``
    from the raw HDF5 bytes.

    :param protocol: pickle protocol; accepted for signature compatibility
        with ``__reduce_ex__`` but unused here.
    """
    _keras_pickle_pid_check()

    # delete=False: the path must stay valid after close() so Keras can
    # write to it; it is removed explicitly in the finally block below.
    ofile = tempfile.NamedTemporaryFile(dir=_SHM_DIR, suffix='.h5',
            delete=False)
    try:
        ofile.close()
        self.save(ofile.name)
        # Before reading it out, clear the session if we're supposed to
        if _reduce_should_clear_session[0]:
            # Drop our reference first so clear_session can free the model.
            del self
            if hasattr(keras.backend, 'clear_session'):
                keras.backend.clear_session()
        with open(ofile.name, 'rb') as f:
            # HDF5 bytes are wrapped in a one-element list for _load_model.
            return (_load_model, ([f.read()],))
    finally:
        os.unlink(ofile.name)
# Please use KerasWrapper instead (see below)
#keras.models.Model.__reduce_ex__ = _reduce_ex 
Example 2
Project: sleep-convolutions-tf   Author: cliffordlab   File: model.py    MIT License 6 votes vote down vote up
def ckpt_to_keras(checkpoint, modelfile, **params):
    """Convert a TensorFlow checkpoint into a saved Keras model file.

    Rebuilds the model graph with ``build_model``, restores the weights
    from ``checkpoint`` inside a fresh TF session, and saves the result
    to ``modelfile`` (Keras HDF5 format).

    :param checkpoint: path of the TF checkpoint to restore.
    :param modelfile: destination path for the saved Keras model.
    :param params: extra keyword args forwarded to ``build_model``;
        dashes in key names are normalized to underscores first.
    """
    import h5py
    import keras
    import keras.backend as K
    # Window length in samples: sampling rate * window duration.
    seq_len = int(D.sr*D.dt)
    params = {k.replace('-', '_'): v for k, v in params.items()}
    with tf.Graph().as_default():
        # One Keras Input per recorded channel.
        features = {
            tp: keras.Input(shape=(seq_len,), name=tp)
            for tp in D.channels
        }
        model, _ = build_model(features, print_summary=True, **params)
        saver = tf.train.Saver()
        with tf.Session() as S:
            saver.restore(S, checkpoint)
            # Keep Keras from re-initializing the variables we just restored
            # before the model is saved.
            K.manual_variable_initialization(True)
            model.save(modelfile) 
Example 3
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_feature_extractor.py    MIT License 6 votes vote down vote up
def _fit(self, sample_weight):
        """Fit the wrapped Keras model on the cached training data.

        Runs TF global-variable initialization inside this extractor's
        session, then delegates to ``self.model_.fit``.

        :param sample_weight: per-sample weights forwarded to Keras.
        :return: self, enabling sklearn-style chaining.
        """
        import tensorflow as tf
        import keras
        callbacks = None  # TODO maybe add?
        with self.sess_.as_default():
            # Initialize variables in the session Keras is actually using.
            keras.backend.get_session().run(
                tf.global_variables_initializer()
            )
            self.model_.fit(
                    x=self.X_,
                    y=self.y_,
                    batch_size=self.batch_size,
                    epochs=self.max_iter,
                    verbose=self.verbose,
                    callbacks=callbacks,
                    shuffle=self.shuffle,
                    class_weight=self.class_weight,
                    sample_weight=sample_weight,
            )
        return self 
Example 4
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_feature_extractor_complex.py    MIT License 6 votes vote down vote up
def _fit(self, sample_weight):
        """Fit the wrapped Keras model on the cached training data.

        Runs TF global-variable initialization inside this extractor's
        session, then delegates to ``self.model_.fit``.

        :param sample_weight: per-sample weights forwarded to Keras.
        :return: self, enabling sklearn-style chaining.
        """
        import tensorflow as tf
        import keras
        callbacks = None  # TODO
        with self.sess_.as_default():
            # Initialize variables in the session Keras is actually using.
            keras.backend.get_session().run(
                tf.global_variables_initializer()
            )
            self.model_.fit(
                    x=self.X_,
                    y=self.y_,
                    batch_size=self.batch_size,
                    epochs=self.max_iter,
                    verbose=self.verbose,
                    callbacks=callbacks,
                    shuffle=self.shuffle,
                    class_weight=self.class_weight,
                    sample_weight=sample_weight,
            )
        return self 
Example 5
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_pretrained_feature_extractor.py    MIT License 6 votes vote down vote up
def _fit(self, sample_weight):
        """Train the underlying Keras model on the cached X_/y_ data.

        :param sample_weight: optional per-sample weights for Keras.
        :return: self (sklearn convention).
        """
        import tensorflow as tf
        import keras
        callbacks = None  # TODO
        with self.sess_.as_default():
            session = keras.backend.get_session()
            session.run(tf.global_variables_initializer())
            fit_args = dict(
                x=self.X_,
                y=self.y_,
                batch_size=self.batch_size,
                epochs=self.max_iter,
                verbose=self.verbose,
                callbacks=callbacks,
                shuffle=self.shuffle,
                class_weight=self.class_weight,
                sample_weight=sample_weight,
            )
            self.model_.fit(**fit_args)
        return self
Example 6
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_pretrained_feature_extractor_norb.py    MIT License 6 votes vote down vote up
def _fit(self, sample_weight):
        """Train the underlying Keras model on the cached X_/y_ data.

        :param sample_weight: optional per-sample weights for Keras.
        :return: self (sklearn convention).
        """
        import tensorflow as tf
        import keras
        callbacks = None  # TODO maybe add?
        with self.sess_.as_default():
            backend_session = keras.backend.get_session()
            init_op = tf.global_variables_initializer()
            backend_session.run(init_op)
            self.model_.fit(x=self.X_,
                            y=self.y_,
                            batch_size=self.batch_size,
                            epochs=self.max_iter,
                            verbose=self.verbose,
                            callbacks=callbacks,
                            shuffle=self.shuffle,
                            class_weight=self.class_weight,
                            sample_weight=sample_weight)
        return self
Example 7
Project: typhon   Author: atmtools   File: qrnn.py    MIT License 6 votes vote down vote up
def on_epoch_end(self, epoch, logs={}):
        """Keras callback hook: decay the learning rate when val_loss stalls.

        Tracks validation loss per epoch; after ``convergence_steps`` epochs
        without improvement, divides the optimizer's lr by ``lr_decay`` and,
        once lr drops below ``lr_minimum``, stops training.

        :param epoch: index of the epoch that just finished (unused).
        :param logs: Keras metrics dict; only 'val_loss' is read (the mutable
            default is kept for signature compatibility and never mutated).
        """
        self.losses += [logs.get('val_loss')]
        # No improvement this epoch -> count one more stalled step.
        if not self.losses[-1] < self.min_loss:
            self.steps = self.steps + 1
        else:
            self.steps = 0
        if self.steps > self.convergence_steps:
            lr = keras.backend.get_value(self.model.optimizer.lr)
            new_lr = lr / self.lr_decay
            keras.backend.set_value(self.model.optimizer.lr, new_lr)
            self.steps = 0
            # Bug fix: previously logged the pre-decay value as the
            # "reduced" learning rate.
            logger.info("\n Reduced learning rate to " + str(new_lr))

            if lr < self.lr_minimum:
                self.model.stop_training = True

        self.min_loss = min(self.min_loss, self.losses[-1])

################################################################################
# QRNN
################################################################################ 
Example 8
Project: EasyPR-python   Author: SunskyF   File: model.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A Sub-class of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        if mode == 'training':
            import keras.backend.tensorflow_backend as KTF
            # Bug fix: the TF session config previously reused the name
            # `config`, shadowing the Config parameter — self.config and
            # build() then received a tf.ConfigProto instead of the model
            # configuration object.
            tf_config = tf.ConfigProto()
            # Grow GPU memory on demand instead of grabbing it all upfront.
            tf_config.gpu_options.allow_growth = True
            session = tf.Session(config=tf_config)
            KTF.set_session(session)
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)
Example 9
Project: imgclsmob   Author: osmr   File: preresnet.py    MIT License 6 votes vote down vote up
def preres_activation(x,
                      name="preres_activation"):
    """
    PreResNet final pre-activation stage: batch normalization followed by
    ReLU, used on its own (no trailing convolution) as the last block.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    name : str, default 'preres_activation'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = batchnorm(x=x, name=name + "/bn")
    out = nn.Activation("relu", name=name + "/activ")(out)
    return out
Example 10
Project: Variational-AutoEncoder-For-Novelty-Detection   Author: LordAlucard90   File: helper.py    GNU General Public License v3.0 6 votes vote down vote up
def _train_model(self, vae, hidden, reg_val=None, drp_val=None):
        """Train one model configuration and keep only its best weights.

        Skips training if a `<name>_BestW.hdf5` already exists. Otherwise
        trains with checkpointing into ``self.bests_tmp_dir``, moves the
        best checkpoint into ``self.models_dir``, saves the history, and
        removes leftover temp checkpoints.

        :param vae: whether to build a variational autoencoder.
        :param hidden: hidden-layer specification for ModelGenerator.
        :param reg_val: optional regularization value.
        :param drp_val: optional dropout value.
        """
        keras.backend.clear_session()

        m_generator = ModelGenerator(vae=vae, hidden=hidden, reg_val=reg_val, drp_val=drp_val)
        name = m_generator.get_name()

        if not exists(join(self.models_dir, '{}_BestW.hdf5'.format(name))):
            checkpointer = ModelCheckpoint(
                filepath=join(self.bests_tmp_dir, name + '_Wep{epoch:03d}_loss{val_loss:.5f}.hdf5'),
                verbose=0,
                save_best_only=True)

            model, history = m_generator.train(epochs=self.epochs, train=self.Trn, validation=self.Val,
                                               callbacks=[checkpointer])

            def is_checkpoint(w):
                # Checkpoint files written for this model configuration.
                return w.startswith(name) and w.endswith('.hdf5')

            save_name = join(self.models_dir, name)
            # Checkpoints sort lexicographically by epoch/loss; last is best.
            best = sorted(filter(is_checkpoint, listdir(self.bests_tmp_dir)))[-1]
            move(join(self.bests_tmp_dir, best), '{}_BestW.hdf5'.format(save_name))
            np.save('{}.npy'.format(save_name), history)
            # Bug fix: cleanup previously listed the literal path
            # 'bests_tmp' instead of self.bests_tmp_dir.
            for file in filter(is_checkpoint, listdir(self.bests_tmp_dir)):
                remove(join(self.bests_tmp_dir, file))
Example 11
Project: MLL-machine-learning-language   Author: riccardobernardi   File: utils.py    MIT License 6 votes vote down vote up
def get_keras_layers() -> dict:
    """Collect the public attribute names of the keras ``models``,
    ``layers`` and ``backend`` modules, keyed by module category.

    Dunder-style entries and the conventional backend alias "K" are
    excluded from every set.
    """
    sources = {
        "models": models,
        "layers": layers,
        "backend": backend,
    }
    return {
        category: {k for k in module.__dict__ if "__" not in k and k != "K"}
        for category, module in sources.items()
    }
Example 12
Project: MLL-machine-learning-language   Author: riccardobernardi   File: utils.py    MIT License 6 votes vote down vote up
def get_keras_layers() -> dict:
    """Return a mapping of 'models'/'layers'/'backend' to the sets of
    public names each keras module exposes.
    """
    def public_names(module):
        # Skip dunder-ish entries and the conventional backend alias "K".
        return {name for name in module.__dict__.keys()
                if "__" not in name and name != "K"}

    return {
        "models": public_names(models),
        "layers": public_names(layers),
        "backend": public_names(backend),
    }
Example 13
Project: MatchZoo   Author: NTMC-Community   File: base_model.py    Apache License 2.0 6 votes vote down vote up
def compile(self):
        """
        Compile model for training.

        Only `keras` native metrics are compiled together with backend.
        MatchZoo metrics are evaluated only through :meth:`evaluate`.
        Notice that `keras` count `loss` as one of the metrics while MatchZoo
        :class:`matchzoo.engine.BaseTask` does not.

        Examples:
            >>> from matchzoo import models
            >>> model = models.Naive()
            >>> model.guess_and_fill_missing_params(verbose=0)
            >>> model.params['task'].metrics = ['mse', 'map']
            >>> model.params['task'].metrics
            ['mse', mean_average_precision(0.0)]
            >>> model.build()
            >>> model.compile()

        """
        optimizer = self._params['optimizer']
        loss = self._params['task'].loss
        self._backend.compile(optimizer=optimizer, loss=loss)
Example 14
Project: gandlf   Author: codekansas   File: models.py    MIT License 5 votes vote down vote up
def _sort_weights_by_name(self, weights):
        """Sort the given weights in place by name and return the list."""

        if not weights:
            return []

        if K.backend() == 'theano':
            def name_key(weight):
                # Theano variables may be anonymous; fall back to auto_name.
                return weight.name if weight.name else weight.auto_name
        else:
            def name_key(weight):
                return weight.name

        weights.sort(key=name_key)
        return weights
Example 15
Project: keras-wrn   Author: EricAlcaide   File: wrn.py    MIT License 5 votes vote down vote up
def build_model(input_dims, output_dim, n, k, act= "relu", dropout=None):
    """Build a Wide Residual Network (WRN-N-K). Params:
            - input_dims: input dimensions for the model
            - output_dim: output dimensions for the model
            - n: depth N; must satisfy (N-4) % 6 == 0
            - k: widening factor K; must satisfy K % 2 == 0
            - act: activation function - default=relu. Build your custom
                   one with keras.backend (ex: swish, e-swish)
            - dropout: dropout rate - default=None (not recomended >0.3)
    """
    # Validate the WRN-N-K constraints, then derive blocks per group.
    assert (n - 4) % 6 == 0
    assert k % 2 == 0
    blocks_per_group = (n - 4) // 6

    # Symbolic input tensor for the model.
    inputs = Input(shape=(input_dims))

    # Stem: a single 3x3 convolution with BN + ReLU.
    net = Conv2D(16, (3,3), padding="same")(inputs)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)

    # Three residual groups of increasing width; groups 1-2 downsample.
    net = main_block(net, 16 * k, blocks_per_group, (1,1), dropout)
    net = main_block(net, 32 * k, blocks_per_group, (2,2), dropout)
    net = main_block(net, 64 * k, blocks_per_group, (2,2), dropout)

    # Classifier head: global 8x8 pooling, flatten, softmax.
    net = AveragePooling2D((8,8))(net)
    net = Flatten()(net)
    outputs = Dense(output_dim, activation="softmax")(net)

    return Model(inputs=inputs, outputs=outputs)
Example 16
Project: Neural_Temporality_Adaptation   Author: xiaoleihuang   File: model_helper.py    Apache License 2.0 5 votes vote down vote up
def mask_layer(inputs):
    """
    This function will be applied to Lambda, only select the domain features
    (x_d, 0, 0)
    :param inputs:
    :return:
    """
    # NOTE(review): keras.backend.squeeze requires an explicit ``axis``
    # argument, and ``inputs[:-1]`` passes a Python list slice as the second
    # operand of K.dot — as written this call likely raises a TypeError.
    # Confirm the intended axis and operands against the calling Lambda.
    return keras.backend.squeeze(keras.backend.dot(inputs[-1], inputs[:-1])) 
Example 17
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_feature_extractor.py    MIT License 5 votes vote down vote up
def transform(self, X):
        """Extract penultimate-layer features for X in batches.

        Builds a backend function that evaluates the second-to-last layer
        of the fitted model and applies it over X in ``batch_size`` chunks
        (plus one trailing partial chunk), flattening each sample's
        features.

        :param X: array-like of inputs accepted by the model.
        :return: 2-D numpy array, one flattened feature row per sample.
        """
        import keras
        import keras.backend
        K = keras.backend
        sklearn.utils.validation.check_is_fitted(self, ["X_", "y_"])
        # Assumes sequential
        with self.sess_.as_default():
            print("Using outputs layer: {}".format(self.model_.layers[-2]))
            features = K.function([self.model_.input],
                                  [self.model_.layers[-2].output])

            def extract(batch):
                # One forward pass; flatten per-sample feature maps.
                f = features([batch])
                assert len(f) == 1
                f = f[0]
                return f.reshape(len(f), -1)

            outs = []
            if self.batch_size <= len(X):
                # Full batches, then one trailing partial batch if needed.
                i = 0
                for i in range(0, len(X) - self.batch_size + 1, self.batch_size):
                    outs.append(extract(X[i:i + self.batch_size]))
                i = i + self.batch_size
                if i != len(X):
                    assert i < len(X)
                    outs.append(extract(X[i:]))
            else:
                # Fewer samples than one batch: single pass over all of X.
                outs.append(extract(X[:]))
            return np.concatenate(outs, axis=0)
Example 18
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_feature_extractor.py    MIT License 5 votes vote down vote up
def __del__(self):
        """Release the Keras session when this extractor is collected."""
        import keras.backend
        if hasattr(self, "sess_"):
            with self.sess_.as_default():
                keras.backend.clear_session()
Example 19
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_feature_extractor_complex.py    MIT License 5 votes vote down vote up
def transform(self, X):
        """Extract penultimate-layer features for X in batches.

        :param X: array-like of model inputs.
        :return: 2-D numpy array, one flattened feature row per sample.
        """
        import keras
        import keras.backend
        import tensorflow as tf
        K = keras.backend
        sklearn.utils.validation.check_is_fitted(self, ["X_", "y_"])
        # Assumes sequential
        with self.sess_.as_default():
            outs = []
            print("Using outputs layer: {}".format(self.model_.layers[-2]))
            # Backend function evaluating the second-to-last layer.
            features = K.function([self.model_.input],
                                  [self.model_.layers[-2].output])
            if self.batch_size <= len(X):
                # Full batches first; i ends at the last full-batch start.
                for i in range(0, len(X) - self.batch_size + 1, self.batch_size):
                    x = X[i:i+self.batch_size]
                    f = features([x])
                    assert len(f) == 1
                    f = f[0]
                    outs.append(f.reshape(len(f), -1))
                # Advance past the last full batch; handle any remainder.
                i = i + self.batch_size
                if i != len(X):
                    assert i < len(X)
                    x = X[i:]
                    f = features([x])
                    assert len(f) == 1
                    f = f[0]
                    outs.append(f.reshape(len(f), -1))
            else:
                # Fewer samples than one batch: single pass over all of X.
                x = X[:]
                f = features([x])
                assert len(f) == 1
                f = f[0]
                outs.append(f.reshape(len(f), -1))
            outs = np.concatenate(outs, axis=0)
            return outs 
Example 20
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_pretrained_feature_extractor.py    MIT License 5 votes vote down vote up
def transform(self, X):
        """Extract final-layer features for X in batches.

        Evaluates the model's last layer (a previous revision used the
        penultimate layer, see the commented line) over X in
        ``batch_size`` chunks and flattens each sample's output.

        :param X: array-like of model inputs.
        :return: 2-D numpy array, one flattened feature row per sample.
        """
        import keras
        import keras.backend
        import tensorflow as tf
        K = keras.backend
        sklearn.utils.validation.check_is_fitted(self, ["X_", "y_"])
        # Assumes sequential
        with self.sess_.as_default():
            outs = []
            # Bug fix: the log previously reported layers[-2] while the
            # feature function below actually uses layers[-1].
            print("Using outputs layer: {}".format(self.model_.layers[-1]))
            features = K.function([self.model_.input],
                                  # [self.model_.layers[-2].output])
                                  [self.model_.layers[-1].output])
            if self.batch_size <= len(X):
                # Full batches first; i ends at the last full-batch start.
                for i in range(0, len(X) - self.batch_size + 1, self.batch_size):
                    x = X[i:i+self.batch_size]
                    f = features([x])
                    assert len(f) == 1
                    f = f[0]
                    outs.append(f.reshape(len(f), -1))
                # Advance past the last full batch; handle any remainder.
                i = i + self.batch_size
                if i != len(X):
                    assert i < len(X)
                    x = X[i:]
                    f = features([x])
                    assert len(f) == 1
                    f = f[0]
                    outs.append(f.reshape(len(f), -1))
            else:
                # Fewer samples than one batch: single pass over all of X.
                x = X[:]
                f = features([x])
                assert len(f) == 1
                f = f[0]
                outs.append(f.reshape(len(f), -1))
            outs = np.concatenate(outs, axis=0)
            return outs
Example 21
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_pretrained_feature_extractor.py    MIT License 5 votes vote down vote up
def __del__(self):
        """Release the Keras session when this extractor is collected.

        Bug fix: removed a leftover dead ``pass`` statement (likely a
        remnant of temporarily disabling the body) from the top of the
        method.
        """
        import keras.backend
        K = keras.backend
        if hasattr(self, "sess_"):
            with self.sess_.as_default():
                K.clear_session()
Example 22
Project: Efficient_Augmentation   Author: mkuchnik   File: pretrained_models.py    MIT License 5 votes vote down vote up
def transform(self, X):
        """Extract penultimate-layer features for X in batches.

        :param X: array-like of model inputs.
        :return: 2-D numpy array, one flattened feature row per sample.
        """
        import keras
        import keras.backend
        import tensorflow as tf
        K = keras.backend
        with self.sess_.as_default():
            outs = []
            # Backend function evaluating the second-to-last layer.
            features = K.function([self.model_.input],
                                  [self.model_.layers[-2].output])
            if self.batch_size <= len(X):
                # Full batches first; i ends at the last full-batch start.
                for i in range(0, len(X) - self.batch_size + 1, self.batch_size):
                    x = X[i:i+self.batch_size]
                    f = features([x])
                    assert len(f) == 1
                    f = f[0]
                    outs.append(f.reshape(len(f), -1))
                # Advance past the last full batch; handle any remainder.
                i = i + self.batch_size
                if i != len(X):
                    x = X[i:]
                    f = features([x])
                    assert len(f) == 1
                    f = f[0]
                    outs.append(f.reshape(len(f), -1))
            else:
                # Fewer samples than one batch: single pass over all of X.
                x = X[:]
                f = features([x])
                assert len(f) == 1
                f = f[0]
                outs.append(f.reshape(len(f), -1))
            outs = np.concatenate(outs, axis=0)
            return outs 
Example 23
Project: Efficient_Augmentation   Author: mkuchnik   File: keras_dnn_pretrained_feature_extractor_norb.py    MIT License 5 votes vote down vote up
def __init__(self, model_filename):
    """Load a pretrained Keras model and capture its backend session.

    :param model_filename: path to a saved Keras model, or None to skip
        loading (a warning is printed instead).
    """
    if model_filename is None:
        print("Warning: model filename is None")
    else:
        pre_model = keras.models.load_model(
            model_filename
        )
        self.model = pre_model
        self.session = keras.backend.get_session()
Example 24
Project: aetros-cli   Author: aetros   File: starter.py    MIT License 5 votes vote down vote up
def start_keras(logger, job_backend):
    """Prepare the environment and launch a Keras training job.

    Forces the TensorFlow backend, records git metadata on the job,
    normalizes Keras image ordering to channels-last, wires up the AETROS
    progress callback, and hands off to ``keras_model_utils.job_start``.

    :param logger: logger used for progress/debug messages.
    :param job_backend: AETROS job backend holding config, git state and
        progress reporting.
    """
    if 'KERAS_BACKEND' not in os.environ:
        os.environ['KERAS_BACKEND'] = 'tensorflow'

    from . import keras_model_utils

    # we need to import keras here, so we know which backend is used (and whether GPU is used)
    os.chdir(job_backend.git.work_tree)
    logger.debug("Start simple model")

    # we use the source from the job commit directly
    with job_backend.git.batch_commit('Git Version'):
        job_backend.set_system_info('git_remote_url', job_backend.git.get_remote_url('origin'))
        job_backend.set_system_info('git_version', job_backend.git.job_id)

    # all our shapes are Tensorflow schema. (height, width, channels)
    import keras.backend
    # Older Keras exposes set_image_dim_ordering; newer exposes
    # set_image_data_format — handle both.
    if hasattr(keras.backend, 'set_image_dim_ordering'):
        keras.backend.set_image_dim_ordering('tf')

    if hasattr(keras.backend, 'set_image_data_format'):
        keras.backend.set_image_data_format('channels_last')

    from .KerasCallback import KerasCallback
    trainer = Trainer(job_backend)
    keras_logger = KerasCallback(job_backend, job_backend.logger)

    # Initialize progress reporting at epoch 0 of the configured total.
    job_backend.progress(0, job_backend.job['config']['epochs'])

    logger.info("Start training")
    keras_model_utils.job_start(job_backend, trainer, keras_logger)

    job_backend.done() 
Example 25
Project: aetros-cli   Author: aetros   File: backend.py    MIT License 5 votes vote down vote up
def on_signusr1(self, signal, frame):
        """SIGUSR1 handler: log a one-line snapshot of backend state."""
        queue_info = [str(i) + ':' + str(len(x))
                      for i, x in six.iteritems(self.client.queues)]
        state = (
            str(self.job_id),
            str(self.running),
            str(self.ended),
            str(self.client.online),
            str(self.client.active),
            str(self.client.registered),
            str(self.client.connected),
            str(queue_info),
            str(self.git.active_thread),
            str(self.git.last_push_time),
        )
        self.logger.warning(
            "USR1: backend job_id=%s (running=%s, ended=%s), client (online=%s, active=%s, registered=%s, "
            "connected=%s, queue=%s), git (active_thread=%s, last_push_time=%s)." % state)
Example 26
Project: aetros-cli   Author: aetros   File: backend.py    MIT License 5 votes vote down vote up
def is_master_process(self):
        """
        Master means that aetros.backend.start_job() has been called without using the command `aetros start`.
        If master is true, we collect and track some data that usually `aetros start` would do and reset the job's
        temp files on the server.
        :return:
        """
        # No AETROS_JOB_ID in the environment means we were not spawned
        # by `aetros start`, so this process is the master.
        return 'AETROS_JOB_ID' not in os.environ
Example 27
Project: aetros-cli   Author: aetros   File: backend.py    MIT License 5 votes vote down vote up
def sync_weights(self, push=True):
        """Commit the latest model weights (and backend metadata) to git.

        Reads the job model's latest weights file, commits it plus a JSON
        sidecar describing the Keras backend/image format, and optionally
        pushes. No-op if the weights file does not exist.

        :param push: whether to push to the remote after committing.
        """

        if not os.path.exists(self.get_job_model().get_weights_filepath_latest()):
            return

        self.logger.debug("sync weights...")
        self.set_status('SYNC WEIGHTS', add_section=False)

        with open(self.get_job_model().get_weights_filepath_latest(), 'rb') as f:
            import keras.backend
            self.git.commit_file('Added weights', 'aetros/weights/latest.hdf5', f.read())

            # image_data_format only exists on newer Keras versions.
            image_data_format = None
            if hasattr(keras.backend, 'set_image_data_format'):
                image_data_format = keras.backend.image_data_format()

            # Sidecar metadata so the weights can be reloaded consistently.
            info = {
                'framework': 'keras',
                'backend': keras.backend.backend(),
                'image_data_format': image_data_format
            }
            self.git.commit_file('Added weights', 'aetros/weights/latest.json', simplejson.dumps(info))
            if push:
                self.git.push()

        # todo, implement optional saving of self.get_job_model().get_weights_filepath_best() 
Example 28
Project: RLContinuousActionSpace   Author: hchkaiban   File: Models.py    MIT License 5 votes vote down vote up
def huber_loss(y_true, y_pred):
    """Huber loss: quadratic for small errors, linear for large ones.

    Bug fix: the original branched with ``if cond == True`` where ``cond``
    is a symbolic tensor — that comparison is elementwise and the Python
    ``if`` cannot select per-element branches (it is either an error or a
    single global branch). ``K.switch`` applies the choice elementwise.

    :param y_true: ground-truth tensor.
    :param y_pred: prediction tensor.
    :return: scalar mean Huber loss over all elements.
    """
    from keras import backend as K
    err = y_true - y_pred

    cond = K.abs(err) <= HUBER_LOSS_DELTA
    squared_loss = 0.5 * K.square(err)
    linear_loss = (0.5 * HUBER_LOSS_DELTA ** 2
                   + HUBER_LOSS_DELTA * (K.abs(err) - HUBER_LOSS_DELTA))
    loss = K.switch(cond, squared_loss, linear_loss)

    return K.mean(loss)
Example 29
Project: Dropout_BBalpha   Author: YingzhenLi   File: utils_tf.py    MIT License 5 votes vote down vote up
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs):
    """
    A helper function that computes a tensor on numpy inputs by batches.

    :param sess: TF session to evaluate in.
    :param tf_inputs: list of input placeholders (same length/order as
        numpy_inputs).
    :param tf_outputs: list of output tensors to evaluate.
    :param numpy_inputs: list of numpy arrays, all sharing the same
        first-dimension size m.
    :return: list of numpy arrays, one per tf_output, each with m rows.
    """
    n = len(numpy_inputs)
    assert n > 0
    assert n == len(tf_inputs)
    # m = number of examples; every input must agree on it.
    m = numpy_inputs[0].shape[0]
    for i in six.moves.xrange(1, n):
        assert numpy_inputs[i].shape[0] == m
    # One accumulator list per output tensor.
    out = []
    for _ in tf_outputs:
        out.append([])
    with sess.as_default():
        for start in six.moves.xrange(0, m, FLAGS.batch_size):
            batch = start // FLAGS.batch_size
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Compute batch start and end indices
            start = batch * FLAGS.batch_size
            end = start + FLAGS.batch_size
            # The last slice may be shorter than batch_size.
            numpy_input_batches = [numpy_input[start:end] for numpy_input in numpy_inputs]
            cur_batch_size = numpy_input_batches[0].shape[0]
            assert cur_batch_size <= FLAGS.batch_size
            for e in numpy_input_batches:
                assert e.shape[0] == cur_batch_size

            feed_dict = dict(zip(tf_inputs, numpy_input_batches))
            # learning_phase 0 => run in inference mode (no dropout etc.).
            feed_dict[keras.backend.learning_phase()] = 0
            numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
            for e in numpy_output_batches:
                assert e.shape[0] == cur_batch_size, e.shape
            for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
                out_elem.append(numpy_output_batch)

    # Stitch the per-batch results back into full-length arrays.
    out = [np.concatenate(x, axis=0) for x in out]
    for e in out:
        assert e.shape[0] == m, e.shape
    return out 
Example 30
Project: Dropout_BBalpha   Author: YingzhenLi   File: utils_tf.py    MIT License 5 votes vote down vote up
def model_argmax(sess, x, predictions, sample):
    """
    Helper function that computes the current class prediction
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param sample: (1 x 1 x img_rows x img_cols) numpy array with sample input
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    # Evaluate in inference mode (learning_phase 0) on the single sample.
    feed_dict = {x: sample, keras.backend.learning_phase(): 0}
    return np.argmax(sess.run(predictions, feed_dict))
Example 31
Project: robust_physical_perturbations   Author: evtimovi   File: utils.py    MIT License 5 votes vote down vote up
def setup_model_and_sess():
    '''
    Sets up and loads the model used for classifying 
    the signs with the help of keras
    and the corresponding TF session. (Code from cleverhans example)
    Needs FLAGS.model_path in order to locate the stored model
    :return: a tuple (model, sess) 
    '''
    # print all parameters for the current run
    # print "Parameters"
    # for k in sorted(FLAGS.__dict__["__flags"].keys()):
    #     print k, FLAGS.__dict__["__flags"][k]

    ###### setup code from cleverhans example ######
    # Set TF random seed to improve reproducibility
    tf.set_random_seed(FLAGS.tf_seed)

    # Keras exposes a `tf` attribute on the backend module only when the
    # TensorFlow backend is active.
    if not hasattr(backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")

    # Image dimensions ordering should follow the Theano convention
    if keras.backend.image_dim_ordering() != 'tf':
        keras.backend.set_image_dim_ordering('tf')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
              "'th', temporarily setting to 'tf'")

    # Create TF session and set as Keras backend session
    sess = tf.Session()
    keras.backend.set_session(sess)
    print("Created TensorFlow session and set Keras backend.")

    # Define TF model graph
    model = cnn_model(img_rows=FLAGS.img_rows, img_cols=FLAGS.img_cols, channels=FLAGS.nb_channels, nb_classes=FLAGS.nb_classes)

    # Restore the model from previously saved parameters
    saver = tf.train.Saver()
    saver.restore(sess, FLAGS.model_path)
    print("Loaded the parameters for the model from %s"%FLAGS.model_path)
    
    return model, sess 
Example 32
Project: imgclsmob   Author: osmr   File: squeezenet.py    MIT License 5 votes vote down vote up
def fire_conv(x,
              in_channels,
              out_channels,
              kernel_size,
              padding,
              name="fire_conv"):
    """
    SqueezeNet specific convolution block: a biased conv2d followed by a
    ReLU activation.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    name : str, default 'fire_conv'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    conv_out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        padding=padding,
        use_bias=True,
        name=name + "/conv")
    return nn.Activation("relu", name=name + "/activ")(conv_out)
Example 33
Project: imgclsmob   Author: osmr   File: squeezenet.py    MIT License 5 votes vote down vote up
def squeeze_init_block(x,
                       in_channels,
                       out_channels,
                       kernel_size,
                       name="squeeze_init_block"):
    """
    SqueezeNet specific initial block: a strided, biased convolution
    followed by ReLU. (Docstring previously said "ResNet" — copy-paste
    error; this is SqueezeNet's initial block.)

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    name : str, default 'squeeze_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    x = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=2,
        use_bias=True,
        name=name + "/conv")
    x = nn.Activation("relu", name=name + "/activ")(x)
    return x 
Example 34
Project: imgclsmob   Author: osmr   File: shufflenetv2.py    MIT License 5 votes vote down vote up
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       name="shuffle_init_block"):
    """
    ShuffleNetV2 specific initial block: a strided 3x3 convolution block
    followed by a ceil-mode 3x3 max-pooling (4x spatial reduction overall).

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'shuffle_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = conv3x3_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        strides=2, name=name + "/conv")
    out = maxpool2d(
        x=out, pool_size=3, strides=2, padding=0, ceil_mode=True,
        name=name + "/pool")
    return out
Example 35
Project: imgclsmob   Author: osmr   File: preresnet.py    MIT License 5 votes vote down vote up
def preres_block(x,
                 in_channels,
                 out_channels,
                 strides,
                 name="preres_block"):
    """
    Simple PreResNet block for residual path in PreResNet unit.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    name : str, default 'preres_block'
        Block name.

    Returns
    -------
    tuple of two keras.backend tensor/variable/symbol
        Resulted tensor and preactivated input tensor.
    """
    # First conv changes the channel count (and possibly resolution) and also
    # returns its pre-activation input for the identity branch.
    x, x_pre_activ = pre_conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        return_preact=True,
        name=name + "/conv1")
    # Fix: after conv1 the tensor carries `out_channels` channels, so conv2's
    # declared input width must be `out_channels`, not `in_channels`.
    # (Keras infers the real width from the tensor, so this corrects metadata.)
    x = pre_conv3x3_block(
        x=x,
        in_channels=out_channels,
        out_channels=out_channels,
        name=name + "/conv2")
    return x, x_pre_activ
Example 36
Project: imgclsmob   Author: osmr   File: mobilenet.py    MIT License 5 votes vote down vote up
def dws_conv_block(x,
                   in_channels,
                   out_channels,
                   strides,
                   name="dws_conv_block"):
    """
    Depthwise separable convolution block with BatchNorms and activations at
    each convolution layer; used as a MobileNet unit. A depthwise 3x3
    convolution (channel count preserved) is followed by a pointwise 1x1
    convolution that maps to `out_channels`.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    name : str, default 'dws_conv_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = dwconv3x3_block(
        x=x, in_channels=in_channels, out_channels=in_channels,
        strides=strides, name=name + "/dw_conv")
    out = conv1x1_block(
        x=out, in_channels=in_channels, out_channels=out_channels,
        name=name + "/pw_conv")
    return out
Example 37
Project: imgclsmob   Author: osmr   File: vgg.py    MIT License 5 votes vote down vote up
def vgg_dense(x,
              in_channels,
              out_channels,
              name="vgg_dense"):
    """
    VGG specific dense block: fully-connected layer -> ReLU -> Dropout(0.5).

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'vgg_dense'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    fc_out = nn.Dense(
        units=out_channels,
        input_dim=in_channels,
        name=name + "/fc")(x)
    act_out = nn.Activation("relu", name=name + "/activ")(fc_out)
    return nn.Dropout(rate=0.5, name=name + "/dropout")(act_out)
Example 38
Project: imgclsmob   Author: osmr   File: resnet.py    MIT License 5 votes vote down vote up
def res_block(x,
              in_channels,
              out_channels,
              strides,
              name="res_block"):
    """
    Simple ResNet block for residual path in ResNet unit.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    name : str, default 'res_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        name=name + "/conv1")
    # Fix: after conv1 the tensor carries `out_channels` channels, so conv2's
    # declared input width must be `out_channels`, not `in_channels`.
    # (Keras infers the actual width from the tensor, so this is a metadata fix.)
    x = conv3x3_block(
        x=x,
        in_channels=out_channels,
        out_channels=out_channels,
        # no activation here -- presumably applied after the residual add,
        # which is not visible in this block
        activation=None,
        name=name + "/conv2")
    return x
Example 39
Project: imgclsmob   Author: osmr   File: resnet.py    MIT License 5 votes vote down vote up
def res_init_block(x,
                   in_channels,
                   out_channels,
                   name="res_init_block"):
    """
    ResNet specific initial block: a strided 7x7 convolution block followed
    by a 3x3 max-pooling (4x spatial reduction overall).

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'res_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = conv7x7_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        strides=2, name=name + "/conv")
    out = maxpool2d(
        x=out, pool_size=3, strides=2, padding=1,
        name=name + "/pool")
    return out
Example 40
Project: imgclsmob   Author: osmr   File: shufflenet.py    MIT License 5 votes vote down vote up
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       name="shuffle_init_block"):
    """
    ShuffleNet specific initial block: strided 3x3 convolution -> batch norm
    -> ReLU, then a 3x3 max-pooling.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'shuffle_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = conv3x3(
        x=x, in_channels=in_channels, out_channels=out_channels,
        strides=2, name=name + "/conv")
    out = batchnorm(x=out, name=name + "/bn")
    out = nn.Activation("relu", name=name + "/activ")(out)
    out = maxpool2d(
        x=out, pool_size=3, strides=2, padding=1,
        name=name + "/pool")
    return out
Example 41
Project: imgclsmob   Author: osmr   File: squeezenext.py    MIT License 5 votes vote down vote up
def sqnxt_init_block(x,
                     in_channels,
                     out_channels,
                     name="sqnxt_init_block"):
    """
    SqueezeNext specific initial block: a strided, biased 7x7 convolution
    block followed by ceil-mode 3x3 max-pooling.
    (Docstring previously said 'ResNet' -- a copy-paste leftover.)

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'sqnxt_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    x = conv7x7_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        padding=1,
        use_bias=True,
        name=name + "/conv")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        ceil_mode=True,
        name=name + "/pool")
    return x
Example 42
Project: imgclsmob   Author: osmr   File: mobilenetv3.py    MIT License 5 votes vote down vote up
def mobilenetv3_final_block(x,
                            in_channels,
                            out_channels,
                            use_se,
                            name="mobilenetv3_final_block"):
    """
    MobileNetV3 final block: a 1x1 convolution block with h-swish activation,
    optionally followed by a squeeze-and-excitation module.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_se : bool
        Whether to use SE-module.
    name : str, default 'mobilenetv3_final_block'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = conv1x1_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        activation="hswish", name=name + "/conv")
    if not use_se:
        return out
    return se_block(
        x=out, channels=out_channels, reduction=4,
        approx_sigmoid=True, round_mid=True, name=name + "/se")
Example 43
Project: imgclsmob   Author: osmr   File: alexnet.py    MIT License 5 votes vote down vote up
def alex_dense(x,
               in_channels,
               out_channels,
               name="alex_dense"):
    """
    AlexNet specific dense block: fully-connected layer -> ReLU -> Dropout(0.5).

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'alex_dense'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    out = nn.Dense(
        units=out_channels, input_dim=in_channels,
        name=name + "/fc")(x)
    out = nn.Activation("relu", name=name + "/activ")(out)
    out = nn.Dropout(rate=0.5, name=name + "/dropout")(out)
    return out
Example 44
Project: imgclsmob   Author: osmr   File: alexnet.py    MIT License 5 votes vote down vote up
def alex_output_block(x,
                      in_channels,
                      classes,
                      name="alex_output_block"):
    """
    AlexNet specific output block: two 4096-wide dense blocks followed by the
    final classification layer.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    name : str, default 'alex_output_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    mid_channels = 4096

    out = alex_dense(
        x=x, in_channels=in_channels, out_channels=mid_channels,
        name=name + "/fc1")
    out = alex_dense(
        x=out, in_channels=mid_channels, out_channels=mid_channels,
        name=name + "/fc2")
    out = nn.Dense(
        units=classes, input_dim=mid_channels,
        name=name + "/fc3")(out)
    return out
Example 45
Project: imgclsmob   Author: osmr   File: darknet.py    MIT License 5 votes vote down vote up
def _test():
    """Smoke-test the darknet models: check parameter counts and output shape."""
    import numpy as np
    import keras

    pretrained = False
    keras.backend.set_learning_phase(0)

    # model -> expected trainable-parameter count
    expected_weight_counts = {
        darknet_ref: 7319416,
        darknet_tiny: 1042104,
        darknet19: 20842376,
    }

    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        shape = (1, 3, 224, 224) if is_channels_first() else (1, 224, 224, 3)
        x = np.zeros(shape, np.float32)
        y = net.predict(x)
        assert y.shape == (1, 1000)
Example 46
Project: Variational-AutoEncoder-For-Novelty-Detection   Author: LordAlucard90   File: helper.py    GNU General Public License v3.0 5 votes vote down vote up
def _test(self, vae, hidden, reg_val=None, drp_val=None):
        """
        Evaluate one saved (V)AE configuration as a novelty detector.

        Rebuilds the model via ModelGenerator, loads its best weights, derives
        an MSE threshold from training-set reconstructions, then scores the
        test set against that threshold.

        :param vae: VAE flag forwarded to ModelGenerator
        :param hidden: hidden-layer spec forwarded to ModelGenerator
        :param reg_val: optional regularization value
        :param drp_val: optional dropout value
        :return: [name, loss, threshold, precision, recall, f1]
        """
        # Fresh graph/session so repeated calls don't accumulate state.
        keras.backend.clear_session()

        # Flatten train+val and test images to 784-vectors as TF tensors.
        self.Trn_an_ts = np.append(self.Trn, self.Val).reshape((len(self.Trn) + len(self.Val), 28 * 28))
        self.Trn_an_ts = tf.convert_to_tensor(self.Trn_an_ts, np.float32)
        self.Tst_an_ts = copy(self.Tst).reshape((len(self.Tst), 28 * 28))
        self.Tst_an_ts = tf.convert_to_tensor(self.Tst_an_ts, np.float32)

        m_generator = ModelGenerator(vae=vae, hidden=hidden, reg_val=reg_val, drp_val=drp_val)
        name = m_generator.get_name()
        m_generator.load_best_w(self.models_dir)
        model = m_generator.get_model()

        loss = model.evaluate(self.Tst, self.Tst, verbose=0)

        trn_pred = model.predict(self.Trn_an).reshape((len(self.Trn_an), 28 * 28))
        trn_pred = tf.convert_to_tensor(trn_pred, np.float32)

        # Threshold = the len(Trn_an_idx)-th largest training reconstruction
        # MSE; presumably Trn_an_idx indexes the known training anomalies --
        # TODO confirm against how Trn_an_idx is built.
        trn_mse = K.eval(mean_squared_error(self.Trn_an_ts, trn_pred))
        th = trn_mse[np.argsort(trn_mse)[-len(self.Trn_an_idx)]]

        tst_pred = model.predict(self.Tst_an).reshape((len(self.Tst_an), 28 * 28))
        tst_pred = tf.convert_to_tensor(tst_pred, np.float32)

        tst_mse = K.eval(mean_squared_error(self.Tst_an_ts, tst_pred))

        # Positive = reconstruction error above threshold; labels > 0 mark anomalies.
        [prc, _], [rcl, _], [f1, _], _ = precision_recall_fscore_support(self.Tst_lbls[self.Tst_idx] > 0, tst_mse > th)

        return [name, loss, th, prc, rcl, f1]
Example 47
Project: E3Outlier   Author: demonzyj56   File: utils.py    MIT License 5 votes vote down vote up
def get_channels_axis():
    """Return the channel axis index: 1 for channels_first, 3 for channels_last."""
    import keras
    data_format = keras.backend.image_data_format()
    if data_format == 'channels_first':
        return 1
    assert data_format == 'channels_last'
    return 3
Example 48
Project: GraphicDesignPatternByPython   Author: Relph1119   File: autogen.py    MIT License 5 votes vote down vote up
def post_process_signature(signature):
    """
    Collapse a fully-qualified keras signature to its public dotted path.

    Splits on dots (but not dots followed by a digit, so version-like numbers
    survive) and, for layers/utils/backend symbols, drops the private module
    segment.
    """
    parts = re.split(r'\.(?!\d)', signature)
    if len(parts) >= 4:
        prefixes = {
            'layers': 'keras.layers.',
            'utils': 'keras.utils.',
            'backend': 'keras.backend.',
        }
        prefix = prefixes.get(parts[1])
        if prefix is not None:
            signature = prefix + '.'.join(parts[3:])
    return signature
Example 49
Project: workspace_2017   Author: nwiizo   File: test_sparse.py    MIT License 5 votes vote down vote up
def do_sparse():
    """True when the active backend is TensorFlow; otherwise Theano's sparse module."""
    if K == KTF:
        return True
    return KTH.th_sparse_module
Example 50
Project: classification_models   Author: qubvel   File: keras.py    MIT License 5 votes vote down vote up
def get_kwargs():
        """Return the keras submodules (backend/layers/models/utils) as kwargs."""
        return dict(
            backend=keras.backend,
            layers=keras.layers,
            models=keras.models,
            utils=keras.utils,
        )
Example 51
Project: applications   Author: geomstats   File: autogen.py    MIT License 5 votes vote down vote up
def post_process_signature(signature):
    """
    Collapse a fully-qualified keras signature to its public dotted path,
    dropping the private module segment for layers/utils/backend symbols.

    :param signature: dotted signature string
    :return: the possibly shortened signature
    """
    # Fix: use a raw string -- '\.' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Python). The regex itself is unchanged:
    # split on dots not followed by a digit, so version numbers survive.
    parts = re.split(r'\.(?!\d)', signature)
    if len(parts) >= 4:
        if parts[1] == 'layers':
            signature = 'keras.layers.' + '.'.join(parts[3:])
        if parts[1] == 'utils':
            signature = 'keras.utils.' + '.'.join(parts[3:])
        if parts[1] == 'backend':
            signature = 'keras.backend.' + '.'.join(parts[3:])
    return signature
Example 52
Project: psoCNN   Author: feferna   File: psoCNN.py    MIT License 5 votes vote down vote up
def fit_gBest(self, batch_size, epochs, dropout_rate):
        """
        Further train the globally best particle's model and report its size.

        :param batch_size: mini-batch size for the final training run
        :param epochs: number of training epochs
        :param dropout_rate: dropout rate used when (re)compiling gBest's model
        :return: number of trainable parameters of the gBest model (int)
        """
        print("\nFurther training gBest model...")
        self.gBest.model_compile(dropout_rate)        
        # NOTE(review): set() over Keras weight tensors relies on their
        # hashability and would deduplicate shared weights -- verify intended.
        trainable_count = int(np.sum([keras.backend.count_params(p) for p in set(self.gBest.model.trainable_weights)]))
        print("gBest's number of trainable parameters: " + str(trainable_count))
        self.gBest.model_fit_complete(self.x_train, self.y_train, batch_size=batch_size, epochs=epochs)

        return trainable_count
Example 53
Project: MLL-machine-learning-language   Author: riccardobernardi   File: testMLL.py    MIT License 5 votes vote down vote up
def test_keras_layer_import_lower(self):
        """Print whether 'Add' is a keras.backend name, then all lowercase public names."""
        print("Add" in keras.backend.__dict__.keys())

        lowercase_names = [
            key for key in keras.backend.__dict__.keys()
            if str(key).islower() and "__" not in key
        ]

        print(lowercase_names)
Example 54
Project: cleverhans-models   Author: tiendzung-le   File: ex_cifar10_tf.py    MIT License 5 votes vote down vote up
def data_cifar10():
    """
    Preprocess CIFAR10 dataset
    :return: (X_train, Y_train, X_test, Y_test) with images scaled to [0, 1]
        and labels one-hot encoded.
    """
    # These values are specific to CIFAR10
    img_rows = 32
    img_cols = 32
    nb_classes = 10

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    # Pick the layout matching the backend's dimension ordering.
    if keras.backend.image_dim_ordering() == 'th':
        train_shape = (X_train.shape[0], 3, img_rows, img_cols)
        test_shape = (X_test.shape[0], 3, img_rows, img_cols)
    else:
        train_shape = (X_train.shape[0], img_rows, img_cols, 3)
        test_shape = (X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.reshape(train_shape).astype('float32') / 255
    X_test = X_test.reshape(test_shape).astype('float32') / 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
Example 55
Project: MatchZoo   Author: NTMC-Community   File: base_model.py    Apache License 2.0 5 votes vote down vote up
def __init__(
        self,
        params: typing.Optional[ParamTable] = None,
        backend: typing.Optional[keras.models.Model] = None
    ):
        """
        Init.

        :param params: model parameter table; falls back to
            :meth:`get_default_params` when not given (or falsy).
        :param backend: optional pre-built keras model backend.
        """
        if params:
            self._params = params
        else:
            self._params = self.get_default_params()
        self._backend = backend
Example 56
Project: MatchZoo   Author: NTMC-Community   File: base_model.py    Apache License 2.0 5 votes vote down vote up
def backend(self) -> keras.models.Model:
        """:return model backend, a keras model instance."""
        # Guard clause: the backend only exists after build() has run.
        if self._backend:
            return self._backend
        raise ValueError("Backend not found."
                         "Please build the model first.")
Example 57
Project: MatchZoo   Author: NTMC-Community   File: base_model.py    Apache License 2.0 5 votes vote down vote up
def load_model(dirpath: typing.Union[str, Path]) -> BaseModel:
    """
    Load a model. The reverse function of :meth:`BaseModel.save`.

    :param dirpath: directory path of the saved model
    :return: a :class:`BaseModel` instance

    Example:

            >>> import matchzoo as mz
            >>> model = mz.models.Naive()
            >>> model.guess_and_fill_missing_params(verbose=0)
            >>> model.build()
            >>> model.save('my-model')
            >>> model.params.keys() == mz.load_model('my-model').params.keys()
            True
            >>> import shutil
            >>> shutil.rmtree('my-model')

    """
    dirpath = Path(dirpath)

    # Restore the parameter table first; it records the concrete model class.
    with dirpath.joinpath(BaseModel.PARAMS_FILENAME).open(mode='rb') as params_file:
        params = dill.load(params_file)

    model_instance = params['model_class'](params=params)
    model_instance.build()
    model_instance.compile()
    model_instance.backend.load_weights(
        dirpath.joinpath(BaseModel.BACKEND_WEIGHTS_FILENAME))
    return model_instance
Example 58
Project: MatchZoo   Author: NTMC-Community   File: esim.py    Apache License 2.0 5 votes vote down vote up
def _expand_dim(self, inp: tf.Tensor, axis: int) -> keras.layers.Layer:
        """
        Wrap keras.backend.expand_dims into a Lambda layer.

        :param inp: input tensor to expand the dimension
        :param axis: the axis of new dimension
        """
        expand_layer = keras.layers.Lambda(lambda t: tf.expand_dims(t, axis=axis))
        return expand_layer(inp)
Example 59
Project: dqn_rl_outbreak_response   Author: fonnesbeck   File: Case3DQN.py    MIT License 5 votes vote down vote up
def Kernel(dist_squared):
    """Transmission kernel K(d^2) = 1 / (d^2 + 400); accepts scalars or array-likes."""
    arr = np.asarray(dist_squared)
    return 1 / (arr + 400)

# USE THIS FUNCTION TO GENERATE THE DISEASE PARAMETERS OF THE OUTBREAK 
Example 60
Project: dqn_rl_outbreak_response   Author: fonnesbeck   File: Case1DQN.py    MIT License 5 votes vote down vote up
def Kernel(dist_squared):
    """Spatial transmission kernel: 1 / (squared distance + 400)."""
    dist_squared = np.asarray(dist_squared)
    denominator = dist_squared + 400
    return 1 / denominator

# USE THIS FUNCTION TO GENERATE THE DISEASE PARAMETERS OF THE OUTBREAK 
Example 61
Project: neural-fingerprinting   Author: StephanZheng   File: attacks.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def main(args):
    """
    Craft adversarial examples against a pre-trained model.

    Loads the model for ``args.dataset``, reports clean test accuracy, then
    runs the requested attack(s) via ``craft_one_type`` and saves results
    under ``PATH_DATA``.

    :param args: parsed CLI namespace with ``dataset``, ``attack`` and
        ``batch_size`` attributes.
    """
    assert args.dataset in ['mnist', 'cifar', 'svhn'], \
        "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw-l2', 'all', 'cw-lid'], \
        "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
        "'jsma', 'cw-l2', 'all' or 'cw-lid' for attacking LID detector"
    model_file = os.path.join(PATH_DATA, "model_%s.h5" % args.dataset)
    # model_file = "../data_v1/model_%s.h5" % args.dataset
    print(model_file)
    assert os.path.isfile(model_file), \
        'model file not found... must first train model using train_model.py.'
    if args.dataset == 'svhn' and args.attack == 'cw-l2':
        assert args.batch_size == 16, \
        "svhn has 26032 test images, the batch_size for cw-l2 attack should be 16, " \
        "otherwise, there will be error at the last batch!"


    print('Dataset: %s. Attack: %s' % (args.dataset, args.attack))
    # Create TF session, set it as Keras backend
    sess = tf.Session()
    K.set_session(sess)
    if args.attack == 'cw-l2' or args.attack == 'cw-lid':
        warnings.warn("Important: remove the softmax layer for cw attacks!")
        # use softmax=False to load without softmax layer
        model = get_model(args.dataset, softmax=False)
        model.compile(
            loss=cross_entropy,
            optimizer='adadelta',
            metrics=['accuracy']
        )
        model.load_weights(model_file)
    else:
        model = load_model(model_file)

    _, _, X_test, Y_test = get_data(args.dataset)
    _, acc = model.evaluate(X_test, Y_test, batch_size=args.batch_size,
                            verbose=0)
    print("Accuracy on the test set: %0.2f%%" % (100*acc))

    if args.attack == 'cw-lid': # breaking LID detector - test
        # cw-lid is evaluated on a 1000-sample subset only.
        X_test = X_test[:1000]
        Y_test = Y_test[:1000]

    if args.attack == 'all':
        # Cycle through all attacks
        # NOTE(review): despite 'all', only 'fgsm' is iterated here -- the
        # remaining attacks appear to have been removed from this list.
        for attack in ['fgsm']:
            craft_one_type(sess, model, X_test, Y_test, args.dataset, attack,
                           args.batch_size)
    else:
        # Craft one specific attack type
        craft_one_type(sess, model, X_test, Y_test, args.dataset, args.attack,
                       args.batch_size)
    print('Adversarial samples crafted and saved to %s ' % PATH_DATA)
    sess.close()
Example 62
Project: CNNLocalizationTF   Author: hulop   File: export_graph_image_keras.py    MIT License 4 votes vote down vote up
def main():
    # Export a trained Keras posenet as a TF graph-def (.pb) plus checkpoint.
    # NOTE(review): Python 2 print statements -- this module targets Python 2.
    description = 'This script is for testing posenet'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('input_model_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where input model is saved.')
    parser.add_argument('output_graph_file', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='File path where exported graph def protobuf (.pb) file will be saved.')
    parser.add_argument('output_model_dir', action='store', nargs=None, const=None, \
                        default=None, type=str, choices=None, metavar=None, \
                        help='Directory path where output model will be saved.')
    parser.add_argument('-m', '--base_model', action='store', type=str, default=posenet_config.base_model, \
                        help='Base model : inception-v1/inception-v3/mobilenet-v1 (Default : ' + str(posenet_config.base_model))
    parser.add_argument('-l', '--lstm_model', action='store_true', default=False, \
                        help='Export LSTM model (default: False)')
    args = parser.parse_args()
    input_model_dir = args.input_model_dir
    output_graph_file = args.output_graph_file
    output_model_dir = args.output_model_dir
    output_model_file = os.path.join(output_model_dir, "model.ckpt")
    posenet_config.base_model = args.base_model
    lstm_model = args.lstm_model
    print "base model : " + str(posenet_config.base_model)
    print "LSTM model : " + str(lstm_model)
    
    # convert hd5 file to ckpt
    # https://github.com/keras-team/keras/issues/9040
    # Learning phase 0 = inference mode, so the exported graph carries no
    # training-only ops (dropout etc.).
    K.set_learning_phase(0)
    # Select the network-builder matching the chosen base model.
    if lstm_model:
        if posenet_config.base_model=="inception-v1":
            model = posenet_lstm_keras.create_posenet_inception_v1(trainable=False)
        elif posenet_config.base_model=="inception-v3":
            model = posenet_lstm_keras.create_posenet_inception_v3(trainable=False)
        elif posenet_config.base_model=="mobilenet-v1":
            model = posenet_lstm_keras.create_posenet_mobilenet_v1(trainable=False)
        else:
                print "invalid base model : " + posenet_config.base_model
                sys.exit()
    else:
        if posenet_config.base_model=="inception-v1":
            model = posenet_keras.create_posenet_inception_v1(trainable=False)
        elif posenet_config.base_model=="inception-v3":
            model = posenet_keras.create_posenet_inception_v3(trainable=False)
        elif posenet_config.base_model=="mobilenet-v1":
            model = posenet_keras.create_posenet_mobilenet_v1(trainable=False)
        else:
                print "invalid base model : " + posenet_config.base_model
                sys.exit()
    model.load_weights(os.path.join(input_model_dir, 'trained_weights.h5'))
    model.summary()
    
    #Save graph and checkpoint
    session = keras.backend.get_session()
    graph = session.graph
    graph_def = graph.as_graph_def()
    with gfile.GFile(output_graph_file, 'wb') as f:
        f.write(graph_def.SerializeToString())
    
    saver = tf.train.Saver()
    saver.save(session, output_model_file)
Example 63
Project: CNNLocalizationTF   Author: hulop   File: export_graph_beacon_keras.py    MIT License 4 votes vote down vote up
def main():
        # Export a trained beacon posenet as a TF graph-def (.pb) plus checkpoint.
        # NOTE(review): Python 2 print statements -- this module targets Python 2.
        global num_dense_sample
        
        description = 'This script is for testing posenet'
        parser = argparse.ArgumentParser(description=description)
        parser.add_argument('input_beacon_setting_file', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='File path where beacon setting file is saved.')
        parser.add_argument('input_model_dir', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='Directory path where input model is saved.')
        parser.add_argument('output_graph_file', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='File path where exported graph def protobuf (.pb) file will be saved.')
        parser.add_argument('output_model_dir', action='store', nargs=None, const=None, \
                            default=None, type=str, choices=None, metavar=None, \
                            help='Directory path where output model will be saved.')
        parser.add_argument('-s', '--use_shrink_model', action='store_true', default=False, \
                            help='Use shrink model (default: False)')
        parser.add_argument('-l', '--lstm_model', action='store_true', default=False, \
                            help='Export LSTM model (default: False)')
        args = parser.parse_args()
        input_beacon_setting_file = args.input_beacon_setting_file
        input_model_dir = args.input_model_dir
        output_graph_file = args.output_graph_file
        output_model_dir = args.output_model_dir
        output_model_file = os.path.join(output_model_dir, "model.ckpt")        
        use_shrink_model = args.use_shrink_model
        lstm_model = args.lstm_model
        print "use shrink model for training : " + str(use_shrink_model)
        
        # parse beacon setting file
        # The number of configured beacons fixes the network input width.
        beaconmap = IBeaconUtils.parseBeaconSetting(input_beacon_setting_file)
        beacon_num = len(beaconmap.keys())
        
        # convert hd5 file to ckpt
        # https://github.com/keras-team/keras/issues/9040
        # Learning phase 0 = inference mode for the exported graph.
        K.set_learning_phase(0)
        if use_shrink_model:
                if lstm_model:
                        model = posenet_beacon_no_inception_shrink_lstm_keras.create_posenet(beacon_num, trainable=False)
                else:
                        model = posenet_beacon_no_inception_shrink_keras.create_posenet(beacon_num, trainable=False)
        else:
                print "Do not shrink model is not supported"
                sys.exit()
        model.load_weights(os.path.join(input_model_dir, 'trained_weights.h5'))
        model.summary()

        #Save graph and checkpoint
        session = keras.backend.get_session()
        graph = session.graph
        graph_def = graph.as_graph_def()
        with gfile.GFile(output_graph_file, 'wb') as f:
            f.write(graph_def.SerializeToString())
        
        saver = tf.train.Saver()
        saver.save(session, output_model_file)
Example 64
Project: Dropout_BBalpha   Author: YingzhenLi   File: loading_utils.py    MIT License 4 votes vote down vote up
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, stepsize_ph, stepsize_val, x_original_ph = None, x_original_val = None):
    """
    A helper function that computes a tensor on numpy inputs by batches.

    :param sess: TF session used for evaluation
    :param tf_inputs: list of input placeholders
    :param tf_outputs: list of output tensors to evaluate
    :param numpy_inputs: list of numpy arrays, one per placeholder; all must
        share the same first (example-count) dimension
    :param stepsize_ph: placeholder for the step size
    :param stepsize_val: value fed to ``stepsize_ph``
    :param x_original_ph: optional placeholder for original (unperturbed) inputs
    :param x_original_val: values fed to ``x_original_ph`` when given
    :return: list of numpy arrays, the concatenated per-batch outputs
    """
    n = len(numpy_inputs)
    assert n > 0
    assert n == len(tf_inputs)
    m = numpy_inputs[0].shape[0]
    # All inputs must agree on the number of examples.
    for i in six.moves.xrange(1, n):
        assert numpy_inputs[i].shape[0] == m
    out = []
    for _ in tf_outputs:
        out.append([])
    with sess.as_default():
        for start in six.moves.xrange(0, m, FLAGS.batch_size):
            batch = start // FLAGS.batch_size
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Compute batch start and end indices
            # (start is recomputed from batch and equals the loop variable.)
            start = batch * FLAGS.batch_size
            end = start + FLAGS.batch_size
            numpy_input_batches = [numpy_input[start:end] for numpy_input in numpy_inputs]
            cur_batch_size = numpy_input_batches[0].shape[0]
            assert cur_batch_size <= FLAGS.batch_size
            for e in numpy_input_batches:
                assert e.shape[0] == cur_batch_size

            feed_dict = dict(zip(tf_inputs, numpy_input_batches))
            feed_dict[stepsize_ph] = stepsize_val
            # Learning phase 0 = inference mode for any Keras layers involved.
            feed_dict[keras.backend.learning_phase()] = 0
            if x_original_ph is not None:
                feed_dict[x_original_ph] = x_original_val[start:end]
            numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
            for e in numpy_output_batches:
                assert e.shape[0] == cur_batch_size, e.shape
            for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
                out_elem.append(numpy_output_batch)

    out = [np.concatenate(x, axis=0) for x in out]
    for e in out:
        assert e.shape[0] == m, e.shape
    return out
Example 65
Project: Dropout_BBalpha   Author: YingzhenLi   File: utils_tf.py    MIT License 4 votes vote down vote up
def model_eval(sess, x, y, model, X_test, Y_test):
    """
    Compute the accuracy of a TF model on some data.

    :param sess: TF session to use when evaluating the graph
    :param x: input placeholder
    :param y: output placeholder (for labels)
    :param model: model output predictions
    :param X_test: numpy array with test inputs
    :param Y_test: numpy array with test labels (one-hot)
    :return: a float with the accuracy value
    :raises ValueError: if `X_test` is empty (the original raised a
        confusing NameError on the final assert in that case)
    """
    if len(X_test) == 0:
        raise ValueError("model_eval requires a non-empty X_test")

    # Define symbolic op for accuracy.
    acc_value = keras.metrics.categorical_accuracy(y, model)

    # Init result var
    accuracy = 0.0

    with sess.as_default():
        # Compute number of batches
        nb_batches = int(math.ceil(float(len(X_test)) / FLAGS.batch_size))
        assert nb_batches * FLAGS.batch_size >= len(X_test)

        for batch in range(nb_batches):
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Must not use the `batch_indices` function here, because it
            # repeats some examples.
            # It's acceptable to repeat during training, but not eval.
            start = batch * FLAGS.batch_size
            end = min(len(X_test), start + FLAGS.batch_size)
            cur_batch_size = end - start

            # The last batch may be smaller than all others, so we need to
            # account for variable batch size here: weight each batch's
            # accuracy by its actual size.
            accuracy += cur_batch_size * acc_value.eval(feed_dict={x: X_test[start:end],
                                            y: Y_test[start:end],
                                            keras.backend.learning_phase(): 0})
        assert end >= len(X_test)

        # Divide by number of examples to get final value
        accuracy /= len(X_test)

    return accuracy
Example 66
Project: imgclsmob   Author: osmr   File: resnext.py    MIT License 4 votes vote down vote up
def resnext_bottleneck(x,
                       in_channels,
                       out_channels,
                       strides,
                       cardinality,
                       bottleneck_width,
                       bottleneck_factor=4,
                       name="resnext_bottleneck"):
    """
    ResNeXt bottleneck block for residual path in ResNeXt unit: a 1x1
    reduction, a grouped 3x3 convolution, and a 1x1 expansion with no
    final activation.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    name : str, default 'resnext_bottleneck'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    mid_channels = out_channels // bottleneck_factor
    # Per-group width scales with the requested bottleneck width
    # (64 is the reference base width).
    per_group = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * per_group

    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=group_width,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=group_width,
        out_channels=group_width,
        strides=strides,
        groups=cardinality,
        name=name + "/conv2")
    return conv1x1_block(
        x=y,
        in_channels=group_width,
        out_channels=out_channels,
        activation=None,
        name=name + "/conv3")
Example 67
Project: imgclsmob   Author: osmr   File: resnext.py    MIT License 4 votes vote down vote up
def resnext_unit(x,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 name="resnext_unit"):
    """
    ResNeXt unit with residual connection: bottleneck body plus identity
    shortcut, followed by a ReLU.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    name : str, default 'resnext_unit'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # The shortcut needs a projection whenever the output shape differs
    # from the input shape.
    if (in_channels != out_channels) or (strides != 1):
        shortcut = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            name=name + "/identity_conv")
    else:
        shortcut = x

    y = resnext_bottleneck(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        name=name + "/body")

    y = nn.add([y, shortcut], name=name + "/add")
    return nn.Activation("relu", name=name + "/activ")(y)
Example 68
Project: imgclsmob   Author: osmr   File: mobilenetv2.py    MIT License 4 votes vote down vote up
def linear_bottleneck(x,
                      in_channels,
                      out_channels,
                      strides,
                      expansion,
                      name="linear_bottleneck"):
    """
    So-called 'Linear Bottleneck' layer, used as a MobileNetV2 unit:
    1x1 expansion, depthwise 3x3, 1x1 linear projection, with an optional
    additive residual when shapes match.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    expansion : bool
        Whether do expansion of channels.
    name : str, default 'linear_bottleneck'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # A residual connection is only valid when the shapes are unchanged.
    use_residual = (in_channels == out_channels) and (strides == 1)
    shortcut = x if use_residual else None
    mid_channels = in_channels * 6 if expansion else in_channels

    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        activation="relu6",
        name=name + "/conv1")
    y = dwconv3x3_block(
        x=y,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        activation="relu6",
        name=name + "/conv2")
    # Linear (no activation) projection back down.
    y = conv1x1_block(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        name=name + "/conv3")

    if use_residual:
        y = nn.add([y, shortcut], name=name + "/add")

    return y
Example 69
Project: imgclsmob   Author: osmr   File: mnasnet.py    MIT License 4 votes vote down vote up
def mnas_final_block(x,
                     in_channels,
                     out_channels,
                     mid_channels,
                     use_skip,
                     name="mnas_final_block"):
    """
    MnasNet specific final block: a depthwise-separable expanded SE-residual
    unit followed by a 1x1 projection.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    name : str, default 'mnas_final_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    x = dws_exp_se_res_unit(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        exp_factor=6,
        use_skip=use_skip,
        name=name + "/conv1")
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        name=name + "/conv2")
    return x
Example 70
Project: imgclsmob   Author: osmr   File: senet.py    MIT License 4 votes vote down vote up
def senet_bottleneck(x,
                     in_channels,
                     out_channels,
                     strides,
                     cardinality,
                     bottleneck_width,
                     bottleneck_factor=4,
                     name="senet_bottleneck"):
    """
    SENet bottleneck block for residual path in SENet unit.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bottleneck_factor : int, default 4
        Bottleneck factor (was hard-coded to 4; parameterized for
        consistency with `resnext_bottleneck`, default preserves behavior).
    name : str, default 'senet_bottleneck'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    mid_channels = out_channels // bottleneck_factor
    D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * D
    # SENet narrows the first 1x1 conv to half the group width.
    group_width2 = group_width // 2

    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=group_width2,
        name=name + "/conv1")
    x = conv3x3_block(
        x=x,
        in_channels=group_width2,
        out_channels=group_width,
        strides=strides,
        groups=cardinality,
        name=name + "/conv2")
    x = conv1x1_block(
        x=x,
        in_channels=group_width,
        out_channels=out_channels,
        activation=None,
        name=name + "/conv3")
    return x
Example 71
Project: imgclsmob   Author: osmr   File: senet.py    MIT License 4 votes vote down vote up
def senet_init_block(x,
                     in_channels,
                     out_channels,
                     name="senet_init_block"):
    """
    SENet specific initial block: three stacked 3x3 convolution blocks
    (the first strided) followed by a 3x3 max-pool.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'senet_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    half_channels = out_channels // 2

    y = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=half_channels,
        strides=2,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=half_channels,
        out_channels=half_channels,
        name=name + "/conv2")
    y = conv3x3_block(
        x=y,
        in_channels=half_channels,
        out_channels=out_channels,
        name=name + "/conv3")
    pool = nn.MaxPool2D(
        pool_size=3,
        strides=2,
        padding='same',
        name=name + "/pool")
    return pool(y)
Example 72
Project: imgclsmob   Author: osmr   File: preresnet.py    MIT License 4 votes vote down vote up
def preres_unit(x,
                in_channels,
                out_channels,
                strides,
                bottleneck,
                conv1_stride,
                name="preres_unit"):
    """
    PreResNet unit with residual connection.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    name : str, default 'preres_unit'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor.
    """
    shortcut = x

    body_name = name + "/body"
    if bottleneck:
        x, x_pre_activ = preres_bottleneck_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=conv1_stride,
            name=body_name)
    else:
        x, x_pre_activ = preres_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            name=body_name)

    # When the output shape differs, project the shortcut from the
    # pre-activated input (the defining trait of pre-activation ResNets).
    if (in_channels != out_channels) or (strides != 1):
        shortcut = conv1x1(
            x=x_pre_activ,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            name=name + "/identity_conv")

    return nn.add([x, shortcut], name=name + "/add")
Example 73
Project: imgclsmob   Author: osmr   File: preresnet.py    MIT License 4 votes vote down vote up
def preres_init_block(x,
                      in_channels,
                      out_channels,
                      name="preres_init_block"):
    """
    PreResNet specific initial block: strided 7x7 convolution, batch norm,
    ReLU, then a strided 3x3 max-pool.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'preres_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    y = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=2,
        padding=3,
        use_bias=False,
        name=name + "/conv")
    y = batchnorm(
        x=y,
        name=name + "/bn")
    y = nn.Activation("relu", name=name + "/activ")(y)
    pool = nn.MaxPool2D(
        pool_size=3,
        strides=2,
        padding="same",
        name=name + "/pool")
    return pool(y)
Example 74
Project: imgclsmob   Author: osmr   File: resnet.py    MIT License 4 votes vote down vote up
def res_bottleneck_block(x,
                         in_channels,
                         out_channels,
                         strides,
                         conv1_stride=False,
                         bottleneck_factor=4,
                         name="res_bottleneck_block"):
    """
    ResNet bottleneck block for residual path in ResNet unit.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    name : str, default 'res_bottleneck_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    mid_channels = out_channels // bottleneck_factor

    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=(strides if conv1_stride else 1),
        name=name + "/conv1")
    # BUGFIX: conv2 and conv3 previously received in_channels=in_channels,
    # but the tensor entering them has mid_channels channels (the output of
    # the preceding block) — matching `resnext_bottleneck` above.
    x = conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=(1 if conv1_stride else strides),
        name=name + "/conv2")
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        name=name + "/conv3")
    return x
Example 75
Project: imgclsmob   Author: osmr   File: efficientnet.py    MIT License 4 votes vote down vote up
def effi_init_block(x,
                    in_channels,
                    out_channels,
                    bn_epsilon,
                    activation,
                    tf_mode,
                    name="effi_init_block"):
    """
    EfficientNet specific initial block: optional explicit TF-style padding
    followed by a strided 3x3 convolution block.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    name : str, default 'effi_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if tf_mode:
        # Reproduce TensorFlow "SAME" padding explicitly; the conv below
        # then runs with zero implicit padding.
        pad = calc_tf_padding(x, kernel_size=3, strides=2)
        x = nn.ZeroPadding2D(
            padding=pad,
            name=name + "/conv_pad")(x)
    return conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        padding=(0 if tf_mode else 1),
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/conv")
Example 76
Project: imgclsmob   Author: osmr   File: densenet.py    MIT License 4 votes vote down vote up
def dense_unit(x,
               in_channels,
               out_channels,
               dropout_rate,
               name="dense_unit"):
    """
    DenseNet unit: pre-activated 1x1 then 3x3 convolutions whose output is
    concatenated (not added) onto the input along the channel axis.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    name : str, default 'dense_unit'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor.
    """
    # bn_size: expansion multiplier for the 1x1 "bottleneck" conv
    # (standard DenseNet-BC value).
    bn_size = 4
    # inc_channels is the per-unit growth (new channels produced here).
    inc_channels = out_channels - in_channels
    mid_channels = inc_channels * bn_size

    identity = x

    x = pre_conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        name=name + "/conv1")
    x = pre_conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=inc_channels,
        name=name + "/conv2")

    use_dropout = (dropout_rate != 0.0)
    if use_dropout:
        # NOTE(review): every other layer name uses a "/" separator
        # (name + "/...") but this one is name + "dropout"; likely a typo,
        # left unchanged because renaming a layer would break loading of
        # previously saved weights.
        x = nn.Dropout(
            rate=dropout_rate,
            name=name + "dropout")(x)

    # Dense connectivity: stack new features onto the unchanged input.
    x = nn.concatenate([identity, x], axis=get_channel_axis(), name=name + "/concat")
    return x
Example 77
Project: imgclsmob   Author: osmr   File: darknet53.py    MIT License 4 votes vote down vote up
def dark_unit(x,
              in_channels,
              out_channels,
              alpha,
              name="dark_unit"):
    """
    DarkNet unit: 1x1 squeeze then 3x3 expand, both with Leaky ReLU, plus
    an additive identity shortcut.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    name : str, default 'dark_unit'
        Unit name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # The squeeze halves the channel count, so the output width must be even.
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2

    shortcut = x
    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        activation=nn.LeakyReLU(
            alpha=alpha,
            name=name + "/conv1/activ"),
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=nn.LeakyReLU(
            alpha=alpha,
            name=name + "/conv2/activ"),
        name=name + "/conv2")
    return nn.add([y, shortcut], name=name + "/add")
Example 78
Project: imgclsmob   Author: osmr   File: mobilenetv3.py    MIT License 4 votes vote down vote up
def mobilenetv3_classifier(x,
                           in_channels,
                           out_channels,
                           mid_channels,
                           dropout_rate,
                           name="mobilenetv3_final_block"):
    """
    MobileNetV3 classifier: 1x1 conv + H-Swish, optional dropout, then a
    final biased 1x1 conv producing the class logits.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    name : str, default 'mobilenetv3_final_block'
        Unit name.  (Docstring previously claimed 'mobilenetv3_classifier',
        which did not match the actual default argument.)

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    x = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        name=name + "/conv1")
    x = HSwish(name=name + "/hswish")(x)

    use_dropout = (dropout_rate != 0.0)
    if use_dropout:
        # NOTE(review): name + "dropout" lacks the "/" separator used by all
        # other layer names here; likely a typo, left unchanged since a rename
        # would break loading of previously saved weights.
        x = nn.Dropout(
            rate=dropout_rate,
            name=name + "dropout")(x)

    # use_bias=True: this conv produces the logits, so it carries a bias.
    x = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        use_bias=True,
        name=name + "/conv2")
    return x
Example 79
Project: imgclsmob   Author: osmr   File: alexnet.py    MIT License 4 votes vote down vote up
def alex_conv(x,
              in_channels,
              out_channels,
              kernel_size,
              strides,
              padding,
              use_lrn,
              name="alex_conv"):
    """
    AlexNet specific convolution block: biased, batch-norm-free convolution
    optionally followed by local response normalization.

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_lrn : bool
        Whether to use LRN layer.
    name : str, default 'alex_conv'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    y = conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=True,
        use_bn=False,
        name=name + "/conv")
    return lrn(y) if use_lrn else y
Example 80
Project: imgclsmob   Author: osmr   File: darknet.py    MIT License 4 votes vote down vote up
def dark_convYxY(x,
                 in_channels,
                 out_channels,
                 alpha,
                 pointwise,
                 name="dark_convYxY"):
    """
    DarkNet unit: a single convolution block with Leaky ReLU activation,
    either pointwise (1x1) or spatial (3x3).

    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether use 1x1 (pointwise) convolution or 3x3 convolution.
    name : str, default 'dark_convYxY'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Both block builders share the same keyword interface, so select the
    # kernel size via a single dispatch.
    block_fn = conv1x1_block if pointwise else conv3x3_block
    return block_fn(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        activation=nn.LeakyReLU(alpha=alpha, name=name + "/activ"),
        name=name)