Python keras.optimizers.Adagrad() Examples

The following are 24 code examples of keras.optimizers.Adagrad(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module keras.optimizers, or try the search function.
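All of the project snippets below follow the same basic pattern: build an Adagrad instance from keras.optimizers and hand it to model.compile(). As a minimal, self-contained sketch of that pattern (the toy model, layer sizes and random data here are illustrative assumptions, not taken from any of the projects below):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adagrad

# Tiny illustrative model: 8 input features, one sigmoid output unit.
model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(8,)))
model.add(Dense(1, activation='sigmoid'))

# lr, epsilon and clipnorm mirror the values that recur in the examples below.
optimizer = Adagrad(lr=0.01, epsilon=1e-06, clipnorm=10)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

# Random toy data, just to show the compiled model training for one epoch.
x = np.random.random((32, 8))
y = np.random.randint(0, 2, size=(32, 1))
model.fit(x, y, epochs=1, batch_size=8, verbose=0)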
Example #1
Source File: optimizers.py    From Unsupervised-Aspect-Extraction with Apache License 2.0
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
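The get_optimizer() helpers in Examples #1, #2, #4, #6, #7 and #9 differ only in their default learning rates; each reads args.algorithm and returns an optimizer with the shared clipnorm/clipvalue settings applied. A hypothetical call site might look like this (the --algorithm flag name is an assumption for illustration; the projects define their own argument parsing):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--algorithm', default='adagrad',
                    choices=['rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'])
args = parser.parse_args()

optimizer = get_optimizer(args)   # with the default, returns opt.Adagrad(lr=0.01, clipnorm=10, clipvalue=0)
# The returned object is then typically passed straight to model.compile(optimizer=optimizer, ...).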
Example #2
Source File: optimizers.py    From DAS with Apache License 2.0
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.0005, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Example #3
Source File: BuildModel.py    From RMDL with GNU General Public License v3.0
def optimizors(random_optimizor):
    if random_optimizor:
        # Pick one of the five optimizers at random; the range covers every
        # branch below so that SGD and Nadam can also be selected.
        i = random.randint(0, 4)
        if i == 0:
            opt = optimizers.SGD()
        elif i == 1:
            opt = optimizers.RMSprop()
        elif i == 2:
            opt = optimizers.Adagrad()
        elif i == 3:
            opt = optimizers.Adam()
        elif i == 4:
            opt = optimizers.Nadam()
        print(opt)
    else:
        opt = optimizers.Adam()
    return opt 
Example #4
Source File: optimizers.py    From IMN-E2E-ABSA with Apache License 2.0
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.0001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Example #5
Source File: trainer.py    From image-segmentation with MIT License
def get_optimizer(config):
    if config.OPTIMIZER == 'SGD':
        return SGD(lr=config.LEARNING_RATE, momentum=config.LEARNING_MOMENTUM, clipnorm=config.GRADIENT_CLIP_NORM, nesterov=config.NESTEROV)
    elif config.OPTIMIZER == 'RMSprop':
        return RMSprop(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adagrad':
        return Adagrad(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adadelta':
        return Adadelta(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Adam':
        return Adam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM, amsgrad=config.AMSGRAD)
    elif config.OPTIMIZER == 'Adamax':
        return Adamax(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    elif config.OPTIMIZER == 'Nadam':
        return Nadam(lr=config.LEARNING_RATE, clipnorm=config.GRADIENT_CLIP_NORM)
    else:
        raise Exception('Unrecognized optimizer: {}'.format(config.OPTIMIZER)) 
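The variant above is driven by a config object rather than CLI arguments; for the Adagrad branch it only reads config.OPTIMIZER, config.LEARNING_RATE and config.GRADIENT_CLIP_NORM. A minimal stand-in for trying that branch (SimpleNamespace is used purely for illustration; the project presumably defines its own config class):

from types import SimpleNamespace

config = SimpleNamespace(OPTIMIZER='Adagrad',
                         LEARNING_RATE=0.01,
                         GRADIENT_CLIP_NORM=5.0)

optimizer = get_optimizer(config)   # -> Adagrad(lr=0.01, clipnorm=5.0)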
Example #6
Source File: optimizers.py    From nea with GNU General Public License v3.0
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Example #7
Source File: optimizers.py    From Attention-Based-Aspect-Extraction with Apache License 2.0
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Example #8
Source File: utils_models.py    From auto_ml with MIT License
def get_optimizer(name='Adadelta'):
    if name == 'SGD':
        return optimizers.SGD(clipnorm=1.)
    if name == 'RMSprop':
        return optimizers.RMSprop(clipnorm=1.)
    if name == 'Adagrad':
        return optimizers.Adagrad(clipnorm=1.)
    if name == 'Adadelta':
        return optimizers.Adadelta(clipnorm=1.)
    if name == 'Adam':
        return optimizers.Adam(clipnorm=1.)
    if name == 'Adamax':
        return optimizers.Adamax(clipnorm=1.)
    if name == 'Nadam':
        return optimizers.Nadam(clipnorm=1.)

    return optimizers.Adam(clipnorm=1.) 
Example #9
Source File: optimizers.py    From Aspect-level-sentiment with Apache License 2.0
def get_optimizer(args):

	clipvalue = 0
	clipnorm = 10

	if args.algorithm == 'rmsprop':
		optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'sgd':
		optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adagrad':
		optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adadelta':
		optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adam':
		optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	elif args.algorithm == 'adamax':
		optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
	
	return optimizer 
Example #10
Source File: deepae.py    From KATE with BSD 3-Clause "New" or "Revised" License
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
        print 'Training autoencoder'
        optimizer = Adadelta(lr=1.5)
        # optimizer = Adam()
        # optimizer = Adagrad()
        if feature_weights is None:
            self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy') # kld, binary_crossentropy, mse
        else:
            print 'Using weighted loss'
            self.autoencoder.compile(optimizer=optimizer, loss=weighted_binary_crossentropy(feature_weights)) # kld, binary_crossentropy, mse

        self.autoencoder.fit(train_X[0], train_X[1],
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_data=(val_X[0], val_X[1]),
                        callbacks=[
                                    ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                                    EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                                    # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                        ]
                        )

        return self 
Example #11
Source File: KerasCallback.py    From aetros-cli with MIT License
def get_learning_rate(self):

        if hasattr(self.model, 'optimizer'):
            config = self.model.optimizer.get_config()

            from keras.optimizers import Adadelta, Adam, Adamax, Adagrad, RMSprop, SGD

            if isinstance(self.model.optimizer, Adadelta) or isinstance(self.model.optimizer, Adam) \
                    or isinstance(self.model.optimizer, Adamax) or isinstance(self.model.optimizer, Adagrad)\
                    or isinstance(self.model.optimizer, RMSprop) or isinstance(self.model.optimizer, SGD):
                return config['lr'] * (1. / (1. + config['decay'] * float(K.get_value(self.model.optimizer.iterations))))

            elif 'lr' in config:
                return config['lr'] 
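The expression above is Keras's standard time-based decay: after t parameter updates the effective rate is lr / (1 + decay * t). A quick numeric illustration (the values are assumed purely for the sake of the example):

lr, decay, iterations = 0.01, 1e-4, 1000
effective_lr = lr * (1. / (1. + decay * float(iterations)))
print(effective_lr)   # ~0.00909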
Example #12
Source File: optimizers_test.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_adagrad():
    _test_optimizer(optimizers.Adagrad())
    _test_optimizer(optimizers.Adagrad(decay=1e-3)) 
Example #13
Source File: normalizers.py    From talos with MIT License
def lr_normalizer(lr, optimizer):
    """Assuming a default learning rate 1, rescales the learning rate
    such that learning rates amongst different optimizers are more or less
    equivalent.

    Parameters
    ----------
    lr : float
        The learning rate.
    optimizer : keras optimizer
        The optimizer. For example, Adagrad, Adam, RMSprop.
    """

    from keras.optimizers import SGD, Adam, Adadelta, Adagrad, Adamax, RMSprop
    from keras.optimizers import Nadam
    from talos.utils.exceptions import TalosModelError

    if optimizer == Adadelta:
        pass
    elif optimizer == SGD or optimizer == Adagrad:
        lr /= 100.0
    elif optimizer == Adam or optimizer == RMSprop:
        lr /= 1000.0
    elif optimizer == Adamax or optimizer == Nadam:
        lr /= 500.0
    else:
        raise TalosModelError(str(optimizer) + " is not supported by lr_normalizer")

    return lr 
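For reference, the rescaling above yields the following for a nominal rate of 1.0 (the numbers follow directly from the branches in the function):

from keras.optimizers import SGD, Adagrad, Adam

print(lr_normalizer(1.0, Adagrad))   # 0.01   (SGD and Adagrad: divided by 100)
print(lr_normalizer(1.0, Adam))      # 0.001  (Adam and RMSprop: divided by 1000)
print(lr_normalizer(0.5, SGD))       # 0.005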
Example #14
Source File: w2vv.py    From w2vv with MIT License
def compile_model(self, loss_name, opt=None):
        print "loss function: ", loss_name
        print "optimizer: ", opt.optimizer
        print "learning_rate: ", opt.lr
        if loss_name == 'mse':
            loss = loss_name

        clipnorm = opt.clipnorm
        optimizer = opt.optimizer
        learning_rate = opt.lr
        if optimizer == 'sgd':
            # let's train the model using SGD + momentum (how original).
            if clipnorm > 0:
                sgd = SGD(lr=learning_rate, clipnorm=clipnorm, decay=1e-6, momentum=0.9, nesterov=True)
            else:
                sgd = SGD(lr=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss=loss, optimizer=sgd)
        elif optimizer == 'rmsprop':
            if clipnorm > 0:
                rmsprop = RMSprop(lr=learning_rate, clipnorm=clipnorm, rho=0.9, epsilon=1e-6)
            else:
                rmsprop = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-6)
            self.model.compile(loss=loss, optimizer=rmsprop)
        elif optimizer == 'adagrad':
            if clipnorm > 0:
                adagrad = Adagrad(lr=learning_rate, clipnorm=clipnorm, epsilon=1e-06)
            else:
                adagrad = Adagrad(lr=learning_rate, epsilon=1e-06)
            self.model.compile(loss=loss, optimizer=adagrad)
        elif optimizer == 'adam':
            if clipnorm > 0:
                adam = Adam(lr=learning_rate, clipnorm=clipnorm, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
            else:
                adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
            self.model.compile(loss=loss, optimizer=adam) 
Example #15
Source File: NeuMF_RecommenderWrapper.py    From RecSys2019_DeepLearning_Evaluation with GNU Affero General Public License v3.0
def set_learner(model, learning_rate, learner):

    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate), loss='binary_crossentropy')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate), loss='binary_crossentropy')
    else:
        model.compile(optimizer=SGD(lr=learning_rate), loss='binary_crossentropy')

    return model 
Example #16
Source File: base_model.py    From saber with MIT License
def _compile(self, model, loss_function, optimizer, lr=0.01, decay=0.0, clipnorm=0.0):
        """Compiles a model specified with Keras.

        See https://keras.io/optimizers/ for more info on each optimizer.

        Args:
            model: Keras model object to compile
            loss_function: Keras loss_function object to compile model with
            optimizer (str): the optimizer to use during training
            lr (float): learning rate to use during training
            decay (float): per epoch decay rate
            clipnorm (float): gradient normalization threshold
        """
        # The parameters of these optimizers can be freely tuned.
        if optimizer == 'sgd':
            optimizer_ = optimizers.SGD(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adam':
            optimizer_ = optimizers.Adam(lr=lr, decay=decay, clipnorm=clipnorm)
        elif optimizer == 'adamax':
            optimizer_ = optimizers.Adamax(lr=lr, decay=decay, clipnorm=clipnorm)
        # It is recommended to leave the parameters of this optimizer at their
        # default values (except the learning rate, which can be freely tuned).
        # This optimizer is usually a good choice for recurrent neural networks
        elif optimizer == 'rmsprop':
            optimizer_ = optimizers.RMSprop(lr=lr, clipnorm=clipnorm)
        # It is recommended to leave the parameters of these optimizers at their
        # default values.
        elif optimizer == 'adagrad':
            optimizer_ = optimizers.Adagrad(clipnorm=clipnorm)
        elif optimizer == 'adadelta':
            optimizer_ = optimizers.Adadelta(clipnorm=clipnorm)
        elif optimizer == 'nadam':
            optimizer_ = optimizers.Nadam(clipnorm=clipnorm)
        else:
            err_msg = "Argument for `optimizer` is invalid, got: {}".format(optimizer)
            LOGGER.error('ValueError %s', err_msg)
            raise ValueError(err_msg)

        model.compile(optimizer=optimizer_, loss=loss_function) 
Example #17
Source File: agent.py    From StockRecommendSystem with MIT License
def buildnetwork(self):
        model = Sequential()
        # Single LSTM layer feeding a linear output unit, trained with Adagrad.
        model.add(LSTM(20, dropout=0.2, input_shape=(self.seq_len, self.n_feature)))
        model.add(Dense(1, activation=None))
        model.compile(loss='mean_squared_error', optimizer=Adagrad(lr=0.002, clipvalue=10), metrics=['mean_squared_error'])

        return model 
Example #18
Source File: test_optimizers.py    From CAPTCHA-breaking with MIT License
def test_adagrad(self):
        print('test Adagrad')
        self.assertTrue(_test_optimizer(Adagrad())) 
Example #19
Source File: train_multi_v2.py    From DeepFashion with Apache License 2.0
def create_model(input_shape, optimizer='Adagrad', learn_rate=None, decay=0.0, momentum=0.0, activation='relu', dropout_rate=0.5):
    logging.debug('input_shape {}'.format(input_shape))
    logging.debug('input_shape {}'.format(type(input_shape)))

    # input_shape = (7, 7, 512)

    # Optimizer
    optimizer, learn_rate = get_optimizer(optimizer, learn_rate, decay, momentum)


    # Model
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))

    model.add(Dense(256, activation=activation))
    model.add(Dropout(dropout_rate))
    model.add(Dense(len(class_names), activation='softmax'))                                        # Binary to Multi classification changes
    # model.add(Dense(1, activation='sigmoid'))

    logging.debug('model summary {}'.format(model.summary()))


    # Compile
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy', metrics=['accuracy'])                     # Binary to Multi classification changes

    logging.info('optimizer:{}  learn_rate:{}  decay:{}  momentum:{}  activation:{}  dropout_rate:{}'.format(
        optimizer, learn_rate, decay, momentum, activation, dropout_rate))

    return model 
Example #20
Source File: train_multi.py    From DeepFashion with Apache License 2.0
def create_model(input_shape, optimizer='Adagrad', learn_rate=None, decay=0.0, momentum=0.0, activation='relu', dropout_rate=0.5):
    logging.debug('input_shape {}'.format(input_shape))
    logging.debug('input_shape {}'.format(type(input_shape)))

    # input_shape = (7, 7, 512)

    # Optimizer
    optimizer, learn_rate = get_optimizer(optimizer, learn_rate, decay, momentum)


    # Model
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))

    model.add(Dense(256, activation=activation))
    model.add(Dropout(dropout_rate))
    model.add(Dense(len(class_names), activation='softmax'))                                        # Binary to Multi classification changes
    # model.add(Dense(1, activation='sigmoid'))

    logging.debug('model summary {}'.format(model.summary()))


    # Compile
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy', metrics=['accuracy'])                     # Binary to Multi classification changes

    logging.info('optimizer:{}  learn_rate:{}  decay:{}  momentum:{}  activation:{}  dropout_rate:{}'.format(
        optimizer, learn_rate, decay, momentum, activation, dropout_rate))

    return model 
Example #21
Source File: train_multi.py    From DeepFashion with Apache License 2.0
def get_optimizer(optimizer='Adagrad', lr=None, decay=0.0, momentum=0.0):

    if optimizer == 'SGD':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False)

    elif optimizer == 'RMSprop':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adagrad':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.Adagrad(lr=lr, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adadelta':
        if lr is None:
            lr = 1.0
        optimizer_mod = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Adam':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Adamax':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Nadam':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)

    else:
        logging.error('Unknown optimizer {}'.format(optimizer))
        exit(1)

    # logging.debug('lr {}'.format(lr))
    # logging.debug('momentum {}'.format(momentum))
    # logging.debug('decay {}'.format(decay))
    # logging.debug('optimizer_mod {}'.format(optimizer_mod))

    return optimizer_mod, lr 
Example #22
Source File: train_multi_v4.py    From DeepFashion with Apache License 2.0
def get_optimizer(optimizer='Adagrad', lr=None, decay=0.0, momentum=0.0):

    if optimizer == 'SGD':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False)

    elif optimizer == 'RMSprop':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adagrad':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.Adagrad(lr=lr, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adadelta':
        if lr is None:
            lr = 1.0
        optimizer_mod = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Adam':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Adamax':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Nadam':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)

    else:
        logging.error('Unknown optimizer {}'.format(optimizer))
        exit(1)

    # logging.debug('lr {}'.format(lr))
    # logging.debug('momentum {}'.format(momentum))
    # logging.debug('decay {}'.format(decay))
    # logging.debug('optimizer_mod {}'.format(optimizer_mod))

    return optimizer_mod, lr


# INPUT:
#           VGG16 - block5_pool (MaxPooling2D) (None, 7, 7, 512)
# OUTPUT:
#           Branch1 - Class Prediction
#           Branch2 - IOU Prediction

# NOTE: Both models in create_model_train() and create_model_predict() should be exactly the same 
Example #23
Source File: train_multi_v4.py    From DeepFashion with Apache License 2.0
def create_model_train(input_shape, optimizer='Adagrad', learn_rate=None, decay=0.0, momentum=0.0, activation='relu', dropout_rate=0.5):
    logging.debug('input_shape {}'.format(input_shape))
    logging.debug('input_shape {}'.format(type(input_shape)))

    # Optimizer
    optimizer, learn_rate = get_optimizer(optimizer, learn_rate, decay, momentum)

    # input_shape = (7, 7, 512)                                                                     # VGG bottleneck layer - block5_pool (MaxPooling2D)

    inputs = Input(shape=(input_shape))
    # x_common = Dense(256, activation='relu')(inputs)

    ## Model Classification
    #x = Flatten()(x_common)
    x = Flatten()(inputs)
    x = Dense(256, activation='tanh')(x)
    x = Dropout(dropout_rate)(x)
    predictions_class = Dense(len(class_names), activation='softmax', name='predictions_class')(x)


    ## Model (Regression) IOU score
    #x = Flatten()(x_common)
    x = Flatten()(inputs)
    x = Dense(256, activation='tanh')(x)
    x = Dropout(dropout_rate)(x)
    x = Dense(256, activation='tanh')(x)
    x = Dropout(dropout_rate)(x)
    predictions_iou = Dense(1, activation='sigmoid', name='predictions_iou')(x)


    # This creates a model that includes the Input layer and three Dense layers
    model = Model(inputs=inputs, outputs=[predictions_class, predictions_iou])


    logging.debug('model summary {}'.format(model.summary()))


    # Compile
    model.compile(optimizer=optimizer,
                  loss={'predictions_class': 'sparse_categorical_crossentropy', 'predictions_iou': 'mean_squared_error'}, metrics=['accuracy'],
                  loss_weights={'predictions_class': 0.5, 'predictions_iou': 0.5})
                  #loss_weights={'predictions_class': 0.5, 'predictions_iou': 0.5})
                  #loss={'predictions_class': 'sparse_categorical_crossentropy', 'predictions_iou': 'logcosh'}, metrics=['accuracy'],

    logging.info('optimizer:{}  learn_rate:{}  decay:{}  momentum:{}  activation:{}  dropout_rate:{}'.format(
        optimizer, learn_rate, decay, momentum, activation, dropout_rate))

    return model 
Example #24
Source File: train_multi_v2.py    From DeepFashion with Apache License 2.0
def get_optimizer(optimizer='Adagrad', lr=None, decay=0.0, momentum=0.0):

    if optimizer == 'SGD':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.SGD(lr=lr, momentum=momentum, decay=decay, nesterov=False)

    elif optimizer == 'RMSprop':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adagrad':
        if lr is None:
            lr = 0.01
        optimizer_mod = keras.optimizers.Adagrad(lr=lr, epsilon=1e-08, decay=decay)

    elif optimizer == 'Adadelta':
        if lr is None:
            lr = 1.0
        optimizer_mod = keras.optimizers.Adadelta(lr=lr, rho=0.95, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Adam':
        if lr is None:
            lr = 0.001
        optimizer_mod = keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Adamax':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

    elif optimizer == 'Nadam':
        if lr is None:
            lr = 0.002
        optimizer_mod = keras.optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)

    else:
        logging.error('Unknown optimizer {}'.format(optimizer))
        exit(1)

    # logging.debug('lr {}'.format(lr))
    # logging.debug('momentum {}'.format(momentum))
    # logging.debug('decay {}'.format(decay))
    # logging.debug('optimizer_mod {}'.format(optimizer_mod))

    return optimizer_mod, lr