Python keras.optimizers.Adam() Examples
The following are 30 code examples showing how to use keras.optimizers.Adam(). They are extracted from open source projects; each example lists the project, author, source file, and license it comes from, so you can go to the original project or source file for full context. You may also want to check out all the available functions and classes of the keras.optimizers module.
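All of the examples below share the same basic pattern: construct an Adam instance, optionally overriding defaults such as the learning rate (lr=0.001, beta_1=0.9, beta_2=0.999), and pass it to model.compile(). The following is a minimal, self-contained sketch of that pattern; the toy model and random data are illustrative only and do not come from any of the projects below. Note that standalone Keras spells the learning-rate argument lr, while newer tf.keras versions rename it to learning_rate.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

# A toy regression model (illustrative only)
model = Sequential([
    Dense(16, activation='relu', input_dim=4),
    Dense(1),
])

# Adam with an explicit learning rate; the remaining
# hyperparameters keep their defaults
model.compile(optimizer=Adam(lr=1e-3), loss='mse')

# Fit on random data just to show the call pattern
X = np.random.rand(32, 4)
y = np.random.rand(32, 1)
model.fit(X, y, epochs=2, verbose=0)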
Example 1
Project: tartarus Author: sergiooramas File: train.py License: MIT License

def build_model(config):
    """Builds the cnn."""
    params = config.model_arch
    get_model = getattr(models, 'get_model_' + str(params['architecture']))
    model = get_model(params)
    # model = model_kenun.build_convnet_model(params)

    # Learning setup
    t_params = config.training_params
    sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
              momentum=t_params["momentum"], nesterov=t_params["nesterov"])
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    optimizer = eval(t_params['optimizer'])
    metrics = ['mean_squared_error']
    if config.model_arch["final_activation"] == 'softmax':
        metrics.append('categorical_accuracy')

    if t_params['loss_func'] == 'cosine':
        loss_func = eval(t_params['loss_func'])
    else:
        loss_func = t_params['loss_func']
    model.compile(loss=loss_func, optimizer=optimizer, metrics=metrics)
    return model
Example 2
Project: Jtyoui Author: jtyoui File: NER.py License: MIT License

def train_model():
    if cxl_model:
        embedding_matrix = load_embedding()
    else:
        embedding_matrix = {}
    train, label = vocab_train_label(train_path, vocab=vocab, tags=tag, max_chunk_length=length)
    n = np.array(label, dtype=np.float)
    labels = n.reshape((n.shape[0], n.shape[1], 1))
    model = Sequential([
        Embedding(input_dim=len(vocab), output_dim=300, mask_zero=True,
                  input_length=length, weights=[embedding_matrix], trainable=False),
        SpatialDropout1D(0.2),
        Bidirectional(layer=LSTM(units=150, return_sequences=True, dropout=0.2, recurrent_dropout=0.2)),
        TimeDistributed(Dense(len(tag), activation=relu)),
    ])
    crf_ = CRF(units=len(tag), sparse_target=True)
    model.add(crf_)
    model.compile(optimizer=Adam(), loss=crf_.loss_function, metrics=[crf_.accuracy])
    model.fit(x=np.array(train), y=labels, batch_size=16, epochs=4, callbacks=[RemoteMonitor()])
    model.save(model_path)
Example 3
Project: Jtyoui Author: jtyoui File: cnn_rnn_crf.py License: MIT License

def create_model():
    inputs = Input(shape=(length,), dtype='int32', name='inputs')
    embedding_1 = Embedding(len(vocab), EMBED_DIM, input_length=length, mask_zero=True)(inputs)
    bilstm = Bidirectional(LSTM(EMBED_DIM // 2, return_sequences=True))(embedding_1)
    bilstm_dropout = Dropout(DROPOUT_RATE)(bilstm)
    embedding_2 = Embedding(len(vocab), EMBED_DIM, input_length=length)(inputs)
    con = Conv1D(filters=FILTERS, kernel_size=2 * HALF_WIN_SIZE + 1, padding='same')(embedding_2)
    con_d = Dropout(DROPOUT_RATE)(con)
    dense_con = TimeDistributed(Dense(DENSE_DIM))(con_d)
    rnn_cnn = concatenate([bilstm_dropout, dense_con], axis=2)
    dense = TimeDistributed(Dense(len(chunk_tags)))(rnn_cnn)
    crf = CRF(len(chunk_tags), sparse_target=True)
    crf_output = crf(dense)
    model = Model(input=[inputs], output=[crf_output])
    model.compile(loss=crf.loss_function, optimizer=Adam(), metrics=[crf.accuracy])
    return model
Example 4
Project: deep_architect Author: negrinho File: main_keras.py License: MIT License

def evaluate(self, inputs, outputs):
    keras.backend.clear_session()

    X = Input(self.X_train[0].shape)
    co.forward({inputs['in']: X})
    logits = outputs['out'].val
    probs = Activation('softmax')(logits)

    model = Model(inputs=[inputs['in'].val], outputs=[probs])
    model.compile(optimizer=Adam(lr=self.learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(self.X_train, self.y_train,
                        batch_size=self.batch_size,
                        epochs=self.num_training_epochs,
                        validation_data=(self.X_val, self.y_val))
    results = {'validation_accuracy': history.history['val_accuracy'][-1]}
    return results
Example 5
Project: LearningX Author: ankonzoid File: NN_regr.py License: MIT License

def fit(self, X, y):
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    assert len(X.shape) == 2
    N, d = X.shape

    from keras.models import Sequential
    from keras.layers import Dense
    from keras.optimizers import Adam

    model = Sequential()
    model.add(Dense(10, input_dim=d, activation="relu"))
    model.add(Dense(10, activation="relu"))
    model.add(Dense(1, activation="relu"))
    model.compile(loss="mse", optimizer=Adam(lr=0.005))
    self.model = model

    n_epochs = 100
    self.model.fit(X, y, epochs=n_epochs, verbose=False)
Example 6
Project: reinforcement-learning-kr Author: rlcode File: reinforce_agent.py License: MIT License

def optimizer(self):
    action = K.placeholder(shape=[None, 5])
    discounted_rewards = K.placeholder(shape=[None, ])

    # Compute the cross-entropy loss
    action_prob = K.sum(action * self.model.output, axis=1)
    cross_entropy = K.log(action_prob) * discounted_rewards
    loss = -K.sum(cross_entropy)

    # Create the training function that updates the policy network
    optimizer = Adam(lr=self.learning_rate)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, action, discounted_rewards], [], updates=updates)

    return train

# Select an action with the policy network
Example 7
Project: Document-Classifier-LSTM Author: AlexGidiotis File: classifier.py License: MIT License

def load_model(stamp):
    """Load a saved model architecture and its weights from disk."""
    json_file = open(stamp + '.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()

    model = model_from_json(loaded_model_json, {'AttentionWithContext': AttentionWithContext})
    model.load_weights(stamp + '.h5')
    print("Loaded model from disk")

    model.summary()

    adam = Adam(lr=0.001)

    model.compile(loss='binary_crossentropy',
                  optimizer=adam,
                  metrics=[f1_score])

    return model
Example 8
Project: reinforcement-learning Author: rlcode File: reinforce_agent.py License: MIT License

def optimizer(self):
    action = K.placeholder(shape=[None, 5])
    discounted_rewards = K.placeholder(shape=[None, ])

    # Calculate cross entropy error function
    action_prob = K.sum(action * self.model.output, axis=1)
    cross_entropy = K.log(action_prob) * discounted_rewards
    loss = -K.sum(cross_entropy)

    # create training function
    optimizer = Adam(lr=self.learning_rate)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, action, discounted_rewards], [], updates=updates)

    return train

# get action from policy network
Example 9
Project: reinforcement-learning Author: rlcode File: cartpole_a3c.py License: MIT License

def actor_optimizer(self):
    action = K.placeholder(shape=(None, self.action_size))
    advantages = K.placeholder(shape=(None, ))

    policy = self.actor.output

    good_prob = K.sum(action * policy, axis=1)
    eligibility = K.log(good_prob + 1e-10) * K.stop_gradient(advantages)
    loss = -K.sum(eligibility)

    entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)

    actor_loss = loss + 0.01 * entropy

    optimizer = Adam(lr=self.actor_lr)
    updates = optimizer.get_updates(self.actor.trainable_weights, [], actor_loss)
    train = K.function([self.actor.input, action, advantages], [], updates=updates)
    return train

# make loss function for Value approximation
Example 10
Project: C51-DDQN-Keras Author: flyyufelix File: networks.py License: MIT License

def value_distribution_network(input_shape, num_atoms, action_size, learning_rate):
    """Model Value Distribution

    With states as inputs and output probability distributions for all actions.
    """
    state_input = Input(shape=(input_shape))
    cnn_feature = Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu')(state_input)
    cnn_feature = Convolution2D(64, 4, 4, subsample=(2, 2), activation='relu')(cnn_feature)
    cnn_feature = Convolution2D(64, 3, 3, activation='relu')(cnn_feature)
    cnn_feature = Flatten()(cnn_feature)
    cnn_feature = Dense(512, activation='relu')(cnn_feature)

    distribution_list = []
    for i in range(action_size):
        distribution_list.append(Dense(num_atoms, activation='softmax')(cnn_feature))

    model = Model(input=state_input, output=distribution_list)

    adam = Adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=adam)

    return model
Example 11
Project: 2019-OSS-Summer-RL Author: utilForever File: reinforce_agent.py License: MIT License

def optimizer(self):
    action = K.placeholder(shape=[None, 5])
    discounted_rewards = K.placeholder(shape=[None, ])

    # Compute the cross-entropy loss
    action_prob = K.sum(action * self.model.output, axis=1)
    cross_entropy = K.log(action_prob) * discounted_rewards
    loss = -K.sum(cross_entropy)

    # Create the training function that updates the policy network
    optimizer = Adam(lr=self.learning_rate)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, action, discounted_rewards], [], updates=updates)

    return train

# Select an action with the policy network
Example 12
Project: navbot Author: marooncn File: RNN.py License: MIT License

def _build(self):
    # the model that will be trained
    rnn_x = Input(shape=(None, Z_DIM + ACTION_DIM))
    lstm = LSTM(HIDDEN_UNITS, return_sequences=True, return_state=True)
    lstm_output, _, _ = lstm(rnn_x)
    mdn = Dense(Z_DIM)(lstm_output)
    rnn = Model(rnn_x, mdn)

    # the model used during prediction
    state_input_h = Input(shape=(HIDDEN_UNITS,))
    state_input_c = Input(shape=(HIDDEN_UNITS,))
    state_inputs = [state_input_h, state_input_c]
    _, state_h, state_c = lstm(rnn_x, initial_state=state_inputs)
    forward = Model([rnn_x] + state_inputs, [state_h, state_c])

    optimizer = Adam(lr=0.0001)
    # optimizer = SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=True)
    rnn.compile(loss='mean_squared_error', optimizer=optimizer)

    return [rnn, forward]
Example 13
Project: MCF-3D-CNN Author: xyj77 File: liver_model.py License: MIT License

def build_3dcnn_model(self, fusion_type, Fusion):
    if len(Fusion[0]) == 1:
        input_shape = (32, 32, len(Fusion))
        model_in, model = self.cnn_2D(input_shape)
    else:
        input_shape = (32, 32, 5, len(Fusion))
        model_in, model = self.cnn_3D(input_shape)
    model = Dropout(0.5)(model)
    model = Dense(32, activation='relu', name='fc2')(model)
    model = Dense(self.config.classes, activation='softmax', name='fc3')(model)
    model = Model(input=model_in, output=model)
    # Print a parameter summary
    # model.summary()
    plot_model(model, to_file='experiments/img/' + str(Fusion) + fusion_type + r'_model.png', show_shapes=True)
    print(' Saving model Architecture')

    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    # model.compile(optimizer=adam, loss=self.mycrossentropy, metrics=['accuracy'])  # improves results, but unstable
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
Example 14
Project: udacity-SDC-baseline Author: dolaameng File: model.py License: MIT License

def build_vgg16(image_size=None):
    image_size = image_size or (240, 240)
    if K.image_dim_ordering() == 'th':
        input_shape = (3,) + image_size
    else:
        input_shape = image_size + (3, )

    bottleneck_model = vgg16.VGG16(include_top=False, input_tensor=Input(input_shape))
    # bottleneck_model.trainable = False
    for layer in bottleneck_model.layers:
        layer.trainable = False

    x = bottleneck_model.input
    y = bottleneck_model.output
    y = Flatten()(y)
    y = BatchNormalization()(y)
    y = Dense(2048, activation='relu')(y)
    y = Dropout(.5)(y)
    y = Dense(1024, activation='relu')(y)
    y = Dropout(.5)(y)
    y = Dense(1)(y)

    model = Model(input=x, output=y)
    model.compile(optimizer=Adam(lr=1e-4), loss='mse')
    return model
Example 15
Project: SeqGAN Author: tyo-yo File: train.py License: MIT License

def pre_train_generator(self, g_epochs=3, g_pre_path=None, lr=1e-3):
    if g_pre_path is None:
        self.g_pre_path = os.path.join(self.top, 'data', 'save', 'generator_pre.hdf5')
    else:
        self.g_pre_path = g_pre_path

    g_adam = Adam(lr)
    self.generator_pre.compile(g_adam, 'categorical_crossentropy')
    print('Generator pre-training')
    self.generator_pre.summary()

    self.generator_pre.fit_generator(
        self.g_data,
        steps_per_epoch=None,
        epochs=g_epochs)
    self.generator_pre.save_weights(self.g_pre_path)
    self.reflect_pre_train()
Example 16
Project: siamese-two-stream Author: icarofua File: siamese.py License: Apache License 2.0

def siamese_model(type):
    if type == 'plate':
        input_shape = (image_size_h_p, image_size_w_p, nchannels)
    else:
        input_shape = (image_size_h_c, image_size_w_c, nchannels)

    left_input = Input(input_shape)
    right_input = Input(input_shape)

    convnet = small_vgg(input_shape)
    encoded_l = convnet(left_input)
    encoded_r = convnet(right_input)

    # Add the distance function to the network
    L1_distance = L1_layer([encoded_l, encoded_r])

    prediction = Dense(2, activation='softmax')(L1_distance)
    optimizer = Adam(0.001, decay=2.5e-4)

    model = Model(inputs=[left_input, right_input], outputs=prediction)
    model.compile(loss="binary_crossentropy", optimizer=optimizer, metrics=['accuracy'])
    return model

# ------------------------------------------------------------------------------
Example 17
Project: Keras-GAN Author: eriklindernoren File: sgan.py License: MIT License

def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.num_classes = 10
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(
        loss=['binary_crossentropy', 'categorical_crossentropy'],
        loss_weights=[0.5, 0.5],
        optimizer=optimizer,
        metrics=['accuracy']
    )

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise as input and generates imgs
    noise = Input(shape=(100,))
    img = self.generator(noise)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    valid, _ = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # Trains generator to fool discriminator
    self.combined = Model(noise, valid)
    self.combined.compile(loss=['binary_crossentropy'], optimizer=optimizer)
Example 18
Project: Keras-GAN Author: eriklindernoren File: context_encoder.py License: MIT License

def __init__(self):
    self.img_rows = 32
    self.img_cols = 32
    self.mask_height = 8
    self.mask_width = 8
    self.channels = 3
    self.num_classes = 2
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.missing_shape = (self.mask_height, self.mask_width, self.channels)

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes a masked image as input and generates the missing
    # part of the image
    masked_img = Input(shape=self.img_shape)
    gen_missing = self.generator(masked_img)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines
    # whether they are generated or real
    valid = self.discriminator(gen_missing)

    # The combined model (stacked generator and discriminator)
    # Trains the generator to fool the discriminator
    self.combined = Model(masked_img, [gen_missing, valid])
    self.combined.compile(loss=['mse', 'binary_crossentropy'],
                          loss_weights=[0.999, 0.001],
                          optimizer=optimizer)
Example 19
Project: Keras-GAN Author: eriklindernoren File: ccgan.py License: MIT License

def __init__(self):
    self.img_rows = 32
    self.img_cols = 32
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.mask_height = 10
    self.mask_width = 10
    self.num_classes = 10

    # Number of filters in first layer of generator and discriminator
    self.gf = 32
    self.df = 32

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss=['mse', 'categorical_crossentropy'],
                               loss_weights=[0.5, 0.5],
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes a masked image as input and generates imgs
    masked_img = Input(shape=self.img_shape)
    gen_img = self.generator(masked_img)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    valid, _ = self.discriminator(gen_img)

    # The combined model (stacked generator and discriminator)
    # Trains the generator to fool the discriminator
    self.combined = Model(masked_img, valid)
    self.combined.compile(loss=['mse'], optimizer=optimizer)
Example 20
Project: Keras-GAN Author: eriklindernoren File: bigan.py License: MIT License

def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss=['binary_crossentropy'],
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # Build the encoder
    self.encoder = self.build_encoder()

    # The part of the bigan that trains the discriminator and encoder
    self.discriminator.trainable = False

    # Generate image from sampled noise
    z = Input(shape=(self.latent_dim, ))
    img_ = self.generator(z)

    # Encode image
    img = Input(shape=self.img_shape)
    z_ = self.encoder(img)

    # Latent -> img is fake, and img -> latent is valid
    fake = self.discriminator([z, img_])
    valid = self.discriminator([z_, img])

    # Set up and compile the combined model
    # Trains generator to fool the discriminator
    self.bigan_generator = Model([z, img], [fake, valid])
    self.bigan_generator.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
                                 optimizer=optimizer)
Example 21
Project: Keras-GAN Author: eriklindernoren File: cgan.py License: MIT License

def __init__(self):
    # Input shape
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.num_classes = 10
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss=['binary_crossentropy'],
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise and the target label as input
    # and generates the corresponding digit of that label
    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,))
    img = self.generator([noise, label])

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes the generated image and the label as input
    # and determines validity
    valid = self.discriminator([img, label])

    # The combined model (stacked generator and discriminator)
    # Trains generator to fool discriminator
    self.combined = Model([noise, label], valid)
    self.combined.compile(loss=['binary_crossentropy'], optimizer=optimizer)
Example 22
Project: Keras-GAN Author: eriklindernoren File: lsgan.py License: MIT License

def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='mse',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise as input and generates imgs
    z = Input(shape=(self.latent_dim,))
    img = self.generator(z)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    valid = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # Trains generator to fool discriminator
    self.combined = Model(z, valid)
    # (!!!) Optimize w.r.t. MSE loss instead of crossentropy
    self.combined.compile(loss='mse', optimizer=optimizer)
Example 23
Project: Keras-GAN Author: eriklindernoren File: acgan.py License: MIT License

def __init__(self):
    # Input shape
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.num_classes = 10
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)
    losses = ['binary_crossentropy', 'sparse_categorical_crossentropy']

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss=losses,
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise and the target label as input
    # and generates the corresponding digit of that label
    noise = Input(shape=(self.latent_dim,))
    label = Input(shape=(1,))
    img = self.generator([noise, label])

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated image as input and determines validity
    # and the label of that image
    valid, target_label = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # Trains the generator to fool the discriminator
    self.combined = Model([noise, label], [valid, target_label])
    self.combined.compile(loss=losses, optimizer=optimizer)
Example 24
Project: Keras-GAN Author: eriklindernoren File: cogan.py License: MIT License

def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminators
    self.d1, self.d2 = self.build_discriminators()
    self.d1.compile(loss='binary_crossentropy',
                    optimizer=optimizer,
                    metrics=['accuracy'])
    self.d2.compile(loss='binary_crossentropy',
                    optimizer=optimizer,
                    metrics=['accuracy'])

    # Build the generators
    self.g1, self.g2 = self.build_generators()

    # The generators take noise as input and generate imgs
    z = Input(shape=(self.latent_dim,))
    img1 = self.g1(z)
    img2 = self.g2(z)

    # For the combined model we will only train the generators
    self.d1.trainable = False
    self.d2.trainable = False

    # The discriminators take generated images as input and determine validity
    valid1 = self.d1(img1)
    valid2 = self.d2(img2)

    # The combined model (stacked generators and discriminators)
    # Trains generators to fool discriminators
    self.combined = Model(z, [valid1, valid2])
    self.combined.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
                          optimizer=optimizer)
Example 25
Project: Keras-GAN Author: eriklindernoren File: dcgan.py License: MIT License

def __init__(self):
    # Input shape
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise as input and generates imgs
    z = Input(shape=(self.latent_dim,))
    img = self.generator(z)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    valid = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # Trains the generator to fool the discriminator
    self.combined = Model(z, valid)
    self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
Example 26
Project: Keras-GAN Author: eriklindernoren File: gan.py License: MIT License

def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise as input and generates imgs
    z = Input(shape=(self.latent_dim,))
    img = self.generator(z)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    validity = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # Trains the generator to fool the discriminator
    self.combined = Model(z, validity)
    self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
Example 27
Project: Keras-GAN Author: eriklindernoren File: aae.py License: MIT License

def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 10

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the encoder / decoder
    self.encoder = self.build_encoder()
    self.decoder = self.build_decoder()

    img = Input(shape=self.img_shape)
    # The generator takes the image, encodes it and reconstructs it
    # from the encoding
    encoded_repr = self.encoder(img)
    reconstructed_img = self.decoder(encoded_repr)

    # For the adversarial_autoencoder model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator determines validity of the encoding
    validity = self.discriminator(encoded_repr)

    # The adversarial_autoencoder model (stacked generator and discriminator)
    self.adversarial_autoencoder = Model(img, [reconstructed_img, validity])
    self.adversarial_autoencoder.compile(loss=['mse', 'binary_crossentropy'],
                                         loss_weights=[0.999, 0.001],
                                         optimizer=optimizer)
Example 28
Project: Keras-GAN Author: eriklindernoren File: bgan.py License: MIT License

def __init__(self):
    self.img_rows = 28
    self.img_cols = 28
    self.channels = 1
    self.img_shape = (self.img_rows, self.img_cols, self.channels)
    self.latent_dim = 100

    optimizer = Adam(0.0002, 0.5)

    # Build and compile the discriminator
    self.discriminator = self.build_discriminator()
    self.discriminator.compile(loss='binary_crossentropy',
                               optimizer=optimizer,
                               metrics=['accuracy'])

    # Build the generator
    self.generator = self.build_generator()

    # The generator takes noise as input and generates imgs
    z = Input(shape=(self.latent_dim,))
    img = self.generator(z)

    # For the combined model we will only train the generator
    self.discriminator.trainable = False

    # The discriminator takes generated images as input and determines validity
    valid = self.discriminator(img)

    # The combined model (stacked generator and discriminator)
    # Trains the generator to fool the discriminator
    self.combined = Model(z, valid)
    self.combined.compile(loss=self.boundary_loss, optimizer=optimizer)
Example 29
Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition Author: microsoft File: va-rnn.py License: MIT License

def main(rootdir, case, results):
    train_x, train_y, valid_x, valid_y, test_x, test_y = get_data(args.dataset, case)
    input_shape = (train_x.shape[1], train_x.shape[2])
    num_class = train_y.shape[1]

    if not os.path.exists(rootdir):
        os.makedirs(rootdir)

    filepath = os.path.join(rootdir, str(case) + '.hdf5')
    saveto = os.path.join(rootdir, str(case) + '.csv')
    optimizer = Adam(lr=args.lr, clipnorm=args.clip)
    pred_dir = os.path.join(rootdir, str(case) + '_pred.txt')

    if args.train:
        model = creat_model(input_shape, num_class)
        early_stop = EarlyStopping(monitor='val_acc', patience=15, mode='auto')
        reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=5,
                                      mode='auto', cooldown=3., verbose=1)
        checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                                     save_best_only=True, mode='auto')
        csv_logger = CSVLogger(saveto)
        if args.dataset == 'NTU' or args.dataset == 'PKU':
            callbacks_list = [csv_logger, checkpoint, early_stop, reduce_lr]
        else:
            callbacks_list = [csv_logger, checkpoint]
        model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
        model.fit(train_x, train_y, validation_data=[valid_x, valid_y],
                  epochs=args.epochs, batch_size=args.batch_size,
                  callbacks=callbacks_list, verbose=2)

    # test
    model = creat_model(input_shape, num_class)
    model.load_weights(filepath)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    scores = get_activation(model, test_x, test_y, pred_dir, VA=10, par=9)
    results.append(round(scores, 2))
Example 30
Project: Jtyoui Author: jtyoui File: HandWritingRecognition.py License: MIT License

def cnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize to [0, 1]
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.
    # One-hot encode the labels
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    model = Sequential([
        # input_shape: input plane, set on the first layer only
        # filters: number of convolution kernels (filters)
        # kernel_size: size of each kernel
        # strides: stride
        # padding: either 'same' or 'valid'
        # activation: activation function
        Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1,
                      padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Convolution2D(filters=64, kernel_size=5, padding='same', activation=relu),
        # the original passed trainable=2 here, almost certainly a typo for strides=2
        # (which is also MaxPool2D's default when strides is unset)
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Flatten(),  # flatten to a vector
        Dense(units=1024, activation=relu),
        Dropout(0.5),
        Dense(units=10, activation=softmax),
    ])
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5')