Python keras.layers Examples

The following code examples show how to use the keras.layers module. They are drawn from open source Python projects.
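For orientation, here is a minimal sketch of the functional-API pattern that most of the examples below follow (this snippet is illustrative and not taken from any of the listed projects):

from keras.layers import Input, Dense
from keras.models import Model

# A tiny two-layer classifier built with the functional API:
# each layer is called on the previous tensor, and Model ties
# the input and output tensors together.
inputs = Input(shape=(16,))
hidden = Dense(32, activation='relu')(inputs)
outputs = Dense(3, activation='softmax')(hidden)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')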

Example 1
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0
def weather_ae(layers, lr, decay, loss, 
               input_len, input_features):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    
    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='relu')(inputs)
        else:
            hn = Dense(hidden_nums, activation='relu')(hn)

    outputs = Dense(3, activation='sigmoid', name='output_layer')(hn)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
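A hypothetical invocation, for illustration only (the argument values are invented; note that lr, decay, and loss are accepted but unused in this excerpt):

model = weather_ae(layers=[64, 32], lr=0.001, decay=0.0,
                   loss='mse', input_len=37, input_features=29)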
Example 2
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE_subnets_swish(id_embd=True, time_embd=True,
            lr=self.lr, decay=self.decay,
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser)

        def _mve_loss(y_true, y_pred):
            pred_u = crop(2,0,3)(y_pred)
            pred_sig = crop(2,3,6)(y_pred)
            print(pred_sig)
            #exp_sig = tf.exp(pred_sig) # guards against pred_sig being too small (e.g., zero)
            #precision = 1./exp_sig
            precision = 1./pred_sig
            #log_loss = 0.5*tf.log(exp_sig) + 0.5*precision*((pred_u-y_true)**2)
            log_loss = 0.5*tf.log(pred_sig) + 0.5*precision*((pred_u-y_true)**2)

            log_loss = tf.reduce_mean(log_loss)
            return log_loss

        print(self.model.summary())
        self.model.compile(optimizer = self.optimizer, loss=_mve_loss) 
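The loss above is a mean-variance estimation (MVE) objective: the network predicts both a mean and a variance, and the loss is the Gaussian negative log-likelihood. A self-contained restatement, assuming TensorFlow 2 (tf.math.log replaces the older tf.log used above) and the same packing as crop(2, 0, 3) / crop(2, 3, 6), i.e. channels 0:3 of the last axis hold the mean and 3:6 the variance:

import tensorflow as tf

def mve_loss(y_true, y_pred):
    pred_u = y_pred[..., 0:3]    # predicted mean
    pred_sig = y_pred[..., 3:6]  # predicted variance
    precision = 1.0 / pred_sig
    # 0.5*log(sigma^2) + 0.5*(mu - y)^2 / sigma^2, up to an additive constant
    nll = 0.5 * tf.math.log(pred_sig) + 0.5 * precision * tf.square(pred_u - y_true)
    return tf.reduce_mean(nll)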
Example 3
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gap_keras.py    MIT License
def GAP():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (7, 7), padding='valid', strides=2, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', strides=2, activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    # GAP
    x = Conv2D(num_classes, (1, 1), padding='same', activation=None, name='out')(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
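Design note: instead of the Flatten-plus-Dense classifier head used in the AlexNet example below, this network maps features to num_classes with a 1x1 convolution and reduces each class map to a single score via global average pooling (the GAP head popularized by Network in Network), which removes the large fully connected layers and most of their parameters.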
Example 4
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: alexnet_keras.py    MIT License
def AlexNet():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(96, (11, 11), padding='valid', strides=4, activation='relu', name='conv1')(inputs)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Conv2D(256, (5, 5), padding='valid', activation='relu', name='conv2')(x)
    x = keras.layers.ZeroPadding2D(1)(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), padding='same', activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu', name='conv5')(x)
    
    x = Flatten()(x)
    x = Dense(4096, name='dense1', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, name='dense2', activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 5
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nin_keras.py    MIT License
def NIN():
    inputs = Input((img_height, img_width, 3))
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv1')(inputs)
    x = Conv2D(160, (1, 1), padding='same', strides=1, activation='relu', name='cccp1')(x)
    x = Conv2D(96, (1, 1), padding='same', strides=1, activation='relu', name='cccp2')(x)
    x = MaxPooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (5, 5), padding='same', strides=1, activation='relu', name='conv2')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp4')(x)
    x = AveragePooling2D((3, 3), strides=2,  padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(192, (3, 3), padding='same', strides=1, activation='relu', name='conv3')(x)
    x = Conv2D(192, (1, 1), padding='same', strides=1, activation='relu', name='cccp5')(x)
    x = Conv2D(num_classes, (1, 1), padding='same', strides=1, activation='relu', name='cccp6')(x)
    x = keras.layers.GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 6
Project: sleep-convolutions-tf   Author: cliffordlab   File: model.py    MIT License
def save_summary(model, filename):
    layers = model.layers
    relevant_nodes = model.container_nodes if hasattr(model, 'container_nodes') else None
    column_names = ['Layer', 'Type', 'Output Shape', 'Param #', 'Connected to']

    summary = []
    for layer in layers:
        summary.append([
            layer.name,
            layer.__class__.__name__,
            _format_output_shape(layer),
            layer.count_params(),
            '\n'.join(_layer_connections(layer, relevant_nodes=relevant_nodes))
        ])

    import pandas
    df = pandas.DataFrame(data=summary, columns=column_names)    
    df.to_csv(filename, index=False) 
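_format_output_shape and _layer_connections are module-level helpers from the same file, not shown in this excerpt. Assuming they are available, usage is a one-liner:

save_summary(model, 'model_summary.csv')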
Example 7
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License
def _get_softmax_name(self):
        """
        Looks for the name of the softmax layer.
        :return: Softmax layer name
        """
        for i, layer in enumerate(self.model.layers):
            cfg = layer.get_config()
            if 'activation' in cfg and cfg['activation'] == 'softmax':
                return layer.name

        raise Exception("No softmax layers found") 
Example 8
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License
def get_layer_names(self):
        """
        :return: Names of all the layers kept by Keras
        """
        layer_names = [x.name for x in self.model.layers]
        return layer_names 
Example 9
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License
def fprop(self, x):
        """
        Exposes all the layers of the model returned by get_layer_names.
        :param x: A symbolic representation of the network input
        :return: A dictionary mapping layer names to the symbolic
                 representation of their output.
        """
        from keras.models import Model as KerasModel

        if self.keras_model is None:
            # Get the input layer
            new_input = self.model.get_input_at(0)

            # Make a new model that returns each of the layers as output
            out_layers = [x_layer.output for x_layer in self.model.layers]
            self.keras_model = KerasModel(new_input, out_layers)

        # and get the outputs for that model on the input x
        outputs = self.keras_model(x)

        # Keras returns a bare tensor rather than a list when the model
        # has only one layer, so wrap it in a list for consistency
        if len(self.model.layers) == 1:
            outputs = [outputs]

        # compute the dict to return
        fprop_dict = dict(zip(self.get_layer_names(), outputs))

        return fprop_dict 
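The core trick here is compact enough to restate on its own. A minimal sketch, assuming a plain single-input Keras model:

from keras.models import Model

def layer_output_model(model):
    # Wrap an existing model so that calling it yields the output of
    # every layer rather than just the final one.
    return Model(model.get_input_at(0),
                 [layer.output for layer in model.layers])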
Example 10
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0
def weather_conv1D(layers, lr, decay, loss, 
               input_len, input_features, 
               strides_len, kernel_size):
    
    inputs = Input(shape=(input_len, input_features), name='input_layer')
    for i, hidden_nums in enumerate(layers):
        if i==0:
            #inputs = BatchNormalization(name='BN_input')(inputs)
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len, 
                        data_format='channels_last', 
                        padding='same', activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        elif i<len(layers)-1:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last', 
                        padding='same',activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 
            hn = Activation('relu')(hn)
        else:
            hn = Conv1D(hidden_nums, kernel_size=kernel_size, strides=strides_len,
                        data_format='channels_last', 
                        padding='same',activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn) 

    outputs = Dense(80, activation='relu', name='dense_layer')(hn)
    outputs = Dense(3, activation='tanh', name='output_layer')(outputs)

    weather_model = Model(inputs, outputs=[outputs])

    return weather_model 
Example 11
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0
def weather_fnn(layers, lr,
            decay, loss, seq_len, 
            input_features, output_features):
    
    ori_inputs = Input(shape=(seq_len, input_features), name='input_layer')
    #print(seq_len*input_features)
    conv_ = Conv1D(11, kernel_size=13, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(ori_inputs)
    conv_ = BatchNormalization(name='BN_conv')(conv_)
    conv_ = Activation('relu')(conv_)
    conv_ = Conv1D(5, kernel_size=7, strides=1, 
                        data_format='channels_last', 
                        padding='valid', activation='linear')(conv_)
    conv_ = BatchNormalization(name='BN_conv2')(conv_)
    conv_ = Activation('relu')(conv_)

    inputs = Reshape((-1,))(conv_)

    for i, hidden_nums in enumerate(layers):
        if i==0:
            hn = Dense(hidden_nums, activation='linear')(inputs)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
        else:
            hn = Dense(hidden_nums, activation='linear')(hn)
            hn = BatchNormalization(name='BN_{}'.format(i))(hn)
            hn = Activation('relu')(hn)
            #hn = Dropout(0.1)(hn)
    #print(seq_len, output_features)
    #print(hn)
    outputs = Dense(seq_len*output_features, activation='sigmoid', name='output_layer')(hn) # 37*3
    outputs = Reshape((seq_len, output_features))(outputs)

    weather_fnn = Model(ori_inputs, outputs=[outputs])

    return weather_fnn 
Example 12
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: seq2seq_class.py    Apache License 2.0
def build_graph(self):
        #keras.backend.clear_session() # clear session/graph    
        self.optimizer = keras.optimizers.Adam(lr=self.lr, decay=self.decay)

        self.model = Seq2Seq_MVE(id_embd=self.id_embd, time_embd=self.time_embd,
            lr=self.lr, decay=self.decay, 
            num_input_features=self.num_input_features, num_output_features=self.num_output_features,
            num_decoder_features=self.num_decoder_features, layers=self.layers,
            loss=self.loss, regulariser=self.regulariser, dropout_rate = self.dropout_rate)

        def loss_fn(y_true, y_pred):
            pred_u = crop(2,0,3)(y_pred) # mean of Gaussian distribution
            pred_sig = crop(2,3,6)(y_pred) # variance of Gaussian distribution
            if self.loss == 'mve':
                precision = 1./pred_sig
                log_loss= 0.5*tf.log(pred_sig)+0.5*precision*((pred_u-y_true)**2)                                 
                log_loss=tf.reduce_mean(log_loss)
                return log_loss
            elif self.loss == 'mse':
                mse_loss = tf.reduce_mean((pred_u-y_true)**2)
                return mse_loss
            elif self.loss == 'mae':
                mae_loss = tf.reduce_mean(tf.abs(y_true-pred_u))
                return mae_loss
            else:
                sys.exit("'Loss type wrong! They can only be mae, mse or mve'")
                
        print(self.model.summary())
        self.model.compile(optimizer = self.optimizer, loss=loss_fn) 
Example 13
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0
def build_graph(self):
        keras.backend.clear_session() # clear session/graph

        self.model = weather_conv1D(self.layers, self.lr,
            self.decay, self.loss, self.input_len, 
            self.input_features, self.kernel_strides, self.kernel_size)

        print(self.model.summary()) 
Example 14
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0
def __init__(self, regulariser,lr, decay, loss, 
        layers, batch_size, seq_len, input_features, output_features):

        self.regulariser=regulariser
        self.layers=layers
        self.lr=lr
        self.decay=decay
        self.loss=loss
        self.seq_len=seq_len
        self.input_features=input_features
        self.output_features = output_features 
Example 15
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0
def build_graph(self):
        keras.backend.clear_session() # clear session/graph

        self.model = weather_fnn(self.layers, self.lr,
            self.decay, self.loss, self.seq_len, 
            self.input_features, self.output_features)

        print(self.model.summary()) 
Example 16
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0
def __init__(self, num_input_features, num_output_features, num_decoder_features,
               input_sequence_length, target_sequence_length,
              num_steps_to_predict, regulariser = None,
              lr=0.001, decay=0, loss = "mse",
              layers=[35, 35]):
        
        self.num_input_features = num_input_features
        self.num_output_features = num_output_features
        self.num_decoder_features = num_decoder_features
        self.input_sequence_length = input_sequence_length
        self.target_sequence_length = target_sequence_length
        self.num_steps_to_predict = num_steps_to_predict
        self.regulariser = regulariser
        self.layers = layers
        self.lr = lr
        self.decay = decay
        self.loss = loss
        self.pred_result = None
        self.train_loss=[]

        self.target_list=['t2m','rh2m','w10m']

        self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
                         'rh2m':[0.0,100.0],
                         'w10m':[0.0, 30.0]}

        print('Initialized!') 
Example 17
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0
def build_graph(self):
        keras.backend.clear_session() # clear session/graph
        self.model = RNN_builder(self.num_output_features, self.num_decoder_features,
                self.target_sequence_length,
              self.num_steps_to_predict, self.regulariser,
              self.lr, self.decay, self.loss, self.layers)

        print(self.model.summary()) 
Example 18
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0
def __init__(self, id_embd, time_embd, 
        num_input_features, num_output_features, num_decoder_features,
               input_sequence_length, target_sequence_length,
              num_steps_to_predict, regulariser = None,
              lr=0.001, decay=0, loss = "mse",
              layers=[35, 35], model_save_path='../models', 
              model_structure_name='seq2seq_model.json', model_weights_name='seq2seq_model_weights.h5'):
        
        super().__init__(num_input_features, num_output_features, num_decoder_features,
               input_sequence_length, target_sequence_length,
              num_steps_to_predict, regulariser=regulariser,
              lr=lr, decay=decay, loss = loss,
              layers=layers)

        self.id_embd = id_embd
        self.time_embd = time_embd
        self.val_loss_list=[]
        self.train_loss_list=[]
        self.current_mean_val_loss = None
        self.early_stop_limit = 10 # with the unit of Iteration Display
        self.EARLY_STOP=False
        self.pred_var_result = []

        self.pi_dic={0.95:1.96, 0.9:1.645, 0.8:1.28, 0.68:1.}
        self.target_list=['t2m','rh2m','w10m']
        self.obs_range_dic={'t2m':[-30,42], # Official value: [-20,42]
                         'rh2m':[0.0,100.0],
                         'w10m':[0.0, 30.0]}
        self.obs_and_output_feature_index_map = {'t2m':0,'rh2m':1,'w10m':2}
        self.ruitu_feature_index_map = {'t2m':1,'rh2m':3,'w10m':4}
        self.model_save_path = model_save_path
        self.model_structure_name=model_structure_name
        self.model_weights_name=model_weights_name 
Example 19
Project: AI_Competition   Author: Decalogue   File: rcnn.py    MIT License
def model(self, embeddings_matrix, maxlen, word_index, num_class):
        inp = Input(shape=(maxlen,))
        encode = Bidirectional(GRU(1, return_sequences=True))
        encode2 = Bidirectional(GRU(1, return_sequences=True))
        attention = Attention(maxlen)
        x_4 = Embedding(len(word_index) + 1,
                        embeddings_matrix.shape[1],
                        weights=[embeddings_matrix],
                        input_length=maxlen,
                        trainable=True)(inp)
        x_3 = SpatialDropout1D(0.2)(x_4)
        x_3 = encode(x_3)
        x_3 = Dropout(0.2)(x_3)
        x_3 = encode2(x_3)
        x_3 = Dropout(0.2)(x_3)
        x_3 = Conv1D(64, kernel_size=3, padding="valid", kernel_initializer="glorot_uniform")(x_3)
        x_3 = Dropout(0.2)(x_3)
        avg_pool_3 = GlobalAveragePooling1D()(x_3)
        max_pool_3 = GlobalMaxPooling1D()(x_3)
        attention_3 = attention(x_3)
        x = keras.layers.concatenate([avg_pool_3, max_pool_3, attention_3])
        if num_class == 2:
            x = Dense(num_class, activation="sigmoid")(x)
            loss = 'binary_crossentropy'
        else:
            x = Dense(num_class, activation="softmax")(x)
            loss = 'categorical_crossentropy'

        adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        rmsprop = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06)
        model = Model(inputs=inp, outputs=x)
        model.compile(
            loss=loss,
            optimizer=rmsprop
            )
        return model 
Example 20
Project: AI_Competition   Author: Decalogue   File: bigru.py    MIT License
def model(self, embeddings_matrix, maxlen, word_index, num_class):
        inp = Input(shape=(maxlen,))
        encode = Bidirectional(CuDNNGRU(128, return_sequences=True))
        encode2 = Bidirectional(CuDNNGRU(128, return_sequences=True))
        attention = Attention(maxlen)
        x_4 = Embedding(len(word_index) + 1,
                        embeddings_matrix.shape[1],
                        weights=[embeddings_matrix],
                        input_length=maxlen,
                        trainable=True)(inp)
        x_3 = SpatialDropout1D(0.2)(x_4)
        x_3 = encode(x_3)
        x_3 = Dropout(0.2)(x_3)
        x_3 = encode2(x_3)
        x_3 = Dropout(0.2)(x_3)
        avg_pool_3 = GlobalAveragePooling1D()(x_3)
        max_pool_3 = GlobalMaxPooling1D()(x_3)
        attention_3 = attention(x_3)
        x = keras.layers.concatenate([avg_pool_3, max_pool_3, attention_3], name="fc")
        if num_class == 2:
            output = Dense(num_class, activation="sigmoid")(x)
            loss = 'binary_crossentropy'
        else:
            output = Dense(num_class, activation="softmax")(x)
            loss = 'categorical_crossentropy'
        adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=True)
        model = Model(inputs=inp, outputs=output)
        model.compile(
            loss=loss,
            optimizer=adam,
            metrics=["categorical_accuracy"])
        return model 
Example 21
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gap_keras.py    MIT License
def train():
    model = GAP()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True)

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
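The index bookkeeping above (take the leftover tail, reshuffle, then top the batch up from the new permutation) recurs in almost every train() function in this project. A generator restatement of the same wrap-around sampler, for reference (the function name is mine):

import numpy as np

def minibatch_indices(n, mb, seed=0):
    # Yields index batches of size mb forever, reshuffling whenever
    # the current permutation is exhausted, so each pass over the
    # data visits every sample exactly once in random order.
    rng = np.random.RandomState(seed)
    ind = rng.permutation(n)
    i = 0
    while True:
        if i + mb > n:
            batch = ind[i:]
            ind = rng.permutation(n)
            take = mb - len(batch)
            batch = np.hstack((batch, ind[:take]))
            i = take
        else:
            batch = ind[i:i + mb]
            i += mb
        yield batch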
Example 22
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: vgg16_keras.py    MIT License
def train():
    model = VGG16()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True)

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 23
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res18_keras.py    MIT License
def train():
    model = Res18()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 16
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y={'out':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)

    model.save('model.h5')

# test 
Example 24
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: easy_keras.py    MIT License
def train():
    model = Mynet()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True)

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 25
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: zfnet_keras.py    MIT License
def train():
    model = ZFNet()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True)

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 26
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res101_keras.py    MIT License
def train():
    model = Res101()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 16
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y={'out':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)

    model.save('model.h5')

# test 
Example 27
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: resNeXt101_keras.py    MIT License
def train():
    model = ResNeXt101()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 16
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y={'out':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)

    model.save('model.h5')

# test 
Example 28
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: lenet_keras.py    MIT License
def train():
    model = LeNet()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images')

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 29
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res34_keras.py    MIT License
def train():
    model = Res34()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 16
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y={'out':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)

    model.save('model.h5')

# test 
Example 30
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: res50_keras.py    MIT License
def train():
    model = Res50()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 16
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y={'out':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)

    model.save('model.h5')

# test 
Example 31
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: googlenetv1_keras.py    MIT License
def train():
    model = GoogLeNetv1()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 64
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss_total, loss, loss_aux1, loss_aux2, acc, acc_aux1, acc_aux2 = \
                model.train_on_batch(x=x, y={'out':t, 'out_aux1':t, 'out_aux2':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ",loss >>", loss_total, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 32
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: resNeXt50_keras.py    MIT License
def train():
    model = ResNeXt50()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 16
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y={'out':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)

    model.save('model.h5')

# test 
Example 33
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bn_keras.py    MIT License
def train():
    model = VGG16()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    print(model.summary())

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True)

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 34
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: xception_keras.py    MIT License
def train():
    model = Xception()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)

    # training
    mb = 32
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = copy.copy(train_ind)[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y={'out':t})

        if (i+1) % 10 == 0:
            print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)

    model.save('model.h5')

# test 
Example 35
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: vgg19_keras.py    MIT License
def train():
    model = VGG19()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True)

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 36
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nin_keras.py    MIT License
def train():
    model = NIN()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True)

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 37
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: main_keras.py    MIT License
def train():
    model = Mynet()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True),
        metrics=['accuracy'])

    xs, ts, paths = data_load('../Dataset/train/images')

    # training
    mb = 8
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    for i in range(100):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 38
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: convae_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, channel), name='in')
    x = Conv2D(32, (3, 3), padding='same', strides=1, name='enc1')(inputs)
    x = MaxPooling2D((2,2), 2)(x)
    x = Conv2D(16, (3, 3), padding='same', strides=1, name='enc2')(x)
    x = MaxPooling2D((2,2), 2)(x)
    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same', name='dec2')(x)
    out = keras.layers.Conv2DTranspose(channel, (2,2), strides=2, padding='same', name='out')(x)
    
    model = Model(inputs=inputs, outputs=out, name='model')
    return model 
Example 39
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: convae_keras.py    MIT License
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'mse'},
        optimizer=keras.optimizers.Adam(lr=0.001),#"adam", #keras.optimizers.SGD(lr=0.1, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=1)

    # training
    mb = 64
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        #t = x.copy().reshape([mb, -1])

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':x})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 40
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: ae_cifar10_keras.py    MIT License
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'mse'},
        optimizer=keras.optimizers.Adam(lr=0.001),#"adam", #keras.optimizers.SGD(lr=0.1, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])

    train_x, train_y, test_x, test_y = load_cifar10()
    xs = train_x / 255
    
    # training
    mb = 512
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(1000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = x.copy().reshape([mb, -1])

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 41
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: convae_cifar10_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, channel), name='in')
    x = Conv2D(32, (3, 3), padding='same', strides=1, name='enc1')(inputs)
    x = MaxPooling2D((2,2), 2)(x)
    x = Conv2D(16, (3, 3), padding='same', strides=1, name='enc2')(x)
    x = MaxPooling2D((2,2), 2)(x)
    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same', name='dec2')(x)
    out = keras.layers.Conv2DTranspose(channel, (2,2), strides=2, padding='same', name='out')(x)
    
    model = Model(inputs=inputs, outputs=out, name='model')
    return model 
Example 42
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gru_keras.py    MIT License
def train():
    # model
    model = Mynet()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.Adam(lr=0.001),
        metrics=['accuracy'])

    xs, ts = data_load()
    
    # training
    mb = 128
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(1000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 43
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: lstm_keras.py    MIT License
def train():
    # model
    model = Mynet()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.Adam(lr=0.001),
        metrics=['accuracy'])

    xs, ts = data_load()
    
    # training
    mb = 128
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(1500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 44
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bdlstm_keras.py    MIT License
def train():
    # model
    model = Mynet()

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss='categorical_crossentropy',
        optimizer=keras.optimizers.Adam(lr=0.001),
        metrics=['accuracy'])

    xs, ts = data_load()
    
    # training
    mb = 128
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(1000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x=x, y=t)
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 45
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nearest_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = MaxPooling2D((2,2), 2)(x)
    
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv2_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = keras.layers.UpSampling2D(size=(2,2), interpolation='nearest')(x)

    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='dec1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
    
    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 46
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: concat_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    enc1 = x

    x = MaxPooling2D((2,2), 2)(x)

    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv2_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)

    x = keras.layers.concatenate([x, enc1])
    x = Conv2D(32, (1, 1), padding='same', strides=1, name='concat_conv')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='dec1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
    
    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 47
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: transposeconv_keras.py    MIT License
def Mynet(train=False):
    inputs = Input((img_height, img_width, 3), name='in')
    x = inputs
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = MaxPooling2D((2,2), 2)(x)
    
    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='conv2_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)

    x = keras.layers.Conv2DTranspose(32, (2,2), strides=2, padding='same')(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)

    for i in range(2):
        x = Conv2D(32, (3, 3), padding='same', strides=1, name='dec1_{}'.format(i+1))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
    
    x = Conv2D(num_classes+1, (1, 1), padding='same', strides=1)(x)
    x = Reshape([-1, num_classes+1])(x)
    x = Activation('softmax', name='out')(x)
    
    model = Model(inputs=inputs, outputs=x, name='model')
    return model 
Example 48
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bin_test_keras.py    MIT License
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'binary_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 49
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: unet_keras.py    MIT License
def crop_layer(layer, size):
    _, h, w, _ = keras.backend.int_shape(layer)
    _, _h, _w, _ = size
    ph = int((h - _h) / 2)
    pw = int((w - _w) / 2)
    return keras.layers.Cropping2D(cropping=((ph, ph), (pw, pw)))(layer) 
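Two details worth noting: the helper reads static shapes via keras.backend.int_shape, and because ph and pw come from integer division, an odd height or width difference leaves the output one pixel larger than the requested size. A hypothetical use, aligning an encoder feature map with an upsampled decoder tensor:

cropped = crop_layer(enc_feat, keras.backend.int_shape(dec_feat))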
Example 50
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: bin_loss_keras.py    MIT License
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'binary_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5') 
Example 51
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License
def call(self, inputs):
        output = self.conv1(inputs)
        output = layers.Reshape(target_shape=[-1, self.dim_capsule], name='primarycap_reshape')(output)
        return squash(output) 
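squash is defined elsewhere in the project. For reference, a sketch of the standard squashing nonlinearity from the capsule-network paper (Sabour et al., 2017), which this code most likely follows, assuming keras.backend is available as K:

import keras.backend as K

def squash(vectors, axis=-1):
    # Scale each capsule vector by ||s||^2 / (1 + ||s||^2) * 1/||s||,
    # shrinking short vectors toward zero and long ones toward unit length.
    s_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_norm / (1 + s_norm) / K.sqrt(s_norm + K.epsilon())
    return scale * vectors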
Example 52
Project: CapsNet   Author: l11x0m7   File: capsule.py    MIT License
def CapsuleNet(input_shape, n_class, num_routing):
    """
    The whole capsule network for MNIST recognition.
    """
    # (None, H, W, C)
    x = Input(input_shape)

    conv1 = Conv2D(filters=256, kernel_size=9, padding='valid', activation='relu', name='init_conv')(x)

    # (None, num_capsules, capsule_dim)
    prim_caps = PrimaryCapsules(filters=32, kernel_size=9, dim_capsule=8, padding='valid', strides=(2, 2))(conv1)
    # (None, n_class, dim_vector)
    digit_caps = DigiCaps(num_capsule=n_class, dim_capsule=16, 
            num_routing=num_routing, name='digitcaps')(prim_caps)

    # (None, n_class)
    pred = Length(name='out_caps')(digit_caps)

    # (None, n_class)
    y = Input(shape=(n_class, ))

    # (None, n_class * dim_vector)
    masked = Mask()([digit_caps, y])  

    x_recon = layers.Dense(512, activation='relu')(masked)
    x_recon = layers.Dense(1024, activation='relu')(x_recon)
    x_recon = layers.Dense(784, activation='sigmoid')(x_recon)
    x_recon = layers.Reshape(target_shape=[28, 28, 1], name='out_recon')(x_recon)

    # two-input-two-output keras Model
    return Model([x, y], [pred, x_recon]) 
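Training this two-input, two-output model typically pairs the standard CapsNet margin loss on out_caps with a reconstruction loss on out_recon. A hedged sketch (the m+ = 0.9, m- = 0.1 and lambda = 0.5 constants come from the paper; the 0.392 reconstruction weight, i.e. 0.0005 * 784, is the value commonly used for MNIST and is an assumption here):

import keras.backend as K

def margin_loss(y_true, y_pred):
    # Penalize present classes whose capsule length falls below 0.9
    # and absent classes whose length rises above 0.1.
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    return K.mean(K.sum(L, axis=1))

# model.compile(optimizer='adam', loss=[margin_loss, 'mse'], loss_weights=[1., 0.392])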
Example 53
Project: DeepCCS   Author: plpla   File: DeepCCS.py    GNU General Public License v3.0 5 votes vote down vote up
def create_model(self):
        """
            Builds a neural net using a set of arguments
            """
        if len(self.smiles_encoder.converter) == 0 or len(self.adduct_encoder.converter) ==  0:
            raise ValueError("Encoders must be fit before creating a model.")
        smile_input_layer = Input(shape=(250, len(self.smiles_encoder.converter)), name="smile")
        conv = Conv1D(64, kernel_size=4, activation='relu', kernel_initializer='normal')(smile_input_layer)

        previous = conv
        for i in range(6):
            conv = Conv1D(64, kernel_size=4, activation='relu', kernel_initializer='normal')(previous)
            if i == 5:
                pool = MaxPooling1D(pool_size=2, strides=2)(conv)
            else:
                pool = MaxPooling1D(pool_size=2, strides=1)(conv)
            previous = pool

        flat = Flatten()(previous)
        adduct_input_layer = Input(shape=(len(self.adduct_encoder.converter),), name="adduct")
        remix_layer = keras.layers.concatenate([flat, adduct_input_layer], axis=-1)

        previous = remix_layer
        for i in range(2):
            dense_layer = Dense(384, activation="relu", kernel_initializer='normal')(previous)
            previous = dense_layer

        output = Dense(1, activation="linear")(previous)

        opt = getattr(keras.optimizers, 'adam')
        opt = opt(lr=0.0001)
    model = Model(inputs=[smile_input_layer, adduct_input_layer], outputs=output)
        model.compile(optimizer=opt, loss='mean_squared_error')

        self.model = model 
Example 54
Project: Kickstart-AI   Author: katchu11   File: generative-adversarial-network.py    MIT License 5 votes vote down vote up
def make_trainable(net, val):
    net.trainable = val
    for l in net.layers:
        l.trainable = val


# In[28]: 
Example 55
Project: sleep-convolutions-tf   Author: cliffordlab   File: model.py    MIT License 5 votes vote down vote up
def end_pipe_layers(num_dense_end_pipe=100, num_filters_end_pipe=24,
        kernel_size_end_pipe=5, dropout_end_pipe=0.5):
    layers = []
    layers.append(Conv2D(filters=num_filters_end_pipe, kernel_size=(4, kernel_size_end_pipe), padding='valid', activation='relu'))
    layers.append(Flatten())
    layers.append(Dense(num_dense_end_pipe, activation='relu'))
    layers.append(Dropout(rate=dropout_end_pipe))
    layers.append(Dense(num_dense_end_pipe, activation='relu'))
    layers.append(Dropout(rate=dropout_end_pipe))
    layers.append(Dense(6, activation='linear'))
    layers.append(Activation('softmax'))
    return layers 
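Since this returns a plain list of layer objects rather than a wired graph, the caller presumably applies them in order. A minimal sketch (the features tensor is a placeholder assumption):

def apply_layers(x, layers):
    # Apply a list of Keras layers to a tensor in sequence.
    for layer in layers:
        x = layer(x)
    return x

# probs = apply_layers(features, end_pipe_layers())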
Example 56
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def call(self, inputs, training=None):
        """
        Note about training values:
            None: Train BN layers. This is the normal mode
            False: Freeze BN layers. Good when batch size is small
            True: (don't use). Set layer in training mode even when making inferences
        """
        return super(self.__class__, self).call(inputs, training=training) 
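For context, this call override belongs to a thin subclass of Keras BatchNormalization; a sketch of the likely enclosing class (the name BatchNorm matches its use in the ResNet blocks below):

import keras.layers as KL

class BatchNorm(KL.BatchNormalization):
    # BatchNormalization whose train/freeze behaviour is chosen per call
    # through the training argument instead of a global learning phase.
    def call(self, inputs, training=None):
        return super(self.__class__, self).call(inputs, training=training)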
Example 57
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def identity_block(input_tensor, kernel_size, filters, stage, block,
                   use_bias=True, train_bn=True):
    """The identity_block is the block that has no conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
                  use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
                  use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    x = KL.Add()([x, input_tensor])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x 
Example 58
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2), use_bias=True, train_bn=True):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the nb_filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        use_bias: Boolean. To use or not use a bias in conv layers.
        train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path has strides=(2, 2),
    and the shortcut should have strides=(2, 2) as well.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
                  name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
    x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                  name=conv_name_base + '2b', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
                  '2c', use_bias=use_bias)(x)
    x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)

    shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
                         name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)

    x = KL.Add()([x, shortcut])
    x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
    return x 
Example 59
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers
    """
    assert architecture in ["resnet50", "resnet101"]
    # Stage 1
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
    # Stage 2
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
    C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
    # Stage 3
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
    C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
    # Stage 4
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
    C4 = x
    # Stage 5
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
        x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
        C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
    else:
        C5 = None
    return [C1, C2, C3, C4, C5]
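A usage sketch (the input resolution is illustrative): the returned C1..C5 are the per-stage feature maps that a feature pyramid would consume.

import keras.layers as KL

input_image = KL.Input(shape=(1024, 1024, 3))
C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101", stage5=True, train_bn=False)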


############################################################
#  Proposal Layer
############################################################ 
Example 60
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
        """Sets model layers as trainable if their names match
        the given regular expression.
        """
        # Print message on the first call (but not on recursive calls)
        if verbose > 0 and keras_model is None:
            log("Selecting layers to train")

        keras_model = keras_model or self.keras_model

        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers

        for layer in layers:
            # Is the layer a model?
            if layer.__class__.__name__ == 'Model':
                print("In model: ", layer.name)
                self.set_trainable(
                    layer_regex, keras_model=layer, indent=indent + 4)
                continue

            if not layer.weights:
                continue
            # Is it trainable?
            trainable = bool(re.fullmatch(layer_regex, layer.name))
            # Update layer. If layer is a container, update inner layer.
            if layer.__class__.__name__ == 'TimeDistributed':
                layer.layer.trainable = trainable
            else:
                layer.trainable = trainable
            # Print trainable layer names
            if trainable and verbose > 0:
                log("{}{:20}   ({})".format(" " * indent, layer.name,
                                            layer.__class__.__name__)) 
Example 61
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 5 votes vote down vote up
def get_trainable_layers(self):
        """Returns a list of layers that have weights."""
        layers = []
        # Loop through all layers
        for l in self.keras_model.layers:
            # If layer is a wrapper, find inner trainable layer
            l = self.find_trainable_layer(l)
            # Include layer if it has weights
            if l.get_weights():
                layers.append(l)
        return layers 
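find_trainable_layer is not shown in this snippet; a plausible sketch, inferred from its use here, that unwraps wrapper layers such as TimeDistributed:

def find_trainable_layer(self, layer):
    # If the layer is a wrapper, return the inner layer that holds the weights.
    if layer.__class__.__name__ == 'TimeDistributed':
        return self.find_trainable_layer(layer.layer)
    return layer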
Example 62
Project: keras-wrn   Author: EricAlcaide   File: wrn.py    MIT License 5 votes vote down vote up
def build_model(input_dims, output_dim, n, k, act= "relu", dropout=None):
	""" Builds the model. Params:
			- n: number of layers. WRNs are of the form WRN-N-K
				 It must satisfy that (N-4)%6 = 0
			- k: Widening factor. WRNs are of the form WRN-N-K
				 It must satisfy that K%2 = 0
			- input_dims: input dimensions for the model
			- output_dim: output dimensions for the model
			- dropout: dropout rate - default=None (rates above 0.3 are not recommended)
			- act: activation function - default=relu. Build your custom
				   one with keras.backend (ex: swish, e-swish)
	"""
	# Ensure n & k are correct
	assert (n-4)%6 == 0
	assert k%2 == 0
	n = (n-4)//6 
	# This returns a tensor input to the model
	inputs = Input(shape=(input_dims))

	# Head of the model
	x = Conv2D(16, (3,3), padding="same")(inputs)
	x = BatchNormalization()(x)
	x = Activation('relu')(x)

	# 3 Blocks (normal-residual)
	x = main_block(x, 16*k, n, (1,1), dropout) # 0
	x = main_block(x, 32*k, n, (2,2), dropout) # 1
	x = main_block(x, 64*k, n, (2,2), dropout) # 2
			
	# Final part of the model
	x = AveragePooling2D((8,8))(x)
	x = Flatten()(x)
	outputs = Dense(output_dim, activation="softmax")(x)

	model = Model(inputs=inputs, outputs=outputs)
	return model 
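A usage sketch under the docstring's constraints ((n-4) % 6 == 0 and k % 2 == 0), e.g. a WRN-16-8 on 32x32 RGB inputs (the shapes and optimizer are assumptions):

model = build_model((32, 32, 3), 10, n=16, k=8)
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])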
Example 63
Project: neural-fingerprinting   Author: StephanZheng   File: utils_keras.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def cnn_model(logits=False, input_ph=None, img_rows=28, img_cols=28,
              channels=1, nb_filters=64, nb_classes=10):
    """
    Defines a CNN model using Keras sequential model
    :param logits: If set to False, returns a Keras model, otherwise will also
                    return logits tensor
    :param input_ph: The TensorFlow tensor for the input
                    (needed if returning logits)
                    ("ph" stands for placeholder but it need not actually be a
                    placeholder)
    :param img_rows: number of row in the image
    :param img_cols: number of columns in the image
    :param channels: number of color channels (e.g., 1 for MNIST)
    :param nb_filters: number of convolutional filters per layer
    :param nb_classes: the number of output classes
    :return:
    """
    model = Sequential()

    # Define the layers successively (convolution layers are version dependent)
    if keras.backend.image_dim_ordering() == 'th':
        input_shape = (channels, img_rows, img_cols)
    else:
        input_shape = (img_rows, img_cols, channels)

    layers = [conv_2d(nb_filters, (8, 8), (2, 2), "same",
                      input_shape=input_shape),
              Activation('relu'),
              conv_2d((nb_filters * 2), (6, 6), (2, 2), "valid"),
              Activation('relu'),
              conv_2d((nb_filters * 2), (5, 5), (1, 1), "valid"),
              Activation('relu'),
              Flatten(),
              Dense(nb_classes)]

    for layer in layers:
        model.add(layer)

    if logits:
        logits_tensor = model(input_ph)
    model.add(Activation('softmax'))

    if logits:
        return model, logits_tensor
    else:
        return model 
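conv_2d is a small helper not shown here; a hedged sketch of what it likely wraps, with the positional order (filters, kernel, strides, padding) inferred from the calls above:

from keras.layers import Conv2D

def conv_2d(filters, kernel_shape, strides, padding, input_shape=None):
    # Thin wrapper around Conv2D matching the positional call sites above.
    if input_shape is not None:
        return Conv2D(filters=filters, kernel_size=kernel_shape, strides=strides,
                      padding=padding, input_shape=input_shape)
    return Conv2D(filters=filters, kernel_size=kernel_shape, strides=strides,
                  padding=padding)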
Example 64
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: weather_model.py    Apache License 2.0 4 votes vote down vote up
def RNN_builder(num_output_features, num_decoder_features,
                target_sequence_length,
              num_steps_to_predict, regulariser,
              lr, decay, loss, layers):

    optimiser = keras.optimizers.Adam(lr=lr, decay=decay)
    # Define a decoder sequence.
    decoder_inputs = keras.layers.Input(shape=(37, num_decoder_features), name='decoder_inputs')

    decoder_cells = []

    for hidden_neurons in layers:
        print(hidden_neurons)
        decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
                                                  kernel_regularizer = regulariser,
                                                  recurrent_regularizer = regulariser,
                                                  bias_regularizer = regulariser))

    print(decoder_cells)
    decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
    # No encoder is built here, so the decoder starts from its default (zero) initial state.
    decoder_outputs_and_states = decoder(decoder_inputs, initial_state=None)

    # Only select the output of the decoder (not the states)
    decoder_outputs = decoder_outputs_and_states[0]

    # Apply a dense layer to set the output to the correct dimension and scale
    # (tanh is the default activation for GRU in Keras, so raw outputs would be confined to [-1, 1])
    
    #decoder_dense1 = keras.layers.Dense(units=64,
    #                                   activation='tanh',
    #                                   kernel_regularizer = regulariser,
    #                                   bias_regularizer = regulariser, name='dense_tanh')

    output_dense = keras.layers.Dense(num_output_features,
                                       activation='sigmoid',
                                       kernel_regularizer = regulariser,
                                       bias_regularizer = regulariser, name='output_sig')

    #densen1=decoder_dense1(decoder_outputs)
    decoder_outputs = output_dense(decoder_outputs)
    # Create a model using the functional API provided by Keras.
    rnn_model = keras.models.Model(inputs=[decoder_inputs], outputs=decoder_outputs)
    return rnn_model 
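Note that the optimiser created on the first line is never attached: the model is returned uncompiled, so the caller presumably compiles it. An illustrative sketch (all argument values are assumptions):

import keras

model = RNN_builder(num_output_features=3, num_decoder_features=5,
                    target_sequence_length=37, num_steps_to_predict=37,
                    regulariser=None, lr=1e-3, decay=0.0, loss='mse',
                    layers=[64, 64])
model.compile(optimizer=keras.optimizers.Adam(lr=1e-3, decay=0.0), loss='mse')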
Example 65
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0 4 votes vote down vote up
def build_graph(self):
        keras.backend.clear_session() # clear session/graph    
        self.optimiser = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
        # Define an input sequence.
        encoder_inputs = keras.layers.Input(shape=(None, self.num_input_features), name='encoder_inputs')
        # Create a list of RNN cells; these are then stacked into a single layer
        # by the RNN layer.
        encoder_cells = []
        for hidden_neurons in self.layers:
            encoder_cells.append(keras.layers.GRUCell(hidden_neurons,
                                                      kernel_regularizer = self.regulariser,
                                                      recurrent_regularizer = self.regulariser,
                                                      bias_regularizer = self.regulariser))
            
        encoder = keras.layers.RNN(encoder_cells, return_state=True)
        encoder_outputs_and_states = encoder(encoder_inputs)
        # Discard encoder outputs and only keep the states.
        encoder_states = encoder_outputs_and_states[1:]
        # Define a decoder sequence.
        decoder_inputs = keras.layers.Input(shape=(None, self.num_decoder_features), name='decoder_inputs')

        decoder_cells = []
        for hidden_neurons in self.layers:
            decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
                                                      kernel_regularizer = self.regulariser,
                                                      recurrent_regularizer = self.regulariser,
                                                      bias_regularizer = self.regulariser))

        decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
        # Set the initial state of the decoder to be the output state of the encoder.
        decoder_outputs_and_states = decoder(decoder_inputs, initial_state=encoder_states)

        # Only select the output of the decoder (not the states)
        decoder_outputs = decoder_outputs_and_states[0]

        # Apply a dense layer to set the output to the correct dimension and scale
        # (tanh is the default activation for GRU in Keras, so raw outputs would be confined to [-1, 1])
        
        decoder_dense1 = keras.layers.Dense(units=64,
                                           activation='tanh',
                                           kernel_regularizer = self.regulariser,
                                           bias_regularizer = self.regulariser, name='dense_tanh')

        output_dense = keras.layers.Dense(self.num_output_features,
                                           activation='sigmoid',
                                           kernel_regularizer = self.regulariser,
                                           bias_regularizer = self.regulariser, name='output_sig')

        #densen1=decoder_dense1(decoder_outputs)
        decoder_outputs = output_dense(decoder_outputs)
        # Create a model using the functional API provided by Keras.
        self.model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
        print(self.model.summary()) 
Example 66
Project: Deep_Learning_Weather_Forecasting   Author: BruceBinBoxing   File: competition_model_class.py    Apache License 2.0 4 votes vote down vote up
def build_graph(self):
        keras.backend.clear_session() # clear session/graph    
        self.optimiser = keras.optimizers.Adam(lr=self.lr, decay=self.decay)
        # Define an input sequence.
        encoder_inputs = keras.layers.Input(shape=(None, self.num_input_features), name='encoder_inputs')
        # Create a list of RNN cells; these are then stacked into a single layer
        # by the RNN layer.
        encoder_cells = []
        for hidden_neurons in self.layers:
            encoder_cells.append(keras.layers.GRUCell(hidden_neurons,
                                                      kernel_regularizer = self.regulariser,
                                                      recurrent_regularizer = self.regulariser,
                                                      bias_regularizer = self.regulariser))
            
        encoder = keras.layers.RNN(encoder_cells, return_state=True)
        encoder_outputs_and_states = encoder(encoder_inputs)
        # Discard encoder outputs and only keep the states.
        encoder_states = encoder_outputs_and_states[1:]
        # Define a decoder sequence.
        decoder_inputs = keras.layers.Input(shape=(None, self.num_decoder_features), name='decoder_inputs')
        
        decoder_inputs_id = keras.layers.Input(shape=(None,), name='id_inputs')
        decoder_inputs_id_embd = Embedding(input_dim=10, output_dim=2, name='id_embedding')(decoder_inputs_id)

        #decoder_inputs_time = keras.layers.Input(shape=(None,), name='time_inputs')
        #decoder_inputs_time_embd = Embedding(input_dim=37, output_dim=2, name='time_embedding')(decoder_inputs_time)

        decoder_concat = concatenate([decoder_inputs, decoder_inputs_id_embd], axis=-1)

        decoder_cells = []
        for hidden_neurons in self.layers:
            decoder_cells.append(keras.layers.GRUCell(hidden_neurons,
                                                      kernel_regularizer = self.regulariser,
                                                      recurrent_regularizer = self.regulariser,
                                                      bias_regularizer = self.regulariser))

        decoder = keras.layers.RNN(decoder_cells, return_sequences=True, return_state=True)
        decoder_outputs_and_states = decoder(decoder_concat, initial_state=encoder_states)

        decoder_outputs = decoder_outputs_and_states[0]

        #decoder_dense1 = keras.layers.Dense(units=32,
        #                                   activation='relu',
        #                                   kernel_regularizer = self.regulariser,
        #                                  bias_regularizer = self.regulariser, name='dense_relu')

        output_dense = keras.layers.Dense(self.num_output_features,
                                           activation='sigmoid',
                                           kernel_regularizer = self.regulariser,
                                           bias_regularizer = self.regulariser, name='output_sig')

        #densen1=decoder_dense1(decoder_outputs)
        decoder_outputs = output_dense(decoder_outputs)
        # Create a model using the functional API provided by Keras.
        self.model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs, decoder_inputs_id], outputs=decoder_outputs)
        self.model.compile(optimizer = self.optimiser, loss=self.loss)

        print(self.model.summary()) 
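Feeding this model requires three aligned inputs plus the targets; a sketch with illustrative dummy shapes (the feature counts are assumptions, and the id values must lie in [0, 10) to fit the Embedding):

import numpy as np

enc_x = np.zeros((8, 28, 9))   # (batch, encoder_steps, num_input_features)
dec_x = np.zeros((8, 37, 5))   # (batch, decoder_steps, num_decoder_features)
dec_id = np.zeros((8, 37))     # integer station ids for the id_embedding layer
y = np.zeros((8, 37, 3))       # (batch, decoder_steps, num_output_features)
# self.model.train_on_batch([enc_x, dec_x, dec_id], y)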
Example 67
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: gan_keras.py    MIT License 4 votes vote down vote up
def train():
    g = G_model()
    d = D_model()
    gan = Combined_model(g=g, d=d)

    g_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
    d_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
    #g_opt = keras.optimizers.SGD(lr=0.0002, momentum=0.3, decay=1e-5)
    #d_opt = keras.optimizers.SGD(lr=0.0002, momentum=0.1, decay=1e-5)

    d.trainable = True
    for layer in d.layers:
        layer.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_opt)
    g.compile(loss='binary_crossentropy', optimizer=d_opt)
    d.trainable = False
    for layer in d.layers:
        layer.trainable = False
    gan = Combined_model(g=g, d=d)
    gan.compile(loss='binary_crossentropy', optimizer=g_opt)

    xs, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=1)

    # training
    mb = 32
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(5000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]

        input_noise = np.random.uniform(-1, 1, size=(mb, 100))
        g_output = g.predict(input_noise, verbose=0)
        X = np.concatenate((x, g_output))
        Y = [1] * mb + [0] * mb
        d_loss = d.train_on_batch(X, Y)
        # Generator training
        input_noise = np.random.uniform(-1, 1, size=(mb, 100))
        g_loss = gan.train_on_batch(input_noise, [1] * mb)

        print("iter >>", i+1, ",g_loss >>", g_loss, ',d_loss >>', d_loss)
    
    g.save('model.h5')

# test 
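Why the trainable toggling above works: Keras snapshots each layer's trainable flag at compile() time, so d (compiled while trainable) is updated by d.train_on_batch, while gan (compiled after freezing d) only updates the generator's weights when trained on noise with all-ones labels. The same pattern is reused in the DCGAN examples that follow.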
Example 68
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: dcgan_keras.py    MIT License 4 votes vote down vote up
def train():
    g = G_model()
    d = D_model()
    gan = Combined_model(g=g, d=d)

    g_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
    d_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
    
    d.trainable = True
    for layer in d.layers:
        layer.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_opt)
    g.compile(loss='binary_crossentropy', optimizer=d_opt)
    d.trainable = False
    for layer in d.layers:
        layer.trainable = False
    gan = Combined_model(g=g, d=d)
    gan.compile(loss='binary_crossentropy', optimizer=g_opt)

    xs, paths = data_load('../Dataset/train/images/', hf=True, vf=True, rot=1)

    # training
    mb = 32
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(10000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]

        input_noise = np.random.uniform(-1, 1, size=(mb, 100))
        g_output = g.predict(input_noise, verbose=0)
        X = np.concatenate((x, g_output))
        Y = [1] * mb + [0] * mb
        d_loss = d.train_on_batch(X, Y)
        # Generator training
        input_noise = np.random.uniform(-1, 1, size=(mb, 100))
        g_loss = gan.train_on_batch(input_noise, [1] * mb)

        print("iter >>", i+1, ",g_loss >>", g_loss, ',d_loss >>', d_loss)
    
    g.save('model.h5')

# test 
Example 69
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: convae_cifar10_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'mse'},
        optimizer=keras.optimizers.Adam(lr=0.001),#"adam", #keras.optimizers.SGD(lr=0.1, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])

    train_x, train_y, test_x, test_y = load_cifar10()
    xs = train_x / 255

    # training
    mb = 512
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(5000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        #t = x.copy().reshape([mb, -1])

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':x})

        if (i+1) % 100 == 0:
            print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 70
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: dcgan_cifar10_keras.py    MIT License 4 votes vote down vote up
def train():
    g = G_model()
    d = D_model()
    gan = Combined_model(g=g, d=d)

    g_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
    d_opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.5)
    
    d.trainable = True
    for layer in d.layers:
        layer.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_opt)
    g.compile(loss='binary_crossentropy', optimizer=d_opt)
    d.trainable = False
    for layer in d.layers:
        layer.trainable = False
    gan = Combined_model(g=g, d=d)
    gan.compile(loss='binary_crossentropy', optimizer=g_opt)

    train_x, train_y, test_x, test_y = load_cifar10()
    xs = train_x / 127.5 - 1

    # training
    mb = 32
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(10000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]

        input_noise = np.random.uniform(-1, 1, size=(mb, 100))
        g_output = g.predict(input_noise, verbose=0)
        X = np.concatenate((x, g_output))
        Y = [1] * mb + [0] * mb
        d_loss = d.train_on_batch(X, Y)
        # Generator training
        input_noise = np.random.uniform(-1, 1, size=(mb, 100))
        g_loss = gan.train_on_batch(input_noise, [1] * mb)

        if (i+1) % 100 == 0:
            print("iter >>", i+1, ",g_loss >>", g_loss, ',d_loss >>', d_loss)
    
    g.save('model.h5')

# test 
Example 71
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: nearest_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'categorical_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        t = np.reshape(t, (mb, -1, num_classes+1))

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 72
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: semaseg_dataset_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'categorical_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        t = np.reshape(t, (mb, -1, num_classes+1))

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 73
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: semaseg_test_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'categorical_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        t = np.reshape(t, (mb, -1, num_classes+1))

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 74
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: semaseg_loss_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'categorical_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        t = np.reshape(t, (mb, -1, num_classes+1))

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 75
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: transposeconv_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'categorical_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(500):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        t = np.reshape(t, (mb, -1, num_classes+1))

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 76
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: unetlike_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'categorical_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(1000):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        t = np.reshape(t, (mb, -1, num_classes+1))

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 77
Project: DeepLearningMugenKnock   Author: yoyoyo-yo   File: unet_keras.py    MIT License 4 votes vote down vote up
def train():
    model = Mynet(train=True)

    for layer in model.layers:
        layer.trainable = True

    model.compile(
        loss={'out': 'categorical_crossentropy'},
        optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=False),
        loss_weights={'out': 1},
        metrics=['accuracy'])


    xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)

    # training
    mb = 4
    mbi = 0
    train_ind = np.arange(len(xs))
    np.random.seed(0)
    np.random.shuffle(train_ind)
    
    for i in range(100):
        if mbi + mb > len(xs):
            mb_ind = train_ind[mbi:]
            np.random.shuffle(train_ind)
            mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
            mbi = mb - (len(xs) - mbi)
        else:
            mb_ind = train_ind[mbi: mbi+mb]
            mbi += mb

        x = xs[mb_ind]
        t = ts[mb_ind]

        t = np.reshape(t, (mb, -1, num_classes+1))

        loss, acc = model.train_on_batch(x={'in':x}, y={'out':t})
        print("iter >>", i+1, ",loss >>", loss, ',accuracy >>', acc)

    model.save('model.h5')

# test 
Example 78
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 4 votes vote down vote up
def fpn_classifier_graph(rois, feature_maps, image_meta,
                         pool_size, num_classes, train_bn=True,
                         fc_layers_size=1024):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
          coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
                  [P2, P3, P4, P5]. Each has a different resolution.
    image_meta: [batch, (meta data)] Image details. See compose_image_meta()
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results
    train_bn: Boolean. Train or freeze Batch Norm layers
    fc_layers_size: Size of the 2 FC layers

    Returns:
        logits: [N, NUM_CLASSES] classifier logits (before softmax)
        probs: [N, NUM_CLASSES] classifier probabilities
        bbox_deltas: [N, (dy, dx, log(dh), log(dw))] Deltas to apply to
                     proposal boxes
    """
    # ROI Pooling
    # Shape: [batch, num_boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign([pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # Classifier head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # BBox head
    # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox 
Example 79
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 4 votes vote down vote up
def load_weights(self, filepath, by_name=False, exclude=None):
        """Modified version of the corresponding Keras function with
        the addition of multi-GPU support and the ability to exclude
        some layers from loading.
        exclude: list of layer names to exclude
        """
        import h5py
        # Conditional import to support versions of Keras before 2.2
        # TODO: remove in about 6 months (end of 2018)
        try:
            from keras.engine import saving
        except ImportError:
            # Keras before 2.2 used the 'topology' namespace.
            from keras.engine import topology as saving

        if exclude:
            by_name = True

        if h5py is None:
            raise ImportError('`load_weights` requires h5py.')
        f = h5py.File(filepath, mode='r')
        if 'layer_names' not in f.attrs and 'model_weights' in f:
            f = f['model_weights']

        # In multi-GPU training, we wrap the model. Get layers
        # of the inner model because they have the weights.
        keras_model = self.keras_model
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers

        # Exclude some layers
        if exclude:
            layers = filter(lambda l: l.name not in exclude, layers)

        if by_name:
            saving.load_weights_from_hdf5_group_by_name(f, layers)
        else:
            saving.load_weights_from_hdf5_group(f, layers)
        if hasattr(f, 'close'):
            f.close()

        # Update the log directory
        self.set_log_dir(filepath) 
Example 80
Project: Sushi-dish-detection   Author: blackrubystudio   File: model.py    MIT License 4 votes vote down vote up
def compile(self, learning_rate, momentum):
        """Gets the model ready for training. Adds losses, regularization, and
        metrics. Then calls the Keras compile() function.
        """
        # Optimizer object
        optimizer = keras.optimizers.SGD(
            lr=learning_rate, momentum=momentum,
            clipnorm=self.config.GRADIENT_CLIP_NORM)
        # Add Losses
        # First, clear previously set losses to avoid duplication
        self.keras_model._losses = []
        self.keras_model._per_input_losses = {}
        loss_names = [
            "rpn_class_loss",  "rpn_bbox_loss",
            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
        for name in loss_names:
            layer = self.keras_model.get_layer(name)
            if layer.output in self.keras_model.losses:
                continue
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.add_loss(loss)

        # Add L2 Regularization
        # Skip gamma and beta weights of batch normalization layers.
        reg_losses = [
            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
            for w in self.keras_model.trainable_weights
            if 'gamma' not in w.name and 'beta' not in w.name]
        self.keras_model.add_loss(tf.add_n(reg_losses))

        # Compile
        self.keras_model.compile(
            optimizer=optimizer,
            loss=[None] * len(self.keras_model.outputs))

        # Add metrics for losses
        for name in loss_names:
            if name in self.keras_model.metrics_names:
                continue
            layer = self.keras_model.get_layer(name)
            self.keras_model.metrics_names.append(name)
            loss = (
                tf.reduce_mean(layer.output, keepdims=True)
                * self.config.LOSS_WEIGHTS.get(name, 1.))
            self.keras_model.metrics_tensors.append(loss)