Python keras.models.Input() Examples

The following are 21 code examples of keras.models.Input(), drawn from open-source projects. The originating project and source file are noted above each example. You may also want to check out the other available functions and classes of the keras.models module.
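Before the project examples, here is a minimal generic sketch of the pattern every example below follows: Input() creates a symbolic tensor (shape excludes the batch dimension), layers are called on that tensor, and Model ties the input tensor to the output tensor. This illustration is not taken from any of the projects below.

from keras.models import Input, Model
from keras.layers import Dense

# Input defines a symbolic placeholder; shape excludes the batch dimension.
inputs = Input(shape=(784,))
hidden = Dense(64, activation='relu')(inputs)
outputs = Dense(10, activation='softmax')(hidden)

# Model ties the input tensor to the output tensor into a trainable graph.
model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')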
Example #1
Source File: __init__.py    From ImageAI with MIT License
def loadModel(self):

        """
        'loadModel' is used to load the model into the CustomObjectDetection class
        :return: None
        """

        if self.__model_type == "yolov3":
            detection_model_json = json.load(open(self.__detection_config_json_path))

            self.__model_labels = detection_model_json["labels"]
            self.__model_anchors = detection_model_json["anchors"]

            self.__detection_utils = CustomDetectionUtils(labels=self.__model_labels)

            self.__model = yolo_main(Input(shape=(None, None, 3)), 3, len(self.__model_labels))

            self.__model.load_weights(self.__model_path) 
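For context, this loader is normally reached through ImageAI's public API, roughly as follows. This is a hedged sketch based on the ImageAI documentation; the file paths are placeholders.

from imageai.Detection.Custom import CustomObjectDetection

detector = CustomObjectDetection()
detector.setModelTypeAsYOLOv3()               # selects the "yolov3" branch above
detector.setModelPath("detection_model.h5")   # placeholder path
detector.setJsonPath("detection_config.json") # placeholder path
detector.loadModel()                          # runs the method shown above
detections = detector.detectObjectsFromImage(input_image="image.jpg",
                                             output_image_path="image_detected.jpg")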
Example #2
Source File: discriminator.py    From inpainting-gmcnn-keras with MIT License
def model(self):
    inputs = Input(shape=(self.img_height, self.img_width, self.num_channels))
    
    # Global discriminator (operates on the whole image; cf. the masked, local variant in Example #3)
    g_dis = Conv2D(filters=64, kernel_size=5, strides=(2, 2), padding='same')(inputs)
    g_dis = LeakyReLU()(g_dis)
    g_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(g_dis)
    g_dis = LeakyReLU()(g_dis)
    g_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(g_dis)
    g_dis = LeakyReLU()(g_dis)
    g_dis = Conv2D(filters=512, kernel_size=5, strides=(2, 2), padding='same')(g_dis)
    g_dis = LeakyReLU()(g_dis)
    g_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(g_dis)
    g_dis = LeakyReLU()(g_dis)
    g_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(g_dis)
    g_dis = LeakyReLU()(g_dis)
    g_dis = Flatten()(g_dis)
    g_dis = Dense(units=1)(g_dis)
    
    model = Model(name=self.model_name, inputs=inputs, outputs=g_dis)
    return model 
Example #3
Source File: discriminator.py    From inpainting-gmcnn-keras with MIT License
def model(self):
    inputs_img = Input(shape=(self.img_height, self.img_width, self.num_channels))
    inputs_mask = Input(shape=(self.img_height, self.img_width, self.num_channels))
    
    inputs = Multiply()([inputs_img, inputs_mask])
    
    # Local discriminator
    l_dis = Conv2D(filters=64, kernel_size=5, strides=(2, 2), padding='same')(inputs)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=512, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=256, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Conv2D(filters=128, kernel_size=5, strides=(2, 2), padding='same')(l_dis)
    l_dis = LeakyReLU()(l_dis)
    l_dis = Flatten()(l_dis)
    l_dis = Dense(units=1)(l_dis)
    
    model = Model(name=self.model_name, inputs=[inputs_img, inputs_mask], outputs=l_dis)
    return model 
Example #4
Source File: __init__.py    From deep_complex_networks with MIT License
def get_shallow_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))

    conv = ComplexConv1D(
        32, 512, strides=16,
        activation='relu')(inputs)
    pool = AveragePooling1D(pool_size=4, strides=2)(conv)

    pool = Permute([2, 1])(pool)
    flattened = Flatten()(pool)

    dense = ComplexDense(2048, activation='relu')(flattened)
    predictions = ComplexDense(
        output_size, 
        activation='sigmoid',
        bias_initializer=Constant(value=-5))(dense)
    predictions = GetReal(predictions)
    model = Model(inputs=inputs, outputs=predictions)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model 
Example #5
Source File: model_3.py    From cu-ssp with MIT License
def build_model():
  input = Input(shape = (None, ))
  profiles_input = Input(shape = (None, 22))

  # Defining an embedding layer mapping the word indices (n_words) to vectors of length 250
  x1 = Embedding(input_dim = n_words, output_dim = 250, input_length = None)(input)  
  x1 = concatenate([x1, profiles_input], axis = 2)
  
  x2 = Embedding(input_dim = n_words, output_dim = 125, input_length = None)(input)
  x2 = concatenate([x2, profiles_input], axis = 2)

  x1 = Dense(1200, activation = "relu")(x1)
  x1 = Dropout(0.5)(x1)

  # Defining bidirectional CuDNN-accelerated GRUs over the embedded representation of the inputs
  x2 = Bidirectional(CuDNNGRU(units = 500, return_sequences = True))(x2)
  x2 = Bidirectional(CuDNNGRU(units = 100, return_sequences = True))(x2)
  COMBO_MOVE = concatenate([x1, x2])
  w = Dense(500, activation = "relu")(COMBO_MOVE) # try 500
  w = Dropout(0.4)(w)
  w = tcn.TCN()(w)
  y = TimeDistributed(Dense(n_tags, activation = "softmax"))(w)

  # Defining the model as a whole and printing the summary
  model = Model([input, profiles_input], y)
  #model.summary()

  # Compiling the model with categorical cross-entropy loss and the custom accuracy function as an extra metric
  adamOptimizer = Adam(lr=0.0025, beta_1=0.8, beta_2=0.8, epsilon=None, decay=0.0001, amsgrad=False) 
  model.compile(optimizer = adamOptimizer, loss = "categorical_crossentropy", metrics = ["accuracy", accuracy])
  return model


Example #6
Source File: darknet.py    From Unified-Gesture-and-Fingertip-Detection with MIT License
def model():
    input = Input(shape=(224, 224, 3))
    x = conv_batch_norm_relu(input, 32, (3, 3), padding='same', activation='relu')
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    x = conv_batch_norm_relu(x, 64, (3, 3), padding='same', activation='relu')
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    x = conv_batch_norm_relu(x, 128, (3, 3), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 64, (1, 1), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 128, (3, 3), padding='same', activation='relu')
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    x = conv_batch_norm_relu(x, 256, (3, 3), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 128, (1, 1), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 256, (3, 3), padding='same', activation='relu')
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    x = conv_batch_norm_relu(x, 512, (3, 3), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 256, (1, 1), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 512, (3, 3), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 256, (1, 1), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 512, (3, 3), padding='same', activation='relu')
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    x = conv_batch_norm_relu(x, 1024, (3, 3), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 512, (1, 1), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 1024, (3, 3), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 512, (1, 1), padding='same', activation='relu')
    x = conv_batch_norm_relu(x, 1024, (3, 3), padding='same', activation='relu')
    x = Conv2D(5, (1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('sigmoid', name='output')(x)
    return Model(inputs=input, outputs=x) 
Example #7
Source File: gmcnn_gan.py    From inpainting-gmcnn-keras with MIT License
def define_local_discriminator(self, generator_raw, local_discriminator_raw):
    generator_inputs = Input(shape=(self.img_height, self.img_width, self.num_channels))
    generator_masks = Input(shape=(self.img_height, self.img_width, self.num_channels))
    
    real_samples = Input(shape=(self.img_height, self.img_width, self.num_channels))
    fake_samples = generator_raw.model([generator_inputs, generator_masks])
    # fake_samples = generator_inputs * (1 - generator_masks) + fake_samples * generator_masks
    # fake_samples = Lambda(make_comp_sample)([generator_inputs, fake_samples, generator_masks])
    
    discriminator_output_from_fake_samples = local_discriminator_raw.model(
      [fake_samples, generator_masks])
    discriminator_output_from_real_samples = local_discriminator_raw.model(
      [real_samples, generator_masks])
    
    averaged_samples = custom_layers.RandomWeightedAverage()([real_samples, fake_samples])
    averaged_samples_output = local_discriminator_raw.model([averaged_samples, generator_masks])
    
    partial_gp_loss = partial(gradient_penalty_loss,
                              averaged_samples=averaged_samples,
                              gradient_penalty_weight=self.gradient_penalty_loss_weight)
    partial_gp_loss.__name__ = 'gradient_penalty'
    
    local_discriminator_model = Model(inputs=[real_samples, generator_inputs, generator_masks],
                                      outputs=[discriminator_output_from_real_samples,
                                               discriminator_output_from_fake_samples,
                                               averaged_samples_output])
    
    local_discriminator_model.compile(optimizer=self.discriminator_optimizer,
                                      loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss])
    return local_discriminator_model 
Example #8
Source File: gmcnn_gan.py    From inpainting-gmcnn-keras with MIT License
def define_global_discriminator(self, generator_raw, global_discriminator_raw):
    generator_inputs = Input(shape=(self.img_height, self.img_width, self.num_channels))
    generator_masks = Input(shape=(self.img_height, self.img_width, self.num_channels))
    
    real_samples = Input(shape=(self.img_height, self.img_width, self.num_channels))
    fake_samples = generator_raw.model([generator_inputs, generator_masks])
    # fake_samples = generator_inputs * (1 - generator_masks) + fake_samples * generator_masks
    fake_samples = Lambda(make_comp_sample)([generator_inputs, fake_samples, generator_masks])
    
    discriminator_output_from_fake_samples = global_discriminator_raw.model(fake_samples)
    discriminator_output_from_real_samples = global_discriminator_raw.model(real_samples)
    
    averaged_samples = custom_layers.RandomWeightedAverage()([real_samples, fake_samples])
    # We then run these samples through the discriminator as well. Note that we never
    # really use the discriminator output for these samples - we're only running them to
    # get the gradient norm for the gradient penalty loss.
    averaged_samples_outputs = global_discriminator_raw.model(averaged_samples)
    
    # The gradient penalty loss function requires the input averaged samples to get
    # gradients. However, Keras loss functions can only have two arguments, y_true and
    # y_pred. We get around this by making a partial() of the function with the averaged
    # samples here.
    partial_gp_loss = partial(gradient_penalty_loss,
                              averaged_samples=averaged_samples,
                              gradient_penalty_weight=self.gradient_penalty_loss_weight)
    # Functions need names or Keras will throw an error
    partial_gp_loss.__name__ = 'gradient_penalty'
    
    global_discriminator_model = Model(inputs=[real_samples, generator_inputs, generator_masks],
                                       outputs=[discriminator_output_from_real_samples,
                                                discriminator_output_from_fake_samples,
                                                averaged_samples_outputs])
    # We use the Adam parameters from Gulrajani et al. We use the Wasserstein loss for both
    # the real and generated samples, and the gradient penalty loss for the averaged samples
    global_discriminator_model.compile(optimizer=self.discriminator_optimizer,
                                       loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss])
    
    return global_discriminator_model 
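Neither wasserstein_loss nor gradient_penalty_loss is shown in this snippet. A common Keras-backend implementation of both, following Gulrajani et al.'s WGAN-GP formulation, is sketched below; the project's own definitions may differ in detail (Example #16 suggests its wasserstein_loss also accepts a wgan_loss_weight argument, so a default is given here).

import numpy as np
from keras import backend as K

def wasserstein_loss(y_true, y_pred, wgan_loss_weight=1.0):
    # y_true is +1 for real and -1 for fake samples.
    return wgan_loss_weight * K.mean(y_true * y_pred)

def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):
    # Gradient of the critic output with respect to the interpolated samples.
    gradients = K.gradients(y_pred, averaged_samples)[0]
    # Squared L2 norm of the gradients over all non-batch axes.
    gradients_sqr = K.square(gradients)
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, K.ndim(gradients_sqr)))
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # Penalize deviation of the gradient norm from 1.
    return K.mean(gradient_penalty_weight * K.square(1. - gradient_l2_norm))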
Example #9
Source File: dagmm.py    From AnomalyDetectionTransformations with MIT License
def create_dagmm_model(encoder, decoder, estimation_encoder, lambd_diag=0.005):
    x_in = Input(batch_shape=encoder.input_shape)
    zc = encoder(x_in)

    decoder.name = 'reconstruction'
    x_rec = decoder(zc)
    euclid_dist = Lambda(lambda args: K.sqrt(K.sum(K.batch_flatten(K.square(args[0] - args[1])),
                                                   axis=-1, keepdims=True) /
                                             K.sum(K.batch_flatten(K.square(args[0])),
                                                   axis=-1, keepdims=True)),
                         output_shape=(1,))([x_in, x_rec])
    cos_sim = Lambda(lambda args: K.batch_dot(K.l2_normalize(K.batch_flatten(args[0]), axis=-1),
                                              K.l2_normalize(K.batch_flatten(args[1]), axis=-1),
                                              axes=-1),
                     output_shape=(1,))([x_in, x_rec])

    zr = concatenate([euclid_dist, cos_sim])
    z = concatenate([zc, zr])

    gamma = estimation_encoder(z)

    gamma_ks = [Lambda(lambda g: g[:, k:k + 1], output_shape=(1,))(gamma)
                for k in range(estimation_encoder.output_shape[-1])]

    components = [GaussianMixtureComponent(lambd_diag)([z, gamma_k])
                  for gamma_k in gamma_ks]
    density = add(components) if len(components) > 1 else components[0]
    energy = Lambda(lambda dens: -K.log(dens), name='energy')(density)

    dagmm = Model(x_in, [x_rec, energy])

    return dagmm 
Example #10
Source File: poetry_model.py    From poetry_generator_Keras with MIT License
def build_model(self):
        '''Build the model.'''

        # Dimension of the input
        input_tensor = Input(shape=(self.config.max_len,))
        embedd = Embedding(len(self.num2word) + 2, 300, input_length=self.config.max_len)(input_tensor)
        lstm = Bidirectional(GRU(128, return_sequences=True))(embedd)
        # dropout = Dropout(0.6)(lstm)
        # lstm = LSTM(256)(dropout)
        # dropout = Dropout(0.6)(lstm)
        flatten = Flatten()(lstm)
        dense = Dense(len(self.words), activation='softmax')(flatten)
        self.model = Model(inputs=input_tensor, outputs=dense)
        optimizer = Adam(lr=self.config.learning_rate)
        self.model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy']) 
Example #11
Source File: models.py    From keras_attention with MIT License
def _get_model(self):
        d = 0.5
        rd = 0.5
        rnn_units = 128
        input_text = Input((self.input_length,))
        text_embedding = Embedding(input_dim=self.max_words + 2, output_dim=self.emb_dim,
                                   input_length=self.input_length, mask_zero=True)(input_text)
        text_embedding = SpatialDropout1D(0.5)(text_embedding)
        bilstm = Bidirectional(LSTM(units=rnn_units, return_sequences=True, dropout=d,
                                    recurrent_dropout=rd))(text_embedding)
        x, attn = AttentionWeightedAverage(return_attention=True)(bilstm)
        x = Dropout(0.5)(x)
        out = Dense(units=self.n_classes, activation="softmax")(x)
        model = Model(input_text, out)
        return model 
Example #12
Source File: tcn.py    From speech-music-detection with MIT License
def create_tcn(list_n_filters=[8],
               kernel_size=4,
               dilations=[1, 2],
               nb_stacks=1,
               activation='norm_relu',
               n_layers=1,
               dropout_rate=0.05,
               use_skip_connections=True,
               bidirectional=True):
    if bidirectional:
        padding = 'same'
    else:
        padding = 'causal'

    dilations = process_dilations(dilations)

    input_layer = Input(shape=(None, config.N_MELS))

    for i in range(n_layers):
        if i == 0:
            x = TCN(list_n_filters[i], kernel_size, nb_stacks, dilations, activation,
                    padding, use_skip_connections, dropout_rate, return_sequences=True)(input_layer)
        else:
            x = TCN(list_n_filters[i], kernel_size, nb_stacks, dilations, activation,
                    padding, use_skip_connections, dropout_rate, return_sequences=True, name="tcn" + str(i))(x)

    x = Dense(config.CLASSES)(x)
    x = Activation('sigmoid')(x)
    output_layer = x

    return Model(input_layer, output_layer) 
Example #13
Source File: model_1.py    From cu-ssp with MIT License
def CNN_BIGRU():
    # Inp is one-hot encoded version of inp_alt
    inp          = Input(shape=(maxlen_seq, n_words))
    inp_alt      = Input(shape=(maxlen_seq,))
    inp_profiles = Input(shape=(maxlen_seq, 22))

    # Concatenate embedded and unembedded input
    x_emb = Embedding(input_dim=n_words, output_dim=64, 
                      input_length=maxlen_seq)(inp_alt)
    x = Concatenate(axis=-1)([inp, x_emb, inp_profiles])

    x = super_conv_block(x)
    x = conv_block(x)
    x = super_conv_block(x)
    x = conv_block(x)
    x = super_conv_block(x)
    x = conv_block(x)

    x = Bidirectional(CuDNNGRU(units = 256, return_sequences = True, recurrent_regularizer=l2(0.2)))(x)
    x = TimeDistributed(Dropout(0.5))(x)
    x = TimeDistributed(Dense(256, activation = "relu"))(x)
    x = TimeDistributed(Dropout(0.5))(x)
    
    y = TimeDistributed(Dense(n_tags, activation = "softmax"))(x)
    
    model = Model([inp, inp_alt, inp_profiles], y)
    
    return model 
Example #14
Source File: embedding.py    From Keras-TextClassification with MIT License
def build(self, **kwargs):
        self.embedding_type = 'word2vec'
        print("load word2vec start!")
        self.key_vector = KeyedVectors.load_word2vec_format(self.corpus_path, **kwargs)
        print("load word2vec end!")
        self.embed_size = self.key_vector.vector_size

        self.token2idx = self.ot_dict.copy()
        embedding_matrix = []
        # First add embeddings for the four special tokens in self.token2idx: [PAD], [UNK], [BOS], [EOS]
        embedding_matrix.append(np.zeros(self.embed_size))
        embedding_matrix.append(np.random.uniform(-0.5, 0.5, self.embed_size))
        embedding_matrix.append(np.random.uniform(-0.5, 0.5, self.embed_size))
        embedding_matrix.append(np.random.uniform(-0.5, 0.5, self.embed_size))

        for word in self.key_vector.index2entity:
            self.token2idx[word] = len(self.token2idx)
            embedding_matrix.append(self.key_vector[word])

        self.idx2token = {}
        for key, value in self.token2idx.items():
            self.idx2token[value] = key

        self.vocab_size = len(self.token2idx)
        embedding_matrix = np.array(embedding_matrix)
        self.input = Input(shape=(self.len_max,), dtype='int32')

        self.output = Embedding(self.vocab_size,
                                self.embed_size,
                                input_length=self.len_max,
                                weights=[embedding_matrix],
                                trainable=self.trainable)(self.input)
        self.model = Model(self.input, self.output) 
Example #15
Source File: embedding.py    From Keras-TextClassification with MIT License
def build(self, **kwargs):
        self.vocab_size = len(self.token2idx)
        self.input = Input(shape=(self.len_max,), dtype='int32')
        self.output = Embedding(self.vocab_size+1,
                                self.embed_size,
                                input_length=self.len_max,
                                trainable=self.trainable,
                                )(self.input)
        self.model = Model(self.input, self.output) 
Example #16
Source File: gmcnn_gan.py    From inpainting-gmcnn-keras with MIT License
def define_generator_model(self, generator_raw, local_discriminator_raw,
                             global_discriminator_raw):
    
    generator_inputs_img = Input(shape=(self.img_height, self.img_width, self.num_channels))
    generator_inputs_mask = Input(shape=(self.img_height, self.img_width, self.num_channels))
    
    generator_outputs = generator_raw.model([generator_inputs_img, generator_inputs_mask])
    global_discriminator_outputs = global_discriminator_raw.model(generator_outputs)
    
    local_discriminator_outputs = local_discriminator_raw.model([generator_outputs,
                                                                 generator_inputs_mask])
    
    generator_model = Model(inputs=[generator_inputs_img, generator_inputs_mask],
                            outputs=[generator_outputs, generator_outputs,
                                     global_discriminator_outputs,
                                     local_discriminator_outputs])
    
    # this partial trick is required for passing additional parameters for loss functions
    partial_cr_loss = partial(confidence_reconstruction_loss,
                              mask=generator_inputs_mask,
                              num_steps=self.num_gaussian_steps,
                              gaussian_kernel_size=self.gaussian_kernel_size,
                              gaussian_kernel_std=self.gaussian_kernel_std)
    
    partial_cr_loss.__name__ = 'confidence_reconstruction_loss'
    
    partial_id_mrf_loss = partial(id_mrf_loss,
                                  mask=generator_inputs_mask,
                                  nn_stretch_sigma=self.nn_stretch_sigma,
                                  batch_size=self.batch_size,
                                  vgg_16_layers=self.vgg_16_layers,
                                  id_mrf_style_weight=self.id_mrf_style_weight,
                                  id_mrf_content_weight=self.id_mrf_content_weight,
                                  id_mrf_loss_weight=self.id_mrf_loss_weight)
    
    partial_id_mrf_loss.__name__ = 'id_mrf_loss'
    
    partial_wasserstein_loss = partial(wasserstein_loss,
                                       wgan_loss_weight=self.adversarial_loss_weight)
    
    partial_wasserstein_loss.__name__ = 'wasserstein_loss'
    
    if self.warm_up_generator:
      # set Wasserstein loss to 0 - total generator loss will be based only on reconstruction loss
      generator_model.compile(optimizer=self.generator_optimizer,
                              loss=[partial_cr_loss, partial_id_mrf_loss, partial_wasserstein_loss,
                                    partial_wasserstein_loss],
                              loss_weights=[1., 0., 0., 0.])
      # metrics=[metrics.psnr])
    else:
      generator_model.compile(optimizer=self.generator_optimizer,
                              loss=[partial_cr_loss, partial_id_mrf_loss, partial_wasserstein_loss,
                                    partial_wasserstein_loss])
    
    return generator_model 
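The partial() trick above is generic: Keras invokes every loss as loss(y_true, y_pred), so any extra tensors or constants must be bound in advance, and the resulting callable needs a __name__ or Keras raises an error. Below is a minimal standalone illustration with a hypothetical mask-weighted loss (not the project's own).

from functools import partial
from keras import backend as K
from keras.models import Input

def masked_mae(y_true, y_pred, mask):
    # Hypothetical loss: mean absolute error restricted to masked pixels.
    return K.sum(K.abs((y_true - y_pred) * mask)) / (K.sum(mask) + K.epsilon())

mask_tensor = Input(shape=(256, 256, 3))    # the mask is itself a model input
partial_masked_mae = partial(masked_mae, mask=mask_tensor)
partial_masked_mae.__name__ = 'masked_mae'  # functools.partial has no __name__ by default

# model.compile(optimizer='adam', loss=partial_masked_mae)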
Example #17
Source File: experiments.py    From AnomalyDetectionTransformations with MIT License
def _cae_ocsvm_experiment(dataset_load_fn, dataset_name, single_class_ind, gpu_q):
    gpu_to_use = gpu_q.get()
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    n_channels = x_train.shape[get_channels_axis()]
    input_side = x_train.shape[2]  # the spatial side length is always at shape[2], regardless of channel ordering
    enc = conv_encoder(input_side, n_channels)
    dec = conv_decoder(input_side, n_channels)
    x_in = Input(shape=x_train.shape[1:])
    x_rec = dec(enc(x_in))
    cae = Model(x_in, x_rec)
    cae.compile('adam', 'mse')

    x_train_task = x_train[y_train.flatten() == single_class_ind]
    x_test_task = x_test[y_test.flatten() == single_class_ind]  # This is just for visual monitoring
    cae.fit(x=x_train_task, y=x_train_task, batch_size=128, epochs=200, validation_data=(x_test_task, x_test_task))

    x_train_task_rep = enc.predict(x_train_task, batch_size=128)
    if dataset_name in ['cats-vs-dogs']:  # OC-SVM is quadratic on the number of examples, so subsample training set
        subsample_inds = np.random.choice(len(x_train_task_rep), 2500, replace=False)
        x_train_task_rep = x_train_task_rep[subsample_inds]

    x_test_rep = enc.predict(x_test, batch_size=128)
    pg = ParameterGrid({'nu': np.linspace(0.1, 0.9, num=9),
                        'gamma': np.logspace(-7, 2, num=10, base=2)})

    results = Parallel(n_jobs=6)(
        delayed(_train_ocsvm_and_score)(d, x_train_task_rep, y_test.flatten() == single_class_ind, x_test_rep)
        for d in pg)

    best_params, best_auc_score = max(zip(pg, results), key=lambda t: t[-1])
    print(best_params)
    best_ocsvm = OneClassSVM(**best_params).fit(x_train_task_rep)
    scores = best_ocsvm.decision_function(x_test_rep)
    labels = y_test.flatten() == single_class_ind

    res_file_name = '{}_cae-oc-svm_{}_{}.npz'.format(dataset_name,
                                                     get_class_name_from_index(single_class_ind, dataset_name),
                                                     datetime.now().strftime('%Y-%m-%d-%H%M'))
    res_file_path = os.path.join(RESULTS_DIR, dataset_name, res_file_name)
    save_roc_pr_curve_data(scores, labels, res_file_path)

    gpu_q.put(gpu_to_use) 
Example #18
Source File: unet.py    From surface-crack-detection with MIT License
def model(weights_input=None):

    inputs = Input(IMAGE_SIZE)
    conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(inputs)
    conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool1)
    conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool2)
    conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool3)
    conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool4)
    conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv5)
    drop5 = Dropout(0.5)(conv5)

    up6 = Conv2D(512, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(drop5))
    merge6 = Concatenate(axis=3)([drop4,up6])
    conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge6)
    conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv6)

    up7 = Conv2D(256, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv6))
    merge7 = Concatenate(axis=3)([conv3,up7])
    conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge7)
    conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv7)

    up8 = Conv2D(128, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv7))
    merge8 = Concatenate(axis=3)([conv2,up8])
    conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge8)
    conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv8)

    up9 = Conv2D(64, 2, activation="relu", padding="same", kernel_initializer="he_normal")(UpSampling2D(size = (2,2))(conv8))
    merge9 = Concatenate(axis=3)([conv1,up9])
    conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge9)
    conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)
    conv9 = Conv2D(2, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)

    conv10 = Conv2D(1, 1, activation="sigmoid")(conv9)

    model = Model(inputs=inputs, outputs=conv10)
    model.compile(optimizer=Adam(lr=1e-4), loss="binary_crossentropy", metrics=["accuracy"])

    if weights_input:
        model.load_weights(weights_input)

    return model 
Example #19
Source File: unet.py    From deepdiy with MIT License
def unet_net(self):
        inputs = Input((self.img_h, self.img_w, 3))

        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
        conv1 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
        conv2 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
        conv3 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(drop5))
        merge6 = Concatenate(axis=3)([conv4, up6])
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)

        up7 = Conv2D(256, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv6))
        merge7 = Concatenate(axis=3)([conv3, up7])
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv7))
        merge8 = Concatenate(axis=3)([conv2, up8])
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(
            UpSampling2D(size=(2, 2))(conv8))
        merge9 = Concatenate(axis=3)([conv1, up9])
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(6, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv9)

        conv10 = Conv2D(3, 1, activation='sigmoid')(conv9)

        model = Model(inputs=inputs, outputs=conv10)
        model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
        return model 
Example #20
Source File: vggnet.py    From nn-transfer with MIT License
def vggnet_keras():

    # Block 1
    img_input = Input((3, 224, 224))
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='features.0')(img_input)
    x = Conv2D(64, (3, 3), activation='relu',
               padding='same', name='features.2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu',
               padding='same', name='features.5')(x)
    x = Conv2D(128, (3, 3), activation='relu',
               padding='same', name='features.7')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.10')(x)
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.12')(x)
    x = Conv2D(256, (3, 3), activation='relu',
               padding='same', name='features.14')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.17')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.19')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.21')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.24')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.26')(x)
    x = Conv2D(512, (3, 3), activation='relu',
               padding='same', name='features.28')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='classifier.0')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='classifier.3')(x)
    x = Dropout(0.5)(x)
    x = Dense(1000, activation=None, name='classifier.6')(x)

    # Create model.
    model = Model(img_input, x, name='vgg16')

    return model 
Example #21
Source File: __init__.py    From deep_complex_networks with MIT License
def get_deep_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))
    outs = inputs

    outs = (ComplexConv1D(
        16, 6, strides=2, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        32, 3, strides=2, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)
    
    outs = (ComplexConv1D(
        64, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        64, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    outs = (ComplexConv1D(
        128, 3, strides=1, padding='same',
        activation='relu',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexConv1D(
        128, 3, strides=1, padding='same',
        activation='linear',
        kernel_initializer='complex_independent'))(outs)
    outs = (ComplexBN(axis=-1))(outs)
    outs = (keras.layers.Activation('relu'))(outs)
    outs = (keras.layers.AveragePooling1D(pool_size=2, strides=2))(outs)

    #outs = (keras.layers.MaxPooling1D(pool_size=2))
    #outs = (Permute([2, 1]))
    outs = (keras.layers.Flatten())(outs)
    outs = (keras.layers.Dense(2048, activation='relu',
                           kernel_initializer='glorot_normal'))(outs)
    predictions = (keras.layers.Dense(output_size, activation='sigmoid',
                                 bias_initializer=keras.initializers.Constant(value=-5)))(outs)

    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model