Python tensorflow.keras.layers.Dropout() Examples

The following are 30 code examples of tensorflow.keras.layers.Dropout(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module tensorflow.keras.layers, or try the search function.
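Before the examples, a minimal self-contained sketch of the layer's basic behavior (the shapes and rate below are illustrative): Dropout zeroes a random fraction rate of its inputs during training and scales the survivors by 1 / (1 - rate), while acting as the identity at inference.

import numpy as np
import tensorflow as tf

layer = tf.keras.layers.Dropout(rate=0.5, seed=0)
x = np.ones((2, 4), dtype="float32")

print(layer(x, training=True))   # about half the entries zeroed, the rest scaled to 2.0
print(layer(x, training=False))  # unchanged: dropout is inactive at inference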
Example #1
Source File: lstm_experiment_keras.py    From blood-glucose-prediction with GNU General Public License v3.0
def load(input_shape, output_shape, cfg):
    nb_lstm_states = int(cfg['nb_lstm_states'])

    inputs = KL.Input(shape=input_shape)
    x = KL.CuDNNLSTM(units=nb_lstm_states, unit_forget_bias=True)(inputs)

    x = KL.Dense(512)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.2)(x)

    x = KL.Dense(256)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.3)(x)

    mu = KL.Dense(1)(x)
    std = KL.Dense(1)(x)
    activation_fn = get_activation_function_by_name(cfg['activation_function'])
    std = KL.Activation(activation_fn, name="exponential_activation")(std)

    output = KL.Concatenate(axis=-1)([std, mu])
    model = KM.Model(inputs=[inputs], outputs=[output])

    return model 
Example #2
Source File: xception.py    From keras-tuner with Apache License 2.0
def dense(x, dims, activation='relu', batchnorm=True, dropout_rate=0):
    if activation == 'selu':
        x = layers.Dense(dims, activation='selu',
                         kernel_initializer='lecun_normal',
                         bias_initializer='zeros')(x)
        if dropout_rate:
            x = layers.AlphaDropout(dropout_rate)(x)
    elif activation == 'relu':
        x = layers.Dense(dims, activation='relu')(x)
        if batchnorm:
            x = layers.BatchNormalization()(x)
        if dropout_rate:
            x = layers.Dropout(dropout_rate)(x)
    else:
        msg = 'Unknown activation function: %s' % activation
        raise ValueError(msg)
    return x 
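A hypothetical call into the helper above (the input shape and layer sizes are illustrative, not from the project). AlphaDropout is paired with selu because it preserves the mean and variance that selu's self-normalization relies on, where plain Dropout would not.

inputs = layers.Input(shape=(32,))
h = dense(inputs, 64, activation='selu', dropout_rate=0.1)              # AlphaDropout branch
h = dense(h, 64, activation='relu', batchnorm=True, dropout_rate=0.2)   # BatchNorm + Dropout branch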
Example #3
Source File: deeplabv3.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(DeepLabv3FinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        mid_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2") 
Example #4
Source File: densenet_cifar.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(DenseSimpleUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.use_dropout = (dropout_rate != 0.0)
        inc_channels = out_channels - in_channels

        self.conv = pre_conv3x3_block(
            in_channels=in_channels,
            out_channels=inc_channels,
            data_format=data_format,
            name="conv")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout") 
Example #5
Source File: run.py    From polyaxon-examples with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3), activation=args.conv_activation, input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))

    model.summary()

    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])

    return model 
Example #6
Source File: persist_load_test.py    From keract with MIT License
def test_load_persist(self):
        # define the model.
        model = Sequential()
        model.add(Dense(16, input_shape=(10,)))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        model.compile(optimizer='adam', loss='categorical_crossentropy')

        # fetch activations.
        x = np.ones((2, 10))
        activations = get_activations(model, x)

        # persist the activations to the disk.
        output = 'activations.json'
        persist_to_json_file(activations, output)

        # read them from the disk.
        activations2 = load_activations_from_json_file(output)

        for a1, a2 in zip(list(activations.values()), list(activations2.values())):
            np.testing.assert_almost_equal(a1, a2) 
Example #7
Source File: sageconv.py    From dgl with Apache License 2.0
def __init__(self,
                 in_feats,
                 out_feats,
                 aggregator_type,
                 feat_drop=0.,
                 bias=True,
                 norm=None,
                 activation=None):
        super(SAGEConv, self).__init__()

        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._aggre_type = aggregator_type
        self.norm = norm
        self.feat_drop = layers.Dropout(feat_drop)
        self.activation = activation
        # aggregator type: mean/pool/lstm/gcn
        if aggregator_type == 'pool':
            self.fc_pool = layers.Dense(self._in_src_feats)
        if aggregator_type == 'lstm':
            self.lstm = layers.LSTM(units=self._in_src_feats)
        if aggregator_type != 'gcn':
            self.fc_self = layers.Dense(out_feats, use_bias=bias)
        self.fc_neigh = layers.Dense(out_feats, use_bias=bias) 
Example #8
Source File: gcn.py    From dgl with Apache License 2.0
def __init__(self,
                 g,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout):
        super(GCN, self).__init__()
        self.g = g
        self.layers = []
        # input layer
        self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
        # output layer
        self.layers.append(GraphConv(n_hidden, n_classes))
        self.dropout = layers.Dropout(dropout) 
Example #9
Source File: BidirectionalLSTM.py    From tape-neurips2019 with MIT License
def __init__(self,
                 n_symbols: int,
                 n_units: int = 1024,
                 n_layers: int = 3,
                 dropout: Optional[float] = 0.1) -> None:
        super().__init__(n_symbols)

        if dropout is None:
            dropout = 0

        self.embedding = Embedding(n_symbols, 128)

        self.forward_lstm = Stack([
            LSTM(n_units,
                 return_sequences=True) for _ in range(n_layers)],
            name='forward_lstm')

        self.reverse_lstm = Stack([
            LSTM(n_units,
                 return_sequences=True) for _ in range(n_layers)],
            name='reverse_lstm')

        self.dropout = Dropout(dropout) 
Example #10
Source File: icnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 upscale_out_size,
                 bottleneck_factor,
                 data_format="channels_last",
                 **kwargs):
        super(PSPBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor

        self.pool = PyramidPooling(
            in_channels=in_channels,
            upscale_out_size=upscale_out_size,
            data_format=data_format,
            name="pool")
        self.conv = conv3x3_block(
            in_channels=4096,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout") 
Example #11
Source File: gcn.py    From dgl with Apache License 2.0
def __init__(self,
                 g,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout):
        super(GCN, self).__init__()
        self.g = g
        self.layer_list = []
        # input layer
        self.layer_list.append(GraphConv(in_feats, n_hidden, activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layer_list.append(GraphConv(n_hidden, n_hidden, activation=activation))
        # output layer
        self.layer_list.append(GraphConv(n_hidden, n_classes))
        self.dropout = layers.Dropout(dropout) 
Example #12
Source File: cifar10.py    From mia with MIT License
def attack_model_fn():
    """Attack model that takes target model predictions and predicts membership.

    Following the original paper, this attack model is specific to the class of the input.
    AttackModelBundle creates multiple instances of this model, one per class.
    """
    model = tf.keras.models.Sequential()

    model.add(layers.Dense(128, activation="relu", input_shape=(NUM_CLASSES,)))

    model.add(layers.Dropout(0.3, noise_shape=None, seed=None))
    model.add(layers.Dense(64, activation="relu"))
    model.add(layers.Dropout(0.2, noise_shape=None, seed=None))
    model.add(layers.Dense(64, activation="relu"))

    model.add(layers.Dense(1, activation="sigmoid"))
    model.compile("adam", loss="binary_crossentropy", metrics=["accuracy"])
    return model 
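A hedged sketch of how this factory might be consumed; the import path and constructor signature are assumptions based on the mia library's documented AttackModelBundle, not taken from this file.

# Assumed API: AttackModelBundle from the mia library (see the docstring above).
from mia.estimators import AttackModelBundle

attacker = AttackModelBundle(attack_model_fn, num_classes=NUM_CLASSES)
# The bundle instantiates attack_model_fn() once per class and trains each
# instance on the target model's prediction vectors for that class.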
Example #13
Source File: efficientnet.py    From keras_imagenet with MIT License
def get_dropout(**kwargs):
    """Wrapper over custom dropout. Fix problem of ``None`` shape for tf.keras.
    It is not possible to define FixedDropout class as global object,
    because we do not have modules for inheritance at first time.

    Issue:
        https://github.com/tensorflow/tensorflow/issues/30946
    """
    class FixedDropout(layers.Dropout):
        def _get_noise_shape(self, inputs):
            if self.noise_shape is None:
                return self.noise_shape

            symbolic_shape = backend.shape(inputs)
            noise_shape = [symbolic_shape[axis] if shape is None else shape
                           for axis, shape in enumerate(self.noise_shape)]
            return tuple(noise_shape)

    return FixedDropout 
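A hedged usage sketch of the returned class; the noise_shape below is the per-example broadcast pattern typically used for EfficientNet-style drop connect and is an assumption, not taken from this file.

FixedDropout = get_dropout()
# Drop whole residual branches per example: the None batch axis is resolved
# symbolically by _get_noise_shape, and the 1s broadcast over H, W and channels.
x = FixedDropout(rate=0.2, noise_shape=(None, 1, 1, 1), name='drop')(x)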
Example #14
Source File: BeplerModel.py    From tape-neurips2019 with MIT License
def __init__(self, n_symbols: int, dropout: float = 0, use_pfam_alphabet: bool = True):
        super().__init__()

        self._use_pfam_alphabet = use_pfam_alphabet

        if use_pfam_alphabet:
            self.embed = Embedding(n_symbols, n_symbols)
        else:
            n_symbols = 21
            self.embed = Embedding(n_symbols + 1, n_symbols)

        self.dropout = Dropout(dropout)
        self.rnn = Stack([
            LSTM(1024, return_sequences=True, use_bias=True,
                 implementation=2, recurrent_activation='sigmoid'),
            LSTM(1024, return_sequences=True, use_bias=True,
                 implementation=2, recurrent_activation='sigmoid')])

        self.compute_logits = Dense(n_symbols, use_bias=True, activation='linear') 
Example #15
Source File: run.py    From polyaxon with Apache License 2.0
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3), activation=args.conv_activation, input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))

    model.summary()

    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])

    return model 
Example #16
Source File: layers.py    From deepchem with MIT License
def build(self, input_shape):
    self.W_list = []
    self.b_list = []
    self.dropouts = []
    init = initializers.get(self.init)
    prev_layer_size = self.n_graph_feat
    for layer_size in self.layer_sizes:
      self.W_list.append(init([prev_layer_size, layer_size]))
      self.b_list.append(backend.zeros(shape=[
          layer_size,
      ]))
      if self.dropout is not None and self.dropout > 0.0:
        self.dropouts.append(Dropout(rate=self.dropout))
      else:
        self.dropouts.append(None)
      prev_layer_size = layer_size
    self.W_list.append(init([prev_layer_size, self.n_outputs]))
    self.b_list.append(backend.zeros(shape=[
        self.n_outputs,
    ]))
    if self.dropout is not None and self.dropout > 0.0:
      self.dropouts.append(Dropout(rate=self.dropout))
    else:
      self.dropouts.append(None)
    self.built = True 
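The Dropout layers built here are stored as objects and applied later; a simplified, hypothetical sketch of how call() might use them alongside the weights (DeepChem's actual call logic also applies an activation between layers, omitted here):

  def call(self, inputs, training=None):
    # Hypothetical, simplified application of the weights built above.
    x = inputs
    for W, b, dropout in zip(self.W_list, self.b_list, self.dropouts):
      x = tf.matmul(x, W) + b
      if dropout is not None:
        x = dropout(x, training=training)  # active only when training=True
    return x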
Example #17
Source File: BeplerModel.py    From tape-neurips2019 with MIT License
def __init__(self,
                 n_symbols: int,
                 dropout: float = 0,
                 use_pfam_alphabet: bool = True):
        if not use_pfam_alphabet:
            n_symbols = 21

        super().__init__(n_symbols)
        self._use_pfam_alphabet = use_pfam_alphabet

        self.embed = LMEmbed(n_symbols, dropout)
        self.dropout = Dropout(dropout)
        lstm = Stack([
            Bidirectional(
                LSTM(512, return_sequences=True, use_bias=True,
                     recurrent_activation='sigmoid', implementation=2))
            for _ in range(3)])
        self.rnn = lstm
        self.proj = Dense(100, use_bias=True, activation='linear')
        self.random_replace = RandomReplaceMask(0.05, n_symbols) 
Example #18
Source File: seqtoseq.py    From deepchem with MIT License
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(len(self._filter_sizes)):
      filter_size = self._filter_sizes[i]
      kernel_size = self._kernel_sizes[i]
      if dropout > 0.0:
        prev_layer = Dropout(rate=dropout)(prev_layer)
      prev_layer = Conv1D(
          filters=filter_size, kernel_size=kernel_size,
          activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer) 
Example #19
Source File: fcn8sd.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(FCNFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        mid_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2") 
Example #20
Source File: pspnet.py    From imgclsmob with MIT License
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(PSPFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        mid_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2") 
Example #21
Source File: model.py    From ActionAI with GNU General Public License v3.0
def lstm_model():
    model = Sequential()
    model.add(LSTM(16, dropout=0.2, recurrent_dropout=0.2, input_shape=(cfg.pose_vec_dim, cfg.window)))
    model.add(Dense(16, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(len(cfg.activity_dict), activation='softmax'))
    print(model.summary())
    return model 
Example #22
Source File: train.py    From ActionAI with GNU General Public License v3.0
def lstm_model():
    model = Sequential()
    model.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2, input_shape=(pose_vec_dim, window)))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(len(class_names), activation='softmax'))
    print(model.summary())
    return model 
Example #23
Source File: train_sequential.py    From ActionAI with GNU General Public License v3.0
def lstm_model():
    model = Sequential()
    model.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2, input_shape=(pose_vec_dim, window)))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(len(class_names), activation='softmax'))
    print(model.summary())
    return model 
Example #24
Source File: model_builder.py    From image-quality-assessment with Apache License 2.0
def build(self):
        # get base model class
        BaseCnn = getattr(self.base_module, self.base_model_name)

        # load pre-trained model
        self.base_model = BaseCnn(input_shape=(224, 224, 3), weights=self.weights, include_top=False, pooling='avg')

        # add dropout and dense layer
        x = Dropout(self.dropout_rate)(self.base_model.output)
        x = Dense(units=self.n_classes, activation='softmax')(x)

        self.nima_model = Model(self.base_model.inputs, x) 
Example #25
Source File: models.py    From neuron with GNU General Public License v3.0
def DenseLayerNet(inshape, layer_sizes, nb_labels=2, activation='relu', final_activation='softmax', dropout=None, batch_norm=None):
    """
    A densenet that connects a set of dense layers to  a classification
    output. 
    if nb_labels is 0 assume it is a regression net and use linear activation
    (if None specified)
    """
    inputs = KL.Input(shape=inshape, name='input')
    prev_layer = KL.Flatten(name='flat_inputs')(inputs)
    # to prevent overfitting include some kernel and bias regularization
    kreg = keras.regularizers.l1_l2(l1=1e-5, l2=1e-4)
    breg = keras.regularizers.l2(1e-4)

    # connect the list of dense layers to each other
    for lno, layer_size in enumerate(layer_sizes):
        prev_layer = KL.Dense(layer_size, name='dense%d' % lno, activation=activation, kernel_regularizer=kreg, bias_regularizer=breg)(prev_layer)
        if dropout is not None:
            prev_layer = KL.Dropout(dropout, name='dropout%d'%lno)(prev_layer)
        if batch_norm is not None:
            prev_layer = KL.BatchNormalization(name='BatchNorm%d'%lno)(prev_layer)
            
    # tie the previous dense layer to a onehot encoded output layer
    last_layer = KL.Dense(nb_labels, name='last_dense', activation=final_activation)(prev_layer)

    model = keras.models.Model(inputs=inputs, outputs=last_layer)
    return model


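A hypothetical instantiation of DenseLayerNet (the input shape and layer sizes are illustrative):

model = DenseLayerNet(inshape=(28, 28), layer_sizes=[64, 32], nb_labels=10,
                      dropout=0.5, batch_norm=True)
model.compile(optimizer='adam', loss='categorical_crossentropy')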
Example #26
Source File: autoqkeras_test.py    From qkeras with Apache License 2.0
def dense_model():
  """Creates test dense model."""

  x = x_in = Input((4,), name="input")
  x = Dense(20, name="dense_0")(x)
  x = BatchNormalization(name="bn0")(x)
  x = Dropout(0.1, name="dp0")(x)
  x = Activation("relu", name="relu_0")(x)
  x = Dense(3, name="dense")(x)
  x = Activation("softmax", name="softmax")(x)

  model = Model(inputs=x_in, outputs=x)
  return model 
Example #27
Source File: GlobalVectorPredictor.py    From tape-neurips2019 with MIT License
def __init__(self,
                 d_output: int,
                 input_name: str = 'cls_vector',
                 output_name: str = 'prediction') -> None:
        super().__init__()
        self._d_output = d_output
        self._input_name = input_name
        self._output_name = output_name
        self.predict_vector = Stack([LayerNorm(), Dense(512, 'relu'), Dropout(0.5), Dense(d_output)]) 
Example #28
Source File: mnist_cifar_models.py    From CROWN-IBP with BSD 2-Clause "Simplified" License
def get_model_meta(filename):
    print("Loading model " + filename)
    global use_tf_keras
    global Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    try:
        from keras.models import load_model as load_model_keras
        ret = get_model_meta_real(filename, load_model_keras)
        # model is successfully loaded. Import layers from keras
        from keras.models import Sequential
        from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import LeakyReLU
        from keras import regularizers
        from keras import backend as K
        print("Model imported using keras")
    except (KeyboardInterrupt, SystemExit, SyntaxError, NameError, IndentationError):
        raise
    except:
        print("Failed to load model with keras. Trying tf.keras...")
        use_tf_keras = True
        from tensorflow.keras.models import load_model as load_model_tf
        ret = get_model_meta_real(filename, load_model_tf)
        # model is successfully loaded. Import layers from tensorflow.keras
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
        from tensorflow.keras.layers import Conv2D, MaxPooling2D
        from tensorflow.keras.layers import LeakyReLU
        from tensorflow.keras import regularizers
        from tensorflow.keras import backend as K
        print("Model imported using tensorflow.keras")
    # put imported functions in global
    Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K = \
        Sequential, Dense, Dropout, Activation, Flatten, Lambda, Conv2D, MaxPooling2D, LeakyReLU, regularizers, K
    return ret 
Example #29
Source File: conftest.py    From alibi with Apache License 2.0
def conv_net(request):
    """
    Creates a simple CNN classifier on the data in the request. This is a
    module scoped fixture, so if you need to modify the state of the objects
    returned, copy the objects first.
    """
    import tensorflow as tf
    if tf.executing_eagerly():
        tf.compat.v1.disable_eager_execution()
    data = request.param
    x_train, y_train = data['X_train'], data['y_train']

    def model():
        x_in = Input(shape=(28, 28, 1))
        x = Conv2D(filters=8, kernel_size=2, padding='same', activation='relu')(x_in)
        x = MaxPooling2D(pool_size=2)(x)
        x = Dropout(0.3)(x)
        x = Flatten()(x)
        x_out = Dense(10, activation='softmax')(x)
        cnn = Model(inputs=x_in, outputs=x_out)
        cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        return cnn

    cnn = model()
    cnn.fit(x_train, y_train, batch_size=256, epochs=1)

    return cnn


Example #30
Source File: cifar10.py    From mia with MIT License
def target_model_fn():
    """The architecture of the target (victim) model.

    The attack is white-box, hence the attacker is assumed to know this architecture too."""

    model = tf.keras.models.Sequential()

    model.add(
        layers.Conv2D(
            32,
            (3, 3),
            activation="relu",
            padding="same",
            input_shape=(WIDTH, HEIGHT, CHANNELS),
        )
    )
    model.add(layers.Conv2D(32, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    model.add(layers.Conv2D(64, (3, 3), activation="relu", padding="same"))
    model.add(layers.Conv2D(64, (3, 3), activation="relu"))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Dropout(0.25))

    model.add(layers.Flatten())

    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dropout(0.5))

    model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
    model.compile("adam", loss="categorical_crossentropy", metrics=["accuracy"])

    return model