Python keras.backend.bias_add() Examples

The following are 25 code examples of keras.backend.bias_add(), collected from open-source projects. K.bias_add(x, bias, data_format=None) adds a bias vector to a tensor, broadcasting it along the channel axis selected by data_format ('channels_first' or 'channels_last'; None falls back to the current Keras image data format). Each example names the project and source file it was taken from.
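Before the project examples, here is a minimal self-contained sketch of the function itself (the shapes and values below are illustrative, not from any of the projects):

import numpy as np
import keras.backend as K

x = K.constant(np.zeros((2, 4, 4, 3)))     # (batch, rows, cols, channels)
b = K.constant(np.array([0.1, 0.2, 0.3]))  # one bias value per channel

# channels_last: the bias is added along the trailing channel axis
y = K.bias_add(x, b, data_format='channels_last')
print(K.eval(y)[0, 0, 0])  # [0.1 0.2 0.3]

# with data_format='channels_first', the same bias would instead be added
# along axis 1 of a (batch, channels, rows, cols) tensor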
Example #1
Source File: binary_layers.py    From nn_playground with MIT License
def call(self, inputs):
        binary_kernel = binarize(self.kernel, H=self.H) 
        outputs = K.conv2d(
            inputs,
            binary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
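The binarize helper is defined elsewhere in nn_playground. As a rough sketch of the BinaryNet-style idea (hypothetical code, not the project's exact helper), the weights are snapped to {-H, +H} on the forward pass while gradients flow through unchanged:

import keras.backend as K

def binarize(W, H=1.0):
    # forward pass sees binary weights; K.sign(0) stays 0 in this sketch
    Wb = H * K.sign(W)
    # straight-through estimator: the backward pass sees the identity
    return W + K.stop_gradient(Wb - W)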
Example #2
Source File: utils.py    From face_landmark_dnn with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs, self.bias, data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #3
Source File: train_mobilenets.py    From face_landmark_dnn with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs, self.bias, data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #4
Source File: DenseMoE.py    From mixture-of-experts with GNU General Public License v3.0
def call(self, inputs):

        expert_outputs = tf.tensordot(inputs, self.expert_kernel, axes=1)
        if self.use_expert_bias:
            expert_outputs = K.bias_add(expert_outputs, self.expert_bias)
        if self.expert_activation is not None:
            expert_outputs = self.expert_activation(expert_outputs)

        gating_outputs = K.dot(inputs, self.gating_kernel)
        if self.use_gating_bias:
            gating_outputs = K.bias_add(gating_outputs, self.gating_bias)
        if self.gating_activation is not None:
            gating_outputs = self.gating_activation(gating_outputs)

        output = K.sum(
            expert_outputs * K.repeat_elements(
                K.expand_dims(gating_outputs, axis=1), self.units, axis=1),
            axis=2)

        return output 
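To keep the shapes straight, here is a minimal runnable sketch of the two paths, using illustrative dimensions (the sizes and variable names below are assumptions, not taken from the project):

import numpy as np
import tensorflow as tf
import keras.backend as K

batch, input_dim, units, n_experts = 4, 8, 16, 3
inputs = K.constant(np.random.rand(batch, input_dim))
expert_kernel = K.constant(np.random.rand(input_dim, units, n_experts))
gating_kernel = K.constant(np.random.rand(input_dim, n_experts))

expert_outputs = tf.tensordot(inputs, expert_kernel, axes=1)  # (batch, units, n_experts)
gating_outputs = K.dot(inputs, gating_kernel)                 # (batch, n_experts)
gate = K.repeat_elements(
    K.expand_dims(gating_outputs, axis=1), units, axis=1)     # (batch, units, n_experts)
output = K.sum(expert_outputs * gate, axis=2)                 # (batch, units)
print(K.int_shape(output))  # (4, 16)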
Example #5
Source File: se_mobilenets.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #6
Source File: layers.py    From keras-text with MIT License
def call(self, x, mask=None):
        # x: [..., time_steps, features]
        # ut = [..., time_steps, attention_dims]
        ut = K.dot(x, self.kernel)
        if self.use_bias:
            ut = K.bias_add(ut, self.bias)

        ut = K.tanh(ut)
        if self.use_context:
            ut = ut * self.context_kernel

        # Collapse `attention_dims` to 1. This indicates the weight for each time_step.
        ut = K.sum(ut, axis=-1, keepdims=True)

        # Convert those weights into a distribution but along time axis.
        # i.e., sum of alphas along `time_steps` axis should be 1.
        self.at = _softmax(ut, dim=1)
        if mask is not None:
            self.at *= K.cast(K.expand_dims(mask, -1), K.floatx())

        # Weighted sum along `time_steps` axis.
        return K.sum(x * self.at, axis=-2) 
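The _softmax helper comes from the same keras-text module; a plausible sketch (hypothetical, not the project's exact code) of a numerically stable softmax over an arbitrary axis:

import keras.backend as K

def _softmax(x, dim):
    # subtract the per-sample max for numerical stability
    e = K.exp(x - K.max(x, axis=dim, keepdims=True))
    return e / K.sum(e, axis=dim, keepdims=True)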
Example #7
Source File: se_mobilenets.py    From keras-squeeze-excite-network with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #8
Source File: gc_mobilenets.py    From keras-global-context-networks with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #9
Source File: core.py    From enet-keras with MIT License
def call(self, inputs, **kwargs):
        outputs = K.conv2d(
            inputs,
            self.kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)
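        # NB: this instantiates a fresh BatchNormalization layer on every call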
        outputs = BatchNormalization(momentum=self.momentum)(outputs)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #10
Source File: depthwise_conv2d.py    From keras-mobilenet with MIT License
def call(self, inputs):

        data_format = self.data_format
        if data_format is None:
            data_format = image_data_format()
        if data_format not in {'channels_first', 'channels_last'}:
            raise ValueError('Unknown data_format ' + str(data_format))

        # convolve the preprocessed input (the original passed the raw
        # `inputs` here, leaving `x` unused)
        x = _preprocess_conv2d_input(inputs, data_format)
        padding = _preprocess_padding(self.padding)
        strides = (1,) + self.strides + (1,)

        outputs = tf.nn.depthwise_conv2d(x, self.depthwise_kernel,
                                         strides=strides,
                                         padding=padding,
                                         rate=self.dilation_rate)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #11
Source File: mobile_net_fixed.py    From kaggle-carvana-2017 with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #12
Source File: layers.py    From FC-AIDE-Keras with MIT License
def call(self, inputs):
        if self.rank == 2:
            outputs = K.conv2d(
                inputs,
                self.kernel*self.mask, ### add mask multiplication
                strides=self.strides,
                padding=self.padding,
                data_format=self.data_format,
                dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #13
Source File: mobilenets.py    From CBAM-keras with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #14
Source File: qrnn.py    From embedding-as-service with MIT License
def preprocess_input(self, inputs, training=None):
        if self.window_size > 1:
            inputs = K.temporal_padding(inputs, (self.window_size - 1, 0))
        inputs = K.expand_dims(inputs, 2)  # add a dummy dimension

        output = K.conv2d(inputs, self.kernel, strides=self.strides,
                          padding='valid',
                          data_format='channels_last')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')

        if self.dropout is not None and 0. < self.dropout < 1.:
            z = output[:, :, :self.units]
            f = output[:, :, self.units:2 * self.units]
            o = output[:, :, 2 * self.units:]
            f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
            return K.concatenate([z, f, o], -1)
        else:
            return output 
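The `1 - _dropout(1 - f, ...)` line applies zoneout to the forget gate. A hypothetical sketch of the module's _dropout helper, assuming it undoes the usual inverted-dropout rescaling so gate values stay in [0, 1]:

import keras.backend as K

def _dropout(x, level, noise_shape=None, seed=None):
    # K.dropout rescales survivors by 1/(1 - level); multiplying that factor
    # back out keeps values in [0, 1], so 1 - _dropout(1 - f) randomly pins
    # some forget gates at exactly 1 (zoneout)
    x = K.dropout(x, level, noise_shape, seed)
    return x * (1. - level)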
Example #15
Source File: mobilenetv2.py    From mobilenet_v2_keras with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #16
Source File: mobilenet.py    From keras-FP16-test with Apache License 2.0
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #17
Source File: mobilenet.py    From deep-learning-models with MIT License
def call(self, inputs, training=None):
        outputs = K.depthwise_conv2d(
            inputs,
            self.depthwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format)

        if self.bias is not None:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)

        return outputs 
Example #18
Source File: ternary_layers.py    From nn_playground with MIT License
def step(self, inputs, states):
        if 0 < self.dropout < 1:
            h = ternarize_dot(inputs * states[1], self.kernel)
        else:
            h = ternarize_dot(inputs, self.kernel)
        if self.bias is not None:
            h = K.bias_add(h, self.bias)

        prev_output = states[0]
        if 0 < self.recurrent_dropout < 1:
            prev_output *= states[2]
        output = h + ternarize_dot(prev_output, self.recurrent_kernel)
        if self.activation is not None:
            output = self.activation(output)

        # Properly set learning phase on output tensor.
        if 0 < self.dropout + self.recurrent_dropout:
            output._uses_learning_phase = True
        return output, [output] 
Example #19
Source File: ternary_layers.py    From nn_playground with MIT License
def call(self, inputs):
        ternary_kernel = ternarize(self.kernel, H=self.H) 
        outputs = K.conv2d(
            inputs,
            ternary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #20
Source File: layer_norm_layers.py    From nn_playground with MIT License
def step(self, x, states):
        h_tm1 = states[0]
        c_tm1 = states[1]
        B_U = states[2]
        B_W = states[3]

        z = LN(K.dot(x * B_W[0], self.kernel), self.gamma_1, self.beta_1) +  \
            LN(K.dot(h_tm1 * B_U[0], self.recurrent_kernel), self.gamma_2, self.beta_2)
        if self.use_bias:
            z = K.bias_add(z, self.bias)

        z0 = z[:, :self.units]
        z1 = z[:, self.units: 2 * self.units]
        z2 = z[:, 2 * self.units: 3 * self.units]
        z3 = z[:, 3 * self.units:]

        i = self.recurrent_activation(z0)
        f = self.recurrent_activation(z1)
        c = f * c_tm1 + i * self.activation(z2)
        o = self.recurrent_activation(z3)

        h = o * self.activation(LN(c, self.gamma_3, self.beta_3))
        return h, [h, c] 
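LN is the project's layer-normalization helper. A minimal sketch of the standard computation it presumably performs (hypothetical code; epsilon is an assumed default): normalize each sample over the feature axis, then apply the learned gain and shift:

import keras.backend as K

def LN(x, gamma, beta, epsilon=1e-5):
    mean = K.mean(x, axis=-1, keepdims=True)
    std = K.std(x, axis=-1, keepdims=True)
    return gamma * (x - mean) / (std + epsilon) + beta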
Example #21
Source File: attention.py    From nlp_toolkit with MIT License
def call(self, x, mask=None):
        # MLP
        ut = K.dot(x, self.kernel)
        if self.use_bias:
            ut = K.bias_add(ut, self.bias)
        if self.activation:
            ut = K.tanh(ut)
        if self.context_kernel is not None:
            ut = K.dot(ut, self.context_kernel)
        ut = K.squeeze(ut, axis=-1)
        # softmax
        at = K.exp(ut - K.max(ut, axis=-1, keepdims=True))
        if mask is not None:
            at *= K.cast(mask, K.floatx())
        att_weights = at / (K.sum(at, axis=1, keepdims=True) + K.epsilon())
        # output
        atx = x * K.expand_dims(att_weights, axis=-1)
        output = K.sum(atx, axis=1)
        if self.return_attention:
            return [output, att_weights]
        return output 
Example #22
Source File: layers.py    From nn_playground with MIT License
def call(self, inputs):
        if self.data_format == 'channels_first':
            sq = K.mean(inputs, [2, 3])
        else:
            sq = K.mean(inputs, [1, 2])

        ex = K.dot(sq, self.kernel1)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias1)
        ex = K.relu(ex)

        ex = K.dot(ex, self.kernel2)
        if self.use_bias:
            ex = K.bias_add(ex, self.bias2)
        ex = K.sigmoid(ex)

        if self.data_format == 'channels_first':
            ex = K.expand_dims(ex, -1)
            ex = K.expand_dims(ex, -1)
        else:
            ex = K.expand_dims(ex, 1)
            ex = K.expand_dims(ex, 1)

        return inputs * ex 
Example #23
Source File: binary_layers.py    From nn_playground with MIT License
def call(self, inputs):
        binary_kernel = binarize(self.kernel, H=self.H) 
        outputs = K.conv2d(
            inputs,
            binary_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs 
Example #24
Source File: qrnn.py    From nn_playground with MIT License
def preprocess_input(self, inputs, training=None):
        if self.window_size > 1:
            inputs = K.temporal_padding(inputs, (self.window_size-1, 0))
        inputs = K.expand_dims(inputs, 2)  # add a dummy dimension

        output = K.conv2d(inputs, self.kernel, strides=self.strides,
                          padding='valid',
                          data_format='channels_last')
        output = K.squeeze(output, 2)  # remove the dummy dimension
        if self.use_bias:
            output = K.bias_add(output, self.bias, data_format='channels_last')

        if self.dropout is not None and 0. < self.dropout < 1.:
            z = output[:, :, :self.units]
            f = output[:, :, self.units:2 * self.units]
            o = output[:, :, 2 * self.units:]
            f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
            return K.concatenate([z, f, o], -1)
        else:
            return output 
Example #25
Source File: ternary_layers.py    From nn_playground with MIT License
def call(self, inputs):
        ternary_kernel = ternarize(self.kernel, H=self.H)
        output = K.dot(inputs, ternary_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
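ternarize is the ternary counterpart of the binarize helper in Example #1. A hypothetical sketch, assuming a fixed threshold at H/2 and a straight-through gradient (not the project's exact code):

import keras.backend as K

def ternarize(W, H=1.0):
    # forward: weights snapped to {-H, 0, +H}; backward: identity gradient
    Wt = H * K.cast(K.greater(W, 0.5 * H), K.floatx()) \
       - H * K.cast(K.less(W, -0.5 * H), K.floatx())
    return W + K.stop_gradient(Wt - W)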