Python tensorflow.keras.backend.variable() Examples

The following are 30 code examples of tensorflow.keras.backend.variable(), drawn from open-source projects; the source file, project, and license are listed above each example, and the rest of the tensorflow.keras.backend module offers many related functions. Because the snippets are excerpts, they rely on their files' imports, typically from tensorflow.keras import backend as K (some projects alias it as lowercase k), import numpy as np, and import tensorflow as tf.
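Before the examples, a minimal sketch of the function itself: K.variable() wraps a NumPy array or scalar in a mutable backend variable, which can be read back with K.eval() or K.get_value() and updated with K.set_value(). The variable name below is illustrative.

import numpy as np
from tensorflow.keras import backend as K

# Create a backend variable from a NumPy array, read it, then update it.
w = K.variable(np.array([0.5, 2.0, 10.0]), dtype='float32', name='class_weights')
print(K.eval(w))       # [ 0.5  2.  10. ]
K.set_value(w, np.array([1.0, 1.0, 1.0]))
print(K.get_value(w))  # [1. 1. 1.]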
Example #1
Source File: metrics.py    From MIScnn with GNU General Public License v3.0
def dice_weighted(weights):
    weights = K.variable(weights)

    def weighted_loss(y_true, y_pred, smooth=0.00001):
        axis = identify_axis(y_true.get_shape())
        intersection = y_true * y_pred
        intersection = K.sum(intersection, axis=axis)
        y_true = K.sum(y_true, axis=axis)
        y_pred = K.sum(y_pred, axis=axis)
        dice = ((2 * intersection) + smooth) / (y_true + y_pred + smooth)
        dice = dice * weights
        return -dice  # negate so that maximizing Dice minimizes the loss
    return weighted_loss
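A minimal usage sketch (not from MIScnn): compile a toy 3-class segmentation head with the weighted Dice loss. The architecture and weight values are illustrative, and identify_axis from MIScnn's metrics module is assumed to be in scope.

import numpy as np
from tensorflow.keras import layers, models

inp = layers.Input(shape=(16, 16, 3))
out = layers.Conv2D(3, 1, activation='softmax')(inp)  # per-pixel class probabilities
model = models.Model(inp, out)
model.compile(optimizer='adam', loss=dice_weighted(np.array([1.0, 2.0, 5.0])))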

Example #2
Source File: test_backend.py    From kapre with MIT License
def test_amplitude_to_decibel():
    """test for backend_keras.amplitude_to_decibel"""
    from kapre.backend_keras import amplitude_to_decibel

    x = np.array([[1e-20, 1e-5, 1e-3, 5e-2], [0.3, 1.0, 20.5, 9999]])  # arbitrary positive numbers

    amin = 1e-5
    dynamic_range = 80.0

    # reference computation: convert to decibels, shift each row so its
    # maximum is 0 dB, then clip to the dynamic range
    x_decibel = 10 * np.log10(np.maximum(x, amin))
    x_decibel = x_decibel - np.max(x_decibel, axis=(1,), keepdims=True)
    x_decibel_ref = np.maximum(x_decibel, -1 * dynamic_range)

    x_var = K.variable(x)
    x_decibel_kapre = amplitude_to_decibel(x_var, amin, dynamic_range)

    assert np.allclose(K.eval(x_decibel_kapre), x_decibel_ref, atol=TOL) 
Example #3
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        # Create mean and count.
        # These are weights rather than plain backend variables because plain
        # variables are not saved with the model, and we'd like these numbers
        # to persist across saving. They must, however, be non-trainable.
        self.mean = self.add_weight(name='mean', 
                                      shape=input_shape[1:],
                                      initializer='zeros',
                                      trainable=False)
        self.count = self.add_weight(name='count', 
                                      shape=[1],
                                      initializer='zeros',
                                      trainable=False)

        # self.mean = K.zeros(input_shape[1:], name='mean')
        # self.count = K.variable(0.0, name='count')
        super(MeanStream, self).build(input_shape)  # Be sure to call this somewhere! 
Example #4
Source File: ttfs.py    From snn_toolbox with MIT License
def init_neurons(self, input_shape):
        """Init layer neurons."""

        from snntoolbox.bin.utils import get_log_keys, get_plot_keys

        output_shape = self.compute_output_shape(input_shape)
        self.v_thresh = k.variable(self._v_thresh)
        self.mem = k.variable(self.init_membrane_potential(output_shape))
        self.time = k.variable(self.dt)
        # To save memory and computations, allocate only where needed:
        if self.tau_refrac > 0:
            self.refrac_until = k.zeros(output_shape)
        if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
                'hist_spikerates_activations', 'operations',
                'synaptic_operations_b_t', 'neuron_operations_b_t',
                'spiketrains_n_b_l_t'} & (get_plot_keys(self.config) |
               get_log_keys(self.config))):
            self.spiketrain = k.zeros(output_shape)
        self.last_spiketimes = k.variable(-np.ones(output_shape)) 
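Note that k.zeros(), like k.variable(), instantiates a mutable backend variable (zero-initialized), so refrac_until and spiketrain can be updated during the simulation. A quick standalone check, with an illustrative shape:

from tensorflow.keras import backend as k

refrac = k.zeros((2, 3))                        # an all-zeros *variable*
k.set_value(refrac, k.get_value(refrac) + 1.0)  # update it in place
print(k.get_value(refrac))                      # all ones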
Example #5
Source File: ttfs_corrective.py    From snn_toolbox with MIT License
def init_neurons(self, input_shape):
        """Init layer neurons."""

        from snntoolbox.bin.utils import get_log_keys, get_plot_keys

        output_shape = self.compute_output_shape(input_shape)
        self.mem = k.variable(self.init_membrane_potential(output_shape))
        self.time = k.variable(self.dt)
        if any({'spiketrains', 'spikerates', 'correlation', 'spikecounts',
                'hist_spikerates_activations', 'operations',
                'synaptic_operations_b_t', 'neuron_operations_b_t',
                'spiketrains_n_b_l_t'} & (get_plot_keys(self.config) |
               get_log_keys(self.config))):
            self.spiketrain = k.zeros(output_shape)
        self.last_spiketimes = k.variable(-np.ones(output_shape)) 
Example #6
Source File: losses.py    From ivis with GNU General Public License v2.0
def softmax_ratio_pn(y_true, y_pred):
    anchor, positive, negative = tf.unstack(y_pred)

    anchor_positive_distance = _euclidean_distance(anchor, positive)
    anchor_negative_distance = _euclidean_distance(anchor, negative)
    positive_negative_distance = _euclidean_distance(positive, negative)

    minimum_distance = K.min(K.concatenate([anchor_negative_distance, positive_negative_distance]), axis=-1, keepdims=True)

    softmax = K.softmax(K.concatenate([anchor_positive_distance, minimum_distance]))
    ideal_distance = K.variable([0, 1])
    return K.mean(K.maximum(softmax - ideal_distance, 0)) 
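The K.variable([0, 1]) target encodes the ideal softmax output: the anchor-positive distance should dominate toward 0 and the minimum negative distance toward 1, so K.maximum(softmax - ideal_distance, 0) penalizes only deviations from that ideal. A small numeric sketch of the idea, with illustrative distances:

import numpy as np
from tensorflow.keras import backend as K

d = K.variable(np.array([[0.1, 5.0]]))  # (anchor-positive, minimum-negative) distances
softmax = K.softmax(d)
print(K.eval(softmax))                                        # ~[[0.007, 0.993]]
print(K.eval(K.maximum(softmax - K.variable([0., 1.]), 0.)))  # ~[[0.007, 0.]]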
Example #7
Source File: ttfs.py    From snn_toolbox with MIT License
def get_time(self):
        """Get simulation time variable.

            Returns
            -------

            time: float
                Current simulation time.
            """

        return k.get_value(self.time) 
Example #8
Source File: ttfs.py    From snn_toolbox with MIT License
def set_time(self, time):
        """Set simulation time variable.

        Parameters
        ----------

        time: float
            Current simulation time.
        """

        k.set_value(self.time, time) 
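The get_time/set_time pairs in this and the following examples are thin wrappers around the backend round-trip between Python scalars and variables; a minimal standalone sketch of that round-trip (the variable name is illustrative):

from tensorflow.keras import backend as k

time = k.variable(0.0, name='sim_time')
k.set_value(time, 1.5)
assert float(k.get_value(time)) == 1.5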
Example #9
Source File: ttfs_dyn_thresh.py    From snn_toolbox with MIT License
def get_time(self):
        """Get simulation time variable.

            Returns
            -------

            time: float
                Current simulation time.
            """

        return k.get_value(self.time) 
Example #10
Source File: ttfs_dyn_thresh.py    From snn_toolbox with MIT License
def set_time(self, time):
        """Set simulation time variable.

        Parameters
        ----------

        time: float
            Current simulation time.
        """

        k.set_value(self.time, time) 
Example #11
Source File: ttfs_corrective.py    From snn_toolbox with MIT License
def get_time(self):
        """Get simulation time variable.

            Returns
            -------

            time: float
                Current simulation time.
            """

        return k.get_value(self.time) 
Example #12
Source File: ttfs_corrective.py    From snn_toolbox with MIT License
def set_time(self, time):
        """Set simulation time variable.

        Parameters
        ----------

        time: float
            Current simulation time.
        """

        k.set_value(self.time, time) 
Example #13
Source File: inits.py    From neuron with GNU General Public License v3.0
def output_init(shape, name=None, dim_ordering=None):
    ''' initialization for output weights'''
    size = (shape[0], shape[1], shape[2] - shape[3], shape[3])

    # initialize output weights with random and identity
    rpart = np.random.random(size)
#     idpart_ = np.eye(size[3])
    idpart_ = np.ones((size[3], size[3]))
    idpart = np.expand_dims(np.expand_dims(idpart_, 0), 0)
    value = np.concatenate((rpart, idpart), axis=2)
    return K.variable(value, name=name) 
Example #14
Source File: test_custom_unet.py    From keras-unet with MIT License
def test_conv2D_block_vanilla():
    v = K.variable(np.ones([1, 16, 16, 1]), dtype=np.float32)
    out = conv2d_block(v, use_batch_norm=True)
    assert out.shape.as_list() == [1, 16, 16, 16] 
Example #15
Source File: test_custom_unet.py    From keras-unet with MIT License
def test_conv2D_block_no_BN():
    v = K.variable(np.ones([1, 32, 32, 1]), dtype=np.float32)
    out = conv2d_block(v, use_batch_norm=False)
    assert out.shape.as_list() == [1, 32, 32, 16] 
Example #16
Source File: test_custom_unet.py    From keras-unet with MIT License
def test_conv2D_block_filters():
    v = K.variable(np.ones([1, 32, 32, 1]), dtype=np.float32)
    out = conv2d_block(v, use_batch_norm=False, filters=32)
    assert out.shape.as_list() == [1, 32, 32, 32] 
Example #17
Source File: test_custom_unet.py    From keras-unet with MIT License
def test_conv2D_block_elu():
    v = K.variable(np.ones([1, 32, 32, 1]), dtype=np.float32)
    out = conv2d_block(v, activation="elu")
    assert out.shape.as_list() == [1, 32, 32, 16] 
Example #18
Source File: test_custom_unet.py    From keras-unet with MIT License
def test_conv2D_block_standard_dropout():
    v = K.variable(np.ones([1, 16, 16, 1]), dtype=np.float32)
    out = conv2d_block(v, use_batch_norm=True, dropout_type="standard")
    assert out.shape.as_list() == [1, 16, 16, 16] 
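These tests drive conv2d_block eagerly by handing it a concrete K.variable instead of a symbolic Input; the same trick works for quick shape checks of any layer, as in this sketch with a stock Conv2D:

import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras import layers

v = K.variable(np.ones([1, 16, 16, 1]), dtype=np.float32)
out = layers.Conv2D(16, 3, padding='same')(v)  # 16 filters, 'same' padding
assert out.shape.as_list() == [1, 16, 16, 16]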
Example #19
Source File: WCCE.py    From TF.Keras-Commonly-used-models with Apache License 2.0
def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy
    
    Variables:
        weights: numpy array of shape (C,) where C is the number of classes
    
    Usage:
        weights = np.array([0.5, 2, 10])  # class 1 weighted 0.5x, class 2 weighted 2x, class 3 weighted 10x
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')
    """
    
    weights = K.variable(weights)
        
    def loss(y_true, y_pred):
        # scale predictions so that the class probabilities of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaNs and Infs
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss
    
    return loss 
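A quick sanity check, with illustrative values: with uniform weights the function reduces to ordinary categorical crossentropy, so each per-sample loss below is just -log of the probability assigned to the true class.

import numpy as np
from tensorflow.keras import backend as K

y_true = K.variable(np.array([[1., 0., 0.], [0., 1., 0.]]))
y_pred = K.variable(np.array([[0.9, 0.05, 0.05], [0.1, 0.8, 0.1]]))
loss_fn = weighted_categorical_crossentropy(np.array([1., 1., 1.]))
print(K.eval(loss_fn(y_true, y_pred)))  # ~[0.105, 0.223] = [-log 0.9, -log 0.8]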
Example #20
Source File: losses.py    From ivis with GNU General Public License v2.0
def softmax_ratio(y_true, y_pred):
    anchor, positive, negative = tf.unstack(y_pred)

    positive_distance = _euclidean_distance(anchor, positive)
    negative_distance = _euclidean_distance(anchor, negative)

    softmax = K.softmax(K.concatenate([positive_distance, negative_distance]))
    ideal_distance = K.variable([0, 1])
    return K.mean(K.maximum(softmax - ideal_distance, 0)) 
Example #21
Source File: time_frequency.py    From kapre with MIT License
def build(self, input_shape):
        self.n_ch = input_shape[1]
        self.len_src = input_shape[2]
        self.is_mono = self.n_ch == 1
        if self.image_data_format == 'channels_first':
            self.ch_axis_idx = 1
        else:
            self.ch_axis_idx = 3
        if self.len_src is not None:
            assert self.len_src >= self.n_dft, 'Hey! The input is too short!'

        self.n_frame = conv_output_length(self.len_src, self.n_dft, self.padding, self.n_hop)

        dft_real_kernels, dft_imag_kernels = backend.get_stft_kernels(self.n_dft)
        self.dft_real_kernels = K.variable(dft_real_kernels, dtype=K.floatx(), name="real_kernels")
        self.dft_imag_kernels = K.variable(dft_imag_kernels, dtype=K.floatx(), name="imag_kernels")
        # kernels shapes: (filter_length, 1, input_dim, nb_filter)?
        if self.trainable_kernel:
            self.trainable_weights.append(self.dft_real_kernels)
            self.trainable_weights.append(self.dft_imag_kernels)
        else:
            self.non_trainable_weights.append(self.dft_real_kernels)
            self.non_trainable_weights.append(self.dft_imag_kernels)

        super(Spectrogram, self).build(input_shape)
        # self.built = True 
Example #22
Source File: layers.py    From neuron with GNU General Public License v3.0
def __init__(self, cap=100, **kwargs):
        self.cap = K.variable(cap, dtype='float32')
        super(MeanStream, self).__init__(**kwargs) 
Example #23
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        mult_shape = list(input_shape)[1:] + [self.output_features]
        ndims = len(list(input_shape)[1:-1])

        # verify initializer
        if self.mult_initializer is None:
            mean = 1/input_shape[-1]
            stddev = 0.01
            self.mult_initializer = keras.initializers.RandomNormal(mean=mean, stddev=stddev)
        
        self.mult = self.add_weight(name='mult-kernel', 
                                      shape=mult_shape,
                                      initializer=self.mult_initializer,
                                      regularizer=self.mult_regularizer,
                                      trainable=True)

        self.trf = self.add_weight(name='def-kernel', 
                                      shape=mult_shape + [ndims],
                                      initializer=keras.initializers.RandomNormal(mean=0, stddev=0.001),
                                      trainable=True)

        if self.use_bias:
            if self.bias_initializer is None:
                mean = 1/input_shape[-1]
                stddev = 0.01
                self.bias_initializer = keras.initializers.RandomNormal(mean=mean, stddev=stddev)
            
            bias_shape = list(input_shape)[1:-1] + [self.output_features]
            self.bias = self.add_weight(name='bias-kernel', 
                                          shape=bias_shape,
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          trainable=True)
        
        super(LocalCrossLinearTrf, self).build(input_shape) 
Example #24
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        mult_shape = [1] + list(input_shape)[1:] + [self.output_features]

        # verify initializer
        if self.mult_initializer is None:
            mean = 1/input_shape[-1]
            stddev = 0.01
            self.mult_initializer = keras.initializers.RandomNormal(mean=mean, stddev=stddev)
        
        self.mult = self.add_weight(name='mult-kernel', 
                                      shape=mult_shape,
                                      initializer=self.mult_initializer,
                                      regularizer=self.mult_regularizer,
                                      trainable=True)

        if self.use_bias:
            if self.bias_initializer is None:
                mean = 1/input_shape[-1]
                stddev = 0.01
                self.bias_initializer = keras.initializers.RandomNormal(mean=mean, stddev=stddev)
            
            bias_shape = [1] + list(input_shape)[1:-1] + [self.output_features]
            self.bias = self.add_weight(name='bias-kernel', 
                                          shape=bias_shape,
                                          initializer=self.bias_initializer,
                                          regularizer=self.bias_regularizer,
                                          trainable=True)
        super(LocalCrossLinear, self).build(input_shape) 
Example #25
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel', 
                                      shape=input_shape[1:],
                                      initializer=self.initializer,
                                      trainable=True)
        super(LocalBias, self).build(input_shape)  # Be sure to call this somewhere! 
Example #26
Source File: layers.py    From neuron with GNU General Public License v3.0
    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='mult-kernel',
                                    shape=(np.prod(self.orig_input_shape),
                                           self.output_len),
                                    initializer=self.kernel_initializer,
                                    trainable=True)

        M = K.reshape(self.kernel, [-1, self.output_len])  # D x d
        mt = K.transpose(M) # d x D
        mtm_inv = tf.matrix_inverse(K.dot(mt, M))  # d x d (tf.matrix_inverse is the TF 1.x name; TF 2.x uses tf.linalg.inv)
        self.W = K.dot(mtm_inv, mt) # d x D

        if self.use_bias:
            self.bias = self.add_weight(name='bias-kernel',
                                        shape=(self.output_len, ),
                                        initializer=self.bias_initializer,
                                        trainable=True)

        # self.sigma_sq = self.add_weight(name='bias-kernel',
        #                                 shape=(1, ),
        #                                 initializer=self.initializer,
        #                                 trainable=True)

        super(SpatiallySparse_Dense, self).build(input_shape)  # Be sure to call this somewhere! 
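The kernel algebra above builds the left pseudo-inverse W = (M^T M)^-1 M^T of the D x d kernel, so that W . M = I. A standalone sketch of the same computation in TF 2.x terms, with an illustrative 6x2 matrix:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

M = K.variable(np.random.rand(6, 2))        # D x d
mt = K.transpose(M)                         # d x D
W = K.dot(tf.linalg.inv(K.dot(mt, M)), mt)  # d x D, left pseudo-inverse
print(K.eval(K.dot(W, M)))                  # ~2x2 identity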
Example #27
Source File: metrics.py    From neuron with GNU General Public License v3.0
    def loss(self, y_true, y_pred):
        """ prepare a loss of the given metric/loss operating on non-bg data """
        yt = y_true  # .eval()
        # flat indices of the non-background entries; note that .flat is an
        # iterator, so it is indexed with brackets rather than called
        ytbg = np.flatnonzero(yt)
        y_true_fix = K.variable(yt.flat[ytbg])
        y_pred_fix = K.variable(y_pred.flat[ytbg])
        return self.metric(y_true_fix, y_pred_fix)

Example #28
Source File: metrics.py    From neuron with GNU General Public License v3.0
def loss(self, y_true, y_pred):
        total_loss = K.variable(0)
        for idx, loss in enumerate(self.losses):
            total_loss += self.loss_weights[idx] * loss(y_true, y_pred)
        return total_loss 
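Following the same accumulator pattern, a self-contained sketch with two illustrative component losses (in eager TF 2.x a plain 0.0 would serve equally well as the accumulator; K.variable is kept for symmetry with the example):

import numpy as np
from tensorflow.keras import backend as K

y_true = K.variable(np.array([[1.0, 0.0]]))
y_pred = K.variable(np.array([[0.8, 0.1]]))
mse = lambda t, p: K.mean(K.square(t - p), axis=-1)
mae = lambda t, p: K.mean(K.abs(t - p), axis=-1)
total = K.variable(0.0)
for weight, fn in [(0.7, mse), (0.3, mae)]:
    total = total + weight * fn(y_true, y_pred)
print(K.eval(total))  # 0.7 * 0.025 + 0.3 * 0.15 = 0.0625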
Example #29
Source File: metrics.py    From neuron with GNU General Public License v3.0
def __init__(self, nb_labels,
                 weights=None,
                 input_type='prob',
                 dice_type='soft',
                 approx_hard_max=True,
                 vox_weights=None,
                 crop_indices=None,
                 re_norm=False,
                 area_reg=0.1):  # regularization for bottom of Dice coeff
        """
        input_type is 'prob', or 'max_label'
        dice_type is hard or soft
        approx_hard_max - see note below

        Note: for hard dice, we grab the most likely label and then compute a
        one-hot encoding for each voxel with respect to possible labels. To grab the most
        likely labels, argmax() can be used, but only when Dice is used as a metric
        For a Dice *loss*, argmax is not differentiable, and so we can't use it
        Instead, we approximate the prob->one_hot translation when approx_hard_max is True.
        """

        self.nb_labels = nb_labels
        self.weights = None if weights is None else K.variable(weights)
        self.vox_weights = None if vox_weights is None else K.variable(vox_weights)
        self.input_type = input_type
        self.dice_type = dice_type
        self.approx_hard_max = approx_hard_max
        self.area_reg = area_reg
        self.crop_indices = crop_indices
        self.re_norm = re_norm

        if self.crop_indices is not None and vox_weights is not None:
            self.vox_weights = utils.batch_gather(self.vox_weights, self.crop_indices) 
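One common way to make the prob-to-one-hot translation differentiable (not necessarily the exact scheme this class uses) is a sharpened softmax, where a temperature-like factor pushes the probabilities toward one-hot:

import numpy as np
from tensorflow.keras import backend as K

probs = K.variable(np.array([[0.2, 0.5, 0.3]]))  # illustrative voxel probabilities
tau = 20.0                                       # sharpness factor (assumption)
print(K.eval(K.softmax(probs * tau)))            # ~[[0.002, 0.980, 0.018]]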
Example #30
Source File: optimizers_225tf.py    From keras-adamw with MIT License
def __init__(self, learning_rate=0.01, momentum=0.0, nesterov=False,
                 model=None, zero_penalties=True, batch_size=32,
                 total_iterations=0, total_iterations_wd=None,
                 use_cosine_annealing=False, lr_multipliers=None,
                 weight_decays=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, name="SGDW", **kwargs):
        if total_iterations > 1:
            weight_decays = _init_weight_decays(model, zero_penalties,
                                                weight_decays)

        eta_t = kwargs.pop('eta_t', 1.)
        super(SGDW, self).__init__(name, **kwargs)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)

        self._momentum = False
        if isinstance(momentum, ops.Tensor) or callable(momentum) or momentum > 0:
            self._momentum = True
        if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
            raise ValueError("`momentum` must be in the range [0, 1].")
        self._set_hyper("momentum", momentum)

        self.nesterov = nesterov
        self.eta_min = K.constant(eta_min, name='eta_min')
        self.eta_max = K.constant(eta_max, name='eta_max')
        self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
        self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')
        self.batch_size = batch_size
        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing

        _check_args(self, total_iterations, use_cosine_annealing, weight_decays)
        self._init_lr = kwargs.get('lr', learning_rate)  # to print lr_mult setup
        self._updates_processed = 0  # to track num calls to '_resource_apply_...'
        self._init_notified = False
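Note the split between K.constant and K.variable above: eta_min and eta_max never change, while eta_t and t_cur are advanced by the training loop, so they must be variables that K.set_value can mutate. A minimal sketch mirroring those names:

from tensorflow.keras import backend as K

eta_t = K.variable(1.0, dtype='float32', name='eta_t')
t_cur = K.variable(0, dtype='int64', name='t_cur')
K.set_value(t_cur, 100)    # e.g. advance the cosine-annealing step counter
print(K.get_value(t_cur))  # 100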