Python keras.backend.variable() Examples

The following are 30 code examples of keras.backend.variable(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or browse all available functions and classes of the keras.backend module.
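Before diving into the project examples, here is a minimal, self-contained sketch of what keras.backend.variable() does: it wraps a value (a scalar or a numpy array) in a backend variable that can be read with K.eval(), updated in place with K.set_value(), and combined with other backend operations. The values below are purely illustrative.

import numpy as np
from keras import backend as K

# Wrap a numpy array in a backend variable; the dtype defaults to K.floatx().
v = K.variable(np.array([[1., 2.], [3., 4.]]), name='example_var')

print(K.dtype(v))   # e.g. 'float32'
print(K.eval(v))    # read the current value back as a numpy array

K.set_value(v, np.zeros((2, 2)))  # update the variable in place
print(K.eval(K.sum(v)))           # variables compose with backend ops: 0.0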
Example #1
Source File: yogi.py    From keras-contrib with MIT License
def __init__(self, lr=0.01, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-3, decay=0., **kwargs):
        super(Yogi, self).__init__(**kwargs)
        if beta_1 <= 0 or beta_1 >= 1:
            raise ValueError("beta_1 has to be in ]0, 1[")
        if beta_2 <= 0 or beta_2 >= 1:
            raise ValueError("beta_2 has to be in ]0, 1[")

        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
        if epsilon is None:
            epsilon = K.epsilon()
        if epsilon <= 0:
            raise ValueError("epsilon has to be larger than 0")
        self.epsilon = epsilon
        self.initial_decay = decay 
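Optimizer examples like this one store their hyperparameters as backend variables inside a name scope so they can be inspected and updated after compilation. A hedged usage sketch, assuming Yogi is importable from keras_contrib.optimizers (the model and shapes are placeholders):

from keras.models import Sequential
from keras.layers import Dense
from keras import backend as K
from keras_contrib.optimizers import Yogi  # assumes keras-contrib is installed

model = Sequential([Dense(10, activation='softmax', input_shape=(100,))])
model.compile(optimizer=Yogi(lr=0.01), loss='categorical_crossentropy')

# Because lr is a K.variable, it can be changed at runtime, e.g. from a callback:
K.set_value(model.optimizer.lr, 0.001)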
Example #2
Source File: AdamAccumulate.py    From Coloring-greyscale-images with MIT License
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0., amsgrad=False, accum_iters=1, **kwargs):
        if accum_iters < 1:
            raise ValueError('accum_iters must be >= 1')
        super(AdamAccumulate, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay
        self.amsgrad = amsgrad
        self.accum_iters = K.variable(accum_iters, K.dtype(self.iterations))
        self.accum_iters_float = K.cast(self.accum_iters, K.floatx()) 
Example #3
Source File: recurrent.py    From keras_bn_library with MIT License
def build(self, input_shape):
		self.input_spec = [InputSpec(shape=input_shape)]
		self.input_dim = input_shape[2]

		self.W = self.init((self.output_dim, 4 * self.input_dim),
		                   name='{}_W'.format(self.name))
		self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
		                         name='{}_U'.format(self.name))
		self.b = K.variable(np.hstack((np.zeros(self.input_dim),
		                               K.get_value(self.forget_bias_init((self.input_dim,))),
		                               np.zeros(self.input_dim),
		                               np.zeros(self.input_dim))),
		                    name='{}_b'.format(self.name))

		self.A = self.init((self.input_dim, self.output_dim),
		                    name='{}_A'.format(self.name))
		self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))


		self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

		if self.initial_weights is not None:
			self.set_weights(self.initial_weights)
			del self.initial_weights 
Example #4
Source File: adabound.py    From keras-adabound with MIT License
def __init__(self, lr=0.001, final_lr=0.1, beta_1=0.9, beta_2=0.999, gamma=1e-3,
                 epsilon=None, decay=0., amsbound=False, weight_decay=0.0, **kwargs):
        super(AdaBound, self).__init__(**kwargs)

        if not 0. <= gamma <= 1.:
            raise ValueError("Invalid `gamma` parameter. Must lie in [0, 1] range.")

        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')

        self.final_lr = final_lr
        self.gamma = gamma

        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay
        self.amsbound = amsbound

        self.weight_decay = float(weight_decay)
        self.base_lr = float(lr) 
Example #5
Source File: analogy.py    From image-analogies with MIT License
def find_analogy_patches(a, a_prime, b, patch_size=3, patch_stride=1):
    '''Precalculate the patch matches for the analogy loss.

    Since A, A', and B never change, the patch matches only need to be
    calculated once.
    '''
    # extract patches from feature maps
    a_patches, a_patches_norm = patches.make_patches(K.variable(a), patch_size, patch_stride)
    a_prime_patches, a_prime_patches_norm = patches.make_patches(K.variable(a_prime), patch_size, patch_stride)
    b_patches, b_patches_norm = patches.make_patches(K.variable(b), patch_size, patch_stride)
    # find best patches and calculate loss
    p = patches.find_patch_matches(b_patches, b_patches_norm, a_patches / a_patches_norm)
    #best_patches = a_prime_patches[p]
    best_patches = K.reshape(a_prime_patches[p], K.shape(b_patches))
    f = K.function([], best_patches)
    best_patches = f([])
    return best_patches 
Example #6
Source File: training.py    From neural-style-keras with MIT License
def get_total_loss(content_losses, style_losses, total_var_loss,
                   content_weights, style_weights, tv_weights, class_targets):
    total_loss = K.variable(0.)
    weighted_content_losses = []
    weighted_style_losses = []

    # Compute content losses
    for loss in content_losses:
        weighted_loss = K.mean(K.gather(content_weights, class_targets) * loss)
        weighted_content_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute style losses
    for loss in style_losses:
        weighted_loss = K.mean(K.gather(style_weights, class_targets) * loss)
        weighted_style_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute tv loss
    weighted_tv_loss = K.mean(K.gather(tv_weights, class_targets) *
                              total_var_loss)
    total_loss += weighted_tv_loss

    return (total_loss, weighted_content_losses, weighted_style_losses,
            weighted_tv_loss) 
Example #7
Source File: padam.py    From keras-contrib with MIT License
def __init__(self, lr=1e-1, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, decay=0., amsgrad=False, partial=1. / 8., **kwargs):
        if partial < 0 or partial > 0.5:
            raise ValueError(
                "Padam: 'partial' must be a positive float with a maximum "
                "value of `0.5`, since higher values will cause divergence "
                "during training."
            )
        super(Padam, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.partial = partial
        self.initial_decay = decay
        self.amsgrad = amsgrad 
Example #8
Source File: lars.py    From keras-contrib with MIT License
def __init__(self,
                 lr,
                 momentum=0.9,
                 weight_decay=0.0001,
                 eeta=0.001,
                 epsilon=0.0,
                 nesterov=False,
                 **kwargs):

        if momentum < 0.0:
            raise ValueError("momentum should be positive: %s" % momentum)
        if weight_decay < 0.0:
            raise ValueError("weight_decay is not positive: %s" % weight_decay)
        super(LARS, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.momentum = K.variable(momentum, name='momentum')
            self.weight_decay = K.variable(weight_decay, name='weight_decay')
            self.eeta = K.variable(eeta, name='eeta')
        self.epsilon = epsilon
        self.nesterov = nesterov 
Example #9
Source File: layers.py    From voxelmorph with GNU General Public License v3.0
def build(self, input_shape):
        # Create mean and count.
        # These are layer weights rather than plain backend variables because
        # bare variables don't get saved with the model, and we'd like these
        # statistics to persist when the model is saved. The weights must,
        # however, be marked non-trainable.
        self.mean = self.add_weight(name='mean', 
                                      shape=input_shape[1:],
                                      initializer='zeros',
                                      trainable=False)
        self.count = self.add_weight(name='count', 
                                      shape=[1],
                                      initializer='zeros',
                                      trainable=False)

        # self.mean = K.zeros(input_shape[1:], name='mean')
        # self.count = K.variable(0.0, name='count')
        super(MeanStream, self).build(input_shape)  # Be sure to call this somewhere! 
Example #10
Source File: test_subpixelupscaling.py    From keras-contrib with MIT License
def test_sub_pixel_upscaling(scale_factor):
    num_samples = 2
    num_row = 16
    num_col = 16
    input_dtype = K.floatx()

    nb_channels = 4 * (scale_factor ** 2)
    input_data = np.random.random((num_samples, nb_channels, num_row, num_col))
    input_data = input_data.astype(input_dtype)

    if K.image_data_format() == 'channels_last':
        input_data = input_data.transpose((0, 2, 3, 1))

    input_tensor = K.variable(input_data)
    expected_output = K.eval(KC.depth_to_space(input_tensor,
                                               scale=scale_factor))

    layer_test(SubPixelUpscaling,
               kwargs={'scale_factor': scale_factor},
               input_data=input_data,
               expected_output=expected_output,
               expected_output_dtype=K.floatx()) 
Example #11
Source File: backend_test.py    From keras-contrib with MIT License
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name, second_function_args,
                                     input_shape):
    ''' Creates a random tensor t0 with shape input_shape, computes
                 t1 = first_function_name(t0, **first_function_args)
                 t2 = second_function_name(t1, **second_function_args)
        with both the Theano and TensorFlow backends, and asserts that the
        results match.
    '''
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    yth = getattr(KCTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KCTF, first_function_name)(xtf, **first_function_args)

    zth = KTH.eval(getattr(KCTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(getattr(KCTF, second_function_name)(ytf, **second_function_args))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05) 
Example #12
Source File: train.py    From stochastic_depth_keras with MIT License
def residual_drop(x, input_shape, output_shape, strides=(1, 1)):
    global add_tables

    nb_filter = output_shape[0]
    conv = Convolution2D(nb_filter, 3, 3, subsample=strides,
                         border_mode="same", W_regularizer=l2(weight_decay))(x)
    conv = BatchNormalization(axis=1)(conv)
    conv = Activation("relu")(conv)
    conv = Convolution2D(nb_filter, 3, 3,
                         border_mode="same", W_regularizer=l2(weight_decay))(conv)
    conv = BatchNormalization(axis=1)(conv)

    if strides[0] >= 2:
        x = AveragePooling2D(strides)(x)

    if (output_shape[0] - input_shape[0]) > 0:
        pad_shape = (1,
                     output_shape[0] - input_shape[0],
                     output_shape[1],
                     output_shape[2])
        padding = K.zeros(pad_shape)
        padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
        x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                   output_shape=output_shape)(x)

    _death_rate = K.variable(death_rate)
    scale = K.ones_like(conv) - _death_rate
    conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                  output_shape=output_shape)(conv)

    out = merge([conv, x], mode="sum")
    out = Activation("relu")(out)

    gate = K.variable(1, dtype="uint8")
    add_tables += [{"death_rate": _death_rate, "gate": gate}]
    return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                  output_shape=output_shape)([out, x]) 
Example #13
Source File: image_classifier.py    From image-segmentation with MIT License
def weighted_focal_loss(weights, gamma):
    weights = K.variable([weights])
    gamma = K.variable([gamma])

    def loss(gt, pr):
        # scale preds so that the class probas of each sample sum to 1
        pr /= tf.reduce_sum(pr, axis=-1, keep_dims=True)
        # manual computation of crossentropy
        pr = tf.clip_by_value(pr, K.epsilon(), 1. - K.epsilon())
        return K.mean(-tf.reduce_sum(gt * K.pow(1. - pr, gamma) * tf.log(pr) * weights, axis=-1))

    return loss 
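Because weighted_focal_loss returns a closure over the K.variable-wrapped weights and gamma, the result can be passed directly to model.compile(). A hedged sketch, meant to run alongside the definition above; the class count, weights, and model are illustrative placeholders:

from keras.models import Sequential
from keras.layers import Dense

# Three classes; up-weight the (assumed) rare third class.
focal = weighted_focal_loss(weights=[1., 1., 5.], gamma=2.)

model = Sequential([Dense(3, activation='softmax', input_shape=(20,))])
model.compile(optimizer='adam', loss=focal)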
Example #14
Source File: ftml.py    From keras-contrib with MIT License
def __init__(self, lr=0.0025, beta_1=0.6, beta_2=0.999,
                 epsilon=1e-8, decay=0., **kwargs):
        super(FTML, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.iterations = K.variable(0)
        self.lr = K.variable(lr)
        self.beta_1 = K.variable(beta_1)
        self.beta_2 = K.variable(beta_2)
        self.decay = K.variable(decay)
        self.epsilon = epsilon
        self.initial_decay = decay 
Example #15
Source File: metrics.py    From keras-contrib with MIT License
def validate_metric(metric):
    y_a = K.variable(np.random.random((6, 7)))
    y_b = K.variable(np.random.random((6, 7)))
    output = metric(y_a, y_b)
    assert K.eval(output).shape == () 
Example #16
Source File: keras2_emitter.py    From MMdnn with MIT License
def emit_FullyConnected(self, IR_node, in_scope=False):
        if in_scope:
            code = "{:<15} = K.bias_add(K.dot({}, K.variable(weights_dict['{}']['weights'])), K.variable(weights_dict['{}']['bias']))".format(
                IR_node.variable_name, 
                self.parent_variable_name(IR_node),
                IR_node.name,
                IR_node.name)
        else:
            code = "{:<15} = layers.Dense(name = '{}', units = {}, use_bias = {})({})".format(
                IR_node.variable_name,
                IR_node.name,
                IR_node.get_attr('units'),
                IR_node.get_attr('use_bias'),
                self.parent_variable_name(IR_node))
        return code 
Example #17
Source File: cosineconvolution2d.py    From keras-contrib with MIT License
def build(self, input_shape):
        input_shape = to_tuple(input_shape)
        if self.data_format == 'channels_first':
            stack_size = input_shape[1]
            self.kernel_shape = (self.filters, stack_size, self.nb_row, self.nb_col)
            self.kernel_norm_shape = (1, stack_size, self.nb_row, self.nb_col)
        elif self.data_format == 'channels_last':
            stack_size = input_shape[3]
            self.kernel_shape = (self.nb_row, self.nb_col, stack_size, self.filters)
            self.kernel_norm_shape = (self.nb_row, self.nb_col, stack_size, 1)
        else:
            raise ValueError('Invalid data_format:', self.data_format)
        self.W = self.add_weight(shape=self.kernel_shape,
                                 initializer=partial(self.kernel_initializer),
                                 name='{}_W'.format(self.name),
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)

        kernel_norm_name = '{}_kernel_norm'.format(self.name)
        self.kernel_norm = K.variable(np.ones(self.kernel_norm_shape),
                                      name=kernel_norm_name)

        if self.use_bias:
            self.b = self.add_weight(shape=(self.filters,),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
        else:
            self.b = None

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True 
Example #18
Source File: backend_test.py    From keras-contrib with MIT License
def check_single_tensor_operation(function_name, input_shape, **kwargs):
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    zth = KTH.eval(getattr(KCTH, function_name)(xth, **kwargs))
    ztf = KTF.eval(getattr(KCTF, function_name)(xtf, **kwargs))

    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05) 
Example #19
Source File: backend_test.py    From keras-contrib with MIT License
def test_extract(self, input_shape, kernel_shape):
        xval = np.random.random(input_shape)
        kernel = [kernel_shape, kernel_shape]
        strides = [kernel_shape, kernel_shape]
        xth = KTH.variable(xval)
        xtf = KTF.variable(xval)
        ztf = KTF.eval(KCTF.extract_image_patches(xtf, kernel, strides,
                                                  data_format='channels_first',
                                                  padding='valid'))
        zth = KTH.eval(KCTH.extract_image_patches(xth, kernel, strides,
                                                  data_format='channels_first',
                                                  padding='valid'))
        assert zth.shape == ztf.shape
        assert_allclose(zth, ztf, atol=1e-02) 
Example #20
Source File: base.py    From image-analogies with MIT License
def build_loss(self, a_image, ap_image, b_image):
        '''Create an expression for the loss as a function of the image inputs.'''
        loss = K.variable(0.0)
        # get the symbolic outputs of each "key" layer (we gave them unique names).
        loss += self.args.tv_weight * total_variation_loss(self.net_input, *b_image.shape[2:])
        return loss 
Example #21
Source File: normalization.py    From DL.EyeSight with GNU General Public License v3.0
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        gamma = self.gamma_init * np.ones((input_shape[self.axis],))
        self.gamma = K.variable(gamma, name="{}_gamma".format(self.name))
        self.trainable_weights = [self.gamma]
        super(L2Normalization, self).build(input_shape) 
Example #22
Source File: keras_utils.py    From KATE with BSD 3-Clause "New" or "Revised" License
def contractive_loss(model, lam=1e-4):
    def loss(y_true, y_pred):
        ent_loss = K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)

        W = K.variable(value=model.encoder.get_weights()[0])  # N x N_hidden
        W = K.transpose(W)  # N_hidden x N
        h = model.encoder.output
        dh = h * (1 - h)  # N_batch x N_hidden

        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
        contractive = lam * K.sum(dh**2 * K.sum(W**2, axis=1), axis=1)

        return ent_loss + contractive
    return loss 
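A usage sketch under stated assumptions: the model passed to contractive_loss must expose an encoder attribute whose first weight matrix and symbolic output feed the penalty, and note that the weights are snapshotted via K.variable when the loss closure runs. The tiny autoencoder below is illustrative, not from the KATE project.

from keras.models import Model
from keras.layers import Input, Dense

inp = Input(shape=(784,))
hidden = Dense(64, activation='sigmoid')(inp)
out = Dense(784, activation='sigmoid')(hidden)

autoencoder = Model(inp, out)
autoencoder.encoder = Model(inp, hidden)  # attach the attribute the loss expects

autoencoder.compile(optimizer='adam', loss=contractive_loss(autoencoder))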
Example #23
Source File: models.py    From DigiX_HuaWei_Population_Age_Attribution_Predict with MIT License
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0., **kwargs):
    super(AMSgrad, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    self.epsilon = epsilon
    self.initial_decay = decay 
Example #24
Source File: inits.py    From voxelmorph with GNU General Public License v3.0
def output_init(shape, name=None, dim_ordering=None):
    ''' initialization for output weights'''
    size = (shape[0], shape[1], shape[2] - shape[3], shape[3])

    # initialize output weights with random and identity
    rpart = np.random.random(size)
#     idpart_ = np.eye(size[3])
    idpart_ = np.ones((size[3], size[3]))
    idpart = np.expand_dims(np.expand_dims(idpart_, 0), 0)
    value = np.concatenate((rpart, idpart), axis=2)
    return K.variable(value, name=name) 
Example #25
Source File: layers.py    From voxelmorph with GNU General Public License v3.0
def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.mult = self.add_weight(name='mult-kernel', 
                                      shape=input_shape[1:],
                                      initializer=self.initializer,
                                      trainable=True)
        self.bias = self.add_weight(name='bias-kernel', 
                                      shape=input_shape[1:],
                                      initializer=self.initializer,
                                      trainable=True)
        super(LocalLinear, self).build(input_shape)  # Be sure to call this somewhere! 
Example #26
Source File: layers.py    From voxelmorph with GNU General Public License v3.0
def __init__(self, cap=100, **kwargs):
        self.cap = K.variable(cap, dtype='float32')
        super(MeanStream, self).__init__(**kwargs) 
Example #27
Source File: layers.py    From voxelmorph with GNU General Public License v3.0
def compute_output_shape(self, input_shape):
        return input_shape


# class LocalParam(InputLayer):

#     def __init__(self, shape, mult=1, my_initializer='RandomNormal', **kwargs):
#         super(LocalParam, self).__init__(input_shape=shape, **kwargs)       
       
#         # Create a trainable weight variable for this layer.
#         self.kernel = self.add_weight(name='kernel', 
#                                       shape=tuple(shape),
#                                       initializer=my_initializer,
#                                       trainable=True)
        
#         outputs = self._inbound_nodes[0].output_tensors
#         z = Input(tensor=K.expand_dims(self.kernel, 0)*mult)
#         if len(outputs) == 1:
#             self._inbound_nodes[0].output_tensors[0] = z
#         else:
#             self._inbound_nodes[0].output_tensors = z
      
#     def get_output(self):  # call() would force inputs
#             outputs = self._inbound_nodes[0].output_tensors
#             if len(outputs) == 1:
#                 return outputs[0]
#             else:
#                 return outputs 
Example #28
Source File: layers.py    From voxelmorph with GNU General Public License v3.0
def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel', 
                                      shape=input_shape[1:],
                                      initializer=self.initializer,
                                      trainable=True)
        super(LocalBias, self).build(input_shape)  # Be sure to call this somewhere! 
Example #29
Source File: embeddings.py    From fancy-cnn with MIT License
def __init__(self, s=3, skip=True):
        self.skip = skip
        self.s = K.variable(s, name='s_constraint') 
Example #30
Source File: metrics.py    From voxelmorph with GNU General Public License v3.0
def loss(self, y_true, y_pred):
        total_loss = K.variable(0)
        for idx, loss in enumerate(self.losses):
            total_loss += self.loss_wts[idx] * loss(y_true, y_pred)
        return total_loss
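For context, a minimal sketch of the kind of wrapper class this loss method belongs to; the class name and constructor are assumptions for illustration, not the voxelmorph original:

from keras import backend as K
from keras.losses import mean_squared_error, mean_absolute_error

class WeightedLossSum(object):
    def __init__(self, losses, loss_wts):
        self.losses = losses      # callables with signature loss(y_true, y_pred)
        self.loss_wts = loss_wts  # matching list of scalar weights

    def loss(self, y_true, y_pred):
        total_loss = K.variable(0)
        for idx, loss in enumerate(self.losses):
            total_loss += self.loss_wts[idx] * loss(y_true, y_pred)
        return total_loss

# e.g. a weighted blend of MSE and MAE:
# model.compile(optimizer='adam',
#               loss=WeightedLossSum([mean_squared_error, mean_absolute_error],
#                                    [1.0, 0.1]).loss)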