Python keras.backend.variable() Examples

The following code examples show how to use keras.backend.variable(). They are drawn from open-source Python projects.
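Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the basic workflow: create a backend variable from a numpy array, read it back with K.eval(), and update it in place with K.set_value().

import numpy as np
from keras import backend as K

v = K.variable(np.array([[1., 2.], [3., 4.]]), name='example_var')
print(K.eval(v))                   # fetch the current value as a numpy array
K.set_value(v, np.zeros((2, 2)))   # overwrite the variable's value in place
print(K.eval(v))                   # now all zeros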

Example 1
Project: phoneticSimilarity   Author: ronggong   File: regression_train_predict.py    GNU Affero General Public License v3.0 7 votes
def evaluate_model(model, X, y, scaler):
    """
    evaluate the model with MAE loss
    :param model:
    :param X:
    :param y:
    :param scaler:
    :return:
    """

    y_pred = model_prediction(model, X, scaler)

    # print(y.shape, y_pred.shape)
    y = K.variable(y)
    y_pred = K.variable(y_pred)

    loss = K.eval(mean_absolute_error(y, y_pred))

    return loss 
Example 2
Project: AI2-Reasoning-Challenge-ARC   Author: SebiSebi   File: keras_custom_layers.py    GNU General Public License v3.0 6 votes
def main():
    input = Input(shape=(7, 2))
    output_1, state1_h, state1_c = LSTM(4, return_sequences=True,
                                        return_state=True)(input)
    output_2 = LSTM(4)(output_1, initial_state=[state1_h, state1_c])
    output_3 = LinkedAttention(250)([output_1, output_2])

    # state_h and state_c are only for the last timestamp.
    # output_1[-1] == state_h

    model = Model(inputs=[input], outputs=[output_1, output_3])
    model.compile(loss=categorical_crossentropy,
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    # y = model.predict(np.ones((3, 7, 2)), batch_size=3)

    '''
    x = K.variable(value=np.array([[[1, 4]], [[-3, 2]]]))
    y = K.variable(value=np.array([[[1, 2, 3], [-1, 5, 2]],
                                  [[3, 4, 1], [1, 6, 4]]]))
    z = K.batch_dot(x, y)
    print(x.shape)
    print(K.eval(z))
    ''' 
Example 3
Project: deep-learning-keras   Author: arnaudvl   File: nn.py    MIT License 6 votes
def _calc_metric(self,y_true,y_pred):
        """
        Calculate evaluation metric.
        
        Supports: "roc-auc","norm-gini","mean_squared_error","mean_absolute_error",
                  "categorical_crossentropy","binary_crossentropy".
        """
        if self._val_loss=='roc-auc':
            metric = roc_auc_score(y_true, y_pred)
        elif self._val_loss=='norm-gini':
            metric = (2 * roc_auc_score(y_true, y_pred)) - 1
        elif self._val_loss=='mean_squared_error':
            metric = K.eval(mean_squared_error(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='mean_absolute_error':
            metric = K.eval(mean_absolute_error(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='categorical_crossentropy':
            metric = K.eval(categorical_crossentropy(K.variable(y_true), K.variable(y_pred)))
        elif self._val_loss=='binary_crossentropy':
            metric = K.eval(binary_crossentropy(K.variable(y_true), K.variable(y_pred)))
        else:
            raise ValueError('Invalid value for custom_eval_stopping["name"]; '
                             '"roc-auc", "norm-gini", "mean_squared_error", '
                             '"mean_absolute_error", "categorical_crossentropy" '
                             'and "binary_crossentropy" are supported.')
        return metric 
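The K.variable / K.eval pairing used above is the core pattern of most examples on this page: wrap numpy arrays as backend tensors, apply a symbolic Keras loss, then evaluate the result back to numpy. A minimal standalone sketch (array values are illustrative):

import numpy as np
from keras import backend as K
from keras.losses import mean_squared_error

y_true = np.array([[1., 0.], [0., 1.]])
y_pred = np.array([[0.9, 0.1], [0.2, 0.8]])
mse = K.eval(mean_squared_error(K.variable(y_true), K.variable(y_pred)))
print(mse)  # per-sample MSE, shape (2,)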
Example 4
Project: deep-learning-keras   Author: arnaudvl   File: nn.py    MIT License 6 votes
def _get_metric(self,y_true,y_pred):
        """
        Calculate metric being logged.
        
        Supports: "roc-auc","norm-gini","mean_squared_error","mean_absolute_error",
                  "categorical_crossentropy","binary_crossentropy".
        """
        if self._metric=='roc-auc':
            metric = roc_auc_score(y_true, y_pred)
        elif self._metric=='norm-gini':
            metric = (2 * roc_auc_score(y_true, y_pred)) - 1
        elif self._metric=='mean_squared_error':
            metric = K.eval(mean_squared_error(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='mean_absolute_error':
            metric = K.eval(mean_absolute_error(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='categorical_crossentropy':
            metric = K.eval(categorical_crossentropy(K.variable(y_true), K.variable(y_pred)))
        elif self._metric=='binary_crossentropy':
            metric = K.eval(binary_crossentropy(K.variable(y_true), K.variable(y_pred)))
        else:
            raise ValueError('Invalid value for custom_eval_stopping["name"]; '
                             '"roc-auc", "norm-gini", "mean_squared_error", '
                             '"mean_absolute_error", "categorical_crossentropy" '
                             'and "binary_crossentropy" are supported.')
        return metric 
Example 5
Project: keras_extension   Author: k1414st   File: optimizers.py    MIT License 6 votes
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0.,
                 terminal_bound=0.1, lower_bound=0., upper_bound=None, **kwargs):
        super(AdaBound, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
            if upper_bound is None:
                upper_bound = terminal_bound * 2.
            self.terminal_bound = K.variable(terminal_bound, name='terminal_bound')
            self.lower_bound = K.variable(lower_bound, name='lower_bound')
            self.upper_bound = K.variable(upper_bound, name='upper_bound')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay 
Example 6
Project: keras_extension   Author: k1414st   File: optimizers.py    MIT License 6 votes
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0.,
                 terminal_bound=0.1, lower_bound=0., upper_bound=None, **kwargs):
        super(AdaBound, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
            if upper_bound is None:
                upper_bound = terminal_bound * 2.
            self.terminal_bound = K.variable(terminal_bound, name='terminal_bound')
            self.lower_bound = K.variable(lower_bound, name='lower_bound')
            self.upper_bound = K.variable(upper_bound, name='upper_bound')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay 
Example 7
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: visualizations.py    MIT License 6 votes
def get_gradcam(image,model,layer_name,mode):
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image,0)
    loss = K.variable(0.)
    if mode == "abnormal":
        loss += K.sum(model.output)
    elif mode == "normal":
        loss += K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")
    #gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss,layer.output)[0]
    feature_weights = K.mean(upstream_grads,axis=[1,2]) #spatial global avg pool
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([image,0])[0] 
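A hedged usage sketch for the function above; `model`, `image`, and the layer name 'conv_last' are hypothetical placeholders, not taken from the project:

# `model` is an assumed trained Keras CNN, `image` a single input array of the
# shape the model expects, and 'conv_last' an assumed convolutional layer name.
heatmap = get_gradcam(image, model, layer_name='conv_last', mode='abnormal')
print(heatmap.shape)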
Example 8
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    MIT License 6 votes
def build(self, input_shape):
		self.input_spec = [InputSpec(shape=input_shape)]
		self.input_dim = input_shape[2]

		self.W = self.init((self.output_dim, 4 * self.input_dim),
		                   name='{}_W'.format(self.name))
		self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
		                         name='{}_U'.format(self.name))
		self.b = K.variable(np.hstack((np.zeros(self.input_dim),
		                               K.get_value(self.forget_bias_init((self.input_dim,))),
		                               np.zeros(self.input_dim),
		                               np.zeros(self.input_dim))),
		                    name='{}_b'.format(self.name))

		self.A = self.init((self.input_dim, self.output_dim),
		                    name='{}_A'.format(self.name))
		self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))


		self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

		if self.initial_weights is not None:
			self.set_weights(self.initial_weights)
			del self.initial_weights 
Example 9
Project: diktya   Author: BioroboticsLab   File: optimizers.py    Apache License 2.0 6 votes
def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        for param, grad, c in zip(params, grads, constraints):
            grad_tm1 = K.variable(np.zeros(K.get_value(param).shape))
            step_tm1 = K.variable(
                self.init_step*np.ones(K.get_value(param).shape))
            test = grad * grad_tm1
            diff = T.lt(test, 0)
            steps = step_tm1 * (T.eq(test, 0) +
                                T.gt(test, 0) * self.increase +
                                diff * self.decrease)
            step = T.minimum(self.max_step, T.maximum(self.min_step, steps))
            grad = grad - diff * grad
            self.updates.append((param, c(param - T.sgn(grad) * step)))
            self.updates.append((grad_tm1, grad))
            self.updates.append((step_tm1, step))
        return self.updates 
Example 10
Project: diktya   Author: BioroboticsLab   File: test_regularizers.py    Apache License 2.0 6 votes
def test_weight_orth_regularizer():
    reg = WeightOrthRegularizer(weight=1.)
    loss = K.variable(0.)

    normal_filters = K.random_normal((32, 3, 3))
    uniform_filters = K.random_uniform((32, 3, 3))

    reg.set_param(normal_filters)
    loss_function = K.function([K.learning_phase()], reg(loss))
    normal_loss = loss_function((1,))

    reg.set_param(uniform_filters)
    loss_function = K.function([K.learning_phase()], reg(loss))
    uniform_loss = loss_function((1,))

    assert(normal_loss < uniform_loss) 
Example 11
Project: Coloring-greyscale-images   Author: emilwallner   File: AdamAccumulate.py    MIT License 6 votes
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0., amsgrad=False, accum_iters=1, **kwargs):
        if accum_iters < 1:
            raise ValueError('accum_iters must be >= 1')
        super(AdamAccumulate, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
        if epsilon is None:
            epsilon = K.epsilon()
        self.epsilon = epsilon
        self.initial_decay = decay
        self.amsgrad = amsgrad
        self.accum_iters = K.variable(accum_iters, K.dtype(self.iterations))
        self.accum_iters_float = K.cast(self.accum_iters, K.floatx()) 
Example 12
Project: neural-style-keras   Author: robertomest   File: training.py    MIT License 6 votes
def get_total_loss(content_losses, style_losses, total_var_loss,
                   content_weights, style_weights, tv_weights, class_targets):
    total_loss = K.variable(0.)
    weighted_content_losses = []
    weighted_style_losses = []

    # Compute content losses
    for loss in content_losses:
        weighted_loss = K.mean(K.gather(content_weights, class_targets) * loss)
        weighted_content_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute style losses
    for loss in style_losses:
        weighted_loss = K.mean(K.gather(style_weights, class_targets) * loss)
        weighted_style_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute tv loss
    weighted_tv_loss = K.mean(K.gather(tv_weights, class_targets) *
                              total_var_loss)
    total_loss += weighted_tv_loss

    return (total_loss, weighted_content_losses, weighted_style_losses,
            weighted_tv_loss) 
Example 13
Project: phoneticSimilarity   Author: ronggong   File: models_RNN.py    GNU Affero General Public License v3.0 5 votes
def evaluate_model(model, X, y, scaler):

    y_pred = np.zeros_like(y)
    for ii in range(len(X)):
        X_sample = np.expand_dims(scaler.transform(X[ii]), axis=0)
        y_pred[ii] = model.predict_on_batch(X_sample)

    print(y.shape, y_pred.shape)
    y = K.variable(y)
    y_pred = K.variable(y_pred)

    loss = K.eval(categorical_crossentropy(y, y_pred))

    return np.mean(loss) 
Example 14
Project: SSD_keras_restnet   Author: hzm8341   File: ssd_layers.py    MIT License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (input_shape[self.axis],)
        init_gamma = self.scale * np.ones(shape)
        self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))
        self.trainable_weights = [self.gamma] 
Example 15
Project: sleep-convolutions-tf   Author: cliffordlab   File: model.py    MIT License 5 votes
def build(self, input_shape):
        self.W = K.variable(self.init_val, name='{}_scale'.format(self.name))
        if self.add_summaries:
            tf.summary.scalar('Scale', self.W)
        self.trainable_weights = [self.W]
        super(Scale, self).build(input_shape) 
Example 16
Project: fancy-cnn   Author: textclf   File: embeddings.py    MIT License 5 votes
def __init__(self, s=3, skip=True):
        self.skip = skip
        self.s = K.variable(s, name='s_constraint') 
Example 17
Project: fancy-cnn   Author: textclf   File: embeddings.py    MIT License 5 votes
def __init__(self, s=3, skip=True):
        self.skip = skip
        self.s = K.variable(s, name='s_constraint') 
Example 18
Project: bert_lamb_pretrain   Author: goldenbili   File: keras_lamb.py    Apache License 2.0 5 votes
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, weight_decay=0.01, decay=0., **kwargs):
        super(LAMBOptimizer, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
        if epsilon is None:
            epsilon = 1e-6
        self.epsilon = epsilon
        self.initial_decay = decay
        self.weight_decay = weight_decay 
Example 19
Project: bert_lamb_pretrain   Author: goldenbili   File: keras_lamb.py    Apache License 2.0 5 votes
def _get_variable_name(self, param_name):
        """Get the variable name from the tensor name."""
        m = re.match("^(.*):\\d+$", param_name)
        if m is not None:
            param_name = m.group(1)
        return param_name 
Example 20
Project: PiCamNN   Author: PiSimo   File: keras_yolo.py    MIT License 5 votes
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes 
Example 21
Project: keras-lamb   Author: CyberZHG   File: optimizer.py    MIT License 5 votes
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-6, decay=0., weight_decay=0.01,
                 lower_bound=1e-3, upper_bound=10.0, **kwargs):
        super(Lamb, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
            self.weight_decay = K.variable(weight_decay, name='weight_decay')
        self.epsilon = epsilon
        self.initial_decay = decay
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound 
Example 22
Project: trVAE   Author: theislab   File: _utils.py    MIT License 5 votes
def compute_kernel(x, y, kernel='rbf', **kwargs):
    """
        Computes a kernel matrix between x and y ('rbf', 'raphy', or 'multi-scale-rbf').
        # Parameters
            x: Tensor
                Tensor with shape [batch_size, z_dim]
            y: Tensor
                Tensor with shape [batch_size, z_dim]
        # Returns
            returns the computed RBF kernel between x and y
    """
    scales = kwargs.get("scales", [])
    if kernel == "rbf":
        x_size = K.shape(x)[0]
        y_size = K.shape(y)[0]
        dim = K.shape(x)[1]
        tiled_x = K.tile(K.reshape(x, K.stack([x_size, 1, dim])), K.stack([1, y_size, 1]))
        tiled_y = K.tile(K.reshape(y, K.stack([1, y_size, dim])), K.stack([x_size, 1, 1]))
        return K.exp(-K.mean(K.square(tiled_x - tiled_y), axis=2) / K.cast(dim, tf.float32))
    elif kernel == 'raphy':
        scales = K.variable(value=np.asarray(scales))
        squared_dist = K.expand_dims(squared_distance(x, y), 0)
        scales = K.expand_dims(K.expand_dims(scales, -1), -1)
        weights = K.eval(K.shape(scales)[0])
        weights = K.variable(value=np.asarray(weights))
        weights = K.expand_dims(K.expand_dims(weights, -1), -1)
        return K.sum(weights * K.exp(-squared_dist / (K.pow(scales, 2))), 0)
    elif kernel == "multi-scale-rbf":
        sigmas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1, 5, 10, 15, 20, 25, 30, 35, 100, 1e3, 1e4, 1e5, 1e6]

        beta = 1. / (2. * (K.expand_dims(sigmas, 1)))
        distances = squared_distance(x, y)
        s = K.dot(beta, K.reshape(distances, (1, -1)))

        return K.reshape(tf.reduce_sum(tf.exp(-s), 0), K.shape(distances)) / len(sigmas) 
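A minimal usage sketch for the 'rbf' branch, assuming the module's own imports (numpy as np, tensorflow as tf, keras backend as K) are in place; the shapes are illustrative:

x = K.variable(np.random.rand(8, 16))   # [batch_size, z_dim]
y = K.variable(np.random.rand(8, 16))
kernel = compute_kernel(x, y, kernel='rbf')
print(K.eval(kernel).shape)              # (8, 8) pairwise kernel matrix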
Example 23
Project: cbc_networks   Author: saralajew   File: losses.py    BSD 3-Clause "New" or "Revised" License 5 votes
def __init__(self, margin=0.3):
        self.margin = K.variable(margin, name='margin') 
Example 24
Project: cbc_networks   Author: saralajew   File: component_input.py    BSD 3-Clause "New" or "Revised" License 5 votes
def __init__(self, variable, name=''):
        self.variable = variable
        if name != '':
            self.name = name + '_'
        self.variable = K.variable(self.variable,
                                   name=self.name + 'constant_input_variable')
        self.input = Input(tensor=self.variable,
                           name=self.name + 'constant_input') 
Example 25
Project: 360_aware_saliency   Author: MikhailStartsev   File: models.py    GNU General Public License v3.0 5 votes
def gaussian_priors_init(shape, name=None):
    means = np.random.uniform(low=0.3, high=0.7, size=shape[0] // 2)
    covars = np.random.uniform(low=0.05, high=0.3, size=shape[0] // 2)
    return K.variable(np.concatenate((means, covars), axis=0), name=name) 
Example 26
Project: timeception   Author: noureldien   File: resnet_152_keras.py    GNU General Public License v3.0 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]),)

        self.gamma = K.variable(self.gamma_init(shape), name='%s_gamma' % self.name)
        self.beta = K.variable(self.beta_init(shape), name='%s_beta' % self.name)
        self.trainable_weights = [self.gamma, self.beta]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights 
Example 27
Project: DisplaceNet   Author: GKalliatakis   File: keras_layer_L2Normalization.py    MIT License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        gamma = self.gamma_init * np.ones((input_shape[self.axis],))
        self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
        self.trainable_weights = [self.gamma]
        super(L2Normalization, self).build(input_shape) 
Example 28
Project: Un-Fake   Author: Somil112   File: app.py    MIT License 5 votes
def build(self, input_shape, name='embeddings'):        
        fixed_weight = K.variable(self.fixed_weights, name=name+'_fixed')
        variable_weight = K.variable(self.variable_weights, name=name+'_var')
        
        self._trainable_weights.append(variable_weight)
        self._non_trainable_weights.append(fixed_weight)
        
        self.embeddings = K.concatenate([fixed_weight, variable_weight], axis=0)
        
        self.built = True 
Example 29
Project: Un-Fake   Author: Somil112   File: views.py    MIT License 5 votes
def build(self, input_shape, name='embeddings'):        
        fixed_weight = K.variable(self.fixed_weights, name=name+'_fixed')
        variable_weight = K.variable(self.variable_weights, name=name+'_var')
        
        self._trainable_weights.append(variable_weight)
        self._non_trainable_weights.append(fixed_weight)
        
        self.embeddings = K.concatenate([fixed_weight, variable_weight], axis=0)
        
        self.built = True 
Example 30
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers.py    MIT License 5 votes
def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                 amsgrad=False, batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, **kwargs):
        self.initial_decay = kwargs.pop('decay', 0.0)
        self.epsilon = kwargs.pop('epsilon', K.epsilon())
        learning_rate = kwargs.pop('lr', learning_rate)
        eta_t = kwargs.pop('eta_t', 1.)
        super(AdamW, self).__init__(**kwargs)

        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.learning_rate = K.variable(learning_rate, name='learning_rate')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(self.initial_decay, name='decay')
            self.batch_size = K.variable(batch_size, dtype='int64',
                                         name='batch_size')
            self.eta_min = K.constant(eta_min, name='eta_min')
            self.eta_max = K.constant(eta_max, name='eta_max')
            self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
            self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')

        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.amsgrad = amsgrad
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing

        self._init_notified = False
        _check_args(total_iterations, use_cosine_annealing, self.weight_decays) 
Example 31
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers.py    MIT License 5 votes
def __init__(self, learning_rate=0.002, beta_1=0.9, beta_2=0.999,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, **kwargs):
        self.schedule_decay = kwargs.pop('schedule_decay', 0.004)
        self.epsilon = kwargs.pop('epsilon', K.epsilon())
        learning_rate = kwargs.pop('lr', learning_rate)
        eta_t = kwargs.pop('eta_t', 1.)
        super(NadamW, self).__init__(**kwargs)

        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.m_schedule = K.variable(1., name='m_schedule')
            self.learning_rate = K.variable(learning_rate, name='learning_rate')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.batch_size = K.variable(batch_size, dtype='int64',
                                         name='batch_size')
            self.eta_min = K.constant(eta_min, name='eta_min')
            self.eta_max = K.constant(eta_max, name='eta_max')
            self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
            self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')

        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.use_cosine_annealing = use_cosine_annealing
        self.init_verbose = init_verbose

        self._init_notified = False
        _check_args(total_iterations, use_cosine_annealing, self.weight_decays) 
Example 32
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers.py    MIT License 5 votes
def __init__(self, learning_rate=0.01, momentum=0., nesterov=False,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, **kwargs):
        self.initial_decay = kwargs.pop('decay', 0.0)
        learning_rate = kwargs.pop('lr', learning_rate)
        eta_t = kwargs.pop('eta_t', 1.)
        super(SGDW, self).__init__(**kwargs)

        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.learning_rate = K.variable(learning_rate, name='learning_rate')
            self.momentum = K.variable(momentum, name='momentum')
            self.decay = K.variable(self.initial_decay, name='decay')
            self.batch_size = K.variable(batch_size, dtype='int64',
                                         name='batch_size')
            self.eta_min = K.constant(eta_min, name='eta_min')
            self.eta_max = K.constant(eta_max, name='eta_max')
            self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
            self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')

        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.nesterov = nesterov
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing

        self._init_notified = False
        _check_args(total_iterations, use_cosine_annealing, self.weight_decays) 
Example 33
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License 5 votes
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 amsgrad=False, epsilon=None, decay=0.0,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, **kwargs):
        eta_t = kwargs.pop('eta_t', 1.)
        super(AdamW, self).__init__(**kwargs)

        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.decay = K.variable(decay, name='decay')
            self.batch_size = K.variable(batch_size, dtype='int64',
                                         name='batch_size')
            self.eta_min = K.constant(eta_min, name='eta_min')
            self.eta_max = K.constant(eta_max, name='eta_max')
            self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
            self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')

        self.initial_decay = decay
        self.epsilon = epsilon or K.epsilon()
        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.amsgrad = amsgrad
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing

        self._init_notified = False
        _check_args(total_iterations, use_cosine_annealing, self.weight_decays) 
Example 34
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_225.py    MIT License 5 votes
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
                 schedule_decay=0.004, epsilon=None,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, **kwargs):
        eta_t = kwargs.pop('eta_t', 1.)
        super(NadamW, self).__init__(**kwargs)

        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.m_schedule = K.variable(1., name='m_schedule')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.batch_size = K.variable(batch_size, dtype='int64',
                                         name='batch_size')
            self.eta_min = K.constant(eta_min, name='eta_min')
            self.eta_max = K.constant(eta_max, name='eta_max')
            self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
            self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')

        self.epsilon = epsilon or K.epsilon()
        self.schedule_decay = schedule_decay
        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.use_cosine_annealing = use_cosine_annealing
        self.init_verbose = init_verbose

        self._init_notified = False
        _check_args(total_iterations, use_cosine_annealing, self.weight_decays) 
Example 35
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_v2.py    MIT License 5 votes
def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=None, decay=0., amsgrad=False,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, name="AdamW", **kwargs):
        eta_t = kwargs.pop('eta_t', 1.)

        super(AdamW, self).__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)

        self.batch_size = K.variable(batch_size, dtype='int64', name='batch_size')
        self.eta_min = K.constant(eta_min, name='eta_min')
        self.eta_max = K.constant(eta_max, name='eta_max')
        self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
        self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')
        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing
        self.epsilon = epsilon or backend_config.epsilon()
        self.amsgrad = amsgrad

        _check_args(total_iterations, use_cosine_annealing, self.weight_decays)
        self._updates_processed = 0  # to track num calls to '_resource_apply_...'
        self._init_notified = False 
Example 36
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_v2.py    MIT License 5 votes
def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, name="AdamW", **kwargs):

        # Backwards compatibility with keras NAdam optimizer.
        kwargs['decay'] = kwargs.pop('schedule_decay', 0.004)
        eta_t = kwargs.pop('eta_t', 1.)
        learning_rate = kwargs.get('lr', learning_rate)
        if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule):
            raise ValueError('The Nadam optimizer does not support '
                             'tf.keras.optimizers.LearningRateSchedules as the '
                             'learning rate.')

        super(NadamW, self).__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self.epsilon = epsilon or backend_config.epsilon()
        self._m_cache = None

        self.batch_size = K.variable(batch_size, dtype='int64', name='batch_size')
        self.eta_min = K.constant(eta_min, name='eta_min')
        self.eta_max = K.constant(eta_max, name='eta_max')
        self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
        self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')
        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing

        _check_args(total_iterations, use_cosine_annealing, self.weight_decays)
        self._updates_processed = 0  # to track num calls to '_resource_apply_...'
        self._init_notified = False 
Example 37
Project: keras-adamw   Author: OverLordGoldDragon   File: optimizers_v2.py    MIT License 5 votes
def __init__(self, learning_rate=0.01, momentum=0.0, nesterov=False,
                 batch_size=32, total_iterations=0,
                 total_iterations_wd=None, use_cosine_annealing=False,
                 weight_decays=None, lr_multipliers=None, init_verbose=True,
                 eta_min=0, eta_max=1, t_cur=0, name="AdamW", **kwargs):

        eta_t = kwargs.pop('eta_t', 1.)
        super(SGDW, self).__init__(name, **kwargs)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)

        self._momentum = False
        if isinstance(momentum, ops.Tensor) or callable(momentum) or momentum > 0:
            self._momentum = True
        if isinstance(momentum, (int, float)) and (momentum < 0 or momentum > 1):
            raise ValueError("`momentum` must be between [0, 1].")
        self._set_hyper("momentum", momentum)

        self.nesterov = nesterov
        self.batch_size = K.variable(batch_size, dtype='int64', name='batch_size')
        self.eta_min = K.constant(eta_min, name='eta_min')
        self.eta_max = K.constant(eta_max, name='eta_max')
        self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
        self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')
        self.total_iterations = total_iterations
        self.total_iterations_wd = total_iterations_wd or total_iterations
        self.lr_multipliers = lr_multipliers
        self.weight_decays = weight_decays or {}
        self.init_verbose = init_verbose
        self.use_cosine_annealing = use_cosine_annealing

        _check_args(total_iterations, use_cosine_annealing, self.weight_decays)
        self._updates_processed = 0  # to track num calls to '_resource_apply_...'
        self._init_notified = False 
Example 38
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]),)

        self.gamma = K.variable(self.gamma_init(shape), name='%s_gamma'%self.name)
        self.beta = K.variable(self.beta_init(shape), name='%s_beta'%self.name)
        self.trainable_weights = [self.gamma, self.beta]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights 
Example 39
Project: dockerizeme   Author: dockerizeme   File: snippet.py    Apache License 2.0 5 votes
def FScore2(y_true, y_pred):
    '''
    The F score, beta=2
    '''
    B2 = K.variable(4)         # beta^2, with beta = 2
    OnePlusB2 = K.variable(5)  # 1 + beta^2
    pred = K.round(y_pred)
    tp = K.sum(K.cast(K.less(K.abs(pred - K.clip(y_true, .5, 1.)), 0.01), 'float32'), -1)
    fp = K.sum(K.cast(K.greater(pred - y_true, 0.1), 'float32'), -1)
    fn = K.sum(K.cast(K.less(pred - y_true, -0.1), 'float32'), -1)

    f2 = OnePlusB2 * tp / (OnePlusB2 * tp + B2 * fn + fp)

    return K.mean(f2) 
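The constants above encode F_beta = (1 + beta^2) * tp / ((1 + beta^2) * tp + beta^2 * fn + fp) with beta = 2, so B2 = 4 and OnePlusB2 = 5. A quick sanity check on toy data (values are illustrative):

import numpy as np
from keras import backend as K

y_true = K.variable(np.array([[1., 0., 1., 1.]]))
y_pred = K.variable(np.array([[0.9, 0.2, 0.4, 0.8]]))
print(K.eval(FScore2(y_true, y_pred)))  # tp=2, fp=0, fn=1 -> 10/14 ~ 0.714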
Example 40
Project: Car-Recognition   Author: foamliu   File: scale_layer.py    MIT License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]),)

        # Compatibility with TensorFlow >= 1.0.0
        self.gamma = K.variable(self.gamma_init(shape), name='{}_gamma'.format(self.name))
        self.beta = K.variable(self.beta_init(shape), name='{}_beta'.format(self.name))
        #self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))
        #self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))
        self.trainable_weights = [self.gamma, self.beta]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights 
Example 41
Project: FaceLandmarks   Author: JACKYLUO1991   File: loss.py    Apache License 2.0 5 votes
def smoothL1(y_true, y_pred):
    """
    More robust to noise
    """
    THRESHOLD = K.variable(1.0)
    mae = K.abs(y_true - y_pred)
    flag = K.greater(mae, THRESHOLD)
    loss = K.mean(K.switch(flag, (mae - 0.5), K.pow(mae, 2)), axis=-1)

    return loss 
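A minimal sketch evaluating smoothL1 on toy tensors (values are illustrative); errors below the threshold are squared, while the one above it (|2.0 - 4.0| = 2.0) contributes linearly:

import numpy as np
from keras import backend as K

y_true = K.variable(np.array([[0.0, 1.0, 2.0]]))
y_pred = K.variable(np.array([[0.1, 1.5, 4.0]]))
print(K.eval(smoothL1(y_true, y_pred)))  # mean of [0.01, 0.25, 1.5] ~ 0.587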
Example 42
Project: object-detection   Author: kaka-lin   File: test_tiny_yolo.py    MIT License 5 votes
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor
    
    # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)
    
    # Use K.gather() to select only nms_indices from scores, boxes and classes
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)
    
    return scores, boxes, classes 
Example 43
Project: object-detection   Author: kaka-lin   File: model.py    MIT License 5 votes
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor
    
    # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold)
    
    # Use K.gather() to select only nms_indices from scores, boxes and classes
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)
    
    return scores, boxes, classes 
Example 44
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    MIT License 5 votes
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        box_confidence, boxes, box_class_probs, threshold=score_threshold)
    
    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    
    return boxes, scores, classes 
Example 45
Project: kuaikai_perception   Author: pixmoving-moveit   File: keras_layer_L2Normalization.py    BSD 3-Clause "New" or "Revised" License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        gamma = self.gamma_init * np.ones((input_shape[self.axis],))
        self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
        self.trainable_weights = [self.gamma]
        super(L2Normalization, self).build(input_shape) 
Example 46
Project: dtc-pointpillars-keras   Author: dtczhl   File: keras_layer_L2Normalization.py    MIT License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        gamma = self.gamma_init * np.ones((input_shape[self.axis],))
        self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
        self.trainable_weights = [self.gamma]
        super(L2Normalization, self).build(input_shape) 
Example 47
Project: iMIMIC-RCVs   Author: medgift   File: scale_layer.py    MIT License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]),)

        # Compatibility with TensorFlow >= 1.0.0
        self.gamma = K.variable(self.gamma_init(shape), name='{}_gamma'.format(self.name))
        self.beta = K.variable(self.beta_init(shape), name='{}_beta'.format(self.name))
        #self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))
        #self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))
        self.trainable_weights = [self.gamma, self.beta]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights 
Example 48
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: vis.py    MIT License 5 votes
def get_saliency(image, model):
    """Returns a saliency map with same shape as image. """
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss, model.input)[0])
    saliency = K.max(grads, axis=3)
    fetch_saliency = K.function([model.input], [loss, saliency])
    outputs, saliency = fetch_saliency([image])
    K.set_learning_phase(True)
    return saliency 
Example 49
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: vis.py    MIT License 5 votes
def get_gradcam(image, model, layer_name):
    # remove dropout/noise layers
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(model.output)
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input], [heatmap])
    return fetch_heatmap([image])[0] 
Example 50
Project: auckland-ai-meetup-x-triage   Author: a-i-joe   File: visualizations.py    MIT License 5 votes
def get_saliency(image,model):
    """Returns a saliency map with same shape as image. """
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image,0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss,model.input)[0])
    saliency = K.max(grads,axis=3)
    fetch_saliency = K.function([model.input,K.learning_phase()],[loss,saliency])
    outputs, saliency = fetch_saliency([image,0])
    K.set_learning_phase(True)
    return saliency 
Example 51
Project: keras_bn_library   Author: bnsnapper   File: recurrent.py    MIT License 5 votes
def build(self, input_shape):
		self.input_spec = [InputSpec(shape=input_shape)]
		input_dim = input_shape[2]
		self.input_dim = input_dim
		
		if self.stateful:
			self.reset_states()
		else:
			self.states = [None, None]
			self.states_dim = [self.input_dim, self.output_dim]


		self.weight_size = self.output_dim * 4
		self.W = self.add_weight((input_dim, self.weight_size),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer)
		self.U = self.add_weight((input_dim, self.weight_size),
                                 initializer=self.inner_init,
                                 name='{}_U'.format(self.name),
                                 regularizer=self.U_regularizer)

		def b_reg(shape, name=None):
			return K.variable(np.hstack((np.zeros(self.output_dim),
										K.get_value(self.forget_bias_init((self.output_dim,))),
										np.zeros(self.output_dim),
										np.zeros(self.output_dim))),
										name='{}_b'.format(self.name))
		self.b = self.add_weight((self.weight_size,),
                                     initializer=b_reg,
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer)


		if self.initial_weights is not None:
			self.set_weights(self.initial_weights)
			del self.initial_weights

		self.built = True 
Example 52
Project: wtte-rnn   Author: ragulpr   File: test_keras.py    MIT License 5 votes
def test_keras_unstack_hack():
    y_true_np = np.random.random([1, 3, 2])
    y_true_np[:, :, 0] = 0
    y_true_np[:, :, 1] = 1

    y_true_keras = K.variable(y_true_np)

    y, u = wtte._keras_unstack_hack(y_true_keras)
    y_true_keras_new = K.stack([y, u], axis=-1)

    np.testing.assert_array_equal(K.eval(y_true_keras_new), y_true_np)

# SANITY CHECK: Use pure Weibull data censored at C(ensoring point).
# Should converge to the generating A(alpha) and B(eta) for each timestep 
Example 53
Project: ssd_keras   Author: ndl-lab   File: ssd_layers.py    MIT License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (input_shape[self.axis],)
        init_gamma = self.scale * np.ones(shape)
        self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))
        self.trainable_weights = [self.gamma] 
Example 54
Project: resnet-finetune-demo   Author: cta-ai   File: resnet152.py    MIT License 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]),)

        self.gamma = K.variable(self.gamma_init(shape), name='%s_gamma'%self.name)
        self.beta = K.variable(self.beta_init(shape), name='%s_beta'%self.name)
        self.trainable_weights = [self.gamma, self.beta]

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights 
Example 55
Project: GewitterGefahr   Author: thunderhoser   File: standalone_utils.py    MIT License 5 votes
def do_2d_pooling(feature_matrix, stride_length_px=2,
                  pooling_type_string=MAX_POOLING_TYPE_STRING):
    """Pools 2-D feature maps.

    m = number of rows after pooling
    n = number of columns after pooling

    :param feature_matrix: Input feature maps (numpy array).  Dimensions must be
        M x N x C or 1 x M x N x C.
    :param stride_length_px: Stride length (pixels).  The pooling window will
        move by this many rows or columns at a time as it slides over each input
        feature map.
    :param pooling_type_string: Pooling type (must be accepted by
        `_check_pooling_type`).
    :return: feature_matrix: Output feature maps (numpy array).  Dimensions will
        be 1 x m x n x C.
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 2)
    _check_pooling_type(pooling_type_string)

    if len(feature_matrix.shape) == 3:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)
    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=4)

    feature_tensor = K.pool2d(
        x=K.variable(feature_matrix), pool_mode=pooling_type_string,
        pool_size=(stride_length_px, stride_length_px),
        strides=(stride_length_px, stride_length_px), padding='valid',
        data_format='channels_last')
    return feature_tensor.eval(session=K.get_session()) 
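A standalone distillation of the K.pool2d call above, with the module-specific error checks omitted: max-pool a random 1 x 4 x 4 x 1 feature map down to 1 x 2 x 2 x 1.

import numpy as np
from keras import backend as K

feature_matrix = np.random.rand(1, 4, 4, 1).astype('float32')
pooled = K.pool2d(
    x=K.variable(feature_matrix), pool_mode='max',
    pool_size=(2, 2), strides=(2, 2), padding='valid',
    data_format='channels_last')
print(K.eval(pooled).shape)  # (1, 2, 2, 1)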
Example 56
Project: GewitterGefahr   Author: thunderhoser   File: standalone_utils.py    MIT License 5 votes
def do_3d_pooling(feature_matrix, stride_length_px=2,
                  pooling_type_string=MAX_POOLING_TYPE_STRING):
    """Pools 3-D feature maps.

    :param feature_matrix: Input feature maps (numpy array).  Dimensions must be
        M x N x H x C or 1 x M x N x H x C.
    :param stride_length_px: See doc for `do_2d_pooling`.
    :param pooling_type_string: Pooling type (must be accepted by
        `_check_pooling_type`).
    :return: feature_matrix: Output feature maps (numpy array).  Dimensions will
        be 1 x m x n x h x C.
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 2)
    _check_pooling_type(pooling_type_string)

    if len(feature_matrix.shape) == 4:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)
    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=5)

    feature_tensor = K.pool3d(
        x=K.variable(feature_matrix), pool_mode=pooling_type_string,
        pool_size=(stride_length_px, stride_length_px, stride_length_px),
        strides=(stride_length_px, stride_length_px, stride_length_px),
        padding='valid', data_format='channels_last')
    return feature_tensor.eval(session=K.get_session()) 
Example 57
Project: diktya   Author: BioroboticsLab   File: optimizers.py    Apache License 2.0 5 votes
def __init__(self,
                 init_step=0.01,
                 increase=1.10,
                 decrease=0.8,
                 min_step=1e-7,
                 max_step=10,
                 **kwargs):
        self.init_step = init_step
        self.increase = K.variable(increase)
        self.decrease = K.variable(decrease)
        self.min_step = K.variable(min_step)
        self.max_step = K.variable(max_step)
        super(RProp, self).__init__(**kwargs) 
Example 58
Project: diktya   Author: BioroboticsLab   File: regularizers.py    Apache License 2.0 5 votes
def __init__(self, max_sum):
        warnings.warn("SumOfActivityBelowRegularizer is deprecated.")
        # TODO: Write a replacement that uses compute_loss
        self.max_sum = K.variable(max_sum)
        self.uses_learning_phase = True 
Example 59
Project: perceptron-benchmark   Author: advboxes   File: keras_layer_L2Normalization.py    Apache License 2.0 5 votes
def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        gamma = self.gamma_init * np.ones((input_shape[self.axis],))
        self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))
        self.trainable_weights = [self.gamma]
        super(L2Normalization, self).build(input_shape) 
Example 60
Project: dota2-win-rate-prediction-v1   Author: vpus   File: wrpt_model.py    MIT License 5 votes
def __init__(self, wt, **kwargs):
        self.W = K.variable(wt)
        super(Advantage, self).__init__(**kwargs) 
Example 61
Project: dota2-win-rate-prediction-v1   Author: vpus   File: predict_by_squad.py    MIT License 5 votes
def __init__(self, wt, **kwargs):
        self.W = K.variable(wt)
        super(Advantage, self).__init__(**kwargs) 
Example 62
Project: RecurrentGaze   Author: crisie   File: adamaccum.py    MIT License 5 votes
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, accum_iters=20, **kwargs):
        super(Adam_accumulate, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.iterations = K.variable(0)
        self.lr = K.variable(lr)
        self.beta_1 = K.variable(beta_1)
        self.beta_2 = K.variable(beta_2)
        self.accum_iters = K.variable(accum_iters) 
Example 63
Project: RecurrentGaze   Author: crisie   File: adamaccum.py    MIT License 5 votes
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations + 1)]

        t = self.iterations + 1
        lr_t = self.lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        ms = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        vs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        gs = [K.variable(np.zeros(K.get_value(p).shape)) for p in params]
        self.weights = ms + vs

        for p, g, m, v, gg in zip(params, grads, ms, vs, gs):

            flag = K.equal(self.iterations % self.accum_iters, 0)
            flag = K.cast(flag, dtype='float32')

            gg_t = (1 - flag) * (gg + g)
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * (gg + flag * g) / self.accum_iters
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square((gg + flag * g) / self.accum_iters)
            p_t = p - flag * lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append((m, flag * m_t + (1 - flag) * m))
            self.updates.append((v, flag * v_t + (1 - flag) * v))
            self.updates.append((gg, gg_t))

            new_p = p_t
            # apply constraints
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)  # fixed: apply the callable `constraint` attribute checked above
            self.updates.append((p, new_p))
        return self.updates 
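The flag tensor gates the update: gradients accumulate into gg for accum_iters - 1 steps, and an Adam-style step is applied only when iterations % accum_iters == 0. A quick numeric check of the gating expression:

from keras import backend as K

iterations = K.variable(8.)
accum_iters = K.variable(4.)
flag = K.cast(K.equal(iterations % accum_iters, 0), dtype='float32')
print(K.eval(flag))  # 1.0 -> the accumulated gradient is applied on this step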
Example 64
Project: keras-fcn   Author: JihongJu   File: test_blocks.py    MIT License 5 votes vote down vote up
def test_vgg_fc():
    if K.image_data_format() == 'channels_first':
        x1 = K.variable(np.random.random((1, 512, 14, 14)))
        y1_shape = (1, 512, 8, 8)
    else:
        x1 = K.variable(np.random.random((1, 14, 14, 512)))
        y1_shape = (1, 8, 8, 512) 
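The snippet is truncated at this point; judging by the sibling tests below (test_vgg_deconv, test_vgg_score), it presumably continues by applying the block and asserting the output shape, roughly as follows (the vgg_fc argument is assumed from y1_shape, not taken from the project):

    y1 = vgg_fc(filters=512)(x1)
    assert K.int_shape(y1) == y1_shape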
Example 65
Project: keras-fcn   Author: JihongJu   File: test_blocks.py    MIT License 5 votes vote down vote up
def test_vgg_deconv():
    if K.image_data_format() == 'channels_first':
        x1 = K.variable(np.random.random((1, 512, 8, 8)))
        y1_shape = (1, 21, 18, 18)
        x2 = K.variable(np.random.random((1, 512, 27, 27)))
        y2_shape = (1, 21, 38, 38)
        x3 = K.variable(np.random.random((1, 256, 53, 53)))
        y3_shape = (1, 21, 312, 312)
    else:
        x1 = K.variable(np.random.random((1, 8, 8, 512)))
        y1_shape = (1, 18, 18, 21)
        x2 = K.variable(np.random.random((1, 27, 27, 512)))
        y2_shape = (1, 38, 38, 21)
        x3 = K.variable(np.random.random((1, 53, 53, 256)))
        y3_shape = (1, 312, 312, 21)

    upscore1 = vgg_deconv(classes=21)(x1, None)
    assert K.int_shape(upscore1) == y1_shape
    assert not np.any(np.isnan(K.eval(upscore1)))

    upscore2 = vgg_deconv(classes=21)(x2, upscore1)
    assert K.int_shape(upscore2) == y2_shape
    assert not np.any(np.isnan(K.eval(upscore2)))

    upscore3 = vgg_deconv(classes=21, kernel_size=(16, 16),
                          strides=(8, 8))(x3, upscore2)
    assert K.int_shape(upscore3) == y3_shape
    assert not np.any(np.isnan(K.eval(upscore3))) 
Example 66
Project: keras-fcn   Author: JihongJu   File: test_blocks.py    MIT License 5 votes vote down vote up
def test_vgg_score():
    if K.image_data_format() == 'channels_first':
        x1 = K.variable(np.random.random((1, 3, 224, 224)))
        x2 = K.variable(np.random.random((1, 21, 312, 312)))
        y_shape = (1, 21, 224, 224)
    else:
        x1 = K.variable(np.random.random((1, 224, 224, 3)))
        x2 = K.variable(np.random.random((1, 312, 312, 21)))
        y_shape = (1, 224, 224, 21)
    score = vgg_score(crop_offset='centered')(x1, x2)
    assert K.int_shape(score) == y_shape 
Example 67
Project: keras-fcn   Author: JihongJu   File: test_layers.py    MIT License 5 votes vote down vote up
def test_bilinear_upsampling_2d():
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 5
    input_len_dim2 = 5
    target_len_dim1 = 8
    target_len_dim2 = 8

    for data_format in ['channels_first', 'channels_last']:
        if data_format == 'channels_first':
            inputs = np.random.rand(num_samples, stack_size,
                                    input_len_dim1, input_len_dim2)
            target = np.random.rand(num_samples, stack_size,
                                    target_len_dim1, target_len_dim2)
            expected_output_shape = (2, 2, 8, 8)
        else:
            inputs = np.random.rand(num_samples,
                                    input_len_dim1, input_len_dim2,
                                    stack_size)
            target = np.random.rand(num_samples, target_len_dim1,
                                    target_len_dim2, stack_size)
            expected_output_shape = (2, 8, 8, 2)
        # shape test
        layer = BilinearUpSampling2D(target_shape=target.shape,
                                     data_format=data_format)
        output = layer(K.variable(inputs))
        assert K.int_shape(output) == expected_output_shape 
Example 68
Project: keras-fcn   Author: JihongJu   File: test_losses.py    MIT License 5 votes vote down vote up
def test_categorical_crossentropy():

    y_true = np.reshape([1, 1, 0, 0], [1, 2, 2]).astype('int')
    y_true = np.eye(2)[y_true]
    y_pred = np.ones((1, 2, 2, 2)) * 0.5

    y_true, y_pred = K.variable(y_true), K.variable(y_pred)

    loss = mean_categorical_crossentropy(y_true, y_pred)
    loss = K.eval(loss)
    assert np.allclose(loss, 0.69314718) 
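The expected value follows directly: with two classes and uniform predictions of 0.5, the per-pixel cross-entropy is -log(0.5) = log 2 ≈ 0.69314718, regardless of the true labels:

import numpy as np
print(-np.log(0.5))  # 0.6931471805599453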
Example 69
Project: SpineFinder   Author: jfm15   File: keras_weighted_categorical_crossentropy.py    GNU General Public License v3.0 5 votes vote down vote up
def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5, 2, 10])  # class one at 0.5x the usual weight, class two at 2x, class three at 10x
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss,optimizer='adam')
    """

    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # y_true = K.print_tensor(y_true, message='y_true = ')
        # y_pred = K.print_tensor(y_pred, message='y_pred = ')
        # scale predictions so that the class probabilities of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # calc
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss

    return loss 
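A quick sanity check of the closure (values chosen for illustration): with a one-hot target, a uniform two-class prediction, and weights [1, 2], the per-sample loss is the true class's weight times log 2:

import numpy as np
from keras import backend as K

loss_fn = weighted_categorical_crossentropy(np.array([1., 2.]))
y_true = K.variable(np.array([[0., 1.]]))  # true class has weight 2
y_pred = K.variable(np.array([[0.5, 0.5]]))
print(K.eval(loss_fn(y_true, y_pred)))     # [1.3862944] == 2 * log(2)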
Example 70
Project: NTM-Keras   Author: SigmaQuan   File: controller.py    MIT License 5 votes vote down vote up
def build(self, input_shape, mem_shape, n_heads, hidden_dim):
        input_dim = input_shape[1]
        initial_weight_value = np.random.random((input_dim, self.output_dim))
        self.W = K.variable(initial_weight_value)
        self.trainable_weights = [self.W]
        self.mem_shape = mem_shape
        self.Memory = np.zeros((mem_shape[0], mem_shape[1])) 
Example 71
Project: keras-optimizers   Author: hi-im-ryanli   File: frankenstein.py    MIT License 5 votes vote down vote up
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, schedule_decay=0.004, **kwargs):
        super(Nadamax, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.m_schedule = K.variable(1., name='m_schedule')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
        self.epsilon = epsilon
        self.schedule_decay = schedule_decay 
Example 72
Project: keras-optimizers   Author: hi-im-ryanli   File: frankenstein.py    MIT License 5 votes vote down vote up
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, schedule_decay=0.004, **kwargs):
        super(Radamax, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.m_schedule = K.variable(1., name='m_schedule')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
        self.epsilon = epsilon
        self.schedule_decay = schedule_decay 
Example 73
Project: keras-optimizers   Author: hi-im-ryanli   File: frankenstein.py    MIT License 5 votes vote down vote up
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, rho=0.95,
                 epsilon=1e-8, decay=0., **kwargs):
        super(AdamDelta, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.lr = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
            self.rho = rho
            self.decay = K.variable(decay, name='decay')
        self.epsilon = epsilon
        self.initial_decay = decay 
Example 74
Project: keras-lookahead   Author: CyberZHG   File: optimizers.py    MIT License 5 votes vote down vote up
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, **kwargs):
        super(Adam, self).__init__(**kwargs)
        with K.name_scope(self.__class__.__name__):
            self.iterations = K.variable(0, dtype='int64', name='iterations')
            self.learning_rate = K.variable(lr, name='lr')
            self.beta_1 = K.variable(beta_1, name='beta_1')
            self.beta_2 = K.variable(beta_2, name='beta_2')
        self.epsilon = K.epsilon() 
Example 75
Project: dynamicgem   Author: Sujit-O   File: dynamic_triad.py    MIT License 5 votes vote down vote up
def make_pretrain_input(self, batch):
        ret = Sampler.make_pretrain_input(self, batch)
        # (data, weight, triad)
        # because embedding variable starts from index 0
        for d in ret[0]:  # data
            d[0] -= self.init_train_begin
        for d in ret[2]:
            d[0] -= self.init_train_begin
        return ret

    # return a list with however many inputs are required 
Example 76
Project: GewitterGefahr   Author: thunderhoser   File: standalone_utils.py    MIT License 4 votes vote down vote up
def do_2d_convolution(
        feature_matrix, kernel_matrix, pad_edges=False, stride_length_px=1):
    """Convolves 2-D feature maps with 2-D kernel.

    M = number of rows in each input feature map
    N = number of columns in each input feature map
    C = number of input channels
    m = number of rows in kernel
    n = number of columns in kernel
    c = number of output feature maps (channels)

    :param feature_matrix: Input feature maps (numpy array).  Dimensions must be
        M x N x C or 1 x M x N x C.
    :param kernel_matrix: Kernel as numpy array.  Dimensions must be
        m x n x C x c.
    :param pad_edges: Boolean flag.  If True, edges of input feature maps will
        be zero-padded during convolution, so spatial dimensions of the output
        feature maps will be the same (M x N).  If False, dimensions
        of the output maps will be (M - m + 1) x (N - n + 1).
    :param stride_length_px: Stride length (pixels).  The kernel will move by
        this many rows or columns at a time as it slides over each input feature
        map.
    :return: feature_matrix: Output feature maps (numpy array).  Dimensions will
        be 1 x M x N x c or 1 x (M - m + 1) x (N - n + 1) x c, depending on
        whether or not edges are padded.
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_numpy_array_without_nan(kernel_matrix)
    error_checking.assert_is_numpy_array(kernel_matrix, num_dimensions=4)
    error_checking.assert_is_boolean(pad_edges)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 1)

    if len(feature_matrix.shape) == 3:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)
    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=4)

    if pad_edges:
        padding_string = 'same'
    else:
        padding_string = 'valid'

    feature_tensor = K.conv2d(
        x=K.variable(feature_matrix), kernel=K.variable(kernel_matrix),
        strides=(stride_length_px, stride_length_px), padding=padding_string,
        data_format='channels_last')

    return feature_tensor.eval(session=K.get_session()) 
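A minimal usage sketch (shapes chosen for illustration): convolving a single-channel 5 x 5 feature map with one 3 x 3 kernel, unpadded and with stride 1, yields a 1 x 3 x 3 x 1 output:

import numpy

feature_matrix = numpy.random.rand(5, 5, 1)    # M x N x C
kernel_matrix = numpy.random.rand(3, 3, 1, 1)  # m x n x C x c
output = do_2d_convolution(feature_matrix, kernel_matrix,
                           pad_edges=False, stride_length_px=1)
print(output.shape)  # (1, 3, 3, 1) == 1 x (M - m + 1) x (N - n + 1) x c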
Example 77
Project: GewitterGefahr   Author: thunderhoser   File: standalone_utils.py    MIT License 4 votes vote down vote up
def do_3d_convolution(
        feature_matrix, kernel_matrix, pad_edges=False, stride_length_px=1):
    """Convolves 3-D feature maps with 3-D kernel.

    M = number of rows in each input feature map
    N = number of columns in each input feature map
    H = number of height levels in each input feature map
    C = number of input channels
    m = number of rows in kernel
    n = number of columns in kernel
    h = number of height levels in kernel
    c = number of output feature maps (channels)

    :param feature_matrix: Input feature maps (numpy array).  Dimensions must be
        M x N x H x C or 1 x M x N x H x C.
    :param kernel_matrix: Kernel as numpy array.  Dimensions must be
        m x n x h x C x c.
    :param pad_edges: See doc for `do_2d_convolution`.
    :param stride_length_px: See doc for `do_2d_convolution`.
    :return: feature_matrix: Output feature maps (numpy array).  Dimensions will
        be 1 x M x N x H x c or
        1 x (M - m + 1) x (N - n + 1) x (H - h + 1) x c, depending on
        whether or not edges are padded.
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_numpy_array_without_nan(kernel_matrix)
    error_checking.assert_is_numpy_array(kernel_matrix, num_dimensions=5)
    error_checking.assert_is_boolean(pad_edges)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 1)

    if len(feature_matrix.shape) == 4:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)
    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=5)

    if pad_edges:
        padding_string = 'same'
    else:
        padding_string = 'valid'

    feature_tensor = K.conv3d(
        x=K.variable(feature_matrix), kernel=K.variable(kernel_matrix),
        strides=(stride_length_px, stride_length_px, stride_length_px),
        padding=padding_string, data_format='channels_last')

    return feature_tensor.eval(session=K.get_session()) 
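Note that with 'valid' padding and a stride s > 1, each output spatial dimension is floor((D - d) / s) + 1 rather than D - d + 1. For example, an 8 x 8 x 8 x C input with a 3 x 3 x 3 x C x c kernel and stride 2 gives floor((8 - 3) / 2) + 1 = 3 along each axis, i.e. a 1 x 3 x 3 x 3 x c output.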
Example 78
Project: musical-onset-efficient   Author: ronggong   File: bock_crnn_basecode.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def loss_cal(fns, data_path, scaler, model, len_seq):
    """
    Calculate binary cross-entropy loss over a list of files
    :param fns: filenames to evaluate
    :param data_path: path to the feature/label/sample-weight data
    :param scaler: feature scaler
    :param model: trained Keras model
    :param len_seq: sequence length used for padding and batching
    :return: per-sample binary cross-entropy losses
    """
    y_pred_val_all = np.array([], dtype='float32')
    label_val_all = np.array([], dtype='int')

    for fn in fns:

        mfcc_line, label, sample_weights = featureLabelSampleWeightsLoad(data_path,
                                                                         fn,
                                                                         scaler)

        # pad sequence
        mfcc_line_pad, label_pad, sample_weights_pad, len_padded = \
            featureLabelSampleWeightsPad(mfcc_line, label, sample_weights, len_seq)

        iter_time = len(mfcc_line_pad) // len_seq  # integer division so range() also works under Python 3
        for ii_iter in range(iter_time):

            # create tensor from the padded line
            mfcc_line_tensor, label_tensor, _ = \
                createInputTensor(mfcc_line_pad, label_pad, sample_weights_pad, len_seq, ii_iter)

            y_pred = model.predict_on_batch(mfcc_line_tensor)

            # remove the padded samples
            if ii_iter == iter_time - 1 and len_padded > 0:
                y_pred = y_pred[:, :len_seq - len_padded, :]
                label_tensor = label_tensor[:, :len_seq - len_padded, :]

            # reduce the label dimension
            y_pred = y_pred.reshape((y_pred.shape[1],))
            label_tensor = label_tensor.reshape((label_tensor.shape[1],))

            y_pred_val_all = np.append(y_pred_val_all, y_pred)
            label_val_all = np.append(label_val_all, label_tensor)

    y_true = K.variable(label_val_all)
    y_pred = K.variable(y_pred_val_all)

    loss = K.eval(binary_crossentropy(y_true, y_pred))

    return loss 
Example 79
Project: musical-onset-efficient   Author: ronggong   File: jingju_crnn_basecode.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def loss_cal(fns, data_path, scaler, model, len_seq):
    """
    Calculate binary cross-entropy loss over a list of files
    :param fns: filenames to evaluate
    :param data_path: path to the feature/label/sample-weight data
    :param scaler: feature scaler
    :param model: trained Keras model
    :param len_seq: sequence length used for padding and batching
    :return: per-sample binary cross-entropy losses
    """
    y_pred_val_all = np.array([], dtype='float32')
    label_val_all = np.array([], dtype='int')

    for fn in fns:

        mfcc_line, label, sample_weights = featureLabelSampleWeightsLoad(data_path,
                                                                         fn,
                                                                         scaler)

        # pad sequence
        mfcc_line_pad, label_pad, sample_weights_pad, len_padded = \
            featureLabelSampleWeightsPad(mfcc_line, label, sample_weights, len_seq)

        iter_time = len(mfcc_line_pad) // len_seq  # integer division so range() also works under Python 3
        for ii_iter in range(iter_time):

            # create tensor from the padded line
            mfcc_line_tensor, label_tensor, _ = \
                createInputTensor(mfcc_line_pad, label_pad, sample_weights_pad, len_seq, ii_iter)

            y_pred = model.predict_on_batch(mfcc_line_tensor)

            # remove the padded samples
            if ii_iter == iter_time - 1 and len_padded > 0:
                y_pred = y_pred[:, :len_seq - len_padded, :]
                label_tensor = label_tensor[:, :len_seq - len_padded, :]

            # reduce the label dimension
            y_pred = y_pred.reshape((y_pred.shape[1],))
            label_tensor = label_tensor.reshape((label_tensor.shape[1],))

            y_pred_val_all = np.append(y_pred_val_all, y_pred)
            label_val_all = np.append(label_val_all, label_tensor)

    y_true = K.variable(label_val_all)
    y_pred = K.variable(y_pred_val_all)

    loss = K.eval(binary_crossentropy(y_true, y_pred))

    return loss 
Example 80
Project: dynamicgem   Author: Sujit-O   File: dynamic_triad.py    MIT License 4 votes vote down vote up
def make_online(self):
        embedding = K.variable(np.random.uniform(0, 1, (self.dataset.nsize, self.flowargs['embdim'])))
        prevemb = K.placeholder(ndim=2, dtype='float32')  # (nsize, d)
        data = K.placeholder(ndim=2, dtype='int32')  # (batchsize, 5), [k, from_pos, to_pos, from_neg, to_neg]
        weight = K.placeholder(ndim=1, dtype='float32')  # (batchsize, )

        if K._BACKEND == 'theano':
            # (batchsize, d) => (batchsize, )
            # data[:, 0] is assumed to be 0 here, so we simply ignore it
            # caution: per the data-generation procedure, the actual data[:, 0] is not 0
            dist_pos = embedding[data[:, 1]] - embedding[data[:, 2]]
            dist_pos = K.sum(dist_pos * dist_pos, axis=-1)
            dist_neg = embedding[data[:, 3]] - embedding[data[:, 4]]
            dist_neg = K.sum(dist_neg * dist_neg, axis=-1)
        else:
            dist_pos = K.gather(embedding, K.squeeze(K.slice(data, [0, 1], [-1, 1]), axis=1)) - \
                       K.gather(embedding, K.squeeze(K.slice(data, [0, 2], [-1, 1]), axis=1))
            dist_pos = K.sum(dist_pos * dist_pos, axis=-1)
            dist_neg = K.gather(embedding, K.squeeze(K.slice(data, [0, 3], [-1, 1]), axis=1)) - \
                       K.gather(embedding, K.squeeze(K.slice(data, [0, 4], [-1, 1]), axis=1))
            dist_neg = K.sum(dist_neg * dist_neg, axis=-1)

        # (batchsize, )
        margin = 1
        lprox = K.maximum(margin + dist_pos - dist_neg, 0) * weight

        # (1, )
        lprox = K.mean(lprox)

        # lsmooth
        lsmooth = embedding - prevemb  # (nsize, d)
        lsmooth = K.sum(K.square(lsmooth), axis=-1)  # (nsize)
        lsmooth = K.mean(lsmooth)

        loss = lprox + self.flowargs['beta'][0] * lsmooth

        opt = optimizers.get({'class_name': 'Adagrad', 'config': {'lr': self.lr}})
        cstr = {embedding: constraints.get({'class_name': 'maxnorm', 'config': {'max_value': 1, 'axis': 1}})}
        upd = opt.get_updates([embedding], cstr, loss)
        lf = K.function([data, weight, prevemb], [loss], updates=upd)

        return lf, None, [embedding], {}
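In short, make_online assembles a margin-based ranking objective: lprox = mean(weight * max(margin + dist_pos - dist_neg, 0)) pushes each positive pair at least margin closer than its paired negative pair, while lsmooth = mean(||embedding - prevemb||^2) penalizes drift from the previous timestep's embedding. The total loss lprox + beta * lsmooth is minimized with Adagrad under a per-row max-norm constraint on the embedding.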