Python keras.backend.stack() Examples

The following are 30 code examples showing how to use keras.backend.stack(). They are extracted from open source projects; you can go to the original project or source file via the reference line above each example.

You may also want to check out all available functions/classes of the module keras.backend, or try the search function.
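
As a quick orientation before the examples: K.stack() joins a list of same-shaped tensors along a new axis (on the TensorFlow backend it is a thin wrapper around tf.stack). Below is a minimal sketch, assuming the standalone keras package with a TensorFlow backend:

import numpy as np
from keras import backend as K

# Two tensors with identical shape (2, 2).
a = K.constant(np.arange(4).reshape(2, 2))
b = K.constant(np.arange(4, 8).reshape(2, 2))

# Stacking along a new leading axis yields shape (2, 2, 2).
stacked = K.stack([a, b], axis=0)
print(K.int_shape(stacked))    # (2, 2, 2)

# axis=-1 adds a new trailing axis instead, pairing elements of a and b
# per position (compare Example 10's K.stack([y, u], axis=-1)).
trailing = K.stack([a, b], axis=-1)
print(K.eval(trailing)[0, 0])  # [0. 4.]

All inputs must share the same shape; to join tensors along an existing axis, use K.concatenate() instead.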

Example 1
Project: Dropout_BBalpha   Author: YingzhenLi   File: BBalpha_dropout.py    License: MIT License
def GenerateMCSamples(inp, layers, K_mc=20):
    if K_mc == 1:
        return apply_layers(inp, layers)
    output_list = []
    for _ in range(K_mc):
        output_list += [apply_layers(inp, layers)]  # THIS IS BAD!!! we create new dense layers at every call!!!!
    def pack_out(output_list):
        #output = K.pack(output_list) # K_mc x nb_batch x nb_classes
        output = K.stack(output_list) # K_mc x nb_batch x nb_classes
        return K.permute_dimensions(output, (1, 0, 2)) # nb_batch x K_mc x nb_classes
    def pack_shape(s):
        s = s[0]
        assert len(s) == 2
        return (s[0], K_mc, s[1])
    out = Lambda(pack_out, output_shape=pack_shape)(output_list)
    return out

# evaluation for classification tasks 
Example 2
Project: voxelmorph   Author: voxelmorph   File: test_unsupervised_segmentation.py    License: GNU General Public License v3.0
def posterior(atlas_full, ull_pred, flow, fns, max_feats):
    """
    gpu-based implementation of posterior
    given original full atlas and unnormalized log likelihood, warps atlas and computes (normalized) posterior

    variables should be normal format, *not* keras format (i.e. not in batch)

    unfortunately, since full atlases can be quite large (many labels),
    we loop over groups of at most `max_feats` labels and stack at the end
    """

    # run through label groups
    # creating lists and concatenating at the end seems to be more efficient than
    # filling in rows of data of a large array.
    post = []
    warped_atlas = []
    for li, i in enumerate(range(0, atlas_full.shape[-1], max_feats)):
        slc = slice(i, min(i + max_feats, atlas_full.shape[-1]))
        po, wa = fns[li]([atlas_full[...,slc], ull_pred, flow])
        post.append(po)
        warped_atlas.append(wa)

    return np.concatenate(post, -1), np.concatenate(warped_atlas, -1) 
Example 3
Project: costar_plan   Author: jhu-lcsr   File: robot_multi_models.py    License: Apache License 2.0
def MakeStacked(ins, x, num_to_stack):
    '''
    Stacked latent representations -- for temporal convolutions in particular
    '''
    new_ins = []
    new_xs = []
    x = Model(ins, x)
    for i in range(num_to_stack):
        new_x_ins = []
        for inx in ins:
            new_x_ins.append(Input(inx.shape[1:]))
        new_ins += new_x_ins
        new_xs.append(x(new_x_ins))
    x = Lambda(lambda x: K.stack(x, axis=2))(new_xs)

    return new_ins, x 
Example 4
Project: deepcaps   Author: brjathu   File: capslayers.py    License: MIT License
def call(self, inputs, **kwargs):
        if isinstance(inputs, list):  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, a = inputs
            mask = K.argmax(a, 1)
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.argmax(x, 1)

        increasing = tf.range(start=0, limit=tf.shape(inputs)[0], delta=1)
        m = tf.stack([increasing, tf.cast(mask, tf.int32)], axis=1)
        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        # x1 = tf.transpose(inputs, (0))
        masked = tf.gather_nd(inputs, m)

        return masked 
Example 5
Project: mhcflurry   Author: openvax   File: custom_loss.py    License: Apache License 2.0
def loss(self, y_true, y_pred):
        from keras import backend as K
        y_true = K.flatten(y_true)

        output_indices = y_true // 10
        updated_y_true = y_true - (10 * output_indices)

        # We index into y_pred using flattened indices since Keras backend
        # supports gather but has no equivalent of tf.gather_nd:
        ordinals = K.arange(K.shape(y_true)[0])
        flattened_indices = (
            ordinals * y_pred.shape[1] + K.cast(output_indices, "int32"))
        updated_y_pred = K.gather(K.flatten(y_pred), flattened_indices)

        # Alternative implementation using tensorflow, which could be used if
        # we drop support for other backends:
        # import tensorflow as tf
        # indexer = K.stack([
        #     ordinals,
        #     K.cast(output_indices, "int32")
        # ], axis=-1)
        #updated_y_pred = tf.gather_nd(y_pred, indexer)

        return MSEWithInequalities().loss(updated_y_true, updated_y_pred) 
Example 6
Project: chemical_vae   Author: aspuru-guzik-group   File: tgru_k2_gpu.py    License: Apache License 2.0
def output_sampling(self, output, rand_matrix):
        # Generates a sampled selection based on raw output state vector
        # Creates a cdf vector and compares against a randomly generated vector
        # Requires a pre-generated rand_matrix (i.e. generated outside step function)

        sampled_output = output / K.sum(output, axis=-1, keepdims=True)  # (batch_size, self.units)
        mod_sampled_output = sampled_output / K.exp(self.temperature)
        norm_exp_sampled_output = mod_sampled_output / K.sum(mod_sampled_output, axis=-1, keepdims=True)

        cdf_vector = K.cumsum(norm_exp_sampled_output, axis=-1)
        cdf_minus_vector = cdf_vector - norm_exp_sampled_output

        rand_matrix = K.stack([rand_matrix], axis=0)
        rand_matrix = K.stack([rand_matrix], axis=2)

        compared_greater_output = K.cast(K.greater(cdf_vector, rand_matrix), dtype='float32')
        compared_lesser_output = K.cast(K.less(cdf_minus_vector, rand_matrix), dtype='float32')

        final_output = compared_greater_output * compared_lesser_output
        return final_output 
Example 7
Project: PiCamNN   Author: PiSimo   File: keras_yolo.py    License: MIT License
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes 
Example 8
Project: object-detection   Author: kaka-lin   File: yolo_utils.py    License: MIT License
def scale_boxes(boxes, image_shape):
    """ Scales the predicted boxes in order to be drawable on the image"""
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims
    return boxes 
Example 9
Project: object-detection   Author: kaka-lin   File: keras_yolo.py    License: MIT License
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        box_confidence, boxes, box_class_probs, threshold=score_threshold)
    
    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    
    return boxes, scores, classes 
Example 10
Project: wtte-rnn   Author: ragulpr   File: test_keras.py    License: MIT License
def test_keras_unstack_hack():
    y_true_np = np.random.random([1, 3, 2])
    y_true_np[:, :, 0] = 0
    y_true_np[:, :, 1] = 1

    y_true_keras = K.variable(y_true_np)

    y, u = wtte._keras_unstack_hack(y_true_keras)
    y_true_keras_new = K.stack([y, u], axis=-1)

    np.testing.assert_array_equal(K.eval(y_true_keras_new), y_true_np)

# SANITY CHECK: Use pure Weibull data censored at C(ensoring point).
# Should converge to the generating A(alpha) and B(eta) for each timestep 
Example 11
Project: neural-style-keras   Author: robertomest   File: training.py    License: MIT License
def gram_matrix(x, norm_by_channels=False):
    '''
    Returns the Gram matrix of the tensor x.
    '''
    if K.ndim(x) == 3:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
        shape = K.shape(x)
        C, H, W = shape[0], shape[1], shape[2]
        gram = K.dot(features, K.transpose(features))
    elif K.ndim(x) == 4:
        # Swap from (H, W, C) to (B, C, H, W)
        x = K.permute_dimensions(x, (0, 3, 1, 2))
        shape = K.shape(x)
        B, C, H, W = shape[0], shape[1], shape[2], shape[3]
        # Reshape as a batch of 2D matrices with vectorized channels
        features = K.reshape(x, K.stack([B, C, H*W]))
        # This is a batch of Gram matrices (B, C, C).
        gram = K.batch_dot(features, features, axes=2)
    else:
        raise ValueError('The input tensor should be either a 3d (H, W, C) or 4d (B, H, W, C) tensor.')
    # Normalize the Gram matrix
    if norm_by_channels:
        denominator = C * H * W # Normalization from Johnson
    else:
        denominator = H * W # Normalization from Google
    gram = gram / K.cast(denominator, x.dtype)

    return gram 
Example 12
Project: keras-centernet   Author: see--   File: decode.py    License: MIT License
def _ctdet_decode(hm, reg, wh, k=100, output_stride=4):
  hm = K.sigmoid(hm)
  hm = _nms(hm)
  hm_shape = K.shape(hm)
  reg_shape = K.shape(reg)
  wh_shape = K.shape(wh)
  batch, width, cat = hm_shape[0], hm_shape[2], hm_shape[3]

  hm_flat = K.reshape(hm, (batch, -1))
  reg_flat = K.reshape(reg, (reg_shape[0], -1, reg_shape[-1]))
  wh_flat = K.reshape(wh, (wh_shape[0], -1, wh_shape[-1]))

  def _process_sample(args):
    _hm, _reg, _wh = args
    _scores, _inds = tf.math.top_k(_hm, k=k, sorted=True)
    _classes = K.cast(_inds % cat, 'float32')
    _inds = K.cast(_inds / cat, 'int32')
    _xs = K.cast(_inds % width, 'float32')
    _ys = K.cast(K.cast(_inds / width, 'int32'), 'float32')
    _wh = K.gather(_wh, _inds)
    _reg = K.gather(_reg, _inds)

    _xs = _xs + _reg[..., 0]
    _ys = _ys + _reg[..., 1]

    _x1 = _xs - _wh[..., 0] / 2
    _y1 = _ys - _wh[..., 1] / 2
    _x2 = _xs + _wh[..., 0] / 2
    _y2 = _ys + _wh[..., 1] / 2

    # rescale to image coordinates
    _x1 = output_stride * _x1
    _y1 = output_stride * _y1
    _x2 = output_stride * _x2
    _y2 = output_stride * _y2

    _detection = K.stack([_x1, _y1, _x2, _y2, _scores, _classes], -1)
    return _detection

  detections = K.map_fn(_process_sample, [hm_flat, reg_flat, wh_flat], dtype=K.floatx())
  return detections 
Example 13
Project: FSA-Net   Author: shamangary   File: layers.py    License: Apache License 2.0
def _make_regular_grids(self, batch_size, height, width):
        # making a single regular grid
        x_linspace = K_linspace(-1., 1., width)
        y_linspace = K_linspace(-1., 1., height)
        x_coordinates, y_coordinates = K_meshgrid(x_linspace, y_linspace)
        x_coordinates = K.flatten(x_coordinates)
        y_coordinates = K.flatten(y_coordinates)
        ones = K.ones_like(x_coordinates)
        grid = K.concatenate([x_coordinates, y_coordinates, ones], 0)

        # repeating grids for each batch
        grid = K.flatten(grid)
        grids = K.tile(grid, K.stack([batch_size]))
        return K.reshape(grids, (batch_size, 3, height * width)) 
Example 14
Project: social_lstm_keras_tf   Author: t2kasa   File: my_social_model.py    License: GNU General Public License v3.0
def _stack_permute_axis_zero(xs):
    xs = Lambda(lambda xs: K.stack(xs, axis=0))(xs)

    # axes (0, 1) are permuted
    perm = [1, 0] + list(range(2, xs.shape.ndims))
    xs = Lambda(lambda xs: K.permute_dimensions(xs, perm))(xs)
    return xs 
Example 15
Project: social_lstm_keras_tf   Author: t2kasa   File: tf_normal_sampler.py    License: GNU General Public License v3.0
def _to_normal2d(output_batch) -> ds.MultivariateNormalTriL:
    """
    :param output_batch: (n_samples, 5)
    :return
    """

    # mean of x and y
    x_mean = Lambda(lambda o: o[:, 0])(output_batch)
    y_mean = Lambda(lambda o: o[:, 1])(output_batch)

    # std of x and y
    # std must be zero or positive
    x_std = Lambda(lambda o: K.exp(o[:, 2]))(output_batch)
    y_std = Lambda(lambda o: K.exp(o[:, 3]))(output_batch)

    # correlation coefficient
    # correlation coefficient range is [-1, 1]
    cor = Lambda(lambda o: K.tanh(o[:, 4]))(output_batch)

    loc = Concatenate()([
        Lambda(lambda x_mean: K.expand_dims(x_mean, 1))(x_mean),
        Lambda(lambda y_mean: K.expand_dims(y_mean, 1))(y_mean)
    ])

    x_var = Lambda(lambda x_std: K.square(x_std))(x_std)
    y_var = Lambda(lambda y_std: K.square(y_std))(y_std)
    xy_cor = Multiply()([x_std, y_std, cor])

    cov = Lambda(lambda inputs: K.stack(inputs, axis=0))(
        [x_var, xy_cor, xy_cor, y_var])
    cov = Lambda(lambda cov: K.permute_dimensions(cov, (1, 0)))(cov)
    cov = Reshape((2, 2))(cov)

    scale_tril = Lambda(lambda cov: tf.cholesky(cov))(cov)
    mvn = ds.MultivariateNormalTriL(loc, scale_tril)

    return mvn 
Example 16
Project: keras-gcnn   Author: basveeling   File: pooling.py    License: MIT License
def call(self, x):
        shape = K.shape(x)
        stack_shape = K.stack([shape[0], shape[1], shape[2], shape[3] // self.nti, self.nti])
        input_reshaped = K.reshape(x, stack_shape)
        mean_per_group = K.mean(input_reshaped, -1)
        return mean_per_group 
Example 17
Project: Keras-TextClassification   Author: yongzhuo   File: triangle_position_embedding.py    License: MIT License
def call(self, inputs, mask=None):
        input_shape = K.shape(inputs)
        if self.mode == self.MODE_ADD:
            batch_size, seq_len, output_dim = input_shape[0], input_shape[1], input_shape[2]
            pos_input = K.tile(K.expand_dims(K.arange(seq_len), axis=0), [batch_size, 1])
        elif self.mode == self.MODE_CONCAT:
            batch_size, seq_len, output_dim = input_shape[0], input_shape[1], self.output_dim
            pos_input = K.tile(K.expand_dims(K.arange(seq_len), axis=0), [batch_size, 1])
        else:
            output_dim = self.output_dim
            pos_input = inputs
        if K.dtype(pos_input) != K.floatx():
            pos_input = K.cast(pos_input, K.floatx())
        evens = K.arange(output_dim // 2) * 2
        odds = K.arange(output_dim // 2) * 2 + 1
        even_embd = K.sin(
            K.dot(
                K.expand_dims(pos_input, -1),
                K.expand_dims(1.0 / K.pow(
                    10000.0,
                    K.cast(evens, K.floatx()) / K.cast(output_dim, K.floatx())
                ), 0)
            )
        )
        odd_embd = K.cos(
            K.dot(
                K.expand_dims(pos_input, -1),
                K.expand_dims(1.0 / K.pow(
                    10000.0, K.cast((odds - 1), K.floatx()) / K.cast(output_dim, K.floatx())
                ), 0)
            )
        )
        embd = K.stack([even_embd, odd_embd], axis=-1)
        output = K.reshape(embd, [-1, K.shape(inputs)[1], output_dim])
        if self.mode == self.MODE_CONCAT:
            output = K.concatenate([inputs, output], axis=-1)
        if self.mode == self.MODE_ADD:
            output += inputs
        return output 
Example 18
Project: maskrcnn   Author: shtamura   File: loss.py    License: MIT License
def head_offsets_loss(gt_offsets, gt_labels, pred_offsets):
    """ヘッドのオフセット回帰の損失関数
    positive(gt_fg > 0)データのみ評価対象とする

    gt_offsets: 正解オフセット
        [N, R, 4]
    gt_labels: 正解データのラベルID
        [N, R]
    pred_offsets: ラベル毎の予測値
        [N, R, n_labels, 4].
    """

    # 正解データのラベルIDに対応するオフセットのみを損失評価対象とする。
    # 論文には以下のようにあるので、正解ラベルのBBoxのみで良さそう。
    # The second task loss, Lloc, is defined over a tuple of true bounding-box
    # regression targets for class u, v = (vx, vy, vw, vh), and a predicted
    # tuple tu = (tux , tuy , tuw, tuh ), again for class u.
    pos_idx = tf.where(gt_labels > 0)
    i = K.cast(pos_idx[:, 0], tf.int32)
    j = K.cast(pos_idx[:, 1], tf.int32)
    k = K.cast(tf.gather_nd(gt_labels, pos_idx), tf.int32)
    pos_pred_idx = K.stack((i, j, k), axis=1)
    pred_offsets = tf.gather_nd(pred_offsets, pos_pred_idx)
    gt_offsets = tf.gather_nd(gt_offsets, pos_idx)

    loss = offsets_loss(gt_offsets, pred_offsets)
    loss = log.tfprint(loss, "head_offsets_loss")
    return loss 
Example 19
Project: maskrcnn   Author: shtamura   File: loss.py    License: MIT License
def head_mask_loss(gt_masks, gt_labels, pred_masks):
    """マスクの損失関数

    gt_masks: 正解データ。
        マスクデータをbboxの領域のみ切り抜いてconfig.mask_out_shapeにリサイズしたデータ。
        [N, R, h, w]
        バイナリマスク
    gt_labels: 正解データのラベルID
        [N, R]
    pred_masks: 予測値
        バイナリマスク
        [N, R, n_labels h, w]
    ※h, w は config.mask_out_shape になる。
    """
    # Positiveなラベルが付与されているRoIのみ評価対象とする
    pos_idx = tf.where(gt_labels > 0)
    i = K.cast(pos_idx[:, 0], tf.int32)
    j = K.cast(pos_idx[:, 1], tf.int32)
    k = K.cast(tf.gather_nd(gt_labels, pos_idx), tf.int32)
    # i = log.tfprint(i, "i:head_mask_loss")
    # j = log.tfprint(j, "j:head_mask_loss")
    # k = log.tfprint(k, "k:head_mask_loss")
    pos_pred_idx = K.stack((i, j, k), axis=1)
    # pos_pred_idx = log.tfprint(pos_pred_idx, "pos_pred_idx:head_mask_loss")
    pred_masks = tf.gather_nd(pred_masks, pos_pred_idx)
    gt_masks = tf.gather_nd(gt_masks, pos_idx)

    loss = K.switch(tf.size(gt_masks) > 0,
                    K.binary_crossentropy(gt_masks, pred_masks),
                    tf.constant(0.0))
    loss = K.mean(loss)
    loss = log.tfprint(loss, "head_mask_loss")
    return loss 
Example 20
Project: n-beats   Author: philipperemy   File: model.py    License: MIT License
def seasonality_model(thetas, backcast_length, forecast_length, is_forecast):
    p = thetas.get_shape().as_list()[-1]
    p1, p2 = (p // 2, p // 2) if p % 2 == 0 else (p // 2, p // 2 + 1)
    t = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
    s1 = K.stack([K.cos(2 * np.pi * i * t) for i in range(p1)], axis=0)
    s2 = K.stack([K.sin(2 * np.pi * i * t) for i in range(p2)], axis=0)
    if p == 1:
        s = s2
    else:
        s = K.concatenate([s1, s2], axis=0)
    s = K.cast(s, np.float32)
    return K.dot(thetas, s) 
Example 21
Project: n-beats   Author: philipperemy   File: model.py    License: MIT License
def trend_model(thetas, backcast_length, forecast_length, is_forecast):
    p = thetas.shape[-1]
    t = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
    t = K.transpose(K.stack([t ** i for i in range(p)], axis=0))
    t = K.cast(t, np.float32)
    return K.dot(thetas, K.transpose(t)) 
Example 22
Project: recurrent-attention-for-QA-SQUAD-based-on-keras   Author: wentaozhu   File: rnnlayer.py    License: MIT License
def time_distributed_dense(x, w, b=None, dropout=None,
                           input_dim=None, units=None, timesteps=None):
    """Apply `y . w + b` for every temporal slice y of x.
    # Arguments
        x: input tensor.
        w: weight matrix.
        b: optional bias vector.
        dropout: whether to apply dropout (same dropout mask
            for every temporal slice of the input).
        input_dim: integer; optional dimensionality of the input.
        units: integer; optional dimensionality of the output.
        timesteps: integer; optional number of timesteps.
    # Returns
        Output tensor.
    """
    if not input_dim:
        input_dim = K.shape(x)[2]
    if not timesteps:
        timesteps = K.shape(x)[1]
    if not units:
        units = K.shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))
    x = K.dot(x, w)
    if b is not None:
        x += b
    # reshape to 3D tensor
    if K.backend() == 'tensorflow':
        x = K.reshape(x, K.stack([-1, timesteps, units]))
        x.set_shape([None, None, units])
    else:
        x = K.reshape(x, (-1, timesteps, units))
    return x 
Example 23
Project: bidaf-keras   Author: ParikhKadam   File: combine_outputs.py    License: GNU General Public License v3.0
def call(self, inputs):
        span_begin_probabilities, span_end_probabilities = inputs
        return K.stack([span_begin_probabilities, span_end_probabilities], axis=1) 
Example 24
Project: deepcaps   Author: brjathu   File: capslayers.py    License: MIT License
def call(self, input_tensor, training=None):

        input_transposed = tf.transpose(input_tensor, [0, 3, 4, 1, 2])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_tensor, [input_shape[0], 1, self.input_num_capsule * self.input_num_atoms, self.input_height, self.input_width])

        input_tensor_reshaped.set_shape((None, 1, self.input_num_capsule * self.input_num_atoms, self.input_height, self.input_width))

        # conv = Conv3D(input_tensor_reshaped, self.W, (self.strides, self.strides),
        #                 padding=self.padding, data_format='channels_first')

        conv = K.conv3d(input_tensor_reshaped, self.W, strides=(self.input_num_atoms, self.strides, self.strides), padding=self.padding, data_format='channels_first')

        votes_shape = K.shape(conv)
        _, _, _, conv_height, conv_width = conv.get_shape()
        conv = tf.transpose(conv, [0, 2, 1, 3, 4])
        votes = K.reshape(conv, [input_shape[0], self.input_num_capsule, self.num_capsule, self.num_atoms, votes_shape[3], votes_shape[4]])
        votes.set_shape((None, self.input_num_capsule, self.num_capsule, self.num_atoms, conv_height.value, conv_width.value))

        logit_shape = K.stack([input_shape[0], self.input_num_capsule, self.num_capsule, votes_shape[3], votes_shape[4]])
        biases_replicated = K.tile(self.b, [1, 1, conv_height.value, conv_width.value])

        activations = update_routing(
            votes=votes,
            biases=biases_replicated,
            logit_shape=logit_shape,
            num_dims=6,
            input_dim=self.input_num_capsule,
            output_dim=self.num_capsule,
            num_routing=self.routings)

        a2 = tf.transpose(activations, [0, 3, 4, 1, 2])
        return a2 
Example 25
Project: enet-keras   Author: PavlosMelissinos   File: pooling.py    License: MIT License
def call(self, inputs, output_shape=None):
        """
        Seen on https://github.com/tensorflow/tensorflow/issues/2169
        Replace with unpool op when/if issue merged
        Add theano backend
        """
        updates, mask = inputs[0], inputs[1]
        with K.tf.variable_scope(self.name):
            mask = K.cast(mask, 'int32')
            input_shape = K.tf.shape(updates, out_type='int32')
            #  calculation new shape
            if output_shape is None:
                output_shape = (input_shape[0], input_shape[1] * self.size[0], input_shape[2] * self.size[1], input_shape[3])
            self.output_shape1 = output_shape

            # calculation indices for batch, height, width and feature maps
            one_like_mask = K.ones_like(mask, dtype='int32')
            batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
            batch_range = K.reshape(K.tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
            b = one_like_mask * batch_range
            y = mask // (output_shape[2] * output_shape[3])
            x = (mask // output_shape[3]) % output_shape[2]
            feature_range = K.tf.range(output_shape[3], dtype='int32')
            f = one_like_mask * feature_range

            # transpose indices & reshape update values to one dimension
            updates_size = K.tf.size(updates)
            indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
            values = K.reshape(updates, [updates_size])
            ret = K.tf.scatter_nd(indices, values, output_shape)
            return ret 
Example 26
Project: YOLO-Pi   Author: CiscoBlockChain   File: keras_yolo.py    License: Apache License 2.0
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes 
Example 27
Project: SegCaps   Author: lalonderodney   File: capsule_layers.py    License: Apache License 2.0
def call(self, input_tensor, training=None):

        input_transposed = tf.transpose(input_tensor, [3, 0, 1, 2, 4])
        input_shape = K.shape(input_transposed)
        input_tensor_reshaped = K.reshape(input_transposed, [
            input_shape[0] * input_shape[1], self.input_height, self.input_width, self.input_num_atoms])
        input_tensor_reshaped.set_shape((None, self.input_height, self.input_width, self.input_num_atoms))

        conv = K.conv2d(input_tensor_reshaped, self.W, (self.strides, self.strides),
                        padding=self.padding, data_format='channels_last')

        votes_shape = K.shape(conv)
        _, conv_height, conv_width, _ = conv.get_shape()

        votes = K.reshape(conv, [input_shape[1], input_shape[0], votes_shape[1], votes_shape[2],
                                 self.num_capsule, self.num_atoms])
        votes.set_shape((None, self.input_num_capsule, conv_height.value, conv_width.value,
                         self.num_capsule, self.num_atoms))

        logit_shape = K.stack([
            input_shape[1], input_shape[0], votes_shape[1], votes_shape[2], self.num_capsule])
        biases_replicated = K.tile(self.b, [conv_height.value, conv_width.value, 1, 1])

        activations = update_routing(
            votes=votes,
            biases=biases_replicated,
            logit_shape=logit_shape,
            num_dims=6,
            input_dim=self.input_num_capsule,
            output_dim=self.num_capsule,
            num_routing=self.routings)

        return activations 
Example 28
Project: uncertainty-adversarial-paper   Author: lsgos   File: utilities.py    License: MIT License
def predict(self, X):
        mc_preds = np.concatenate([np.stack([m.predict(X) for _ in range(self.n_mc)])
                                   for m in self.ms], axis=0)
        return mc_preds.mean(axis=0) 
Example 29
Project: uncertainty-adversarial-paper   Author: lsgos   File: utilities.py    License: MIT License
def get_results(self, X):
        mc_preds = np.concatenate([np.stack([m.predict(X) for _ in range(self.n_mc)])
                                   for m in self.ms], axis=0)
        preds = mc_preds.mean(axis=0)
        ent = -1 * np.sum(preds * np.log(preds + 1e-10), axis=-1)
        bald = ent - np.mean(-1 * np.sum(mc_preds * np.log(mc_preds + 1e-10), axis=-1), axis=0)
        return preds, ent, bald 
Example 30
Project: uncertainty-adversarial-paper   Author: lsgos   File: utilities.py    License: MIT License
def __call__(self, X):
        """
        Returns the mean prediction of the entire ensemble as a keras tensor to allow differentiation
        """
        return K.mean(
            K.stack(
                [K.mean(mc_dropout_preds(m, X, n_mc=self.n_mc), axis=0)
                     for m in self.ms]
            ),
            axis=0)