Python tensorflow.keras.backend.ndim() Examples

The following are 30 code examples of tensorflow.keras.backend.ndim(), collected from open-source projects. The project and source file for each example are noted above it. You may also want to check out the other available functions and classes of the tensorflow.keras.backend module.
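
As background for the snippets below: K.ndim(x) returns the static rank of a tensor as a plain Python int, so the examples can branch on it with ordinary if statements. A minimal sketch, assuming TensorFlow 2.x:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.zeros((2, 3, 4))
print(K.ndim(x))  # 3 -- the static rank, available without evaluating the tensor
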
Example #1
Source File: modes.py    From spektral with MIT License
def autodetect_mode(a, b):
    """
    Return a code identifying the mode of operation (single, mixed, inverted mixed, or
    batch), given a and b. See `ops.modes` for the meaning of the codes.
    :param a: Tensor or SparseTensor.
    :param b: Tensor or SparseTensor.
    :return: mode of operation as an integer code.
    """
    a_dim = K.ndim(a)
    b_dim = K.ndim(b)
    if b_dim == 2:
        if a_dim == 2:
            return SINGLE
        elif a_dim == 3:
            return iMIXED
    elif b_dim == 3:
        if a_dim == 2:
            return MIXED
        elif a_dim == 3:
            return BATCH
    return UNKNOWN 
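
For illustration (assuming spektral's integer constants SINGLE, MIXED, iMIXED, BATCH and UNKNOWN are in scope), the rank combinations map to modes as follows:

import tensorflow as tf

a2 = tf.zeros((5, 5))     # rank 2: a single graph
b3 = tf.zeros((8, 5, 5))  # rank 3: a batch of graphs
# (ndim(a), ndim(b)) -> mode:
#   (2, 2) -> SINGLE    (2, 3) -> MIXED
#   (3, 2) -> iMIXED    (3, 3) -> BATCH
#   anything else -> UNKNOWN
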
Example #2
Source File: utils.py    From neuron with GNU General Public License v3.0
def _softmax(x, axis=-1, alpha=1):
    """
    building on keras implementation, with additional alpha parameter

    Softmax activation function.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a scalar by which to multiply x before applying the softmax
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D') 
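
A quick check of the extra alpha parameter (a usage sketch, assuming K is tensorflow.keras.backend and eager TensorFlow 2.x): larger alpha sharpens the distribution toward the argmax.

import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0]])
print(_softmax(x))             # the standard softmax (ndim == 2 branch)
print(_softmax(x, alpha=5.0))  # sharper: probability mass concentrates on the max
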
Example #3
Source File: backend_keras.py    From kapre with MIT License
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
    """[K] Convert (linear) amplitude to decibel (log10(x)).

    Parameters
    ----------
    x: Keras *batch* tensor or variable. It has to be batch because of sample-wise `K.max()`.

    amin: minimum amplitude. amplitude smaller than `amin` is set to this.

    dynamic_range: dynamic_range in decibel

    """
    log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    if K.ndim(x) > 1:
        axis = tuple(range(K.ndim(x))[1:])
    else:
        axis = None

    log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)  # [-?, 0]
    log_spec = K.maximum(log_spec, -1 * dynamic_range)  # [-80, 0]
    return log_spec 
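
A usage sketch with a purely illustrative random batch (np and K are imported by the source module, as the snippet assumes):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(2, 64, 64).astype('float32'))  # fake spectrogram batch
db = amplitude_to_decibel(x)
# each sample's maximum is mapped to 0 dB; all other values lie in [-80, 0]
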
Example #4
Source File: backend.py    From bert4keras with Apache License 2.0
def sequence_masking(x, mask, mode=0, axis=None):
    """为序列条件mask的函数
    mask: 形如(batch_size, seq_len)的0-1矩阵;
    mode: 如果是0,则直接乘以mask;
          如果是1,则在padding部分减去一个大正数。
    axis: 序列所在轴,默认为1;
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12 
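
A small usage sketch (assuming K is tensorflow.keras.backend): with mode=1, padded timesteps of a (batch_size, seq_len, hidden) tensor are pushed to -1e12, which is useful before a softmax.

import tensorflow as tf

x = tf.zeros((2, 4, 8))                  # (batch_size, seq_len, hidden)
mask = tf.constant([[1., 1., 0., 0.],
                    [1., 1., 1., 0.]])   # (batch_size, seq_len)
y = sequence_masking(x, mask, mode=1)    # padded steps become -1e12
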
Example #5
Source File: RigidTransformation3DImputation.py    From aitom with GNU General Public License v3.0
def _fftshift(self, x, axes=None):
        """
        Shift the zero-frequency component to the center of the spectrum.
        This function swaps half-spaces for all axes listed (defaults to all).
        Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
        Parameters
        ----------
        x : array_like, Tensor
            Input array.
        axes : int or shape tuple, optional
            Axes over which to shift.  Default is None, which shifts all axes.
        Returns
        -------
        y : Tensor.
        """
        if axes is None:
            axes = tuple(range(ndim(x)))
            shift = [dim // 2 for dim in tf.shape(x)]
        elif isinstance(axes, int):
            shift = tf.shape(x)[axes] // 2
        else:
            shift = [tf.shape(x)[ax] // 2 for ax in axes]

        return _roll(x, shift, axes) 
Example #6
Source File: RigidTransformation3DImputation.py    From aitom with GNU General Public License v3.0
def _ifftshift(self, x, axes=None):
        """
        The inverse of `fftshift`. Although identical for even-length `x`, the
        functions differ by one sample for odd-length `x`.
        Parameters
        ----------
        x : array_like, Tensor.
        axes : int or shape tuple, optional
            Axes over which to calculate.  Defaults to None, which shifts all axes.
        Returns
        -------
        y : Tensor.
        """
        if axes is None:
            axes = tuple(range(ndim(x)))
            shift = [-(dim // 2) for dim in tf.shape(x)]
        elif isinstance(axes, int):
            shift = -(tf.shape(x)[axes] // 2)
        else:
            shift = [-(tf.shape(x)[ax] // 2) for ax in axes]

        return _roll(x, shift, axes) 
Example #7
Source File: training.py    From medaka with Mozilla Public License 2.0
def cat_acc(y_true, y_pred):
    """Keras loss function for sparse_categorical_accuracy.

    :param y_true: tensor of true class labels.
    :param y_pred: class output scores from network.

    :returns: categorical accuracy.
    """
    # sparse_categorical_accuracy is broken in keras 2.2.4
    #   https://github.com/keras-team/keras/issues/11348#issuecomment-439969957
    # this is taken from e59570ae
    from tensorflow.keras import backend as K
    # reshape in case it's in shape (num_samples, 1) instead of (num_samples,)
    if K.ndim(y_true) == K.ndim(y_pred):
        y_true = K.squeeze(y_true, -1)
    # convert dense predictions to labels
    y_pred_labels = K.argmax(y_pred, axis=-1)
    y_pred_labels = K.cast(y_pred_labels, K.floatx())
    return K.cast(K.equal(y_true, y_pred_labels), K.floatx()) 
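
A minimal sanity check (eager TensorFlow 2.x):

import tensorflow as tf

y_true = tf.constant([[1.], [0.]])              # shape (num_samples, 1)
y_pred = tf.constant([[0.1, 0.9], [0.3, 0.7]])  # class scores
print(cat_acc(y_true, y_pred))                  # [1., 0.]
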
Example #8
Source File: global_pool.py    From spektral with MIT License
def call(self, inputs):
        if self.data_mode == 'disjoint':
            X, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs
        attn_coeff = K.dot(X, self.attn_kernel)
        attn_coeff = K.squeeze(attn_coeff, -1)
        attn_coeff = K.softmax(attn_coeff)
        if self.data_mode == 'single':
            output = K.dot(attn_coeff[None, ...], X)
        elif self.data_mode == 'batch':
            output = K.batch_dot(attn_coeff, X)
        else:
            output = attn_coeff[:, None] * X
            output = tf.math.segment_sum(output, I)

        return output 
Example #9
Source File: global_pool.py    From spektral with MIT License
def call(self, inputs):
        if self.data_mode == 'disjoint':
            X, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs
        inputs_linear = self.features_layer(X)
        attn = self.attention_layer(X)
        masked_inputs = inputs_linear * attn
        if self.data_mode in {'single', 'batch'}:
            output = K.sum(masked_inputs, axis=-2,
                           keepdims=self.data_mode == 'single')
        else:
            output = tf.math.segment_sum(masked_inputs, I)

        return output 
Example #10
Source File: matmul.py    From spektral with MIT License
def matmul_AT_B(a, b):
    """
    Computes A.T * B, dealing automatically with sparsity and data modes.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    """
    mode = modes.autodetect_mode(a, b)
    if mode == modes.SINGLE or mode == modes.MIXED:
        # Single (rank(a)=2, rank(b)=2)
        # Mixed (rank(a)=2, rank(b)=3)
        a_t = ops.transpose(a)
    elif mode == modes.iMIXED or mode == modes.BATCH:
        # Inverted mixed (rank(a)=3, rank(b)=2)
        # Batch (rank(a)=3, rank(b)=3)
        a_t = ops.transpose(a, (0, 2, 1))
    else:
        raise ValueError('Expected ranks to be 2 or 3, got {} and {}'.format(
            K.ndim(a), K.ndim(b)
        ))

    return matmul_A_B(a_t, b) 
Example #11
Source File: network.py    From DeepPavlov with Apache License 2.0
def gather_indexes(A: tf.Tensor, B: tf.Tensor) -> tf.Tensor:
    """
    Args:
        A: a tensor with data
        B: an integer tensor with indexes

    Returns:
        `answer` a tensor such that ``answer[i, j] = A[i, B[i, j]]``.
        In case `B` is one-dimensional, the output is ``answer[i] = A[i, B[i]]``

    """
    are_indexes_one_dim = (kb.ndim(B) == 1)
    if are_indexes_one_dim:
        B = tf.expand_dims(B, -1)
    first_dim_indexes = tf.expand_dims(tf.range(tf.shape(B)[0]), -1)
    first_dim_indexes = tf.tile(first_dim_indexes, [1, tf.shape(B)[1]])
    indexes = tf.stack([first_dim_indexes, B], axis=-1)
    answer = tf.gather_nd(A, indexes)
    if are_indexes_one_dim:
        answer = answer[:,0]
    return answer 
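
A concrete check of the indexing contract from the docstring:

import tensorflow as tf

A = tf.constant([[10, 11, 12], [20, 21, 22]])
B = tf.constant([[2, 0], [1, 1]])
print(gather_indexes(A, B))  # answer[i, j] = A[i, B[i, j]] -> [[12, 10], [21, 21]]
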
Example #12
Source File: common_tagger.py    From DeepPavlov with Apache License 2.0
def repeat_(x, k):
    tile_factor = [1, k] + [1] * (K.ndim(x) - 1)
    return K.tile(x[:, None, :], tile_factor) 
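
For example (assuming K is tensorflow.keras.backend), repeating a rank-2 tensor along a new second axis:

import tensorflow as tf

x = tf.zeros((2, 3))
# K.ndim(x) == 2, so tile_factor == [1, 4, 1]:
print(repeat_(x, 4).shape)  # (2, 4, 3)
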
Example #13
Source File: cells.py    From DeepPavlov with Apache License 2.0
def call(self, inputs, **kwargs):
        assert isinstance(inputs, list) and len(inputs) == 3
        first, second, features = inputs[0], inputs[1], inputs[2]
        if not self.from_logits:
            first = K.clip(first, 1e-10, 1.0)
            second = K.clip(second, 1e-10, 1.0)
            first_, second_ = K.log(first), K.log(second)
        else:
            first_, second_ = first, second
        # embedded_features.shape = (M, T, 1)
        if self.use_intermediate_layer:
            features = K.dot(features, self.first_kernel)
            features = K.bias_add(features, self.first_bias, data_format="channels_last")
            features = self.intermediate_activation(features)
        embedded_features = K.dot(features, self.features_kernel)
        embedded_features = K.bias_add(
            embedded_features, self.features_bias, data_format="channels_last")
        if self.use_dimension_bias:
            tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]
            embedded_features = K.tile(embedded_features, tiling_shape)
            embedded_features = K.bias_add(
                embedded_features, self.dimensions_bias, data_format="channels_last")
        sigma = K.sigmoid(embedded_features)

        result = weighted_sum(first_, second_, sigma,
                              self.first_threshold, self.second_threshold)
        probs = K.softmax(result)
        if self.return_logits:
            return [probs, result]
        return probs 
Example #14
Source File: cells.py    From DeepPavlov with Apache License 2.0
def TemporalDropout(inputs, dropout=0.0):
    """
    Drops temporal steps of the input 3D tensor with probability `dropout`.
    """
    # TODO: adapt for tensors of rank > 3
    if dropout == 0.0:
        return inputs
    inputs_func = lambda x: K.ones_like(x[:, :, 0:1])
    inputs_mask = Lambda(inputs_func)(inputs)
    inputs_mask = Dropout(dropout)(inputs_mask)
    tiling_shape = [1, 1, K.shape(inputs)[2]] + [1] * (K.ndim(inputs) - 3)
    inputs_mask = Lambda(K.tile, arguments={"n": tiling_shape},
                         output_shape=inputs._keras_shape[1:])(inputs_mask)
    answer = Multiply()([inputs, inputs_mask])
    return answer 
Example #15
Source File: backend.py    From bert4keras with Apache License 2.0
def batch_gather(params, indices):
    """同tf旧版本的batch_gather
    """
    try:
        return tf.gather(params, indices, batch_dims=K.ndim(indices) - 1)
    except Exception as e1:
        try:
            return tf.batch_gather(params, indices)
        except Exception as e2:
            raise ValueError('%s\n%s\n' % (e1, e2))  # avoid Python 2-only .message
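
A small usage sketch (on TensorFlow 2.x the tf.gather branch with batch_dims is taken):

import tensorflow as tf

params = tf.constant([[1, 2, 3], [4, 5, 6]])
indices = tf.constant([[2], [0]])
print(batch_gather(params, indices))  # [[3], [4]] -- one gather per batch row
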
Example #16
Source File: network.py    From DeepPavlov with Apache License 2.0
def biaffine_layer(deps: tf.Tensor, heads: tf.Tensor, deps_dim: int,
                   heads_dim: int, output_dim: int, name: str = "biaffine_layer") -> tf.Tensor:
    """Implements a biaffine layer from [Dozat, Manning, 2016].

    Args:
        deps: the 3D-tensor of dependency states,
        heads: the 3D-tensor of head states,
        deps_dim: the dimension of dependency states,
        heads_dim: the dimension of head_states,
        output_dim: the output dimension
        name: the name of a layer

    Returns:
        `answer` the output 3D-tensor

    """
    input_shape = [kb.shape(deps)[i] for i in range(kb.ndim(deps))]
    first_input = tf.reshape(deps, [-1, deps_dim])  # first_input.shape = (B*L, D1)
    second_input = tf.reshape(heads, [-1, heads_dim])  # second_input.shape = (B*L, D2)
    with tf.variable_scope(name):
        kernel_shape = (deps_dim, heads_dim * output_dim)
        kernel = tf.get_variable('kernel', shape=kernel_shape, initializer=xavier_initializer())
        first = tf.matmul(first_input, kernel)  # (B*L, D2*H)
        first = tf.reshape(first, [-1, heads_dim, output_dim])  # (B*L, D2, H)
        answer = kb.batch_dot(first, second_input, axes=[1, 1])  # (B*L, H)
        first_bias = tf.get_variable('first_bias', shape=(deps_dim, output_dim),
                                     initializer=xavier_initializer())
        answer += tf.matmul(first_input, first_bias)
        second_bias = tf.get_variable('second_bias', shape=(heads_dim, output_dim),
                                      initializer=xavier_initializer())
        answer += tf.matmul(second_input, second_bias)
        label_bias = tf.get_variable('label_bias', shape=(output_dim,),
                                     initializer=xavier_initializer())
        answer = kb.bias_add(answer, label_bias)
        answer = tf.reshape(answer, input_shape[:-1] + [output_dim])  # (B, L, H)
    return answer 
Example #17
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, x):
        axes = self.axes
        if axes is None:
            axes = tuple(range(K.ndim(x)))
            shift = [0] + [-(dim // 2) for dim in x.shape.as_list()[1:-1]] + [0]
        elif isinstance(axes, int):
            shift = -(x.shape[axes] // 2)
        else:
            shift = [-(x.shape[ax] // 2) for ax in axes]

        return _roll(x, shift, axes) 
Example #18
Source File: utils.py    From neuron with GNU General Public License v3.0
def _quilt(patches, patch_size, grid_size, patch_stride, verbose=False, **kwargs):
    assert len(patches.shape) >= 2, "patches has bad shape %s" % pformat(patches.shape)

    # reshape to be [nb_patches x nb_vox]
    patches = np.reshape(patches, (patches.shape[0], -1, 1))

    # quilt
    quilted_vol = pl.quilt(patches, patch_size, grid_size, patch_stride=patch_stride, **kwargs)
    assert quilted_vol.ndim == len(patch_size), "problem with dimensions after quilt"

    # return
    return quilted_vol


# TO MOVE (numpy softmax) 
Example #19
Source File: utils.py    From neuron with GNU General Public License v3.0
def batch_gather(reference, indices):
    """
    Copied from Keras pull request https://github.com/keras-team/keras/pull/6377/files
    
    Batchwise gathering of row indices.

    The numpy equivalent is `reference[np.arange(batch_size), indices]`, where
    `batch_size` is the first dimension of the reference tensor.

    # Arguments
        reference: A tensor with ndim >= 2 of shape
          (batch_size, dim1, dim2, ..., dimN).
        indices: A 1d integer tensor of shape (batch_size) satisfying
          0 <= i < dim1 for each element i.

    # Returns
        The selected tensor with shape (batch_size, dim2, ..., dimN).

    # Examples
        1. If reference is `[[3, 5, 7], [11, 13, 17]]` and indices is `[2, 1]`
        then the result is `[7, 13]`.

        2. If reference is
        ```
          [[[2, 3], [4, 5], [6, 7]],
           [[10, 11], [12, 13], [16, 17]]]
        ```
        and indices is `[2, 1]` then the result is `[[6, 7], [12, 13]]`.
    """
    batch_size = K.shape(reference)[0]
    indices = tf.stack([tf.range(batch_size), indices], axis=1)
    return tf.gather_nd(reference, indices) 
Example #20
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, x):
        axes = self.axes
        if axes is None:
            axes = tuple(range(K.ndim(x)))
            shift = [0] + [dim // 2 for dim in x.shape.as_list()[1:-1]] + [0]  # spatial dims only, matching the variant in Example #17
        elif isinstance(axes, int):
            shift = x.shape[axes] // 2
        else:
            shift = [x.shape[ax] // 2 for ax in axes]

        return _roll(x, shift, axes) 
Example #21
Source File: layers.py    From neuron with GNU General Public License v3.0
def __init__(self, filters,
                 kernel_size,
                 strides=(1, 1, 1),
                 padding='valid',
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        
        super(LocallyConnected3D, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, 3, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        if self.padding != 'valid':
            raise ValueError('Invalid border mode for LocallyConnected3D '
                             '(only "valid" is supported): ' + padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=5) 
Example #22
Source File: message_passing.py    From spektral with MIT License
def get_inputs(inputs):
        if len(inputs) == 3:
            X, A, E = inputs
            assert K.ndim(E) == 2, 'E must have rank 2'
        elif len(inputs) == 2:
            X, A = inputs
            E = None
        else:
            raise ValueError('Expected 2 or 3 inputs tensors (X, A, E), got {}.'
                             .format(len(inputs)))
        assert K.ndim(X) == 2, 'X must have rank 2'
        assert K.is_sparse(A), 'A must be a SparseTensor'
        assert K.ndim(A) == 2, 'A must have rank 2'

        return X, A, E 
Example #23
Source File: global_pool.py    From spektral with MIT License
def call(self, inputs):
        if self.data_mode == 'disjoint':
            X = inputs[0]
            I = inputs[1]
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs

        if self.data_mode == 'disjoint':
            return self.pooling_op(X, I)
        else:
            return self.batch_pooling_op(X, axis=-2, keepdims=(self.data_mode == 'single')) 
Example #24
Source File: matmul.py    From spektral with MIT License
def dot(a, b, transpose_a=False, transpose_b=False):
    """
    Dot product between `a` and `b`, with automatic handling of batch dimensions.
    Supports both dense and sparse multiplication (including sparse-sparse).
    The innermost dimension of `a` must match the outermost dimension of `b`,
    unless there is a shared batch dimension.
    Note that doing sparse-sparse multiplication of any rank and sparse-dense
    multiplication with rank higher than 2 may result in slower computations.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :param transpose_a: bool, transpose innermost two dimensions of a.
    :param transpose_b: bool, transpose innermost two dimensions of b.
    :return: Tensor or SparseTensor with rank 2 or 3.
    """
    a_is_sparse_tensor = isinstance(a, tf.SparseTensor)
    b_is_sparse_tensor = isinstance(b, tf.SparseTensor)

    # Handle case where we can use faster sparse-dense matmul
    if K.ndim(a) == 2 and K.ndim(b) == 2:
        if transpose_a:
            a = ops.transpose(a)
        if transpose_b:
            b = ops.transpose(b)
        if a_is_sparse_tensor and not b_is_sparse_tensor:
            return tf.sparse.sparse_dense_matmul(a, b)
        elif not a_is_sparse_tensor and b_is_sparse_tensor:
            return ops.transpose(
                tf.sparse.sparse_dense_matmul(ops.transpose(b), ops.transpose(a))
            )

    # Fallthrough to tfsp implementation
    # Defaults to tf.matmul if neither is sparse
    if a_is_sparse_tensor:
        a = tfsp.CSRSparseMatrix(a)
    if b_is_sparse_tensor:
        b = tfsp.CSRSparseMatrix(b)
    out = tfsp.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
    if hasattr(out, 'to_sparse_tensor'):
        return out.to_sparse_tensor()

    return out 
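
A quick sketch of the fast sparse-dense path (assuming eager TensorFlow 2.x):

import tensorflow as tf

a = tf.sparse.from_dense(tf.constant([[1., 0.], [0., 2.]]))  # rank-2 SparseTensor
b = tf.constant([[1., 2.], [3., 4.]])
# both ranks are 2 and only `a` is sparse, so dot(a, b) takes the
# tf.sparse.sparse_dense_matmul branch: [[1., 2.], [6., 8.]]
print(dot(a, b))
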
Example #25
Source File: graph.py    From spektral with MIT License
def degree_matrix(A, return_sparse_batch=False):
    """
    Computes the degree matrix of A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :param return_sparse_batch: if operating in batch mode, return a
    SparseTensor. Note that the sparse degree Tensor returned by this function
    cannot be used for sparse matrix multiplication afterwards.
    :return: SparseTensor of rank k.
    """
    D = degrees(A)

    batch_mode = K.ndim(D) == 2
    N = tf.shape(D)[-1]
    batch_size = tf.shape(D)[0] if batch_mode else 1

    inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1))
    if batch_mode:
        if return_sparse_batch:
            outer_index = ops.repeat(
                tf.range(batch_size), tf.ones(batch_size) * tf.cast(N, tf.float32)
            )
            indices = tf.concat([outer_index[:, None], inner_index], 1)
            dense_shape = (batch_size, N, N)
        else:
            return tf.linalg.diag(D)
    else:
        indices = inner_index
        dense_shape = (N, N)

    indices = tf.cast(indices, tf.int64)
    values = tf.reshape(D, (-1,))
    return tf.SparseTensor(indices, values, dense_shape) 
Example #26
Source File: graph.py    From spektral with MIT License
def normalize_A(A):
    """
    Computes symmetric normalization of A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k.
    """
    D = degrees(A)
    D = tf.sqrt(D)[:, None] + K.epsilon()
    perm = (0, 2, 1) if K.ndim(A) == 3 else (1, 0)
    output = (A / D) / ops.transpose(D, perm=perm)

    return output 
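
For intuition, a tiny worked case (assuming degrees() sums the adjacency over its last axis, as elsewhere in spektral):

import tensorflow as tf

A = tf.constant([[0., 2.], [2., 0.]])  # both node degrees are 2
# normalize_A divides each entry a_ij by sqrt(d_i) * sqrt(d_j), so the
# result is approximately [[0., 1.], [1., 0.]] (up to K.epsilon())
print(normalize_A(A))
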
Example #27
Source File: layers.py    From neuron with GNU General Public License v3.0
def build(self, input_shape):
        
        if self.data_format == 'channels_last':
            input_row, input_col, input_z = input_shape[1:-1]
            input_filter = input_shape[4]
        else:
            input_row, input_col, input_z = input_shape[2:]
            input_filter = input_shape[1]
        if input_row is None or input_col is None or input_z is None:
            raise ValueError('The spatial dimensions of the inputs to '
                             'a LocallyConnected3D layer '
                             'should be fully-defined, but layer received '
                             'the inputs shape ' + str(input_shape))
        output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
                                                   self.padding, self.strides[0])
        output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
                                                   self.padding, self.strides[1])
        output_z = conv_utils.conv_output_length(input_z, self.kernel_size[2],
                                                   self.padding, self.strides[2])
        self.output_row = output_row
        self.output_col = output_col
        self.output_z = output_z
        self.kernel_shape = (output_row * output_col * output_z,
                             self.kernel_size[0] *
                             self.kernel_size[1] *
                             self.kernel_size[2] * input_filter,
                             self.filters)
        self.kernel = self.add_weight(shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(output_row, output_col, output_z, self.filters),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        if self.data_format == 'channels_first':
            self.input_spec = InputSpec(ndim=5, axes={1: input_filter})
        else:
            self.input_spec = InputSpec(ndim=5, axes={-1: input_filter})
        self.built = True 
Example #28
Source File: mincut_pool.py    From spektral with MIT License
def call(self, inputs):
        if len(inputs) == 3:
            X, A, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X, A = inputs
            I = None

        # Check if the layer is operating in batch mode (X and A have rank 3)
        batch_mode = K.ndim(X) == 3

        # Compute cluster assignment matrix
        S = self.mlp(X)

        # MinCut regularization
        A_pooled = ops.matmul_AT_B_A(S, A)
        num = tf.linalg.trace(A_pooled)
        D = ops.degree_matrix(A)
        den = tf.linalg.trace(ops.matmul_AT_B_A(S, D)) + K.epsilon()
        cut_loss = -(num / den)
        if batch_mode:
            cut_loss = K.mean(cut_loss)
        self.add_loss(cut_loss)

        # Orthogonality regularization
        SS = ops.matmul_AT_B(S, S)
        I_S = tf.eye(self.k, dtype=SS.dtype)
        ortho_loss = tf.norm(
            SS / tf.norm(SS, axis=(-1, -2), keepdims=True) - I_S / tf.norm(I_S),
            axis=(-1, -2)
        )
        if batch_mode:
            ortho_loss = K.mean(ortho_loss)
        self.add_loss(ortho_loss)

        # Pooling
        X_pooled = ops.matmul_AT_B(S, X)
        A_pooled = tf.linalg.set_diag(
            A_pooled, tf.zeros(K.shape(A_pooled)[:-1], dtype=A_pooled.dtype)
        )  # Remove diagonal
        A_pooled = ops.normalize_A(A_pooled)

        output = [X_pooled, A_pooled]

        if I is not None:
            I_mean = tf.math.segment_mean(I, I)
            I_pooled = ops.repeat(I_mean, tf.ones_like(I_mean) * self.k)
            output.append(I_pooled)

        if self.return_mask:
            output.append(S)

        return output 
Example #29
Source File: topk_pool.py    From spektral with MIT License
def call(self, inputs):
        if len(inputs) == 3:
            X, A, I = inputs
            self.data_mode = 'disjoint'
        else:
            X, A = inputs
            I = tf.zeros(tf.shape(X)[:1])
            self.data_mode = 'single'
        if K.ndim(I) == 2:
            I = I[:, 0]
        I = tf.cast(I, tf.int32)

        A_is_sparse = K.is_sparse(A)

        # Get mask
        y = self.compute_scores(X, A, I)
        N = K.shape(X)[-2]
        indices = ops.segment_top_k(y[:, 0], I, self.ratio, self.top_k_var)
        mask = tf.scatter_nd(tf.expand_dims(indices, 1), tf.ones_like(indices), (N,))

        # Multiply X and y to make layer differentiable
        features = X * self.gating_op(y)

        axis = 0 if len(K.int_shape(A)) == 2 else 1  # Cannot use negative axis in tf.boolean_mask
        # Reduce X
        X_pooled = tf.boolean_mask(features, mask, axis=axis)

        # Reduce A
        A_dense = tf.sparse.to_dense(A) if A_is_sparse else A
        A_pooled = tf.boolean_mask(A_dense, mask, axis=axis)
        A_pooled = tf.boolean_mask(A_pooled, mask, axis=axis + 1)
        if A_is_sparse:
            A_pooled = ops.dense_to_sparse(A_pooled)

        output = [X_pooled, A_pooled]

        # Reduce I
        if self.data_mode == 'disjoint':
            I_pooled = tf.boolean_mask(I[:, None], mask)[:, 0]
            output.append(I_pooled)

        if self.return_mask:
            output.append(mask)

        return output 
Example #30
Source File: deeplabcut.py    From DeepPoseKit with Apache License 2.0
def _preprocess_symbolic_input(x, data_format, mode, **kwargs):
    """Preprocesses a tensor encoding a batch of images.
    # Arguments
        x: Input tensor, 3D or 4D.
        data_format: Data format of the image tensor.
        mode: One of "caffe", "tf" or "torch".
            - caffe: will convert the images from RGB to BGR,
                then will zero-center each color channel with
                respect to the ImageNet dataset,
                without scaling.
            - tf: will scale pixels between -1 and 1,
                sample-wise.
            - torch: will scale pixels between 0 and 1 and then
                will normalize each channel with respect to the
                ImageNet dataset.
    # Returns
        Preprocessed tensor.
    """

    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x

    if mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if backend.ndim(x) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    mean_tensor = backend.constant(-np.array(mean))

    # Zero-center by mean pixel
    if backend.dtype(x) != backend.dtype(mean_tensor):
        x = backend.bias_add(
            x, backend.cast(mean_tensor, backend.dtype(x)), data_format=data_format
        )
    else:
        x = backend.bias_add(x, mean_tensor, data_format)
    if std is not None:
        x /= std
    return x
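
A minimal usage sketch (backend is tensorflow.keras.backend and np is numpy, as imported by the source file):

import tensorflow as tf

x = tf.zeros((1, 224, 224, 3))
y = _preprocess_symbolic_input(x, data_format="channels_last", mode="caffe")
# channels are flipped RGB -> BGR, then the ImageNet mean pixel is subtracted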