Python keras.backend.cos() Examples

The following are 17 code examples of keras.backend.cos(), drawn from open-source projects; the source file and originating project are noted above each example. You may also want to check out the other available functions and classes of the keras.backend module.
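Before the project examples, here is a minimal, self-contained sketch (not taken from any project below, and assuming a Keras 2.x installation with a TensorFlow backend) of what keras.backend.cos() computes:

import numpy as np
from keras import backend as K

# Element-wise cosine of a constant tensor of angles given in radians.
angles = K.constant(np.array([0.0, np.pi / 2, np.pi], dtype=np.float32))
cosines = K.cos(angles)
print(K.eval(cosines))  # approximately [ 1.  0. -1.]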
Example #1
Source File: position_embedding.py    From nlp_toolkit with MIT License
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000.,
                                2 * K.arange(self.size / 2, dtype='float32'
                                             ) / self.size)
        position_j = K.expand_dims(position_j, 0)
        # K.arange does not support a variable length, so the positions are generated this way instead
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate(
            [K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
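A rough NumPy re-creation of the encoding that call() builds above may help; the sequence length and embedding size here are arbitrary illustration values, not taken from the project:

import numpy as np

seq_len, size = 5, 4
# Inverse frequencies, positions, and their outer product, mirroring position_j, position_i, position_ij.
position_j = 1.0 / np.power(10000.0, 2 * np.arange(size / 2, dtype='float32') / size)
position_i = np.arange(seq_len, dtype='float32')[:, None]
position_ij = position_i * position_j[None, :]
encoding = np.concatenate([np.cos(position_ij), np.sin(position_ij)], axis=-1)
print(encoding.shape)  # (5, 4): one cosine block and one sine block per position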
Example #2
Source File: data_utils.py    From RecurrentGaze with MIT License
def euler2rot_mat(euler_angles):
    """
    Convert Euler angles to a rotation matrix, using the XYZ convention R = Rx * Ry * Rz, left-handed positive sign
    (from OpenFace).
    :param euler_angles: Euler angles in radians (3 elements)
    :return: rotation matrix
    """
    s1 = np.sin(euler_angles[0])
    s2 = np.sin(euler_angles[1])
    s3 = np.sin(euler_angles[2])
    c1 = np.cos(euler_angles[0])
    c2 = np.cos(euler_angles[1])
    c3 = np.cos(euler_angles[2])

    rot_mat = np.empty((3,3), dtype=np.float32)
    rot_mat[0, 0] = c2 * c3
    rot_mat[0, 1] = -c2 * s3
    rot_mat[0, 2] = s2
    rot_mat[1, 0] = c1 * s3 + c3 * s1 * s2
    rot_mat[1, 1] = c1 * c3 - s1 * s2 * s3
    rot_mat[1, 2] = -c2 * s1
    rot_mat[2, 0] = s1 * s3 - c1 * c3 * s2
    rot_mat[2, 1] = c3 * s1 + c1 * s2 * s3
    rot_mat[2, 2] = c1 * c2
    return np.linalg.inv(rot_mat) 
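A minimal usage sketch (angle values chosen arbitrarily; euler2rot_mat expects radians since it calls np.sin/np.cos directly):

import numpy as np

euler = np.radians([30.0, 15.0, -45.0]).astype(np.float32)
R = euler2rot_mat(euler)
print(R.shape)                                      # (3, 3)
print(np.allclose(R @ R.T, np.eye(3), atol=1e-5))   # rotation matrices are orthonormal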
Example #3
Source File: position.py    From keras-transformer with MIT License
def positional_signal(hidden_size: int, length: int,
                      min_timescale: float = 1.0, max_timescale: float = 1e4):
    """
    Helper function, constructing basic positional encoding.
    The code is partially based on implementation from Tensor2Tensor library
    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/layers/common_attention.py
    """

    if hidden_size % 2 != 0:
        raise ValueError(
            f"The hidden dimension of the model must be divisible by 2."
            f"Currently it is {hidden_size}")
    position = K.arange(0, length, dtype=K.floatx())
    num_timescales = hidden_size // 2
    log_timescale_increment = K.constant(
        (np.log(float(max_timescale) / float(min_timescale)) /
         (num_timescales - 1)),
        dtype=K.floatx())
    inv_timescales = (
            min_timescale *
            K.exp(K.arange(num_timescales, dtype=K.floatx()) *
                  -log_timescale_increment))
    scaled_time = K.expand_dims(position, 1) * K.expand_dims(inv_timescales, 0)
    signal = K.concatenate([K.sin(scaled_time), K.cos(scaled_time)], axis=1)
    return K.expand_dims(signal, axis=0) 
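A hypothetical invocation (dimension and length chosen arbitrarily); the returned tensor carries one sine/cosine pair per timescale for every position:

from keras import backend as K

signal = positional_signal(hidden_size=8, length=10)
print(K.int_shape(signal))  # (1, 10, 8): batch axis, positions, hidden dimension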
Example #4
Source File: test_model_saving.py    From DeepLearning_Wavelet-LSTM with MIT License
def test_saving_custom_activation_function():
    x = Input(shape=(3,))
    output = Dense(3, activation=K.cos)(x)

    model = Model(x, output)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname, custom_objects={'cos': K.cos})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05) 
Example #5
Source File: attention.py    From Self-Attention-Keras with Apache License 2.0
def call(self, x):
        if (self.size is None) or (self.mode == 'sum'):
            self.size = int(x.shape[-1])
        batch_size, seq_len = K.shape(x)[0], K.shape(x)[1]
        position_j = 1. / K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
        position_j = K.expand_dims(position_j, 0)
        position_i = K.cumsum(K.ones_like(x[:, :, 0]), 1) - 1  # K.arange does not support a variable length, so the positions are generated this way instead
        position_i = K.expand_dims(position_i, 2)
        position_ij = K.dot(position_i, position_j)
        position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)
        if self.mode == 'sum':
            return position_ij + x
        elif self.mode == 'concat':
            return K.concatenate([position_ij, x], 2) 
Example #6
Source File: transform_rnn.py    From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def _rotation_y(theta):
    r1 = K.cos(theta[:,0:1])
    r2 = K.sin(theta[:,0:1])
    zero = K.zeros_like(r1)
    one = K.ones_like(r1)
    first = K.reshape(K.concatenate([r1,zero,r2,zero],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([zero,one,zero,zero],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([-r2,zero,r1,zero],axis=1),(-1,1,4))
    fourth = K.reshape(K.concatenate([zero,zero,zero,one],axis=1),(-1,1,4))
    rotation_y = K.concatenate([first,second,third,fourth],axis=1)
    rotation_y = T.reshape(rotation_y,[-1,4,4])
    return rotation_y 
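For reference, the 4x4 homogeneous matrix assembled above has the standard y-axis rotation layout; a NumPy sketch for a single, arbitrarily chosen angle:

import numpy as np

theta = np.pi / 6
c, s = np.cos(theta), np.sin(theta)
rotation_y = np.array([[ c, 0, s, 0],
                       [ 0, 1, 0, 0],
                       [-s, 0, c, 0],
                       [ 0, 0, 0, 1]], dtype=np.float32)
print(rotation_y)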
Example #7
Source File: model.py    From n-beats with MIT License
def seasonality_model(thetas, backcast_length, forecast_length, is_forecast):
    p = thetas.get_shape().as_list()[-1]
    p1, p2 = (p // 2, p // 2) if p % 2 == 0 else (p // 2, p // 2 + 1)
    t = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
    s1 = K.stack([K.cos(2 * np.pi * i * t) for i in range(p1)], axis=0)
    s2 = K.stack([K.sin(2 * np.pi * i * t) for i in range(p2)], axis=0)
    if p == 1:
        s = s2
    else:
        s = K.concatenate([s1, s2], axis=0)
    s = K.cast(s, np.float32)
    return K.dot(thetas, s) 
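A NumPy sketch of the Fourier basis this function builds, with the project's linear_space replaced by a simple normalized time grid (an assumption) and arbitrary sizes:

import numpy as np

forecast_length, p = 4, 6                      # 3 cosine + 3 sine terms
t = np.arange(forecast_length) / forecast_length
s1 = np.stack([np.cos(2 * np.pi * i * t) for i in range(p // 2)])
s2 = np.stack([np.sin(2 * np.pi * i * t) for i in range(p // 2)])
basis = np.concatenate([s1, s2], axis=0)       # shape (p, forecast_length)
thetas = np.random.randn(1, p).astype(np.float32)
print((thetas @ basis).shape)                  # (1, 4): one seasonal forecast per sample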
Example #8
Source File: custom.py    From DLWP with MIT License
def latitude_weighted_loss(loss_function=mean_squared_error, lats=None, output_shape=(), axis=-2, weighting='cosine'):
    """
    Create a loss function that weights inputs by a function of latitude before calculating the loss.

    :param loss_function: method: Keras loss function to apply after the weighting
    :param lats: ndarray: 1-dimensional array of latitude coordinates
    :param output_shape: tuple: shape of expected model output
    :param axis: int: latitude axis in model output shape
    :param weighting: str: type of weighting to apply. Options are:
            cosine: weight by the cosine of the latitude (default)
            midlatitude: weight by the cosine of the latitude but also apply a 25% reduction to the equator and boost
                to the mid-latitudes
    :return: callable loss function
    """
    if weighting not in ['cosine', 'midlatitude']:
        raise ValueError("'weighting' must be one of 'cosine' or 'midlatitude'")
    if lats is not None:
        lat_tensor = K.zeros(lats.shape)
        lat_tensor.assign(K.cast_to_floatx(lats[:]))

        weights = K.cos(lat_tensor * np.pi / 180.)
        if weighting == 'midlatitude':
            weights = weights + 0.5 * K.pow(K.sin(lat_tensor * 2 * np.pi / 180.), 2.)

        weight_shape = output_shape[axis:]
        for d in weight_shape[1:]:
            weights = K.expand_dims(weights, axis=-1)
            weights = K.repeat_elements(weights, d, axis=-1)

    else:
        weights = K.ones(output_shape)

    def lat_loss(y_true, y_pred):
        return loss_function(y_true * weights, y_pred * weights)

    return lat_loss 
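A hypothetical way to wire this into a model, with a made-up 36 x 72 latitude-longitude output grid:

import numpy as np
from keras.losses import mean_squared_error

lats = np.linspace(-87.5, 87.5, 36)
loss_fn = latitude_weighted_loss(mean_squared_error, lats=lats,
                                 output_shape=(36, 72), axis=-2, weighting='cosine')
# model.compile(optimizer='adam', loss=loss_fn)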
Example #9
Source File: custom.py    From DLWP with MIT License
def __init__(self, loss_function, lats, data_format='channels_last', weighting='cosine'):
        """
        Initialize a weighted loss.

        :param loss_function: method: Keras loss function to apply after the weighting
        :param lats: ndarray: 1-dimensional array of latitude coordinates
        :param data_format: Keras data_format ('channels_first' or 'channels_last')
        :param weighting: str: type of weighting to apply. Options are:
            cosine: weight by the cosine of the latitude (default)
            midlatitude: weight by the cosine of the latitude but also apply a 25% reduction to the equator and boost
                to the mid-latitudes
        """
        self.loss_function = loss_function
        self.lats = lats
        self.data_format = K.normalize_data_format(data_format)
        if weighting not in ['cosine', 'midlatitude']:
            raise ValueError("'weighting' must be one of 'cosine' or 'midlatitude'")
        self.weighting = weighting
        lat_tensor = K.zeros(lats.shape)
        print(lats)
        lat_tensor.assign(K.cast_to_floatx(lats[:]))
        self.weights = K.cos(lat_tensor * np.pi / 180.)
        if self.weighting == 'midlatitude':
            self.weights = self.weights - 0.25 * K.sin(lat_tensor * 2 * np.pi / 180.)
        self.is_init = False

        self.__name__ = 'latitude_weighted_loss' 
Example #10
Source File: cornell_grasp_dataset_reader.py    From costar_plan with Apache License 2.0
def parse_example_proto(examples_serialized, have_image_id=False):
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/filename': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),
        'image/height': tf.FixedLenFeature([], dtype=tf.int64),
        'image/width': tf.FixedLenFeature([], dtype=tf.int64)
    }

    # TODO(ahundt) remove boolean once we are set up with k-fold cross validation of images and objects
    if have_image_id:
        feature_map['object/id'] = tf.FixedLenFeature([], dtype=tf.int64)

    for i in range(4):
        y_key = 'bbox/y' + str(i)
        x_key = 'bbox/x' + str(i)
        feature_map[y_key] = tf.VarLenFeature(dtype=tf.float32)
        feature_map[x_key] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/cy'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/cx'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/tan'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/theta'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/sin_theta'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/cos_theta'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/width'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/height'] = tf.VarLenFeature(dtype=tf.float32)
    feature_map['bbox/grasp_success'] = tf.VarLenFeature(dtype=tf.int64)
    # feature_map['bbox/sin_2_theta'] = tf.sin(feature_map['bbox/theta'] * 2.0)
    # feature_map['bbox/cos_2_theta'] = tf.cos(feature_map['bbox/theta'] * 2.0)

    features = tf.parse_single_example(examples_serialized, feature_map)

    return features 
Example #11
Source File: sinerelu.py    From keras-contrib with MIT License
def call(self, Z):
        m = self.epsilon * (K.sin(Z) - K.cos(Z))
        A = K.maximum(m, Z)
        return A 
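Written out, call() computes max(epsilon * (sin(z) - cos(z)), z) element-wise; a NumPy sketch with an arbitrarily chosen epsilon:

import numpy as np

epsilon = 0.0025
z = np.linspace(-3.0, 3.0, 7)
a = np.maximum(epsilon * (np.sin(z) - np.cos(z)), z)
print(np.round(a, 4))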
Example #12
Source File: triangle_position_embedding.py    From Keras-TextClassification with MIT License
def call(self, inputs, mask=None):
        input_shape = K.shape(inputs)
        if self.mode == self.MODE_ADD:
            batch_size, seq_len, output_dim = input_shape[0], input_shape[1], input_shape[2]
            pos_input = K.tile(K.expand_dims(K.arange(seq_len), axis=0), [batch_size, 1])
        elif self.mode == self.MODE_CONCAT:
            batch_size, seq_len, output_dim = input_shape[0], input_shape[1], self.output_dim
            pos_input = K.tile(K.expand_dims(K.arange(seq_len), axis=0), [batch_size, 1])
        else:
            output_dim = self.output_dim
            pos_input = inputs
        if K.dtype(pos_input) != K.floatx():
            pos_input = K.cast(pos_input, K.floatx())
        evens = K.arange(output_dim // 2) * 2
        odds = K.arange(output_dim // 2) * 2 + 1
        even_embd = K.sin(
            K.dot(
                K.expand_dims(pos_input, -1),
                K.expand_dims(1.0 / K.pow(
                    10000.0,
                    K.cast(evens, K.floatx()) / K.cast(output_dim, K.floatx())
                ), 0)
            )
        )
        odd_embd = K.cos(
            K.dot(
                K.expand_dims(pos_input, -1),
                K.expand_dims(1.0 / K.pow(
                    10000.0, K.cast((odds - 1), K.floatx()) / K.cast(output_dim, K.floatx())
                ), 0)
            )
        )
        embd = K.stack([even_embd, odd_embd], axis=-1)
        output = K.reshape(embd, [-1, K.shape(inputs)[1], output_dim])
        if self.mode == self.MODE_CONCAT:
            output = K.concatenate([inputs, output], axis=-1)
        if self.mode == self.MODE_ADD:
            output += inputs
        return output 
Example #13
Source File: data_utils.py    From RecurrentGaze with MIT License
def numpy_angles2vector(angles):
    """
    Numpy version of angles2vector. Convert 2D angle (yaw and pitch) to 3D unit vector
    :param angles: list of 2D angles
    :return: computed 3D vectors
    """
    x = (-1.0)*np.sin(angles[:, 0]) * np.cos(angles[:, 1])
    y = (-1.0)*np.sin(angles[:, 1])
    z = (-1.0)*np.cos(angles[:, 0]) * np.cos(angles[:, 1])
    vec = np.transpose(np.concatenate([[x], [y], [z]], axis=0))
    return vec 
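A quick usage sketch with two made-up (yaw, pitch) pairs in radians; each output row is a unit vector:

import numpy as np

angles = np.array([[0.0, 0.0], [0.2, -0.1]], dtype=np.float32)
vectors = numpy_angles2vector(angles)
print(vectors.shape)                    # (2, 3)
print(np.linalg.norm(vectors, axis=1))  # each row has length 1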
Example #14
Source File: data_utils.py    From RecurrentGaze with MIT License
def angles2vector(angles):
    """
    Convert 2D angle (yaw and pitch) to 3D unit vector
    :param angles: list of 2D angles
    :return: computed 3D vectors
    """
    x = (-1.0) * K.sin(angles[:, 0]) * K.cos(angles[:, 1])
    y = (-1.0) * K.sin(angles[:, 1])
    z = (-1.0) * K.cos(angles[:, 0]) * K.cos(angles[:, 1])
    vec = K.transpose(K.concatenate([[x], [y], [z]], axis=0))
    return vec 
Example #15
Source File: transform_rnn.py    From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def _rotation_z(theta):
    r1 = K.cos(theta[:,2:3])
    r2 = K.sin(theta[:,2:3])
    zero = K.zeros_like(r1)
    one = K.ones_like(r1)
    first = K.reshape(K.concatenate([r1,-r2,zero,zero],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([r2,r1,zero,zero],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([zero,zero,one,zero],axis=1),(-1,1,4))
    fourth = K.reshape(K.concatenate([zero,zero,zero,one],axis=1),(-1,1,4))
    rotation_z = K.concatenate([first,second,third,fourth],axis=1)
    rotation_z = T.reshape(rotation_z,[-1,4,4])
    return rotation_z 
Example #16
Source File: transform_rnn.py    From View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition with MIT License
def _rotation_x(theta):
    r1 = K.cos(theta[:,1:2])
    r2 = K.sin(theta[:,1:2])
    zero = K.zeros_like(r1)
    one = K.ones_like(r1)
    first = K.reshape(K.concatenate([one,zero,zero,zero],axis=1),(-1,1,4))
    second = K.reshape(K.concatenate([zero,r1,-r2,zero],axis=1),(-1,1,4))
    third = K.reshape(K.concatenate([zero,r2,r1,zero],axis=1),(-1,1,4))
    fourth = K.reshape(K.concatenate([zero,zero,zero,one],axis=1),(-1,1,4))
    rotation_x = K.concatenate([first,second,third,fourth],axis=1)
    rotation_x = T.reshape(rotation_x,[-1,4,4])
    return rotation_x 
Example #17
Source File: cornell_grasp_dataset_reader.py    From costar_plan with Apache License 2.0
def crop_to_gripper_transform(
        input_image_shape, grasp_center_coordinate, grasp_center_rotation_theta, cropped_image_shape,
        random_translation_max_pixels=None, random_rotation=None, seed=None):
    """ Transform and rotate image to be centered and aligned with proposed grasp.

        Given a gripper center coordinate and rotation angle,
        transform and rotate the image so it is centered with 0 theta.
    """
    # TODO(ahundt) projective grasp coordinate transform with xy sin cos rot and multiply also see transform_crop_and_resize_image.
    transforms = []
    input_image_shape_float = tf.cast(input_image_shape, tf.float32)
    cropped_image_shape = tf.cast(cropped_image_shape, tf.float32)
    half_image_shape = (cropped_image_shape / 2)[:2]

    crop_offset = - grasp_center_coordinate + half_image_shape

    crop_offset = crop_offset[::-1]
    if random_translation_max_pixels is not None:
        crop_offset = crop_offset + rcp.random_translation_offset(random_translation_max_pixels)[:2]
    # reverse yx to xy
    transforms += [tf.contrib.image.translations_to_projective_transforms(crop_offset)]
    input_height_f = cropped_image_shape[0]
    input_width_f = cropped_image_shape[1]

    if random_rotation is not None and random_rotation is not False:
        if isinstance(random_rotation, bool) and random_rotation:
            random_rotation = tf.convert_to_tensor(
                [-math.pi, math.pi], dtype=tf.float32, name='random_theta')
        if isinstance(random_rotation, float):
            random_rotation = tf.convert_to_tensor(
                [-random_rotation, random_rotation], dtype=tf.float32, name='random_theta')
        print('random rotation: ' + str(random_rotation))
        theta = tf.random_uniform([1], minval=random_rotation[0], maxval=random_rotation[1], seed=seed, dtype=tf.float32)
        grasp_center_rotation_theta += theta
    transforms += [tf.contrib.image.angles_to_projective_transforms(
                         grasp_center_rotation_theta, input_height_f, input_width_f)]
    transform = tf.contrib.image.compose_transforms(*transforms)
    # TODO(ahundt) rename features random_* to a more general name, and make the same change in random_crop.py
    features = {
        # TODO(ahundt) should these be positive or negative?
        'random_rotation': -grasp_center_rotation_theta,
        'random_translation_offset': crop_offset,
        'random_projection_transform': transform
    }

    return transform, features