Python tensorflow.imag() Examples

The following are 28 code examples of tensorflow.imag(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the tensorflow module.
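Before the project examples, here is a minimal self-contained sketch of the op itself. It assumes the TF 1.x graph-mode API used throughout this page, where the op lives at the top level as tf.imag (in TF 2.x it moved to tf.math.imag):

import numpy as np
import tensorflow as tf

# tf.imag extracts the imaginary component of a complex tensor as a real tensor.
z = tf.constant(np.array([1 + 2j, 3 - 4j], dtype=np.complex64))
imag_part = tf.imag(z)  # float32 tensor

with tf.Session() as sess:
    print(sess.run(imag_part))  # [ 2. -4.]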
Example #1
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        
        if inputx.dtype not in [tf.complex64, tf.complex128]:
            print('Warning: inputx is not complex. Converting.', file=sys.stderr)
        
            # if inputx is float, this will assume 0 imag channel
            inputx = tf.cast(inputx, tf.complex64)

        # get the right fft
        if self.ndims == 1:
            fft = tf.fft
        elif self.ndims == 2:
            fft = tf.fft2d
        else:
            fft = tf.fft3d

        perm_dims = [0, self.ndims + 1] + list(range(1, self.ndims + 1))
        invert_perm_ndims = [0] + list(range(2, self.ndims + 2)) + [1]
        
        perm_inputx = K.permute_dimensions(inputx, perm_dims)  # [batch_size, nb_features, *vol_size]
        fft_inputx = fft(perm_inputx)
        return K.permute_dimensions(fft_inputx, invert_perm_ndims) 
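As a sanity check on the permutation bookkeeping above, here is a NumPy-only sketch (hypothetical shapes, ndims == 2): perm_dims moves the feature axis in front of the spatial axes, since tf.fft2d transforms the innermost two dimensions, and invert_perm_ndims restores the original layout.

import numpy as np

ndims = 2
perm_dims = [0, ndims + 1] + list(range(1, ndims + 1))     # [0, 3, 1, 2]
invert_perm_ndims = [0] + list(range(2, ndims + 2)) + [1]  # [0, 2, 3, 1]

x = np.zeros((8, 32, 32, 4))         # [batch_size, *vol_size, nb_features]
x_perm = np.transpose(x, perm_dims)  # shape (8, 4, 32, 32)
assert np.transpose(x_perm, invert_perm_ndims).shape == x.shape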
Example #2
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        
        if inputx.dtype not in [tf.complex64, tf.complex128]:
            print('Warning: inputx is not complex. Converting.', file=sys.stderr)
        
            # if inputx is float, this will assume 0 imag channel
            inputx = tf.cast(inputx, tf.complex64)
        
        # get the right fft
        if self.ndims == 1:
            ifft = tf.ifft
        elif self.ndims == 2:
            ifft = tf.ifft2d
        else:
            ifft = tf.ifft3d

        perm_dims = [0, self.ndims + 1] + list(range(1, self.ndims + 1))
        invert_perm_ndims = [0] + list(range(2, self.ndims + 2)) + [1]
        
        perm_inputx = K.permute_dimensions(inputx, perm_dims)  # [batch_size, nb_features, *vol_size]
        ifft_inputx = ifft(perm_inputx)
        return K.permute_dimensions(ifft_inputx, invert_perm_ndims) 
Example #3
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
    # complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag). We just want to make sure the
    # gradient function is checked.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      real, imag = tf.split(1, 2, inx)
      real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
      cplx = tf.complex(real, imag)
      cplx = tf.conj(cplx)
      loss = tf.reduce_sum(
          tf.square(tf.real(cplx))) + tf.reduce_sum(
              tf.square(tf.imag(cplx)))
      epsilon = 1e-3
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  list(x.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=x,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon) 
Example #4
Source File: yellowfin.py    From MobileNet with Apache License 2.0
def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact) )        
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)
    
    # filter out the correct root
    root_idx = tf.logical_and(tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0) ),
      tf.less(tf.real(roots), tf.constant(1.0) ) ), tf.less(tf.abs(tf.imag(roots) ), 1e-5) )
    # in case there are two duplicated roots satisfying the above condition
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx) ), tf.constant(0) ), shape=[] )
    tf.assert_equal(tf.size(root), tf.constant(1) )

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ( (tf.sqrt(dr) - 1)/(tf.sqrt(dr) + 1) )**2)    
    return mu 
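The root selection above can be reproduced offline. A NumPy sketch with a made-up const_fact (the real value comes from the optimizer's running averages): for positive const_fact the cubic -x^3 + 3x^2 - (3 + const_fact)x + 1 is strictly decreasing, so it has exactly one real root, and that root lies in (0, 1); it is the root the tf.logical_and filter keeps.

import numpy as np

const_fact = 0.5  # hypothetical; in YellowFin this is dist_to_opt^2 * h_min^2 / (2 * grad_var)
roots = np.roots([-1.0, 3.0, -(3.0 + const_fact), 1.0])

# Mirror the filter: (near-)zero imaginary part and real part in (0, 1).
valid = (np.abs(roots.imag) < 1e-5) & (roots.real > 0.0) & (roots.real < 1.0)
root = roots[valid].real[0]
print(root, root ** 2)  # mu is max(root**2, the lower bound derived from dr)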
Example #5
Source File: models.py    From DeepLearningImplementations with MIT License
def stack_real_imag(x):
    stack_axis = len(x.get_shape().as_list())
    return tf.stack((tf.real(x), tf.imag(x)), axis=stack_axis)
Example #6
Source File: ops.py    From tfdeploy with MIT License
def test_Imag(self):
        t = tf.imag(tf.Variable(self.random(3, 4, complex=True)))
        self.check(t) 
Example #7
Source File: scattering.py    From DeepLearningImplementations with MIT License
def stack_real_imag(x):
    stack_axis = len(x.get_shape().as_list())
    return tf.stack((tf.real(x), tf.imag(x)), axis=stack_axis)
Example #8
Source File: tfutil.py    From multisensory with Apache License 2.0
def angle(z):
  # from https://github.com/tensorflow/tensorflow/issues/483
  """
  Returns the elementwise arctan of imag(z)/real(z), choosing the quadrant correctly.

  Quadrant I: arctan(y/x)
  Quadrant II: \pi + arctan(y/x) (phase of x<0, y=0 is \pi)
  Quadrant III: -\pi + arctan(y/x)
  Quadrant IV: arctan(y/x)

  Inputs:
      z: tf.complex64 or tf.complex128 tensor
  Returns:
      Angle of z
  """
  return tf.atan2(tf.imag(z), tf.real(z))
  # if z.dtype == tf.complex128:
  #     dtype = tf.float64
  # else:
  #     dtype = tf.float32
  # x = tf.real(z)
  # y = tf.imag(z)
  # xneg = tf.cast(x < 0.0, dtype)
  # yneg = tf.cast(y < 0.0, dtype)
  # ypos = tf.cast(y >= 0.0, dtype)

  # offset = xneg * (ypos - yneg) * np.pi

  # return tf.atan(y / x) + offset 
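A small hedged check (TF 1.x session assumed) that the atan2 formulation agrees with np.angle, using one point per quadrant:

import numpy as np
import tensorflow as tf

z_np = np.array([1 + 1j, -1 + 1j, -1 - 1j, 1 - 1j], dtype=np.complex64)
z = tf.constant(z_np)

with tf.Session() as sess:
    tf_angle = sess.run(tf.atan2(tf.imag(z), tf.real(z)))

np.testing.assert_allclose(tf_angle, np.angle(z_np), rtol=1e-6)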
Example #9
Source File: model.py    From DeepMRI with GNU General Public License v3.0
def dc(generated, X_k, mask):
    gene_complex = real2complex(generated)
    gene_complex = tf.transpose(gene_complex,[0, 3, 1, 2])
    mask = tf.transpose(mask,[0, 3, 1, 2])
    X_k = tf.transpose(X_k,[0, 3, 1, 2])
    gene_fft = tf.fft2d(gene_complex)
    out_fft = X_k + gene_fft * (1.0 - mask)
    output_complex = tf.ifft2d(out_fft)
    output_complex = tf.transpose(output_complex, [0, 2, 3, 1])
    output_real = tf.cast(tf.real(output_complex), dtype=tf.float32)
    output_imag = tf.cast(tf.imag(output_complex), dtype=tf.float32)
    output = tf.concat([output_real,output_imag], axis=-1)
    return output 
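The data-consistency rule in dc() is easiest to see in plain NumPy. A hypothetical 1-D sketch (in the function, X_k is the measured k-space, which is already zero off the mask): measured samples pass through unchanged, while the network's FFT fills only the unmeasured locations.

import numpy as np

mask = np.array([1, 0, 1, 0], dtype=np.complex64)
measured_k = np.array([5 + 0j, 0, 7 + 0j, 0])            # zero where mask == 0
generated_k = np.array([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])

out_k = measured_k + generated_k * (1.0 - mask)
print(out_k)  # [5.+0.j 2.+2.j 7.+0.j 4.+4.j]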
Example #10
Source File: model.py    From DeepMRI with GNU General Public License v3.0
def complex2real(x):
    x_real = tf.real(x)
    x_imag = tf.imag(x)
    return tf.concat([x_real,x_imag], axis=-1) 
Example #11
Source File: datasets.py    From spherical-cnn with MIT License
def from_cached_tfrecords(args):
    """ Use tf.Dataset, but feeding it using a placeholder w/ the whole dataset. """
    # this may seem a bit weird:
    # we take tfrecords but load them into placeholders during training
    # we found that it loaded faster this way when this was first implemented
    # letting tf.Dataset load everything simultaneously is conceptually better

    res, nch = args.input_res, args.nchannels

    x = tf.placeholder(args.dtype, (None, res, res, nch))
    y = tf.placeholder('int64', (None,))  # (None,) is a 1-D label batch; bare (None) would mean unconstrained shape

    dataset = tf.contrib.data.Dataset.from_tensor_slices((x, y))

    # inputs are complex numbers
    # magnitude is ray length
    # phase is angle between ray and normal
    # we found that it is best to treat them independently, though
    dataset = dataset.map(lambda x, y: (tf.concat([tf.abs(x),
                                                   tf.imag(x/(tf.cast(tf.abs(x), 'complex64') +1e-8))],
                                                  axis=-1), y))

    # we use same batch sizes for train/val/test
    dataset = dataset.batch(args.train_bsize)
    iterator = dataset.make_initializable_iterator()

    fnames = {}
    for t in ['train', 'test', 'val']:
        fnames[t] = glob.glob(args.dset_dir + '/{}*.tfrecord'.format(t))

    out = {'x': x, 'y': y, 'fnames': fnames}
    print('loading dataset; number of tfrecords: {}'
          .format({k: len(v) for k, v in out['fnames'].items()}))

    return iterator, out 
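The dataset map above splits each complex input into its magnitude and a bounded phase feature: imag(x / |x|) equals sin(arg(x)). A short NumPy check with illustrative values:

import numpy as np

x = 2.0 * np.exp(1j * 0.3)         # magnitude 2, phase 0.3 rad
print(np.abs(x))                   # 2.0
print(np.imag(x / np.abs(x)))      # ~0.2955
print(np.sin(0.3))                 # ~0.2955, the same quantity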
Example #12
Source File: test_tf_wpe.py    From nara_wpe with MIT License
def test_recursive_wpe(self):
        with self.test_session() as sess:
            T = 5000
            D = 2
            K = 1
            delay = 3
            Y = np.random.normal(size=(D, T)) \
                + 1j * np.random.normal(size=(D, T))
            Y = tf.convert_to_tensor(Y[None])
            power = tf.reduce_mean(tf.real(Y) ** 2 + tf.imag(Y) ** 2, axis=1)
            inv_power = tf.reciprocal(power)
            step_enhanced = tf_wpe.wpe_step(
                Y, inv_power, taps=K, delay=D)
            recursive_enhanced = tf_wpe.recursive_wpe(
                tf.transpose(Y, (2, 0, 1)),
                tf.transpose(power),
                1.,
                taps=K,
                delay=D,
                only_use_final_filters=True
            )
            recursive_enhanced = tf.transpose(recursive_enhanced, (1, 2, 0))
            recursive_enhanced, step_enhanced = sess.run(
                [recursive_enhanced, step_enhanced]
            )
        np.testing.assert_allclose(
            recursive_enhanced[..., -200:],
            step_enhanced[..., -200:],
            atol=0.01, rtol=0.2
        ) 
Example #13
Source File: tfmri.py    From dl-cs with MIT License
def complex_to_channels(image,
                        data_format='channels_last',
                        name='complex2channels'):
    """Convert data from complex to channels."""
    if len(image.shape) != 3 and len(image.shape) != 4:
        raise TypeError('Input data must have 3 or 4 dimensions')

    axis_c = -1 if data_format == 'channels_last' else -3

    if image.dtype is not tf.complex64 and image.dtype is not tf.complex128:
        raise TypeError('Input data must be complex')

    with tf.name_scope(name):
        image_out = tf.concat((tf.real(image), tf.imag(image)), axis_c)
    return image_out 
Example #14
Source File: tfmri_test.py    From dl-cs with MIT License
def test_channels_to_complex(self):
        data = tf.random_uniform([2, 10, 10, 2])
        data_complex = tfmri.channels_to_complex(data)
        diff_r = np.real(data_complex) - data[..., 0:1]
        diff_i = np.imag(data_complex) - data[..., 1:]
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)

        data_complex = tfmri.channels_to_complex(
            data, data_format='channels_first')
        diff_r = np.real(data_complex) - data[:, 0:5, ...]
        diff_i = np.imag(data_complex) - data[:, 5:, ...]
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)

        data = tf.random_uniform([10, 10, 2])
        data_complex = tfmri.channels_to_complex(
            data, data_format='channels_first')
        diff_r = np.real(data_complex) - data[0:5, ...]
        diff_i = np.imag(data_complex) - data[5:, ...]
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)

        with self.assertRaises(TypeError):
            # Not enough dimensions
            tfmri.channels_to_complex(tf.random_uniform([10, 10]))
        with self.assertRaises(TypeError):
            # Too many dimensions
            tfmri.channels_to_complex(tf.random_uniform([10, 10, 1, 1, 1]))
        with self.assertRaises(TypeError):
            tfmri.channels_to_complex(tf.random_uniform([10, 10, 1]))
        with self.assertRaises(TypeError):
            tfmri.channels_to_complex(
                tf.random_uniform([5, 10, 1]), data_format='channels_first')
        with self.assertRaises(TypeError):
            tfmri.channels_to_complex(
                tf.random_uniform([1, 5, 10, 1]), data_format='channels_first') 
Example #15
Source File: tfmri_test.py    From dl-cs with MIT License
def test_complex_to_channels(self):
        data_r = tf.random_uniform([3, 10, 10, 2])
        data_i = tf.random_uniform([3, 10, 10, 2])
        data = tf.complex(data_r, data_i)
        data_out = tfmri.complex_to_channels(data)
        diff_r = data_r - tf.real(data)
        diff_i = data_i - tf.imag(data)
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)
        self.assertEqual(data_out.shape[-1], 4)

        data_out = tfmri.complex_to_channels(
            data, data_format='channels_first')
        diff_r = data_r - tf.real(data)
        diff_i = data_i - tf.imag(data)
        diff = np.mean(diff_r ** 2 + diff_i ** 2)
        self.assertTrue(diff < eps)
        self.assertEqual(data_out.shape[1], 20)

        with self.assertRaises(TypeError):
            # Input must be complex
            data_out = tfmri.complex_to_channels(data_r)
        with self.assertRaises(TypeError):
            # shape error
            data_r = tf.random_uniform([1, 3, 10, 10, 2])
            data_i = tf.random_uniform([1, 3, 10, 10, 2])
            data = tf.complex(data_r, data_i)
            data_out = tfmri.complex_to_channels(data)
        with self.assertRaises(TypeError):
            # shape error
            data_r = tf.random_uniform([10, 2])
            data_i = tf.random_uniform([10, 2])
            data = tf.complex(data_r, data_i)
            data_out = tfmri.complex_to_channels(data) 
Example #16
Source File: train_mri_vn.py    From mri-variationalnetwork with MIT License
def mriForwardOp(self, u, coil_sens, sampling_mask):
        with tf.variable_scope('mriForwardOp'):
            # apply sensitivities
            coil_imgs = tf.expand_dims(u, axis=1) * coil_sens
            # centered Fourier transform
            Fu = tf.contrib.icg.fftc2d(coil_imgs)
            # apply sampling mask
            mask = tf.expand_dims(sampling_mask, axis=1)
            kspace = tf.complex(tf.real(Fu) * mask, tf.imag(Fu) * mask)
        return kspace 
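Worth noting: masking the real and imaginary parts separately, as mriForwardOp does, is equivalent to multiplying the complex k-space by the real-valued mask; the tf.complex reconstruction is needed because TF will not multiply a complex64 tensor by a float32 mask directly. A NumPy sketch with made-up values:

import numpy as np

Fu = np.array([1 + 2j, 3 + 4j])
mask = np.array([1.0, 0.0])

separate = np.real(Fu) * mask + 1j * (np.imag(Fu) * mask)
direct = Fu * mask
assert np.allclose(separate, direct)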
Example #17
Source File: train_mri_vn.py    From mri-variationalnetwork with MIT License
def mriAdjointOpWithOS(self, f, coil_sens, sampling_mask):
        with tf.variable_scope('mriAdjointOp'):
            # variables to remove frequency encoding oversampling
            pad_u = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) + 1, tf.int32)
            pad_l = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) - 1, tf.int32)
            # apply mask and perform inverse centered Fourier transform
            mask = tf.expand_dims(sampling_mask, axis=1)
            Finv = tf.contrib.icg.ifftc2d(tf.complex(tf.real(f) * mask, tf.imag(f) * mask))
            # multiply coil images with sensitivities and sum up over channels
            img = tf.reduce_sum(Finv * tf.conj(coil_sens), 1)[:, pad_u:-pad_l, :]
        return img 
Example #18
Source File: train_mri_vn.py    From mri-variationalnetwork with MIT License
def mriForwardOpWithOS(self, u, coil_sens, sampling_mask):
        with tf.variable_scope('mriForwardOp'):
            # add frequency encoding oversampling
            pad_u = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) + 1, tf.int32)
            pad_l = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) - 1, tf.int32)
            u_pad = tf.pad(u, [[0, 0], [pad_u, pad_l], [0, 0]])
            u_pad = tf.expand_dims(u_pad, axis=1)
            # apply sensitivities
            coil_imgs = u_pad * coil_sens
            # centered Fourier transform
            Fu = tf.contrib.icg.fftc2d(coil_imgs)
            # apply sampling mask
            mask = tf.expand_dims(sampling_mask, axis=1)
            kspace = tf.complex(tf.real(Fu) * mask, tf.imag(Fu) * mask)
        return kspace 
Example #19
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4].  data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
    with self.test_session():
      inp = tf.convert_to_tensor(data)
      xr, xi, yr, yi = tf.split(1, 4, inp)

      def vec(x):  # Reshape to a vector
        return tf.reshape(x, [-1])
      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return tf.complex(r, i)
      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = tf.test.compute_gradient(inp,
                                                  list(data.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=data,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon) 
Example #20
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testConj128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareConj(cplx, use_gpu=False)
    self._compareConj(cplx, use_gpu=True) 
Example #21
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testRealImag128(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float64)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float64)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True) 
Example #22
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testRealImag64(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    cplx = real + 1j * imag
    self._compareRealImag(cplx, use_gpu=False)
    self._compareRealImag(cplx, use_gpu=True) 
Example #23
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compareRealImag(self, cplx, use_gpu):
    np_real, np_imag = np.real(cplx), np.imag(cplx)
    with self.test_session(use_gpu=use_gpu) as sess:
      inx = tf.convert_to_tensor(cplx)
      tf_real = tf.real(inx)
      tf_imag = tf.imag(inx)
      tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
    self.assertAllEqual(np_real, tf_real_val)
    self.assertAllEqual(np_imag, tf_imag_val)
    self.assertShapeEqual(np_real, tf_real)
    self.assertShapeEqual(np_imag, tf_imag) 
Example #24
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def testMake(self):
    real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
    imag = (np.arange(-3, 3) / 5.).reshape([1, 3, 2]).astype(np.float32)
    for use_gpu in [False, True]:
      self._compareMake(real, imag, use_gpu)
      self._compareMake(real, 12.0, use_gpu)
      self._compareMake(23.0, imag, use_gpu) 
Example #25
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compareMake(self, real, imag, use_gpu):
    np_ans = real + (1j) * imag
    with self.test_session(use_gpu=use_gpu):
      real = tf.convert_to_tensor(real)
      imag = tf.convert_to_tensor(imag)
      tf_ans = tf.complex(real, imag)
      out = tf_ans.eval()
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans) 
Example #26
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        
        assert inputx.dtype in [tf.complex64, tf.complex128], 'inputx is not complex.'
        
        return tf.concat([tf.real(inputx), tf.imag(inputx)], -1) 
Example #27
Source File: paramdefinitions.py    From mri-variationalnetwork with MIT License
def add_convolution_params(params, const_params, config):
    def generate_random_numbers(config, zero_mean=True):
        init = np.random.randn(config['num_stages'],
                               config['filter_size'],
                               config['filter_size'],
                               config['features_in'],
                               config['features_out']).astype(np.float32) / \
               np.sqrt(config['filter_size'] ** 2 * config['features_in'])
        if zero_mean:
            init -= np.mean(init, axis=(1, 2, 3), keepdims=True)

        return init

    # define prox calculations; default to True unless explicitly disabled
    prox_zero_mean = config.get('prox_zero_mean', True)
    prox_norm = config.get('prox_norm', True)

    print('kernel {}'.format(config['name']))
    print('  prox_zero_mean: ', prox_zero_mean)
    print('  prox_norm: ', prox_norm)

    # filter kernels
    k_0 = generate_random_numbers(config) + 1j * generate_random_numbers(config)
    k = tf.Variable(initial_value=k_0, dtype=tf.complex64, name=config['name'])

    prox_k = proxmaps.zero_mean_norm_ball(k, zero_mean=prox_zero_mean, normalize=prox_norm, axis=(1,2,3))

    params.add(k, prox=prox_k)

    # add kernels to summary
    def get_kernel_img(k):
        _, _, n_f_in, n_f_out = k.shape
        k_img = tf.concat([tf.concat([k[:, :, in_f, out_f] for in_f in range(n_f_in)], axis=0)
                           for out_f in range(n_f_out)], axis=1)
        k_img = tf.expand_dims(tf.expand_dims(k_img, -1), 0)
        return k_img

    with tf.variable_scope('kernel_%s_summary' % config['name']):
        for i in range(config['num_stages']):
            tf.summary.image('%s_%d_real' % (config['name'], i + 1), get_kernel_img(tf.real(k[i])), collections=['images'])
            tf.summary.image('%s_%d_imag' % (config['name'], i + 1), get_kernel_img(tf.imag(k[i])), collections=['images']) 
Example #28
Source File: proxmaps.py    From mri-variationalnetwork with MIT License
def zero_mean_norm_ball(x, zero_mean=True, normalize=True, norm_bound=1.0, norm='l2', mask=None, axis=(0, ...)):
    """ project onto zero-mean and norm-one ball
    :param x: tf variable which should be projected
    :param zero_mean: boolean True for zero-mean. default=True
    :param normalize: boolean True for l_2-norm ball projection. default:True
    :param norm_bound: defines the size of the norm ball
    :param norm: type of the norm
    :param mask: binary mask to compute the mean and norm
    :param axis: defines the axis for the reduction (mean and norm)
    :return: projection ops
    """

    if mask is None:
        shape = []
        for i in range(len(x.shape)):
            if i in axis:
                shape.append(x.shape[i])
            else:
                shape.append(1)
        mask = tf.ones(shape, dtype=np.float32)

    with tf.variable_scope('prox_' + x.name.split(':')[0]):
        x_masked = tf.complex(tf.real(x) * mask, tf.imag(x) * mask)

        if zero_mean:
            x_mean_real = tf.reduce_sum(tf.real(x_masked), axis=axis, keepdims=True) / tf.reduce_sum(mask, axis=axis,
                                                                                                     keepdims=True)
            x_mean_imag = tf.reduce_sum(tf.imag(x_masked), axis=axis, keepdims=True) / tf.reduce_sum(mask, axis=axis,
                                                                                                     keepdims=True)
            x_mean = tf.complex(x_mean_real * mask, x_mean_imag * mask)
            x_zm = x_masked - x_mean
        else:
            x_zm = x_masked

        if normalize:
            if norm == 'l2':
                x_proj = tf.assign(x, x_zm / tf.complex(tf.maximum(tf.sqrt(tf.reduce_sum(tf.real(x_zm * tf.conj(x_zm)),
                                                                                         axis=axis, keepdims=True)) /
                                                                   norm_bound, 1), tf.zeros_like(x_zm, tf.float32)))
            else:
                raise ValueError("Norm '%s' not defined." % norm)
        elif zero_mean:
            x_proj = tf.assign(x, x_zm)
        else:
            x_proj = None

    return x_proj
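For reference, with mask == 1 everywhere the projection above reduces to x <- (x - mean(x)) / max(||x - mean(x)||_2 / norm_bound, 1). A NumPy sketch of that special case (hypothetical kernel shape and axes):

import numpy as np

def project_zero_mean_l2_ball(x, norm_bound=1.0, axis=(1, 2, 3)):
    # subtract the mean, then rescale only if the l2 norm exceeds norm_bound
    x_zm = x - np.mean(x, axis=axis, keepdims=True)
    norm = np.sqrt(np.sum(np.abs(x_zm) ** 2, axis=axis, keepdims=True))
    return x_zm / np.maximum(norm / norm_bound, 1.0)

k = np.random.randn(2, 3, 3, 4) + 1j * np.random.randn(2, 3, 3, 4)
k_proj = project_zero_mean_l2_ball(k)
assert np.allclose(k_proj.mean(axis=(1, 2, 3)), 0, atol=1e-10)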