Python tensorflow.complex() Examples

The following are 30 code examples of tensorflow.complex(), drawn from open-source projects. The source file, project, and license are listed with each example. You may also want to check out the other available functions and classes of the tensorflow module.
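
Before diving into the project examples, here is a minimal sketch of the basic API, assuming the TensorFlow 1.x interface used throughout the examples below (in TensorFlow 2.x, tf.real and tf.imag live under tf.math and no Session is needed):

import tensorflow as tf

real = tf.constant([1.0, 2.0])
imag = tf.constant([3.0, 4.0])

z = tf.complex(real, imag)       # complex64 tensor: [1+3j, 2+4j]
mag = tf.abs(z)                  # elementwise magnitude |z|
re, im = tf.real(z), tf.imag(z)  # recover the real and imaginary parts

with tf.Session() as sess:
    print(sess.run([z, mag]))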
Example #1
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        
        if inputx.dtype not in [tf.complex64, tf.complex128]:
            print('Warning: inputx is not complex. Converting.', file=sys.stderr)
        
            # if inputx is float, this will assume 0 imag channel
            inputx = tf.cast(inputx, tf.complex64)

        # get the right fft
        if self.ndims == 1:
            fft = tf.fft
        elif self.ndims == 2:
            fft = tf.fft2d
        else:
            fft = tf.fft3d

        perm_dims = [0, self.ndims + 1] + list(range(1, self.ndims + 1))
        invert_perm_ndims = [0] + list(range(2, self.ndims + 2)) + [1]
        
        perm_inputx = K.permute_dimensions(inputx, perm_dims)  # [batch_size, nb_features, *vol_size]
        fft_inputx = fft(perm_inputx)
        return K.permute_dimensions(fft_inputx, invert_perm_ndims) 
Example #2
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        
        if inputx.dtype not in [tf.complex64, tf.complex128]:
            print('Warning: inputx is not complex. Converting.', file=sys.stderr)
        
            # if inputx is float, this will assume 0 imag channel
            inputx = tf.cast(inputx, tf.complex64)
        
        # get the right fft
        if self.ndims == 1:
            ifft = tf.ifft
        elif self.ndims == 2:
            ifft = tf.ifft2d
        else:
            ifft = tf.ifft3d

        perm_dims = [0, self.ndims + 1] + list(range(1, self.ndims + 1))
        invert_perm_ndims = [0] + list(range(2, self.ndims + 2)) + [1]
        
        perm_inputx = K.permute_dimensions(inputx, perm_dims)  # [batch_size, nb_features, *vol_size]
        ifft_inputx = ifft(perm_inputx)
        return K.permute_dimensions(ifft_inputx, invert_perm_ndims) 
Example #3
Source File: train_specgan.py    From wavegan with MIT License
def invert_spectra_griffin_lim(X_mag, nfft, nhop, ngl):
    X = tf.complex(X_mag, tf.zeros_like(X_mag))

    def b(i, X_best):
        x = tf.contrib.signal.inverse_stft(X_best, nfft, nhop)
        X_est = tf.contrib.signal.stft(x, nfft, nhop)
        phase = X_est / tf.cast(tf.maximum(1e-8, tf.abs(X_est)), tf.complex64)
        X_best = X * phase
        return i + 1, X_best

    i = tf.constant(0)
    c = lambda i, _: tf.less(i, ngl)
    _, X = tf.while_loop(c, b, [i, X], back_prop=False)

    x = tf.contrib.signal.inverse_stft(X, nfft, nhop)
    x = x[:, :_SLICE_LEN]

    return x 
Example #4
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _testBCastByFunc(self, funcs, xs, ys):
    dtypes = [
        np.float16,
        np.float32,
        np.float64,
        np.int32,
        np.int64,
        np.complex64,
        np.complex128,
    ]
    for dtype in dtypes:
      for (np_func, tf_func) in funcs:
        if (dtype in (np.complex64, np.complex128) and
              tf_func in (_FLOORDIV, tf.floordiv)):
          continue  # floordiv makes no sense for complex numbers
        self._compareBCast(xs, ys, dtype, np_func, tf_func)
        self._compareBCast(ys, xs, dtype, np_func, tf_func) 
Example #5
Source File: cwise_ops_test.py    From deep_image_model with Apache License 2.0
def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
    # complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag). We just want to make sure the
    # gradient function is checked.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      real, imag = tf.split(1, 2, inx)
      real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
      cplx = tf.complex(real, imag)
      cplx = tf.conj(cplx)
      loss = tf.reduce_sum(
          tf.square(tf.real(cplx))) + tf.reduce_sum(
              tf.square(tf.imag(cplx)))
      epsilon = 1e-3
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  list(x.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=x,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon) 
Example #6
Source File: fft_ops_test.py    From deep_image_model with Apache License 2.0
def _checkGrad(self, func, x, y, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      inx = tf.convert_to_tensor(x)
      iny = tf.convert_to_tensor(y)
      # func is a forward or inverse FFT function (batched or unbatched)
      z = func(tf.complex(inx, iny))
      # loss = sum(|z|^2)
      loss = tf.reduce_sum(tf.real(z * tf.conj(z)))
      ((x_jacob_t, x_jacob_n),
       (y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
           [inx, iny],
           [list(x.shape), list(y.shape)],
           loss,
           [1],
           x_init_value=[x, y],
           delta=1e-2)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
    self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2) 
Example #7
Source File: tfmri_test.py    From dl-cs with MIT License
def test_fftc(self):
        shape = [10, 10, 2]
        data = tf.complex(
            tf.random_uniform(shape), tf.random_uniform(shape))
        fdata = tfmri.fftc(data)
        fdata_np = self._fftnc(data, axes=(-2,))
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.fftc(data, data_format='channels_first')
        fdata_np = self._fftnc(data, axes=(-1,))
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.fftc(data, orthonorm=False)
        fdata_np = self._fftnc(data, axes=(-2,), norm=None)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps) 
Example #8
Source File: tfmri_test.py    From dl-cs with MIT License
def test_ifftc(self):
        shape = [10, 10, 2]
        data = tf.complex(
            tf.random_uniform(shape), tf.random_uniform(shape))
        fdata_np = self._fftnc(data, axes=(-2,), transpose=True)
        fdata = tfmri.ifftc(data)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)
        fdata = tfmri.fftc(data, transpose=True)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.ifftc(data, data_format='channels_first')
        fdata_np = self._fftnc(data, axes=(-1,), transpose=True)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.ifftc(data, orthonorm=False)
        fdata_np = self._fftnc(data, axes=(-2,), norm=None, transpose=True)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps) 
Example #9
Source File: tfmri_test.py    From dl-cs with MIT License
def test_fft2c(self):
        shape = [10, 10, 2]
        data = tf.complex(
            tf.random_uniform(shape), tf.random_uniform(shape))
        fdata = tfmri.fft2c(data)
        fdata_np = self._fftnc(data, axes=(-3, -2))
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.fft2c(data, data_format='channels_first')
        fdata_np = self._fftnc(data, axes=(-2, -1))
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.fft2c(data, orthonorm=False)
        fdata_np = self._fftnc(data, axes=(-3, -2), norm=None)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps) 
Example #10
Source File: tfmri_test.py    From dl-cs with MIT License
def test_ifft2c(self):
        shape = [10, 10, 2]
        data = tf.complex(
            tf.random_uniform(shape), tf.random_uniform(shape))
        fdata_np = self._fftnc(data, axes=(-3, -2), transpose=True)
        fdata = tfmri.ifft2c(data)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)
        fdata = tfmri.fft2c(data, transpose=True)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.ifft2c(data, data_format='channels_first')
        fdata_np = self._fftnc(data, axes=(-2, -1), transpose=True)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps)

        fdata = tfmri.ifft2c(data, orthonorm=False)
        fdata_np = self._fftnc(data, axes=(-3, -2), norm=None, transpose=True)
        diff = np.mean(np.abs(fdata_np - fdata) ** 2)
        self.assertTrue(diff < eps) 
Example #11
Source File: tfmri.py    From dl-cs with MIT License
def channels_to_complex(image,
                        data_format='channels_last',
                        name='channels2complex'):
    """Convert data from channels to complex."""
    if len(image.shape) != 3 and len(image.shape) != 4:
        raise TypeError('Input data must have 3 or 4 dimensions')

    axis_c = -1 if data_format == 'channels_last' else -3
    shape_c = image.shape[axis_c].value

    if shape_c and (shape_c % 2 != 0):
        raise TypeError(
            'Number of channels (%d) must be divisible by 2' % shape_c)
    if image.dtype is tf.complex64 or image.dtype is tf.complex128:
        raise TypeError('Input data cannot be complex')

    with tf.name_scope(name):
        image_real, image_imag = tf.split(image, 2, axis=axis_c)
        image_out = tf.complex(image_real, image_imag)
    return image_out 
Example #12
Source File: tf_image.py    From burst-denoising with Apache License 2.0
def hdrplus_merge(imgs, N, c, sig):
    ccast_tf = lambda x : tf.complex(x, tf.zeros_like(x))

    # imgs is [batch, h, w, ch]
    rcw = tf.expand_dims(rcwindow(N), axis=-1)
    imgs = imgs * rcw
    imgs = tf.transpose(imgs, [0, 3, 1, 2])
    imgs_f = tf.fft2d(ccast_tf(imgs))
    imgs_f = tf.transpose(imgs_f, [0, 2, 3, 1])
    Dz2 = tf.square(tf.abs(imgs_f[...,0:1] - imgs_f))
    Az = Dz2 / (Dz2 + c*sig**2)
    filt0 = 1 + tf.expand_dims(tf.reduce_sum(Az[...,1:], axis=-1), axis=-1)
    filts = tf.concat([filt0, 1 - Az[...,1:]], axis=-1)
    output_f = tf.reduce_mean(imgs_f * ccast_tf(filts), axis=-1)
    output_f = tf.real(tf.ifft2d(output_f))

    return output_f 
Example #13
Source File: tensorflow_backend.py    From kymatio with BSD 3-Clause "New" or "Revised" License
def complex_modulus(x):
    """Computes complex modulus.

        Parameters
        ----------
        x : tensor
            Input tensor whose complex modulus is to be calculated.

        Returns
        -------
        modulus : tensor
            Tensor of the same size as x, holding the elementwise
            complex modulus of the input.

    """
    modulus = tf.abs(x)
    return modulus 
Example #14
Source File: scattering.py    From DeepLearningImplementations with MIT License
def compute_fft(x, direction="C2C", inverse=False):

    if direction == 'C2R':
        inverse = True

    x_shape = x.get_shape().as_list()
    h, w = x_shape[-2], x_shape[-3]

    x_complex = tf.complex(x[..., 0], x[..., 1])

    if direction == 'C2R':
        out = tf.real(tf.ifft2d(x_complex)) * h * w
        return out

    else:
        if inverse:
            out = stack_real_imag(tf.ifft2d(x_complex)) * h * w
        else:
            out = stack_real_imag(tf.fft2d(x_complex))
        return out 
Example #15
Source File: models.py    From DeepLearningImplementations with MIT License
def compute_fft(x, direction="C2C", inverse=False):

    if direction == 'C2R':
        inverse = True

    x_shape = x.get_shape().as_list()
    h, w = x_shape[-2], x_shape[-3]

    x_complex = tf.complex(x[..., 0], x[..., 1])

    if direction == 'C2R':
        out = tf.real(tf.ifft2d(x_complex)) * h * w
        return out

    else:
        if inverse:
            out = stack_real_imag(tf.ifft2d(x_complex)) * h * w
        else:
            out = stack_real_imag(tf.fft2d(x_complex))
        return out 
Example #16
Source File: geom_ops.py    From rgn with MIT License
def reduce_mean_angle(weights, angles, use_complex=False, name=None):
    """ Computes the weighted mean of angles. Accepts option to compute use complex exponentials or real numbers.

        Complex number-based version is giving wrong gradients for some reason, but forward calculation is fine.

        See https://en.wikipedia.org/wiki/Mean_of_circular_quantities

    Args:
        weights: [BATCH_SIZE, NUM_ANGLES]
        angles:  [NUM_ANGLES, NUM_DIHEDRALS]

    Returns:
                 [BATCH_SIZE, NUM_DIHEDRALS]

    """

    with tf.name_scope(name, 'reduce_mean_angle', [weights, angles]) as scope:
        weights = tf.convert_to_tensor(weights, name='weights')
        angles  = tf.convert_to_tensor(angles,  name='angles')

        if use_complex:
            # use complex-valued exponentials for calculation
            cwts =        tf.complex(weights, 0.) # cast to complex numbers
            exps = tf.exp(tf.complex(0., angles)) # convert to point on complex plane

            unit_coords = tf.matmul(cwts, exps) # take the weighted mixture of the unit circle coordinates

            return tf.angle(unit_coords, name=scope) # return angle of averaged coordinate

        else:
            # use real-numbered pairs of values
            sins = tf.sin(angles)
            coss = tf.cos(angles)

            y_coords = tf.matmul(weights, sins)
            x_coords = tf.matmul(weights, coss)

            return tf.atan2(y_coords, x_coords, name=scope) 
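
As a quick sanity check on the circular-mean identity that the docstring above refers to, here is a short NumPy sketch (the array values are illustrative, not taken from the project): the angle of the weighted sum of unit complex exponentials matches atan2 of the weighted sums of sines and cosines.

import numpy as np

weights = np.array([[0.2, 0.8]])          # [BATCH_SIZE, NUM_ANGLES]
angles = np.array([[0.1, 2.0],
                   [3.0, -1.5]])          # [NUM_ANGLES, NUM_DIHEDRALS]

via_complex = np.angle(weights.astype(complex) @ np.exp(1j * angles))
via_real = np.arctan2(weights @ np.sin(angles), weights @ np.cos(angles))
assert np.allclose(via_complex, via_real)  # both are [BATCH_SIZE, NUM_DIHEDRALS]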
Example #17
Source File: ops.py    From tfdeploy with MIT License
def random(self, *shapes, **kwargs):
        if all(isinstance(i, int) for i in shapes):
            if kwargs.get("complex", False):
                return (self.random(*shapes) + 1j * self.random(*shapes)).astype(np.complex64)
            else:
                return np.random.rand(*shapes)
        else:
            return tuple(self.random(*shape) for shape in shapes) 
Example #18
Source File: ops.py    From tfdeploy with MIT License
def test_Svd(self):
        t = tf.svd(self.random(4, 5, 3, 2).astype("float32"))
        self.check(t, ndigits=4, abs=True)


    #
    # complex number ops
    # 
Example #19
Source File: ops.py    From tfdeploy with MIT License
def test_Complex(self):
        t = tf.complex(*self.random((3, 4), (3, 4)))
        self.check(t) 
Example #20
Source File: ops.py    From tfdeploy with MIT License
def test_Conj(self):
        t = tf.conj(self.random(3, 4, complex=True))
        self.check(t) 
Example #21
Source File: ops.py    From tfdeploy with MIT License
def test_Imag(self):
        t = tf.imag(tf.Variable(self.random(3, 4, complex=True)))
        self.check(t) 
Example #22
Source File: ops.py    From tfdeploy with MIT License
def test_FFT2D(self):
        # only defined for gpu
        if DEVICE == GPU:
            t = tf.fft2d(self.random(3, 4, complex=True))
            self.check(t) 
Example #23
Source File: ops.py    From tfdeploy with MIT License
def test_IFFT2D(self):
        # only defined for gpu
        if DEVICE == GPU:
            t = tf.ifft2d(self.random(3, 4, complex=True))
            self.check(t) 
Example #24
Source File: ops.py    From tfdeploy with MIT License
def test_FFT3D(self):
        # only defined for gpu
        if DEVICE == GPU:
            t = tf.fft3d(self.random(3, 4, 5, complex=True))
            self.check(t) 
Example #25
Source File: ops.py    From tfdeploy with MIT License
def test_IFFT3D(self):
        # only defined for gpu
        if DEVICE == GPU:
            t = tf.ifft3d(self.random(3, 4, 5, complex=True))
            self.check(t)


    #
    # reduction
    # 
Example #26
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        
        assert inputx.dtype in [tf.complex64, tf.complex128], 'inputx is not complex.'
        
        return tf.concat([tf.real(inputx), tf.imag(inputx)], -1) 
Example #27
Source File: layers.py    From neuron with GNU General Public License v3.0
def call(self, inputx):
        nb_channels = inputx.shape[-1] // 2
        return tf.complex(inputx[...,:nb_channels], inputx[...,nb_channels:]) 
Example #28
Source File: sequential_batch_fft_ops.py    From RGB-N with MIT License
def _SequentialBatchFFTGrad(op, grad):
    if (grad.dtype == tf.complex64):
        size = tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
            * tf.complex(size, 0.))
    else:
        size = tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_ifft(grad, op.get_attr("compute_size"))
            * tf.complex(size, tf.zeros([], tf.float64))) 
Example #29
Source File: sequential_batch_fft_ops.py    From RGB-N with MIT License
def _SequentialBatchIFFTGrad(op, grad):
    if (grad.dtype == tf.complex64):
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float32)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
            * tf.complex(rsize, 0.))
    else:
        rsize = 1. / tf.cast(tf.shape(grad)[1], tf.float64)
        return (sequential_batch_fft(grad, op.get_attr("compute_size"))
            * tf.complex(rsize, tf.zeros([], tf.float64))) 
Example #30
Source File: BP_Decoder.py    From Iterative-BP-CNN with GNU General Public License v3.0
def one_bp_iteration(self, xe_v2c_pre_iter, H_sumC_to_V, H_sumV_to_C, xe_0):
        xe_tanh = tf.tanh(tf.to_double(tf.truediv(xe_v2c_pre_iter, [2.0])))
        xe_tanh = tf.to_float(xe_tanh)
        xe_tanh_temp = tf.sign(xe_tanh)
        xe_sum_log_img = tf.matmul(H_sumC_to_V, tf.multiply(tf.truediv((1 - xe_tanh_temp), [2.0]), [3.1415926]))
        xe_sum_log_real = tf.matmul(H_sumC_to_V, tf.log(1e-8 + tf.abs(xe_tanh)))
        xe_sum_log_complex = tf.complex(xe_sum_log_real, xe_sum_log_img)
        xe_product = tf.real(tf.exp(xe_sum_log_complex))
        xe_product_temp = tf.multiply(tf.sign(xe_product), -2e-7)
        xe_pd_modified = tf.add(xe_product, xe_product_temp)
        xe_v_sumc = tf.multiply(self.atanh(xe_pd_modified), [2.0])
        xe_c_sumv = tf.add(xe_0, tf.matmul(H_sumV_to_C, xe_v_sumc))
        return xe_v_sumc, xe_c_sumv