Python tensorflow.reshape() Examples

The following are 30 code examples showing how to use tensorflow.reshape(). They are extracted from open source projects; the source project, author, file, and license are listed above each example.


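Before diving into the project examples, here is a minimal sketch of the API itself (assuming TensorFlow 1.x, which all of the examples below use): tf.reshape() returns a tensor with the same elements laid out in a new shape, and at most one dimension may be -1, in which case TensorFlow infers it from the total element count.

import tensorflow as tf

x = tf.constant([[1, 2, 3, 4, 5, 6],
                 [7, 8, 9, 10, 11, 12]])   # shape (2, 6)
a = tf.reshape(x, [3, 4])                  # explicit target shape
b = tf.reshape(x, [-1, 3])                 # -1 is inferred as 4
c = tf.reshape(x, [-1])                    # flatten to rank 1

with tf.Session() as sess:
    print(sess.run(a).shape)  # (3, 4)
    print(sess.run(b).shape)  # (4, 3)
    print(sess.run(c).shape)  # (12,)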

Example 1
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: convolution.py    License: MIT License
def _forward(self):
        # Rearrange each s x s spatial block into the channel dimension
        # (a space-to-depth style reorganization).
        inp = self.inp.out
        shape = inp.get_shape().as_list()
        _, h, w, c = shape
        s = self.lay.stride
        out = list()
        for i in range(int(h/s)):
            row_i = list()
            for j in range(int(w/s)):
                si, sj = s * i, s * j
                boxij = inp[:, si: si+s, sj: sj+s,:]
                flatij = tf.reshape(boxij, [-1,1,1,c*s*s])
                row_i += [flatij]
            out += [tf.concat(row_i, 2)]

        self.out = tf.concat(out, 1) 
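This hand-rolled loop rearranges each s x s spatial block into the channel dimension. Assuming TF 1.x with NHWC input, the built-in tf.space_to_depth should produce the same layout in a single call:

out = tf.space_to_depth(inp, block_size=s)  # (batch, h/s, w/s, c*s*s)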
Example 2
Project: neural-fingerprinting   Author: StephanZheng   File: util.py    License: BSD 3-Clause "New" or "Revised" License
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Example 3
Project: neural-fingerprinting   Author: StephanZheng   File: madry_mnist_model.py    License: BSD 3-Clause "New" or "Revised" License
def fprop(self, x):

        output = OrderedDict()
        # first convolutional layer
        h_conv1 = tf.nn.relu(self._conv2d(x, self.W_conv1) + self.b_conv1)
        h_pool1 = self._max_pool_2x2(h_conv1)

        # second convolutional layer
        h_conv2 = tf.nn.relu(
            self._conv2d(h_pool1, self.W_conv2) + self.b_conv2)
        h_pool2 = self._max_pool_2x2(h_conv2)

        # first fully connected layer

        h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, self.W_fc1) + self.b_fc1)

        # output layer
        logits = tf.matmul(h_fc1, self.W_fc2) + self.b_fc2

        output = deterministic_dict(locals())
        del output["self"]
        output[self.O_PROBS] = tf.nn.softmax(logits=logits)

        return output 
Example 4
Project: neural-fingerprinting   Author: StephanZheng   File: utils.py    License: BSD 3-Clause "New" or "Revised" License
def preprocess_batch(images_batch, preproc_func=None):
    """
    Creates a preprocessing graph for a batch given a function that processes
    a single image.

    :param images_batch: A tensor for an image batch.
    :param preproc_func: (optional function) A function that takes in a
        tensor and returns a preprocessed input.
    """
    if preproc_func is None:
        return images_batch

    with tf.variable_scope('preprocess'):
        images_list = tf.split(images_batch, int(images_batch.shape[0]))
        result_list = []
        for img in images_list:
            reshaped_img = tf.reshape(img, img.shape[1:])
            processed_img = preproc_func(reshaped_img)
            result_list.append(tf.expand_dims(processed_img, axis=0))
        result_images = tf.concat(result_list, axis=0)
    return result_images 
Example 5
Project: spleeter   Author: deezer   File: tensor.py    License: MIT License
def pad_and_reshape(instr_spec, frame_length, F):
    """
    :param instr_spec:
    :param frame_length:
    :param F:
    :returns:
    """
    spec_shape = tf.shape(instr_spec)
    extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
    n_extra_row = frame_length // 2 + 1 - F
    extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
    extended_spec = tf.concat([instr_spec, extension], axis=2)
    old_shape = tf.shape(extended_spec)
    new_shape = tf.concat([
        [old_shape[0] * old_shape[1]],
        old_shape[2:]],
        axis=0)
    processed_instr_spec = tf.reshape(extended_spec, new_shape)
    return processed_instr_spec 
Example 6
Project: spleeter   Author: deezer   File: __init__.py    License: MIT License
def _inverse_stft(self, stft_t, time_crop=None):
        """ Inverse and reshape the given STFT

        :param stft_t: input STFT
        :returns: inverse STFT (waveform)
        """
        inversed = inverse_stft(
            tf.transpose(stft_t, perm=[2, 0, 1]),
            self._frame_length,
            self._frame_step,
            window_fn=lambda frame_length, dtype: (
                hann_window(frame_length, periodic=True, dtype=dtype))
        ) * self.WINDOW_COMPENSATION_FACTOR
        reshaped = tf.transpose(inversed)
        if time_crop is None:
            time_crop = tf.shape(self._features['waveform'])[0]
        return reshaped[:time_crop, :] 
Example 7
Project: deep-learning-note   Author: wdxtub   File: 18_basic_tfrecord.py    License: MIT License
def read_from_tfrecord(filenames):
    tfrecord_file_queue = tf.train.string_input_producer(filenames, name='queue')
    reader = tf.TFRecordReader()
    _, tfrecord_serialized = reader.read(tfrecord_file_queue)

    tfrecord_features = tf.parse_single_example(tfrecord_serialized, features={
        'label': tf.FixedLenFeature([],tf.int64),
        'shape': tf.FixedLenFeature([],tf.string),
        'image': tf.FixedLenFeature([],tf.string),
    }, name='features')

    image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
    shape = tf.decode_raw(tfrecord_features['shape'], tf.int32)

    image = tf.reshape(image, shape)
    label = tfrecord_features['label']
    return label, shape, image 
Example 8
Project: ARU-Net   Author: TobiasGruening   File: layers.py    License: GNU General Public License v2.0
def images_to_sequence(tensor):
  """Convert a batch of images into a batch of sequences.

  Args:
    tensor: a (num_images, height, width, depth) tensor

  Returns:
    (width, num_images*height, depth) sequence tensor
  """
  transposed = tf.transpose(tensor, [2, 0, 1, 3])

  shapeT = tf.shape(transposed)
  shapeL = transposed.get_shape().as_list()
  # Compute the output shape of the sequence tensor
  n_shape = tf.stack([
      shapeT[0],
      shapeT[1]*shapeT[2],
      shapeL[3]
  ])
  reshaped = tf.reshape(transposed, n_shape)
  return reshaped 
Example 9
Project: ARU-Net   Author: TobiasGruening   File: layers.py    License: GNU General Public License v2.0
def sequence_to_images(tensor, num_batches):
  """Convert a batch of sequences into a batch of images.

  Args:
    tensor: (num_steps, num_batchesRNN, depth) sequence tensor
    num_batches: the number of image batches

  Returns:
    (num_batches, height, width, depth) tensor
  """

  shapeT = tf.shape(tensor)
  shapeL = tensor.get_shape().as_list()
  # Compute the output shape of the image tensor
  height = tf.to_int32(shapeT[1] / num_batches)
  n_shape = tf.stack([
      shapeT[0],
      num_batches,
      height,
      shapeL[2]
  ])

  reshaped = tf.reshape(tensor, n_shape)
  return tf.transpose(reshaped, [1, 2, 0, 3]) 
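images_to_sequence and sequence_to_images are inverses of each other; a minimal round-trip sketch (assuming TF 1.x):

images = tf.zeros([4, 8, 16, 32])              # (num_images, height, width, depth)
seq = images_to_sequence(images)               # (16, 4*8, 32)
back = sequence_to_images(seq, num_batches=4)  # (4, 8, 16, 32) again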
Example 10
Project: DOTA_models   Author: ringringyi   File: resnet_v2_test.py    License: Apache License 2.0
def create_test_input(batch_size, height, width, channels):
  """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension
      [batch_size, height, width, channels] if any of the inputs are `None` or a
    constant `Tensor` with the mesh grid values along the spatial dimensions.
  """
  if None in [batch_size, height, width, channels]:
    return tf.placeholder(tf.float32, (batch_size, height, width, channels))
  else:
    return tf.to_float(
        np.tile(
            np.reshape(
                np.reshape(np.arange(height), [height, 1]) +
                np.reshape(np.arange(width), [1, width]),
                [1, height, width, 1]),
            [batch_size, 1, 1, channels])) 
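The nested np.reshape calls are a broadcasting trick: a [height, 1] column of row indices plus a [1, width] row of column indices yields a height x width mesh of i + j values. A quick NumPy illustration:

import numpy as np

grid = np.reshape(np.arange(2), [2, 1]) + np.reshape(np.arange(3), [1, 3])
print(grid)
# [[0 1 2]
#  [1 2 3]]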
Example 11
Project: DOTA_models   Author: ringringyi   File: real_nvp_utils.py    License: Apache License 2.0
def depool_2x2(input_, stride=2):
    """Depooling."""
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    res = tf.reshape(input_, [batch_size, height, 1, width, 1, channels])
    res = tf.concat(
        axis=2, values=[res, tf.zeros([batch_size, height, stride - 1, width, 1, channels])])
    res = tf.concat(axis=4, values=[
        res, tf.zeros([batch_size, height, stride, width, stride - 1, channels])
    ])
    res = tf.reshape(res, [batch_size, stride * height, stride * width, channels])

    return res


# random flip on a batch of images 
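Note that depool_2x2 upsamples by interleaving zero rows and columns rather than duplicating values; a minimal shape sketch (assuming TF 1.x and a static NHWC shape):

x = tf.zeros([1, 4, 4, 3])   # NHWC input
y = depool_2x2(x)            # (1, 8, 8, 3); the inserted rows and columns are zeros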
Example 12
Project: DOTA_models   Author: ringringyi   File: real_nvp_utils.py    License: Apache License 2.0
def batch_random_flip(input_):
    """Simultaneous horizontal random flip."""
    if isinstance(input_, (float, int)):
        return input_
    shape = input_.get_shape().as_list()
    batch_size = shape[0]
    height = shape[1]
    width = shape[2]
    channels = shape[3]
    res = tf.split(axis=0, num_or_size_splits=batch_size, value=input_)
    res = [elem[0, :, :, :] for elem in res]
    res = [tf.image.random_flip_left_right(elem) for elem in res]
    res = [tf.reshape(elem, [1, height, width, channels]) for elem in res]
    res = tf.concat(axis=0, values=res)

    return res


# build a one hot representation corresponding to the integer tensor
# the one-hot dimension is appended to the integer tensor shape 
Example 13
Project: DOTA_models   Author: ringringyi   File: sequence_layers.py    License: Apache License 2.0
def get_image_feature(self, char_index):
    """Returns a subset of image features for a character.

    Args:
      char_index: an index of a character.

    Returns:
      A tensor with shape [batch_size, ?]. The output depth depends on the
      depth of input net.
    """
    batch_size, features_num, _ = [d.value for d in self._net.get_shape()]
    slice_len = int(features_num / self._params.seq_length)
    # When features_num != seq_length, we just pick a subset of image
    # features; this choice is arbitrary and there is no intuitive geometrical
    # interpretation. If features_num is not divisible by seq_length, there
    # will be unused image features.
    net_slice = self._net[:, char_index:char_index + slice_len, :]
    feature = tf.reshape(net_slice, [batch_size, -1])
    logging.debug('Image feature: %s', feature)
    return feature 
Example 14
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    License: MIT License
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into overlapping frames of 400 samples, with a 160-sample step
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Example 15
Project: Black-Box-Audio   Author: rtaori   File: tf_logits.py    License: MIT License
def get_logits(new_input, length, first=[]):
    """
    Compute the logits for a given waveform.

    First, preprocess with the TF version of MFC above,
    and then call DeepSpeech on the features.
    """
    # new_input = tf.Print(new_input, [tf.shape(new_input)])

    # We need to init DeepSpeech the first time we're called
    if first == []:
        first.append(False)
        # Okay, so this is ugly again.
        # We just want it to not crash.
        tf.app.flags.FLAGS.alphabet_config_path = "DeepSpeech/data/alphabet.txt"
        DeepSpeech.initialize_globals()
        print('initialized deepspeech globals')

    batch_size = new_input.get_shape()[0]

    # 1. Compute the MFCCs for the input audio
    # (this is differentiable with our implementation above)
    empty_context = np.zeros((batch_size, 9, 26), dtype=np.float32)
    new_input_to_mfcc = compute_mfcc(new_input)[:, ::2]
    features = tf.concat((empty_context, new_input_to_mfcc, empty_context), 1)

    # 2. We get to see 9 frames at a time to make our decision,
    # so concatenate them together.
    features = tf.reshape(features, [new_input.get_shape()[0], -1])
    features = tf.stack([features[:, i:i+19*26] for i in range(0,features.shape[1]-19*26+1,26)],1)
    features = tf.reshape(features, [batch_size, -1, 19*26])

    # 3. Whiten the data
    mean, var = tf.nn.moments(features, axes=[0,1,2])
    features = (features-mean)/(var**.5)

    # 4. Finally we process it with DeepSpeech
    logits = DeepSpeech.BiRNN(features, length, [0]*10)

    return logits 
Example 16
Project: BERT-Classification-Tutorial   Author: Socialbird-AILab   File: modeling.py    License: Apache License 2.0
def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Create 3D attention mask from a 2D tensor mask.

    Args:
      from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
      to_mask: int32 Tensor of shape [batch_size, to_seq_length].

    Returns:
      float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]

    to_shape = get_shape_list(to_mask, expected_rank=2)
    to_seq_length = to_shape[1]

    to_mask = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

    # We don't assume that `from_tensor` is a mask (although it could be). We
    # don't actually care if we attend *from* padding tokens (only *to*
    # padding tokens), so we create a tensor of all ones.
    #
    # `broadcast_ones` = [batch_size, from_seq_length, 1]
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

    # Here we broadcast along two dimensions to create the mask.
    mask = broadcast_ones * to_mask

    return mask 
Example 17
Project: BERT-Classification-Tutorial   Author: Socialbird-AILab   File: modeling.py    License: Apache License 2.0
def reshape_to_matrix(input_tensor):
    """Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
    ndims = input_tensor.shape.ndims
    if ndims < 2:
        raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
                         (input_tensor.shape))
    if ndims == 2:
        return input_tensor

    width = input_tensor.shape[-1]
    output_tensor = tf.reshape(input_tensor, [-1, width])
    return output_tensor 
Example 18
Project: BERT-Classification-Tutorial   Author: Socialbird-AILab   File: modeling.py    License: Apache License 2.0
def reshape_from_matrix(output_tensor, orig_shape_list):
    """Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
    if len(orig_shape_list) == 2:
        return output_tensor

    output_shape = get_shape_list(output_tensor)

    orig_dims = orig_shape_list[0:-1]
    width = output_shape[-1]

    return tf.reshape(output_tensor, orig_dims + [width]) 
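A hypothetical round trip through the two helpers (assuming TF 1.x; get_shape_list is defined elsewhere in modeling.py):

x = tf.zeros([8, 128, 768])                    # [batch, seq_length, width]
m = reshape_to_matrix(x)                       # [8 * 128, 768]
y = reshape_from_matrix(m, x.shape.as_list())  # back to [8, 128, 768]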
Example 19
Project: deep-siamese-text-similarity   Author: dhwajraj   File: siamese_network_semantic.py    License: MIT License
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.constant(0.0, shape=[vocab_size, embedding_size]),
                trainable=trainableEmbeddings,name="W")
            self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        print(self.embedded_words1)
        # Run the stacked RNN on each side and compute the normalized distance
        with tf.name_scope("output"):
            self.out1=self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2=self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1,self.out2)),1,keep_dims=True))
            self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1),1,keep_dims=True)),tf.sqrt(tf.reduce_sum(tf.square(self.out2),1,keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y,self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),tf.rint(self.distance), name="temp_sim") #auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy=tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") 
Example 20
Project: deep-siamese-text-similarity   Author: dhwajraj   File: siamese_network.py    License: MIT License
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size):

        # Placeholders for input, output and dropout
        self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
        self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
        self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0, name="l2_loss")
          
        # Embedding layer
        with tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                trainable=True,name="W")
            self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
            #self.embedded_chars_expanded1 = tf.expand_dims(self.embedded_chars1, -1)
            self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)
            #self.embedded_chars_expanded2 = tf.expand_dims(self.embedded_chars2, -1)

        # Run a BiRNN on each side and compute the normalized distance
        with tf.name_scope("output"):
            self.out1=self.BiRNN(self.embedded_chars1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
            self.out2=self.BiRNN(self.embedded_chars2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
            self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1,self.out2)),1,keep_dims=True))
            self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1),1,keep_dims=True)),tf.sqrt(tf.reduce_sum(tf.square(self.out2),1,keep_dims=True))))
            self.distance = tf.reshape(self.distance, [-1], name="distance")
        with tf.name_scope("loss"):
            self.loss = self.contrastive_loss(self.input_y,self.distance, batch_size)
        #### Accuracy computation is outside of this class.
        with tf.name_scope("accuracy"):
            self.temp_sim = tf.subtract(tf.ones_like(self.distance),tf.rint(self.distance), name="temp_sim") #auto threshold 0.5
            correct_predictions = tf.equal(self.temp_sim, self.input_y)
            self.accuracy=tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") 
Example 21
Project: icme2019   Author: ShenDezhou   File: core.py    License: MIT License
def call(self, inputs, **kwargs):
        x = inputs
        if self.use_bias:
            x = tf.nn.bias_add(x, self.global_bias, data_format='NHWC')
        output = activation_fun(self.activation, x)
        output = tf.reshape(output, (-1, 1))

        return output 
Example 22
Project: cwavegan   Author: acheketa   File: tpu_model.py    License: MIT License
def tf_repeat(output, idx, dim1, dim2, bias):
    # tensor equivalent of np.repeat
    # 1d to 3d array tensor
    if bias:
        idx = tf.tile(idx, [1, dim1 * dim2])
        idx = tf.reshape(idx, [-1, dim1, dim2])
        return output * idx
    else:
        return output 
Example 23
Project: cwavegan   Author: acheketa   File: wavegan.py    License: MIT License
def tf_repeat(output, idx, dim1, dim2):
    # tensor equivalent of np.repeat
    # 1d to 3d array tensor
    idx = tf.tile(idx, [1, dim1 * dim2])
    idx = tf.reshape(idx, [-1, dim1, dim2])
    return output * idx 
Example 24
Project: disentangling_conditional_gans   Author: zalandoresearch   File: tfutil.py    License: MIT License
def flatten(x):
    with tf.name_scope('Flatten'):
        return tf.reshape(x, [-1]) 
Example 25
Project: disentangling_conditional_gans   Author: zalandoresearch   File: loss.py    License: MIT License
def tf_repeat(tensor, repeats):
    expanded_tensor = tf.expand_dims(tensor, -1)
    multiples = [1] + repeats
    tiled_tensor = tf.tile(expanded_tensor, multiples = multiples)
    repeated_tensor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)
    return repeated_tensor

#----------------------------------------------------------------------------
# Generator loss function used in the paper (WGAN + AC-GAN). 
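A minimal sketch of the behavior (assuming TF 1.x): the expand/tile/reshape chain reproduces np.repeat applied along every axis.

x = tf.constant([[1, 2],
                 [3, 4]])
y = tf_repeat(x, [2, 3])  # shape (4, 6), same as np.repeat(np.repeat(x, 2, 0), 3, 1)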
Example 26
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    License: MIT License
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
    if len(x.shape) > 2:
        x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
    w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    w = tf.cast(w, x.dtype)
    return tf.matmul(x, w)

#----------------------------------------------------------------------------
# Convolutional layer. 
Example 27
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    License: MIT License
def apply_bias(x):
    b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros())
    b = tf.cast(b, x.dtype)
    if len(x.shape) == 2:
        return x + b
    else:
        return x + tf.reshape(b, [1, -1, 1, 1])

#----------------------------------------------------------------------------
# Leaky ReLU activation. Same as tf.nn.leaky_relu, but supports FP16. 
Example 28
Project: disentangling_conditional_gans   Author: zalandoresearch   File: networks.py    License: MIT License
def upscale2d(x, factor=2):
    assert isinstance(factor, int) and factor >= 1
    if factor == 1: return x
    with tf.variable_scope('Upscale2D'):
        s = x.shape
        x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
        x = tf.tile(x, [1, 1, 1, factor, 1, factor])
        x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x

#----------------------------------------------------------------------------
# Fused upscale2d + conv2d.
# Faster and uses less memory than performing the operations separately. 
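The reshape / tile / reshape pattern in upscale2d is nearest-neighbor upsampling for NCHW tensors; a minimal shape walk-through (assuming TF 1.x and a static input shape):

x = tf.zeros([1, 3, 4, 4])   # [batch, channels, height, width]
y = upscale2d(x, factor=2)   # (1, 3, 8, 8); each pixel becomes a 2x2 block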
Example 29
Project: disentangling_conditional_gans   Author: zalandoresearch   File: train.py    License: MIT License
def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keep_dims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x = tfutil.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x

#----------------------------------------------------------------------------
# Just-in-time processing of masks before feeding them to the networks. 
Example 30
Project: disentangling_conditional_gans   Author: zalandoresearch   File: dataset.py    License: MIT License
def parse_tfrecord_tf(record):
    features = tf.parse_single_example(record, features={
        'shape': tf.FixedLenFeature([3], tf.int64),
        'data': tf.FixedLenFeature([], tf.string)})
    data = tf.decode_raw(features['data'], tf.uint8)
    return tf.reshape(data, features['shape'])