Python tensorflow.int32() Examples
The following are 29 code examples of tensorflow.int32(), collected from open-source projects. The originating project and source file are noted above each example. You may also want to check out all available functions and classes of the tensorflow module.
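As a quick orientation before the examples: tf.int32 almost always appears as a dtype argument. The sketch below uses the TF 1.x API that these examples share; the specific values are illustrative only.

import tensorflow as tf

a = tf.constant([1, 2, 3], dtype=tf.int32)            # explicit int32 constant
b = tf.cast(tf.constant([1.7, 2.3]), tf.int32)        # float -> int32, truncates toward zero
c = tf.random_uniform([], maxval=10, dtype=tf.int32)  # random int32 scalar in [0, 10)
d = tf.placeholder(tf.int32, shape=[None])            # int32 feed input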

Example #1
Source File: dataset.py From disentangling_conditional_gans with MIT License

def __init__(self, resolution=1024, num_channels=3, dtype='uint8',
             dynamic_range=[0, 255], label_size=0, label_dtype='float32'):
    self.resolution = resolution
    self.resolution_log2 = int(np.log2(resolution))
    self.shape = [num_channels, resolution, resolution]
    self.dtype = dtype
    self.dynamic_range = dynamic_range
    self.label_size = label_size
    self.label_dtype = label_dtype
    self._tf_minibatch_var = None
    self._tf_lod_var = None
    self._tf_minibatch_np = None
    self._tf_labels_np = None
    assert self.resolution == 2 ** self.resolution_log2
    with tf.name_scope('Dataset'):
        self._tf_minibatch_var = tf.Variable(np.int32(0), name='minibatch_var')
        self._tf_lod_var = tf.Variable(np.int32(0), name='lod_var')

Example #2
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0

def structure(self, input_tensor):
    """
    Args:
        input_tensor: NHWC
    """
    rnd = tf.random_uniform((), 135, 160, dtype=tf.int32)
    rescaled = tf.image.resize_images(
        input_tensor, [rnd, rnd],
        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    h_rem = 160 - rnd
    w_rem = 160 - rnd
    pad_left = tf.random_uniform((), 0, w_rem, dtype=tf.int32)
    pad_right = w_rem - pad_left
    pad_top = tf.random_uniform((), 0, h_rem, dtype=tf.int32)
    pad_bottom = h_rem - pad_top
    padded = tf.pad(rescaled, [[0, 0], [pad_top, pad_bottom],
                               [pad_left, pad_right], [0, 0]])
    padded.set_shape((input_tensor.shape[0], 160, 160, 3))
    output = tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(0.9),
                     lambda: padded, lambda: input_tensor)
    return output

Example #3
Source File: spectrogram.py From spleeter with MIT License

def time_stretch(
        spectrogram,
        factor=1.0,
        method=tf.image.ResizeMethod.BILINEAR):
    """ Time stretch a spectrogram preserving shape in tensorflow. Note that
    this is an approximation in the frequency domain.

    :param spectrogram: Input spectrogram to be time stretched as tensor.
    :param factor: (Optional) Time stretch factor, must be > 0, default to 1.
    :param method: (Optional) Interpolation method, default to BILINEAR.
    :returns: Time stretched spectrogram as tensor with same shape.
    """
    T = tf.shape(spectrogram)[0]
    T_ts = tf.cast(tf.cast(T, tf.float32) * factor, tf.int32)[0]
    F = tf.shape(spectrogram)[1]
    ts_spec = tf.image.resize_images(
        spectrogram, [T_ts, F],
        method=method, align_corners=True)
    return tf.image.resize_image_with_crop_or_pad(ts_spec, T, F)

Example #4
Source File: spectrogram.py From spleeter with MIT License

def pitch_shift(
        spectrogram,
        semitone_shift=0.0,
        method=tf.image.ResizeMethod.BILINEAR):
    """ Pitch shift a spectrogram preserving shape in tensorflow. Note that
    this is an approximation in the frequency domain.

    :param spectrogram: Input spectrogram to be pitch shifted as tensor.
    :param semitone_shift: (Optional) Pitch shift in semitone, default to 0.0.
    :param method: (Optional) Interpolation method, default to BILINEAR.
    :returns: Pitch shifted spectrogram (same shape as spectrogram).
    """
    factor = 2 ** (semitone_shift / 12.)
    T = tf.shape(spectrogram)[0]
    F = tf.shape(spectrogram)[1]
    F_ps = tf.cast(tf.cast(F, tf.float32) * factor, tf.int32)[0]
    ps_spec = tf.image.resize_images(
        spectrogram, [T, F_ps],
        method=method, align_corners=True)
    paddings = [[0, 0], [0, tf.maximum(0, F - F_ps)], [0, 0]]
    return tf.pad(ps_spec[:, :F, :], paddings, 'CONSTANT')

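Note that both functions index the computed size with [0], so they expect factor and semitone_shift as shape-[1] tensors rather than plain Python floats. A sketch of random augmentation along those lines; the spectrogram shape and sampling ranges are assumptions, not taken from spleeter:

spec = tf.random_uniform([512, 1024, 2])  # (time, frequency, channels); assumed shape
stretched = time_stretch(spec, factor=tf.random_uniform([1], 0.8, 1.2))
shifted = pitch_shift(spec, semitone_shift=tf.random_uniform([1], -1.0, 1.0))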
Example #5
Source File: 2_simple_mnist.py From deep-learning-note with MIT License

def input_fn(partition, training, batch_size):
    """Generate an input_fn for the Estimator."""

    def _input_fn():
        if partition == "train":
            dataset = tf.data.Dataset.from_generator(
                generator(x_train, y_train), (tf.float32, tf.int32),
                ((28, 28), ()))
        else:
            dataset = tf.data.Dataset.from_generator(
                generator(x_test, y_test), (tf.float32, tf.int32),
                ((28, 28), ()))

        if training:
            dataset = dataset.shuffle(10 * batch_size, seed=RANDOM_SEED).repeat()

        dataset = dataset.map(preprocess_image).batch(batch_size)
        iterator = dataset.make_one_shot_iterator()
        features, labels = iterator.get_next()
        return features, labels

    return _input_fn

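A sketch of how the returned closure might be wired into a tf.estimator.Estimator; the estimator instance and step count here are assumptions, not part of the project code:

train_input_fn = input_fn("train", training=True, batch_size=32)
estimator.train(input_fn=train_input_fn, max_steps=1000)  # `estimator` assumed to exist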
Example #6
Source File: 9_w2v_eager.py From deep-learning-note with MIT License

def main():
    dataset = tf.data.Dataset.from_generator(
        gen, (tf.int32, tf.int32),
        (tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(LEARNING_RATE)
    model = Word2Vec(vocab_size=VOCAB_SIZE, embed_size=EMBED_SIZE)
    grad_fn = tfe.implicit_value_and_gradients(model.compute_loss)
    total_loss = 0.0
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            loss_batch, grads = grad_fn(center_words, target_words)
            total_loss += loss_batch
            optimizer.apply_gradients(grads)
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1

Example #7
Source File: inception_preprocessing.py From DOTA_models with Apache License 2.0

def apply_with_random_selector(x, func, num_cases):
    """Computes func(x, sel), with sel sampled from [0...num_cases-1].

    Args:
        x: input Tensor.
        func: Python function to apply.
        num_cases: Python int32, number of cases to sample sel from.

    Returns:
        The result of func(x, sel), where func receives the value of the
        selector as a python integer, but sel is sampled dynamically.
    """
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    # Pass the real x only to one of the func calls.
    return control_flow_ops.merge([
        func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
        for case in range(num_cases)])[0]

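A sketch of a typical call site for this helper in Inception-style preprocessing; `distort_color` is assumed to be defined elsewhere in the same file:

image = apply_with_random_selector(
    image,
    lambda x, ordering: distort_color(x, ordering),  # `distort_color` assumed
    num_cases=4)  # pick one of 4 color-distortion orderings at graph-run time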
Example #8
Source File: vgg_preprocessing.py From DOTA_models with Apache License 2.0

def _aspect_preserving_resize(image, smallest_side):
    """Resize images preserving the original aspect ratio.

    Args:
        image: A 3-D image `Tensor`.
        smallest_side: A python integer or scalar `Tensor` indicating the size of
            the smallest side after resize.

    Returns:
        resized_image: A 3-D tensor containing the resized image.
    """
    smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
    image = tf.expand_dims(image, 0)
    resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                             align_corners=False)
    resized_image = tf.squeeze(resized_image)
    resized_image.set_shape([None, None, 3])
    return resized_image

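A minimal usage sketch, assuming `_smallest_size_at_least` from the same file is in scope and an image of unknown size is fed in:

image = tf.placeholder(tf.float32, shape=[None, None, 3])      # assumed input
resized = _aspect_preserving_resize(image, smallest_side=256)  # shorter side -> 256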
Example #9
Source File: word2vec_optimized.py From DOTA_models with Apache License 2.0

def read_analogies(self):
    """Reads through the analogy question file.

    Returns:
        questions: a [n, 4] numpy array containing the analogy question's word ids.
        questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data, "rb") as analogy_f:
        for line in analogy_f:
            if line.startswith(b":"):  # Skip comments.
                continue
            words = line.strip().lower().split(b" ")
            ids = [self._word2id.get(w.strip()) for w in words]
            if None in ids or len(ids) != 4:
                questions_skipped += 1
            else:
                questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)

Example #10
Source File: word2vec.py From DOTA_models with Apache License 2.0

def read_analogies(self):
    """Reads through the analogy question file.

    Returns:
        questions: a [n, 4] numpy array containing the analogy question's word ids.
        questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data, "rb") as analogy_f:
        for line in analogy_f:
            if line.startswith(b":"):  # Skip comments.
                continue
            words = line.strip().lower().split(b" ")
            ids = [self._word2id.get(w.strip()) for w in words]
            if None in ids or len(ids) != 4:
                questions_skipped += 1
            else:
                questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)

Example #11
Source File: memory.py From DOTA_models with Apache License 2.0

def get_hash_slots(self, query):
    """Gets hashed-to buckets for batch of queries.

    Args:
        query: 2-d Tensor of query vectors.

    Returns:
        A list of hashed-to buckets for each hash function.
    """
    binary_hash = [
        tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
        for i in xrange(self.num_libraries)]
    hash_slot_idxs = [
        tf.reduce_sum(
            tf.to_int32(binary_hash[i]) *
            tf.constant([[2 ** i for i in xrange(self.num_hashes)]],
                        dtype=tf.int32), 1)
        for i in xrange(self.num_libraries)]
    return hash_slot_idxs

Example #12
Source File: train_eval.py From DOTA_models with Apache License 2.0

def batch_of_random_bools(batch_size, n):
    """Return a batch of random "boolean" numbers.

    Args:
        batch_size: Batch size dimension of returned tensor.
        n: number of entries per batch.

    Returns:
        A [batch_size, n] tensor of "boolean" numbers, where each number is
        represented as -1 or 1.
    """
    as_int = tf.random_uniform(
        [batch_size, n], minval=0, maxval=2, dtype=tf.int32)
    expanded_range = (as_int * 2) - 1
    return tf.cast(expanded_range, tf.float32)

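A quick check of the output in a TF 1.x session; the printed values are illustrative, since each draw is random:

with tf.Session() as sess:
    print(sess.run(batch_of_random_bools(2, 4)))
    # e.g. [[ 1. -1. -1.  1.]
    #       [-1.  1.  1. -1.]]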
Example #13
Source File: multiple_grid_anchor_generator_test.py From DOTA_models with Apache License 2.0

def test_construct_anchor_grid_unnormalized(self):
    base_anchor_size = tf.constant([1, 1], dtype=tf.float32)
    box_specs_list = [[(1.0, 1.0)]]
    exp_anchor_corners = [[0., 0., 320., 320.], [0., 320., 320., 640.]]

    anchor_generator = ag.MultipleGridAnchorGenerator(box_specs_list,
                                                      base_anchor_size)
    anchors = anchor_generator.generate(
        feature_map_shape_list=[(tf.constant(1, dtype=tf.int32),
                                 tf.constant(2, dtype=tf.int32))],
        im_height=320,
        im_width=640)
    anchor_corners = anchors.get()

    with self.test_session():
        anchor_corners_out = anchor_corners.eval()
        self.assertAllClose(anchor_corners_out, exp_anchor_corners)

Example #14
Source File: tf_example_decoder.py From DOTA_models with Apache License 2.0

def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to [num_instances, height,
    width] and cast to boolean type to save memory.

    Args:
        keys_to_tensors: a dictionary from keys to tensors.

    Returns:
        A 3-D boolean tensor of shape [num_instances, height, width].
    """
    masks = keys_to_tensors['image/segmentation/object']
    if isinstance(masks, tf.SparseTensor):
        masks = tf.sparse_tensor_to_dense(masks)
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
    return tf.cast(tf.reshape(masks, to_shape), tf.bool)

Example #15
Source File: ops.py From DOTA_models with Apache License 2.0

def expanded_shape(orig_shape, start_dim, num_dims):
    """Inserts multiple ones into a shape vector.

    Inserts an all-1 vector of length num_dims at position start_dim into a shape.
    Can be combined with tf.reshape to generalize tf.expand_dims.

    Args:
        orig_shape: the shape into which the all-1 vector is added (int32 vector)
        start_dim: insertion position (int scalar)
        num_dims: length of the inserted all-1 vector (int scalar)

    Returns:
        An int32 vector of length tf.size(orig_shape) + num_dims.
    """
    with tf.name_scope('ExpandedShape'):
        start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
        before = tf.slice(orig_shape, [0], start_dim)
        add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
        after = tf.slice(orig_shape, start_dim, [-1])
        new_shape = tf.concat([before, add_shape, after], 0)
        return new_shape

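As the docstring says, combining this with tf.reshape generalizes tf.expand_dims; a minimal sketch with an assumed input shape:

x = tf.random_uniform([4, 5])                  # assumed shape
new_shape = expanded_shape(tf.shape(x), 1, 2)  # evaluates to [4, 1, 1, 5]
y = tf.reshape(x, new_shape)                   # two singleton axes inserted at dim 1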
Example #16
Source File: shape_utils_test.py From DOTA_models with Apache License 2.0

def test_pad_or_clip_tensor_using_integer_input(self):
    t1 = tf.constant([1], dtype=tf.int32)
    tt1 = shape_utils.pad_or_clip_tensor(t1, 2)
    t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
    tt2 = shape_utils.pad_or_clip_tensor(t2, 2)
    t3 = tf.constant([1, 2, 3], dtype=tf.int32)
    tt3 = shape_utils.clip_tensor(t3, 2)
    t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
    tt4 = shape_utils.clip_tensor(t4, 2)

    self.assertEqual(2, tt1.get_shape()[0])
    self.assertEqual(2, tt2.get_shape()[0])
    self.assertEqual(2, tt3.get_shape()[0])
    self.assertEqual(2, tt4.get_shape()[0])

    with self.test_session() as sess:
        tt1_result, tt2_result, tt3_result, tt4_result = sess.run(
            [tt1, tt2, tt3, tt4])
        self.assertAllEqual([1, 0], tt1_result)
        self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
        self.assertAllEqual([1, 2], tt3_result)
        self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)

Example #17
Source File: shape_utils_test.py From DOTA_models with Apache License 2.0

def test_pad_or_clip_tensor_using_tensor_input(self):
    t1 = tf.constant([1], dtype=tf.int32)
    tt1 = shape_utils.pad_or_clip_tensor(t1, tf.constant(2))
    t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32)
    tt2 = shape_utils.pad_or_clip_tensor(t2, tf.constant(2))
    t3 = tf.constant([1, 2, 3], dtype=tf.int32)
    tt3 = shape_utils.clip_tensor(t3, tf.constant(2))
    t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32)
    tt4 = shape_utils.clip_tensor(t4, tf.constant(2))

    with self.test_session() as sess:
        tt1_result, tt2_result, tt3_result, tt4_result = sess.run(
            [tt1, tt2, tt3, tt4])
        self.assertAllEqual([1, 0], tt1_result)
        self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result)
        self.assertAllEqual([1, 2], tt3_result)
        self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result)

Example #18
Source File: ops_test.py From DOTA_models with Apache License 2.0

def test_normalized_to_image_coordinates(self):
    normalized_boxes = tf.placeholder(tf.float32, shape=(None, 1, 4))
    normalized_boxes_np = np.array([[[0.0, 0.0, 1.0, 1.0]],
                                    [[0.5, 0.5, 1.0, 1.0]]])
    image_shape = tf.convert_to_tensor([1, 4, 4, 3], dtype=tf.int32)
    absolute_boxes = ops.normalized_to_image_coordinates(normalized_boxes,
                                                         image_shape,
                                                         parallel_iterations=2)

    expected_boxes = np.array([[[0, 0, 4, 4]],
                               [[2, 2, 4, 4]]])
    with self.test_session() as sess:
        absolute_boxes = sess.run(absolute_boxes,
                                  feed_dict={normalized_boxes:
                                             normalized_boxes_np})
        self.assertAllEqual(absolute_boxes, expected_boxes)

Example #19
Source File: ops_test.py From DOTA_models with Apache License 2.0

def test_indices_to_dense_vector_size_at_inference(self):
    size = 5000
    num_indices = 250
    all_indices = np.arange(size)
    rand_indices = np.random.permutation(all_indices)[0:num_indices]

    expected_output = np.zeros(size, dtype=np.float32)
    expected_output[rand_indices] = 1.

    tf_all_indices = tf.placeholder(tf.int32)
    tf_rand_indices = tf.constant(rand_indices)
    indicator = ops.indices_to_dense_vector(tf_rand_indices,
                                            tf.shape(tf_all_indices)[0])
    feed_dict = {tf_all_indices: all_indices}

    with self.test_session() as sess:
        output = sess.run(indicator, feed_dict=feed_dict)
        self.assertAllEqual(output, expected_output)
        self.assertEqual(output.dtype, expected_output.dtype)

Example #20
Source File: ops_test.py From DOTA_models with Apache License 2.0

def test_position_sensitive_with_single_bin(self):
    num_spatial_bins = [1, 1]
    image_shape = [2, 3, 3, 4]
    crop_size = [2, 2]

    image = tf.random_uniform(image_shape)
    boxes = tf.random_uniform((6, 4))
    box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32)

    # When a single bin is used, position-sensitive crop and pool should be
    # the same as non-position sensitive crop and pool.
    crop = tf.image.crop_and_resize(image, boxes, box_ind, crop_size)
    crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True)

    ps_crop_and_pool = ops.position_sensitive_crop_regions(
        image, boxes, box_ind, crop_size, num_spatial_bins, global_pool=True)

    with self.test_session() as sess:
        expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool))
        self.assertAllClose(output, expected_output)

Example #21
Source File: run_audio_attack.py From Black-Box-Audio with MIT License

def setup_graph(self, input_audio_batch, target_phrase):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
    pass_in = np.clip(input_audio_batch, -2**15, 2**15 - 1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    with tf.variable_scope('', reuse=tf.AUTO_REUSE):
        inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
        len_batch = tf.placeholder(tf.float32, name='b')
        arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
        arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
        arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
        len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')

        logits = get_logits(inputs, arg2_logits)
        target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
        ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32),
                                 inputs=logits, sequence_length=len_seq)
        decoded, _ = tf.nn.ctc_greedy_decoder(logits, arg2_logits,
                                              merge_repeated=True)

        sess = tf.Session()
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, "models/session_dump")

    func1 = lambda a, b, c, d, e, f: sess.run(ctcloss,
        feed_dict={inputs: a, len_batch: b, arg2_logits: c,
                   arg1_dense: d, arg2_dense: e, len_seq: f})
    func2 = lambda a, b, c, d, e, f: sess.run([ctcloss, decoded],
        feed_dict={inputs: a, len_batch: b, arg2_logits: c,
                   arg1_dense: d, arg2_dense: e, len_seq: f})
    return (func1, func2)

Example #22
Source File: run_audio_attack.py From Black-Box-Audio with MIT License

def getctcloss(self, input_audio_batch, target_phrase, decode=False):
    batch_size = input_audio_batch.shape[0]
    weird = (input_audio_batch.shape[1] - 1) // 320
    logits_arg2 = np.tile(weird, batch_size)
    dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
    dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
    pass_in = np.clip(input_audio_batch, -2**15, 2**15 - 1)
    seq_len = np.tile(weird, batch_size).astype(np.int32)

    if decode:
        return self.funcs[1](pass_in, batch_size, logits_arg2,
                             dense_arg1, dense_arg2, seq_len)
    else:
        return self.funcs[0](pass_in, batch_size, logits_arg2,
                             dense_arg1, dense_arg2, seq_len)

Example #23
Source File: modeling_test.py From BERT-Classification-Tutorial with Apache License 2.0

def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)

Example #24
Source File: modeling.py From BERT-Classification-Tutorial with Apache License 2.0

def create_attention_mask_from_input_mask(from_tensor, to_mask):
    """Create 3D attention mask from a 2D tensor mask.

    Args:
        from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
        to_mask: int32 Tensor of shape [batch_size, to_seq_length].

    Returns:
        float Tensor of shape [batch_size, from_seq_length, to_seq_length].
    """
    from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
    batch_size = from_shape[0]
    from_seq_length = from_shape[1]

    to_shape = get_shape_list(to_mask, expected_rank=2)
    to_seq_length = to_shape[1]

    to_mask = tf.cast(
        tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)

    # We don't assume that `from_tensor` is a mask (although it could be). We
    # don't actually care if we attend *from* padding tokens (only *to* padding)
    # tokens so we create a tensor of all ones.
    #
    # `broadcast_ones` = [batch_size, from_seq_length, 1]
    broadcast_ones = tf.ones(
        shape=[batch_size, from_seq_length, 1], dtype=tf.float32)

    # Here we broadcast along two dimensions to create the mask.
    mask = broadcast_ones * to_mask

    return mask

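A usage sketch with made-up token ids and padding; get_shape_list comes from the same modeling.py:

input_ids = tf.constant([[5, 7, 9, 0], [6, 3, 0, 0]], dtype=tf.int32)   # assumed ids
input_mask = tf.constant([[1, 1, 1, 0], [1, 1, 0, 0]], dtype=tf.int32)  # 1 = real token
attn_mask = create_attention_mask_from_input_mask(input_ids, input_mask)
# attn_mask has shape [2, 4, 4]: every query position may attend only to
# the non-padding positions of its own sequence.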
Example #25
Source File: tpu_model.py From cwavegan with MIT License

def apply_phaseshuffle(x, rad, pad_type='reflect'):
    b, x_len, nch = x.get_shape().as_list()

    phase = tf.random_uniform([], minval=-rad, maxval=rad + 1, dtype=tf.int32)
    pad_l = tf.maximum(phase, 0)
    pad_r = tf.maximum(-phase, 0)
    phase_start = pad_r
    x = tf.pad(x, [[0, 0], [pad_l, pad_r], [0, 0]], mode=pad_type)

    x = x[:, phase_start:phase_start + x_len]
    x.set_shape([b, x_len, nch])

    return x

Example #26
Source File: wavegan.py From cwavegan with MIT License

def apply_phaseshuffle(x, rad, pad_type='reflect'):
    b, x_len, nch = x.get_shape().as_list()

    phase = tf.random_uniform([], minval=-rad, maxval=rad + 1, dtype=tf.int32)
    pad_l = tf.maximum(phase, 0)
    pad_r = tf.maximum(-phase, 0)
    phase_start = pad_r
    x = tf.pad(x, [[0, 0], [pad_l, pad_r], [0, 0]], mode=pad_type)

    x = x[:, phase_start:phase_start + x_len]
    x.set_shape([b, x_len, nch])

    return x

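In WaveGAN, phase shuffle is applied between the discriminator's convolution layers; a minimal sketch with an assumed activation shape:

x = tf.random_uniform([64, 16384, 1])  # (batch, samples, channels); assumed shape
x = apply_phaseshuffle(x, rad=2)       # randomly shift waveform phase by up to 2 samples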
Example #27
Source File: train.py From disentangling_conditional_gans with MIT License

def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'):  # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keep_dims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x = tfutil.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'):  # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2 ** tf.floor(lod), tf.int32)
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x

#----------------------------------------------------------------------------
# Just-in-time processing of masks before feeding them to the networks.

Example #28
Source File: dataset.py From disentangling_conditional_gans with MIT License

def get_random_labels_tf(self, minibatch_size):  # => labels
    if self.label_size > 0:
        return tf.gather(self._tf_labels_var,
                         tf.random_uniform([minibatch_size], 0,
                                           self._np_labels.shape[0],
                                           dtype=tf.int32))
    else:
        return tf.zeros([minibatch_size, 0], self.label_dtype)

# Get random labels as NumPy array.

Example #29
Source File: dataset.py From disentangling_conditional_gans with MIT License

def get_minibatch_tf(self):  # => images, labels
    with tf.name_scope('SyntheticDataset'):
        shrink = tf.cast(2.0 ** tf.cast(self._tf_lod_var, tf.float32), tf.int32)
        shape = [self.shape[0], self.shape[1] // shrink, self.shape[2] // shrink]
        images = self._generate_images(self._tf_minibatch_var, self._tf_lod_var, shape)
        labels = self._generate_labels(self._tf_minibatch_var)
        return images, labels