Python tensorflow.fill() Examples
The following are 30 code examples of tensorflow.fill().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
You may also want to check out all available functions and classes of the module tensorflow, or try the search function.
Example #1
Source File: inputs.py From ffn with Apache License 2.0 | 6 votes |
def soften_labels(bool_labels, softness=0.05, scope='soften_labels'):
    """Converts boolean labels into float32.

    Args:
      bool_labels: Tensor with dtype `boolean`.
      softness: The float value to use for False. 1 - softness is
        implicitly used for True.
      scope: passed to op_scope.

    Returns:
      Tensor with same shape as bool_labels with dtype `float32` and
      values `softness` for False and `1 - softness` for True
      (0.05 / 0.95 with the default).
    """
    # NOTE(review): tf.op_scope is a legacy TF1 API (superseded by
    # tf.name_scope); kept as-is to match the rest of the file.
    with tf.op_scope([bool_labels, softness], scope):
        label_shape = tf.shape(bool_labels, name='label_shape')
        # Elementwise select: True -> 1 - softness, False -> softness.
        return tf.where(bool_labels,
                        tf.fill(label_shape, 1.0 - softness, name='soft_true'),
                        tf.fill(label_shape, softness, name='soft_false'))
Example #2
Source File: dataset.py From CRNN.tf2 with MIT License | 6 votes |
def decode(self, inputs, from_pred=True, method='greedy'):
    """Decode network outputs (or an already-sparse label tensor) to strings.

    Args:
        inputs: If `from_pred`, a dense logits tensor of shape
            (batch, time, classes); otherwise a tf.SparseTensor of labels.
        from_pred: Whether `inputs` are raw model predictions that still
            need CTC decoding.
        method: CTC decode strategy, 'greedy' or 'beam_search'.

    Returns:
        Decoded strings produced by `self.map2string`.

    Raises:
        ValueError: If `method` is not 'greedy' or 'beam_search'.
    """
    if from_pred:
        # Every sequence in the batch uses the full time dimension.
        logit_length = tf.fill([tf.shape(inputs)[0]], tf.shape(inputs)[1])
        if method == 'greedy':
            decoded, _ = tf.nn.ctc_greedy_decoder(
                inputs=tf.transpose(inputs, perm=[1, 0, 2]),
                sequence_length=logit_length,
                merge_repeated=self.merge_repeated)
        elif method == 'beam_search':
            decoded, _ = tf.nn.ctc_beam_search_decoder(
                inputs=tf.transpose(inputs, perm=[1, 0, 2]),
                sequence_length=logit_length)
        else:
            # BUG FIX: previously an unknown method fell through and raised
            # a confusing NameError on `decoded`; fail fast instead.
            raise ValueError(
                f"Unsupported decode method: {method!r}; "
                "expected 'greedy' or 'beam_search'")
        inputs = decoded[0]
    decoded = tf.sparse.to_dense(inputs,
                                 default_value=self.blank_index).numpy()
    decoded = self.map2string(decoded)
    return decoded
Example #3
Source File: wide_and_deep_model.py From youtube-8m with Apache License 2.0 | 6 votes |
def create_model(self, model_input, vocab_size, num_frames,
                 l2_penalty=1e-8, **unused_params):
    """A super model that combines one or more models.

    Builds every model named in FLAGS.wide_and_deep_models (comma-separated,
    looked up on the `frame_level_models` module) and averages their
    per-class predictions with uniform weights.

    Args:
      model_input: input tensor forwarded to each sub-model.
      vocab_size: number of output classes.
      num_frames: per-example frame counts, forwarded to each sub-model.
      l2_penalty: L2 regularization strength forwarded to sub-models.

    Returns:
      dict with key "predictions": tensor of shape (batch, vocab_size).
    """
    models = FLAGS.wide_and_deep_models
    outputs = []
    for model_name in map(lambda x: x.strip(), models.split(",")):
        model = getattr(frame_level_models, model_name, None)()
        output = model.create_model(model_input, vocab_size, num_frames,
                                    l2_penalty=l2_penalty,
                                    **unused_params)["predictions"]
        outputs.append(tf.expand_dims(output, axis=2))
    num_models = len(outputs)
    model_outputs = tf.concat(outputs, axis=2)
    # A learnable softmax combination was tried and left disabled:
    # linear_combination = tf.get_variable("combine", shape=[vocab_size,num_models],
    #                         dtype=tf.float32, initializer=tf.zeros_initializer(),
    #                         regularizer=slim.l2_regularizer(l2_penalty))
    # combination = tf.nn.softmax(linear_combination)
    # Uniform weights: simple average over the sub-models.
    combination = tf.fill(dims=[vocab_size, num_models], value=1.0 / num_models)
    output_sum = tf.einsum("ijk,jk->ij", model_outputs, combination)
    return {"predictions": output_sum}
Example #4
Source File: transformer.py From nematus with BSD 3-Clause "New" or "Revised" License | 6 votes |
def _convert_inputs(self, inputs):
    """Convert time-major batch inputs to the batch-major tensors the
    Transformer expects, and build the shifted decoder input.

    Returns:
      (source_ids, source_mask, target_ids_in, target_ids_out, target_mask),
      all batch-major.
    """
    # Convert from time-major to batch-major. Note that we take factor 0
    # from x and ignore any other factors.
    source_ids = tf.transpose(a=inputs.x[0], perm=[1, 0])
    source_mask = tf.transpose(a=inputs.x_mask, perm=[1, 0])
    target_ids_out = tf.transpose(a=inputs.y, perm=[1, 0])
    target_mask = tf.transpose(a=inputs.y_mask, perm=[1, 0])

    # target_ids_in is a bit more complicated since we need to insert
    # the special <GO> symbol (with value 1) at the start of each sentence
    max_len, batch_size = tf.shape(input=inputs.y)[0], \
        tf.shape(input=inputs.y)[1]
    go_symbols = tf.fill(value=1, dims=[1, batch_size])
    tmp = tf.concat([go_symbols, inputs.y], 0)
    # Drop the final timestep so the shifted sequence keeps the same length.
    tmp = tmp[:-1, :]
    target_ids_in = tf.transpose(a=tmp, perm=[1, 0])
    return (source_ids, source_mask, target_ids_in, target_ids_out,
            target_mask)
Example #5
Source File: preprocessor_test.py From DOTA_models with Apache License 2.0 | 6 votes |
def testRandomPixelValueScale(self):
    """Checks random_pixel_value_scale keeps each normalized pixel within
    [0.9x, 1.1x] of its original value."""
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    # Expected bounds: original pixels scaled by 0.9 / 1.1 (after /255
    # normalization).
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    # All-True reference tensor matching the test image shape.
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
        (values_greater_, values_less_, values_true_) = sess.run(
            [values_greater, values_less, values_true])
        self.assertAllClose(values_greater_, values_true_)
        self.assertAllClose(values_less_, values_true_)
Example #6
Source File: tf_atari_wrappers.py From fine-lm with MIT License | 6 votes |
def simulate(self, action):
    """Repeat `action` for `self.skip` steps, accumulating reward and done.

    Observations are combined via elementwise maximum across the skipped
    frames; rewards are summed; done flags are OR-ed.

    Returns:
      (reward, done) tensors for the skipped span, after assigning the
      final observation into `self._observ`.
    """
    with tf.name_scope("environment/simulate"):
        # Do we need this?  (original author's open question)
        initializer = (tf.zeros_like(self._observ),
                       tf.fill((len(self),), 0.0),
                       tf.fill((len(self),), False))

        def not_done_step(a, _):
            # a = (max_observ_so_far, reward_sum, done_any)
            reward, done = self._batch_env.simulate(action)
            with tf.control_dependencies([reward, done]):
                # TODO(piotrmilos): possibly ignore envs with done
                r0 = tf.maximum(a[0], self._batch_env.observ)
                r1 = tf.add(a[1], reward)
                r2 = tf.logical_or(a[2], done)
                return (r0, r1, r2)

        simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                               initializer=initializer,
                               parallel_iterations=1,
                               infer_shape=False)
        # Keep only the accumulators from the final scan step.
        simulate_ret = [ret[-1, ...] for ret in simulate_ret]
        with tf.control_dependencies([self._observ.assign(simulate_ret[0])]):
            return tf.identity(simulate_ret[1]), tf.identity(simulate_ret[2])
Example #7
Source File: tf_atari_wrappers.py From fine-lm with MIT License | 6 votes |
def simulate(self, action):
    """Repeat `action` for `self.skip` steps, stacking observations.

    Unlike the max-pooling variant, the per-step observations are kept and
    concatenated along the channel axis; the reward/done of the final step
    are returned.

    Returns:
      (reward, done) tensors for the last skipped step, after assigning
      the stacked observation into `self._observ`.
    """
    with tf.name_scope("environment/simulate"):
        # Do we need this?  (original author's open question)
        initializer = (tf.zeros(self.old_shape, dtype=tf.float32),
                       tf.fill((len(self),), 0.0),
                       tf.fill((len(self),), False))

        def not_done_step(a, _):
            # a = (_, reward_sum, done_any); the observation slot is
            # overwritten each step rather than accumulated.
            reward, done = self._batch_env.simulate(action)
            with tf.control_dependencies([reward, done]):
                r0 = self._batch_env.observ + 0
                r1 = tf.add(a[1], reward)
                r2 = tf.logical_or(a[2], done)
                return (r0, r1, r2)

        simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                               initializer=initializer,
                               parallel_iterations=1,
                               infer_shape=False)
        observations, rewards, dones = simulate_ret
        # Re-arrange the `skip` per-step observations into a single tensor
        # concatenated along the last (channel) axis.
        split_observations = tf.split(observations, self.skip, axis=0)
        split_observations = [tf.squeeze(o, axis=0)
                              for o in split_observations]
        observation = tf.concat(split_observations, axis=-1)
        with tf.control_dependencies([self._observ.assign(observation)]):
            return tf.identity(rewards[-1, ...]), tf.identity(dones[-1, ...])
Example #8
Source File: readers.py From youtube-8m with Apache License 2.0 | 6 votes |
def resize_axis(tensor, axis, new_size, fill_value=0):
    """Truncates or pads `tensor` to `new_size` along the given axis.

    Args:
      tensor: tensor (or convertible) to resize.
      axis: axis to truncate/pad.
      new_size: desired size of `axis` after the call.
      fill_value: value used for padding (cast to `tensor.dtype`).

    Returns:
      Tensor identical to `tensor` except that the chosen axis has
      exactly `new_size` entries.
    """
    tensor = tf.convert_to_tensor(tensor)
    shape = tf.unstack(tf.shape(tensor))
    pad_shape = shape[:]
    # Amount of padding needed (0 when truncating).
    pad_shape[axis] = tf.maximum(0, new_size - shape[axis])
    # Size of the slice to keep (new_size when padding).
    shape[axis] = tf.minimum(shape[axis], new_size)
    shape = tf.stack(shape)
    resized = tf.concat([
        tf.slice(tensor, tf.zeros_like(shape), shape),
        tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
    ], axis)
    # Update shape.
    new_shape = tensor.get_shape().as_list()  # A copy is being made.
    new_shape[axis] = new_size
    resized.set_shape(new_shape)
    return resized
Example #9
Source File: metrics.py From CRNN.tf2 with MIT License | 6 votes |
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulate sequence-accuracy counts for one batch.

    Greedy-decodes `y_pred` with CTC, densifies both label tensors to a
    common width, and counts the sequences that match exactly.
    `self.total` accumulates batch sizes; `self.count` accumulates the
    number of fully-correct sequences.

    Maybe have more fast implementation (original author's note).
    """
    b = tf.shape(y_true)[0]
    max_width = tf.maximum(tf.shape(y_true)[1], tf.shape(y_pred)[1])
    # Each logit sequence spans the full time dimension.
    logit_length = tf.fill([tf.shape(y_pred)[0]], tf.shape(y_pred)[1])
    decoded, _ = tf.nn.ctc_greedy_decoder(
        inputs=tf.transpose(y_pred, perm=[1, 0, 2]),
        sequence_length=logit_length)
    # Pad both sparse tensors to the same dense shape so they compare
    # elementwise; -1 marks "no label" in both.
    y_true = tf.sparse.reset_shape(y_true, [b, max_width])
    y_pred = tf.sparse.reset_shape(decoded[0], [b, max_width])
    y_true = tf.sparse.to_dense(y_true, default_value=-1)
    y_pred = tf.sparse.to_dense(y_pred, default_value=-1)
    y_true = tf.cast(y_true, tf.int32)
    y_pred = tf.cast(y_pred, tf.int32)
    # values[i] is True iff sequence i has at least one mismatch.
    values = tf.math.reduce_any(tf.math.not_equal(y_true, y_pred), axis=1)
    values = tf.cast(values, tf.int32)
    values = tf.reduce_sum(values)
    self.total.assign_add(b)
    self.count.assign_add(b - values)
Example #10
Source File: preprocessor_test.py From object_detector_app with MIT License | 6 votes |
def testRandomPixelValueScale(self):
    """Checks random_pixel_value_scale keeps each normalized pixel within
    [0.9x, 1.1x] of its original value."""
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    # Expected bounds: original pixels scaled by 0.9 / 1.1 (after /255
    # normalization).
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    # All-True reference tensor matching the test image shape.
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
        (values_greater_, values_less_, values_true_) = sess.run(
            [values_greater, values_less, values_true])
        self.assertAllClose(values_greater_, values_true_)
        self.assertAllClose(values_less_, values_true_)
Example #11
Source File: preprocessor_test.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 6 votes |
def testRandomPixelValueScale(self):
    """Checks random_pixel_value_scale keeps each normalized pixel within
    [0.9x, 1.1x] of its original value."""
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    # Expected bounds: original pixels scaled by 0.9 / 1.1 (after /255
    # normalization).
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    # All-True reference tensor matching the test image shape.
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
        (values_greater_, values_less_, values_true_) = sess.run(
            [values_greater, values_less, values_true])
        self.assertAllClose(values_greater_, values_true_)
        self.assertAllClose(values_less_, values_true_)
Example #12
Source File: gaussian_distribution.py From tf-example-models with Apache License 2.0 | 6 votes |
def initialize(self, dtype=tf.float64):
    """Lazily build the TF objects backing this Gaussian.

    Creates the mean variable, covariance object, and the ln((2*pi)^D)
    constant, each only if it has not been created yet.

    Args:
        dtype: TensorFlow dtype for the created variables/constants.
    """
    if self.tf_mean is None:
        if self.mean is not None:
            # Seed the variable from the user-provided mean.
            self.tf_mean = tf.Variable(self.mean, dtype=dtype)
        else:
            # No mean given: start from an all-zeros vector.
            zero_mean = tf.cast(tf.fill([self.dims], 0.0), dtype)
            self.tf_mean = tf.Variable(zero_mean)

    if self.tf_covariance is None:
        if self.covariance is not None:
            self.tf_covariance = self.covariance
        else:
            self.tf_covariance = FullCovariance(self.dims)
        self.tf_covariance.initialize(dtype)

    if self.tf_ln2piD is None:
        # ln((2*pi)^D), reused by the log-density computation.
        ln_two_pi_d = np.log(2 * np.pi) * self.dims
        self.tf_ln2piD = tf.constant(ln_two_pi_d, dtype=dtype)
Example #13
Source File: tf_gmm.py From tf-example-models with Apache License 2.0 | 6 votes |
def plot_fitted_data(points, c_means, c_variances):
    """Plots the data and given Gaussian components.

    Args:
        points: (N, 2) array of data points.
        c_means: (K, 2) array of component means.
        c_variances: (K, 2) array of per-axis component variances.
    """
    plt.plot(points[:, 0], points[:, 1], "b.", zorder=0)
    plt.plot(c_means[:, 0], c_means[:, 1], "r.", zorder=1)

    for i in range(c_means.shape[0]):
        std = np.sqrt(c_variances[i])
        # BUG FIX: plt.axes() (no args) created/fetched a fresh Axes on
        # every loop iteration and is deprecated for that use; draw the
        # ellipses on the current axes instead.
        plt.gca().add_artist(pat.Ellipse(
            c_means[i], 2 * std[0], 2 * std[1],
            fill=False, color="red", linewidth=2, zorder=1
        ))

    plt.show()


# PREPARING DATA

# generating DATA_POINTS points from a GMM with COMPONENTS components
Example #14
Source File: uniform.py From tensorprob with MIT License | 6 votes |
def UniformInt(name=None):
    """Improper integer uniform distribution: constant (zero) log-density.

    Registers `logp` and `integral` on `Distribution` (tensorprob's
    declaration pattern) and returns the placeholder for the variable.
    """
    X = tf.placeholder(config.int_dtype, name=name)
    # log p(x) = 0 everywhere (unnormalized flat prior).
    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        # Integral over [lower, upper]: defined as 1 when either endpoint
        # is infinite (ceil/floor keep the check integer-consistent),
        # otherwise the interval length.
        val = tf.cond(
            tf.logical_or(
                tf.is_inf(tf.ceil(tf.cast(lower, config.dtype))),
                tf.is_inf(tf.floor(tf.cast(upper, config.dtype)))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )
        return val

    Distribution.integral = integral
    return X
Example #15
Source File: preprocessor_test.py From vehicle_counting_tensorflow with MIT License | 6 votes |
def testRandomPixelValueScale(self):
    """Checks random_pixel_value_scale keeps each normalized pixel within
    [0.9x, 1.1x] of its original value."""
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    # Expected bounds: original pixels scaled by 0.9 / 1.1 (after /255
    # normalization).
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    # All-True reference tensor matching the test image shape.
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
        (values_greater_, values_less_, values_true_) = sess.run(
            [values_greater, values_less, values_true])
        self.assertAllClose(values_greater_, values_true_)
        self.assertAllClose(values_less_, values_true_)
Example #16
Source File: simple_gaussian_cnn_model.py From garage with MIT License | 6 votes |
def _build(self, obs_input, name=None):
    """Build model.

    Args:
        obs_input (tf.Tensor): Entire time-series observation input.
        name (str): Inner model name, also the variable scope of the
            inner model, if exist. One example is
            garage.tf.models.Sequential.

    Returns:
        tf.tensor: Action.
        tf.tensor: Mean.
        tf.Tensor: Log of standard deviation.
        garage.distributions.DiagonalGaussian: Distribution.
    """
    del name
    return_var = tf.compat.v1.get_variable(
        'return_var', (), initializer=tf.constant_initializer(0.5))
    # Broadcast the scalar variable / constants over the batch.
    out_shape = (tf.shape(obs_input)[0], self.output_dim)
    mean = tf.fill(out_shape, return_var)
    log_std = tf.fill(out_shape, 0.5)
    action = mean + log_std * 0.5
    dist = DiagonalGaussian(self.output_dim)
    # action will be 0.5 + 0.5 * 0.5 = 0.75
    return action, mean, log_std, dist
Example #17
Source File: simple_gaussian_mlp_model.py From garage with MIT License | 6 votes |
def _build(self, obs_input, name=None):
    """Build model.

    Args:
        obs_input (tf.Tensor): Entire time-series observation input.
        name (str): Inner model name, also the variable scope of the
            inner model, if exist. One example is
            garage.tf.models.Sequential.

    Returns:
        tf.tensor: Mean.
        tf.Tensor: Log of standard deviation.
        garage.distributions.DiagonalGaussian: Distribution.
    """
    del name
    return_var = tf.compat.v1.get_variable(
        'return_var', (), initializer=tf.constant_initializer(0.5))
    # Broadcast the scalar variable / constant over the batch.
    out_shape = (tf.shape(obs_input)[0], self.output_dim)
    mean = tf.fill(out_shape, return_var)
    log_std = tf.fill(out_shape, np.log(0.5))
    dist = DiagonalGaussian(self.output_dim)
    # action will be 0.5 + 0.5 * 0.5 = 0.75
    return mean, log_std, dist
Example #18
Source File: core.py From lm-human-preferences with MIT License | 6 votes |
def where(cond, true, false, name=None):
    """Similar to tf.where, but broadcasts scalar values.

    tf.where requires `true`/`false` to match `cond`'s shape; this wrapper
    tiles rank-0 operands to the appropriate shape first.
    """
    with tf.name_scope(name, 'where', [cond, true, false]) as name:
        cond = tf.convert_to_tensor(cond, name='cond', dtype=tf.bool)
        # If `false` is already a Tensor, let it pin the dtype of `true`;
        # `false` then inherits `true`'s dtype either way, keeping the two
        # branches consistent.
        true = tf.convert_to_tensor(
            true, name='true',
            dtype=false.dtype if isinstance(false, tf.Tensor) else None)
        false = tf.convert_to_tensor(false, name='false', dtype=true.dtype)

        if true.shape.rank == false.shape.rank == 0:
            # Both scalars: tile each to cond's shape.
            shape = tf.shape(cond)
            true = tf.fill(shape, true)
            false = tf.fill(shape, false)
        elif true.shape.rank == 0:
            true = tf.fill(tf.shape(false), true)
        elif false.shape.rank == 0:
            false = tf.fill(tf.shape(true), false)

        return tf.where(cond, true, false, name=name)
Example #19
Source File: simple_mlp_model.py From garage with MIT License | 6 votes |
def _build(self, obs_input, name=None):
    """Build model given input placeholder(s).

    Args:
        obs_input (tf.Tensor): Tensor input for state.
        name (str): Inner model name, also the variable scope of the
            inner model, if exist. One example is
            garage.tf.models.Sequential.

    Return:
        tf.Tensor: Tensor output of the model.
    """
    del name
    fill_value = tf.compat.v1.get_variable(
        'return_var', (), initializer=tf.constant_initializer(0.5))
    # Broadcast the scalar variable over (batch, output_dim).
    output_shape = (tf.shape(obs_input)[0], self.output_dim)
    return tf.fill(output_shape, fill_value)
Example #20
Source File: simple_mlp_merge_model.py From garage with MIT License | 6 votes |
def _build(self, obs_input, act_input, name=None):
    """Build model given input placeholder(s).

    Args:
        obs_input (tf.Tensor): Tensor input for state.
        act_input (tf.Tensor): Tensor input for action.
        name (str): Inner model name, also the variable scope of the
            inner model, if exist. One example is
            garage.tf.models.Sequential.

    Return:
        tf.Tensor: Tensor output of the model.
    """
    del name
    del act_input  # Action input is intentionally ignored by this stub.
    fill_value = tf.compat.v1.get_variable(
        'return_var', (), initializer=tf.constant_initializer(0.5))
    # Broadcast the scalar variable over (batch, output_dim).
    output_shape = (tf.shape(obs_input)[0], self.output_dim)
    return tf.fill(output_shape, fill_value)
Example #21
Source File: preprocessor_test.py From Person-Detection-and-Tracking with MIT License | 6 votes |
def testRandomPixelValueScale(self):
    """Checks random_pixel_value_scale keeps each normalized pixel within
    [0.9x, 1.1x] of its original value."""
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    # Expected bounds: original pixels scaled by 0.9 / 1.1 (after /255
    # normalization).
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    # All-True reference tensor matching the test image shape.
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
        (values_greater_, values_less_, values_true_) = sess.run(
            [values_greater, values_less, values_true])
        self.assertAllClose(values_greater_, values_true_)
        self.assertAllClose(values_less_, values_true_)
Example #22
Source File: constant_fill.py From onnx-tensorflow with Apache License 2.0 | 6 votes |
def version_1(cls, node, **kwargs):
    """Convert an ONNX ConstantFill node to TF ops.

    The output shape comes from (in priority order) the `shape` attribute,
    the input tensor's static shape, or — when `input_as_shape` is set —
    the input tensor's values, optionally extended by `extra_shape`.
    """
    tensor_dict = kwargs["tensor_dict"]
    if "shape" in node.attrs:
        shape = node.attrs["shape"]
    else:
        # input_as_shape == 0: use the input's own (static) shape;
        # otherwise the input tensor holds the shape values.
        shape = tensor_dict[
            node.inputs[0]].get_shape().as_list() if node.attrs.get(
                "input_as_shape", 0) == 0 else tensor_dict[node.inputs[0]]
    if "extra_shape" in node.attrs:
        shape = tf.concat([shape, node.attrs["extra_shape"]], 0)
    value = node.attrs.get("value", 0.)
    if "dtype" in node.attrs:
        # Explicit dtype requested: fill then cast.
        return [tf.cast(tf.fill(shape, value), dtype=node.attrs["dtype"])]
    # Fall back to the generic ONNX->TF node construction.
    return [cls.make_tensor_from_onnx_node(node, inputs=[shape], **kwargs)]
Example #23
Source File: multi_problem_v2.py From BERT with Apache License 2.0 | 6 votes |
def get_multi_dataset(datasets, pmf=None):
    """Returns a Dataset that samples records from one or more Datasets.

    Args:
      datasets: A list of one or more Dataset objects to sample from.
      pmf: A tensor of shape [len(datasets)], the probabilities to sample
        each dataset with. This tensor is often constructed with the
        global_step. If this is None, we sample from the datasets
        uniformly at random.

    Returns:
      A Dataset object containing records from multiple datasets. Note
      that because this dataset iterates through other datasets it is
      stateful, thus you will need to call make_initializable_iterator
      instead of make_one_shot_iterator.
    """
    if pmf is None:
        # Default: uniform probability over the datasets.
        pmf = tf.fill([len(datasets)], 1.0 / len(datasets))

    samplers = [ds.repeat().make_one_shot_iterator().get_next
                for ds in datasets]

    def _sample(_):
        return categorical_case(pmf, samplers)

    return tf.data.Dataset.from_tensors([]).repeat().map(_sample)
Example #24
Source File: transformer_memory.py From BERT with Apache License 2.0 | 6 votes |
def reset(self, entries_to_reset):
    """Reset the entries in the memory.

    Zeroes out both the stored values and the mean logits for the given
    memory entries.

    Args:
      entries_to_reset: a 1D tensor of entry indices.

    Returns:
      the reset op.
    """
    num_updates = tf.size(entries_to_reset)
    # Scatter a [num_updates, memory_size, val_depth] block of zeros into
    # the value store at the selected entries.
    update_vals = tf.scatter_update(
        self.mem_vals, entries_to_reset,
        tf.tile(tf.expand_dims(
            tf.fill([self.memory_size, self.val_depth], .0), 0),
            [num_updates, 1, 1]))
    # Likewise zero the [num_updates, memory_size] logits.
    update_logits = tf.scatter_update(
        self.mean_logits, entries_to_reset,
        tf.tile(tf.expand_dims(
            tf.fill([self.memory_size], .0), 0),
            [num_updates, 1]))
    reset_op = tf.group([update_vals, update_logits])
    return reset_op
Example #25
Source File: uniform.py From tensorprob with MIT License | 6 votes |
def Uniform(name=None):
    """Improper continuous uniform distribution: constant (zero) log-density.

    Registers `logp` and `integral` on `Distribution` (tensorprob's
    declaration pattern) and returns the placeholder for the variable.
    """
    X = tf.placeholder(config.dtype, name=name)
    # log p(x) = 0 everywhere (unnormalized flat prior).
    Distribution.logp = tf.fill(tf.shape(X), config.dtype(0))

    def integral(lower, upper):
        # Integral over [lower, upper]: 1 when either endpoint is
        # infinite, otherwise the interval length.
        return tf.cond(
            tf.logical_or(
                tf.is_inf(tf.cast(lower, config.dtype)),
                tf.is_inf(tf.cast(upper, config.dtype))
            ),
            lambda: tf.constant(1, dtype=config.dtype),
            lambda: tf.cast(upper, config.dtype) - tf.cast(lower, config.dtype),
        )

    Distribution.integral = integral
    return X
Example #26
Source File: simple_cnn_model.py From garage with MIT License | 5 votes |
def _build(self, obs_input, name=None):
    """Build model given input placeholder(s).

    Args:
        obs_input (tf.Tensor): Tensor input for state.
        name (str): Inner model name, also the variable scope of the
            inner model, if exist. One example is
            garage.tf.models.Sequential.

    Return:
        tf.Tensor: Tensor output of the model.
    """
    del name
    height_size = obs_input.get_shape().as_list()[1]
    width_size = obs_input.get_shape().as_list()[2]
    # Simulate the spatial-size reduction of each conv layer so the
    # returned tensor matches the flattened conv-output width.
    for filter_iter, stride in zip(self.filters, self.strides):
        if self.padding == 'SAME':
            # ceil(size / stride)
            height_size = int((height_size + stride - 1) / stride)
            width_size = int((width_size + stride - 1) / stride)
        else:
            # VALID padding: (size - kernel) // stride + 1
            height_size = int(
                (height_size - filter_iter[1][0]) / stride) + 1
            width_size = int((width_size - filter_iter[1][1]) / stride) + 1
    flatten_shape = height_size * width_size * self.filters[-1][0]
    return_var = tf.compat.v1.get_variable(
        'return_var', (), initializer=tf.constant_initializer(0.5))
    return tf.fill((tf.shape(obs_input)[0], flatten_shape), return_var)
Example #27
Source File: utilities.py From tensorprob with MIT License | 5 votes |
def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to allowed regions of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        # Infinite (non-tensor) endpoints impose no constraint on that side.
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))
        # (-inf, +inf) regions add no condition: everything is allowed.

    if len(conditions) > 0:
        # X is allowed if it falls inside ANY bounded region.
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        # NOTE(review): tf.select is a legacy TF API (later tf.where);
        # kept as-is to match the project's TF version.
        logp = tf.select(
            is_inside_bounds,
            logp,
            tf.fill(tf.shape(X), config.dtype(-np.inf))
        )

    return logp
Example #28
Source File: preprocessor_test.py From ros_people_object_detection_tensorflow with Apache License 2.0 | 5 votes |
def createColorfulTestImage(self):
    """Builds a 1x200x400x3 uint8 test image from four colored quadrants.

    Each quadrant is a 100x200 tile assembled from constant channel
    planes; tiles are concatenated left/right, then top/bottom.
    """
    # Constant single-channel planes of shape [1, 100, 200, 1].
    ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8))
    ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8))
    ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8))
    # NOTE(review): the variable names suggest red/green/blue/white, but
    # the actual RGB triples are (255,0,0), (255,255,0), (255,0,255),
    # (128,128,128) -- i.e. red, yellow, magenta, gray. Left unchanged.
    imr = tf.concat([ch255, ch0, ch0], 3)
    img = tf.concat([ch255, ch255, ch0], 3)
    imb = tf.concat([ch255, ch0, ch255], 3)
    imw = tf.concat([ch128, ch128, ch128], 3)
    # Assemble quadrants: width-wise (axis 2), then height-wise (axis 1).
    imu = tf.concat([imr, img], 2)
    imd = tf.concat([imb, imw], 2)
    im = tf.concat([imu, imd], 1)
    return im
Example #29
Source File: shrink.py From onnx-tensorflow with Apache License 2.0 | 5 votes |
def version_9(cls, node, **kwargs):
    """Convert an ONNX Shrink (opset 9) node to TF ops.

    Implements: y = x + bias if x < -lambd; y = x - bias if x > lambd;
    y = 0 otherwise.
    """
    tensor_dict = kwargs["tensor_dict"]
    input_tensor = tensor_dict[node.inputs[0]]
    input_shape = tf.shape(input_tensor, out_type=tf.int64)

    # handle defaults for attributes
    lambd = node.attrs["lambd"] if "lambd" in node.attrs else 0.5
    bias = node.attrs["bias"] if "bias" in node.attrs else 0.0

    # make tensors in the right shape
    lambd_tensor = tf.fill(input_shape,
                           tf.constant(lambd, input_tensor.dtype))
    lambd_neg_tensor = tf.fill(input_shape,
                               tf.constant(lambd * -1, input_tensor.dtype))
    bias_tensor = tf.fill(input_shape,
                          tf.constant(bias, input_tensor.dtype))
    zeros_tensor = tf.zeros(input_shape, input_tensor.dtype)

    # prepare return values and conditions
    input_plus = tf.add(input_tensor, bias_tensor)
    input_minus = tf.subtract(input_tensor, bias_tensor)
    greater_cond = tf.greater(input_tensor, lambd_tensor)
    less_cond = tf.less(input_tensor, lambd_neg_tensor)

    # Nested select: below -lambd -> x + bias; above lambd -> x - bias;
    # inside [-lambd, lambd] -> 0.
    return [
        tf.where(less_cond, input_plus,
                 tf.where(greater_cond, input_minus, zeros_tensor))
    ]
Example #30
Source File: time_dependent_parameters.py From rlgraph with Apache License 2.0 | 5 votes |
def _graph_fn_get(self, time_percentage=None):
    """Return the constant `from_` value for the active backend.

    Args:
        time_percentage: Optional tensor of time-progress values; when
            given, the result is `from_` broadcast to its shape.

    Returns:
        A tensor filled with `self.from_` matching `time_percentage`'s
        shape, or the raw `self.from_` scalar when no tensor is given.
    """
    if get_backend() == "tf":
        if time_percentage is not None:
            return tf.fill(tf.shape(time_percentage), self.from_)
        else:
            return self.from_
    elif get_backend() == "pytorch":
        # BUG FIX: mirror the TF branch -- previously this raised an
        # AttributeError (`None.size()`) when time_percentage was None.
        if time_percentage is not None:
            return torch.full(time_percentage.size(), self.from_)
        else:
            return self.from_