Python tensorflow.Op() Examples

The following are 9 code examples of tensorflow.Op(). You can go to the original project or source file by following the links above each example, or check out all other available functions and classes of the tensorflow module.
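Note: in the docstrings below, `tf.Op` is informal shorthand for a `tf.Operation`: a graph node that is run for its side effects rather than for a value. A minimal TF 1.x sketch of the distinction:

import tensorflow as tf  # TF 1.x graph-mode API, as used in the examples below

v = tf.Variable(0.0)
assign = tf.assign(v, 1.0)   # a Tensor produced by the assign operation
update = tf.group(assign)    # tf.group returns a tf.Operation with no output value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(update)         # running the Op executes the assignment
    print(sess.run(v))       # 1.0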
Example #1
Source File: nn.py    From hart with GNU General Public License v3.0
def __init__(self, inpt, n_hidden, n_output, transfer_hidden=tf.nn.elu, transfer=None,
                 hidden_weight_init=None, hidden_bias_init=None, weight_init=None, bias_init=None,
                 name=None):
        """
        :param inpt: input tensor
        :param n_hidden: scalar or list, number of hidden units
        :param n_output: scalar, number of output units
        :param transfer_hidden: scalar or list, transfers for hidden units. If a list, its length must equal len(n_hidden).
        :param transfer: tf.Op or None, transfer for the output units
        :param hidden_weight_init: initializer for hidden-layer weights, or None
        :param hidden_bias_init: initializer for hidden-layer biases, or None
        :param weight_init: initializer for output-layer weights, or None
        :param bias_init: initializer for output-layer biases, or None
        :param name: string or None, name for the module
        """

        self.n_hidden = nest.flatten(n_hidden)
        self.n_output = n_output
        self.hidden_weight_init = hidden_weight_init
        self.hidden_bias_init = hidden_bias_init

        transfer_hidden = nest.flatten(transfer_hidden)
        if len(transfer_hidden) == 1:
            transfer_hidden *= len(self.n_hidden)
        self.transfer_hidden = transfer_hidden

        self.transfer = transfer
        super(MLP, self).__init__(inpt, name, weight_init, bias_init) 
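A hedged usage sketch for the constructor above; the import path, placeholder shape, and layer sizes are illustrative assumptions, not taken from hart:

import tensorflow as tf
from hart.model.nn import MLP  # assumed import path

inpt = tf.placeholder(tf.float32, [None, 32])
# A single transfer_hidden is broadcast to every hidden layer by the
# `transfer_hidden *= len(self.n_hidden)` logic in the constructor.
mlp = MLP(inpt, n_hidden=[128, 64], n_output=10,
          transfer_hidden=tf.nn.elu, transfer=tf.nn.softmax)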
Example #2
Source File: bdqn.py    From rltf with MIT License
def __init__(self, sigma_e, tau, mode="mean", **kwargs):
    """
    Args:
      sigma_e: float. Standard deviation of the observation noise for BLR
      tau: float. Standard deviation of the weight prior in BLR
      mode: str. BLR prediction mode; `"mean"` by default
      obs_shape: list. Shape of the observation tensor (forwarded via **kwargs)
      n_actions: int. Number of possible actions (forwarded via **kwargs)
      opt_conf: rltf.optimizers.OptimizerConf. Configuration for the optimizer (forwarded via **kwargs)
      gamma: float. Discount factor (forwarded via **kwargs)
      huber_loss: bool. Whether to use huber loss or not (forwarded via **kwargs)
    """

    super().__init__(**kwargs)

    self.agent_blr  = [BLR(tau=tau, sigma_e=sigma_e, mode=mode)   for _ in range(self.n_actions)]
    self.target_blr = [BLR(tau=tau, sigma_e=sigma_e, mode="mean") for _ in range(self.n_actions)]

    # Custom TF Tensors and Ops
    self._target    = None    # BLR target
    self._phi       = None    # BLR features
    self.train_blr  = None    # Op for updating the BLR weight posterior
    self.reset_blr  = None    # Op for resetting the BLR to initial weights
    self.a_var      = None    # Tensor with the BLR variance
Example #3
Source File: bdqn.py    From rltf with MIT License
def _build_train_blr_op(self, phi, target, name):
    """Build the Bayesian Linear Regression ops and estimates
    Args:
      phi: tf.Tensor, shape: `[None, dim_phi]`. The feature tensor
      target: tf.Tensor, shape: `[None]`. The target, as returned by `self._compute_target()`
      name: str. Name for the resulting train Op
    Returns:
      tf.Op: The train Op for BLR
    """
    target = tf.expand_dims(target, axis=-1)

    def train_blr(blr, a):
      """Given a BLR instance, select only the examples for the corresponding action"""
      mask = tf.expand_dims(tf.equal(self.act_t_ph, a), axis=-1)
      mask = tf.cast(mask, tf.float32)  # out shape: [None, 1]
      X = phi * mask                    # out shape: [None, dim_phi]
      y = target * mask                 # out shape: [None, 1]
      return blr.train(X, y)

    w_updates = [train_blr(blr, i) for i, blr in enumerate(self.agent_blr)]

    return tf.group(*w_updates, name=name) 
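A NumPy sketch of the per-action masking inside `train_blr` above: rows whose action differs from `a` are zeroed out, so each BLR instance is trained only on the data for its own action:

import numpy as np

phi = np.array([[1., 2.], [3., 4.], [5., 6.]])  # features, shape [3, dim_phi]
act = np.array([0, 1, 0])                        # action taken at each step
a = 0
mask = (act == a).astype(np.float32)[:, None]    # shape [3, 1]
X = phi * mask
# X == [[1., 2.], [0., 0.], [5., 6.]] -- only rows where act == a survive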
Example #4
Source File: blr.py    From rltf with MIT License
def train(self, X, y):
    """Compute the weight posteriror of Bayesian Linear Regression
    Args:
      X: tf.Tensor, `shape=[None, D]`. The feature matrix
      y: tf.Tensor, `shape=[None, 1]`. The correct outputs
    Returns:
      tf.Op which performs the update operation
    """
    X = self._cast_input(X)
    y = self._cast_input(y)

    # Compute the posterior precision matrix
    w_Lambda = self.w_Lambda + self.beta * tf.matmul(X, X, transpose_a=True)

    # Compute the posterior covariance matrix
    X_norm  = 1.0 / self.sigma * X
    w_Sigma = tf_inv.woodburry_inverse(self.w_Sigma, tf.transpose(X_norm), X_norm)

    error = tf.losses.mean_squared_error(tf.matmul(w_Lambda, w_Sigma), tf.eye(self.w_dim))
    tf.summary.scalar("debug/BLR/inv_error", error)

    # Compute the posterior mean
    w_mu = tf.matmul(w_Sigma, self.beta * tf.matmul(X, y, transpose_a=True) + tf.matmul(self.w_Lambda, self.w_mu))

    return self._tf_update_params(w_mu, w_Sigma, w_Lambda) 
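The code above implements the standard Bayesian linear regression posterior update. A NumPy sketch of the same math, under the assumption that `beta = 1 / sigma_e**2` is the observation noise precision (rltf computes the covariance via a Woodbury identity instead of a direct inverse):

import numpy as np

def blr_posterior(X, y, mu_0, Lambda_0, beta):
    """Posterior over weights, given prior N(mu_0, Lambda_0^-1) and noise precision beta."""
    Lambda_n = Lambda_0 + beta * X.T @ X                     # posterior precision
    Sigma_n  = np.linalg.inv(Lambda_n)                       # posterior covariance
    mu_n     = Sigma_n @ (beta * X.T @ y + Lambda_0 @ mu_0)  # posterior mean
    return mu_n, Sigma_n, Lambda_n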
Example #5
Source File: prior.py    From attend_infer_repeat with GNU General Public License v3.0
def masked_apply(tensor, op, mask):
    """Applies `op` to tensor only at locations indicated by `mask` and sets the rest to zero.

    Similar to `tensor = tf.where(mask, op(tensor), tf.zeros_like(tensor))`, but unlike
    tf.where it behaves correctly when `op(tensor)` produces NaN or inf values.

    :param tensor: tf.Tensor
    :param op: tf.Op
    :param mask: tf.Tensor with dtype == bool
    :return: tf.Tensor
    """
    chosen = tf.boolean_mask(tensor, mask)
    applied = op(chosen)
    idx = tf.to_int32(tf.where(mask))
    result = tf.scatter_nd(idx, applied, tf.shape(tensor))
    return result 
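A usage sketch for `masked_apply`: take the elementwise log only where entries are positive, producing zeros elsewhere instead of NaN or -inf:

x = tf.constant([[1., 0.],
                 [0., 4.]])
safe_log = masked_apply(x, tf.log, x > 0.)
# evaluates to [[0., 0.], [0., 1.386]] -- log is applied only where x > 0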
Example #6
Source File: utils.py    From Deep-Learning-with-TensorFlow-Second-Edition with MIT License
def sigmoid(x):
    return 1 / (1 + np.exp(-x))


# Predefined loss functions
# Each should take 2 tf.Ops (outputs and targets) and return a tf.Op of the loss
# Be careful about dimensionality -- maybe tf.transpose(outputs) is needed
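A minimal sketch of a loss function matching the signature described above, using the standard TF 1.x op tf.squared_difference:

import tensorflow as tf

def mean_squared_loss(outputs, targets):
    # outputs, targets: tensors of matching shape; transpose outputs first
    # if the network emits [dim, batch] instead of [batch, dim]
    return tf.reduce_mean(tf.squared_difference(outputs, targets))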
Example #7
Source File: trainer.py    From FRU with MIT License
def evaluate(session, op_to_evaluate, feed_dict, batch_size):
        """ evaluate.

        Evaluate an operation with provided data dict using a batch size
        to save GPU memory.

        Args:
            session: `tf.Session`. Session for running operations.
            op_to_evaluate: `tf.Op`. Operation to be evaluated.
            feed_dict: `dict`. Data dictionary to feed op_to_evaluate.
            batch_size: `int`. Batch size to be used for evaluation.

        Returns:
            `float`. Mean of op_to_evaluate over all batches.

        """
        tflearn.is_training(False, session)
        n_test_samples = len(get_dict_first_element(feed_dict))
        batches = make_batches(n_test_samples, batch_size)
        index_array = np.arange(n_test_samples)
        avg = 0.0
        for i, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            feed_batch = {}
            for key in feed_dict:
                # Make batch for multi-dimensional data
                if np.ndim(feed_dict[key]) > 0:
                    feed_batch[key] = slice_array(feed_dict[key], batch_ids)
                else:
                    feed_batch[key] = feed_dict[key]
            avg += session.run(op_to_evaluate, feed_batch) / len(batches)
        return avg 
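A hypothetical usage sketch; the placeholders, test arrays, and loss op are illustrative names, not part of FRU:

feed = {x_ph: X_test, y_ph: y_test}  # placeholders and test data are assumed
mean_loss = evaluate(session, loss_op, feed, batch_size=128)
print("test loss: %.4f" % mean_loss)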
Example #8
Source File: bdqn.py    From rltf with MIT License
def __init__(self, **kwargs):

    super().__init__(mode="ts", **kwargs)

    # Custom TF Tensors and Ops
    self.reset_ts   = None    # Op that resamples the parameters for TS (Thompson Sampling)
Example #9
Source File: blr.py    From rltf with MIT License
def _tf_update_params(self, w_mu, w_Sigma, w_Lambda):
    """
    Args:
      w_mu: tf.Tensor. New value of the posterior mean
      w_Sigma: tf.Tensor. New value of the posterior covariance
      w_Lambda: tf.Tensor. New value of the posterior precision
    Returns:
      tf.Op which performs an update on all weight parameters
    """
    mu_op     = tf.assign(self.w_mu,      w_mu)
    Sigma_op  = tf.assign(self.w_Sigma,   w_Sigma)
    Lambda_op = tf.assign(self.w_Lambda,  w_Lambda)
    return tf.group(mu_op, Sigma_op, Lambda_op)
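Usage sketch: because the three assigns are grouped, one session.run call applies all of them together; `blr`, `sess`, and the new parameter tensors are assumed:

update_op = blr._tf_update_params(new_mu, new_Sigma, new_Lambda)
sess.run(update_op)  # w_mu, w_Sigma, and w_Lambda are all updated in one run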