Python tensorflow.make_template() Examples

The following are 30 code examples of tensorflow.make_template(). Each example notes the original project and source file it was taken from. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
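Before the examples, here is a minimal sketch of what tf.make_template() does (TensorFlow 1.x; in 2.x it survives only as tf.compat.v1.make_template): the wrapped function creates its variables via tf.get_variable on the first call and transparently reuses them on every later call.

import tensorflow as tf

def scale_by_y(x):
    # Created on the first call, reused on all subsequent calls.
    y = tf.get_variable('y', shape=[], initializer=tf.ones_initializer())
    return x * y

scale = tf.make_template('scale', scale_by_y)
a = scale(tf.constant(2.))  # creates variable 'scale/y'
b = scale(tf.constant(3.))  # reuses 'scale/y'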
Example #1
Source File: celeba64_5bit_official.py    From flowpp with MIT License
def __init__(self, dequant_flow):
        super().__init__()
        assert isinstance(dequant_flow, Flow)
        self.dequant_flow = dequant_flow

        def deep_processor(x, *, init, ema, dropout_p):
            (this, that), _ = CheckerboardSplit().forward(x)
            processed_context = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, init=init, ema=ema)
            for i in range(5):
                processed_context = gated_resnet(
                    processed_context, name='c{}'.format(i),
                    a=None, dropout_p=dropout_p, ema=ema, init=init,
                    use_nin=False
                )
                processed_context = norm(processed_context, name='dqln{}'.format(i), ema=ema)
                
            return processed_context

        self.context_proc = tf.make_template("context_proc", deep_processor) 
Example #2
Source File: utils.py    From zhusuan with MIT License
def reuse_variables(scope):
    """
    A decorator for transparent reuse of TensorFlow
    `Variables <https://www.tensorflow.org/api_docs/python/tf/Variable>`_ in a
    function. The decorated function will automatically create variables the
    first time it is called and reuse them thereafter.

    .. note::

        This decorator is internally implemented by TensorFlow's
        :func:`make_template` function. See `its doc
        <https://www.tensorflow.org/api_docs/python/tf/make_template>`_
        for requirements on the target function.

    :param scope: A string. The scope name passed to TensorFlow's
        `variable_scope()
        <https://www.tensorflow.org/api_docs/python/tf/variable_scope>`_.
    """
    return lambda f: tf.make_template(scope, f) 
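A usage sketch for the decorator above (the linear function is illustrative, not part of zhusuan):

@reuse_variables('linear')
def linear(x):
    w = tf.get_variable('w', shape=[int(x.shape[-1]), 1])
    return tf.matmul(x, w)

h1 = linear(tf.ones([4, 3]))   # first call creates 'linear/w'
h2 = linear(tf.zeros([7, 3]))  # second call reuses 'linear/w'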
Example #3
Source File: mpnn.py    From mpnn with Apache License 2.0
def init_fprop(self):
    """Initializes self.fprop. This should be called from subclasses' ctors.

    This function will construct all of the variables defined with
    tf.get_variable in the subclass's fprop method and make a template
    out of the fprop method. In this way, instead of using variable scopes
    for variable reuse, the instantiation of the subclass will construct all
    of the model variables, and subsequent calls of the object's fprop method
    will add the fprop ops to the TensorFlow graph using the tf variables
    which were defined when init_fprop was first called. This makes variable
    reuse trivial: simply call model.fprop on different tensors. If you
    don't want to reuse variables, instead define a different model
    object.
    """
    scope_name = self.__class__.__name__

    self.fprop = tf.make_template(
        scope_name, self._fprop, create_scope_now_=True)

    if getattr(self.hparams, "use_placeholders", True):
      # Call self.fprop() once inside a dummy name scope to initialize the
      # variables without polluting the main graph's namespace.
      with tf.name_scope("UNUSED"):
        args, kwargs = self.get_fprop_placeholders()
        self.fprop(*args, **kwargs) 
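Condensed to its core, the init_fprop pattern looks like this (TinyNet and its weight are illustrative names, not part of mpnn):

class TinyNet(object):
    def __init__(self):
        # Wrap _fprop once; the first call creates the variables.
        self.fprop = tf.make_template('TinyNet', self._fprop, create_scope_now_=True)

    def _fprop(self, x):
        w = tf.get_variable('w', shape=[int(x.shape[-1]), 4])
        return tf.matmul(x, w)

net = TinyNet()
train_out = net.fprop(tf.ones([2, 3]))   # creates 'TinyNet/w'
eval_out = net.fprop(tf.zeros([5, 3]))   # reuses 'TinyNet/w'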
Example #4
Source File: optimizer.py    From rec-rl with Apache License 2.0
def __init__(self, summaries=None, summary_labels=None):
        """
        Creates a new optimizer instance.
        """
        self.variables = dict()
        self.summaries = summaries
        if summary_labels is None:
            self.summary_labels = dict()
        else:
            self.summary_labels = summary_labels

        def custom_getter(getter, name, registered=False, **kwargs):
            variable = getter(name=name, registered=True, **kwargs)
            if not registered:
                assert kwargs.get('trainable', False)
                self.variables[name] = variable
            return variable

        # TensorFlow function
        self.step = tf.make_template(
            name_='step',
            func_=self.tf_step,
            custom_getter_=custom_getter  # trailing underscore: make_template's own kwarg
        ) 
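The custom getter above intercepts every tf.get_variable call made inside the template so the optimizer can record its variables. A condensed sketch of the same interception, with illustrative names:

created = {}

def build_step(x):
    w = tf.get_variable('w', shape=[])
    return x * w

def tracking_getter(getter, name, **kwargs):
    variable = getter(name=name, **kwargs)  # create or retrieve as usual
    created[name] = variable                # record it for later bookkeeping
    return variable

step = tf.make_template('step', build_step, custom_getter_=tracking_getter)
y = step(tf.constant(1.))  # created now maps 'step/w' to the variable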
Example #5
Source File: graph_utils.py    From seq2seq with Apache License 2.0
def templatemethod(name_):
  """This decorator wraps a method with `tf.make_template`. For example,

  @templatemethod("my_method")
  def my_method():
    # Create variables
  """

  def template_decorator(func):
    """Inner decorator function"""

    def func_wrapper(*args, **kwargs):
      """Inner wrapper function"""
      templated_func = tf.make_template(name_, func)
      return templated_func(*args, **kwargs)

    return func_wrapper

  return template_decorator 
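A usage sketch (Encoder is illustrative). Note that func_wrapper builds a fresh template on every invocation, so variables are shared among the ops created within a single call, while separate calls receive uniquified scopes:

class Encoder(object):
    @templatemethod('encode')
    def encode(self, x):
        w = tf.get_variable('w', shape=[int(x.shape[-1]), 8])
        return tf.matmul(x, w)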
Example #6
Source File: graph_utils.py    From conv_seq2seq with Apache License 2.0
def templatemethod(name_):
  """This decorator wraps a method with `tf.make_template`. For example,

  @templatemethod("my_method")
  def my_method():
    # Create variables
  """

  def template_decorator(func):
    """Inner decorator function"""

    def func_wrapper(*args, **kwargs):
      """Inner wrapper function"""
      templated_func = tf.make_template(name_, func)
      return templated_func(*args, **kwargs)

    return func_wrapper

  return template_decorator 
Example #7
Source File: preprocessor.py    From rec-rl with Apache License 2.0
def __init__(self, scope='preprocessor', summary_labels=None):
        self.summary_labels = set(summary_labels or ())
        self.variables = dict()
        self.summaries = list()

        def custom_getter(getter, name, registered=False, **kwargs):
            variable = getter(name=name, registered=True, **kwargs)
            if not registered:
                self.variables[name] = variable
            return variable

        self.process = tf.make_template(
            name_=(scope + '/process'),
            func_=self.tf_process,
            custom_getter_=custom_getter
        ) 
Example #8
Source File: exploration.py    From rec-rl with Apache License 2.0
def __init__(self, scope='exploration', summary_labels=None):
        self.summary_labels = set(summary_labels or ())

        self.variables = dict()
        self.summaries = list()

        def custom_getter(getter, name, registered=False, **kwargs):
            variable = getter(name=name, registered=True, **kwargs)
            if not registered:
                self.variables[name] = variable
            return variable

        self.explore = tf.make_template(
            name_=(scope + '/explore'),
            func_=self.tf_explore,
            custom_getter_=custom_getter
        ) 
Example #9
Source File: rssm.py    From dreamer with Apache License 2.0
def __init__(
      self, state_size, belief_size, embed_size,
      future_rnn=True, mean_only=False, min_stddev=0.1, activation=tf.nn.elu,
      num_layers=1):
    self._state_size = state_size
    self._belief_size = belief_size
    self._embed_size = embed_size
    self._future_rnn = future_rnn
    self._cell = tf.contrib.rnn.GRUBlockCell(self._belief_size)
    self._kwargs = dict(units=self._embed_size, activation=activation)
    self._mean_only = mean_only
    self._min_stddev = min_stddev
    self._num_layers = num_layers
    super(RSSM, self).__init__(
        tf.make_template('transition', self._transition),
        tf.make_template('posterior', self._posterior)) 
Example #10
Source File: det.py    From ncp with Apache License 2.0
def define_graph(config):
  network_tpl = tf.make_template('network', network, config=config)
  inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
  targets = tf.placeholder(tf.float32, [None, 1])
  num_visible = tf.placeholder(tf.int32, [])
  batch_size = tf.shape(inputs)[0]
  data_dist = network_tpl(inputs)
  losses = [
      -data_dist.log_prob(targets),
  ]
  loss = sum(tf.reduce_sum(loss) for loss in losses) / tf.to_float(batch_size)
  optimizer = tf.train.AdamOptimizer(config.learning_rate)
  gradients, variables = zip(*optimizer.compute_gradients(
      loss, colocate_gradients_with_ops=True))
  if config.clip_gradient:
    gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
  optimize = optimizer.apply_gradients(zip(gradients, variables))
  data_mean = data_dist.mean()
  # This deterministic baseline reports its learned noise as the uncertainty.
  data_noise = data_dist.stddev()
  data_uncertainty = data_dist.stddev()
  return tools.AttrDict(locals()) 
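A hedged usage sketch; the network function and the full set of config fields live in the ncp repository and are assumed here (num_inputs, learning_rate, and clip_gradient are the fields define_graph itself reads):

import numpy as np

config = tools.AttrDict(num_inputs=1, learning_rate=1e-3, clip_gradient=10.0)
graph = define_graph(config)  # config also needs whatever fields network() reads
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {graph.inputs: np.random.randn(32, 1),
            graph.targets: np.random.randn(32, 1)}
    _, loss = sess.run([graph.optimize, graph.loss], feed)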
Example #11
Source File: imagenet64_official.py    From flowpp with MIT License
def __init__(self, dequant_flow):
        super().__init__()
        assert isinstance(dequant_flow, Flow)
        self.dequant_flow = dequant_flow

        def deep_processor(x, *, init, ema, dropout_p):
            (this, that), _ = CheckerboardSplit().forward(x)
            processed_context = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, init=init, ema=ema)
            for i in range(5):
                processed_context = gated_resnet(
                    processed_context, name='c{}'.format(i),
                    a=None, dropout_p=dropout_p, ema=ema, init=init,
                    use_nin=False
                )
                processed_context = norm(processed_context, name='dqln{}'.format(i), ema=ema)
                
            return processed_context

        self.context_proc = tf.make_template("context_proc", deep_processor) 
Example #12
Source File: drnn.py    From planet with Apache License 2.0
def __init__(
      self, state_size, belief_size, embed_size,
      mean_only=False, min_stddev=1e-1, activation=tf.nn.elu,
      encoder_to_decoder=False, sample_to_sample=True,
      sample_to_encoder=True, decoder_to_encoder=False,
      decoder_to_sample=True, action_to_decoder=False):
    self._state_size = state_size
    self._belief_size = belief_size
    self._embed_size = embed_size
    self._encoder_cell = tf.contrib.rnn.GRUBlockCell(self._belief_size)
    self._decoder_cell = tf.contrib.rnn.GRUBlockCell(self._belief_size)
    self._kwargs = dict(units=self._embed_size, activation=tf.nn.relu)
    self._mean_only = mean_only
    self._min_stddev = min_stddev
    self._encoder_to_decoder = encoder_to_decoder
    self._sample_to_sample = sample_to_sample
    self._sample_to_encoder = sample_to_encoder
    self._decoder_to_encoder = decoder_to_encoder
    self._decoder_to_sample = decoder_to_sample
    self._action_to_decoder = action_to_decoder
    posterior_tpl = tf.make_template('posterior', self._posterior)
    super(DRNN, self).__init__(posterior_tpl, posterior_tpl) 
Example #13
Source File: rssm.py    From planet with Apache License 2.0
def __init__(
      self, state_size, belief_size, embed_size,
      future_rnn=True, mean_only=False, min_stddev=0.1, activation=tf.nn.elu,
      num_layers=1):
    self._state_size = state_size
    self._belief_size = belief_size
    self._embed_size = embed_size
    self._future_rnn = future_rnn
    self._cell = tf.contrib.rnn.GRUBlockCell(self._belief_size)
    self._kwargs = dict(units=self._embed_size, activation=activation)
    self._mean_only = mean_only
    self._min_stddev = min_stddev
    self._num_layers = num_layers
    super(RSSM, self).__init__(
        tf.make_template('transition', self._transition),
        tf.make_template('posterior', self._posterior)) 
Example #14
Source File: imagenet64_5bit_official.py    From flowpp with MIT License
def __init__(self, dequant_flow):
        super().__init__()
        assert isinstance(dequant_flow, Flow)
        self.dequant_flow = dequant_flow

        def deep_processor(x, *, init, ema, dropout_p):
            (this, that), _ = CheckerboardSplit().forward(x)
            processed_context = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, init=init, ema=ema)
            for i in range(5):
                processed_context = gated_resnet(
                    processed_context, name='c{}'.format(i),
                    a=None, dropout_p=dropout_p, ema=ema, init=init,
                    use_nin=False
                )
                processed_context = norm(processed_context, name='dqln{}'.format(i), ema=ema)
                
            return processed_context

        self.context_proc = tf.make_template("context_proc", deep_processor) 
Example #15
Source File: vae.py    From cdvae-vc with MIT License
def __init__(self, arch, normalizers=None):
        '''
        Variational Auto-Encoder (VAE)
        Arguments:
            `arch`: network architecture (`dict`)
            `normalizers`: optional feature normalizers (defaults to `None`)
        '''
        self.arch = arch
        self.normalizers = normalizers
        self.feat_type = arch['feat_type']

        with tf.name_scope('SpeakerCode'):
            self.y_emb = self._l2_regularized_embedding(
                self.arch['y_dim'],
                self.arch['z_dim'],
                'y_embedding')

        self.enc = tf.make_template(
            'Encoder',
            self.encoder)
        
        self.dec = tf.make_template(
            'Decoder',
            self.decoder) 
Example #16
Source File: celeba128_5bit_official.py    From flowpp with MIT License
def __init__(self, dequant_flow):
        super().__init__()
        assert isinstance(dequant_flow, Flow)
        self.dequant_flow = dequant_flow

        def deep_processor(x, *, init, ema, dropout_p):
            (this, that), _ = CheckerboardSplit().forward(x)
            processed_context = conv2d(tf.concat([this, that], 3), name='proj', num_units=32, init=init, ema=ema)
            for i in range(5):
                processed_context = gated_resnet(
                    processed_context, name='c{}'.format(i),
                    a=None, dropout_p=dropout_p, ema=ema, init=init,
                    use_nin=False
                )
                processed_context = norm(processed_context, name='dqln{}'.format(i), ema=ema)
                
            return processed_context

        self.context_proc = tf.make_template("context_proc", deep_processor) 
Example #17
Source File: module_base.py    From texar with Apache License 2.0
def __init__(self, hparams=None):
        if not hasattr(self, '_hparams'):
            self._hparams = HParams(hparams, self.default_hparams())
        else:
            # Probably already parsed by subclasses. We rely on subclass
            # implementations to get this right.
            # As a sanity check, we require `hparams` to be `None` in this case.
            if hparams is not None:
                raise ValueError(
                    "`self._hparams` already exists. Argument `hparams` "
                    "must be set to `None` in this case.")
        self._template = tf.make_template(self._hparams.name, self._build,
                                          create_scope_now_=True)
        self._unique_name = self.variable_scope.name.split("/")[-1]
        self._trainable_variables = []
        self._built = False 
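Passing create_scope_now_=True makes the template claim its variable scope at construction time, which is what lets the line after it read self.variable_scope.name (presumably backed by the template's variable_scope) before the template has ever been called. A minimal illustration:

tpl = tf.make_template('my_module', lambda: tf.get_variable('v', shape=[]),
                       create_scope_now_=True)
print(tpl.variable_scope.name)  # 'my_module', or 'my_module_1' if the name was taken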
Example #18
Source File: graph_utils.py    From reaction_prediction_seq2seq with Apache License 2.0
def templatemethod(name_):
  """This decorator wraps a method with `tf.make_template`. For example,

  @templatemethod("my_method")
  def my_method():
    # Create variables
  """

  def template_decorator(func):
    """Inner decorator function"""

    def func_wrapper(*args, **kwargs):
      """Inner wrapper function"""
      templated_func = tf.make_template(name_, func)
      return templated_func(*args, **kwargs)

    return func_wrapper

  return template_decorator 
Example #19
Source File: graph_utils.py    From natural-language-summary-generation-from-structured-data with MIT License
def templatemethod(name_):
  """This decorator wraps a method with `tf.make_template`. For example,

  @templatemethod("my_method")
  def my_method():
    # Create variables
  """

  def template_decorator(func):
    """Inner decorator function"""

    def func_wrapper(*args, **kwargs):
      """Inner wrapper function"""
      templated_func = tf.make_template(name_, func)
      return templated_func(*args, **kwargs)

    return func_wrapper

  return template_decorator 
Example #20
Source File: det_mix_ncp.py    From ncp with Apache License 2.0
def define_graph(config):
  network_tpl = tf.make_template('network', network, config=config)
  inputs = tf.placeholder(tf.float32, [None, config.num_inputs])
  targets = tf.placeholder(tf.float32, [None, 1])
  num_visible = tf.placeholder(tf.int32, [])
  batch_size = tf.to_float(tf.shape(inputs)[0])
  data_mean, data_noise, data_uncertainty = network_tpl(inputs)
  ood_inputs = inputs + tf.random_normal(
      tf.shape(inputs), 0.0, config.noise_std)
  ood_mean, ood_noise, ood_uncertainty = network_tpl(ood_inputs)
  losses = [
      -tfd.Normal(data_mean, data_noise).log_prob(targets),
      -tfd.Bernoulli(data_uncertainty).log_prob(0),
      -tfd.Bernoulli(ood_uncertainty).log_prob(1),
  ]
  if config.center_at_target:
    losses.append(-tfd.Normal(ood_mean, ood_noise).log_prob(targets))
  loss = sum(tf.reduce_sum(loss) for loss in losses) / batch_size
  optimizer = tf.train.AdamOptimizer(config.learning_rate)
  gradients, variables = zip(*optimizer.compute_gradients(
      loss, colocate_gradients_with_ops=True))
  if config.clip_gradient:
    gradients, _ = tf.clip_by_global_norm(gradients, config.clip_gradient)
  optimize = optimizer.apply_gradients(zip(gradients, variables))
  data_uncertainty = tf.sigmoid(data_uncertainty)
  if not config.center_at_target:
    data_mean = (1 - data_uncertainty) * data_mean + data_uncertainty * 0
  data_noise = (1 - data_uncertainty) * data_noise + data_uncertainty * 0.1
  return tools.AttrDict(locals()) 
Example #21
Source File: abl_noattn.py    From flowpp with MIT License
def __init__(self, filters, blocks, components, init_scale=0.1):
        def f(x, *, vcfg: VarConfig, context=None, dropout_p=0., verbose=True):
            if vcfg.init and verbose:
                # debug stuff
                xmean, xvar = tf.nn.moments(x, axes=list(range(len(x.shape))))
                x = tf.Print(
                    x, [tf.shape(x), xmean, tf.sqrt(xvar), tf.reduce_min(x), tf.reduce_max(x)],
                    message='{} (shape/mean/std/min/max) '.format(self.template.variable_scope.name), summarize=10
                )
            B, H, W, C = x.shape.as_list()
            pos_emb = get_var('pos_emb', shape=[H, W, filters], initializer=tf.random_normal_initializer(stddev=0.01),
                              vcfg=vcfg)
            x = conv2d(x, name='proj_in', num_units=filters, vcfg=vcfg)
            for i_block in range(blocks):
                with tf.variable_scope(f'block{i_block}'):
                    x = gated_conv(x, name='conv', a=context, use_nin=True, dropout_p=dropout_p, vcfg=vcfg)
                    x = layernorm(x, name='ln1', vcfg=vcfg)
                    x = gated_nin(x, name='attn', pos_emb=pos_emb, dropout_p=dropout_p, vcfg=vcfg)
                    x = layernorm(x, name='ln2', vcfg=vcfg)
            x = conv2d(x, name='proj_out', num_units=C * (2 + 3 * components), init_scale=init_scale, vcfg=vcfg)
            assert x.shape == [B, H, W, C * (2 + 3 * components)]
            x = tf.reshape(x, [B, H, W, C, 2 + 3 * components])

            s, t = tf.tanh(x[:, :, :, :, 0]), x[:, :, :, :, 1]
            ml_logits, ml_means, ml_logscales = tf.split(x[:, :, :, :, 2:], 3, axis=4)
            assert s.shape == t.shape == [B, H, W, C]
            assert ml_logits.shape == ml_means.shape == ml_logscales.shape == [B, H, W, C, components]

            return Compose([
                MixLogisticCDF(logits=ml_logits, means=ml_means, logscales=ml_logscales),
                Inverse(Sigmoid()),
                ElemwiseAffine(scales=tf.exp(s), logscales=s, biases=t),
            ])

        self.template = tf.make_template(self.__class__.__name__, f) 
Example #22
Source File: flows.py    From flowpp with MIT License
def __init__(self, filters, blocks, components, heads=4, init_scale=0.1, enable_print=True):
        def f(x, *, vcfg: VarConfig, context=None, dropout_p=0., verbose=True):
            if vcfg.init and verbose and enable_print:
                # debug stuff
                xmean, xvar = tf.nn.moments(x, axes=list(range(len(x.shape))))
                x = tf.Print(
                    x, [tf.shape(x), xmean, tf.sqrt(xvar), tf.reduce_min(x), tf.reduce_max(x)],
                    message='{} (shape/mean/std/min/max) '.format(self.template.variable_scope.name), summarize=10
                )
            B, H, W, C = x.shape.as_list()
            pos_emb = get_var('pos_emb', shape=[H, W, filters], initializer=tf.random_normal_initializer(stddev=0.01),
                              vcfg=vcfg)
            x = conv2d(x, name='proj_in', num_units=filters, vcfg=vcfg)
            for i_block in range(blocks):
                with tf.variable_scope('block{}'.format(i_block)):
                    x = gated_conv(x, name='conv', a=context, use_nin=True, dropout_p=dropout_p, vcfg=vcfg)
                    x = layernorm(x, name='ln1', vcfg=vcfg)
                    x = gated_attn(x, name='attn', pos_emb=pos_emb, heads=heads, dropout_p=dropout_p, vcfg=vcfg)
                    x = layernorm(x, name='ln2', vcfg=vcfg)
            x = conv2d(x, name='proj_out', num_units=C * (2 + 3 * components), init_scale=init_scale, vcfg=vcfg)
            assert x.shape == [B, H, W, C * (2 + 3 * components)]
            x = tf.reshape(x, [B, H, W, C, 2 + 3 * components])

            s, t = tf.tanh(x[:, :, :, :, 0]), x[:, :, :, :, 1]
            ml_logits, ml_means, ml_logscales = tf.split(x[:, :, :, :, 2:], 3, axis=4)
            assert s.shape == t.shape == [B, H, W, C]
            assert ml_logits.shape == ml_means.shape == ml_logscales.shape == [B, H, W, C, components]

            return Compose([
                MixLogisticCDF(logits=ml_logits, means=ml_means, logscales=ml_logscales),
                Inverse(Sigmoid()),
                ElemwiseAffine(scales=tf.exp(s), logscales=s, biases=t),
            ])

        self.template = tf.make_template(self.__class__.__name__, f) 
Example #23
Source File: flows.py    From flowpp with MIT License
def __init__(self, noisy_identity_init=0.001):
        def f(input_, forward, vcfg):
            assert not isinstance(input_, list)
            if isinstance(input_, tuple):
                is_tuple = True
            else:
                assert isinstance(input_, tf.Tensor)
                input_ = [input_]
                is_tuple = False

            out, logds = [], []
            for i, x in enumerate(input_):
                _, img_h, img_w, img_c = x.shape.as_list()
                if noisy_identity_init:
                    # identity + gaussian noise
                    initializer = (
                            np.eye(img_c) + noisy_identity_init * np.random.randn(img_c, img_c)
                    ).astype(np.float32)
                else:
                    # random orthogonal
                    initializer = np.linalg.qr(np.random.randn(img_c, img_c))[0].astype(np.float32)
                W = get_var('W{}'.format(i), shape=None, initializer=initializer, vcfg=vcfg)
                out.append(self._nin(x, W if forward else tf.matrix_inverse(W)))
                logds.append(
                    (1 if forward else -1) * img_h * img_w *
                    tf.to_float(tf.log(tf.abs(tf.matrix_determinant(tf.to_double(W)))))
                )
            logd = tf.fill([input_[0].shape[0]], tf.add_n(logds))

            if not is_tuple:
                assert len(out) == 1
                return out[0], logd
            return tuple(out), logd

        self.template = tf.make_template(self.__class__.__name__, f) 
Example #24
Source File: graph_module.py    From seq2seq with Apache License 2.0
def __init__(self, name):
    """
    Initialize the module. Each subclass must call this constructor with a name.

    Args:
      name: Name of this module. Used for `tf.make_template`.
    """
    self.name = name
    self._template = tf.make_template(name, self._build, create_scope_now_=True)
    # Docstrings for the class should be the docstring for the _build method
    self.__doc__ = self._build.__doc__
    # pylint: disable=E1101
    self.__call__.__func__.__doc__ = self._build.__doc__ 
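A subclass sketch (Linear is illustrative; it assumes GraphModule.__call__ forwards to self._template, which the docstring plumbing above suggests):

class Linear(GraphModule):
    def __init__(self, units, name='linear'):
        super(Linear, self).__init__(name)
        self._units = units

    def _build(self, x):
        w = tf.get_variable('w', shape=[int(x.shape[-1]), self._units])
        return tf.matmul(x, w)

layer = Linear(8)
y1 = layer(tf.ones([2, 4]))   # creates 'linear/w'
y2 = layer(tf.zeros([3, 4]))  # reuses 'linear/w'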
Example #25
Source File: flows.py    From flowpp with MIT License
def __init__(self, init_scale=1.):
        def f(input_, forward, vcfg):
            assert not isinstance(input_, list)
            if isinstance(input_, tuple):
                is_tuple = True
            else:
                assert isinstance(input_, tf.Tensor)
                input_ = [input_]
                is_tuple = False

            bs = int(input_[0].shape[0])
            g_and_b = []
            for (i, x) in enumerate(input_):
                g, b = init_normalization(x, name='norm{}'.format(i), init_scale=init_scale, vcfg=vcfg)
                g = tf.maximum(g, 1e-10)
                assert x.shape[0] == bs and g.shape == b.shape == x.shape[1:]
                g_and_b.append((g, b))

            logd = tf.fill([bs], tf.add_n([tf.reduce_sum(tf.log(g)) for (g, _) in g_and_b]))
            if forward:
                out = [x * g[None] + b[None] for (x, (g, b)) in zip(input_, g_and_b)]
            else:
                out = [(x - b[None]) / g[None] for (x, (g, b)) in zip(input_, g_and_b)]
                logd = -logd

            if not is_tuple:
                assert len(out) == 1
                return out[0], logd
            return tuple(out), logd

        self.template = tf.make_template(self.__class__.__name__, f) 
Example #26
Source File: celeba64_3bit_official.py    From flowpp with MIT License
def __init__(self, init_scale=1.):
        def f(input_, forward, init, ema):
            assert not isinstance(input_, list)
            if isinstance(input_, tuple):
                is_tuple = True
            else:
                assert isinstance(input_, tf.Tensor)
                input_ = [input_]
                is_tuple = False

            bs = int(input_[0].shape[0])
            g_and_b = []
            for (i, x) in enumerate(input_):
                g, b = init_normalization(x, name='norm{}'.format(i), init_scale=init_scale, init=init, ema=ema)
                g = tf.maximum(g, 1e-10)
                assert x.shape[0] == bs and g.shape == b.shape == x.shape[1:]
                g_and_b.append((g, b))

            logd = tf.fill([bs], tf.add_n([tf.reduce_sum(tf.log(g)) for (g, _) in g_and_b]))
            if forward:
                out = [x * g[None] + b[None] for (x, (g, b)) in zip(input_, g_and_b)]
            else:
                out = [(x - b[None]) / g[None] for (x, (g, b)) in zip(input_, g_and_b)]
                logd = -logd

            if not is_tuple:
                assert len(out) == 1
                return out[0], logd
            return tuple(out), logd

        self.template = tf.make_template(self.__class__.__name__, f) 
Example #27
Source File: rnn.py    From chemopt with MIT License
def __init__(self,
               hidden_size,
               forget_bias=1.0,
               initializers=None,
               use_peepholes=False,
               use_batch_norm_h=False,
               use_batch_norm_x=False,
               use_batch_norm_c=False,
               max_unique_stats=1,
               name="lstm"):
        super(LSTM, self).__init__()
        self.name_ = name
        self._template = tf.make_template(self.name_, self._build,
                                          create_scope_now_=True)
        self._hidden_size = hidden_size
        self._forget_bias = forget_bias
        self._use_peepholes = use_peepholes
        self._max_unique_stats = max_unique_stats
        self._use_batch_norm_h = use_batch_norm_h
        self._use_batch_norm_x = use_batch_norm_x
        self._use_batch_norm_c = use_batch_norm_c
        self.possible_keys = self.get_possible_initializer_keys(
            use_peepholes=use_peepholes,
            use_batch_norm_h=use_batch_norm_h,
            use_batch_norm_x=use_batch_norm_x,
            use_batch_norm_c=use_batch_norm_c)
        self._initializers = util.check_initializers(initializers,
                                                     self.possible_keys)
        if max_unique_stats < 1:
            raise ValueError("max_unique_stats must be >= 1")
        if max_unique_stats != 1 and not (
            use_batch_norm_h or use_batch_norm_x or use_batch_norm_c):
            raise ValueError("max_unique_stats specified but batch norm disabled")

        if use_batch_norm_h:
            self._batch_norm_h = LSTM.IndexedStatsBatchNorm(max_unique_stats,
                                                          "batch_norm_h")
        if use_batch_norm_x:
            self._batch_norm_x = LSTM.IndexedStatsBatchNorm(max_unique_stats,
                                                          "batch_norm_x")
        if use_batch_norm_c:
            self._batch_norm_c = LSTM.IndexedStatsBatchNorm(max_unique_stats,
                                                          "batch_norm_c") 
Example #28
Source File: celeba128_5bit_official.py    From flowpp with MIT License
def __init__(self, init_scale=1.):
        def f(input_, forward, init, ema):
            assert not isinstance(input_, list)
            if isinstance(input_, tuple):
                is_tuple = True
            else:
                assert isinstance(input_, tf.Tensor)
                input_ = [input_]
                is_tuple = False

            bs = int(input_[0].shape[0])
            g_and_b = []
            for (i, x) in enumerate(input_):
                g, b = init_normalization(x, name='norm{}'.format(i), init_scale=init_scale, init=init, ema=ema)
                g = tf.maximum(g, 1e-10)
                assert x.shape[0] == bs and g.shape == b.shape == x.shape[1:]
                g_and_b.append((g, b))

            logd = tf.fill([bs], tf.add_n([tf.reduce_sum(tf.log(g)) for (g, _) in g_and_b]))
            if forward:
                out = [x * g[None] + b[None] for (x, (g, b)) in zip(input_, g_and_b)]
            else:
                out = [(x - b[None]) / g[None] for (x, (g, b)) in zip(input_, g_and_b)]
                logd = -logd

            if not is_tuple:
                assert len(out) == 1
                return out[0], logd
            return tuple(out), logd

        self.template = tf.make_template(self.__class__.__name__, f) 
Example #29
Source File: real_nvp_flow.py    From mapr2 with Apache License 2.0
def conditioned_real_nvp_template(hidden_layers,
                                  shift_only=False,
                                  activation=tf.nn.relu,
                                  name=None,
                                  *args,  # pylint: disable=keyword-arg-before-vararg
                                  **kwargs):

    with tf.name_scope(name, "conditioned_real_nvp_template"):

        def _fn(x, output_units, **condition_kwargs):
            """MLP which concatenates the condition kwargs to input."""
            x = tf.concat(
                (x, *[condition_kwargs[k] for k in sorted(condition_kwargs)]),
                axis=-1)

            for units in hidden_layers:
                x = tf.layers.dense(
                    inputs=x,
                    units=units,
                    activation=activation,
                    *args,  # pylint: disable=keyword-arg-before-vararg
                    **kwargs)
            x = tf.layers.dense(
                inputs=x,
                units=(1 if shift_only else 2) * output_units,
                activation=None,
                *args,  # pylint: disable=keyword-arg-before-vararg
                **kwargs)

            if shift_only:
                return x, None

            shift, log_scale = tf.split(x, 2, axis=-1)
            return shift, log_scale

        return tf.make_template("conditioned_real_nvp_template", _fn) 
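The returned template fits the shift_and_log_scale_fn contract of TensorFlow Probability's RealNVP bijector, so a plausible use (an assumption, not shown in the mapr2 excerpt) is:

import tensorflow_probability as tfp

bijector = tfp.bijectors.RealNVP(
    num_masked=2,
    shift_and_log_scale_fn=conditioned_real_nvp_template(hidden_layers=[64, 64]))
y = bijector.forward(tf.ones([3, 4]))  # transforms the unmasked 2 of 4 dimensions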
Example #30
Source File: imagenet64_5bit_official.py    From flowpp with MIT License
def __init__(self, init_scale=1.):
        def f(input_, forward, init, ema):
            assert not isinstance(input_, list)
            if isinstance(input_, tuple):
                is_tuple = True
            else:
                assert isinstance(input_, tf.Tensor)
                input_ = [input_]
                is_tuple = False

            bs = int(input_[0].shape[0])
            g_and_b = []
            for (i, x) in enumerate(input_):
                g, b = init_normalization(x, name='norm{}'.format(i), init_scale=init_scale, init=init, ema=ema)
                g = tf.maximum(g, 1e-10)
                assert x.shape[0] == bs and g.shape == b.shape == x.shape[1:]
                g_and_b.append((g, b))

            logd = tf.fill([bs], tf.add_n([tf.reduce_sum(tf.log(g)) for (g, _) in g_and_b]))
            if forward:
                out = [x * g[None] + b[None] for (x, (g, b)) in zip(input_, g_and_b)]
            else:
                out = [(x - b[None]) / g[None] for (x, (g, b)) in zip(input_, g_and_b)]
                logd = -logd

            if not is_tuple:
                assert len(out) == 1
                return out[0], logd
            return tuple(out), logd

        self.template = tf.make_template(self.__class__.__name__, f)