Python tensorflow.assert_variables_initialized() Examples

The following are 30 code examples of tensorflow.assert_variables_initialized(). Each example names the original project and source file it was taken from. You may also want to check out all available functions/classes of the module tensorflow, or try the search function.
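When run in a session, the op raises a FailedPreconditionError ("Attempting to use uninitialized value ...") until every checked variable has been initialized, as the test cases below demonstrate. A minimal sketch of that behavior, assuming TensorFlow 1.x graph mode:

import tensorflow as tf

v = tf.Variable([1.0, 2.0], name='v')
check = tf.assert_variables_initialized([v])  # with no argument, checks all variables

with tf.Session() as sess:
    try:
        sess.run(check)   # fails: v is not initialized yet
    except tf.errors.FailedPreconditionError:
        sess.run(tf.global_variables_initializer())
    sess.run(check)       # passes once v is initialized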
Example #1
Source File: ops.py    From TheNumericsOfGANs with MIT License
def get_input_moments(x, is_init=False, name=None):
    '''Input normalization'''
    with tf.variable_scope(name, default_name='input_norm'):
        if is_init:
            # data based initialization of parameters
            mean, variance = tf.nn.moments(x, [0])
            std = tf.sqrt(variance + 1e-8)
            mean0 = tf.get_variable('mean0', dtype=tf.float32,
                                    initializer=mean, trainable=False)
            std0 = tf.get_variable('std0', dtype=tf.float32,
                                   initializer=std, trainable=False)
            return mean, std

        else:
            mean0 = tf.get_variable('mean0')
            std0 = tf.get_variable('std0')
            tf.assert_variables_initialized([mean0, std0])
            return mean0, std0 
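A hypothetical two-pass usage of this layer (illustrative, not from the original repo): the first call runs with is_init=True and creates mean0/std0 from the batch statistics; later calls reuse the same scope and read the stored values back, with assert_variables_initialized guarding against reading them before the init pass has run.

x = tf.placeholder(tf.float32, [None, 64])
with tf.variable_scope('model'):
    mean, std = get_input_moments(x, is_init=True, name='in')     # creates mean0/std0
with tf.variable_scope('model', reuse=True):
    mean0, std0 = get_input_moments(x, is_init=False, name='in')  # reads them back
# The initializers of mean0/std0 depend on x, so initialize with real data:
# sess.run(tf.global_variables_initializer(), {x: first_batch})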
Example #2
Source File: test_utils.py    From Automated-Cardiac-Segmentation-and-Disease-Diagnosis with MIT License
def __init__(self, model, conf, model_path=None):
        self.model = model
        self.conf = conf

        print('Defining the session')
        sess_config = tf.ConfigProto()
        sess_config.allow_soft_placement = True
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
        self.sess.run(tf.global_variables_initializer())
        try:
            self.sess.run(tf.assert_variables_initialized())
        except tf.errors.FailedPreconditionError:
            raise RuntimeError('Not all variables initialized')

        self.saver = tf.train.Saver(tf.global_variables())
        if model_path:
            print('Restoring model from: ' + str(model_path))
            self.saver.restore(self.sess, model_path)

        self.binary_opening_filter = sitk.BinaryMorphologicalOpeningImageFilter()
        self.binary_opening_filter.SetKernelRadius(1)

        self.binary_closing_filter = sitk.BinaryMorphologicalClosingImageFilter()
        self.binary_closing_filter.SetKernelRadius(1)

        self.erosion_filter = sitk.BinaryErodeImageFilter()
        self.erosion_filter.SetKernelRadius(1)

        self.dilation_filter = sitk.BinaryDilateImageFilter()
        self.dilation_filter.SetKernelRadius(1) 
Example #3
Source File: nn.py    From fast-pixel-cnn with MIT License
def deconv2d(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' transposed convolutional layer '''
    name = get_name('deconv2d', counters)
    xs = int_shape(x)
    if pad=='SAME':
        target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
    else:
        target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[num_filters,int(x.get_shape()[-1])], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,3])
            x_init = tf.nn.conv2d_transpose(x, V_norm, target_shape, [1]+stride+[1], padding=pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,num_filters,1])*tf.nn.l2_normalize(V,[0,1,3])

            # calculate convolutional layer output
            x = tf.nn.conv2d_transpose(x, W, target_shape, [1]+stride+[1], padding=pad)
            x = tf.nn.bias_add(x, b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
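These weight-normalized layers follow the PixelCNN++ two-pass convention: the graph is built once with init=True so that V, g and b are created with data-dependent initializers, then built again with init=False in the same, reused scope for the actual forward pass. A rough driver sketch under that assumption (model_fn, the shapes and the scope name are hypothetical):

def model_fn(x, init=False):
    counters = {}  # get_name numbers the layers consistently on each pass
    h = conv2d(x, 32, nonlinearity=tf.nn.relu, counters=counters, init=init)
    return deconv2d(h, 3, counters=counters, init=init)

x = tf.placeholder(tf.float32, [16, 32, 32, 3])  # deconv2d needs a fixed batch size
with tf.variable_scope('net'):
    model_fn(x, init=True)                  # creates V, g, b
with tf.variable_scope('net', reuse=True):
    out = model_fn(x, init=False)           # reuses them; the assert guards the ordering
# sess.run(tf.global_variables_initializer(), {x: data_batch})  # data-dependent init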
Example #4
Source File: nn.py    From fast-pixel-cnn with MIT License
def conv2d(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' convolutional layer '''
    name = get_name('conv2d', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[int(x.get_shape()[-1]),num_filters], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,2])
            x_init = tf.nn.conv2d(x, V_norm, [1]+stride+[1], pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,1,num_filters])*tf.nn.l2_normalize(V,[0,1,2])

            # calculate convolutional layer output
            x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1]+stride+[1], pad), b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #5
Source File: nn.py    From fast-pixel-cnn with MIT License
def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]),num_units], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale/tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,num_units])*(x_init-tf.reshape(m_init,[1,num_units]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            #V,g,b = get_vars_maybe_avg(['V','g','b'], ema)
            V = tf.get_variable('V', [int(x.get_shape()[1]),num_units], tf.float32)
            g = tf.get_variable('g', [num_units], tf.float32)
            b = tf.get_variable('b', [num_units], tf.float32)
            if ema is not None:
                V, g, b = ema.average(V), ema.average(g), ema.average(b)
            #tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g/tf.sqrt(tf.reduce_sum(tf.square(V),[0]))
            x = tf.reshape(scaler,[1,num_units])*x + tf.reshape(b,[1,num_units])

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
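The ema argument expects a tf.train.ExponentialMovingAverage whose shadow variables were created during training; a hypothetical setup (decay value illustrative, not from this repo) looks like:

ema = tf.train.ExponentialMovingAverage(decay=0.9995)
maintain_averages_op = ema.apply(tf.trainable_variables())  # run after each optimizer step
# At sampling/eval time, rebuild the graph with dense(..., init=False, ema=ema)
# so the layer reads ema.average(V), ema.average(g) and ema.average(b) instead.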
Example #6
Source File: arch_ops.py    From compare_gan with Apache License 2.0
def weight_norm_deconv2d(x, output_dim,
                         k_h, k_w, d_h, d_w,
                         init=False, init_scale=1.0,
                         stddev=0.02,
                         name="wn_deconv2d",
                         initializer=tf.truncated_normal_initializer):
  """Performs Transposed Convolution with Weight Normalization."""
  xs = x.get_shape().as_list()
  target_shape = [xs[0], xs[1] * d_h, xs[2] * d_w, output_dim]
  with tf.variable_scope(name):
    if init:
      v = tf.get_variable(
          "V", [k_h, k_w] + [output_dim, int(x.get_shape()[-1])],
          tf.float32, initializer(0, stddev), trainable=True)
      v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 3])
      x_init = tf.nn.conv2d_transpose(x, v_norm, target_shape,
                                      [1, d_h, d_w, 1], padding="SAME")
      m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
      scale_init = init_scale/tf.sqrt(v_init + 1e-8)
      g = tf.get_variable("g", dtype=tf.float32,
                          initializer=scale_init, trainable=True)
      b = tf.get_variable("b", dtype=tf.float32,
                          initializer=-m_init*scale_init, trainable=True)
      x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * (
          x_init - tf.reshape(m_init, [1, 1, 1, output_dim]))
      return x_init
    else:
      v = tf.get_variable("v")
      g = tf.get_variable("g")
      b = tf.get_variable("b")
      tf.assert_variables_initialized([v, g, b])
      w = tf.reshape(g, [1, 1, output_dim, 1]) * tf.nn.l2_normalize(
          v, [0, 1, 3])
      x = tf.nn.conv2d_transpose(x, w, target_shape, strides=[1, d_h, d_w, 1],
                                 padding="SAME")
      x = tf.nn.bias_add(x, b)
      return x 
Example #7
Source File: arch_ops.py    From compare_gan with Apache License 2.0
def weight_norm_conv2d(input_, output_dim,
                       k_h, k_w, d_h, d_w,
                       init, init_scale,
                       stddev=0.02,
                       name="wn_conv2d",
                       initializer=tf.truncated_normal_initializer):
  """Performs convolution with Weight Normalization."""
  with tf.variable_scope(name):
    if init:
      v = tf.get_variable(
          "V", [k_h, k_w] + [int(input_.get_shape()[-1]), output_dim],
          tf.float32, initializer(0, stddev), trainable=True)
      v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 2])
      x_init = tf.nn.conv2d(input_, v_norm, strides=[1, d_h, d_w, 1],
                            padding="SAME")
      m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
      scale_init = init_scale / tf.sqrt(v_init + 1e-8)
      g = tf.get_variable(
          "g", dtype=tf.float32, initializer=scale_init, trainable=True)
      b = tf.get_variable(
          "b", dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
      x_init = tf.reshape(scale_init, [1, 1, 1, output_dim]) * (
          x_init - tf.reshape(m_init, [1, 1, 1, output_dim]))
      return x_init
    else:
      v = tf.get_variable("V")
      g = tf.get_variable("g")
      b = tf.get_variable("b")
      tf.assert_variables_initialized([v, g, b])
      w = tf.reshape(g, [1, 1, 1, output_dim]) * tf.nn.l2_normalize(
          v, [0, 1, 2])
      x = tf.nn.bias_add(
          tf.nn.conv2d(input_, w, [1, d_h, d_w, 1], padding="SAME"), b)
      return x 
Example #8
Source File: arch_ops.py    From compare_gan with Apache License 2.0
def weight_norm_linear(input_, output_size,
                       init=False, init_scale=1.0,
                       name="wn_linear",
                       initializer=tf.truncated_normal_initializer,
                       stddev=0.02):
  """Linear layer with Weight Normalization (Salimans, Kingma '16)."""
  with tf.variable_scope(name):
    if init:
      v = tf.get_variable("V", [int(input_.get_shape()[1]), output_size],
                          tf.float32, initializer(0, stddev), trainable=True)
      v_norm = tf.nn.l2_normalize(v.initialized_value(), [0])
      x_init = tf.matmul(input_, v_norm)
      m_init, v_init = tf.nn.moments(x_init, [0])
      scale_init = init_scale / tf.sqrt(v_init + 1e-10)
      g = tf.get_variable("g", dtype=tf.float32,
                          initializer=scale_init, trainable=True)
      b = tf.get_variable("b", dtype=tf.float32, initializer=
                          -m_init*scale_init, trainable=True)
      x_init = tf.reshape(scale_init, [1, output_size]) * (
          x_init - tf.reshape(m_init, [1, output_size]))
      return x_init
    else:
      # Note that the original implementation uses Polyak averaging.
      v = tf.get_variable("V")
      g = tf.get_variable("g")
      b = tf.get_variable("b")
      tf.assert_variables_initialized([v, g, b])
      x = tf.matmul(input_, v)
      scaler = g / tf.sqrt(tf.reduce_sum(tf.square(v), [0]))
      x = tf.reshape(scaler, [1, output_size]) * x + tf.reshape(
          b, [1, output_size])
      return x 
Example #9
Source File: ops.py    From memoryGAN with MIT License
def deconv2d_wn(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' transposed convolutional layer '''
    name = get_name('deconv2d', counters)
    xs = int_shape(x)
    if pad=='SAME':
        target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
    else:
        target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[num_filters,int(x.get_shape()[-1])], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,3])
            x_init = tf.nn.conv2d_transpose(x, V_norm, target_shape, [1]+stride+[1], padding=pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,num_filters,1])*tf.nn.l2_normalize(V,[0,1,3])

            # calculate convolutional layer output
            x = tf.nn.conv2d_transpose(x, W, target_shape, [1]+stride+[1], padding=pad)
            x = tf.nn.bias_add(x, b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #10
Source File: ops.py    From memoryGAN with MIT License
def conv2d_wn(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' convolutional layer '''
    name = get_name('conv2d', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[int(x.get_shape()[-1]),num_filters], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,2])
            x_init = tf.nn.conv2d(x, V_norm, [1]+stride+[1], pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,1,num_filters])*tf.nn.l2_normalize(V,[0,1,2])

            # calculate convolutional layer output
            x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1]+stride+[1], pad), b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #11
Source File: ops.py    From memoryGAN with MIT License
def fc_wn(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]),num_units], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale/tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,num_units])*(x_init-tf.reshape(m_init,[1,num_units]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V,g,b = get_vars_maybe_avg(['V','g','b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g/tf.sqrt(tf.reduce_sum(tf.square(V),[0]))
            x = tf.reshape(scaler,[1,num_units])*x + tf.reshape(b,[1,num_units])

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #12
Source File: nn.py    From pixelsnail-public with MIT License
def conv2d(x, num_filters, filter_size=[3, 3], stride=[1, 1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' convolutional layer '''
    name = get_name('conv2d', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size + [int(x.get_shape()[-1]), num_filters],
                                tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2])
            x_init = tf.nn.conv2d(x, V_norm, [1] + stride + [1], pad)
            m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32,
                                initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=-m_init * scale_init, trainable=True)
            x_init = tf.reshape(scale_init, [1, 1, 1, num_filters]) * (
                x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            # tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g, [1, 1, 1, num_filters]) * \
                tf.nn.l2_normalize(V, [0, 1, 2])

            # calculate convolutional layer output
            x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #13
Source File: nn.py    From pixelsnail-public with MIT License
def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]), num_units], tf.float32,
                                tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale / tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32,
                                initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=-m_init * scale_init, trainable=True)
            x_init = tf.reshape(
                scale_init, [1, num_units]) * (x_init - tf.reshape(m_init, [1, num_units]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            # tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
            x = tf.reshape(scaler, [1, num_units]) * \
                x + tf.reshape(b, [1, num_units])

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #14
Source File: ops.py    From TheNumericsOfGANs with MIT License
def fully_connected(x, num_outputs, activation_fn=None,
                    init_scale=1., is_init=False, ema=None, name=None):
    ''' fully connected layer '''
    with tf.variable_scope(name, default_name='Full'):
        if is_init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]), num_outputs], tf.float32,
                                tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale / tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32,
                                initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=-m_init * scale_init, trainable=True)
            x_init = scale_init * (x_init - m_init)
            if activation_fn is not None:
                x_init = activation_fn(x_init)
            return x_init

        else:
            V = tf.get_variable('V')
            g = tf.get_variable('g')
            b = tf.get_variable('b')

            tf.assert_variables_initialized([V, g, b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
            x = scaler * x + b

            # apply activation_fn
            if activation_fn is not None:
                x = activation_fn(x)
            return x 
Example #15
Source File: nn.py    From weightnorm with MIT License
def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]),num_units], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale/tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,num_units])*(x_init-tf.reshape(m_init,[1,num_units]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V,g,b = get_vars_maybe_avg(['V','g','b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g/tf.sqrt(tf.reduce_sum(tf.square(V),[0]))
            x = tf.reshape(scaler,[1,num_units])*x + tf.reshape(b,[1,num_units])

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #16
Source File: nn.py    From DualLearning with MIT License
def deconv2d(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' transposed convolutional layer '''
    name = get_name('deconv2d', counters)
    xs = int_shape(x)
    if pad=='SAME':
        target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
    else:
        target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[num_filters,int(x.get_shape()[-1])], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,3])
            x_init = tf.nn.conv2d_transpose(x, V_norm, target_shape, [1]+stride+[1], padding=pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            # tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,num_filters,1])*tf.nn.l2_normalize(V,[0,1,3])

            # calculate convolutional layer output
            x = tf.nn.conv2d_transpose(x, W, target_shape, [1]+stride+[1], padding=pad)
            x = tf.nn.bias_add(x, b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #17
Source File: session_manager_test.py    From deep_image_model with Apache License 2.0
def testPrepareSessionSucceedsWithInitFeedDict(self):
    with tf.Graph().as_default():
      p = tf.placeholder(tf.float32, shape=(3,))
      v = tf.Variable(p, name="v")
      sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      sess = sm.prepare_session("",
                                init_op=tf.global_variables_initializer(),
                                init_feed_dict={p: [1.0, 2.0, 3.0]})
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v)) 
Example #18
Source File: nn.py    From weightnorm with MIT License
def conv2d(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' convolutional layer '''
    name = get_name('conv2d', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[int(x.get_shape()[-1]),num_filters], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,2])
            x_init = tf.nn.conv2d(x, V_norm, [1]+stride+[1], pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,1,num_filters])*tf.nn.l2_normalize(V,[0,1,2])

            # calculate convolutional layer output
            x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1]+stride+[1], pad), b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #19
Source File: nn.py    From weightnorm with MIT License
def deconv2d(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' transposed convolutional layer '''
    name = get_name('deconv2d', counters)
    xs = int_shape(x)
    if pad=='SAME':
        target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
    else:
        target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[num_filters,int(x.get_shape()[-1])], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,3])
            x_init = tf.nn.conv2d_transpose(x, V_norm, target_shape, [1]+stride+[1], padding=pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,num_filters,1])*tf.nn.l2_normalize(V,[0,1,3])

            # calculate convolutional layer output
            x = tf.nn.conv2d_transpose(x, W, target_shape, [1]+stride+[1], padding=pad)
            x = tf.nn.bias_add(x, b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #20
Source File: variables_test.py    From deep_image_model with Apache License 2.0
def testNoVars(self):
    with tf.Graph().as_default():
      self.assertEqual(None, tf.assert_variables_initialized()) 
Example #21
Source File: variables_test.py    From deep_image_model with Apache License 2.0
def testVariables(self):
    with tf.Graph().as_default(), self.test_session() as sess:
      v = tf.Variable([1, 2])
      w = tf.Variable([3, 4])
      _ = v, w
      inited = tf.assert_variables_initialized()
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        sess.run(inited)
      tf.global_variables_initializer().run()
      sess.run(inited) 
Example #22
Source File: variables_test.py    From deep_image_model with Apache License 2.0
def testVariableList(self):
    with tf.Graph().as_default(), self.test_session() as sess:
      v = tf.Variable([1, 2])
      w = tf.Variable([3, 4])
      inited = tf.assert_variables_initialized([v])
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      sess.run(w.initializer)
      with self.assertRaisesOpError("Attempting to use uninitialized value"):
        inited.op.run()
      v.initializer.run()
      inited.op.run() 
Example #23
Source File: session_manager_test.py    From deep_image_model with Apache License 2.0
def testPrepareSessionSucceeds(self):
    with tf.Graph().as_default():
      v = tf.Variable([1.0, 2.0, 3.0], name="v")
      sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      sess = sm.prepare_session("", init_op=tf.global_variables_initializer())
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v)) 
Example #24
Source File: nn.py    From DualLearning with MIT License
def conv2d(x, num_filters, filter_size=[3,3], stride=[1,1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' convolutional layer '''
    name = get_name('conv2d', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', filter_size+[int(x.get_shape()[-1]),num_filters], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0,1,2])
            x_init = tf.nn.conv2d(x, V_norm, [1]+stride+[1], pad)
            m_init, v_init = tf.nn.moments(x_init, [0,1,2])
            scale_init = init_scale/tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,1,1,num_filters])*(x_init-tf.reshape(m_init,[1,1,1,num_filters]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
            # tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            W = tf.reshape(g,[1,1,1,num_filters])*tf.nn.l2_normalize(V,[0,1,2])

            # calculate convolutional layer output
            x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1]+stride+[1], pad), b)

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #25
Source File: session_manager_test.py    From deep_image_model with Apache License 2.0
def testRecoverSession(self):
    # Create a checkpoint.
    checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
    except errors.OpError:
      pass                      # Ignore
    gfile.MakeDirs(checkpoint_dir)

    with tf.Graph().as_default():
      v = tf.Variable(1, name="v")
      sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      saver = tf.train.Saver({"v": v})
      sess, initialized = sm.recover_session("", saver=saver,
                                             checkpoint_dir=checkpoint_dir)
      self.assertFalse(initialized)
      sess.run(v.initializer)
      self.assertEquals(1, sess.run(v))
      saver.save(sess, os.path.join(checkpoint_dir,
                                    "recover_session_checkpoint"))
    # Create a new Graph and SessionManager and recover.
    with tf.Graph().as_default():
      v = tf.Variable(2, name="v")
      with self.test_session():
        self.assertEqual(False, tf.is_variable_initialized(v).eval())
      sm2 = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      saver = tf.train.Saver({"v": v})
      sess, initialized = sm2.recover_session("", saver=saver,
                                              checkpoint_dir=checkpoint_dir)
      self.assertTrue(initialized)
      self.assertEqual(
          True, tf.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
      self.assertEquals(1, sess.run(v)) 
Example #26
Source File: session_manager_test.py    From deep_image_model with Apache License 2.0
def testWaitForSessionReturnsNoneAfterTimeout(self):
    with tf.Graph().as_default():
      tf.Variable(1, name="v")
      sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized(),
                                   recovery_wait_secs=1)

      # Set max_wait_secs to allow us to try a few times.
      with self.assertRaises(errors.DeadlineExceededError):
        sm.wait_for_session(master="", max_wait_secs=3) 
Example #27
Source File: nn.py    From DualLearning with MIT License
def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
    ''' fully connected layer '''
    name = get_name('dense', counters)
    with tf.variable_scope(name):
        if init:
            # data based initialization of parameters
            V = tf.get_variable('V', [int(x.get_shape()[1]),num_units], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
            x_init = tf.matmul(x, V_norm)
            m_init, v_init = tf.nn.moments(x_init, [0])
            scale_init = init_scale/tf.sqrt(v_init + 1e-10)
            g = tf.get_variable('g', dtype=tf.float32, initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32, initializer=-m_init*scale_init, trainable=True)
            x_init = tf.reshape(scale_init,[1,num_units])*(x_init-tf.reshape(m_init,[1,num_units]))
            if nonlinearity is not None:
                x_init = nonlinearity(x_init)
            return x_init

        else:
            V,g,b = get_vars_maybe_avg(['V','g','b'], ema)
            # According to the comments at
            # https://github.com/openai/pixel-cnn/issues/17,
            # I simply comment out the following line:
            # tf.assert_variables_initialized([V,g,b])

            # use weight normalization (Salimans & Kingma, 2016)
            x = tf.matmul(x, V)
            scaler = g/tf.sqrt(tf.reduce_sum(tf.square(V),[0]))
            x = tf.reshape(scaler,[1,num_units])*x + tf.reshape(b,[1,num_units])

            # apply nonlinearity
            if nonlinearity is not None:
                x = nonlinearity(x)
            return x 
Example #28
Source File: session_manager_test.py    From deep_image_model with Apache License 2.0
def testPrepareSessionFails(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
    checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
    try:
      gfile.DeleteRecursively(checkpoint_dir)
      gfile.DeleteRecursively(checkpoint_dir2)
    except errors.OpError:
      pass                      # Ignore
    gfile.MakeDirs(checkpoint_dir)

    with tf.Graph().as_default():
      v = tf.Variable([1.0, 2.0, 3.0], name="v")
      sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      saver = tf.train.Saver({"v": v})
      sess = sm.prepare_session("", init_op=tf.global_variables_initializer(),
                                saver=saver, checkpoint_dir=checkpoint_dir)
      self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
      checkpoint_filename = os.path.join(checkpoint_dir,
                                         "prepare_session_checkpoint")
      saver.save(sess, checkpoint_filename)
    # Create a new Graph and SessionManager and recover.
    with tf.Graph().as_default():
      # Renames the checkpoint directory.
      os.rename(checkpoint_dir, checkpoint_dir2)
      gfile.MakeDirs(checkpoint_dir)
      v = tf.Variable([6.0, 7.0, 8.0], name="v")
      with self.test_session():
        self.assertEqual(False, tf.is_variable_initialized(v).eval())
      sm2 = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
      saver = tf.train.Saver({"v": v})
      # This should fail as there's no checkpoint within 2 seconds.
      with self.assertRaisesRegexp(
          RuntimeError, "no init_op or init_fn or local_init_op was given"):
        sess = sm.prepare_session("", init_op=None, saver=saver,
                                  checkpoint_dir=checkpoint_dir,
                                  wait_for_checkpoint=True, max_wait_secs=2)
      # Rename the checkpoint directory back.
      gfile.DeleteRecursively(checkpoint_dir)
      os.rename(checkpoint_dir2, checkpoint_dir)
      # This should succeed as there's a checkpoint.
      sess = sm2.prepare_session("", init_op=None, saver=saver,
                                 checkpoint_dir=checkpoint_dir,
                                 wait_for_checkpoint=True, max_wait_secs=2)
      self.assertEqual(
          True, tf.is_variable_initialized(
              sess.graph.get_tensor_by_name("v:0")).eval(session=sess)) 
Example #29
Source File: ops.py    From TheNumericsOfGANs with MIT License
def conv2d_transpose(x, num_outputs, kernel_size=[3, 3], stride=[1, 1],
                     pad='SAME', activation_fn=None,
                     init_scale=1., is_init=False, ema=None, name=None):
    ''' transposed convolutional layer '''
    xs = int_shape(x)
    norm_axes = [0, 1, 2]
    if pad == 'SAME':
        target_shape = [xs[0], xs[1] * stride[0],
                        xs[2] * stride[1], num_outputs]
    else:
        target_shape = [xs[0], xs[1] * stride[0] + kernel_size[0] -
                        1, xs[2] * stride[1] + kernel_size[1] - 1, num_outputs]
    with tf.variable_scope(name, default_name='Conv2DTrp'):
        if is_init:
            # data based initialization of parameters
            v = tf.get_variable('V', kernel_size + [num_outputs, int(x.get_shape()[-1])],
                                tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
            v_norm = tf.nn.l2_normalize(v.initialized_value(), [0, 1, 3])
            x_init = tf.nn.conv2d_transpose(x, v_norm, target_shape, [1] + stride + [1],
                                            padding=pad)
            m_init, v_init = tf.nn.moments(x_init, norm_axes)
            scale_init = init_scale / tf.sqrt(v_init + 1e-8)
            g = tf.get_variable('g', dtype=tf.float32,
                                initializer=scale_init, trainable=True)
            b = tf.get_variable('b', dtype=tf.float32,
                                initializer=-m_init * scale_init, trainable=True)
            x_init = scale_init * (x_init - m_init)
            if activation_fn is not None:
                x_init = activation_fn(x_init)
            return x_init

        else:
            v = tf.get_variable('V')
            g = tf.get_variable('g')
            b = tf.get_variable('b')

            tf.assert_variables_initialized([v, g, b])

            # use weight normalization (salimans & kingma, 2016)
            w = tf.nn.l2_normalize(v, [0, 1, 3])

            # calculate convolutional layer output
            x = g * tf.nn.conv2d_transpose(x, w, target_shape, [1] + stride + [1], padding=pad)
            x = x + b

            # apply activation_fn
            if activation_fn is not None:
                x = activation_fn(x)
            return x 
Example #30
Source File: pg_reinforce.py    From Codes-for-RL-PER with MIT License
def __init__(self, session,
                     optimizer,
                     policy_network,
                     state_dim,
                     num_actions,
                     init_exp=0.5,         # initial exploration prob
                     final_exp=0.0,        # final exploration prob
                     anneal_steps=10000,   # N steps for annealing exploration
                     discount_factor=0.99, # discount future rewards
                     reg_param=0.001,      # regularization constants
                     max_gradient=5,       # max gradient norms
                     summary_writer=None,
                     summary_every=100):

    # tensorflow machinery
    self.session        = session
    self.optimizer      = optimizer
    self.summary_writer = summary_writer

    # model components
    self.policy_network = policy_network

    # training parameters
    self.state_dim       = state_dim
    self.num_actions     = num_actions
    self.discount_factor = discount_factor
    self.max_gradient    = max_gradient
    self.reg_param       = reg_param

    # exploration parameters
    self.exploration  = init_exp
    self.init_exp     = init_exp
    self.final_exp    = final_exp
    self.anneal_steps = anneal_steps

    # counters
    self.train_iteration = 0

    # rollout buffer
    self.state_buffer  = []
    self.reward_buffer = []
    self.action_buffer = []

    # record reward history for normalization
    self.all_rewards = []
    self.max_reward_length = 1000000

    # create and initialize variables
    self.create_variables()
    var_lists = tf.get_collection(tf.GraphKeys.VARIABLES)
    self.session.run(tf.initialize_variables(var_lists))

    # make sure all variables are initialized
    self.session.run(tf.assert_variables_initialized())

    if self.summary_writer is not None:
      # graph was not available when journalist was created
      self.summary_writer.add_graph(self.session.graph)
      self.summary_every = summary_every