Python six.moves.zip() Examples

The following are 30 code examples of six.moves.zip(), drawn from open-source projects. The original project and source file are noted above each example. You may also want to check out all available functions and classes of the module six.moves.
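Before diving into the examples, recall what six.moves.zip() gives you: on Python 3 it is the built-in zip, and on Python 2 it resolves to itertools.izip, so in both cases it returns a lazy iterator over paired elements. A minimal sketch:

from six.moves import zip

names = ['a', 'b', 'c']
values = [1, 2, 3]

# Lazy iterator on both Python 2 and 3; wrap it in list() to materialize.
pairs = list(zip(names, values))
assert pairs == [('a', 1), ('b', 2), ('c', 3)]

# Like the built-in, it stops at the shortest input.
assert list(zip(names, [1, 2])) == [('a', 1), ('b', 2)]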
Example #1
Source File: misc.py    From mmdetection with Apache License 2.0
def multi_apply(func, *args, **kwargs):
    """Apply function to a list of arguments.

    Note:
        This function applies the ``func`` to multiple inputs and
            map the multiple outputs of the ``func`` into different
            list. Each list contains the same type of outputs corresponding
            to different inputs.

    Args:
        func (Function): A function that will be applied to a list of
            arguments

    Returns:
        tuple(list): A tuple containing multiple list, each list contains
            a kind of returned results by the function
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results))) 
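For reference, a minimal usage sketch of multi_apply (not taken from mmdetection; it assumes the function above is in scope together with `from functools import partial` and `from six.moves import map, zip`). A hypothetical two-output helper is applied element-wise, and its outputs are regrouped into two lists:

def square_and_cube(x):
    # Hypothetical per-item function with two outputs.
    return x ** 2, x ** 3

squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
assert squares == [1, 4, 9]
assert cubes == [1, 8, 27]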
Example #2
Source File: linthompsamp.py    From striatum with BSD 2-Clause "Simplified" License
def _linthompsamp_score(self, context):
    """Thompson Sampling"""
    action_ids = list(six.viewkeys(context))
    context_array = np.asarray([context[action_id]
                                for action_id in action_ids])
    model = self._model_storage.get_model()
    B = model['B']  # pylint: disable=invalid-name
    mu_hat = model['mu_hat']
    v = self.R * np.sqrt(24 / self.epsilon
                         * self.context_dimension
                         * np.log(1 / self.delta))
    mu_tilde = self.random_state.multivariate_normal(
        mu_hat.flat, v**2 * np.linalg.inv(B))[..., np.newaxis]
    estimated_reward_array = context_array.dot(mu_hat)
    score_array = context_array.dot(mu_tilde)

    estimated_reward_dict = {}
    uncertainty_dict = {}
    score_dict = {}
    for action_id, estimated_reward, score in zip(
            action_ids, estimated_reward_array, score_array):
        estimated_reward_dict[action_id] = float(estimated_reward)
        score_dict[action_id] = float(score)
        uncertainty_dict[action_id] = float(score - estimated_reward)
    return estimated_reward_dict, uncertainty_dict, score_dict
Example #3
Source File: layers_test.py    From lingvo with Apache License 2.0
def testBasicGrad(self):
    p = self._testParams(dtype=tf.float64)
    with self.session(use_gpu=False, graph=tf.Graph()) as sess:
      lm = p.Instantiate()
      inputs, paddings, targets = self._testInputs(dtype=tf.float64)
      xent_output, _ = lm.FPropDefaultTheta(
          inputs=inputs,
          paddings=paddings,
          labels=py_utils.NestedMap(
              class_weights=1 - paddings, class_ids=targets))

      lm_vars = lm.vars.Flatten()
      # Now add the backward graph.
      grads = tf.gradients(xent_output.avg_xent, lm_vars)

      self.evaluate(tf.global_variables_initializer())
      self.assertEqual(len(lm_vars), len(grads))
      for x, grad_x in zip(lm_vars, grads):
        grad_symbolic = self.evaluate(grad_x)
        grad_numeric = test_utils.ComputeNumericGradient(
            sess, xent_output.avg_xent, x, delta=1e-6)
        self.assertAllClose(grad_symbolic, grad_numeric, atol=0.005) 
Example #4
Source File: plain_agent.py    From streetlearn with Apache License 2.0
def unroll(self, actions, env_outputs, core_state):
    """Manual implementation of the network unroll."""
    _, _, done, _ = env_outputs

    torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))

    # Note, in this implementation we can't use CuDNN RNN to speed things up due
    # to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
    # changed to implement snt.LSTMCell).
    initial_core_state = self._core.zero_state(tf.shape(actions)[1], tf.float32)
    core_output_list = []
    for input_, d in zip(tf.unstack(torso_outputs), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next.
      core_state = nest.map_structure(
          functools.partial(tf.where, d), initial_core_state, core_state)
      core_output, core_state = self._core(input_, core_state)
      core_output_list.append(core_output)

    return snt.BatchApply(self._head)(tf.stack(core_output_list)), core_state 
Example #5
Source File: builder_layers_test.py    From lingvo with Apache License 2.0
def testLinearLayer(self):
    g = tf.Graph()
    with g.as_default():
      tf.random.set_seed(24332)
      p = layers.LinearLayer.Params().Set(
          name='test', input_dims=10, output_dims=5)
      l = p.Instantiate()
      xs = []
      ys = []
      for shape in ([2, 10], [2, 3, 10], [2, 3, 5, 10], [2, 3, 5, 7, 10]):
        x = tf.random.normal(shape=shape)
        y = l.FPropDefaultTheta(x)
        xs += [x]
        ys += [y]

    with self.session(graph=g):
      self.evaluate(tf.global_variables_initializer())
      xs_val, ys_val, w_val = self.evaluate([xs, ys, l.vars])

    self.assertEqual(w_val.w.shape, (10, 5))
    for (xv, yv) in zip(xs_val, ys_val):
      self.assertAllClose(np.matmul(xv, w_val.w), yv) 
Example #6
Source File: arithmetic.py    From mathematics_dataset with Apache License 2.0
def split(self, args):
    """Splits the entropy and op counts up."""
    non_integer_count = sum(not arg.is_Integer for arg in args)
    assert non_integer_count <= self.count - 1
    count_split = combinatorics.uniform_non_negative_integers_with_sum(
        len(args), (self.count - 1) - non_integer_count)
    for i, arg in enumerate(args):
      if not arg.is_Integer:
        count_split[i] += 1
    if all(count == 0 for count in count_split):
      assert self.entropy == 0
      entropies = np.zeros(len(count_split))
    else:
      entropies = (
          np.random.dirichlet(np.maximum(1e-9, count_split)) * self.entropy)
    return [_SampleArgs(op_count, entropy)
            for op_count, entropy in zip(count_split, entropies)] 
Example #7
Source File: export_kitti_detection.py    From lingvo with Apache License 2.0
def ExportKITTIDetection(out_dir, source_id, location_cam, dimension_cam,
                         rotation_cam, bboxes_2d, scores, class_name, is_first):
  """Write detections to a text file in KITTI format."""
  tf.logging.info("Exporting %s for %s" % (class_name, source_id))
  fname = out_dir + "/" + source_id + ".txt"
  with tf.io.gfile.GFile(fname, "a") as fid:
    # Ensure we always create a file even when there's no detection.
    # TODO(shlens): Test whether this is actually necessary on the KITTI
    # eval server.
    if is_first:
      fid.write("")
    for location, dimension, ry, bbox_2d, score in zip(
        location_cam, dimension_cam, rotation_cam, bboxes_2d, scores):
      if score < FLAGS.score_threshold:
        continue
      # class_name, truncated(ignore), alpha(ignore), bbox2D x 4
      part1 = [class_name, -1, -1, -10] + list(bbox_2d)
      # dimension x 3, location x 3, rotation_y x 1, score x 1
      fill = tuple(part1 + list(dimension) + list(location) + [ry] + [score])
      kitti_format_string = ("%s %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf "
                             "%lf %lf %lf %lf")
      kitti_line = kitti_format_string % fill
      fid.write(kitti_line + "\n") 
Example #8
Source File: builder_lib.py    From lingvo with Apache License 2.0
def _ParMap(self, name, key_to_sub):
    """Perform parallel layers and create a NestedMap from the outputs.

    Parallel branches on an input `NestedMap`. Each branch should expect the
    same `NestedMap` as input; each branch's output will be mapped to the
    specified key in key_to_sub.

    Args:
      name: String layer name.
      key_to_sub: Dictionary mapping keys to sub params. Each sub should expect
        a NestedMap input.

    Returns:
      Params for this layer.
    """
    sorted_keys = sorted(key_to_sub.keys())
    sorted_subs = [key_to_sub[k] for k in sorted_keys]

    def _MakeNestedMap(*vals):
      return py_utils.NestedMap(dict(zip(sorted_keys, vals)))

    return self._ApplyFnMulti(name, _MakeNestedMap, *sorted_subs) 
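The dict(zip(sorted_keys, vals)) call in _MakeNestedMap simply pairs the sorted keys with the positional outputs of the parallel branches. A plain-Python sketch of that pairing (the keys and outputs here are made up; the real code wraps the dict in a lingvo NestedMap):

from six.moves import zip

sorted_keys = ['atten', 'ffn']             # sorted(key_to_sub.keys())
branch_outputs = ('atten_out', 'ffn_out')  # one output per branch, same order
assert dict(zip(sorted_keys, branch_outputs)) == {'atten': 'atten_out',
                                                  'ffn': 'ffn_out'}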
Example #9
Source File: probability.py    From mathematics_dataset with Apache License 2.0
def _level_set_event(values, length, verb):
  """Generates `LevelSetEvent`; see _generate_sequence_event."""
  counts = combinatorics.uniform_non_negative_integers_with_sum(
      len(values), length)
  counts_dict = dict(list(zip(values, counts)))
  event = probability.CountLevelSetEvent(counts_dict)

  shuffled_values = list(values)
  random.shuffle(shuffled_values)

  counts_and_values = [
      '{} {}'.format(counts_dict[value], value)
      for value in shuffled_values
      if counts_dict[value] > 0
  ]
  counts_and_values = _word_series(counts_and_values)
  template = random.choice([
      '{verbing} {counts_and_values}',
  ])
  verbing = _GERUNDS[verb]
  event_description = template.format(
      counts_and_values=counts_and_values, verbing=verbing)
  return event, event_description 
Example #10
Source File: point_detector.py    From lingvo with Apache License 2.0
def _Placeholders(self):
    """Return a NestedMap of placeholders to fill in for inference.

    Runs the configured input pipeline to generate the expected shapes and types
    of the inputs.

    Returns:
      A NestedMap of placeholders matching the input structure of
       the inference model.
    """
    p = self.params
    with tf.Graph().as_default():
      inputs = self.params.input.Instantiate()

    # Turn those inputs into placeholders.
    placeholders = []
    for input_shape, dtype in zip(inputs.Shape().Flatten(),
                                  inputs.DType().Flatten()):
      batched_input_shape = [p.inference_batch_size] + input_shape.as_list()
      placeholders.append(tf.placeholder(dtype, batched_input_shape))

    result = inputs.DType().Pack(placeholders)
    return result 
Example #11
Source File: layers_test.py    From lingvo with Apache License 2.0
def _TransformerMultiSourceInputs(self, depth=3, dtype=tf.float32):
    np.random.seed(NUMPY_RANDOM_SEED)
    src_names = ['en1', 'en2', 'de']
    slens = [11, 10, 9]
    sbatch = 3
    tlen = 5
    source_vecs = tf.constant(
        np.random.uniform(size=(tlen, sbatch*2, depth)), dtype)
    source_padding = tf.constant(np.zeros([tlen, sbatch*2, 1]), dtype)
    aux_source_vecs = py_utils.NestedMap()
    aux_source_paddings = py_utils.NestedMap()
    for slen, sname in zip(slens, src_names):
      aux_source_vecs[sname] = tf.constant(
          np.random.uniform(size=[slen, sbatch, depth]), dtype)
      aux_source_paddings[sname] = tf.constant(np.zeros([slen, sbatch]), dtype)
    return (source_vecs, source_padding, aux_source_vecs, aux_source_paddings) 
Example #12
Source File: polynomials.py    From mathematics_dataset with Apache License 2.0
def coefficients_to_polynomial(coefficients, variables):
  """Converts array of lists of coefficients to a polynomial."""
  coefficients = np.asarray(coefficients)
  shape = coefficients.shape

  indices = list(zip(*np.indices(shape).reshape([len(shape), -1])))
  monomials = []
  for power in indices:
    coeffs = coefficients.item(power)
    if (number.is_integer_or_rational(coeffs)
        or isinstance(coeffs, sympy.Symbol)):
      coeffs = [coeffs]
    elif not isinstance(coeffs, list):
      raise ValueError('Unrecognized coeffs={} type={}'
                       .format(coeffs, type(coeffs)))
    for coeff in coeffs:
      monomials.append(monomial(coeff, variables, power))
  random.shuffle(monomials)
  return ops.Add(*monomials) 
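For intuition, the zip(*np.indices(shape).reshape([len(shape), -1])) line above enumerates every index tuple of the coefficient array. A small sketch of what it produces:

import numpy as np
from six.moves import zip

shape = (2, 3)
indices = list(zip(*np.indices(shape).reshape([len(shape), -1])))
assert indices == [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]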
Example #13
Source File: probability.py    From mathematics_dataset with Apache License 2.0
def inverse(self, event):
    # Specialization for `FiniteProductEvent`; don't need to take all sequences.
    if isinstance(event, FiniteProductEvent):
      assert len(event.events) == len(self._random_variables)
      zipped = list(zip(self._random_variables, event.events))
      return FiniteProductEvent(tuple(
          random_variable.inverse(sub_event)
          for random_variable, sub_event in zipped))

    # Try fallback of mapping each sequence separately.
    try:
      all_sequences = event.all_sequences()
    except AttributeError:
      raise ValueError('Unhandled event type {}'.format(type(event)))

    mapped = set()
    for sequence in all_sequences:
      assert len(sequence) == len(self._random_variables)
      zipped = list(zip(self._random_variables, sequence))
      mapped_sequence = FiniteProductEvent(tuple(
          random_variable.inverse(DiscreteEvent({element}))
          for random_variable, element in zipped))
      mapped.update(mapped_sequence.all_sequences())
    return SequenceEvent(mapped) 
Example #14
Source File: bnn_sgmcmc.py    From zhusuan with MIT License
def build_bnn(x, layer_sizes, logstds, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]),
                      logstd=logstds[i], group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = -0.95
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
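The zip(layer_sizes[:-1], layer_sizes[1:]) idiom above (also used in Examples #16, #23, #26 and #27) walks over consecutive (fan-in, fan-out) pairs. A quick sketch with made-up sizes:

from six.moves import zip

layer_sizes = [784, 100, 100, 10]  # hypothetical: input, two hidden layers, output
assert list(zip(layer_sizes[:-1], layer_sizes[1:])) == [(784, 100), (100, 100), (100, 10)]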
Example #15
Source File: sgmcmc.py    From zhusuan with MIT License
def _apply_updates(self, grad_func):
    qs = self._var_list
    self._define_variables(qs)
    update_ops, infos = self._update(qs, grad_func)

    with tf.control_dependencies([self.t.assign_add(1)]):
        sample_op = tf.group(*update_ops)
    list_attrib = zip(*map(lambda d: six.itervalues(d), infos))
    list_attrib_with_k = map(lambda l: dict(zip(self._latent_k, l)),
                             list_attrib)
    attrib_names = list(six.iterkeys(infos[0]))
    dict_info = dict(zip(attrib_names, list_attrib_with_k))
    SGMCMCInfo = namedtuple("SGMCMCInfo", attrib_names)
    sgmcmc_info = SGMCMCInfo(**dict_info)

    return sample_op, sgmcmc_info
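The chained zip/map calls above transpose the per-variable info dicts into per-attribute dicts keyed by latent variable name, which then become the fields of the SGMCMCInfo namedtuple. A plain-Python sketch of the same reshaping (the variable names and values are made up):

from collections import namedtuple
import six
from six.moves import map, zip

latent_k = ['w', 'z']  # hypothetical latent variable names
infos = [{'mean': 0.1, 'acceptance_rate': 0.9},   # info dict for 'w'
         {'mean': 0.2, 'acceptance_rate': 0.8}]   # info dict for 'z'

list_attrib = zip(*map(lambda d: six.itervalues(d), infos))
list_attrib_with_k = map(lambda l: dict(zip(latent_k, l)), list_attrib)
attrib_names = list(six.iterkeys(infos[0]))
dict_info = dict(zip(attrib_names, list_attrib_with_k))
info = namedtuple("SGMCMCInfo", attrib_names)(**dict_info)
assert info.mean == {'w': 0.1, 'z': 0.2}
assert info.acceptance_rate == {'w': 0.9, 'z': 0.8}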
Example #16
Source File: bnn_vi.py    From zhusuan with MIT License
def build_bnn(x, layer_sizes, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]), std=1.,
                      group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = tf.get_variable("y_logstd", shape=[],
                               initializer=tf.constant_initializer(0.))
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
Example #17
Source File: hmc.py    From zhusuan with MIT License
def update(self, x):
    # x: (chain_dims data_dims)
    new_t = tf.assign(self.t, self.t + 1)
    weight = (1 - self.decay) / (1 - tf.pow(self.decay, new_t))
    # incr: (chain_dims data_dims)
    incr = [weight * (q - mean) for q, mean in zip(x, self.mean)]
    # mean: (1,...,1 data_dims)
    update_mean = [mean.assign_add(
        tf.reduce_mean(i, axis=self.chain_axes, keepdims=True))
        for mean, i in zip(self.mean, incr)]
    # var: (1,...,1 data_dims)
    new_var = [
        (1 - weight) * var +
        tf.reduce_mean(i * (q - mean), axis=self.chain_axes,
                       keepdims=True)
        for var, i, q, mean in zip(self.var, incr, x, update_mean)]

    update_var = [tf.assign(var, n_var)
                  for var, n_var in zip(self.var, new_var)]
    return update_var
Example #18
Source File: hmc.py    From zhusuan with MIT License
def leapfrog_integrator(q, p, step_size1, step_size2, grad, mass):
    q = [x + step_size1 * y for x, y in zip(q, velocity(p, mass))]
    # p = p + epsilon / 2 * gradient q
    grads = grad(q)
    p = [x + step_size2 * y for x, y in zip(p, grads)]
    return q, p 
Example #19
Source File: layers_test.py    From lingvo with Apache License 2.0
def testBasicGrad(self):
    time, batch, dims, vocab = 5, 3, 6, 8
    p = self._testParams(dims, vocab)
    p.dtype = tf.float64

    with self.session(use_gpu=False, graph=tf.Graph()) as sess:
      lm = p.Instantiate()
      np.random.seed(12345)
      inputs = np.random.normal(size=[time, batch, dims])
      inputs = tf.constant(inputs, tf.float64)
      paddings = np.zeros([time, batch])
      paddings[-1] = 1.0
      paddings = tf.constant(paddings, tf.float64)
      targets = tf.constant(
          np.random.randint(vocab, size=(time, batch)), tf.int32)

      xent_output, _ = lm.FPropDefaultTheta(
          inputs=inputs,
          paddings=paddings,
          state0=lm.zero_state(lm.theta, batch),
          labels=py_utils.NestedMap(
              class_weights=1 - paddings, class_ids=targets))

      lm_vars = lm.vars.Flatten()
      # Now add the backward graph.
      grads = tf.gradients(xent_output.avg_xent, lm_vars)

      self.evaluate(tf.global_variables_initializer())
      self.assertEqual(len(lm_vars), len(grads))
      for x, grad_x in zip(lm_vars, grads):
        grad_symbolic = self.evaluate(grad_x)
        grad_numeric = test_utils.ComputeNumericGradient(
            sess, xent_output.avg_xent, x, delta=1e-6)
        self.assertAllClose(grad_symbolic, grad_numeric, atol=0.005) 
Example #20
Source File: decoder_test.py    From lingvo with Apache License 2.0
def _VerifyHypothesesMatch(self, hyp1, hyp2):
    tf.logging.info('hyp1 = %s', hyp1)
    tf.logging.info('hyp2 = %s', hyp2)
    self.assertEqual(hyp1.beam_id, hyp2.beam_id)
    self.assertEqual(list(hyp1.ids), list(hyp2.ids))
    self.assertAllClose(hyp1.scores, hyp2.scores)
    self.assertEqual(len(hyp1.atten_vecs), len(hyp2.atten_vecs))
    for av1, av2 in zip(hyp1.atten_vecs, hyp2.atten_vecs):
      self.assertAllClose(av1.prob, av2.prob) 
Example #21
Source File: test_utils_test.py    From model-optimization with Apache License 2.0
def common_asserts_for_test_data(self, data):
    """See base class."""
    for x, decoded_x in zip(data.x, data.decoded_x):
      self.assertTrue(
          np.isclose(decoded_x, x - 1) or np.isclose(decoded_x, x) or
          np.isclose(decoded_x, x + 1))
    self.assertAllEqual(data.encoded_x[self._VALUES_KEY], data.decoded_x) 
Example #22
Source File: wpm_encode_file.py    From lingvo with Apache License 2.0
def _RunEncoding():
  sess = tf.Session()
  enc = wpm_encoder.WpmEncoder(FLAGS.wpm_filepath)
  src_txt_placeholder = tf.placeholder(tf.string, [])
  src_encode_op = enc.Encode(src_txt_placeholder)
  tgt_txt_placeholder = tf.placeholder(tf.string, [])
  tgt_encode_op = enc.Encode(tgt_txt_placeholder)
  pairs = list(
      zip(FLAGS.source_filepaths.split(','), FLAGS.target_filepaths.split(',')))
  with tf.python_io.TFRecordWriter(FLAGS.output_filepath) as outf:
    n = 0
    for p in pairs:
      with tf.io.gfile.GFile(p[0], 'r') as sourcef:
        with tf.io.gfile.GFile(p[1], 'r') as targetf:
          for textp in zip(sourcef.readlines(), targetf.readlines()):
            n += 1
            if n % 10000 == 0:
              tf.logging.info('Watermark[%d]: %d', FLAGS.shard_id, n)
            if n % FLAGS.num_shards != FLAGS.shard_id:
              continue
            source_text = _Preprocess(textp[0])
            target_text = _Preprocess(textp[1])
            # By convention:
            # * source always ends in </s>, never starts with <s>.
            # * target never ends in </s>, always starts with <s>.
            _AssertTextFormat(source_text)
            _AssertTextFormat(target_text)
            ((src_i, src_s), (tgt_i, tgt_s)) = sess.run(
                [src_encode_op, tgt_encode_op],
                feed_dict={
                    src_txt_placeholder: source_text,
                    tgt_txt_placeholder: target_text
                },
            )
            ex = _MakeTfExample(enc, src_i, src_s, tgt_i, tgt_s)
            if not ex:  # Too long.
              continue
            encoded = ex.SerializeToString()
            outf.write(encoded) 
Example #23
Source File: builder_lib.py    From lingvo with Apache License 2.0
def _MLP(self, name, dims, use_bn=True, activation_fn=None):
    l = []
    for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])):
      l += [self._FC('l%03d' % n, i, o, use_bn=use_bn,
                     activation_fn=activation_fn)]
    return self._Seq(name, *l) 
Example #24
Source File: multi_gpu.py    From zhusuan with MIT License
def average_gradients(tower_grads):
    """
    Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    :param tower_grads: List of lists of (gradient, variable) tuples.
        The outer list is over individual gradients. The inner list is over
        the gradient calculation for each tower.
    :return: List of pairs of (gradient, variable) where the gradient has
        been averaged across all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        if grad_and_vars[0][0] is None:
            continue
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)

            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension.
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads 
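For intuition, a plain-Python sketch of how zip(*tower_grads) regroups the per-tower (gradient, variable) pairs by variable (floats and strings stand in for tensors and tf.Variables):

from six.moves import zip

tower_grads = [
    [(1.0, 'var0'), (4.0, 'var1')],   # gradients computed on tower 0
    [(3.0, 'var0'), (2.0, 'var1')],   # gradients computed on tower 1
]

for grad_and_vars in zip(*tower_grads):
    # grad_and_vars collects one variable's gradients across all towers,
    # e.g. ((1.0, 'var0'), (3.0, 'var0')) on the first iteration.
    grads = [g for g, _ in grad_and_vars]
    print(grad_and_vars[0][1], sum(grads) / len(grads))
# var0 2.0
# var1 3.0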
Example #25
Source File: builder_layers_test.py    From lingvo with Apache License 2.0
def testParalellMultiOutputsLayer(self):
    g = tf.Graph()
    with g.as_default():
      tf.random.set_seed(24332)

      def Merge(xs):
        rets = []
        for x in zip(*xs):
          if x[0] is None:
            rets.append(None)
          else:
            rets.append(tf.add_n(list(x)))
        return tuple(rets)

      p = layers.ParallelLayer.Params().Set(
          name='parallel',
          merge=Merge,
          sub=[
              lingvo_layers.ConvLayer.Params().Set(
                  name='p%d' % i,
                  filter_shape=(3, 3, 3, 5),
                  filter_stride=(1, 1),
                  batch_norm=False) for i in range(3)
          ])
      l = p.Instantiate()
      x = tf.zeros(shape=[2, 32, 32, 3])
      y0, y1 = l.FPropDefaultTheta(x)
      y_sum = tf.reduce_sum(y0)
      # Ensures the 2nd return value (None) is handled properly.
      self.assertEqual(None, y1)

    with self.session(graph=g):
      self.evaluate(tf.global_variables_initializer())
      y_sum_val = self.evaluate(y_sum)

    self.assertEqual(y_sum_val, 0.) 
Example #26
Source File: bnn_vi.py    From zhusuan with MIT License
def build_mean_field_variational(layer_sizes, n_particles):
    bn = zs.BayesianNet()
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w_mean = tf.get_variable(
            "w_mean_" + str(i), shape=[n_out, n_in + 1],
            initializer=tf.constant_initializer(0.))
        w_logstd = tf.get_variable(
            "w_logstd_" + str(i), shape=[n_out, n_in + 1],
            initializer=tf.constant_initializer(0.))
        bn.normal("w" + str(i), w_mean, logstd=w_logstd,
                  n_samples=n_particles, group_ndims=2)
    return bn 
Example #27
Source File: variational_dropout.py    From zhusuan with MIT License
def q(n, net_size, n_particles):
    bn = zs.BayesianNet()
    for i, [n_in, n_out] in enumerate(zip(net_size[:-1], net_size[1:])):
        with tf.variable_scope('layer' + str(i)):
            logit_alpha = tf.get_variable('logit_alpha', [n_in])

        std = tf.sqrt(tf.nn.sigmoid(logit_alpha) + 1e-10)
        std = tf.tile(tf.expand_dims(std, 0), [n, 1])
        eps = bn.normal('layer' + str(i) + '/eps',
                        1., std=std,
                        n_samples=n_particles, group_ndims=1)
    return bn 
Example #28
Source File: metrics.py    From lingvo with Apache License 2.0
def _Zip(self, values):
    assert isinstance(values, list)
    return list(zip(values[::2], values[1::2])) 
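A quick sketch of the pairing performed by _Zip (the input list here is hypothetical): the even-indexed and odd-indexed elements are zipped into consecutive pairs, which PackMetricsValues below then assigns to the sorted metric keys.

from six.moves import zip

values = [0.75, 120, 0.5, 80]
assert list(zip(values[::2], values[1::2])) == [(0.75, 120), (0.5, 80)]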
Example #29
Source File: metrics.py    From lingvo with Apache License 2.0
def PackMetricsValues(self, values):
    """Packs numpy values into a dict of metrics."""
    for k, v in zip(sorted(self._metrics.keys()), self._Zip(values)):
      self._metrics[k] = v 
Example #30
Source File: decoder_test.py    From lingvo with Apache License 2.0
def _DecoderGradientCheckerHelper(self,
                                  decoder_cls,
                                  feed_att_context_to_softmax=False):
    with self.session(use_gpu=True, graph=tf.Graph()) as sess:
      tf.random.set_seed(_TF_RANDOM_SEED)
      p = self._DecoderParams(dtype=tf.float64, decoder_cls=decoder_cls)
      p.feed_attention_context_vec_to_softmax = feed_att_context_to_softmax
      dec = p.Instantiate()
      encoder_outputs, targets = self._Inputs(dtype=tf.float64)
      loss, _ = dec.FPropDefaultTheta(encoder_outputs, targets).metrics['loss']
      all_vars = tf.trainable_variables()
      grads = tf.gradients(loss, all_vars)
      print('num of vars ', len(all_vars))

      def DenseGrad(var, grad):
        if isinstance(grad, tf.Tensor):
          return grad
        elif isinstance(grad, tf.IndexedSlices):
          return tf.math.unsorted_segment_sum(grad.values, grad.indices,
                                              tf.shape(var)[0])

      grads = [DenseGrad(x, y) for x, y in zip(all_vars, grads)]

      self.evaluate(tf.global_variables_initializer())
      symbolic_grads = [gd.eval() for gd in grads]
      numerical_grads = []
      for v in all_vars:
        numerical_grads.append(
            test_utils.ComputeNumericGradient(sess, loss, v, delta=1e-5))

      rets = {}
      for v, x, y in zip(all_vars, symbolic_grads, numerical_grads):
        print('symbolic_grads, numerical_grads :', v.name)
        print(x)
        print(y)
        self.assertAllClose(x, y)
        rets[v.name] = x

      return rets