Python six.moves.xrange() Examples

The following are 30 code examples of six.moves.xrange(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions and classes of the module six.moves, or try the search function.
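For context, six.moves.xrange resolves to the built-in xrange on Python 2 and to range on Python 3, so the same lazy-iteration code runs unchanged on both interpreters. A minimal sketch:

from six.moves import xrange

total = 0
for i in xrange(5):  # lazy range on Python 2, built-in range on Python 3
    total += i
print(total)  # 10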
Example #1
Source File: utils.py    From ctc_tensorflow_example with MIT License
def sparse_tuple_from(sequences, dtype=np.int32):
    """Create a sparse representention of x.
    Args:
        sequences: a list of lists of type dtype where each element is a sequence
    Returns:
        A tuple with (indices, values, shape)
    """
    indices = []
    values = []

    for n, seq in enumerate(sequences):
        indices.extend(zip([n]*len(seq), range(len(seq))))
        values.extend(seq)

    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), np.asarray(indices).max(0)[1]+1], dtype=np.int64)

    return indices, values, shape 
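As a quick illustration (the input below is hypothetical, not part of the original project), calling the helper on two short sequences yields COO-style indices, the flattened values, and a dense shape suitable for building a tf.SparseTensor:

import numpy as np

indices, values, shape = sparse_tuple_from([[1, 2, 3], [4, 5]])
print(indices.tolist())  # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]]
print(values.tolist())   # [1, 2, 3, 4, 5]
print(shape.tolist())    # [2, 3]  -> (num_sequences, max_sequence_length)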
Example #2
Source File: trainer.py    From Representation-Learning-by-Learning-to-Count with MIT License
def train(self, dataset):
        log.infov("Training Starts!")
        pprint(self.batch_train)

        max_steps = 2500000

        output_save_step = 1000

        for s in xrange(max_steps):
            step, summary, loss, loss_pair, loss_unpair, step_time = \
                self.run_single_step(self.batch_train, dataset, step=s, is_train=True)

            if s % 10 == 0:
                self.log_step_message(step, loss, loss_pair, loss_unpair, step_time)
                self.summary_writer.add_summary(summary, global_step=step)

            if s % output_save_step == 0:
                log.infov("Saved checkpoint at %d", s)
                save_path = self.saver.save(self.session,
                                            os.path.join(self.train_dir, 'model'),
                                            global_step=step) 
Example #3
Source File: async_benchmark.py    From arctic with GNU Lesser General Public License v2.1
def run_scenario(result_text, rounds, num_requests, num_chunks, parallel_lz4,
                 use_async, async_arctic_pool_workers=None):
    aclz4.enable_parallel_lz4(parallel_lz4)
    if async_arctic_pool_workers is not None:
        ASYNC_ARCTIC.reset(pool_size=int(async_arctic_pool_workers), timeout=10)
    measurements = []
    for curr_round in xrange(rounds):
        # print("Running round {}".format(curr_round))
        clean_lib()
        start = time.time()
        if use_async:
            async_bench(num_requests, num_chunks)
        else:
            serial_bench(num_requests, num_chunks)
        measurements.append(time.time() - start)
    print("{}: async={}, chunks/write={}, writes/round={}, rounds={}, "
          "parallel_lz4={}, async_arctic_pool_workers={}: {}".format(
        result_text, use_async, num_chunks, num_requests, rounds, parallel_lz4, async_arctic_pool_workers,
        ["{:.3f}".format(x) for x in get_stats(measurements[1:] if len(measurements) > 1 else measurements)])) 
Example #4
Source File: openai_gym.py    From rlgraph with Apache License 2.0
def _step_and_skip(self, actions):
        # TODO - allow for goal reward substitution for multi-goal envs
        if self.frameskip is None:
            # Frame-skipping is unset or set as an env property.
            return self.gym_env.step(actions)
        else:
            # Do frameskip loop in our wrapper class.
            step_reward = 0.0
            terminal = None
            info = None
            for i in range_(self.frameskip):
                state, reward, terminal, info = self.gym_env.step(actions)
                if i == self.frameskip - 2:
                    self.state_buffer[0] = state
                if i == self.frameskip - 1:
                    self.state_buffer[1] = state
                step_reward += reward
                if terminal:
                    break

            max_frame = self.state_buffer.max(axis=0)

            return max_frame, step_reward, terminal, info 
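Taking the element-wise max over the last two frames is the usual Atari flicker-removal trick; a tiny NumPy sketch (values invented for illustration) of what state_buffer.max(axis=0) computes:

import numpy as np

state_buffer = np.array([[0, 255, 10],    # frame at frameskip - 2
                         [20, 0, 10]])    # frame at frameskip - 1
print(state_buffer.max(axis=0))  # [ 20 255  10] -- element-wise maximum of the two frames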
Example #5
Source File: grid_world.py    From rlgraph with Apache License 2.0
def render_txt(self):
        actor = "X"
        if self.action_type == "ftj":
            actor = "^" if self.orientation == 0 else ">" if self.orientation == 90 else "v" if \
                self.orientation == 180 else "<"

        # paints itself
        txt = ""
        for row in range_(len(self.world)):
            for col, val in enumerate(self.world[row]):
                if self.x == col and self.y == row:
                    txt += actor
                else:
                    txt += val
            txt += "\n"
        txt += "\n"
        return txt 
Example #6
Source File: grid_world.py    From rlgraph with Apache License 2.0
def update_cam_pixels(self):
        # Init camera?
        if self.camera_pixels is None:
            self.camera_pixels = np.zeros(shape=(self.n_row, self.n_col, 3), dtype=np.int32)
        self.camera_pixels[:, :, :] = 0  # reset everything

        # 1st channel -> Walls (127) and goal (255).
        # 2nd channel -> Dangers (fire=127, holes=255)
        # 3rd channel -> Actor position (255).
        for row in range_(self.n_row):
            for col in range_(self.n_col):
                field = self.world[row, col]
                if field == "F":
                    self.camera_pixels[row, col, 0] = 127
                elif field == "H":
                    self.camera_pixels[row, col, 0] = 255
                elif field == "W":
                    self.camera_pixels[row, col, 1] = 127
                elif field == "G":
                    self.camera_pixels[row, col, 1] = 255  # will this work (goal==2x wall)?
        # Overwrite player's position.
        self.camera_pixels[self.y, self.x, 2] = 255 
Example #7
Source File: utils.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def topsort(nodes):
  n = len(nodes)
  deg = [0]*n
  g = [[] for _ in xrange(n)]
  for i,node in enumerate(nodes):
    if 'inputs' in node:
      for j in node['inputs']:
        deg[i] += 1
        g[j[0]].append(i)
  from collections import deque
  q = deque([i for i in xrange(n) if deg[i]==0])
  res = []
  for its in xrange(n):
    i = q.popleft()
    res.append(nodes[i])
    for j in g[i]:
      deg[j] -= 1
      if deg[j] == 0:
        q.append(j)
  new_ids=dict([(node['name'],i) for i,node in enumerate(res)])
  for node in res:
    if 'inputs' in node:
      for j in node['inputs']:
        j[0]=new_ids[nodes[j[0]]['name']]
  return res 
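A small usage sketch (the two nodes are invented for illustration) assuming the MXNet-style graph JSON that topsort expects, where each entry in a node's 'inputs' is a [node_id, output_index] pair:

nodes = [
    {'name': 'fc1', 'inputs': [[1, 0]]},   # fc1 consumes the output of 'data'
    {'name': 'data', 'inputs': []},
]
ordered = topsort(nodes)
print([node['name'] for node in ordered])  # ['data', 'fc1'], with 'inputs' rewritten to the new ids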
Example #8
Source File: test_spaces.py    From rlgraph with Apache License 2.0
def test_complex_space_sampling_and_check_via_contains(self):
        """
        Tests a complex Space on sampling and `contains` functionality.
        """
        space = Dict(
            a=dict(aa=float, ab=bool),
            b=dict(ba=float),
            c=float,
            d=IntBox(low=0, high=1),
            e=IntBox(5),
            f=FloatBox(shape=(2, 2)),
            g=Tuple(float, FloatBox(shape=())),
            add_batch_rank=True
        )

        samples = space.sample(size=100, horizontal=True)
        for i in range_(len(samples)):
            self.assertTrue(space.contains(samples[i])) 
Example #9
Source File: test_imagenet_attacks.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def load_images(input_dir, metadata_file_path, batch_shape):
    """Retrieve numpy arrays of images and labels, read from a directory."""
    num_images = batch_shape[0]
    with open(metadata_file_path) as input_file:
        reader = csv.reader(input_file)
        header_row = next(reader)
        rows = list(reader)

    row_idx_image_id = header_row.index('ImageId')
    row_idx_true_label = header_row.index('TrueLabel')
    images = np.zeros(batch_shape)
    labels = np.zeros(num_images, dtype=np.int32)
    for idx in xrange(num_images):
        row = rows[idx]
        filepath = os.path.join(input_dir, row[row_idx_image_id] + '.png')

        with tf.gfile.Open(filepath, 'rb') as f:
            image = np.array(
                Image.open(f).convert('RGB')).astype(np.float) / 255.0
        images[idx, :, :, :] = image
        labels[idx] = int(row[row_idx_true_label])
    return images, labels 
Example #10
Source File: attacks_tf.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def jacobian_graph(predictions, x, nb_classes):
    """
    Create the Jacobian graph to be run later in a TF session
    :param predictions: the model's symbolic output (linear output,
        pre-softmax)
    :param x: the input placeholder
    :param nb_classes: the number of classes the model has
    :return:
    """
    # This function will return a list of TF gradients
    list_derivatives = []

    # Define the TF graph elements to compute our derivatives for each class
    for class_ind in xrange(nb_classes):
        derivatives, = tf.gradients(predictions[:, class_ind], x)
        list_derivatives.append(derivatives)

    return list_derivatives 
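A hedged TensorFlow 1.x sketch of how the returned list might be wired into a session; the placeholder shape, model callable, and class count below are assumptions, not taken from the original project:

# Graph-mode TF 1.x; `model` is a hypothetical callable producing pre-softmax logits.
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
logits = model(x)
grads = jacobian_graph(logits, x, nb_classes=10)
# grads[c] holds d logits[:, c] / d x; evaluate e.g. with sess.run(grads, feed_dict={x: batch}).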
Example #11
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0
def get_bias(self, layer=None):
        if layer is None:
            biases = [self.sess.run(self.biases[n])
                       for n in xrange(self.layer_num-1)]
            return biases
        else:
            return self.sess.run(self.biases[layer]) 
Example #12
Source File: test_noise_components.py    From rlgraph with Apache License 2.0
def test_constant_noise(self):
        real_noise = 200.0

        noise_component = ConstantNoise(value=real_noise)
        test = ComponentTest(component=noise_component, action_space=self.action_input_space)

        for _ in range_(1000):
            test.test(("get_noise", None), expected_outputs=real_noise) 
Example #13
Source File: space_utils.py    From rlgraph with Apache License 2.0
def get_list_registry(from_space, capacity=None, initializer=0, flatten=True, add_batch_rank=False):
    """
    Creates a list storage for a space by providing an ordered dict mapping space names
    to empty lists.

    Args:
        from_space: Space to create registry from.
        capacity (Optional[int]): Optional capacity to initialize the list.
        initializer (Optional(any)): Optional initializer for list if capacity is not None.
        flatten (bool): Whether to produce a FlattenedDataOp with auto-keys.

        add_batch_rank (Optional[bool,int]): If from_space is given and is True, will add a 0th rank (None) to
            the created variable. If it is an int, will add that int instead of None.
            Default: False.

    Returns:
        dict: Container dict mapping spaces to empty lists.
    """
    if flatten:
        if capacity is not None:
            var = from_space.flatten(
                custom_scope_separator="-", scope_separator_at_start=False,
                mapping=lambda k, primitive: [initializer for _ in range_(capacity)]
            )
        else:
            var = from_space.flatten(
                custom_scope_separator="-", scope_separator_at_start=False,
                mapping=lambda k, primitive: []
            )
    else:
        if capacity is not None:
            var = [initializer for _ in range_(capacity)]
        else:
            var = []
    return var 
Example #14
Source File: openai_gym.py    From rlgraph with Apache License 2.0
def noop_reset(self):
        """
        Steps through reset and warm-start.
        """
        if isinstance(self.gym_env, gym.wrappers.Monitor):
            self.gym_env.stats_recorder.done = True
        state = self.gym_env.reset()
        if self.max_num_noops > 0:
            num_noops = np.random.randint(low=1, high=self.max_num_noops + 1)
            # Do a number of noops to vary starting positions.
            for _ in range_(num_noops):
                state, reward, terminal, info = self.gym_env.step(self.noop_action)
                if terminal:
                    state = self.gym_env.reset()
        return state if self.force_float32 is False else np.array(state, dtype=np.float32) 
Example #15
Source File: test_sequence_preprocessor.py    From rlgraph with Apache License 2.0
def test_sequence_preprocessor(self):
        space = FloatBox(shape=(1,), add_batch_rank=True)
        sequencer = Sequence(sequence_length=3, add_rank=True)
        test = ComponentTest(component=sequencer, input_spaces=dict(inputs=space))

        vars = sequencer.get_variables("index", "buffer", global_scope=False)
        index, buffer = vars["index"], vars["buffer"]

        for _ in range_(3):
            test.test("reset")
            index_value, buffer_value = test.read_variable_values(index, buffer)
            self.assertEqual(index_value, -1)
            test.test(("call", np.array([[0.1]])),
                      expected_outputs=np.array([[[0.1, 0.1, 0.1]]]))
            index_value, buffer_value = test.read_variable_values(index, buffer)
            self.assertEqual(index_value, 0)
            test.test(("call", np.array([[0.2]])),
                      expected_outputs=np.array([[[0.1, 0.1, 0.2]]]))
            index_value, buffer_value = test.read_variable_values(index, buffer)
            self.assertEqual(index_value, 1)
            test.test(("call", np.array([[0.3]])),
                      expected_outputs=np.array([[[0.1, 0.2, 0.3]]]))
            index_value, buffer_value = test.read_variable_values(index, buffer)
            self.assertEqual(index_value, 2)
            test.test(("call", np.array([[0.4]])),
                      expected_outputs=np.array([[[0.2, 0.3, 0.4]]]))
            index_value, buffer_value = test.read_variable_values(index, buffer)
            self.assertEqual(index_value, 0)
            test.test(("call", np.array([[0.5]])),
                      expected_outputs=np.array([[[0.3, 0.4, 0.5]]]))
            index_value, buffer_value = test.read_variable_values(index, buffer)
            self.assertEqual(index_value, 1)

        test.terminate()

    # TODO: Make it irrelevant whether we test a python or a tf Component (API and handling should be 100% identical) 
Example #16
Source File: test_python_prioritized_replay.py    From rlgraph with Apache License 2.0
def test_insert(self):
        """
        Simply tests insert op without checking internal logic.
        """
        memory = MemPrioritizedReplay(
            capacity=self.capacity,
            next_states=True,
            alpha=self.alpha,
            beta=self.beta
        )
        memory.create_variables(self.input_spaces)

        observation = memory.record_space_flat.sample(size=1)
        memory.insert_records(observation)

        # Test chunked insert
        observation = memory.record_space_flat.sample(size=5)
        memory.insert_records(observation)

        # Also test Apex version
        memory = ApexMemory(
            capacity=self.capacity,
            alpha=self.alpha,
            beta=self.beta
        )
        observation = self.apex_space.sample(size=5)
        for i in range_(5):
            memory.insert_records((
                observation['states'][i],
                observation['actions'][i],
                observation['reward'][i],
                observation['terminals'][i],
                observation['states'][i],
                observation["weights"][i]
            )) 
Example #17
Source File: test_python_memory_performance.py    From rlgraph with Apache License 2.0
def test_rlgraph_combined_ops(self):
        """
        Tests a combined workflow of insert, sample, update on the prioritized replay memory.
        """
        memory = ApexMemory(
            capacity=self.capacity,
            alpha=1.0
        )

        chunksize = 32
        chunks = int(self.inserts / chunksize)
        records = [self.record_space.sample(size=chunksize) for _ in range_(chunks)]
        loss_values = [np.random.random(size=self.sample_batch_size) for _ in range_(chunks)]

        start = time.monotonic()
        for chunk, loss_values in zip(records, loss_values):
            # Each record now is a chunk.
            for i in range_(chunksize):
                memory.insert_records((
                    ray_compress(chunk['states'][i]),
                    chunk['actions'][i],
                    chunk['reward'][i],
                    chunk['terminals'][i],
                    None
                ))
            batch, indices, weights = memory.get_records(self.sample_batch_size)
            memory.update_records(indices, loss_values)

        end = time.monotonic() - start
        tp = len(records) / end
        print('RLGraph: Testing combined op performance:')
        print('Ran {} combined ops, throughput: {} combined ops/s, total time: {} s'.format(
            len(records), tp, end
        )) 
Example #18
Source File: test_python_memory_performance.py    From rlgraph with Apache License 2.0
def test_rlgraph_sampling(self):
        """
        Tests RLgraph's sampling performance.
        """
        memory = ApexMemory(
            capacity=self.capacity,
            alpha=1.0
        )

        records = [self.record_space.sample(size=1) for _ in range_(self.inserts)]
        for record in records:
            memory.insert_records((
                 ray_compress(record['states']),
                 record['actions'],
                 record['reward'],
                 record['terminals'],
                 None
            ))
        start = time.monotonic()
        for _ in range_(self.samples):
            batch_tuple = memory.get_records(self.sample_batch_size)
        end = time.monotonic() - start
        tp = self.samples / end
        print('#### Testing RLGraph Prioritized Replay memory ####')
        print('Testing sampling performance:')
        print('Sampled {} batches, throughput: {} batches/s, total time: {} s'.format(
            self.samples, tp, end
        )) 
Example #19
Source File: test_python_memory_performance.py    From rlgraph with Apache License 2.0
def test_rlgraph_updating(self):
        """
        Tests RLGraph's update performance.
        """
        memory = ApexMemory(
            capacity=self.capacity,
            alpha=1.0
        )

        records = [self.record_space.sample(size=1) for _ in range_(self.inserts)]
        for record in records:
            memory.insert_records((
                 record['states'],
                 record['actions'],
                 record['reward'],
                 record['terminals'],
                 None
            ))
        loss_values = [np.random.random(size=self.sample_batch_size) for _ in range_(self.samples)]
        indices = [np.random.randint(low=0, high=self.inserts, size=self.sample_batch_size) for _
                   in range_(self.samples)]

        start = time.monotonic()
        for index, loss in zip(indices, loss_values):
            memory.update_records(index, loss)
        end = time.monotonic() - start
        tp = len(indices) / end
        print('#### Testing RLGraph Prioritized Replay memory ####')
        print('Testing updating performance:')
        print('Updates {} loss batches, throughput: {} updates/s, total time: {} s'.format(
            len(indices), tp, end
        )) 
Example #20
Source File: test_python_memory_performance.py    From rlgraph with Apache License 2.0
def test_ray_combined_ops(self):
        """
        Tests a combined workflow of insert, sample, update on the prioritized replay memory.
        """
        assert get_distributed_backend() == "ray"
        memory = PrioritizedReplayBuffer(
            size=self.capacity,
            alpha=1.0,
            clip_rewards=True
        )
        chunksize = 32

        # Test chunked inserts -> done via external for loop in Ray.
        chunks = int(self.inserts / chunksize)
        records = [self.record_space.sample(size=chunksize) for _ in range_(chunks)]
        loss_values = [np.random.random(size=self.sample_batch_size) for _ in range_(chunks)]
        start = time.monotonic()

        for chunk, loss_values in zip(records, loss_values):
            # Insert.
            for i in range_(chunksize):
                memory.add(
                    obs_t=ray_compress(chunk['states'][i]),
                    action=chunk['actions'][i],
                    reward=chunk['reward'][i],
                    obs_tp1=ray_compress(chunk['states'][i]),
                    done=chunk['terminals'][i],
                    weight=None
                )
            # Sample.
            batch_tuple = memory.sample(self.sample_batch_size, beta=1.0)
            indices = batch_tuple[-1]
            # Update
            memory.update_priorities(indices, loss_values)

        end = time.monotonic() - start
        tp = len(records) / end
        print('Ray: testing combined insert/sample/update performance:')
        print('Ran {} combined ops, throughput: {} combined ops/s, total time: {} s'.format(
            len(records), tp, end
        )) 
Example #21
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0
def get_weight(self, layer=None):
        if layer is None:
            weights = [self.sess.run(self.weights[n])
                        for n in xrange(self.layer_num-1)]
            return weights
        else:
            return self.sess.run(self.weights[layer]) 
Example #22
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0
def set_bias(self, bias, layer=None):
        if layer is None:
            assign_op = [self.biases[n].assign(bias[n])
                              for n in xrange(self.layer_num-1)]
        else:
            assign_op = self.biases[layer].assign(bias)
        self.sess.run(assign_op) 
Example #23
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0
def _obtain_score(cls, data, weights, biases, act_func, label):
        num_layer = len(weights) + 1
        outputs = [data]
        for n in xrange(num_layer-1):
            with tf.name_scope("layer"+str(n)+"_"+label):
                input_n = outputs[n]
                w_n = weights[n]
                b_n = biases[n]
                output_n = act_func(tf.matmul(input_n, w_n) + b_n)
                outputs.append(output_n)
        score = outputs[-1]
        return score 
Example #24
Source File: test_sequence_preprocessor.py    From rlgraph with Apache License 2.0
def test_python_sequence_preprocessor(self):
        seq_len = 3
        space = FloatBox(shape=(1,), add_batch_rank=True)
        sequencer = Sequence(sequence_length=seq_len, batch_size=4, add_rank=True, backend="python")
        sequencer.create_variables(input_spaces=dict(inputs=space))

        #test = ComponentTest(component=sequencer, input_spaces=dict(call=space))

        for _ in range_(3):
            sequencer._graph_fn_reset()
            self.assertEqual(sequencer.index, -1)
            input_ = np.asarray([[1.0], [2.0], [3.0], [4.0]])
            out = sequencer._graph_fn_call(input_)
            self.assertEqual(sequencer.index, 0)
            recursive_assert_almost_equal(
                out, np.asarray([[[1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0]], [[4.0, 4.0, 4.0]]])
            )
            input_ = np.asarray([[1.1], [2.2], [3.3], [4.4]])
            out = sequencer._graph_fn_call(input_)
            self.assertEqual(sequencer.index, 1)
            recursive_assert_almost_equal(
                out, np.asarray([[[1.0, 1.0, 1.1]], [[2.0, 2.0, 2.2]], [[3.0, 3.0, 3.3]], [[4.0, 4.0, 4.4]]])
            )
            input_ = np.asarray([[1.11], [2.22], [3.33], [4.44]])
            out = sequencer._graph_fn_call(input_)
            self.assertEqual(sequencer.index, 2)
            recursive_assert_almost_equal(
                out, np.asarray([[[1.0, 1.1, 1.11]], [[2.0, 2.2, 2.22]], [[3.0, 3.3, 3.33]], [[4.0, 4.4, 4.44]]])
            )
            input_ = np.asarray([[10], [20], [30], [40]])
            out = sequencer._graph_fn_call(input_)
            self.assertEqual(sequencer.index, 0)
            recursive_assert_almost_equal(
                out, np.asarray([[[1.1, 1.11, 10]], [[2.2, 2.22, 20]], [[3.3, 3.33, 30]], [[4.4, 4.44, 40]]])
            ) 
Example #25
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0
def _setup_pretraining(self):
        """
        Set up a data flow graph for pretraining by sAE
        """
        input_dim = self.input_dim
        weights = self.weights
        biases = self.biases
        layer_num = self.layer_num
        lr = self.learning_rate
        act_func = ACTIVATE_FUNC[self.activate_func]
        optimizer = OPTIMIZER[self.optimizer]

        self.pt_input = input_ = tf.placeholder("float", shape=[None, None],
                                                name="input_to_ae")
        self.pretrain_layer = []
        self.encode = []
        self.recon_errs = []
        with tf.name_scope("pretraing"):
            for n in xrange(layer_num-2):
                w = weights[n]
                b = biases[n]
                label = str(n)
                encoded, recon_err = self._get_reconstruction_error(input_,
                                                                    w, b,
                                                                    act_func,
                                                                    label)
                opt_op = optimizer(lr).minimize(recon_err)
                self.pretrain_layer.append(opt_op)
                self.encode.append(encoded)
                self.recon_errs.append(recon_err) 
Example #26
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0 5 votes vote down vote up
def _setup_variables(self):
        hidden_units = self.hidden_units
        layer_units = [self.input_dim] + hidden_units + [1]
        layer_num = len(layer_units)
        #setting weights and biases
        self.weights = weights = []
        self.biases = biases = []
        for n in xrange(layer_num-1):
            w_shape = [layer_units[n], layer_units[n+1]]
            b_shape = [layer_units[n+1]]
            w = self._get_weight_variable(w_shape, n)
            b = self._get_bias_variable(b_shape, n)
            weights.append(w)
            biases.append(b) 
Example #27
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0 5 votes vote down vote up
def _fine_tuning(self, data1, data2, logdir):
        #set data to enqueue op(not executed yet)
        data1 = np.array(data1)
        data2 = np.array(data2)
        with self.graph.as_default():
            enq = self.queue.enqueue_many((data1, data2))
        #Not used after defining enqueue op
        del data1
        del data2

        qr = tf.train.QueueRunner(self.queue, [enq]*self.threads)
        sess = self.sess
        coord = tf.train.Coordinator()
        enqueue_threads = qr.create_threads(sess, coord=coord, start=True)
        if logdir:
            writer = tf.train.SummaryWriter(logdir, sess.graph_def)
        #Run the training loop, controlling termination with the coord
        try:
            for step in xrange(self.max_steps):
                if coord.should_stop():
                    break
                cost, sm, _ = sess.run([self.cost, self.summary,
                                        self.optimize])
                if self.verbose and step%self.verbose_step==0:
                    print("The %dth cost:%f"%(step, cost))
                if logdir:
                    writer.add_summary(sm, step)
        except Exception as e:
            coord.request_stop(e)
        coord.request_stop()
        coord.join(enqueue_threads)
        return cost 
Example #28
Source File: ranknet.py    From tfranknet with GNU General Public License v2.0 5 votes vote down vote up
def pretrain(self, data):
        input_dim = data.shape[1]
        data_size = data.shape[0]
        self.initialize_graph(input_dim)
        layer_num = self.layer_num
        max_steps = self.max_steps
        batch_size = self.batch_size
        sess = self.sess
        pretrain_layer = self.pretrain_layer
        recon_errs = []
        for n in xrange(layer_num-2):
            np.random.shuffle(data)
            #train loop
            for step in xrange(max_steps):
                start = step * batch_size
                stop = (step+1) * batch_size
                if start >= data_size:
                    break
                batch = data[start: stop]
                _, err = sess.run([pretrain_layer[n], self.recon_errs[n]],
                        feed_dict={self.pt_input: batch})
                mean_err = err / batch.shape[0]
                if step%self.verbose_step == 0 and self.verbose:
                    print("Reconstruction Error:", mean_err)
            data = sess.run(self.encode[n], feed_dict={self.pt_input: data})
            recon_errs.append(mean_err)
        return recon_errs 
Example #29
Source File: test_sequence_preprocessor.py    From rlgraph with Apache License 2.0 5 votes vote down vote up
def test_sequence_preprocessor_with_container_space(self):
        # Test with no batch rank.
        space = Tuple(
            FloatBox(shape=(1,)),
            FloatBox(shape=(2, 2)),
            add_batch_rank=False
        )

        component_to_test = Sequence(sequence_length=4, add_rank=False)
        test = ComponentTest(component=component_to_test, input_spaces=dict(inputs=space))

        for i in range_(3):
            test.test("reset")

            test.test(("call", (tuple([np.array([0.5]), np.array([[0.6, 0.7], [0.8, 0.9]])]),)),
                      expected_outputs=(np.array([0.5, 0.5, 0.5, 0.5]), np.array([[0.6, 0.7] * 4,
                                                                                  [0.8, 0.9] * 4])))
            test.test(("call", (tuple([np.array([0.6]), np.array([[1.1, 1.1], [1.1, 1.1]])]),)),
                      expected_outputs=(np.array([0.5, 0.5, 0.5, 0.6]), np.array([[0.6, 0.7, 0.6, 0.7,
                                                                                   0.6, 0.7, 1.1, 1.1],
                                                                                  [0.8, 0.9, 0.8, 0.9,
                                                                                   0.8, 0.9, 1.1, 1.1]])))
            test.test(("call", (tuple([np.array([0.7]), np.array([[2.0, 2.1], [2.2, 2.3]])]),)),
                      expected_outputs=(np.array([0.5, 0.5, 0.6, 0.7]), np.array([[0.6, 0.7, 0.6, 0.7,
                                                                                   1.1, 1.1, 2.0, 2.1],
                                                                                  [0.8, 0.9, 0.8, 0.9,
                                                                                   1.1, 1.1, 2.2, 2.3]])))

        test.terminate() 
Example #30
Source File: test_noise_components.py    From rlgraph with Apache License 2.0
def test_ornstein_uhlenbeck_noise(self):
        ou_theta = 0.15
        ou_mu = 10.0
        ou_sigma = 2.0

        noise_component = OrnsteinUhlenbeckNoise(
            theta=ou_theta, mu=ou_mu, sigma=ou_sigma
        )
        test = ComponentTest(component=noise_component, action_space=self.action_input_space)

        # Collect outputs in `collected` list to compare moments.
        collected = list()
        collect_outs = lambda component_test, outs: collected.append(outs)

        for _ in range_(1000):
            test.test(("get_noise", None), fn_test=collect_outs)

        test_mean = np.mean(collected)
        test_sd = np.std(collected)

        print("Moments: {} / {}".format(test_mean, test_sd))

        # Empirical mean should be within 2 sds of the real mean.
        self.assertGreater(ou_mu, test_mean - test_sd * 2)
        self.assertLess(ou_mu, test_mean + test_sd * 2)

        # Empirical sd should be within a 45% to 200% interval.
        self.assertGreater(ou_sigma, test_sd * 0.45)
        self.assertLess(ou_sigma, test_sd * 2.0)

        # TODO: Maybe test time correlation?