Python numpy.cast Examples

The following are 30 code examples of numpy.cast. numpy.cast is not called directly; it is a mapping from dtypes to conversion functions, so the usual idiom is np.cast[dtype](array). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
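Before the examples, here is a minimal sketch of the idiom they all rely on (the array contents are made up for illustration): indexing np.cast with a dtype returns a casting function, which is equivalent to calling astype on the array. Note that numpy.cast has since been deprecated and removed in newer NumPy releases, where astype is the direct replacement.

import numpy as np

x = np.array([0, 1, 2, 3])

# np.cast[dtype] returns a function that casts its argument to that dtype...
a = np.cast['float32'](x)
# ...which does the same thing as an explicit astype call.
b = np.asarray(x).astype('float32')

assert a.dtype == b.dtype == np.dtype('float32')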
Example #1
Source File: data_utils.py    From EvolutionaryGAN with MIT License
def ImgBatchRescale(img,center=True,scale=True, convert_back=False):
    img = np.array(img)
    img = np.cast['float32'](img)
    if convert_back is True:
        b,C,H,W = img.shape
        print(img.dtype)
        imgh = np.zeros((b,H,W,C),dtype=img.dtype)
        for i in range(b):
            imgh[i,:,:,:] = convert_img_back(img[i,:,:,:])
        img = imgh
    if center and scale:
        img = ((img+1) / 2 * 255).astype(np.uint8) 
    elif center:
        img = (img + 127.5).astype(np.uint8)
    elif scale:
        img = (img * 255).astype(np.uint8)
    return img 
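A hedged usage sketch for ImgBatchRescale above, assuming a batch of NCHW images already scaled to [-1, 1]; the batch shape and values are made up for illustration.

import numpy as np

batch = np.random.uniform(-1., 1., size=(4, 3, 32, 32)).astype('float32')
# With the defaults (center=True, scale=True), [-1, 1] is mapped to uint8 [0, 255].
out = ImgBatchRescale(batch)

assert out.dtype == np.uint8
assert out.min() >= 0 and out.max() <= 255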
Example #2
Source File: nn.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def get_output_for(self, input, deterministic=False, **kwargs):
        if deterministic:
            norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
        else:
            batch_mean = T.mean(input,axis=self.axes_to_sum).flatten()
            centered_input = input-batch_mean.dimshuffle(*self.dimshuffle_args)
            batch_var = T.mean(T.square(centered_input),axis=self.axes_to_sum).flatten()
            batch_stdv = T.sqrt(1e-6 + batch_var)
            norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)

            # BN updates
            new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
            new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1),th.config.floatX)*batch_var
            self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]

        if hasattr(self, 'g'):
            activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
        else:
            activation = norm_features
        if hasattr(self, 'b'):
            activation += self.b.dimshuffle(*self.dimshuffle_args)

        return self.nonlinearity(activation) 
Example #3
Source File: nn.py    From weightnorm with MIT License
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    for p, g in zip(params, grads):
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v_t = mom1*v + (1. - mom1)*g
        mg_t = mom2*mg + (1. - mom2)*T.square(g)
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append((v, v_t))
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    updates.append((t, t+1))
    return updates 
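A minimal sketch of how adam_updates above might be compiled into a Theano training step, assuming Theano is installed; the input variable, the single parameter w, and the toy least-squares cost are all hypothetical.

import numpy as np
import theano as th
import theano.tensor as T

x = T.matrix('x')                                             # hypothetical input batch
w = th.shared(np.zeros(5, dtype=th.config.floatX), name='w')  # hypothetical parameter
cost = T.mean(T.sqr(T.dot(x, w) - 1.))                        # toy cost for illustration

updates = adam_updates([w], cost, lr=1e-3)
train_step = th.function([x], cost, updates=updates)  # each call applies one Adam step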
Example #4
Source File: nn.py    From deligan with MIT License
def get_output_for(self, input, deterministic=False, **kwargs):
        if deterministic:
            norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
        else:
            batch_mean = T.mean(input,axis=self.axes_to_sum).flatten()
            centered_input = input-batch_mean.dimshuffle(*self.dimshuffle_args)
            batch_var = T.mean(T.square(centered_input),axis=self.axes_to_sum).flatten()
            batch_stdv = T.sqrt(1e-6 + batch_var)
            norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)

            # BN updates
            new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
            new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1),th.config.floatX)*batch_var
            self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]

        if hasattr(self, 'g'):
            activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
        else:
            activation = norm_features
        if hasattr(self, 'b'):
            activation += self.b.dimshuffle(*self.dimshuffle_args)

        return self.nonlinearity(activation) 
Example #5
Source File: sgd_alt.py    From adversarial with BSD 3-Clause "New" or "Revised" License
def __call__(self, algorithm):
        """
        Adjusts the learning rate according to the linear decay schedule

        Parameters
        ----------
        algorithm : WRITEME
        """
        if self._count == 0:
            self._base_lr = algorithm.learning_rate.get_value()
            self._step = ((self._base_lr - self._base_lr * self.decay_factor) /
                          (self.saturate - self.start + 1))
        self._count += 1
        if self._count >= self.start:
            if self._count < self.saturate:
                new_lr = self._base_lr - self._step * (self._count
                        - self.start + 1)
            else:
                new_lr = self._base_lr * self.decay_factor
        else:
            new_lr = self._base_lr
        assert new_lr > 0
        new_lr = np.cast[config.floatX](new_lr)
        algorithm.learning_rate.set_value(new_lr) 
Example #6
Source File: sgd_alt.py    From adversarial with BSD 3-Clause "New" or "Revised" License
def on_monitor(self, model, dataset, algorithm):
        """
        Adjusts the learning rate according to the decay schedule.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """

        if not self._initialized:
            self._init_lr = algorithm.learning_rate.get_value()
            if self._init_lr < self.min_lr:
                raise ValueError("The initial learning rate is smaller than " +
                                 "the minimum allowed learning rate.")
            self._initialized = True
        self._count += 1
        algorithm.learning_rate.set_value(np.cast[config.floatX](
            self.current_lr())) 
Example #7
Source File: nn.py    From opt-mmd with BSD 3-Clause "New" or "Revised" License
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    for p, g in zip(params, grads):
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v_t = mom1*v + (1. - mom1)*g
        mg_t = mom2*mg + (1. - mom2)*T.square(g)
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append((v, v_t))
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    updates.append((t, t+1))
    return updates 
Example #8
Source File: nn.py    From weightnorm with MIT License
def adamax_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    for p, g in zip(params, grads):
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        if mom1>0:
            v_t = mom1*v + (1. - mom1)*g
            updates.append((v,v_t))
        else:
            v_t = g
        mg_t = T.maximum(mom2*mg, abs(g))
        g_t = v_t / (mg_t + 1e-6)
        p_t = p - lr * g_t
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    return updates 
Example #9
Source File: nn.py    From weightnorm with MIT License
def get_output_for(self, input, deterministic=False, **kwargs):
        if deterministic:
            norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
        else:
            batch_mean = T.mean(input,axis=self.axes_to_sum).flatten()
            centered_input = input-batch_mean.dimshuffle(*self.dimshuffle_args)
            batch_var = T.mean(T.square(centered_input),axis=self.axes_to_sum).flatten()
            batch_stdv = T.sqrt(1e-6 + batch_var)
            norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)
            
            # BN updates
            new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
            new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1.), th.config.floatX)*batch_var
            self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]
            
        if hasattr(self, 'g'):
            activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
        else:
            activation = norm_features
        if hasattr(self, 'b'):
            activation += self.b.dimshuffle(*self.dimshuffle_args)
            
        return self.nonlinearity(activation) 
Example #10
Source File: sgd.py    From adversarial with BSD 3-Clause "New" or "Revised" License
def on_monitor(self, model, dataset, algorithm):
        """
        Updates the learning rate based on the linear decay schedule.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """
        if not self._initialized:
            self._init_lr = algorithm.learning_rate.get_value()
            self._step = ((self._init_lr - self._init_lr * self.decay_factor) /
                          (self.saturate - self.start + 1))
            self._initialized = True
        self._count += 1
        algorithm.learning_rate.set_value(np.cast[config.floatX](
            self.current_lr())) 
Example #11
Source File: sgd.py    From adversarial with BSD 3-Clause "New" or "Revised" License
def on_monitor(self, model, dataset, algorithm):
        """
        Adjusts the learning rate according to the decay schedule.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """

        if not self._initialized:
            self._init_lr = algorithm.learning_rate.get_value()
            if self._init_lr < self.min_lr:
                raise ValueError("The initial learning rate is smaller than " +
                                 "the minimum allowed learning rate.")
            self._initialized = True
        self._count += 1
        algorithm.learning_rate.set_value(np.cast[config.floatX](
            self.current_lr())) 
Example #12
Source File: sgd_alt.py    From adversarial with BSD 3-Clause "New" or "Revised" License
def on_monitor(self, model, dataset, algorithm):
        """
        Updates the learning rate based on the linear decay schedule.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """
        if not self._initialized:
            self._init_lr = algorithm.learning_rate.get_value()
            self._step = ((self._init_lr - self._init_lr * self.decay_factor) /
                          (self.saturate - self.start + 1))
            self._initialized = True
        self._count += 1
        algorithm.learning_rate.set_value(np.cast[config.floatX](
            self.current_lr())) 
Example #13
Source File: test_nnet.py    From D-VAE with MIT License
def test_stabilize_log_softmax():
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including('local_log_softmax', 'specialize')

    x = matrix()
    y = softmax(x)
    z = theano.tensor.log(y)

    f = theano.function([x], z, mode=mode)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')

    # check that the softmax has been optimized out
    for node in f.maker.fgraph.toposort():
        assert not isinstance(node.op, y.owner.op.__class__)

    # call the function so debug mode can verify the optimized
    # version matches the unoptimized version
    rng = numpy.random.RandomState([2012, 8, 22])
    f(numpy.cast[config.floatX](rng.randn(2, 3))) 
Example #14
Source File: test_extra_ops.py    From D-VAE with MIT License
def test_perform(self):
        x = tensor.matrix()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        for shp in [(8, 8), (5, 8), (8, 5)]:
            a = numpy.random.rand(*shp).astype(config.floatX)
            val = numpy.cast[config.floatX](numpy.random.rand())
            out = f(a, val)
            # We can't use numpy.fill_diagonal as it is bugged.
            assert numpy.allclose(numpy.diag(out), val)
            assert (out == val).sum() == min(a.shape)

        # test for 3d tensor
        a = numpy.random.rand(3, 3, 3).astype(config.floatX)
        x = tensor.tensor3()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        val = numpy.cast[config.floatX](numpy.random.rand() + 10)
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert out[0, 0, 0] == val
        assert out[1, 1, 1] == val
        assert out[2, 2, 2] == val
        assert (out == val).sum() == min(a.shape) 
Example #15
Source File: test_extra_ops.py    From D-VAE with MIT License
def test_perform(self):
        x = tensor.matrix()
        y = tensor.scalar()
        z = tensor.iscalar()

        f = function([x, y, z], fill_diagonal_offset(x, y, z))
        for test_offset in (-5, -4, -1, 0, 1, 4, 5):
            for shp in [(8, 8), (5, 8), (8, 5), (5, 5)]:
                a = numpy.random.rand(*shp).astype(config.floatX)
                val = numpy.cast[config.floatX](numpy.random.rand())
                out = f(a, val, test_offset)
                # We can't use numpy.fill_diagonal as it is bugged.
                assert numpy.allclose(numpy.diag(out, test_offset), val)
                if test_offset >= 0:
                    assert (out == val).sum() == min(min(a.shape),
                                                     a.shape[1] - test_offset)
                else:
                    assert (out == val).sum() == min(min(a.shape),
                                                     a.shape[0] + test_offset)
Example #16
Source File: test_basic_ops.py    From D-VAE with MIT License
def test_elemwise_comparaison_cast():
    """
    Test whether an elemwise comparison followed by a cast to float32 is
    pushed to the GPU.
    """

    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:

        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)

        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()]) 
Example #17
Source File: nn.py    From deligan with MIT License
def get_output_for(self, input, deterministic=False, **kwargs):
        if deterministic:
            norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
        else:
            batch_mean = T.mean(input,axis=self.axes_to_sum).flatten()
            centered_input = input-batch_mean.dimshuffle(*self.dimshuffle_args)
            batch_var = T.mean(T.square(centered_input),axis=self.axes_to_sum).flatten()
            batch_stdv = T.sqrt(1e-6 + batch_var)
            norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)

            # BN updates
            new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
            new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1),th.config.floatX)*batch_var
            self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]

        if hasattr(self, 'g'):
            activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
        else:
            activation = norm_features
        if hasattr(self, 'b'):
            activation += self.b.dimshuffle(*self.dimshuffle_args)

        return self.nonlinearity(activation) 
Example #18
Source File: nn.py    From deligan with MIT License
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    for p, g in zip(params, grads):
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v_t = mom1*v + (1. - mom1)*g
        mg_t = mom2*mg + (1. - mom2)*T.square(g)
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append((v, v_t))
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    updates.append((t, t+1))
    return updates 
Example #19
Source File: test_gradient.py    From D-VAE with MIT License
def test_grad_disconnected(self):

        # tests corner cases of gradient for shape and alloc

        x = theano.tensor.vector(name='x')
        total = x.sum()
        total.name = 'total'
        num_elements = x.shape[0]
        num_elements.name = 'num_elements'
        silly_vector = theano.tensor.alloc(total / num_elements, num_elements)
        silly_vector.name = 'silly_vector'
        cost = silly_vector.sum()
        cost.name = 'cost'
        # note that cost simplifies to be the same as "total"
        g = gradient.grad(cost, x, add_names=False)
        # we still need to pass in x because it determines the shape of
        # the output
        f = theano.function([x], g)
        rng = np.random.RandomState([2012, 9, 5])
        x = np.cast[x.dtype](rng.randn(3))
        g = f(x)
        assert np.allclose(g, np.ones(x.shape, dtype=x.dtype)) 
Example #20
Source File: constructors.py    From OpenDeep with Apache License 2.0
def as_floatX(variable):
    """
    Casts a given variable into dtype `theano.config.floatX`. Numpy ndarrays will
    remain numpy ndarrays, python floats will become 0-D ndarrays and
    all other types will be treated as theano tensors.

    Parameters
    ----------
    variable: int, float, numpy array, or tensor
        The input to convert to type `theano.config.floatX`.

    Returns
    -------
    numpy array or tensor
        The input `variable` casted as type `theano.config.floatX`.
    """
    if isinstance(variable, (integer_types, float, numpy.number, numpy.ndarray)):
        return numpy.cast[theano.config.floatX](variable)

    return theano.tensor.cast(variable, theano.config.floatX) 
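A short sketch of the two branches of as_floatX above, assuming Theano is available and the imports of the original constructors.py module; the values are illustrative only.

import numpy
import theano
import theano.tensor as T

# Python floats and numpy arrays come back as numpy values of dtype floatX...
lr = as_floatX(0.01)
assert lr.dtype == numpy.dtype(theano.config.floatX)

# ...while symbolic variables are wrapped in a symbolic cast instead.
x = T.ivector('x')
assert as_floatX(x).dtype == theano.config.floatX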
Example #21
Source File: utils.py    From GroundHog with BSD 3-Clause "New" or "Revised" License
def as_floatX(variable):
    """
    This code is taken from pylearn2:
    Casts a given variable into dtype config.floatX.
    Numpy ndarrays will remain numpy ndarrays,
    python floats will become 0-D ndarrays,
    and all other types will be treated as theano tensors.
    """

    if isinstance(variable, float):
        return numpy.cast[theano.config.floatX](variable)

    if isinstance(variable, numpy.ndarray):
        return numpy.cast[theano.config.floatX](variable)

    return theano.tensor.cast(variable, theano.config.floatX) 
Example #22
Source File: test_basic.py    From D-VAE with MIT License
def test_correct_answer(self):
        a = T.matrix()
        b = T.matrix()

        x = T.tensor3()
        y = T.tensor3()

        A = numpy.cast[theano.config.floatX](numpy.random.rand(5, 3))
        B = numpy.cast[theano.config.floatX](numpy.random.rand(7, 2))
        X = numpy.cast[theano.config.floatX](numpy.random.rand(5, 6, 1))
        Y = numpy.cast[theano.config.floatX](numpy.random.rand(1, 9, 3))

        make_list((3., 4.))
        c = make_list((a, b))
        z = make_list((x, y))
        fc = theano.function([a, b], c)
        fz = theano.function([x, y], z)
        # Wrap the generators in all(); a bare generator object passed to
        # assertTrue is always truthy, so the original form could not fail.
        self.assertTrue(all((m == n).all() for m, n in zip(fc(A, B), [A, B])))
        self.assertTrue(all((m == n).all() for m, n in zip(fz(X, Y), [X, Y]))) 
Example #23
Source File: nn.py    From GELUs with MIT License
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    for p, g in zip(params, grads):
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v_t = mom1*v + (1. - mom1)*g
        mg_t = mom2*mg + (1. - mom2)*T.square(g)
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append((v, v_t))
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    updates.append((t, t+1))
    return updates 
Example #24
Source File: nn.py    From GELUs with MIT License
def adamax_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    for p, g in zip(params, grads):
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        if mom1>0:
            v_t = mom1*v + (1. - mom1)*g
            updates.append((v,v_t))
        else:
            v_t = g
        mg_t = T.maximum(mom2*mg, abs(g))
        g_t = v_t / (mg_t + 1e-6)
        p_t = p - lr * g_t
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    return updates 
Example #25
Source File: test_bernoulli_mlp_regressor.py    From garage with MIT License
def test_is_pickleable(self):
        bmr = BernoulliMLPRegressor(input_shape=(1, ), output_dim=2)

        with tf.compat.v1.variable_scope(
                'BernoulliMLPRegressor/NormalizedInputMLPModel', reuse=True):
            bias = tf.compat.v1.get_variable('mlp/hidden_0/bias')
        bias.load(tf.ones_like(bias).eval())
        bias1 = bias.eval()

        result1 = np.cast['int'](bmr.predict(np.ones((1, 1))))
        h = pickle.dumps(bmr)

        with tf.compat.v1.Session(graph=tf.Graph()):
            bmr_pickled = pickle.loads(h)
            result2 = np.cast['int'](bmr_pickled.predict(np.ones((1, 1))))
            assert np.array_equal(result1, result2)

            with tf.compat.v1.variable_scope(
                    'BernoulliMLPRegressor/NormalizedInputMLPModel',
                    reuse=True):
                bias2 = tf.compat.v1.get_variable('mlp/hidden_0/bias').eval()

            assert np.array_equal(bias1, bias2) 
Example #26
Source File: test_bernoulli_mlp_regressor.py    From garage with MIT License
def test_fit_with_no_trust_region(self, input_shape, output_dim):
        bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                    output_dim=output_dim,
                                    use_trust_region=False)

        observations, returns = get_train_data(input_shape, output_dim)

        for _ in range(150):
            bmr.fit(observations, returns)

        paths, expected = get_test_data(input_shape, output_dim)
        prediction = np.cast['int'](bmr.predict(paths['observations']))

        assert np.allclose(prediction, expected, rtol=0, atol=0.1)

        x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(bmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)

        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected) 
Example #27
Source File: test_bernoulli_mlp_regressor.py    From garage with MIT License
def test_fit_unnormalized(self, input_shape, output_dim):
        bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                    output_dim=output_dim,
                                    normalize_inputs=False)

        observations, returns = get_train_data(input_shape, output_dim)

        for _ in range(150):
            bmr.fit(observations, returns)

        paths, expected = get_test_data(input_shape, output_dim)

        prediction = np.cast['int'](bmr.predict(paths['observations']))

        assert np.allclose(prediction, expected, rtol=0, atol=0.1)

        x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
        x_mean_expected = np.zeros_like(x_mean)
        x_std = self.sess.run(bmr.model._networks['default'].x_std)
        x_std_expected = np.ones_like(x_std)

        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    # yapf: disable 
Example #28
Source File: test_bernoulli_mlp_regressor.py    From garage with MIT License
def test_fit_normalized(self, input_shape, output_dim):
        bmr = BernoulliMLPRegressor(input_shape=input_shape,
                                    output_dim=output_dim)

        observations, returns = get_train_data(input_shape, output_dim)

        for _ in range(150):
            bmr.fit(observations, returns)

        paths, expected = get_test_data(input_shape, output_dim)

        prediction = np.cast['int'](bmr.predict(paths['observations']))
        assert np.allclose(prediction, expected, rtol=0, atol=0.1)

        x_mean = self.sess.run(bmr.model._networks['default'].x_mean)
        x_mean_expected = np.mean(observations, axis=0, keepdims=True)
        x_std = self.sess.run(bmr.model._networks['default'].x_std)
        x_std_expected = np.std(observations, axis=0, keepdims=True)

        assert np.allclose(x_mean, x_mean_expected)
        assert np.allclose(x_std, x_std_expected)

    # yapf: disable 
Example #29
Source File: xor.py    From gandlf with MIT License
def get_training_data(num_samples):
    """Generates some training data."""

    # As (x, y) Cartesian coordinates.
    x = np.random.randint(0, 2, size=(num_samples, 2))

    y = x[:, 0] + 2 * x[:, 1]  # 2-digit binary to integer.
    y = np.cast['int32'](y)

    x = np.cast['float32'](x) * 1.6 - 0.8  # Scales to [-1, 1].
    x += np.random.uniform(-0.1, 0.1, size=x.shape)

    y_ohe = np.cast['float32'](np.eye(4)[y])
    y = np.cast['float32'](np.expand_dims(y, -1))

    return x, y, y_ohe 
Example #30
Source File: bernoulli.py    From garage with MIT License
def sample(self, dist_info):
        p = np.asarray(dist_info['p'])
        return np.cast['int'](
            np.random.uniform(low=0., high=1., size=p.shape) < p)
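The same sampling idiom in isolation, as a plain numpy sketch; the probabilities are made up.

import numpy as np

p = np.array([0.1, 0.5, 0.9])
# Each entry is 1 with probability p[i] and 0 otherwise; np.cast['int'] turns
# the boolean comparison result into an integer array.
samples = np.cast['int'](np.random.uniform(low=0., high=1., size=p.shape) < p)
print(samples)  # e.g. [0 1 1]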