Python autograd.numpy.array() Examples

The following code examples show how to use autograd.numpy.array(). They are taken from open-source Python projects.
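
For orientation, here is a minimal, self-contained sketch (ours, not from any of the projects below) of why autograd.numpy.array matters: arrays built through autograd's NumPy wrapper can flow through automatic differentiation.

import autograd.numpy as np
from autograd import grad

def loss(w):
    x = np.array([1.0, 2.0, 3.0])  # constants created with autograd's np.array
    return np.sum((w - x) ** 2)

# gradient of sum((w - x)^2) is 2 * (w - x)
print(grad(loss)(np.zeros(3)))  # -> [-2. -4. -6.]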

Example 1
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def log_losses(self, X=None, y=None, L2_alpha=None):
        # TODO check
        if X is None or y is None:
            assert X is None and y is None
            X = self.X_
            y = self.y_
        assert len(X.shape) == 2, "X must be 2d"
        X_b = LogisticRegression._X_b(X)
        if L2_alpha is None:
            L2_alpha = self.L2_alpha

        W_b = self.W_b
        losses = []
        for _xx, _yy in zip(X_b, y):
            loss = LogisticRegression._log_loss(W=W_b.flatten(),
                                                X=_xx.reshape(1, -1),
                                                y=_yy,
                                                L2_alpha=L2_alpha,
                                                intercept=self.fit_intercept,
                                                n_classes=self.n_classes_,
                                                W_shape=W_b.shape)
            losses.append(loss)
        losses = np.array(losses)
        return losses 
Example 2
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def losses(self, X=None, y=None, L2_alpha=None):
        # TODO check
        if X is None or y is None:
            assert X is None and y is None
            X = self.X_
            y = self.y_
        X_b = LogisticRegression._X_b(X)
        if L2_alpha is None:
            L2_alpha = self.L2_alpha

        W_b = self.W_b
        losses = []
        for _xx, _yy in zip(X_b, y):
            loss = LogisticRegression._loss(W=W_b,
                                            X=_xx,
                                            y=_yy,
                                            L2_alpha=L2_alpha,
                                            intercept=self.fit_intercept)
            losses.append(loss)
        losses = np.array(losses)
        return losses 
Example 3
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def W_b(self):
        """
        Gets the weights concatenated with the bias (bias trick)
        """
        weights = np.array(self.coef_)
        if self.multi_class == "multinomial" and self.n_classes_ > 2:
            assert weights.shape == (self.n_classes_, self.X_.shape[1])
            weights = weights[:, :]  # keep the full (n_classes, n_features) weight matrix
            if not self.fit_intercept:
                return weights
            intercept = np.array(self.intercept_).reshape(-1, 1)
            assert intercept.shape == (self.n_classes_, 1)
            W_b = np.concatenate((weights, intercept), axis=1)
            return W_b
        else:
            assert weights.shape == (1, self.X_.shape[1])
            weights = weights[0, :]  # Unpack the weights
            if not self.fit_intercept:
                return weights
            intercept = np.array(self.intercept_)
            assert intercept.shape == (1,)
            W_b = np.concatenate((weights, intercept), axis=0)
            return W_b 
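
The "bias trick" mentioned in the docstring folds the intercept into the weight vector so a single matrix product computes the affine map. A minimal sketch of the idea (our illustration, not project code):

import autograd.numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])
X_b = np.concatenate([X, np.ones((X.shape[0], 1))], axis=1)  # append a column of ones
W_b = np.array([0.5, -0.5, 2.0])  # weights followed by the bias
assert np.allclose(X_b.dot(W_b), X.dot(W_b[:-1]) + W_b[-1])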
Example 4
Project: STSC   Author: wOOL   File: stsc_autograd.py    The Unlicense
def get_rotation_matrix(X, C):
    ij_list = [(i, j) for i in range(C) for j in range(C) if i < j]

    def cost(theta_list):
        U_list = generate_U_list(ij_list, theta_list, C)
        R = reduce(npy.dot, U_list, npy.eye(C))
        Z = X.dot(R)
        M = npy.max(Z, axis=1, keepdims=True)
        return npy.sum((Z / M) ** 2)

    theta_list_init = npy.array([0.0] * int(C * (C - 1) / 2))
    opt = minimize(cost,
                   x0=theta_list_init,
                   method='CG',
                   jac=grad(cost),
                   options={'disp': False})
    return opt.fun, reduce(npy.dot, generate_U_list(ij_list, opt.x, C), npy.eye(C)) 
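
get_rotation_matrix minimizes the rotation cost from self-tuning spectral clustering over a product of Givens rotations. A hypothetical usage sketch (generate_U_list and the data layout are assumptions based on the rest of stsc_autograd.py):

# X: (n_samples, C) matrix of the top-C eigenvectors of the affinity matrix
# cost, R = get_rotation_matrix(X, C)
# Z = X.dot(R)                         # rotated embedding
# labels = npy.argmax(Z ** 2, axis=1)  # one cluster assignment per row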
Example 5
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def transform(self, X, y):
        """transform function"""
        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        XMat -= XMat.mean(axis=0)
        Sw, Sb = calc_Sw_Sb(XMat, yMat)
        evals, evecs = eig(Sw, Sb)

        evals = np.ascontiguousarray(evals)
        evecs = np.ascontiguousarray(evecs)

        idx = np.argsort(evals)
        idx = idx[::-1]
        evecs = evecs[:, idx]

        self.W = evecs[:, :self.n_components]
        X_transformed = np.dot(XMat, self.W)

        return X_transformed 
Example 6
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def calc_Sw_Sb(X, y):
    XMat = np.array(X)
    yMat = np.array(y)
    n_samples, n_features = XMat.shape

    Sw = np.zeros((n_features, n_features))
    Sb = np.zeros((n_features, n_features))

    X_cov = np.cov(XMat.T)

    labels = np.unique(yMat)
    for c in range(len(labels)):
        idx = np.where(yMat == labels[c])[0]
        X_c = XMat[idx, :]  # all samples belonging to class c
        X_c_cov = np.cov(X_c.T)
        Sw += float(idx.shape[0]) / n_samples * X_c_cov

    Sb = X_cov - Sw
    return Sw, Sb 
Example 7
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def another_Sw_Sb(X, y):
    XMat = np.array(X)
    yMat = np.array(y)
    n_samples, n_features = XMat.shape

    Sw = np.zeros((n_features, n_features))
    Sb = np.zeros((n_features, n_features))

    labels = np.unique(yMat)
    for c in range(len(labels)):
        idx = np.where(yMat == labels[c])[0]
        X_c = XMat[idx, :]  # all samples belonging to class c
        Sw += X_c.shape[0] * np.cov(X_c.T)

    total_mean = np.mean(XMat, axis=0)
    for c in range(len(labels)):
        idx = np.where(yMat == labels[c])[0]
        X_c = XMat[idx, :]
        mean_c = np.mean(X_c, axis=0) - total_mean
        Sb += X_c.shape[0] * np.outer(mean_c, mean_c)  # between-class scatter (outer product)

    return Sw, Sb 
Example 8
Project: ReducedVarianceReparamGradients   Author: andymiller   File: models.py    MIT License
def set_lnpdf(model="baseball", dset="boston"):
    if model == "baseball":
        return lambda x: np.squeeze(baseball.lnpdf_flat(x, 0)), baseball.D, model
    if model == "frisk":
        lnpdf, unpack, num_params, frisk_df, param_names = \
            frisk.make_model_funs(crime=2., precinct_type=1)
        return lnpdf, num_params, model
    if model == "normal":
        D, r       = 10, 2
        mu0        = np.zeros(D)
        C_true     = np.random.randn(D, r) * 2.
        v_true     = np.random.randn(D)
        Sigma_true = np.dot(C_true, C_true.T) + np.diag(np.exp(v_true))
        print(Sigma_true)
        lnpdf = lambda x: misc.make_fixed_cov_mvn_logpdf(Sigma_true)(x, mean=mu0)
        return lnpdf, D, model
    if model == "bnn":
        (Xtrain, Ytrain), (Xtest, Ytest) = \
            uci.load_dataset(dset, split_seed=0)
        lnpdf, predict, loglike, parser, (std_X, ustd_X), (std_Y, ustd_Y) = \
            nn.make_nn_regression_funs(Xtrain[:100], Ytrain[:100],
                                       layer_sizes=None, obs_variance=None)
        lnpdf_vec = lambda ths: np.array([lnpdf(th)
                                          for th in np.atleast_2d(ths)])
        return lnpdf_vec, parser.N, "-".join([model, dset]) 
Example 9
Project: document_classification   Author: scotthlee   File: tools.py    Apache License 2.0
def y_to_t(y):
	#quick type change, just in case (float dtype so the t values below aren't truncated)
	y = np.array(y, dtype=float)

	#calculating t, which will replace 1/0 in y
	n_pos = np.sum(y == 1)
	n_neg = np.sum(y == 0)
	t_pos = np.true_divide(n_pos + 1, n_pos + 2)
	t_neg = np.true_divide(1, n_neg + 2)

	#replacing values in y with the appropriate t
	y[np.where(y == 1)] = t_pos
	y[np.where(y == 0)] = t_neg
	return y

#calculates cross-entropy using the sigmoid (for Platt scaling) 
Example 10
Project: momi2   Author: popgenmethods   File: demography.py    GNU General Public License v3.0
def get_treeseq_configs(treeseq, sampled_n):
    mat = np.zeros((len(sampled_n), sum(sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)

    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            sampled_n - derived_counts,
            derived_counts
        ]).T

    for v in treeseq.variants():
        yield get_config(v.genotypes) 
Example 11
Project: momi2   Author: popgenmethods   File: sfs.py    GNU General Public License v3.0
def _entropy(self):
        counts = self._total_freqs
        n_snps = float(self.n_snps())
        p = counts / n_snps
        # return np.sum(p * np.log(p))
        ret = np.sum(p * np.log(p))

        # correct for missing data
        sampled_n = np.sum(self.configs.value, axis=2)
        sampled_n_counts = co.Counter()
        assert len(counts) == len(sampled_n)
        for c, n in zip(counts, sampled_n):
            n = tuple(n)
            sampled_n_counts[n] += c
        sampled_n_counts = np.array(
            list(sampled_n_counts.values()), dtype=float)

        ret = ret + np.sum(sampled_n_counts / n_snps *
                           np.log(n_snps / sampled_n_counts))
        assert not np.isnan(ret)
        return ret 
Example 12
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def build_config_list(sampled_pops, counts, sampled_n=None, ascertainment_pop=None):
    """
    if sampled_n is not None, counts is the derived allele counts

    if sampled_n is None, counts has an extra trailing axis:
      counts[...,0] is ancestral allele count,
      counts[...,1] is derived allele count
    """
    if sampled_n is not None:
        sampled_n = np.array(sampled_n, dtype=int)
        counts1 = np.array(counts, dtype=int, ndmin=2)
        counts0 = sampled_n - counts1
        counts = np.array([counts0, counts1], dtype=int)
        counts = np.transpose(counts, axes=[1, 2, 0])
    counts = np.array(counts, ndmin=3, dtype=int)
    assert counts.shape[1:] == (len(sampled_pops), 2)
    counts.setflags(write=False)
    return ConfigList(sampled_pops, counts, sampled_n, ascertainment_pop) 
Example 13
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def build_full_config_list(sampled_pops, sampled_n, ascertainment_pop=None):
    sampled_n = np.array(sampled_n)
    if ascertainment_pop is None:
        ascertainment_pop = [True] * len(sampled_pops)
    ascertainment_pop = np.array(ascertainment_pop)

    ranges = [list(range(n + 1)) for n in sampled_n]
    config_list = []
    for x in it.product(*ranges):
        x = np.array(x, dtype=int)
        if not (np.all(x[ascertainment_pop] == 0) or np.all(
                x[ascertainment_pop] == sampled_n[ascertainment_pop])):
            config_list.append(x)
    return build_config_list(
        sampled_pops, np.array(config_list, dtype=int), sampled_n,
        ascertainment_pop=ascertainment_pop) 
Example 14
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def subsample_probs(self, subconfig):
        """
        Returns the probability of subsampling subconfig
        from each config.
        """
        subconfig = np.array(subconfig)
        total_counts_dict = {p: n for p, n in zip(self.sampled_pops,
                                                  subconfig.sum(axis=1))
                             if n > 0}
        derived_counts_dict = {p: [0]*(n+1)
                               for p, n in total_counts_dict.items()}
        for p, d in zip(self.sampled_pops, subconfig[:, 1]):
            if p in derived_counts_dict:
                derived_counts_dict[p][d] = 1

        num = self.count_subsets(derived_counts_dict, total_counts_dict)
        denom = self.count_subsets({}, total_counts_dict)

        # avoid 0/0
        assert np.all(num[denom == 0] == 0)
        denom[denom == 0] = 1
        return num / denom

    # TODO: remove this method (and self.sampled_n attribute) 
Example 15
Project: momi2   Author: popgenmethods   File: configurations.py    GNU General Public License v3.0
def _build_old_new_idxs(self, folded):
        idxs = self.full_configs._augmented_idxs(folded)

        denom_idx_key = 'denom_idx'
        denom_idx = idxs[denom_idx_key]
        idxs = {k: v[self.sub_idxs]
                for k, v in list(idxs.items()) if k != denom_idx_key}

        old_idxs = np.array(
            list(set(sum(map(list, idxs.values()), [denom_idx]))))
        old_2_new_idxs = {old_id: new_id for new_id,
                          old_id in enumerate(old_idxs)}

        idxs = {k: np.array([old_2_new_idxs[old_id]
                             for old_id in v], dtype=int)
                for k, v in list(idxs.items())}
        idxs[denom_idx_key] = old_2_new_idxs[denom_idx]
        return old_idxs, idxs 
Example 16
Project: momi2   Author: popgenmethods   File: confidence_region.py    GNU General Public License v3.0
def _many_score_cov(params, data, demo_func, **kwargs):
    params = np.array(params)

    def f_vec(x):
        ret = _composite_log_likelihood(
            data, demo_func(*x), vector=True, **kwargs)
        # centralize
        return ret - np.mean(ret)

    # g_out = einsum('ij,ik', jacobian(f_vec)(params), jacobian(f_vec)(params))
    # but computed in a roundabout way because jacobian implementation is slow
    def _g_out_antihess(x):
        l = f_vec(x)
        lc = make_constant(l)
        return np.sum(0.5 * (l**2 - l * lc - lc * l))
    return autograd.hessian(_g_out_antihess)(params) 
Example 17
Project: momi2   Author: popgenmethods   File: size_history.py    GNU General Public License v3.0
def sfs(self, n):
        if n == 0:
            return np.array([0.])
        Et_jj = self.etjj(n)
        #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

        ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

        before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
        # ignore branch length above untruncated TMRCA
        if self.tau == float('inf'):
            before_tmrca = 0.0

        ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
        return ret

    # def transition_prob(self, v, axis=0):
    #     return moran_model.moran_action(self.scaled_time, v, axis=axis) 
Example 18
Project: momi2   Author: popgenmethods   File: likelihood.py    GNU General Public License v3.0
def _mut_factor_het(sfs, demo, mut_rate, vector, p_missing):
    mut_rate = mut_rate * np.ones(sfs.n_loci)
    E_het = expected_heterozygosity(
        demo,
        restrict_to_pops=np.array(
            sfs.sampled_pops)[sfs.ascertainment_pop])

    p_missing = p_missing * np.ones(len(sfs.ascertainment_pop))
    p_missing = p_missing[sfs.ascertainment_pop]
    lambd = np.einsum("i,j->ij", mut_rate, E_het * (1.0 - p_missing))

    counts = sfs.avg_pairwise_hets[:, sfs.ascertainment_pop]
    ret = -lambd + counts * np.log(lambd) - scipy.special.gammaln(counts + 1)
    ret = ret * sfs.sampled_n[sfs.ascertainment_pop] / float(
        np.sum(sfs.sampled_n[sfs.ascertainment_pop]))
    if not vector:
        ret = np.sum(ret)
    else:
        ret = np.sum(ret, axis=1)
    return ret 
Example 19
Project: momi2   Author: popgenmethods   File: likelihood.py    GNU General Public License v3.0
def _subsfs_list(sfs, n_chunks, rnd):
    n_snps = int(sfs.n_snps())
    logger.debug("Splitting {} SNPs into {} minibatches".format(n_snps, n_chunks))

    logger.debug("Building list of length {}".format(n_snps))
    idxs = np.zeros(n_snps, dtype=int)
    total_counts = np.array(sfs._total_freqs, dtype=int)
    curr = 0
    for i, cnt in enumerate(total_counts):
        idxs[curr:(curr+cnt)] = i
        curr += cnt

    logger.debug("Permuting list of {} SNPs".format(n_snps))
    idxs = rnd.permutation(idxs)

    logger.debug("Splitting permuted SNPs into {} minibatches".format(n_chunks))
    ret = []
    for chunk in range(n_chunks):
        chunk_idxs, chunk_cnts = np.unique(idxs[chunk::n_chunks],
                                           return_counts=True)
        sub_configs = _ConfigList_Subset(sfs.configs, chunk_idxs)
        ret.append(Sfs.from_matrix(
            np.array([chunk_cnts]).T, sub_configs,
            folded=sfs.folded, length=None))
    return ret 
Example 20
Project: ParametricGP   Author: maziarraissi   File: parametric_GP.py    MIT License
def __init__(self, X, y, M=10, max_iter = 2000, N_batch = 1, 
                 monitor_likelihood = 10, lrate = 1e-3):
        (N,D) = X.shape
        N_subset = min(N, 10000)
        idx = np.random.choice(N, N_subset, replace=False)
        kmeans = KMeans(n_clusters=M, random_state=0).fit(X[idx,:])
        Z = kmeans.cluster_centers_
    
        hyp = np.log(np.ones(D+1))
        logsigma_n = np.array([-4.0])
        hyp = np.concatenate([hyp, logsigma_n])
    
        m = np.zeros((M,1))
        S = kernel(Z,Z,hyp[:-1])

        self.X = X
        self.y = y
        
        self.M = M
        self.Z = Z
        self.m = m
        self.S = S
        
        self.hyp= hyp
        
        self.max_iter = max_iter
        self.N_batch = N_batch
        self.monitor_likelihood = monitor_likelihood
        self.jitter = 1e-8
        self.jitter_cov = 1e-8
        
        # Adam optimizer parameters
        self.mt_hyp = np.zeros(hyp.shape)
        self.vt_hyp = np.zeros(hyp.shape)
        self.lrate = lrate 
Example 21
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License
def W_b(self):
        """
        Gets the weights concatenated with the bias (bias trick)
        """
        weights = np.array(self.coef_)
        assert weights.shape == (1, self.X_.shape[1])
        weights = weights[0, :]  # Unpack the weights
        if not self.fit_intercept:
            return weights
        else:
            intercept = np.array(self.intercept_)
            assert intercept.shape == (1,)
            W_b = np.concatenate((weights, intercept), axis=0)
            return W_b 
Example 22
Project: Efficient_Augmentation   Author: mkuchnik   File: LinearSVM.py    MIT License
def W_b(self, value):
        if self.fit_intercept:
            self.coef_ = np.array([value[:-1]])
            self.intercept_ = value[-1:]
        else:
            self.coef_ = np.array([value]) 
Example 23
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def fit(self, X, y, sample_weight=None):
        assert len(X.shape) == 2, "X must be 2d"
        sklearn.utils.validation.check_X_y(X, y)
        sklearn.utils.multiclass.check_classification_targets(y)
        self.X_ = np.array(X)
        self.y_ = np.array(y)
        self.classes_ = sklearn.utils.multiclass.unique_labels(y)
        self.n_classes_ = len(self.classes_)
        assert self.n_classes_ >= 1, "Must have at least 1 class"
        if self.multi_class != "multinomial" and self.n_classes_ > 2:
            raise NotImplementedError("Only multinomial multiclass is"
                                      " supported.")
        self.sklearn_model_ = sklearn.linear_model.LogisticRegression(
                penalty=self.penalty,
                dual=self.dual,
                tol=self.tol,
                fit_intercept=self.fit_intercept,
                intercept_scaling=self.intercept_scaling,
                class_weight=self.class_weight,
                random_state=self.random_state,
                solver=self.solver,
                max_iter=self.max_iter,
                multi_class=self.multi_class,
                verbose=self.verbose,
                warm_start=self.warm_start,
                n_jobs=self.n_jobs)
        self.sklearn_model_.fit(X, y, sample_weight)

        return self 
Example 24
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def hess_losses(self, X=None, y=None, L2_alpha=None):
        if X is None or y is None:
            assert X is None and y is None
            X = self.X_
            y = self.y_
        if L2_alpha is None:
            L2_alpha = self.L2_alpha
        W_b = self.W_b
        X_b = LogisticRegression._X_b(X)
        hess_loss = autograd.hessian(LogisticRegression._log_loss)
        hesses = []
        for _xx, _yy in zip(X_b, y):
            #print("W_B flat", W_b.flatten().shape)
            hess = hess_loss(W_b.flatten(),
                             _xx.reshape(1, -1),  # TODO remove reshape?
                             _yy,
                             L2_alpha,
                             intercept=self.fit_intercept,
                             n_classes=self.n_classes_,
                             W_shape=W_b.shape)
            if hess.shape[0] == 1:
                # TODO make this not happen
                hess = np.squeeze(hess)
            #print("Hess", hess.shape)
            hesses.append(hess)
        hesses = np.array(hesses)
        #print("hesses", hesses.shape)
        if len(W_b.shape) == 1:
            assert hesses.shape[1] == W_b.shape[0]
            assert hesses.shape[2] == W_b.shape[0]
        return hesses 
Example 25
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def W_b(self, value):
        # TODO multinomial
        if self.fit_intercept:
            self.coef_ = np.array([value[:-1]])
            self.intercept_ = value[-1:]
        else:
            self.coef_ = np.array([value]) 
Example 26
Project: Efficient_Augmentation   Author: mkuchnik   File: LogisticRegression.py    MIT License
def indices_to_one_hot(data, nb_classes):
    """
    Convert an iterable of indices to one-hot encoded labels.
    From: https://stackoverflow.com/questions/37292872/how-can-i-one-hot-encode-in-python
    """
    targets = np.array(data).reshape(-1)
    return np.eye(nb_classes)[targets] 
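
For instance (our illustration), each index selects a row of the identity matrix:

indices_to_one_hot([0, 2, 1], nb_classes=3)
# array([[1., 0., 0.],
#        [0., 0., 1.],
#        [0., 1., 0.]])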
Example 27
Project: ilqr-gym   Author: neka-nat   File: cartpole_continuous.py    BSD 3-Clause "New" or "Revised" License
def __init__(self):
        self.gravity = 9.8
        self.masscart = 1.0
        self.masspole = 0.1
        self.total_mass = (self.masspole + self.masscart)
        self.length = 0.5 # actually half the pole's length
        self.polemass_length = (self.masspole * self.length)
        self.max_force = 20.0
        self.tau = 0.02  # seconds between state updates

        # Angle at which to fail the episode
        self.theta_threshold_radians = 12 * 2 * np.pi / 360
        self.x_threshold = 2.4

        # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
        high = np.array([
            self.x_threshold * 2,
            np.finfo(np.float32).max,
            self.theta_threshold_radians * 2,
            np.finfo(np.float32).max])

        self.action_space = spaces.Box(low=-self.max_force, high=self.max_force, shape=(1,))
        self.observation_space = spaces.Box(-high, high)

        self._seed()
        self.viewer = None
        self.state = None

        self.steps_beyond_done = None 
Example 28
Project: ilqr-gym   Author: neka-nat   File: cartpole_continuous.py    BSD 3-Clause "New" or "Revised" License
def _state_eq(self, st, u):
        x, x_dot, theta, theta_dot = st
        force = u[0]
        costheta = np.cos(theta)
        sintheta = np.sin(theta)
        temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
        thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
        xacc  = temp - self.polemass_length * thetaacc * costheta / self.total_mass
        x  = x + self.tau * x_dot
        x_dot = x_dot + self.tau * xacc
        theta = theta + self.tau * theta_dot
        theta_dot = theta_dot + self.tau * thetaacc
        return np.array([x, x_dot, theta, theta_dot]) 
Example 29
Project: ilqr-gym   Author: neka-nat   File: cartpole_continuous.py    BSD 3-Clause "New" or "Revised" License
def _reset(self):
        self.state = np.array([0.0, 0.0, 0.5 * np.pi, 0.0])
        self.steps_beyond_done = None
        return np.array(self.state) 
Example 30
Project: ilqr-gym   Author: neka-nat   File: ilqr_gym.py    BSD 3-Clause "New" or "Revised" License
def forward(self, x_seq, u_seq, k_seq, kk_seq):
        x_seq_hat = np.array(x_seq)
        u_seq_hat = np.array(u_seq)
        for t in range(len(u_seq)):
            control = k_seq[t] + np.matmul(kk_seq[t], (x_seq_hat[t] - x_seq[t]))
            u_seq_hat[t] = np.clip(u_seq[t] + control, -self.umax, self.umax)
            x_seq_hat[t + 1] = self.f(x_seq_hat[t], u_seq_hat[t])
        return x_seq_hat, u_seq_hat 
Example 31
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_loss(W):
            y_pred = logistic_predictions(W, XMat)
            label_probabilities = y_pred * yMat + (1 - y_pred) * (1 - yMat)
            if self.reg is None:
                return -np.sum(np.log(label_probabilities))
            elif self.reg == 'l1':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*(np.abs(W[0:-1])))
            elif self.reg == 'l2':
                return -np.sum(np.log(label_probabilities))+np.sum(self.alpha*W[0:-1]*W[0:-1])
            else:
                raise ValueError("the reg can only be None, l1 or l2!")

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim) * 0.1
        for it in range(max_iters + 1):
            loss = calc_loss(self.W)
            grad_params = grad_fun(self.W)
            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 32
Project: lightML   Author: jfzhang95   File: LinearClassification.py    MIT License
def predict(self, X):
        """Predict function"""
        XMat = np.array(X)
        n_samples = XMat.shape[0]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])
        ypred = logistic_predictions(self.W, XMat)
        for i in range(n_samples):
            if ypred[i] > 0.5:
                ypred[i] = 1
            else:
                ypred[i] = 0

        return ypred 
Example 33
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
            return np.sqrt((np.power(yMat - y_pred, 2))).mean()

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 34
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def predict(self, X):
        """Predict function"""
        XMat = np.array(X)
        n_samples = XMat.shape[0]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])
        ypred = np.dot(XMat, self.W)

        return ypred 
Example 35
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
            return np.sqrt((np.power(yMat - y_pred, 2))).mean() \
                        + np.sum(self.alpha * W[0:-1] * W[0:-1])

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features+1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 36
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License
def predict(self, X):
        """Predict function"""
        XMat = np.array(X)
        n_samples = XMat.shape[0]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])
        ypred = np.dot(XMat, self.W)
        return ypred 
Example 37
Project: lightML   Author: jfzhang95   File: LinearRegression.py    MIT License 5 votes vote down vote up
def fit(self, X, y):
        # def loss function
        def calc_linear_loss(W):
            y_pred = np.dot(XMat, W)
            return np.sqrt((np.power(yMat - y_pred, 2))).mean() \
                   + np.sum(self.alpha * np.abs(W[0:-1]))

        verbose = self.verbose
        print_step = self.print_step
        max_iters = self.max_iters

        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        grad_fun = elementwise_grad(calc_linear_loss)

        n_samples, n_features = X.shape
        n_outdim = y.shape[1]
        XMat = np.hstack([XMat, np.ones((n_samples, 1))])

        self.W = np.random.randn(n_features + 1, n_outdim)
        for it in range(max_iters+1):
            loss = calc_linear_loss(self.W)
            grad_params = grad_fun(self.W)

            # update params
            self.W -= self.lr * grad_params

            if verbose and it % print_step == 0:
                print('iteration %d / %d: loss %f' % (it, max_iters, loss)) 
Example 38
Project: autograd-forward   Author: BB-UCL   File: test_numpy.py    MIT License
def test_nan_to_num():
    y = np.array([0., np.nan, np.inf, -np.inf])
    fun = lambda x: np.sum(np.sin(np.nan_to_num(x + y)))
    x = np.random.randn(4)
    combo_check(fun, [0], [x]) 
Example 39
Project: quanser-openai-driver   Author: BlueRiverTech   File: lqr.py    MIT License
def forward_model(state, action, dt=1 / 300.0):
    theta = state[0]
    alpha = state[1]
    theta_dot = state[2]
    alpha_dot = state[3]
    Vm = action

    tau = (km * (Vm - km * theta_dot)) / Rm  # torque

    # fmt: off
    alpha_dot_dot = (2.0*Lp*Lr*mp*(4.0*Dr*theta_dot + Lp**2*alpha_dot*mp*theta_dot*np.sin(2.0*alpha) + 2.0*Lp*Lr*alpha_dot**2*mp*np.sin(alpha) - 4.0*(tau))*np.cos(alpha) - 0.5*(4.0*Jr + Lp**2*mp*np.sin(alpha)**2 + 4.0*Lr**2*mp)*(-8.0*Dp*alpha_dot + Lp**2*mp*theta_dot**2*np.sin(2.0*alpha) + 4.0*Lp*g*mp*np.sin(alpha)))/(4.0*Lp**2*Lr**2*mp**2*np.cos(alpha)**2 - (4.0*Jp + Lp**2*mp)*(4.0*Jr + Lp**2*mp*np.sin(alpha)**2 + 4.0*Lr**2*mp))
    theta_dot_dot = (-Lp*Lr*mp*(-8.0*Dp*alpha_dot + Lp**2*mp*theta_dot**2*np.sin(2.0*alpha) + 4.0*Lp*g*mp*np.sin(alpha))*np.cos(alpha) + (4.0*Jp + Lp**2*mp)*(4.0*Dr*theta_dot + Lp**2*alpha_dot*mp*theta_dot*np.sin(2.0*alpha) + 2.0*Lp*Lr*alpha_dot**2*mp*np.sin(alpha) - 4.0*(tau)))/(4.0*Lp**2*Lr**2*mp**2*np.cos(alpha)**2 - (4.0*Jp + Lp**2*mp)*(4.0*Jr + Lp**2*mp*np.sin(alpha)**2 + 4.0*Lr**2*mp))
    # fmt: on

    # Works around a single operating point
    theta += theta_dot * dt  # TODO: Shouldn't the angles be ahead of the velocities?
    alpha += alpha_dot * dt
    theta_dot += theta_dot_dot * dt
    alpha_dot += alpha_dot_dot * dt

    theta %= 2 * np.pi
    alpha %= 2 * np.pi

    # For continuous version of LQR
    # state = np.array([theta_dot,alpha_dot,theta_dot_dot,alpha_dot_dot]).reshape((4,))

    # For discrete version of LQR
    state = np.array([theta, alpha, theta_dot, alpha_dot]).reshape((4,))
    return state 
Example 40
Project: quanser-openai-driver   Author: BlueRiverTech   File: lqr.py    MIT License
def computeAB(current_state, current_control):
    # Linearizing Dynamics
    forward_dynamics_model = lambda state, action: forward_model(state, action)
    a_mat = jacobian(forward_dynamics_model, 0)
    b_mat = jacobian(forward_dynamics_model, 1)
    A = a_mat(current_state, current_control)
    B = b_mat(current_state, current_control)

    # Correct continuous time linearization from Quanser Workbook -
    # A = np.array( [[0,0,1.0000,0],[0,0,0 ,1.0000], [0,149.2751,-0.0104,0],[0,261.6091,-0.0103,0]]).reshape((4,4))
    # B = np.array([ 0,0,49.7275,49.1493]).reshape((4,1))

    return A, B 
Example 41
Project: quanser-openai-driver   Author: BlueRiverTech   File: lqr.py    MIT License
def LQR_control():
    # Cost matrices for LQR
    Q = np.diag(np.array([1, 1, 1, 1]))  # state_dimension = 4
    R = np.eye(1)  # control_dimension = 1

    A, B = computeAB(np.array([0.0, 0.0, 0.0, 0.0]), np.array([0.0]))

    # Use if discrete forward dynamics is used
    X = sp_linalg.solve_discrete_are(A, B, Q, R)
    K = np.dot(np.linalg.pinv(R + np.dot(B.T, np.dot(X, B))), np.dot(B.T, np.dot(X, A)))

    # Use for continuous version of LQR
    # X = sp_linalg.solve_continuous_are(A, B, Q, R)
    # K = np.dot(np.linalg.pinv(R), np.dot(B.T, X))
    return np.squeeze(K, 0) 
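
The gain computed above is the standard discrete-time LQR solution, K = inv(R + B^T X B) B^T X A, where X solves the discrete algebraic Riccati equation. A hypothetical closed-loop usage:

# K = LQR_control()
# u = -np.dot(K, state)  # drive the state toward the origin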
Example 42
Project: ReducedVarianceReparamGradients   Author: andymiller   File: example.py    MIT License
def run_timed_opt(gradfun, num_iters):
    """ runs num_iters without computing intermediate values, 
    then computes 2000 sample elbo values (for timing)
    """
    mc_opt = FilteredOptimization(
              grad_fun    = gradfun,
              init_params = lam0.copy(),
              save_params = True,
              save_grads  = False,
              grad_filter = AdamFilter(),
              fun         = lambda lam, t: 0.,
              callback    = lambda th, t, g: 0.)
    print("  ... optimizing ")
    mc_opt.run(num_iters=num_iters, step_size=step_size)
    print("  ... wall time: %2.4f" % mc_opt.wall_clock)
    print("computing ELBO values")

    # compute ~ 50 equally spaced elbo values here
    skip = 16
    fun_vals = np.array([vbobj.elbo_mc(lam, n_samps=500)
                         for lam in pyprind.prog_bar(mc_opt.param_trace[::skip])])
    return fun_vals, mc_opt.wall_clock, mc_opt


#################################################
# define pure MC gradient function and optimize #
################################################# 
Example 43
Project: ReducedVarianceReparamGradients   Author: andymiller   File: bbvi_base.py    MIT License
def __init__(self, lnpdf, D, glnpdf=None, lnpdf_is_vectorized=False):
        """ Black Box Variational Inference using stochastic gradients"""
        if lnpdf_is_vectorized:
            self.lnpdf = lnpdf
            if glnpdf is None:
                self.glnpdf = elementwise_grad(lnpdf)
        else:
            # create vectorized version
            self.glnpdf_single = grad(lnpdf)
            self.glnpdf = lambda z: np.array([self.glnpdf_single(zi)
                                              for zi in np.atleast_2d(z)])
            self.lnpdf = lambda z: np.array([lnpdf(zi)
                                             for zi in np.atleast_2d(z)])
            #if glnpdf is None:
            #    self.glnpdf = grad(lnpdf)

        # hessian and elementwise_grad of glnpdf
        self.gglnpdf   = elementwise_grad(self.glnpdf)
        self.hlnpdf    = hessian(self.lnpdf)
        self.hvplnpdf  = hessian_vector_product(self.lnpdf)

        # this function creates a generator of Hessian-vector product functions
        #  - make hvp = hvp_maker(lnpdf)(z)
        #  - now  hvp(v) = hessian(lnpdf)(z) v
        self.hvplnpdf_maker = make_hvp(self.lnpdf)

    #################################################
    # BBVI exposes these gradient functions         #
    ################################################# 
Example 44
Project: ReducedVarianceReparamGradients   Author: andymiller   File: frisk.py    MIT License
def process_dataset():
    data_dir = os.path.dirname(__file__)
    df = pd.read_csv(os.path.join(data_dir, 'data/frisk/frisk_with_noise.dat'), skiprows=6, delim_whitespace=True)

    # compute proportion black in precinct, black = 1
    # first aggregate by precinct/ethnicity, and sum over populations
    popdf = df[['pop', 'precinct', 'eth']]. \
                groupby(['precinct', 'eth'])['pop'].apply(sum)
    percent_black = np.array([popdf[i][1] / float(popdf[i].sum())
                              for i in range(1, 76)])
    precinct_type = pd.cut(percent_black, [0, .1, .4, 1.])
    df['precinct_type'] = precinct_type.codes[df.precinct.values-1]
    return df 
Example 45
Project: ReducedVarianceReparamGradients   Author: andymiller   File: opt.py    MIT License
def track_progress(self, noisy_grad, filtered_grad):

        # if function passed in --- save values
        if self.fun is not None:
            self.fun_vals.append(self.fun(self.params, self.t))

        # report on gradient
        if self.callback is not None:
            self.callback(self.params, self.t, noisy_grad)

        # update object attributes
        if self.save_params:
            self.param_trace.append(self.params.copy())

        if self.save_grads:
            self.grad_trace.append(noisy_grad)

        if self.save_filtered_grads:
            self.filtered_grad_trace.append(filtered_grad)

        if self.true_grad_fun is not None:
            true_grad = self.true_grad_fun(self.params, self.t)
            self.true_grad_trace.append(true_grad)

        if (self.num_marginal_samples_to_save > 0) and \
           (self.t % self.marginal_sample_skip == 0):
            nms    = self.num_marginal_samples_to_save
            print("  ... saving %d marginal samples (iter %d)" % (nms, self.t))
            msamps = np.array([self.grad_fun(self.params, self.t)
                               for _ in pyprind.prog_bar(range(nms))])
            self.marginal_samples[self.t] = msamps 
Example 46
Project: ReducedVarianceReparamGradients   Author: andymiller   File: opt.py    MIT License
def plot_gradients(self, dims=[1, 10, 50]):
        import matplotlib.pyplot as plt
        import seaborn as sns; sns.set_style("white")
        noisy_grads    = np.array(self.grad_trace)
        true_grads     = np.array(self.true_grad_trace)
        filt_grads     = np.array([g[0] for g in self.filtered_grad_trace])
        filt_grads_var = np.array([g[1] for g in self.filtered_grad_trace])
        tgrid = np.arange(true_grads.shape[0])
        fig, axarr = plt.subplots(len(dims), 1, figsize=(12, 3*len(dims)))
        for d, ax in zip(dims, axarr.flatten()):
            ax.plot(tgrid, true_grads[:,d], label="true")
            ax.plot(tgrid, filt_grads[:,d], label="filtered")
            ax.fill_between(tgrid, filt_grads[:,d]-2*np.sqrt(filt_grads_var[:,d]),
                                   filt_grads[:,d]+2*np.sqrt(filt_grads_var[:,d]),
                                   alpha = .25)
            ax.scatter(tgrid, noisy_grads[:,d], s=3)
            #yrange = (np.max([filt_grads[:,d], true_grads[:,d]]),
            #          np.min([filt_grads[:,d], true_grads[:,d]]))
            #ax.set_ylim(yrange)
            ax.set_xlim((tgrid[0], tgrid[-1]))
        ax.legend()
        print("Adam average grad deviation:",
              np.sqrt(np.mean((filt_grads - true_grads)**2)))
        print("sample average deviation: ",
              np.sqrt(np.mean((noisy_grads - true_grads)**2)))
        return fig, axarr 
Example 47
Project: document_classification   Author: scotthlee   File: tools.py    Apache License 2.0
def process(self, 
             df, 
             x_name, 
             y_name=None, 
             ngrams=2, 
             max_features=35000, 
             method='counts', 
             binary=True, 
             sparse=False):
		# Choosing the particular flavor of vectorizer
		if method == 'counts':
			vectorizer = CountVectorizer(max_features=max_features, 
                                ngram_range=(1, ngrams), 
                                decode_error='replace', 
                                binary=binary)
		elif method == 'tfidf':
			vectorizer = TfidfVectorizer(max_features=max_features, 
                                ngram_range=(1, ngrams),
                                decode_error='replace')

		# Fitting the vectorizer and converting the counts to an array
		full_fit = vectorizer.fit_transform(df[x_name])
		full_counts = full_fit.toarray()
		self.vocabulary_ = vectorizer.vocabulary_

		# Passing the attributes up to the class instance
		self.data = df
		if sparse:
			full_counts = csr_matrix(full_counts)
		self.X = full_counts
		if y_name is not None:
			self.y = np.array(df[y_name])
		return

	# Splits the data into training and test sets; 
Example 48
Project: document_classification   Author: scotthlee   File: tools.py    Apache License 2.0
def linear_prediction(x, w, b, neg=0, binary=True):
	guesses = np.matmul(x, w.transpose()) + b
	if binary:
		prediction = np.array(np.sign(guesses), dtype=int)
		if neg == 0:
			prediction[prediction == -1] = 0
	else:
		prediction = guesses
	return prediction

# Returns the accuracy of a classifier based on inputs, 
# outputs, training weights, and training bias 
Example 49
Project: document_classification   Author: scotthlee   File: tools.py    Apache License 2.0
def platt_scale(x, y, mod, max_iter=1000, step=.001):
	#getting variables for the Platt scaling
	t = y_to_t(y)
	n_pos = np.sum(y == 1)
	n_neg = np.sum(y == 0)
	A = 0.0
	B = np.log(np.true_divide(n_neg + 1, n_pos + 1))

	#getting the predictions
	if type(mod).__name__ != 'LinearSVC':
		#mnb-ifying the input
		X = np.multiply(mod.r, x)
		preds = linear_prediction(X, mod.int_coef_, mod.bias, binary=False)
	else:
		X = deepcopy(x)
		preds = linear_prediction(X, mod.coef_, mod.intercept_, binary=False)

	#minimizing A and B via gradient descent
	vals = np.array([A, B])
	gradient = jacobian(platt_loss)
	for i in range(max_iter):
		vals -= gradient(vals, preds, y)*step

	#returning the fitted Platt probabilities
	A = vals[0]
	B = vals[1]
	probs = platt_probs(A, B, preds)
	return probs 
Example 50
Project: momi2   Author: popgenmethods   File: util.py    GNU General Public License v3.0
def check_psd(X, **tol_kwargs):
    X = check_symmetric(X)
    d, U = np.linalg.eigh(X)
    d = truncate0(d, **tol_kwargs)
    ret = np.dot(U, np.dot(np.diag(d), np.transpose(U)))
    #assert np.allclose(ret, X)
    return np.array(ret, ndmin=2)