Python numpy.column_stack() Examples

The following are 30 code examples of numpy.column_stack(), extracted from open source projects. The originating project, author, and source file are listed above each example. You may also want to check out all available functions and classes of the numpy module.
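Before the project code, a minimal sketch of what numpy.column_stack() does: it takes a sequence of 1-D arrays and stacks them as columns of a 2-D array (2-D inputs are stacked as-is, like numpy.hstack).

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

# 1-D inputs become the columns of the result
np.column_stack((a, b))
# array([[1, 4],
#        [2, 5],
#        [3, 6]])
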
Example #1
Source Project: pymoo   Author: msu-coinlab   File: truss2d.py   License: Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):

        # variable names for convenient access
        x1 = x[:, 0]
        x2 = x[:, 1]
        y = x[:, 2]

        # first objective
        f1 = x1 * anp.sqrt(16 + anp.square(y)) + x2 * anp.sqrt((1 + anp.square(y)))

        # measures needed for the second objective
        sigma_ac = 20 * anp.sqrt(16 + anp.square(y)) / (y * x1)
        sigma_bc = 80 * anp.sqrt(1 + anp.square(y)) / (y * x2)

        # take the max
        f2 = anp.max(anp.column_stack((sigma_ac, sigma_bc)), axis=1)

        # define a constraint
        g1 = f2 - self.Smax

        out["F"] = anp.column_stack([f1, f2])
        out["G"] = g1 
Example #2
Source Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: metric.py   License: Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        mx.metric.check_label_shapes(labels, preds)

        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()
            pred = np.column_stack((1 - pred, pred))

            label = label.ravel()
            num_examples = pred.shape[0]
            assert label.shape[0] == num_examples, (label.shape[0], num_examples)
            prob = pred[np.arange(num_examples, dtype=np.int64), np.int64(label)]
            self.sum_metric += (-np.log(prob + self.eps)).sum()
            self.num_inst += num_examples 
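A sketch of the pattern above with toy numbers: column_stack((1 - pred, pred)) turns a vector of positive-class probabilities into a two-column class-probability matrix, which is then indexed by the integer labels to pick the probability of the true class.

import numpy as np

pred = np.array([0.9, 0.2, 0.7])            # P(class 1) for three samples
label = np.array([1, 0, 1])

probs = np.column_stack((1 - pred, pred))   # shape (3, 2): columns are P(class 0), P(class 1)
prob_of_true = probs[np.arange(3), label]   # array([0.9, 0.8, 0.7])
loss = (-np.log(prob_of_true)).sum()
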
Example #3
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def _positional_to_optimal(self, K):
        k, l = self.k, self.l

        suffix = np.full((len(K), self.l), 0.0)
        X = np.column_stack([K, suffix])
        X[:, self.k + self.l - 1] = 0.35

        for i in range(self.k + self.l - 2, self.k - 1, -1):
            m = X[:, i + 1:k + l]
            val = m.sum(axis=1) / m.shape[1]
            X[:, i] = 0.35 ** ((0.02 + 1.96 * val) ** -1)

        ret = X * (2 * (np.arange(self.n_var) + 1))
        return ret


# ---------------------------------------------------------------------------------------------------------
# TRANSFORMATIONS
# --------------------------------------------------------------------------------------------------------- 
Example #4
Source Project: pymoo   Author: msu-coinlab   File: point_crossover.py   License: Apache License 2.0
def _do(self, problem, X, **kwargs):

        # get the X of parents and count the matings
        _, n_matings, n_var = X.shape

        # start point of crossover
        r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :self.n_points]
        r.sort(axis=1)
        r = np.column_stack([r, np.full(n_matings, n_var)])

        # the mask used to perform the crossover
        M = np.full((n_matings, n_var), False)

        # create for each individual the crossover range
        for i in range(n_matings):

            j = 0
            while j < r.shape[1] - 1:
                a, b = r[i, j], r[i, j + 1]
                M[i, a:b] = True
                j += 2

        _X = crossover_mask(X, M)

        return _X 
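A small sketch (with arbitrary sizes) of how the crossover points are assembled above: the sorted random points are padded with n_var via column_stack so every row ends with a closing index.

import numpy as np

n_matings, n_var, n_points = 2, 6, 2

r = np.vstack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :n_points]
r.sort(axis=1)
r = np.column_stack([r, np.full(n_matings, n_var)])
# each row now holds sorted crossover points with n_var appended as the closing index,
# e.g. array([[2, 4, 6],
#             [1, 3, 6]])
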
Example #5
Source Project: pymoo   Author: msu-coinlab   File: performance.py   License: Apache License 2.0
def geometric_mean_var(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])
    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    I = D.argsort(axis=1)[:, :k]

    first = np.column_stack([np.arange(n_points) for _ in range(k)])

    val = gmean(D[first, I], axis=1)

    return val.var() 
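The column_stack call above builds the row-index matrix needed for advanced indexing: pairing each row index with its k nearest-neighbor column indices. A self-contained sketch with a small distance matrix:

import numpy as np

D = np.arange(16, dtype=float).reshape(4, 4)
np.fill_diagonal(D, np.inf)

k = 2
I = D.argsort(axis=1)[:, :k]                               # k nearest columns per row
first = np.column_stack([np.arange(4) for _ in range(k)])  # matching row indices
nearest = D[first, I]                                      # shape (4, k)
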
Example #6
Source Project: pymoo   Author: msu-coinlab   File: performance.py   License: Apache License 2.0
def mean_mean(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])
    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    I = D.argsort(axis=1)[:, :k]

    first = np.column_stack([np.arange(n_points) for _ in range(k)])

    val = np.mean(D[first, I], axis=1)

    return val.mean() 
Example #7
Source Project: pymoo   Author: msu-coinlab   File: reference_direction.py   License: Apache License 2.0
def map_onto_unit_simplex(rnd, method):
    n_points, n_dim = rnd.shape

    if method == "sum":
        ret = rnd / rnd.sum(axis=1)[:, None]

    elif method == "kraemer":
        M = sys.maxsize

        rnd *= M
        rnd = rnd[:, :n_dim - 1]
        rnd = np.column_stack([np.zeros(n_points), rnd, np.full(n_points, M)])

        rnd = np.sort(rnd, axis=1)

        ret = np.full((n_points, n_dim), np.nan)
        for i in range(1, n_dim + 1):
            ret[:, i - 1] = rnd[:, i] - rnd[:, i - 1]
        ret /= M

    else:
        raise Exception("Invalid unit simplex mapping!")

    return ret 
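A condensed sketch of the "kraemer" branch above (using M = 10**6 instead of sys.maxsize, and a vectorized difference instead of the per-column loop): padding with 0 and M, sorting, and taking consecutive gaps yields rows that sum to 1, i.e. points on the unit simplex.

import numpy as np

n_points, n_dim = 3, 4
rnd = np.random.random((n_points, n_dim))

M = 10 ** 6
rnd = (rnd * M)[:, :n_dim - 1]
rnd = np.column_stack([np.zeros(n_points), rnd, np.full(n_points, M)])
rnd = np.sort(rnd, axis=1)

ret = (rnd[:, 1:] - rnd[:, :-1]) / M   # consecutive gaps; each row sums to 1
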
Example #8
Source Project: pymoo   Author: msu-coinlab   File: test_gradient.py   License: Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):

        f1 = x[:, 0]
        c = np.sum(x[:, 1:], axis=1)
        g = 1.0 + 9.0 * c / (self.n_var - 1)
        f2 = g * (1 - np.power(f1 * 1.0 / g, 0.5) - (f1 * 1.0 / g) * np.sin(10 * np.pi * f1))

        out["F"] = np.column_stack([f1, f2])

        if "dF" in out:
            dF = np.zeros([x.shape[0], self.n_obj, self.n_var], dtype=float)

            dF[:, 0, 0], dF[:, 0, 1:] = 1, 0
            dF[:, 1, 0] = -0.5 * np.sqrt(g / x[:, 0]) - np.sin(10 * np.pi * x[:, 0]) - 10 * np.pi * x[:, 0] * np.cos(
                10 * np.pi * x[:, 0])
            dF[:, 1, 1:] = (9 / (self.n_var - 1)) * (1 - 0.5 * np.sqrt(x[:, 0] / g))[:, None]
            out["dF"] = dF 
Example #9
Source Project: refractiveindex.info-scripts   Author: polyanskiy   File: Kaiser 1962 - CaF2.py   License: GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.4g', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
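A simplified, self-contained sketch of the export step in SaveYML (the filename and values below are made up): column_stack assembles wavelength, real refractive index, and extinction coefficient into a three-column table that savetxt can write.

import numpy as np

w_um = np.array([0.40, 0.50, 0.60])
RefInd = np.array([1.47 + 0.0010j, 1.46 + 0.0008j, 1.45 + 0.0005j])

export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
np.savetxt('example_nk.txt', export, fmt='%4.2f %#.4g %#.4g',
           header='wavelength n k', comments='# ')
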
Example #10
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.3e', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #11
Source Project: refractiveindex.info-scripts   Author: polyanskiy   File: Zhang 1998 - Kapton.py   License: GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.3f %#.4g %#.3e', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #12
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.3e', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #13
Source Project: refractiveindex.info-scripts   Author: polyanskiy   File: Kaiser 1962 - BaF2.py   License: GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.4g', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #14
Source Project: feets   Author: quatrope   File: ext_dmdt.py   License: MIT License
def fit(self, magnitude, time, dt_bins, dm_bins):
        def delta_calc(idx):
            t0 = time[idx]
            m0 = magnitude[idx]
            deltat = time[idx + 1 :] - t0
            deltam = magnitude[idx + 1 :] - m0

            deltat[np.where(deltat < 0)] *= -1
            deltam[np.where(deltat < 0)] *= -1

            return np.column_stack((deltat, deltam))

        lc_len = len(time)
        n_vals = int(0.5 * lc_len * (lc_len - 1))

        deltas = np.vstack(tuple(delta_calc(idx) for idx in range(lc_len - 1)))

        deltat = deltas[:, 0]
        deltam = deltas[:, 1]

        bins = [dt_bins, dm_bins]
        counts = np.histogram2d(deltat, deltam, bins=bins, density=False)[0]
        result = np.fix(255.0 * counts / n_vals + 0.999).astype(int)

        return {"DMDT": result} 
Example #15
Source Project: pulse2percept   Author: pulse2percept   File: beyeler2019.py   License: BSD 3-Clause "New" or "Revised" License
def calc_axon_contribution(self, axons):
        xyret = np.column_stack((self.grid.xret.ravel(),
                                 self.grid.yret.ravel()))
        # Only include axon segments whose squared distance from the soma is
        # < `max_d2`; these segments have `sensitivity` > `self.min_ax_sensitivity`:
        max_d2 = -2.0 * self.axlambda ** 2 * np.log(self.min_ax_sensitivity)
        axon_contrib = []
        for xy, bundle in zip(xyret, axons):
            idx = np.argmin((bundle[:, 0] - xy[0]) ** 2 +
                            (bundle[:, 1] - xy[1]) ** 2)
            # Cut off the part of the fiber that goes beyond the soma:
            axon = np.flipud(bundle[0: idx + 1, :])
            # Add the exact location of the soma:
            axon = np.insert(axon, 0, xy, axis=0)
            # For every axon segment, calculate distance from soma by
            # summing up the individual distances between neighboring axon
            # segments (by "walking along the axon"):
            d2 = np.cumsum(np.diff(axon[:, 0], axis=0) ** 2 +
                           np.diff(axon[:, 1], axis=0) ** 2)
            idx_d2 = d2 < max_d2
            sensitivity = np.exp(-d2[idx_d2] / (2.0 * self.axlambda ** 2))
            idx_d2 = np.insert(idx_d2, 0, False)
            contrib = np.column_stack((axon[idx_d2, :], sensitivity))
            axon_contrib.append(contrib)
        return axon_contrib 
Example #16
Source Project: pulse2percept   Author: pulse2percept   File: test_beyeler2019.py   License: BSD 3-Clause "New" or "Revised" License
def test_AxonMapModel_calc_axon_contribution(engine):
    model = AxonMapModel(xystep=2, engine=engine, n_axons=10,
                         xrange=(-20, 20), yrange=(-15, 15),
                         axons_range=(-30, 30))
    model.build()
    xyret = np.column_stack((model.spatial.grid.xret.ravel(),
                             model.spatial.grid.yret.ravel()))
    bundles = model.spatial.grow_axon_bundles()
    axons = model.spatial.find_closest_axon(bundles)
    contrib = model.spatial.calc_axon_contribution(axons)

    # Check lambda math:
    for ax, xy in zip(contrib, xyret):
        axon = np.insert(ax, 0, list(xy) + [0], axis=0)
        d2 = np.cumsum(np.diff(axon[:, 0], axis=0) ** 2 +
                       np.diff(axon[:, 1], axis=0) ** 2)
        sensitivity = np.exp(-d2 / (2.0 * model.spatial.axlambda ** 2))
        npt.assert_almost_equal(sensitivity, ax[:, 2]) 
Example #17
Source Project: Reinforcement_Learning_for_Traffic_Light_Control   Author: quantumiracle   File: RL_brain.py   License: Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        #print(s,s_.size)
        s=s.reshape(-1)
        s_=s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
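The commented-out column_stack line hints at why hstack is used here: with 1-D state vectors, hstack produces the flat transition vector the replay buffer expects, whereas column_stack would stack the inputs as columns of a 2-D array. A small sketch with made-up values:

import numpy as np

s = np.array([0.1, 0.2])
a, r = 1, -0.5
s_ = np.array([0.3, 0.4])

np.hstack((s, [a, r], s_)).shape          # (6,)   flat transition vector
np.column_stack((s, [a, r], s_)).shape    # (2, 3) columns, not what the buffer stores
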
Example #18
Source Project: Reinforcement_Learning_for_Traffic_Light_Control   Author: quantumiracle   File: RL_brain.py   License: Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        #print(s,s_.size)
        s=s.reshape(-1)
        s_=s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
Example #19
Source Project: Reinforcement_Learning_for_Traffic_Light_Control   Author: quantumiracle   File: RL_brain.py   License: Apache License 2.0
def store_transition(self, s, a, r, s_):
        self.lo.acquire()

        s=s.reshape(-1)
        s_=s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1
        self.lo.release()
        # print(index) 
Example #20
Source Project: Reinforcement_Learning_for_Traffic_Light_Control   Author: quantumiracle   File: RL_brain2.py   License: Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        #print(s,s_.size)
        s=s.reshape(-1)
        s_=s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
Example #21
Source Project: Reinforcement_Learning_for_Traffic_Light_Control   Author: quantumiracle   File: RL_brain2.py   License: Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        #print(s,s_.size)
        s=s.reshape(-1)
        s_=s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
Example #22
Source Project: FRIDA   Author: LCAV   File: tools_fri_doa_plane.py   License: MIT License
def compute_b(G_lst, GtG_lst, beta_lst, Rc0, num_bands, a_ri):
    """
    compute the uniform sinusoidal samples b from the updated annihilating
    filter coeffiients.
    :param GtG_lst: list of G^H G for different subbands
    :param beta_lst: list of beta-s for different subbands
    :param Rc0: right-dual matrix, here it is the convolution matrix associated with c
    :param num_bands: number of bands
    :param L: size of b: L by 1
    :param a_ri: a 2D numpy array. each column corresponds to the measurements within a subband
    :return:
    """
    b_lst = []
    a_Gb_lst = []
    for loop in range(num_bands):
        GtG_loop = GtG_lst[loop]
        beta_loop = beta_lst[loop]
        b_loop = beta_loop - \
                 linalg.solve(GtG_loop,
                              np.dot(Rc0.T,
                                     linalg.solve(np.dot(Rc0, linalg.solve(GtG_loop, Rc0.T)),
                                                  np.dot(Rc0, beta_loop)))
                              )

        b_lst.append(b_loop)
        a_Gb_lst.append(a_ri[:, loop] - np.dot(G_lst[loop], b_loop))

    return np.column_stack(b_lst), linalg.norm(np.concatenate(a_Gb_lst)) 
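The return statement above relies on column_stack accepting a list of equally sized 1-D vectors, giving one column per subband. A minimal sketch:

import numpy as np

b_lst = [np.array([1.0, 2.0, 3.0]), np.array([4.0, 5.0, 6.0])]

B = np.column_stack(b_lst)   # shape (3, 2): one column per subband
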
Example #23
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def _post(self, t, a):
        x = []
        for i in range(t.shape[1] - 1):
            x.append(np.maximum(t[:, -1], a[i]) * (t[:, i] - 0.5) + 0.5)
        x.append(t[:, -1])
        return np.column_stack(x) 
Example #24
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def _calculate(self, x, s, h):
        return x[:, -1][:, None] + s * np.column_stack(h) 
Example #25
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def _positional_to_optimal(self, K):
        suffix = np.full((len(K), self.l), 0.35)
        X = np.column_stack([K, suffix])
        return X * self.xu 
Example #26
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def t4(x, m, n, k):
        w = np.arange(2, 2 * n + 1, 2)
        gap = k // (m - 1)
        t = []
        for m in range(1, m):
            _y = x[:, (m - 1) * gap: (m * gap)]
            _w = w[(m - 1) * gap: (m * gap)]
            t.append(_reduction_weighted_sum(_y, _w))
        t.append(_reduction_weighted_sum(x[:, k:n], w[k:n]))
        return np.column_stack(t) 
Example #27
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def t2(x, n, k):
        y = [x[:, i] for i in range(k)]

        l = n - k
        ind_non_sep = k + l // 2

        i = k + 1
        while i <= ind_non_sep:
            head = k + 2 * (i - k) - 2
            tail = k + 2 * (i - k)
            y.append(_reduction_non_sep(x[:, head:tail], 2))
            i += 1

        return np.column_stack(y) 
Example #28
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def t2(x, m, k):
        gap = k // (m - 1)
        t = [_reduction_weighted_sum_uniform(x[:, (m - 1) * gap: (m * gap)]) for m in range(1, m)]
        t.append(_reduction_weighted_sum_uniform(x[:, k:]))
        return np.column_stack(t) 
Example #29
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def t2(x, m, n, k):
        gap = k // (m - 1)
        t = [_reduction_non_sep(x[:, (m - 1) * gap: (m * gap)], gap) for m in range(1, m)]
        t.append(_reduction_non_sep(x[:, k:], n - k))
        return np.column_stack(t) 
Example #30
Source Project: pymoo   Author: msu-coinlab   File: wfg.py   License: Apache License 2.0
def t1(x, n, k):
        ret = []
        for i in range(k, n):
            aux = _reduction_weighted_sum_uniform(x[:, :i])
            ret.append(_transformation_param_dependent(x[:, i], aux, A=0.98 / 49.98, B=0.02, C=50.0))
        return np.column_stack(ret)