Python numpy.column_stack() Examples

The following are 30 code examples of numpy.column_stack(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out the other available functions and classes of the numpy module.
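As a quick refresher before the examples: numpy.column_stack() stacks a sequence of 1-D or 2-D arrays as the columns of a single 2-D array, treating each 1-D input as a column. A minimal sketch:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
np.column_stack((a, b))
# array([[1, 4],
#        [2, 5],
#        [3, 6]])

# 2-D inputs are concatenated along their second axis, so an extra column can
# be appended to an existing matrix in one call:
m = np.zeros((3, 2))
np.column_stack((m, a))
# array([[0., 0., 1.],
#        [0., 0., 2.],
#        [0., 0., 3.]])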
Example #1
Source File: truss2d.py    From pymoo with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):

        # variable names for convenient access
        x1 = x[:, 0]
        x2 = x[:, 1]
        y = x[:, 2]

        # first objective
        f1 = x1 * anp.sqrt(16 + anp.square(y)) + x2 * anp.sqrt(1 + anp.square(y))

        # measures needed for the second objective
        sigma_ac = 20 * anp.sqrt(16 + anp.square(y)) / (y * x1)
        sigma_bc = 80 * anp.sqrt(1 + anp.square(y)) / (y * x2)

        # take the max
        f2 = anp.max(anp.column_stack((sigma_ac, sigma_bc)), axis=1)

        # define a constraint
        g1 = f2 - self.Smax

        out["F"] = anp.column_stack([f1, f2])
        out["G"] = g1 
Example #2
Source File: RL_brain2.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        # print(s, s_.size)
        s = s.reshape(-1)
        s_ = s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
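Note the commented-out alternatives above. For the flattened 1-D vectors used here, np.hstack concatenates everything into one long 1-D transition row; np.column_stack would instead treat each 1-D input as a column and return a 2-D array, and it raises an error when the inputs have different lengths, which is presumably why hstack was kept. A small sketch of the difference, with made-up shapes:

import numpy as np

s = np.arange(4, dtype=float)        # flattened state (made-up length)
s_ = np.arange(4, 8, dtype=float)    # flattened next state
a, r = 1, 0.5

np.hstack((s, [a, r], s_)).shape     # (10,): one flat transition row
np.column_stack((s, s_)).shape       # (4, 2): 1-D inputs become columns
# np.column_stack((s, [a, r], s_))   # ValueError: the inputs differ in length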
Example #3
Source File: wfg.py    From pymoo with Apache License 2.0
def _positional_to_optimal(self, K):
        k, l = self.k, self.l

        suffix = np.full((len(K), self.l), 0.0)
        X = np.column_stack([K, suffix])
        X[:, self.k + self.l - 1] = 0.35

        for i in range(self.k + self.l - 2, self.k - 1, -1):
            m = X[:, i + 1:k + l]
            val = m.sum(axis=1) / m.shape[1]
            X[:, i] = 0.35 ** ((0.02 + 1.96 * val) ** -1)

        ret = X * (2 * (np.arange(self.n_var) + 1))
        return ret


# ---------------------------------------------------------------------------------------------------------
# TRANSFORMATIONS
# --------------------------------------------------------------------------------------------------------- 
Example #4
Source File: point_crossover.py    From pymoo with Apache License 2.0
def _do(self, problem, X, **kwargs):

        # get the X of parents and count the matings
        _, n_matings, n_var = X.shape

        # start point of crossover
        r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :self.n_points]
        r.sort(axis=1)
        r = np.column_stack([r, np.full(n_matings, n_var)])

        # the mask to do the crossover
        M = np.full((n_matings, n_var), False)

        # create for each individual the crossover range
        for i in range(n_matings):

            j = 0
            while j < r.shape[1] - 1:
                a, b = r[i, j], r[i, j + 1]
                M[i, a:b] = True
                j += 2

        _X = crossover_mask(X, M)

        return _X 
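The second column_stack call in _do appends a constant column of n_var to the sorted crossover points, so every row of r ends at the chromosome length and the (start, end) pairs always cover the tail segment. A tiny illustration of that pattern with made-up numbers:

import numpy as np

r = np.array([[2, 5],
              [1, 6],
              [3, 4]])                  # made-up sorted crossover points, n_var = 8
np.column_stack([r, np.full(3, 8)])     # append a closing column at n_var
# array([[2, 5, 8],
#        [1, 6, 8],
#        [3, 4, 8]])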
Example #5
Source File: performance.py    From pymoo with Apache License 2.0
def geometric_mean_var(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])
    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    I = D.argsort(axis=1)[:, :k]

    first = np.column_stack([np.arange(n_points) for _ in range(k)])

    val = gmean(D[first, I], axis=1)

    return val.var() 
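The column_stack call here repeats np.arange(n_points) k times as columns, producing a row-index matrix with the same shape as I, so the fancy index D[first, I] picks each point's k smallest distances. The same indexing pattern on a small made-up distance matrix:

import numpy as np

D = np.array([[np.inf, 3.0, 1.0],
              [3.0, np.inf, 2.0],
              [1.0, 2.0, np.inf]])
k = 2
I = D.argsort(axis=1)[:, :k]                                # two nearest neighbours per row
first = np.column_stack([np.arange(3) for _ in range(k)])   # [[0, 0], [1, 1], [2, 2]]
D[first, I]                                                 # the two smallest entries of each row
# array([[1., 3.],
#        [2., 3.],
#        [1., 2.]])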
Example #6
Source File: performance.py    From pymoo with Apache License 2.0
def mean_mean(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])
    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    I = D.argsort(axis=1)[:, :k]

    first = np.column_stack([np.arange(n_points) for _ in range(k)])

    val = np.mean(D[first, I], axis=1)

    return val.mean() 
Example #7
Source File: reference_direction.py    From pymoo with Apache License 2.0
def map_onto_unit_simplex(rnd, method):
    n_points, n_dim = rnd.shape

    if method == "sum":
        ret = rnd / rnd.sum(axis=1)[:, None]

    elif method == "kraemer":
        M = sys.maxsize

        rnd *= M
        rnd = rnd[:, :n_dim - 1]
        rnd = np.column_stack([np.zeros(n_points), rnd, np.full(n_points, M)])

        rnd = np.sort(rnd, axis=1)

        ret = np.full((n_points, n_dim), np.nan)
        for i in range(1, n_dim + 1):
            ret[:, i - 1] = rnd[:, i] - rnd[:, i - 1]
        ret /= M

    else:
        raise Exception("Invalid unit simplex mapping!")

    return ret 
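A hypothetical call to the helper above (assuming it is imported from pymoo's reference_direction module): whichever method is used, the rows of the result lie on the unit simplex, i.e. they are non-negative and sum to one. Note that the "kraemer" branch scales rnd in place, so passing a copy keeps the caller's array intact.

import numpy as np

rnd = np.random.random((5, 3))
ret = map_onto_unit_simplex(rnd.copy(), "kraemer")
np.allclose(ret.sum(axis=1), 1.0)   # True: every row sums to one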
Example #8
Source File: test_gradient.py    From pymoo with Apache License 2.0
def _evaluate(self, x, out, *args, **kwargs):

        f1 = x[:, 0]
        c = np.sum(x[:, 1:], axis=1)
        g = 1.0 + 9.0 * c / (self.n_var - 1)
        f2 = g * (1 - np.power(f1 * 1.0 / g, 0.5) - (f1 * 1.0 / g) * np.sin(10 * np.pi * f1))

        out["F"] = np.column_stack([f1, f2])

        if "dF" in out:
            dF = np.zeros([x.shape[0], self.n_obj, self.n_var], dtype=float)

            dF[:, 0, 0], dF[:, 0, 1:] = 1, 0
            dF[:, 1, 0] = -0.5 * np.sqrt(g / x[:, 0]) - np.sin(10 * np.pi * x[:, 0]) - 10 * np.pi * x[:, 0] * np.cos(
                10 * np.pi * x[:, 0])
            dF[:, 1, 1:] = (9 / (self.n_var - 1)) * (1 - 0.5 * np.sqrt(x[:, 0] / g))[:, None]
            out["dF"] = dF 
Example #9
Source File: Kaiser 1962 - CaF2.py    From refractiveindex.info-scripts with GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.4g', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
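A hypothetical call to SaveYML as defined above; the wavelength grid and refractive-index values below are illustrative only, not the coefficients used in the original script:

import numpy as np

w_um = np.linspace(0.2, 10.0, 50)   # wavelengths in micrometres (made-up grid)
n = 1.43 + 0.01 / w_um ** 2         # illustrative real index
k = 1e-4 * np.exp(-w_um)            # illustrative extinction coefficient
SaveYML(w_um, n + 1j * k, 'example.yml',
        references='illustrative entry', comments='example data only')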
Example #10
Source File: Tsuda 2018 - PMMA (BB model).py    From refractiveindex.info-scripts with GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.3e', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #11
Source File: Zhang 1998 - Kapton.py    From refractiveindex.info-scripts with GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.3f %#.4g %#.3e', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #12
Source File: Kaiser 1962 - BaF2.py    From refractiveindex.info-scripts with GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.4g', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #13
Source File: ext_dmdt.py    From feets with MIT License
def fit(self, magnitude, time, dt_bins, dm_bins):
        def delta_calc(idx):
            t0 = time[idx]
            m0 = magnitude[idx]
            deltat = time[idx + 1 :] - t0
            deltam = magnitude[idx + 1 :] - m0

            # flip the sign of pairs with a negative time difference; evaluate
            # the mask once, before deltat is modified in place
            neg = deltat < 0
            deltat[neg] *= -1
            deltam[neg] *= -1

            return np.column_stack((deltat, deltam))

        lc_len = len(time)
        n_vals = int(0.5 * lc_len * (lc_len - 1))

        deltas = np.vstack(tuple(delta_calc(idx) for idx in range(lc_len - 1)))

        deltat = deltas[:, 0]
        deltam = deltas[:, 1]

        bins = [dt_bins, dm_bins]
        counts = np.histogram2d(deltat, deltam, bins=bins, density=False)[0]
        result = np.fix(255.0 * counts / n_vals + 0.999).astype(int)

        return {"DMDT": result} 
Example #14
Source File: beyeler2019.py    From pulse2percept with BSD 3-Clause "New" or "Revised" License
def calc_axon_contribution(self, axons):
        xyret = np.column_stack((self.grid.xret.ravel(),
                                 self.grid.yret.ravel()))
        # Only include axon segments that are < `max_d2` from the soma. These
        # axon segments will have `sensitivity` > `self.min_ax_sensitivity`:
        max_d2 = -2.0 * self.axlambda ** 2 * np.log(self.min_ax_sensitivity)
        axon_contrib = []
        for xy, bundle in zip(xyret, axons):
            idx = np.argmin((bundle[:, 0] - xy[0]) ** 2 +
                            (bundle[:, 1] - xy[1]) ** 2)
            # Cut off the part of the fiber that goes beyond the soma:
            axon = np.flipud(bundle[0: idx + 1, :])
            # Add the exact location of the soma:
            axon = np.insert(axon, 0, xy, axis=0)
            # For every axon segment, calculate distance from soma by
            # summing up the individual distances between neighboring axon
            # segments (by "walking along the axon"):
            d2 = np.cumsum(np.diff(axon[:, 0], axis=0) ** 2 +
                           np.diff(axon[:, 1], axis=0) ** 2)
            idx_d2 = d2 < max_d2
            sensitivity = np.exp(-d2[idx_d2] / (2.0 * self.axlambda ** 2))
            idx_d2 = np.insert(idx_d2, 0, False)
            contrib = np.column_stack((axon[idx_d2, :], sensitivity))
            axon_contrib.append(contrib)
        return axon_contrib 
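The final column_stack in calc_axon_contribution appends the 1-D sensitivity vector as a third column to the (x, y) coordinates of the retained axon segments, one weight per row. A minimal illustration of that pattern with made-up numbers:

import numpy as np

xy = np.array([[0.0, 0.0],
               [1.0, 0.0],
               [1.0, 1.0]])            # made-up segment coordinates
sensitivity = np.array([0.9, 0.5, 0.1])
np.column_stack((xy, sensitivity))     # appends the weights as a third column
# array([[0. , 0. , 0.9],
#        [1. , 0. , 0.5],
#        [1. , 1. , 0.1]])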
Example #15
Source File: test_beyeler2019.py    From pulse2percept with BSD 3-Clause "New" or "Revised" License
def test_AxonMapModel_calc_axon_contribution(engine):
    model = AxonMapModel(xystep=2, engine=engine, n_axons=10,
                         xrange=(-20, 20), yrange=(-15, 15),
                         axons_range=(-30, 30))
    model.build()
    xyret = np.column_stack((model.spatial.grid.xret.ravel(),
                             model.spatial.grid.yret.ravel()))
    bundles = model.spatial.grow_axon_bundles()
    axons = model.spatial.find_closest_axon(bundles)
    contrib = model.spatial.calc_axon_contribution(axons)

    # Check lambda math:
    for ax, xy in zip(contrib, xyret):
        axon = np.insert(ax, 0, list(xy) + [0], axis=0)
        d2 = np.cumsum(np.diff(axon[:, 0], axis=0) ** 2 +
                       np.diff(axon[:, 1], axis=0) ** 2)
        sensitivity = np.exp(-d2 / (2.0 * model.spatial.axlambda ** 2))
        npt.assert_almost_equal(sensitivity, ax[:, 2]) 
Example #16
Source File: RL_brain.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        # print(s, s_.size)
        s = s.reshape(-1)
        s_ = s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
Example #17
Source File: RL_brain.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        # print(s, s_.size)
        s = s.reshape(-1)
        s_ = s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
Example #18
Source File: RL_brain.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def store_transition(self, s, a, r, s_):
        self.lo.acquire()

        s = s.reshape(-1)
        s_ = s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1
        self.lo.release()
        # print(index) 
Example #19
Source File: RL_brain2.py    From Reinforcement_Learning_for_Traffic_Light_Control with Apache License 2.0
def store_transition(self, s, a, r, s_):
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        # print(s, s_.size)
        s = s.reshape(-1)
        s_ = s_.reshape(-1)
        transition = np.hstack((s, [a, r], s_))
        #transition = np.column_stack((s, [a, r], s_))
        #transition = np.concatenate((s, [a, r], s_), axis=1)
        #transition = scipy.sparse.hstack([s, [a, r], s_]).toarray()

        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition

        self.memory_counter += 1 
Example #20
Source File: Tsuda 2018 - PMMA (LD model).py    From refractiveindex.info-scripts with GNU General Public License v3.0
def SaveYML(w_um, RefInd, filename, references='', comments=''):
    
    header = np.empty(9, dtype=object)
    header[0] = '# this file is part of refractiveindex.info database'
    header[1] = '# refractiveindex.info database is in the public domain'
    header[2] = '# copyright and related rights waived via CC0 1.0'
    header[3] = ''
    header[4] = 'REFERENCES:' + references
    header[5] = 'COMMENTS:' + comments
    header[6] = 'DATA:'
    header[7] = '  - type: tabulated nk'
    header[8] = '    data: |'
    
    export = np.column_stack((w_um, np.real(RefInd), np.imag(RefInd)))
    np.savetxt(filename, export, fmt='%4.2f %#.4g %#.3e', delimiter=' ', header='\n'.join(header), comments='',newline='\n        ')
    return

###############################################################################

## Wavelengths to sample ## 
Example #21
Source File: metric.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def update(self, labels, preds):
        """Updates the internal evaluation result.

        Parameters
        ----------
        labels : list of `NDArray`
            The labels of the data.

        preds : list of `NDArray`
            Predicted values.
        """
        mx.metric.check_label_shapes(labels, preds)

        for label, pred in zip(labels, preds):
            label = label.asnumpy()
            pred = pred.asnumpy()
            pred = np.column_stack((1 - pred, pred))

            label = label.ravel()
            num_examples = pred.shape[0]
            assert label.shape[0] == num_examples, (label.shape[0], num_examples)
            prob = pred[np.arange(num_examples, dtype=np.int64), np.int64(label)]
            self.sum_metric += (-np.log(prob + self.eps)).sum()
            self.num_inst += num_examples 
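Here column_stack turns a vector of positive-class probabilities into a two-column (negative, positive) probability matrix, so the loss can index the probability of the true class for every row. A small sketch of that reshaping with made-up values:

import numpy as np

pred = np.array([0.9, 0.2, 0.6])            # made-up positive-class probabilities
prob2 = np.column_stack((1 - pred, pred))   # column 0: P(label = 0), column 1: P(label = 1)
label = np.array([1, 0, 1])
prob2[np.arange(3), label]                  # probability assigned to the true class
# array([0.9, 0.8, 0.6])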
Example #22
Source File: so_mmga7.py    From pymoo with Apache License 2.0
def plot(algorithm):
        pop = algorithm.pop
        sc = Scatter(title=algorithm.n_gen)
        sc.add(curve(algorithm.problem), plot_type="line", color="black")
        sc.add(np.column_stack([pop.get("X"), pop.get("F")]), color="red")
        sc.do() 
Example #23
Source File: so_mmga.py    From pymoo with Apache License 2.0
def plot(algorithm):
        pop = algorithm.pop
        sc = Scatter(title=algorithm.n_gen)
        sc.add(curve(algorithm.problem), plot_type="line", color="black")
        sc.add(np.column_stack([pop.get("X"), pop.get("F")]), color="red")
        sc.do() 
Example #24
Source File: so_mmga3.py    From pymoo with Apache License 2.0
def plot(algorithm):
        pop = algorithm.pop
        sc = Scatter(title=algorithm.n_gen)
        sc.add(curve(algorithm.problem), plot_type="line", color="black")
        sc.add(np.column_stack([pop.get("X"), pop.get("F")]), color="red")
        sc.do() 
Example #25
Source File: test_algorithms.py    From pymoo with Apache License 2.0
def test_no_pareto_front_given(self):
        class ZDT1NoPF(ZDT):
            def _evaluate(self, x, out, *args, **kwargs):
                f1 = x[:, 0]
                g = 1 + 9.0 / (self.n_var - 1) * np.sum(x[:, 1:], axis=1)
                f2 = g * (1 - np.power((f1 / g), 0.5))
                out["F"] = np.column_stack([f1, f2])

        algorithm = NSGA2(pop_size=100, eliminate_duplicates=True)
        minimize(ZDT1NoPF(), algorithm, ('n_gen', 20), seed=1, verbose=True) 
Example #26
Source File: test_algorithms.py    From pymoo with Apache License 2.0
def test_no_feasible_solution_found(self):
        class MyProblem(Problem):

            def __init__(self):
                super().__init__(n_var=2,
                                 n_obj=1,
                                 n_constr=36,
                                 xl=np.array([0, 0]),
                                 xu=np.array([100, 100]))

            def _evaluate(self, x, out, *args, **kwargs):
                f1 = x[:, 0] + x[:, 1]
                out["F"] = np.column_stack([f1])
                out["G"] = np.ones(len(x))

        res = minimize(MyProblem(),
                       NSGA2(),
                       ("n_gen", 10),
                       seed=1)

        self.assertEqual(res.X, None)
        self.assertEqual(res.F, None)
        self.assertEqual(res.G, None)

        res = minimize(MyProblem(),
                       NSGA2(),
                       ("n_gen", 10),
                       seed=1,
                       verbose=True,
                       return_least_infeasible=True,
                       save_history=True)

        self.assertAlmostEqual(res.CV[0], 1.0) 
Example #27
Source File: io.py    From EarthSim with BSD 3-Clause "New" or "Revised" License
def save_shapefile(cdsdata, path, template):
    """
    Accepts bokeh ColumnDataSource data and saves it as a shapefile,
    using an existing template to determine the required schema.
    """
    collection = fiona.open(template)
    arrays = [np.column_stack([xs, ys]) for xs, ys in zip(cdsdata['xs'], cdsdata['ys'])]
    polys = gv.Polygons(arrays, crs=ccrs.GOOGLE_MERCATOR)
    projected = gv.operation.project_path(polys, projection=ccrs.PlateCarree())
    data = [list(map(tuple, arr)) for arr in projected.split(datatype='array')]
    shape_data = list(collection.items())[0][1]
    shape_data['geometry']['coordinates'] = data
    with fiona.open(path, 'w', collection.driver, collection.schema, collection.crs) as c:
        c.write(shape_data) 
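The column_stack comprehension in save_shapefile pairs each polygon's x and y vertex lists into an (n_vertices, 2) coordinate array before handing them to geoviews. Only that pairing step is sketched below, with illustrative data in the shape assumed for the bokeh ColumnDataSource; writing the shapefile itself still requires fiona and a real template:

import numpy as np

cdsdata = {'xs': [[0.0, 1.0, 1.0], [2.0, 3.0, 2.5]],
           'ys': [[0.0, 0.0, 1.0], [2.0, 2.0, 3.0]]}   # one list of xs and ys per polygon
arrays = [np.column_stack([xs, ys]) for xs, ys in zip(cdsdata['xs'], cdsdata['ys'])]
arrays[0].shape   # (3, 2): each polygon becomes an array of (x, y) vertices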
Example #28
Source File: pascalvoc_util.py    From cnn-levelset with MIT License
def load_annotations(self, img_names):
        y = [np.column_stack(self.get_class_bbox(img))
             for img
             in img_names[self.img_idx]]

        return np.array(y) 
Example #29
Source File: wfg.py    From pymoo with Apache License 2.0
def t1(x, n):
        ret = []
        for i in range(0, n - 1):
            aux = _reduction_weighted_sum_uniform(x[:, i + 1:])
            ret.append(_transformation_param_dependent(x[:, i], aux))
        return np.column_stack(ret) 
Example #30
Source File: wfg.py    From pymoo with Apache License 2.0
def t3(x, m, n, k):
        gap = k // (m - 1)
        t = [_reduction_non_sep(x[:, (m - 1) * gap: (m * gap)], gap) for m in range(1, m)]
        t.append(_reduction_non_sep(x[:, k:], n - k))
        return np.column_stack(t)