Python numpy.array() Examples

The following are 30 code examples of numpy.array(), drawn from open-source projects. You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module numpy, or try the search function.
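Before the project examples, a minimal sketch of the call itself: numpy.array() builds an ndarray from any (possibly nested) Python sequence, inferring the dtype unless one is given explicitly.

import numpy as np

a = np.array([1, 2, 3])                     # 1-D int array, shape (3,)
b = np.array([[1.0, 2.0], [3.0, 4.0]])      # 2-D array from nested lists, shape (2, 2)
c = np.array([1, 2, 3], dtype=np.float64)   # explicit element type
print(a.shape, b.shape, c.dtype)            # (3,) (2, 2) float64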
Example #1
Source File: display_methods.py    From indras_net with GNU General Public License v3.0
def get_arrays(self, varieties, var):
        x_array = np.array(varieties[var][X])
        y_array = np.array(varieties[var][Y])
        return (x_array, y_array) 
Example #2
Source File: display_methods.py    From indras_net with GNU General Public License v3.0
def create_lines(self, x, varieties):
        """
        Draw just the data portion.
        """
        lines = []
        for i, var in enumerate(varieties):
            self.legend.append(var)
            data = varieties[var]["data"]
            color = get_color(varieties[var], i)
            x_array = np.array(x)
            y_array = np.array(data)
            line = pd.DataFrame({"x": x_array,
                                 "y": y_array,
                                 "color": color,
                                 "var": var})
            lines.append(line)
        # DataFrame.append() was removed in pandas 2.0; collect frames and concat
        return pd.concat(lines, ignore_index=True, sort=False) if lines else pd.DataFrame() 
Example #3
Source File: estimator_utils.py    From EDeN with MIT License
def plot_stats(x=None, y=None, label=None, color='navy'):
    """plot_stats."""
    y = np.array(y)
    y0 = y[0]
    y1 = y[1]
    y2 = y[2]
    y3 = y[3]
    y4 = y[4]
    plt.fill_between(x, y3, y4, color=color, alpha=0.08)
    plt.fill_between(x, y1, y2, color=color, alpha=0.08)
    plt.plot(x, y0, '-', lw=2, color=color, label=label)
    plt.plot(x, y0,
             linestyle='None',
             markerfacecolor='white',
             markeredgecolor=color,
             marker='o',
             markeredgewidth=2,
             markersize=8) 
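plot_stats indexes y[0] through y[4], so the y argument must convert to an array with five rows: a central line plus two pairs of band boundaries. A hedged sketch of building such an input (the percentile layout below is an assumption for illustration, not taken from EDeN):

import numpy as np

x = np.arange(10)
samples = np.random.randn(100, 10)  # 100 noisy measurements per x position
# assumed row layout: median, 25th/75th, and 5th/95th percentiles
y = np.array([np.percentile(samples, q, axis=0) for q in (50, 25, 75, 5, 95)])
# plot_stats(x=x, y=y, label='demo')  # y[0] is the line, y[1:5] the shaded bands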
Example #4
Source File: TripletSampler.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def hard_negative_multilabel(self):
        """Hard Negative Sampling based on multilabel assumption

        Search the negative sample with largest distance (smallest sim)
        with the anchor within self._k negative samplels
        """
        # During early iterations of sampling, use random sampling instead
        if self._iteration <= self._n:
            return self.random_multilabel()

        anchor_class_id, negative_class_id = np.random.choice(
            list(self._index.keys()), 2)  # dict views are not sequences in Python 3
        anchor_id, positive_id = np.random.choice(
            self._index[anchor_class_id], 2)
        negative_ids = np.random.choice(
            self._index[negative_class_id], self._k)
        # calculate the negative with the smallest similarity to the anchor
        anchor_label = parse_label(self._labels[anchor_id])
        positive_label = parse_label(self._labels[positive_id])
        negative_labels = [parse_label(self._labels[negative_id]) for
                           negative_id in negative_ids]
        p_sim = intersect_sim(anchor_label, positive_label)
        n_sims = np.array(
            [intersect_sim(anchor_label, negative_label) for
             negative_label in negative_labels])
        min_sim_id = np.argmin(n_sims)
        negative_id = negative_ids[min_sim_id]
        n_sim = n_sims[min_sim_id]
        margin = p_sim - n_sim
        return (anchor_id, positive_id, negative_id, margin) 
Example #5
Source File: buyer_action_s.py    From indras_net with GNU General Public License v3.0
def matrix_reduction(agent):
    matrix, res = agent["strategy"]["data_collection"](agent)
    col = len(matrix[0])
    if col > len(matrix):  # not enough rows for matrix reduction
        return -1
    i = 0
    x = []
    while i < len(matrix) and len(x) == 0:
        A = numpy.array(matrix[i:i + col])
        b = numpy.array(res[i:i + col])
        try:
            x = numpy.linalg.solve(A, b)
        except numpy.linalg.LinAlgError:
            i += 1
    if len(x) == 0:
        return -1
    else:
        for emoji in agent["emoji_experienced"]:
            index = agent["emoji_experienced"][emoji]
            agent["emoji_scores"][emoji] = round(x[index][0], 2)
        agent["predicted_base_line"] = round(x[-1][0], 2)
        return 0 
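The heart of matrix_reduction is numpy.linalg.solve on square slices of the collected data, with LinAlgError as the signal to slide the window down. A self-contained sketch of that pattern (the values are illustrative only):

import numpy as np

A = np.array([[2.0, 1.0],
              [1.0, 3.0]])        # square coefficient matrix
b = np.array([[3.0], [5.0]])      # column of observed results, shape (2, 1)
try:
    x = np.linalg.solve(A, b)     # x has shape (2, 1), so x[i][0] is a scalar
except np.linalg.LinAlgError:
    x = np.array([])              # singular system: caller falls through, as above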
Example #6
Source File: display_methods.py    From indras_net with GNU General Public License v3.0
def get_arrays(self, varieties, var):
        x_array = np.array(varieties[var][X])
        y_array = np.array(varieties[var][Y])
        return (x_array, y_array) 
Example #7
Source File: estimator_utils.py    From EDeN with MIT License
def make_train_test_sets(pos_graphs, neg_graphs,
                         test_proportion=.3, random_state=2):
    """make_train_test_sets."""
    random.seed(random_state)
    random.shuffle(pos_graphs)
    random.shuffle(neg_graphs)
    pos_dim = len(pos_graphs)
    neg_dim = len(neg_graphs)
    tr_pos_graphs = pos_graphs[:-int(pos_dim * test_proportion)]
    te_pos_graphs = pos_graphs[-int(pos_dim * test_proportion):]
    tr_neg_graphs = neg_graphs[:-int(neg_dim * test_proportion)]
    te_neg_graphs = neg_graphs[-int(neg_dim * test_proportion):]
    tr_graphs = tr_pos_graphs + tr_neg_graphs
    te_graphs = te_pos_graphs + te_neg_graphs
    tr_targets = [1] * len(tr_pos_graphs) + [0] * len(tr_neg_graphs)
    te_targets = [1] * len(te_pos_graphs) + [0] * len(te_neg_graphs)
    tr_graphs, tr_targets = paired_shuffle(tr_graphs, tr_targets)
    te_graphs, te_targets = paired_shuffle(te_graphs, te_targets)
    return (tr_graphs, np.array(tr_targets)), (te_graphs, np.array(te_targets)) 
Example #8
Source File: __init__.py    From vergeml with MIT License
def load_predictions(env, nclasses):
    path = os.path.join(env.stats_dir(), "predictions.csv")

    if not os.path.exists(path):
        raise FileNotFoundError(path)  # FileExistsError would be the wrong exception here

    with open(path, newline='') as csvfile:
        y_score = []
        y_test = []
        csv_reader = csv.reader(csvfile, dialect="excel")
        for row in csv_reader:
            assert len(row) == nclasses * 2
            y_score.append(list(map(float, row[:nclasses])))
            y_test.append(list(map(float, row[nclasses:])))
        
        y_score = np.array(y_score)
        y_test = np.array(y_test)

        return y_test, y_score 
Example #9
Source File: sequence.py    From EDeN with MIT License
def _annotate_importance(self, seq, data_matrix):
        # compute distance from hyperplane as proxy of vertex importance
        if self.estimator is None:
            # if we do not provide an estimator then consider default margin of
            # 1 for all vertices
            scores = np.array([1] * data_matrix.shape[0])
        else:
            if hasattr(self.estimator, 'decision_function'):
                scores = self.estimator.decision_function(data_matrix)
            elif hasattr(self.estimator, 'predict_proba'):
                scores = self.estimator.predict_proba(data_matrix)
                scores = scores[:, -1]
        # compute the list of sparse vectors representation
        vec = []
        for i in range(data_matrix.shape[0]):
            vec.append(data_matrix.getrow(i))
        return scores, vec 
Example #10
Source File: problem.py    From fenics-topopt with MIT License
def lk(E=1.):
        """element stiffness matrix"""
        nu = 0.3
        k = np.array([0.5 - nu / 6., 0.125 + nu / 8., -0.25 - nu / 12.,
            -0.125 + 0.375 * nu, -0.25 + nu / 12., -0.125 - nu / 8., nu / 6.,
            0.125 - 0.375 * nu])
        KE = E / (1 - nu**2) * np.array([
            [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
            [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
            [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
            [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
            [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
            [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
            [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
            [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
        return KE 
Example #11
Source File: TripletDataLayer.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
        # generate using in-thread functions
        data = []
        p_data = []
        n_data = []
        label = []
        for i in range(self._batch_size):
            datum_ = self.get_a_datum()
            # print(len(datum_), ":".join([str(x.shape) for x in datum_]))
            data.append(datum_[0])
            p_data.append(datum_[1])
            n_data.append(datum_[2])
            if len(datum_) == 4:
                # datum and label / margin
                label.append(datum_[-1])
        batch = [np.array(data),
                 np.array(p_data),
                 np.array(n_data)]
        if len(label):
            label = np.array(label).reshape(self._batch_size, 1, 1, 1)
            batch.append(label)
        return batch 
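The stacking step relies on a numpy.array() behavior worth calling out: given a list of equally-shaped arrays, it stacks them along a new leading axis. A minimal sketch (the shapes are illustrative):

import numpy as np

datums = [np.zeros((3, 32, 32)) for _ in range(4)]  # four CHW "images"
batch = np.array(datums)                            # stacked to shape (4, 3, 32, 32)
print(batch.shape)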
Example #12
Source File: problem.py    From fenics-topopt with MIT License
def lk(E=1.):
        """element stiffness matrix"""
        nu = 0.3
        k = np.array([0.5 - nu / 6., 0.125 + nu / 8., -0.25 - nu / 12.,
            -0.125 + 0.375 * nu, -0.25 + nu / 12., -0.125 - nu / 8., nu / 6.,
            0.125 - 0.375 * nu])
        KE = E / (1 - nu**2) * np.array([
            [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
            [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
            [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
            [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
            [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
            [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
            [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
            [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
        return KE 
Example #13
Source File: TripletDataLayer.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
        if self._prefetch:
            # get mini-batch from prefetcher
            batch = self._conn.recv()
        else:
            # generate using in-thread functions
            data = []
            p_data = []
            n_data = []
            label = []
            for i in range(self._batch_size):
                datum_ = self.get_a_datum()
                data.append(datum_[0])
                p_data.append(datum_[1])
                n_data.append(datum_[2])
                if len(datum_) == 4:
                    # datum and label / margin
                    label.append(datum_[-1])
            batch = [np.array(data),
                     np.array(p_data),
                     np.array(n_data)]
            if len(label):
                label = np.array(label).reshape(self._batch_size, 1, 1, 1)
                batch.append(label)
        return batch 
Example #14
Source File: TripletDataLayer.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_a_datum(self):
        """Get a datum:

        Sampling -> decode images -> stack numpy array
        """
        sample = self._sampler.sample()
        if self._compressed:
            datum_ = [
                extract_sample(self._data[id], self._mean, self._resize) for
                id in sample[:3]]
        else:
            datum_ = [self._data[id] for id in sample[:3]]
        if len(sample) == 4:
            datum_.append(sample[-1])
        return datum_ 
Example #15
Source File: BasePythonDataLayer.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def get_next_minibatch(self):
        """Generate next mini-batch

        The return value is array of numpy array: [data, label]
        Reshape funcion will be called based on resutls of this function

        Needs to implement in each class
        """
        pass 
Example #16
Source File: BasePythonDataLayer.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def setup(self, bottom, top):
        layer_params = yaml.safe_load(self.param_str)  # yaml.load without a Loader is unsafe and rejected by PyYAML >= 6
        self._layer_params = layer_params
        # default batch_size = 256
        self._batch_size = int(layer_params.get('batch_size', 256))
        self._resize = layer_params.get('resize', -1)
        self._mean_file = layer_params.get('mean_file', None)
        self._source_type = layer_params.get('source_type', 'CSV')
        self._shuffle = layer_params.get('shuffle', False)
        # read image_mean from file and preload all data into memory
        # will read either file or array into self._mean
        self.set_mean()
        self.preload_db()
        self._compressed = self._layer_params.get('compressed', True)
        if not self._compressed:
            self.decompress_data() 
Example #17
Source File: DataManager.py    From Caffe-Python-Data-Layer with BSD 2-Clause "Simplified" License
def load_all(self):
        """The function to load all data and labels

        Give:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: numpy array, with each element is a string
        """
        start = time.time()
        print("Start Loading Data from BCF {}".format(
            'MEMORY' if self._bcf_mode == 'MEM' else 'FILE'))

        self._labels = np.loadtxt(self._label_fn).astype(str)

        if self._bcf.size() != self._labels.shape[0]:
            raise Exception("Number of samples in data"
                            "and labels are not equal")
        else:
            for idx in range(self._bcf.size()):
                datum_str = self._bcf.get(idx)
                self._data.append(datum_str)
        end = time.time()
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels 
Example #18
Source File: run_audio_attack.py    From Black-Box-Audio with MIT License
def __init__(self, input_wave_file, output_wave_file, target_phrase):
        self.pop_size = 100
        self.elite_size = 10
        self.mutation_p = 0.005
        self.noise_stdev = 40
        self.noise_threshold = 1
        self.mu = 0.9
        self.alpha = 0.001
        self.max_iters = 3000
        self.num_points_estimate = 100
        self.delta_for_gradient = 100
        self.delta_for_perturbation = 1e3
        self.input_audio = load_wav(input_wave_file).astype(np.float32)
        self.pop = np.expand_dims(self.input_audio, axis=0)
        self.pop = np.tile(self.pop, (self.pop_size, 1))
        self.output_wave_file = output_wave_file
        self.target_phrase = target_phrase
        self.funcs = self.setup_graph(self.pop, np.array([toks.index(x) for x in target_phrase])) 
Example #19
Source File: test_utils_times.py    From aospy with Apache License 2.0
def test_ensure_time_as_index_with_change():
    # Time bounds array doesn't index time initially, which gets fixed.
    arr = xr.DataArray([-93], dims=[TIME_STR], coords={TIME_STR: [3]})
    arr[TIME_STR].attrs['units'] = 'days since 2000-01-01 00:00:00'
    arr[TIME_STR].attrs['calendar'] = 'standard'
    ds = arr.to_dataset(name='a')
    ds.coords[TIME_WEIGHTS_STR] = xr.DataArray(
        [1], dims=[TIME_STR], coords={TIME_STR: arr[TIME_STR]}
    )
    ds.coords[TIME_BOUNDS_STR] = xr.DataArray(
        [[3.5, 4.5]], dims=[TIME_STR, BOUNDS_STR],
        coords={TIME_STR: arr[TIME_STR]}
    )
    ds = ds.isel(**{TIME_STR: 0})
    actual = ensure_time_as_index(ds)
    expected = arr.to_dataset(name='a')
    expected.coords[TIME_WEIGHTS_STR] = xr.DataArray(
        [1], dims=[TIME_STR], coords={TIME_STR: arr[TIME_STR]}
    )
    expected.coords[TIME_BOUNDS_STR] = xr.DataArray(
        [[3.5, 4.5]], dims=[TIME_STR, BOUNDS_STR],
        coords={TIME_STR: arr[TIME_STR]}
        )
    xr.testing.assert_identical(actual, expected) 
Example #20
Source File: ml.py    From EDeN with MIT License
def make_data_matrix(positive_data_matrix=None,
                     negative_data_matrix=None,
                     target=None):
    """make_data_matrix."""
    assert positive_data_matrix is not None, \
        'ERROR: expecting non-null positive_data_matrix'
    if negative_data_matrix is None:
        negative_data_matrix = positive_data_matrix.multiply(-1)
    if target is None and negative_data_matrix is not None:
        yp = [1] * positive_data_matrix.shape[0]
        yn = [-1] * negative_data_matrix.shape[0]
        y = np.array(yp + yn)
        data_matrix = vstack(
            [positive_data_matrix, negative_data_matrix], format="csr")
    if target is not None:
        data_matrix = positive_data_matrix
        y = target
    return data_matrix, y 
Example #21
Source File: graph_layout.py    From EDeN with MIT License
def _scale(self, init_pos):
        _min = -0.5
        _max = 0.5
        pos = dict()
        max_x = max([init_pos[id][0] for id in init_pos])
        min_x = min([init_pos[id][0] for id in init_pos])
        max_y = max([init_pos[id][1] for id in init_pos])
        min_y = min([init_pos[id][1] for id in init_pos])
        for id in init_pos:
            x = init_pos[id][0]
            y = init_pos[id][1]
            # min-max normalize to [0, 1]
            x = (x - min_x) / (max_x - min_x)
            y = (y - min_y) / (max_y - min_y)
            # rescale
            x = x * (_max - _min) + _min
            y = y * (_max - _min) + _min
            pos[id] = np.array([x, y])
        return pos 
Example #22
Source File: test_utils_times.py    From aospy with Apache License 2.0
def test_add_uniform_time_weights():
    time = np.array([15, 46, 74])
    data = np.zeros((3))
    ds = xr.DataArray(data,
                      coords=[time],
                      dims=[TIME_STR],
                      name='a').to_dataset()
    units_str = 'days since 2000-01-01 00:00:00'
    cal_str = 'noleap'
    ds[TIME_STR].attrs['units'] = units_str
    ds[TIME_STR].attrs['calendar'] = cal_str

    with pytest.raises(KeyError):
        ds[TIME_WEIGHTS_STR]

    ds = add_uniform_time_weights(ds)
    time_weights_expected = xr.DataArray(
        [1, 1, 1], coords=ds[TIME_STR].coords, name=TIME_WEIGHTS_STR)
    time_weights_expected.attrs['units'] = 'days'
    assert ds[TIME_WEIGHTS_STR].identical(time_weights_expected) 
Example #23
Source File: link_prediction.py    From EDeN with MIT License
def make_train_test_set(graph, radius,
                        test_proportion=.3, ratio_neg_to_pos=10):
    """make_train_test_set."""
    pos = [(u, v) for u, v in graph.edges()]
    neg = [(u, v) for u, v in nx.non_edges(graph)]
    random.shuffle(pos)
    random.shuffle(neg)
    pos_dim = len(pos)
    neg_dim = len(neg)
    max_n_neg = min(pos_dim * ratio_neg_to_pos, neg_dim)
    neg = neg[:max_n_neg]
    neg_dim = len(neg)
    tr_pos = pos[:-int(pos_dim * test_proportion)]
    te_pos = pos[-int(pos_dim * test_proportion):]
    tr_neg = neg[:-int(neg_dim * test_proportion)]
    te_neg = neg[-int(neg_dim * test_proportion):]

    # remove edges
    tr_graph = graph.copy()
    tr_graph.remove_edges_from(te_pos)
    tr_pos_graphs = list(_make_subgraph_set(tr_graph, radius, tr_pos))
    tr_neg_graphs = list(_make_subgraph_set(tr_graph, radius, tr_neg))
    te_pos_graphs = list(_make_subgraph_set(tr_graph, radius, te_pos))
    te_neg_graphs = list(_make_subgraph_set(tr_graph, radius, te_neg))

    tr_graphs = tr_pos_graphs + tr_neg_graphs
    te_graphs = te_pos_graphs + te_neg_graphs
    tr_targets = [1] * len(tr_pos_graphs) + [0] * len(tr_neg_graphs)
    te_targets = [1] * len(te_pos_graphs) + [0] * len(te_neg_graphs)
    tr_graphs, tr_targets = paired_shuffle(tr_graphs, tr_targets)
    te_graphs, te_targets = paired_shuffle(te_graphs, te_targets)

    return (tr_graphs, np.array(tr_targets)), (te_graphs, np.array(te_targets)) 
Example #24
Source File: ml.py    From EDeN with MIT License
def load_target(name):
    """Return a numpy array of integers to be used as target vector.

    Parameters
    ----------
    name : string
        A pointer to the data source.

    """
    target = [y.strip() for y in read(name) if y]
    return np.array(target).astype(int) 
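The final conversion chain, np.array(target).astype(int), first builds an array of strings and then casts each element to an integer; a minimal sketch:

import numpy as np

labels = ['1', '0', '1']
target = np.array(labels).astype(int)  # array([1, 0, 1])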
Example #25
Source File: boundary_conditions.py    From fenics-topopt with MIT License
def get_fixed_nodes(self):
        # Return a list of fixed nodes for the problem
        dofs = np.arange(2 * (self.nelx + 1) * (self.nely + 1))
        fixed = np.union1d(dofs[0:2 * (self.nely + 1):2],
            np.array([2 * (self.nelx + 1) * (self.nely + 1) - 1]))
        return fixed 
Example #26
Source File: von_mises_stress.py    From fenics-topopt with MIT License
def B(side):
        """ Precomputed strain-displacement matrix. """
        n = -0.5 / side
        p = 0.5 / side
        return numpy.array([[p, 0, n, 0, n, 0, p, 0],
                            [0, p, 0, p, 0, n, 0, n],
                            [p, p, p, n, n, n, n, p]]) 
Example #27
Source File: graph.py    From EDeN with MIT License
def transform(self, graphs):
        """Transform a list of networkx graphs into a sparse matrix.

        Parameters
        ----------
        graphs : list[graphs]
            The input list of networkx graphs.

        Returns
        -------
        data_matrix : array-like, shape = [n_samples, n_features]
            Vector representation of input graphs.

        >>> # transforming the same graph
        >>> import networkx as nx
        >>> def get_path_graph(length=4):
        ...     g = nx.path_graph(length)
        ...     for n,d in g.nodes(data=True):
        ...         d['label'] = 'C'
        ...     for a,b,d in g.edges(data=True):
        ...         d['label'] = '1'
        ...     return g
        >>> g = get_path_graph(4)
        >>> g2 = get_path_graph(5)
        >>> g2.remove_node(4)
        >>> v = Vectorizer()
        >>> def vec_to_hash(vec):
        ...     return hash(tuple(vec.data + vec.indices))
        >>> vec_to_hash(v.transform([g])) == vec_to_hash(v.transform([g2]))
        True
        """
        instance_id = None
        feature_rows = []
        for instance_id, graph in enumerate(graphs):
            self._test_goodness(graph)
            feature_rows.append(self._transform(graph))
        if instance_id is None:
            raise Exception('ERROR: something went wrong:\
                no graphs are present in current iterator.')
        data_matrix = self._convert_dict_to_sparse_matrix(feature_rows)
        return data_matrix 
Example #28
Source File: problem.py    From fenics-topopt with MIT License
def build_indices(self, nelx, nely):
        """ FE: Build the index vectors for the for coo matrix format. """
        self.KE = self.lk()
        self.edofMat = np.zeros((nelx * nely, 8), dtype=int)
        for elx in range(nelx):
            for ely in range(nely):
                el = ely + elx * nely
                n1 = (nely + 1) * elx + ely
                n2 = (nely + 1) * (elx + 1) + ely
                self.edofMat[el, :] = np.array([2 * n1 + 2, 2 * n1 + 3,
                    2 * n2 + 2, 2 * n2 + 3, 2 * n2, 2 * n2 + 1, 2 * n1,
                    2 * n1 + 1])
        # Construct the index pointers for the coo format
        self.iK = np.kron(self.edofMat, np.ones((8, 1))).flatten()
        self.jK = np.kron(self.edofMat, np.ones((1, 8))).flatten() 
Example #29
Source File: problem.py    From fenics-topopt with MIT License
def compute_displacements(self, xPhys):
        # Setup and solve FE problem
        sK = ((self.KE.flatten()[np.newaxis]).T * (
            self.Emin + (xPhys)**self.penal *
            (self.Emax - self.Emin))).flatten(order='F')
        K = scipy.sparse.coo_matrix((sK, (self.iK, self.jK)),
            shape=(self.ndof, self.ndof)).tocsc()
        # Remove constrained dofs from matrix and convert to coo
        K = deleterowcol(K, self.fixed, self.fixed).tocoo()
        # Solve system
        K1 = cvxopt.spmatrix(K.data, K.row.astype(int), K.col.astype(int))  # np.int was removed in NumPy 1.24; use builtin int
        B = cvxopt.matrix(self.f[self.free, :])
        cvxopt.cholmod.linsolve(K1, B)
        self.u[self.free, :] = np.array(B)[:, :] 
Example #30
Source File: von_mises_stress.py    From fenics-topopt with MIT License
def E(nu):
        """ Precomputed constitutive matrix. """
        return numpy.array([[1, nu, 0],
                            [nu, 1, 0],
                            [0, 0, (1 - nu) / 2.]]) / (1 - nu**2)
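For reference, evaluating E at the Poisson ratio used elsewhere in these examples (nu = 0.3) gives the plane-stress constitutive matrix; a quick check:

import numpy as np

nu = 0.3
E_mat = np.array([[1, nu, 0],
                  [nu, 1, 0],
                  [0, 0, (1 - nu) / 2.]]) / (1 - nu**2)
# approximately:
# [[1.0989, 0.3297, 0.    ],
#  [0.3297, 1.0989, 0.    ],
#  [0.    , 0.    , 0.3846]]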