Python numpy.average() Examples

The following are 30 code examples of numpy.average(), collected from open-source projects. The project and source file for each example are listed above it. You may also want to check out the other available functions and classes of the numpy module.
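As a quick refresher before the examples: numpy.average() behaves like numpy.mean(), but it also accepts per-element weights and can return the sum of the weights. A minimal sketch (values chosen only for illustration):

import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0])
weights = np.array([4.0, 3.0, 2.0, 1.0])

print(np.average(data))                   # unweighted: 2.5, same as np.mean(data)
print(np.average(data, weights=weights))  # weighted: sum(w * x) / sum(w) = 2.0
avg, wsum = np.average(data, weights=weights, returned=True)
print(avg, wsum)                          # 2.0 10.0

# the axis argument averages along one dimension of a 2-D array
m = np.arange(6).reshape(2, 3)
print(np.average(m, axis=0))              # column means: [1.5 2.5 3.5]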
Example #1
Source File: bottom_up.py    From Dispersion-based-Clustering with MIT License
def linkage_calculation(self, dist, labels, penalty):
        cluster_num = len(self.label_to_images.keys())
        start_index = np.zeros(cluster_num, dtype=int)
        end_index = np.zeros(cluster_num, dtype=int)
        counts = 0
        i = 0
        for key in sorted(self.label_to_images.keys()):
            start_index[i] = counts
            end_index[i] = counts + len(self.label_to_images[key])
            counts = end_index[i]
            i = i + 1
        dist = dist.numpy()
        linkages = np.zeros([cluster_num, cluster_num])
        for i in range(cluster_num):
            for j in range(i, cluster_num):
                linkage = dist[start_index[i]:end_index[i], start_index[j]:end_index[j]]
                linkages[i, j] = np.average(linkage)

        # mirror the upper triangle into the lower triangle, counting the diagonal once
        linkages = linkages.T + linkages - linkages * np.eye(cluster_num)
        intra = linkages.diagonal()
        penalized_linkages = linkages + penalty * ((intra * np.ones_like(linkages)).T + intra).T
        return linkages, penalized_linkages
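The line linkages.T + linkages - linkages * np.eye(cluster_num) mirrors the averaged upper triangle into the lower triangle while counting the diagonal only once. A small standalone check of that identity (toy matrix, not from the project):

import numpy as np

n = 3
upper = np.triu(np.arange(1.0, 10.0).reshape(n, n))  # upper triangle incl. diagonal
full = upper.T + upper - upper * np.eye(n)

assert np.allclose(full, full.T)                   # result is symmetric
assert np.allclose(np.diag(full), np.diag(upper))  # diagonal is unchanged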
Example #2
Source File: forest_distributed_decision_trees.py    From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for k, value in inp:
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, col_type in enumerate(state["X_meta"]):
            if col_type == "c":
                # continuous feature: fill in with the column mean
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                # discrete feature: fill in with the column mode
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example #3
Source File: distributed_random_forest.py    From discomll with Apache License 2.0
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for k, value in inp:
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, col_type in enumerate(state["X_meta"]):
            if col_type == "c":
                # continuous feature: fill in with the column mean
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                # discrete feature: fill in with the column mode
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Example #4
Source File: v1_validation.py    From Attentive-Filtering-Network with MIT License
def utt_scores(scores, scp, utt2label):
    """return predictions and labels per utterance
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        preds.append(avg_scores)
        labels.append(utt2label[key])

    return np.array(preds), np.array(labels) 
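The loop above turns contiguous blocks of frame scores into one averaged score per utterance. Assuming the per-utterance frame counts are known, the same pooling looks like this, with np.add.reduceat as a vectorized alternative (all numbers made up):

import numpy as np

scores = np.array([0.9, 0.8, 0.7, 0.2, 0.4])  # frame scores
lengths = [3, 2]                              # frames per utterance

# loop version, as in the example above
preds, idx = [], 0
for n in lengths:
    preds.append(np.average(scores[idx:idx + n]))
    idx += n
print(preds)  # ~[0.8, 0.3]

# vectorized alternative: segment sums divided by segment lengths
starts = np.cumsum([0] + lengths[:-1])
print(np.add.reduceat(scores, starts) / np.array(lengths))  # ~[0.8 0.3]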
Example #5
Source File: v3_validation.py    From Attentive-Filtering-Network with MIT License
def utt_scores(scores, scp, utt2label):
    """return predictions and labels per utterance
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        preds.append(avg_scores)
        labels.append(utt2label[key])

    return np.array(preds), np.array(labels) 
Example #6
Source File: v1_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader, threshold=0.5):
    model.eval()
    loss = 0
    correct = 0
    scores  = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, reduction='sum')
            pred = output > threshold
            correct += pred.byte().eq(target.byte()).sum().item() # not really meaningful

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores, correct 
Example #7
Source File: v3_validation.py    From Attentive-Filtering-Network with MIT License
def compute_loss(model, device, data_loader):
    model.eval()
    loss = 0
    correct = 0
    scores  = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            target = target.view(-1,1).float()
            #output, hidden = model(data, None)
            output = model(data)
            loss += F.binary_cross_entropy(output, target, reduction='sum')

            scores.append(output.data.cpu().numpy())

    loss /= len(data_loader.dataset) # average loss
    scores = np.vstack(scores) # scores per frame

    return loss, scores 
Example #8
Source File: v1_prediction.py    From Attentive-Filtering-Network with MIT License
def compute_utt_eer(scores, scp, utt2label, threshold):
    """utterance-based eer
    """
    utt2len   = ako.read_key_len(scp)
    utt2label = ako.read_key_label(utt2label)
    key_list  = ako.read_all_key(scp)

    preds, labels = [], []
    idx = 0
    for key in key_list:
        frames_per_utt = utt2len[key]
        avg_scores = np.average(scores[idx:idx+frames_per_utt])
        idx = idx + frames_per_utt
        if avg_scores < threshold:
            preds.append(0)
        else:
            preds.append(1)
        labels.append(utt2label[key])

    eer = compute_eer(labels, preds)
    confuse_mat = compute_confuse(labels, preds)
    return eer, confuse_mat 
Example #9
Source File: noduleCADEvaluationLUNA16.py    From DeepLung with GNU General Public License v3.0
def compute_mean_ci(interp_sens, confidence=0.95):
    sens_mean = np.zeros(interp_sens.shape[1], dtype='float32')
    sens_lb   = np.zeros(interp_sens.shape[1], dtype='float32')
    sens_up   = np.zeros(interp_sens.shape[1], dtype='float32')

    Pz = (1.0 - confidence) / 2.0
    print(interp_sens.shape)
    for i in range(interp_sens.shape[1]):
        # get sorted vector
        vec = interp_sens[:,i]
        vec.sort()

        sens_mean[i] = np.average(vec)
        sens_lb[i] = vec[int(math.floor(Pz*len(vec)))]
        sens_up[i] = vec[int(math.floor((1.0-Pz)*len(vec)))]

    return sens_mean,sens_lb,sens_up 
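compute_mean_ci() takes the empirical Pz and 1 - Pz quantiles of each sorted column as confidence bounds. On reasonably large samples, np.percentile gives closely matching bounds; a sketch on random data (not the LUNA16 inputs):

import math
import numpy as np

rng = np.random.default_rng(0)
vec = np.sort(rng.normal(size=1000))

confidence = 0.95
Pz = (1.0 - confidence) / 2.0

lb = vec[int(math.floor(Pz * len(vec)))]
ub = vec[int(math.floor((1.0 - Pz) * len(vec)))]

print(lb, np.percentile(vec, 100 * Pz))          # lower bounds, nearly equal
print(ub, np.percentile(vec, 100 * (1.0 - Pz)))  # upper bounds, nearly equal
print(np.average(vec))                           # the point estimate (sens_mean)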
Example #10
Source File: bottom_up.py    From Dispersion-based-Clustering with MIT License
def generate_average_feature(self, labels):
        # extract features/classifiers
        u_feas, fcs = self.get_feature(self.u_data)  # dims: 2048, 1024

        # group images of the same cluster:
        # label_to_images maps a cluster label to the indices of its images,
        # so u_data[label_to_images[key]] selects the images of cluster `key`
        label_to_images = {}
        for idx, l in enumerate(labels):
            label_to_images[l] = label_to_images.get(l, []) + [idx]
        self.label_to_images = label_to_images

        # re-arrange the u_data indices into a label-sorted index array;
        # u_data[sort_image_by_label] orders the data by class label
        sort_image_by_label = list(itertools.chain.from_iterable(
            [label_to_images[key] for key in sorted(label_to_images.keys())]))

        # calculate the average feature/classifier of each cluster
        feature_avg = np.zeros((len(label_to_images), len(u_feas[0])))
        fc_avg = np.zeros((len(label_to_images), len(fcs[0])))
        for l in label_to_images:
            feas = u_feas[label_to_images[l]]
            feature_avg[l] = np.mean(feas, axis=0)
            fc_avg[l] = np.mean(fcs[label_to_images[l]], axis=0)
        return u_feas, feature_avg, label_to_images, fc_avg  # [m 2048], [c 2048], {}, [c 1024]
Example #11
Source File: test_optimization_methods.py    From simnibs with GNU General Public License v3.0
def test_2_targets_field_component(self, optimization_variables_avg):
        l, Q, A = optimization_variables_avg
        l2 = l[::-1]
        l = np.vstack([l, l2])
        m = 2e-3
        m1 = 4e-3
        x = optimization_methods.optimize_field_component(l, max_el_current=m,
                                                          max_total_current=m1)

        l_avg = np.average(l, axis=0)
        x_sp = optimize_comp(l_avg, np.ones_like(l2), max_el_current=m, max_total_current=m1)

        assert np.linalg.norm(x, 1) <= 2 * m1 + 1e-4
        assert np.all(np.abs(x) <= m + 1e-6)
        assert np.isclose(l_avg.dot(x), l_avg.dot(x_sp),
                          rtol=1e-4, atol=1e-4)
        assert np.isclose(np.sum(x), 0) 
Example #12
Source File: ensemble_cpu.py    From kaggle-carvana-2017 with MIT License
def ensemble_image(files, dirs, ensembling_dir, strategy):
    for file in files:
        images = []
        for dir in dirs:
            file_path = os.path.join(dir, file)
            if os.path.exists(file_path):
                images.append(imread(file_path, mode='L'))
        images = np.array(images)

        if strategy == 'average':
            ensembled = average_strategy(images)
        elif strategy == 'hard_voting':
            ensembled = hard_voting(images)
        else:
            raise ValueError('Unknown ensembling strategy')
        imsave(os.path.join(ensembling_dir, file), ensembled) 
Example #13
Source File: optimization_methods.py    From simnibs with GNU General Public License v3.0
def _lp_variables(l, target_mean, max_total_current, max_el_current):
        n = l.shape[1]
        if max_el_current is None and max_total_current is None:
            raise ValueError(
                'max_el_current and max_total_current cannot be simultaneously None')
        if max_total_current is not None:
            A_ub = [np.ones((1, 2 * n))]
            b_ub = [2 * max_total_current]
        else:
            A_ub = []
            b_ub = []
        #Constraint on target intensity
        l_ = np.hstack([l, -l])
        # the LP will maximize the average of all targets, and limit the electric field
        # at each individual target
        l_avg = np.average(l_, axis=0)
        A_ub = np.vstack(A_ub + [l_])
        b_ub = np.hstack(b_ub + [target_mean])
        A_eq = np.hstack([np.ones((1, n)), -np.ones((1, n))])
        b_eq = np.array([0.])
        bounds = (0, max_el_current)
        return l_avg, A_ub, b_ub, A_eq, b_eq, bounds 
Example #14
Source File: main.py    From ConvLab with MIT License
def pprint(self, name, window=None, prefix=None):
        str_losses = []
        for key, loss in self.losses.items():
            if loss is None:
                continue
            aver_loss = np.average(loss) if window is None else np.average(loss[-window:])
            if 'nll' in key:
                str_losses.append('{} PPL {:.3f}'.format(key, np.exp(aver_loss)))
            else:
                str_losses.append('{} {:.3f}'.format(key, aver_loss))

        if prefix:
            return '{}: {} {}'.format(prefix, name, ' '.join(str_losses))
        else:
            return '{} {}'.format(name, ' '.join(str_losses)) 
Example #15
Source File: main.py    From ConvLab with MIT License
def validate_rl(dialog_eval, ctx_gen, num_episode=200):
    print("Validate on training goals for {} episode".format(num_episode))
    reward_list = []
    agree_list = []
    sent_metric = UniquenessSentMetric()
    word_metric = UniquenessWordMetric()
    for _ in range(num_episode):
        ctxs = ctx_gen.sample()
        conv, agree, rewards = dialog_eval.run(ctxs)
        true_reward = rewards[0] if agree else 0
        reward_list.append(true_reward)
        agree_list.append(float(agree if agree is not None else 0.0))
        for turn in conv:
            if turn[0] == 'System':
                sent_metric.record(turn[1])
                word_metric.record(turn[1])
    results = {'sys_rew': np.average(reward_list),
               'avg_agree': np.average(agree_list),
               'sys_sent_unique': sent_metric.value(),
               'sys_unique': word_metric.value()}
    return results 
Example #16
Source File: record.py    From ConvLab with MIT License
def record_rl_task(n_epsd, dialog, goal_gen, rl_f):
    conv_list = []
    reward_list = []
    sent_metric = UniquenessSentMetric()
    word_metric = UniquenessWordMetric()
    print("Begin RL testing")
    cnt = 0
    for g_key, goal in goal_gen.iter(1):
        cnt += 1
        conv, success = dialog.run(g_key, goal)
        true_reward = success
        reward_list.append(true_reward)
        conv_list.append(conv)
        for turn in conv:
            if turn[0] == 'System':
                sent_metric.record(turn[1])
                word_metric.record(turn[1])

    # json.dump(conv_list, text_f, indent=4)
    aver_reward = np.average(reward_list)
    unique_sent_num = sent_metric.value()
    unique_word_num = word_metric.value()
    rl_f.write('{}\t{}\t{}\t{}\n'.format(n_epsd, aver_reward, unique_sent_num, unique_word_num))
    rl_f.flush()
    print("End RL testing") 
Example #17
Source File: fem.py    From simnibs with GNU General Public License v3.0
def _sim_tdcs_pair(mesh, cond, ref_electrode, el_surf, el_c, units, solver_options):
    logger.info('Simulating electrode pair {0} - {1}'.format(
        ref_electrode, el_surf))
    S = FEMSystem.tdcs(mesh, cond, [ref_electrode, el_surf], [0., 1.],
                       solver_options=solver_options)
    v = S.solve()
    v = mesh_io.NodeData(v, name='v', mesh=mesh)
    flux = np.array([
        _calc_flux_electrodes(v, cond,
                              [el_surf - 1000, el_surf - 600,
                               el_surf - 2000, el_surf - 1600],
                              units=units),
        _calc_flux_electrodes(v, cond,
                              [ref_electrode - 1000, ref_electrode - 600,
                               ref_electrode - 2000, ref_electrode - 1600],
                              units=units)])
    current = np.average(np.abs(flux))
    error = np.abs(np.abs(flux[0]) - np.abs(flux[1])) / current
    logger.info('Estimated current calibration error: {0:.1%}'.format(error))
    return el_c / current * v.value 
Example #18
Source File: optimize_ortho.py    From pyqmc with MIT License
def dist_correlated_sample(wfs, configs, *args, client, npartitions=None, **kwargs):

    if npartitions is None:
        npartitions = sum(client.nthreads().values())

    coord = configs.split(npartitions)
    allruns = []
    for nodeconfigs in coord:
        allruns.append(
            client.submit(
                correlated_sample,
                wfs,
                nodeconfigs,
                *args,
                **kwargs
            )
        )

    allresults = [r.result() for r in allruns]
    df = {}
    for k in allresults[0].keys():
        df[k] = np.array([x[k] for x in allresults])
    confweight = np.array([len(c.configs) for c in coord], dtype=float)
    confweight /= confweight.mean()
    rhowt = np.einsum("i...,i->i...", df["rhoprime"], confweight)
    wt = df["weight"] * rhowt
    df["total"] = np.average(df["total"], weights=wt, axis=0)
    df["overlap"] = np.average(df["overlap"], weights=confweight, axis=0)
    df["weight"] = np.average(df["weight"], weights=rhowt, axis=0)

    # df["weight"] = np.mean(df["weight"], axis=0)
    df["rhoprime"] = np.mean(rhowt, axis=0)
    return df 
Example #19
Source File: accumulators.py    From pyqmc with MIT License
def avg(self, configs, wf, weights=None):
        """
        Compute (weighted) average
        """

        nconf = configs.configs.shape[0]
        if weights is None:
            weights = np.ones(nconf)
        weights = weights / np.sum(weights)

        pgrad = wf.pgradient()
        den = self.enacc(configs, wf)
        energy = den["total"]
        dp = self.transform.serialize_gradients(pgrad)

        node_cut, f = self._node_regr(configs, wf)

        dp_regularized = dp * f[:, np.newaxis]

        d = {k: np.average(it, weights=weights, axis=0) for k, it in den.items()}
        d["dpH"] = np.einsum("i,ij->j", energy, weights[:, np.newaxis] * dp_regularized)
        d["dppsi"] = np.average(dp_regularized, weights=weights, axis=0)
        d["dpidpj"] = np.einsum(
            "ij,ik->jk", dp, weights[:, np.newaxis] * dp_regularized
        )

        return d 
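Several of the reductions above combine weights= with axis=, which normalizes by the sum of the weights along that axis. A small sketch of that behavior on toy arrays:

import numpy as np

values = np.array([[1.0, 10.0],
                   [3.0, 30.0],
                   [5.0, 50.0]])
weights = np.array([1.0, 1.0, 2.0])  # one weight per row

avg = np.average(values, weights=weights, axis=0)
print(avg)  # [ 3.5 35. ], i.e. (1*1 + 1*3 + 2*5) / 4 per column

# equivalent explicit form
print(weights @ values / weights.sum())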
Example #20
Source File: test_vmc.py    From pyqmc with MIT License
def test_accumulator():
    """ Tests that the accumulator gets inserted into the data output correctly.
    """
    import pandas as pd
    from pyqmc.mc import vmc, initial_guess
    from pyscf import gto, scf
    from pyqmc.energy import energy
    from pyqmc.slater import PySCFSlater
    from pyqmc.accumulators import EnergyAccumulator

    mol = gto.M(
        atom="Li 0. 0. 0.; Li 0. 0. 1.5", basis="cc-pvtz", unit="bohr", verbose=5
    )
    mf = scf.RHF(mol).run()
    nconf = 5000
    wf = PySCFSlater(mol, mf)
    coords = initial_guess(mol, nconf)

    df, coords = vmc(
        wf, coords, nsteps=30, accumulators={"energy": EnergyAccumulator(mol)}
    )
    df = pd.DataFrame(df)
    eaccum = EnergyAccumulator(mol)
    eaccum_energy = eaccum(coords, wf)
    print(df["energytotal"][29] == np.average(eaccum_energy["total"]))

    assert df["energytotal"][29] == np.average(eaccum_energy["total"]) 
Example #21
Source File: test_mesh_io.py    From simnibs with GNU General Public License v3.0
def sphere3_baricenters(sphere3_msh):
    baricenters = np.zeros((sphere3_msh.elm.nr, 3), dtype=float)
    th_indexes = np.where(sphere3_msh.elm.elm_type == 4)[0]
    baricenters[th_indexes] = np.average(
        sphere3_msh.nodes.node_coord[
            sphere3_msh.elm.node_number_list[th_indexes, :4] - 1], axis=1)

    tr_indexes = np.where(sphere3_msh.elm.elm_type == 2)[0]
    baricenters[tr_indexes] = np.average(
        sphere3_msh.nodes.node_coord[
            sphere3_msh.elm.node_number_list[tr_indexes, :3] - 1], axis=1)
    return baricenters 
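The baricenter computations in the simnibs examples average node coordinates along axis=1, i.e. over each element's vertices. A minimal standalone version for a single triangle (coordinates invented):

import numpy as np

# element node coordinates with shape (n_elements, n_vertices, 3)
tri_nodes = np.array([[[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]]])

baricenters = np.average(tri_nodes, axis=1)  # average over the vertex axis
print(baricenters)  # [[0.333... 0.333... 0.]]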
Example #22
Source File: test_transformations.py    From simnibs with GNU General Public License v3.0
def test_surf2surf(sphere3_msh):
    in_surf = sphere3_msh.crop_mesh(1005)
    in_nodes = in_surf.nodes.node_coord
    in_nodes /= np.average(np.linalg.norm(in_nodes, axis=1))
    field = in_nodes[:, 0]
    out_surf = sphere3_msh.crop_mesh(1004)
    out_nodes = out_surf.nodes.node_coord
    out_nodes /= np.average(np.linalg.norm(out_nodes, axis=1))
    out_field, _ = transformations._surf2surf(field, in_surf, out_surf)
    assert np.allclose(out_field, out_nodes[:, 0], atol=1e-1) 
Example #23
Source File: test_electrode_placement.py    From simnibs with GNU General Public License v3.0
def test_baricentric(self):
        tri = np.array([[0., 0.], [0., 1.], [np.sqrt(3) / 2., 1./2.]])
        p = np.average(tri, axis=0)
        bari = electrode_placement._baricentric_coordinates(p, tri)
        assert np.allclose(bari, 1./3.) 
Example #24
Source File: optimization_methods.py    From simnibs with GNU General Public License v3.0
def optimize_field_component(l, max_total_current=None, max_el_current=None):
    ''' Optimize the intensity of the field in the given direction without regard to
    focality
    
    Parameters
    ------------
        l: np.ndarray
            Linear objective, obtained from target_matrices.
            If "l" is muti-targeted, optimizes the average of the targets
        max_total_current: float (optional)
            Maximal current flow though all electrodes. Default: No maximum
        max_el_current: float (optional)
            Maximal current flow though each electrode. Default: No maximum

    Returns
    --------
        x: np.ndarray
            Optimal electrode currents
    '''
    if max_total_current is None and max_el_current is None:
        raise ValueError('Please define a maximal total current or maximal electrode ' +
                         'current')

    l = np.average(np.atleast_2d(l), axis=0)
    n = l.shape[0]

    if max_total_current is not None:
        A_ub = np.ones((1, 2 * n))
        b_ub = np.array([2 * max_total_current])
    else:
        A_ub = None
        b_ub = None

    l_ = np.hstack([l, -l])
    A_eq = np.hstack([np.ones((1, n)), -np.ones((1, n))])
    b_eq = np.array([0.])
    sol = scipy.optimize.linprog(-l_, A_ub, b_ub, A_eq, b_eq,
                                 bounds=(0, max_el_current))
    x_ = sol.x

    return x_[:n] - x_[n:] 
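Both simnibs LP snippets split the currents into positive and negative parts (x = x_plus - x_minus) so that the 1-norm bound on the total current becomes a linear constraint. A tiny self-contained check of that reconstruction, assuming scipy is available (toy objective and limits):

import numpy as np
import scipy.optimize

l = np.array([1.0, -2.0, 0.5])  # toy linear objective
n = l.shape[0]
max_el_current = 1.0
max_total_current = 1.5

l_ = np.hstack([l, -l])                # objective over [x_plus, x_minus]
A_ub = np.ones((1, 2 * n))             # sum(x_plus) + sum(x_minus) bounds ||x||_1
b_ub = np.array([2 * max_total_current])
A_eq = np.hstack([np.ones((1, n)), -np.ones((1, n))])  # currents sum to zero
b_eq = np.array([0.0])

sol = scipy.optimize.linprog(-l_, A_ub, b_ub, A_eq, b_eq,
                             bounds=(0, max_el_current))
x = sol.x[:n] - sol.x[n:]
print(x, np.abs(x).sum())              # optimal currents and their 1-norm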
Example #25
Source File: optimization_methods.py    From simnibs with GNU General Public License v3.0
def energy_matrix(leadfield, weights, avg_reference=True):
    ''' Calculate the energy matrix for optimization
    x.dot(Q.dot(x)) = e
    "x" are the electrode currents
    "e" is the average squared electric field norm

    Parameters
    -----------
    leadfield: ndarray (n_electrodes x n_points x 3)
        Leadfield
    weights: array
        weight for each element (each leadfield column), for example volume or area
    avg_reference: bool (optional)
        Whether or not to re-reference to an average reference. Default: True

    Returns
    ----------
    Q: np.ndarray
        Quadratic part of the objective, the mean energy in the head
    '''

    lf = leadfield
    Q = sum(lf[..., i].dot((lf[..., i] * weights).T) for i in range(3))
    Q /= np.sum(weights)

    if avg_reference:
        P = np.linalg.pinv(
            np.vstack([-np.ones(Q.shape[0]), np.eye(Q.shape[0])]))
        Q = P.T.dot(Q).dot(P)

    return Q 
Example #26
Source File: opt_struct.py    From simnibs with GNU General Public License v3.0
def mean_intensity(self, field):
        ''' Calculates the mean intensity of the given field in this target 
        
        Parameters
        -----------
        field: Nx3 NodeData or ElementData
            Electric field

        Returns
        ------------
        intensity: float
            Mean intensity in this target and in the target direction
        '''
        if (self.positions is None) == (self.indexes is None):  # XNOR: raised unless exactly one of positions/indexes is set
            raise ValueError('Please set either positions or indexes')

        assert self.mesh is not None, 'Please set a mesh'
        assert field.nr_comp == 3, 'Field must have 3 components'

        indexes, mapping = _find_indexes(self.mesh, self.lf_type,
                                         positions=self.positions,
                                         indexes=self.indexes,
                                         tissues=self.tissues,
                                         radius=self.radius)

        directions = _find_directions(self.mesh, self.lf_type,
                                      self.directions, indexes,
                                      mapping)

        f = field[indexes]
        components = np.sum(f * directions, axis=1)

        if self.lf_type == 'node':
            weights = self.mesh.nodes_volumes_or_areas()[indexes]
        elif self.lf_type == 'element':
            weights = self.mesh.elements_volumes_and_areas()[indexes]
        else:
            raise ValueError("lf_type must be 'node' or 'element'."
                             " Got: {0} instead".format(self.lf_type))

        return np.average(components, weights=weights) 
Example #27
Source File: transformations.py    From simnibs with GNU General Public License v3.0
def _surf2surf(field, in_surf, out_surf, kdtree=None):
    ''' Interpolates the field defined in in_vertices to the field defined in
    out_vertices using nearest neighbour '''
    if kdtree is None:
        # Normalize the radius of the input sphere
        in_v = np.copy(in_surf.nodes.node_coord)
        in_v /= np.average(np.linalg.norm(in_v, axis=1))
        kdtree = scipy.spatial.cKDTree(in_v)
    out_v = np.copy(out_surf.nodes.node_coord)
    # Normalize the radius of the output sphere
    out_v /= np.average(np.linalg.norm(out_v, axis=1))
    _, closest = kdtree.query(out_v)
    return field[closest], kdtree 
Example #28
Source File: mesh_io.py    From simnibs with GNU General Public License v3.0
def _check_th_node_ordering(self):
        th_nodes = self.nodes[self.elm[self.elm.tetrahedra]]
        th_baricenters = np.average(th_nodes, axis=1)
        face_points = [[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]]
        for t, b in enumerate(th_baricenters):
            for i in range(4):
                d = th_nodes[t, face_points[i]] - b
                assert np.linalg.det(d) > 0, \
                    'Found a face pointing the wrong way!' 
Example #29
Source File: mesh_io.py    From simnibs with GNU General Public License v3.0
def elements_baricenters(self):
        """ Calculates the baricenter of the elements

        Returns
        ------------
        baricenters: ElementData
            ElementData with the baricenters of the elements

        """
        bar = ElementData(
            np.zeros((self.elm.nr, 3), dtype=float),
            'baricenter', mesh=self)
        bar.mesh = self
        th_indexes = self.elm.tetrahedra
        tr_indexes = self.elm.triangles

        if len(th_indexes) > 0:
            bar[th_indexes] = np.average(
                self.nodes[self.elm[th_indexes]],
                axis=1)

        if len(tr_indexes) > 0:
            bar[tr_indexes] = np.average(
                self.nodes[self.elm[tr_indexes][:, :3]],
                axis=1)

        return bar 
Example #30
Source File: test_mesh_io.py    From simnibs with GNU General Public License v3.0
def test_elements_baricenters(self, sphere3_msh):
        baricenters = sphere3_msh.elements_baricenters()

        b = np.zeros((sphere3_msh.elm.nr, 3), dtype=float)
        th_indexes = np.where(sphere3_msh.elm.elm_type == 4)[0]
        b[th_indexes] = np.average(
            sphere3_msh.nodes.node_coord[
                sphere3_msh.elm.node_number_list[th_indexes, :4] - 1], axis=1)

        tr_indexes = np.where(sphere3_msh.elm.elm_type == 2)[0]
        b[tr_indexes] = np.average(
            sphere3_msh.nodes.node_coord[
                sphere3_msh.elm.node_number_list[tr_indexes, :3] - 1], axis=1)

        np.testing.assert_almost_equal(baricenters.value, b)