Python numpy.argmin() Examples

The following are 30 code examples showing how to use numpy.argmin(). These examples are extracted from open source projects; you can go to the original project or source file by following the links above each example.

You may also want to check out all available functions and classes of the module numpy.
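
Before the examples, a quick refresher on the function itself: numpy.argmin() returns the index of the smallest element, operating on the flattened array unless an axis is given; ties resolve to the first occurrence. A minimal sketch:

import numpy as np

a = np.array([[4, 2, 9],
              [1, 7, 1]])

np.argmin(a)          # 3: index into the flattened array (the first 1)
np.argmin(a, axis=0)  # array([1, 0, 1]): row index of the minimum in each column
np.argmin(a, axis=1)  # array([1, 0, 0]): column index of the minimum in each row (row 1 ties; first wins)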

Example 1
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: TripletSampler.py    License: BSD 2-Clause "Simplified" License
def hard_negative_multilabel(self):
        """Hard Negative Sampling based on multilabel assumption

        Search for the negative sample with the largest distance (smallest
        similarity) to the anchor among self._k negative samples
        """
        # During early iterations of sampling, use random sampling instead
        if self._iteration <= self._n:
            return self.random_multilabel()

        anchor_class_id, negative_class_id = np.random.choice(
            list(self._index.keys()), 2)  # list() so dict keys work with np.random.choice on Python 3
        anchor_id, positive_id = np.random.choice(
            self._index[anchor_class_id], 2)
        negative_ids = np.random.choice(
            self._index[negative_class_id], self._k)
        # calculate the similarity to each negative and pick the smallest
        anchor_label = parse_label(self._labels[anchor_id])
        positive_label = parse_label(self._labels[positive_id])
        negative_labels = [parse_label(self._labels[negative_id]) for
                           negative_id in negative_ids]
        p_sim = intersect_sim(anchor_label, positive_label)
        n_sims = np.array(
            [intersect_sim(anchor_label, negative_label) for
             negative_label in negative_labels])
        min_sim_id = np.argmin(n_sims)
        negative_id = negative_ids[min_sim_id]
        n_sim = n_sims[min_sim_id]
        margin = p_sim - n_sim
        return (anchor_id, positive_id, negative_id, margin) 
Example 2
Project: DDPAE-video-prediction   Author: jthsieh   File: metrics.py    License: MIT License
def find_match(self, pred, gt):
    '''
    Match component to balls.
    '''
    batch_size, n_frames_input, n_components, _ = pred.shape
    diff = pred.reshape(batch_size, n_frames_input, n_components, 1, 2) - \
               gt.reshape(batch_size, n_frames_input, 1, n_components, 2)
    diff = np.sum(np.sum(diff ** 2, axis=-1), axis=1)
    # for each predicted component, the index of the closest ground-truth ball
    indices = np.argmin(diff, axis=2)
    ambiguous = np.zeros(batch_size, dtype=np.int8)
    for i in range(batch_size):
      _, counts = np.unique(indices[i], return_counts=True)
      if not np.all(counts == 1):
        ambiguous[i] = 1
    return indices, ambiguous 
Example 3
Project: models   Author: kipoi   File: dataloader.py    License: MIT License
def _extract(self, intervals, out, **kwargs):

        def find_closest(ldm, interval, use_strand=True):
            """Uses
            """
            # subset the positions to the appropriate strand
            # and extract the positions
            ldm_positions = ldm.loc[interval.chrom]
            if use_strand and interval.strand != ".":
                ldm_positions = ldm_positions.loc[interval.strand]
            ldm_positions = ldm_positions.position.values

            int_midpoint = (interval.end + interval.start) // 2
            dist = (ldm_positions - 1) - int_midpoint  # -1 converts 1-based positions to 0-based
            if use_strand and interval.strand == "-":
                dist = - dist

            return dist[np.argmin(np.abs(dist))]

        out[:] = np.array([[find_closest(self.landmarks[ldm_name], interval, self.use_strand)
                            for ldm_name in self.columns]
                           for interval in intervals], dtype=float)

        return out 
Example 4
Project: EXOSIMS   Author: dsavransky   File: SLSQPScheduler.py    License: BSD 3-Clause "New" or "Revised" License
def whichTimeComesNext(self, absTs):
        """ Determine which absolute time comes next from current time
        Specifically designed to determine when the next local zodiacal light event occurs form fZQuads 
        Args:
            absTs (list) - the absolute times of different events (list of absolute times)
        Return:
            absT (astropy time quantity) - the absolute time which occurs next
        """
        TK = self.TimeKeeping
        #Convert Abs Times to norm Time
        tabsTs = list()
        for i in np.arange(len(absTs)):
            tabsTs.append((absTs[i] - TK.missionStart).value) # all should be in first year
        tSinceStartOfThisYear = TK.currentTimeNorm.copy().value%365.25
        if len(tabsTs) == len(np.where(tSinceStartOfThisYear < np.asarray(tabsTs))[0]): # time strictly less than all absTs
            absT = absTs[np.argmin(tabsTs)]
        elif len(tabsTs) == len(np.where(tSinceStartOfThisYear > np.asarray(tabsTs))[0]): # time strictly greater than all absTs
            absT = absTs[np.argmin(tabsTs)]
        else: #Some are above and some are below
            tmptabsTsInds = np.where(tSinceStartOfThisYear < np.asarray(tabsTs))[0]
            absT = absTs[np.argmin(np.asarray(tabsTs)[tmptabsTsInds])] # of times greater than current time, returns smallest

        return absT 
Example 5
Project: sopt   Author: Lyrichu   File: Gradients.py    License: MIT License
def run(self):
        variables = self.init_variables
        for i in range(self.epochs):
            grads = gradients(self.func,variables)
            if self.func_type == gradients_config.func_type_min:
                variables -= self.lr*grads
            else:
                variables += self.lr*grads
            self.generations_points.append(variables.copy())  # copy: in-place updates would otherwise alias every saved point
            self.generations_targets.append(self.func(variables))

        if self.func_type == gradients_config.func_type_min:
            self.global_best_target = np.min(np.array(self.generations_targets))
            self.global_best_index = np.argmin(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)]
        else:
            self.global_best_target = np.max(np.array(self.generations_targets))
            self.global_best_index = np.argmax(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)] 
Example 6
Project: sopt   Author: Lyrichu   File: Gradients.py    License: MIT License
def run(self):
        variables = self.init_variables
        self.s = np.zeros(self.variables_num)
        for i in range(self.epochs):
            grads = gradients(self.func,variables)
            self.s += np.square(grads)
            if self.func_type == gradients_config.func_type_min:
                variables -= self.lr*grads/(np.sqrt(self.s+self.eps))
            else:
                variables += self.lr*grads/(np.sqrt(self.s+self.eps))
            self.generations_points.append(variables.copy())  # copy: in-place updates would otherwise alias every saved point
            self.generations_targets.append(self.func(variables))

        if self.func_type == gradients_config.func_type_min:
            self.global_best_target = np.min(np.array(self.generations_targets))
            self.global_best_index = np.argmin(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)]
        else:
            self.global_best_target = np.max(np.array(self.generations_targets))
            self.global_best_index = np.argmax(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)] 
Example 7
Project: sopt   Author: Lyrichu   File: Gradients.py    License: MIT License
def run(self):
        variables = self.init_variables
        self.s = np.zeros(self.variables_num)
        for i in range(self.epochs):
            grads = gradients(self.func,variables)
            self.s = self.beta*self.s + (1-self.beta)*np.square(grads)
            if self.func_type == gradients_config.func_type_min:
                variables -= self.lr*grads/(np.sqrt(self.s+self.eps))
            else:
                variables += self.lr*grads/(np.sqrt(self.s+self.eps))
            self.generations_points.append(variables.copy())  # copy: in-place updates would otherwise alias every saved point
            self.generations_targets.append(self.func(variables))

        if self.func_type == gradients_config.func_type_min:
            self.global_best_target = np.min(np.array(self.generations_targets))
            self.global_best_index = np.argmin(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)]
        else:
            self.global_best_target = np.max(np.array(self.generations_targets))
            self.global_best_index = np.argmax(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)] 
Example 8
Project: sopt   Author: Lyrichu   File: Gradients.py    License: MIT License
def run(self):
        variables = self.init_variables
        self.s = np.zeros(self.variables_num)
        self.m = np.zeros(self.variables_num)
        for i in range(self.epochs):
            grads = gradients(self.func,variables)
            self.m = self.beta1*self.m +(1-self.beta1)*grads
            self.s = self.beta2*self.s + (1-self.beta2)*np.square(grads)
            # keep bias-corrected estimates separate so the running moments are not overwritten
            m_hat = self.m/(1-self.beta1**(i+1))
            s_hat = self.s/(1-self.beta2**(i+1))
            if self.func_type == gradients_config.func_type_min:
                variables -= self.lr*m_hat/(np.sqrt(s_hat+self.eps))
            else:
                variables += self.lr*m_hat/(np.sqrt(s_hat+self.eps))
            self.generations_points.append(variables.copy())  # copy: in-place updates would otherwise alias every saved point
            self.generations_targets.append(self.func(variables))

        if self.func_type == gradients_config.func_type_min:
            self.global_best_target = np.min(np.array(self.generations_targets))
            self.global_best_index = np.argmin(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)]
        else:
            self.global_best_target = np.max(np.array(self.generations_targets))
            self.global_best_index = np.argmax(np.array(self.generations_targets))
            self.global_best_point = self.generations_points[int(self.global_best_index)] 
Example 9
Project: pymoo   Author: msu-coinlab   File: pseudo_weights.py    License: Apache License 2.0
def _do(self, F, return_pseudo_weights=False, **kwargs):

        # here the normalized values are ignored but the ideal and nadir points are estimated to calculate trade-off
        _, norm, ideal_point, nadir_point = normalize(F, self.ideal_point, self.nadir_point,
                                                      estimate_bounds_if_none=True, return_bounds=True)

        # normalized distance to the worst solution
        pseudo_weights = ((nadir_point - F) / norm)

        # normalize weights to sum up to one
        pseudo_weights = pseudo_weights / np.sum(pseudo_weights, axis=1)[:, None]

        # search for the closest individual having these pseudo weights
        I = np.argmin(np.sum(np.abs(pseudo_weights - self.weights), axis=1))

        if return_pseudo_weights:
            return I, pseudo_weights
        else:
            return I 
Example 10
Project: pymoo   Author: msu-coinlab   File: nsga3.py    License: Apache License 2.0
def get_extreme_points_c(F, ideal_point, extreme_points=None):
    # calculate the asf which is used for the extreme point decomposition
    weights = np.eye(F.shape[1])
    weights[weights == 0] = 1e6

    # add the old extreme points so they are never lost during normalization
    _F = F
    if extreme_points is not None:
        _F = np.concatenate([extreme_points, _F], axis=0)

    # use __F because small values are substituted with 0
    __F = _F - ideal_point
    __F[__F < 1e-3] = 0

    # update the extreme points for the normalization, taking the point with the smallest asf value for each objective
    F_asf = np.max(__F * weights[:, None, :], axis=2)

    I = np.argmin(F_asf, axis=1)
    extreme_points = _F[I, :]

    return extreme_points 
Example 11
Project: pyscf   Author: pyscf   File: mpi.py    License: Apache License 2.0
def work_balanced_partition(tasks, costs=None):
    if costs is None:
        costs = numpy.ones(len(tasks))
    if rank == 0:
        segsize = float(sum(costs)) / pool.size
        loads = []
        cum_costs = numpy.cumsum(costs)
        start_id = 0
        for k in range(pool.size):
            stop_id = numpy.argmin(abs(cum_costs - (k+1)*segsize)) + 1
            stop_id = max(stop_id, start_id+1)
            loads.append([start_id,stop_id])
            start_id = stop_id
        comm.bcast(loads)
    else:
        loads = comm.bcast()
    if rank < len(loads):
        start, stop = loads[rank]
        return tasks[start:stop]
    else:
        return tasks[:0] 
Example 12
Project: pyscf   Author: pyscf   File: stability.py    License: Apache License 2.0
def ghf_stability(mf, verbose=None):
    log = logger.new_logger(mf, verbose)
    with_symmetry = True
    g, hop, hdiag = newton_ah.gen_g_hop_ghf(mf, mf.mo_coeff, mf.mo_occ,
                                            with_symmetry=with_symmetry)
    hdiag *= 2
    def precond(dx, e, x0):
        hdiagd = hdiag - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd
    def hessian_x(x): # See comments in function rhf_internal
        return hop(x).real * 2

    x0 = numpy.zeros_like(g)
    x0[g!=0] = 1. / hdiag[g!=0]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag)] = 1
    e, v = lib.davidson(hessian_x, x0, precond, tol=1e-4, verbose=log)
    if e < -1e-5:
        log.note('GHF wavefunction has an internal instability')
        mo = _rotate_mo(mf.mo_coeff, mf.mo_occ, v)
    else:
        log.note('GHF wavefunction is stable in the internal stability analysis')
        mo = mf.mo_coeff
    return mo 
Example 13
Project: pyscf   Author: pyscf   File: stability.py    License: Apache License 2.0
def rohf_internal(mf, with_symmetry=True, verbose=None):
    log = logger.new_logger(mf, verbose)
    g, hop, hdiag = newton_ah.gen_g_hop_rohf(mf, mf.mo_coeff, mf.mo_occ,
                                             with_symmetry=with_symmetry)
    hdiag *= 2
    def precond(dx, e, x0):
        hdiagd = hdiag - e
        hdiagd[abs(hdiagd)<1e-8] = 1e-8
        return dx/hdiagd
    def hessian_x(x): # See comments in function rhf_internal
        return hop(x).real * 2

    x0 = numpy.zeros_like(g)
    x0[g!=0] = 1. / hdiag[g!=0]
    if not with_symmetry:  # allow to break point group symmetry
        x0[numpy.argmin(hdiag)] = 1
    e, v = lib.davidson(hessian_x, x0, precond, tol=1e-4, verbose=log)
    if e < -1e-5:
        log.note('ROHF wavefunction has an internal instability.')
        mo = _rotate_mo(mf.mo_coeff, mf.mo_occ, v)
    else:
        log.note('ROHF wavefunction is stable in the internal stability analysis')
        mo = mf.mo_coeff
    return mo 
Example 14
Project: libTLDA   Author: wmkouw   File: suba.py    License: MIT License
def find_medioid(self, X, Y):
        """
        Find point with minimal distance to all other points.

        Parameters
        ----------
        X : array
            data set, with N samples x D features.
        Y : array
            labels to select for which samples to compute distances.

        Returns
        -------
        x : array
            medoid
        ix : int
            index of the medoid

        """
        # Initialize an array with infinities
        A = np.full((X.shape[0],), np.inf)

        # Insert sum of distances to other points
        A[Y] = np.sum(squareform(pdist(X[Y, :])), axis=1)

        # Find the index of the point with the smallest distance
        ix = np.argmin(A)

        return X[ix, :], ix 
Example 15
Project: FRIDA   Author: LCAV   File: doa.py    License: MIT License
def polar_distance(x1, x2):
    """
    Given two arrays of numbers x1 and x2, pairs the cells that are the
    closest and provides the pairing matrix index: x1(index(1,:)) should be as
    close as possible to x2(index(2,:)). The function outputs the average of 
    the absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).
    :param x1: vector 1
    :param x2: vector 2
    :return: d: the minimum average distance between the paired elements
             index: the permutation matrix
    """
    x1 = np.reshape(x1, (1, -1), order='F')
    x2 = np.reshape(x2, (1, -1), order='F')
    N1 = x1.size
    N2 = x2.size
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order='F')))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        for k in range(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float('inf')
            diffmat[:, index1] = float('inf')
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            index = np.array([1, index])
        else:
            index = np.array([index, 1])
    return d, index 
Example 16
Project: neural-pipeline   Author: toodef   File: gridsearch_train.py    License: MIT License
def _calc_min(self) -> float:
            return self._values[np.argmin(self._values)] 
Example 17
Project: neural-pipeline   Author: toodef   File: gridsearch_train.py    License: MIT License
def _calc_around_min(self, num_around: int) -> float:
            min_idx = np.argmin(self._values)
            num_back = min_idx - num_around
            if num_back < 0:
                num_back = 0
            num_forward = min_idx + num_around
            if num_forward > len(self._values) - 1:
                num_forward = len(self._values) - 1
            return np.mean(self._values[num_back: num_forward]) 
Example 18
Project: neuropythy   Author: noahbenson   File: core.py    License: GNU Affero General Public License v3.0
def curve_intersection(c1, c2, grid=16):
    '''
    curve_intersection(c1, c2) yields the parametric distances (t1, t2) such that c1(t1) == c2(t2).
      
    The optional parameter grid may specify the number of grid-points
    to use in the initial search for a start-point (default: 16).
    '''
    from scipy.optimize import minimize
    from neuropythy.geometry import segment_intersection_2D
    if c1.coordinates.shape[1] > c2.coordinates.shape[1]:
        (t1,t2) = curve_intersection(c2, c1, grid=grid)
        return (t2,t1)
    # before doing a search, see if there are literal exact intersections of the segments
    x1s  = c1.coordinates.T
    x2s  = c2.coordinates
    for (ts,te,xs,xe) in zip(c1.t[:-1], c1.t[1:], x1s[:-1], x1s[1:]):
        pts = segment_intersection_2D((xs,xe), (x2s[:,:-1], x2s[:,1:]))
        ii = np.where(np.isfinite(pts[0]))[0]
        if len(ii) > 0:
            ii = ii[0]
            def f(t): return np.sum((c1(t[0]) - c2(t[1]))**2)
            t01 = 0.5*(ts + te)
            t02 = 0.5*(c2.t[ii] + c2.t[ii+1])
            (t1,t2) = minimize(f, (t01, t02)).x
            return (t1,t2)
    if pimms.is_vector(grid): (ts1,ts2) = [c.t[0] + (c.t[-1] - c.t[0])*grid for c in (c1,c2)]
    else:                     (ts1,ts2) = [np.linspace(c.t[0], c.t[-1], grid) for c in (c1,c2)]
    (pts1,pts2) = [c(ts) for (c,ts) in zip([c1,c2],[ts1,ts2])]
    ds = np.sqrt([np.sum((pts2.T - pp)**2, axis=1) for pp in pts1.T])
    (ii,jj) = np.unravel_index(np.argmin(ds), ds.shape)
    (t01,t02) = (ts1[ii], ts2[jj])
    ttt = []
    def f(t): return np.sum((c1(t[0]) - c2(t[1]))**2)
    (t1,t2) = minimize(f, (t01, t02)).x
    return (t1,t2) 
Example 19
Project: discomll   Author: romanorac   File: k_medoids.py    License: Apache License 2.0
def fit(sim_mat, D_len, cidx):
    """
    The algorithm maximizes the energy between clusters, which is its distinguishing feature. The distance matrix contains mostly zeros, which are overlooked due to the search for maximal distances. The algorithm does not try to retain k clusters.

    D: numpy array - Symmetric distance matrix
    k: int - number of clusters
    """

    min_energy = np.inf
    for j in range(3):
        # for each sample, pick the index of the closest center (smallest distance entry)
        inds = [np.argmin([sim_mat[idy].get(idx, 0) for idx in cidx]) for idy in range(D_len) if idy in sim_mat]

        cidx = []
        energy = 0  # current energy
        for i in np.unique(inds):
            indsi = np.where(np.asarray(inds) == i)[0]  # find indices for every cluster; asarray makes the comparison elementwise

            minind, min_value = 0, 0
            for index, idy in enumerate(indsi):
                if idy in sim_mat:
                    # value = sum([sim_mat[idy].get(idx,0) for idx in indsi])
                    value = 0
                    for idx in indsi:
                        value += sim_mat[idy].get(idx, 0)
                    if value < min_value:
                        minind, min_value = index, value
            energy += min_value
            cidx.append(indsi[minind])  # new centers

        if energy < min_energy:
            min_energy, inds_min, cidx_min = energy, inds, cidx

    return inds_min, cidx_min  # cluster for every instance, medoids indices 
Example 20
Project: robosuite   Author: StanfordVL   File: panda_nut_assembly.py    License: MIT License
def _gripper_visualization(self):
        """
        Do any needed visualization here. Overrides superclass implementations.
        """
        # color the gripper site appropriately based on distance to nearest object
        if self.gripper_visualization:
            # find closest object
            square_dist = lambda x: np.sum(
                np.square(x - self.sim.data.get_site_xpos("grip_site"))
            )
            dists = np.array(list(map(square_dist, self.sim.data.site_xpos)))
            dists[self.eef_site_id] = np.inf  # make sure we don't pick the same site
            dists[self.eef_cylinder_id] = np.inf
            ob_dists = dists[
                self.object_site_ids
            ]  # keep only the object sites we care about
            min_dist = np.min(ob_dists)
            ob_id = np.argmin(ob_dists)
            ob_name = self.object_names[ob_id]

            # set RGBA for the EEF site here
            max_dist = 0.1
            scaled = (1.0 - min(min_dist / max_dist, 1.)) ** 15
            rgba = np.zeros(4)
            rgba[0] = 1 - scaled
            rgba[1] = scaled
            rgba[3] = 0.5

            self.sim.model.site_rgba[self.eef_site_id] = rgba 
Example 21
Project: robosuite   Author: StanfordVL   File: sawyer_pick_place.py    License: MIT License
def _gripper_visualization(self):
        """
        Do any needed visualization here. Overrides superclass implementations.
        """
        # color the gripper site appropriately based on distance to nearest object
        if self.gripper_visualization:
            # find closest object
            square_dist = lambda x: np.sum(
                np.square(x - self.sim.data.get_site_xpos("grip_site"))
            )
            dists = np.array(list(map(square_dist, self.sim.data.site_xpos)))
            dists[self.eef_site_id] = np.inf  # make sure we don't pick the same site
            dists[self.eef_cylinder_id] = np.inf
            ob_dists = dists[
                self.object_site_ids
            ]  # keep only the object sites we care about
            min_dist = np.min(ob_dists)
            ob_id = np.argmin(ob_dists)
            ob_name = self.object_names[ob_id]

            # set RGBA for the EEF site here
            max_dist = 0.1
            scaled = (1.0 - min(min_dist / max_dist, 1.)) ** 15
            rgba = np.zeros(4)
            rgba[0] = 1 - scaled
            rgba[1] = scaled
            rgba[3] = 0.5

            self.sim.model.site_rgba[self.eef_site_id] = rgba 
Example 22
Project: robosuite   Author: StanfordVL   File: panda_pick_place.py    License: MIT License
def _gripper_visualization(self):
        """
        Do any needed visualization here. Overrides superclass implementations.
        """
        # color the gripper site appropriately based on distance to nearest object
        if self.gripper_visualization:
            # find closest object
            square_dist = lambda x: np.sum(
                np.square(x - self.sim.data.get_site_xpos("grip_site"))
            )
            dists = np.array(list(map(square_dist, self.sim.data.site_xpos)))
            dists[self.eef_site_id] = np.inf  # make sure we don't pick the same site
            dists[self.eef_cylinder_id] = np.inf
            ob_dists = dists[
                self.object_site_ids
            ]  # keep only the object sites we care about
            min_dist = np.min(ob_dists)
            ob_id = np.argmin(ob_dists)
            ob_name = self.object_names[ob_id]

            # set RGBA for the EEF site here
            max_dist = 0.1
            scaled = (1.0 - min(min_dist / max_dist, 1.)) ** 15
            rgba = np.zeros(4)
            rgba[0] = 1 - scaled
            rgba[1] = scaled
            rgba[3] = 0.5

            self.sim.model.site_rgba[self.eef_site_id] = rgba 
Example 23
Project: robosuite   Author: StanfordVL   File: sawyer_nut_assembly.py    License: MIT License
def _gripper_visualization(self):
        """
        Do any needed visualization here. Overrides superclass implementations.
        """
        # color the gripper site appropriately based on distance to nearest object
        if self.gripper_visualization:
            # find closest object
            square_dist = lambda x: np.sum(
                np.square(x - self.sim.data.get_site_xpos("grip_site"))
            )
            dists = np.array(list(map(square_dist, self.sim.data.site_xpos)))
            dists[self.eef_site_id] = np.inf  # make sure we don't pick the same site
            dists[self.eef_cylinder_id] = np.inf
            ob_dists = dists[
                self.object_site_ids
            ]  # keep only the object sites we care about
            min_dist = np.min(ob_dists)
            ob_id = np.argmin(ob_dists)
            ob_name = self.object_names[ob_id]

            # set RGBA for the EEF site here
            max_dist = 0.1
            scaled = (1.0 - min(min_dist / max_dist, 1.)) ** 15
            rgba = np.zeros(4)
            rgba[0] = 1 - scaled
            rgba[1] = scaled
            rgba[3] = 0.5

            self.sim.model.site_rgba[self.eef_site_id] = rgba 
Example 24
Project: dogTorch   Author: ehsanik   File: metrics.py    License: MIT License
def record_output(self, output, output_indices, target, prev_absolute_imu,
                      next_absolute_imu, batch_size=1):
        # return
        assert output.dim() == 4
        size = output.size()
        output_labels = torch.LongTensor(size[0], size[1], size[2],
                                         self.centroids.size(1)).zero_()
        target_labels = torch.LongTensor(size[0], size[1], size[2])
        for batch_id in range(size[0]):
            for seq_id in range(size[1]):
                for imu_id in range(size[2]):
                    output_distances = []
                    target_distances = []
                    for centroid in self.centroids[imu_id]:
                        output_distances.append(
                            self.get_angle_diff(
                                output[batch_id, seq_id, imu_id], centroid))
                        target_distances.append(
                            self.get_angle_diff(
                                target[batch_id, seq_id, imu_id], centroid))
                    output_label = np.argmin(np.array(output_distances))
                    output_labels[batch_id, seq_id, imu_id, output_label] = 1
                    target_labels[batch_id, seq_id, imu_id] = int(
                        np.argmin(np.array(target_distances)))
        self.metric.record_output(output_labels, output_indices, target_labels,
                                  prev_absolute_imu, next_absolute_imu,
                                  batch_size) 
Example 25
Project: TradzQAI   Author: kkuette   File: wallet.py    License: Apache License 2.0
def calc_max_return(self, capital):
        if len(capital) < 2:
            return 0
        max = np.argmax(np.maximum.accumulate(capital) + capital)
        if max == 0:
            return 0
        min_before_max = np.argmin(capital[:max])
        _return = 100 * (capital[max] - capital[min_before_max]) / capital[min_before_max]
        return _return 
Example 26
Project: TOPFARM   Author: DTUWindEnergy   File: tlib.py    License: GNU Affero General Public License v3.0
def dist_from_poly(x,y, poly):
    """
    Calculate the minimum distance from an edge of the polygon.
    It's positive if the point is inside the polygon, and negative otherwise.

    :param x, y: floats, the position of the point
    :param poly: ndarray([n,2]) the points defining the polygon

    :return dist_from_poly: float
    """
    if point_in_poly(x,y,poly):
        in_ = 1
    else:
        in_ = -1
    disto = zeros([poly.shape[0]])
    for i in range(len(poly)):
        i1 = i
        if i == len(poly) -1:
            i2 = 0
        else:
            i2 = i + 1
        P1 = poly[i1, 0:2]
        P2 = poly[i2, 0:2]
        #print P1, P2, [x,y]
        disto[i] = dist_from_segment(P1, P2, [x,y])
    imin = argmin(disto)
    disto[imin] = disto[imin] * in_
    #print disto
    return disto 
Example 27
Project: TOPFARM   Author: DTUWindEnergy   File: tlib.py    License: GNU Affero General Public License v3.0
def locate_ij(self, P):
        """
        Find the closest control point to the reference point P

        :param P: tuple, list or array
                    The reference point
        :return: [i,j]
                 indices in self.positions
        """
        a = np.argmin([(x-P[0])**2 + (y-P[1])**2 for x,y in self.positions])
        return int(a // self.nx), a % self.nx  # integer division gives the row index (np.ceil of float division would be off)
Example 28
Project: display_ocr   Author: arturaugusto   File: digital_display_ocr.py    License: GNU General Public License v2.0
def normalize_contrs(img,cntr_pts):
  ratio = img.shape[0] / 300.0
  norm_pts = np.zeros((4,2), dtype="float32")

  s = cntr_pts.sum(axis=1)
  norm_pts[0] = cntr_pts[np.argmin(s)]
  norm_pts[2] = cntr_pts[np.argmax(s)]

  d = np.diff(cntr_pts,axis=1)
  norm_pts[1] = cntr_pts[np.argmin(d)]
  norm_pts[3] = cntr_pts[np.argmax(d)]

  norm_pts *= ratio

  (top_left, top_right, bottom_right, bottom_left) = norm_pts

  width1 = np.sqrt(((bottom_right[0] - bottom_left[0]) ** 2) + ((bottom_right[1] - bottom_left[1]) ** 2))
  width2 = np.sqrt(((top_right[0] - top_left[0]) ** 2) + ((top_right[1] - top_left[1]) ** 2))
  height1 = np.sqrt(((top_right[0] - bottom_right[0]) ** 2) + ((top_right[1] - bottom_right[1]) ** 2))
  height2 = np.sqrt(((top_left[0] - bottom_left[0]) ** 2) + ((top_left[1] - bottom_left[1]) ** 2))

  max_width = max(int(width1), int(width2))
  max_height = max(int(height1), int(height2))

  dst = np.array([[0,0], [max_width -1, 0],[max_width -1, max_height -1],[0, max_height-1]], dtype="float32")
  persp_matrix = cv2.getPerspectiveTransform(norm_pts,dst)
  return cv2.warpPerspective(img,persp_matrix,(max_width,max_height)) 
Example 29
Project: fine-lm   Author: akzaidi   File: yellowfin_test.py    License: MIT License
def tune_everything(self, x0squared, c, t, gmin, gmax):
    del t
    # First tune based on dynamic range
    if c == 0:
      dr = gmax / gmin
      mustar = ((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2
      alpha_star = (1 + np.sqrt(mustar))**2/gmax

      return alpha_star, mustar

    dist_to_opt = x0squared
    grad_var = c
    max_curv = gmax
    min_curv = gmin
    const_fact = dist_to_opt * min_curv**2 / 2 / grad_var
    coef = [-1, 3, -(3 + const_fact), 1]
    roots = np.roots(coef)
    roots = roots[np.real(roots) > 0]
    roots = roots[np.real(roots) < 1]
    root = roots[np.argmin(np.imag(roots))]

    assert 0 < root.real < 1 and np.absolute(root.imag) < 1e-6  # compare the real part; ordering a complex scalar raises in Python 3

    dr = max_curv / min_curv
    assert max_curv >= min_curv
    mu = max(((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2, root**2)

    lr_min = (1 - np.sqrt(mu))**2 / min_curv

    alpha_star = lr_min
    mustar = mu

    return alpha_star, mustar 
Example 30
Project: medicaldetectiontoolkit   Author: MIC-DKFZ   File: dataloader_utils.py    License: Apache License 2.0
def get_class_balanced_patients(class_targets, batch_size, num_classes, slack_factor=0.1):
    '''
    Samples patients towards an equilibrium of classes at the roi level. For highly imbalanced datasets this might be too strong a requirement;
    hence a slack factor determines the ratio of the batch that is randomly sampled before class balancing is triggered.
    :param class_targets: list of patient targets. where each patient target is a list of class labels of respective rois.
    :param batch_size:
    :param num_classes:
    :param slack_factor:
    :return: batch_ixs: list of indices referring to a subset in class_targets-list, sampled to build one batch.
    '''
    batch_ixs = []
    class_count = {k: 0 for k in range(num_classes)}
    weakest_class = 0
    for ix in range(batch_size):

        keep_looking = True
        while keep_looking:
            #choose a random patient.
            cand = np.random.choice(len(class_targets), 1)[0]
            # check the least occurring class among this patient's rois.
            tmp_weakest_class = np.argmin([class_targets[cand].count(ii) for ii in range(num_classes)])
            # once the current batch is past the slack_factor ratio, keep the patient only if its
            # weakest class is not also the batch's weakest class (which needs boosting) and at
            # least one roi of this patient belongs to the batch's weakest class; else keep looking.
            if (tmp_weakest_class != weakest_class and class_targets[cand].count(weakest_class) > 0) or ix < int(batch_size * slack_factor):
                keep_looking = False

        for c in range(num_classes):
            class_count[c] += class_targets[cand].count(c)
        weakest_class = np.argmin([class_count[c] for c in range(num_classes)])
        batch_ixs.append(cand)

    return batch_ixs