Python numpy.median() Examples

The following are 30 code examples of numpy.median(), drawn from open-source projects. The source file, project, and license for each example are listed above it. You may also want to check out all available functions/classes of the module numpy, or try the search function.
Example #1
Source File: test_nanfunctions.py    From recruit with Apache License 2.0
def test_out(self):
        mat = np.random.rand(3, 3)
        nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
        resout = np.zeros(3)
        tgt = np.median(mat, axis=1)
        res = np.nanmedian(nan_mat, axis=1, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        # 0-d output:
        resout = np.zeros(())
        tgt = np.median(mat, axis=None)
        res = np.nanmedian(nan_mat, axis=None, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt) 
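
A quick standalone illustration of the out= semantics exercised above (a minimal sketch, not part of the NumPy test suite): when out= is given, np.nanmedian writes the result into the preallocated buffer and returns a reference to that same buffer.

import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])
out = np.zeros(2)
res = np.nanmedian(a, axis=1, out=out)
print(res is out)  # True -- the return value is the out buffer itself
print(out)         # [2.  4.5] -- NaNs are ignored per row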
Example #2
Source File: dynamic_roi_head.py    From mmdetection with Apache License 2.0
def update_hyperparameters(self):
        """Update hyperparameters like IoU thresholds for assigner and beta for
        SmoothL1 loss based on the training statistics.

        Returns:
            tuple[float]: the updated ``iou_thr`` and ``beta``.
        """
        new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
                          np.mean(self.iou_history))
        self.iou_history = []
        self.bbox_assigner.pos_iou_thr = new_iou_thr
        self.bbox_assigner.neg_iou_thr = new_iou_thr
        self.bbox_assigner.min_pos_iou = new_iou_thr
        new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
                       np.median(self.beta_history))
        self.beta_history = []
        self.bbox_head.loss_bbox.beta = new_beta
        return new_iou_thr, new_beta 
Example #3
Source File: bayestar.py    From dustmaps with GNU General Public License v2.0
def _raise_on_mode(self, mode):
        """
        Checks that the provided query mode is one of the accepted values. If
        not, raises a :obj:`ValueError`.
        """
        valid_modes = [
            'random_sample',
            'random_sample_per_pix',
            'samples',
            'median',
            'mean',
            'best',
            'percentile']

        if mode not in valid_modes:
            raise ValueError(
                '"{}" is not a valid `mode`. Valid modes are:\n'
                '  {}'.format(mode, valid_modes)
            ) 
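
The same guard pattern works outside the class (an illustrative standalone sketch; check_mode is a hypothetical helper, not part of dustmaps):

def check_mode(mode, valid_modes=('median', 'mean', 'best')):
    # Reject any query mode that is not in the accepted list.
    if mode not in valid_modes:
        raise ValueError(
            '"{}" is not a valid `mode`. Valid modes are:\n'
            '  {}'.format(mode, list(valid_modes)))

check_mode('median')      # passes silently
try:
    check_mode('average')
except ValueError as err:
    print(err)            # lists the accepted modes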
Example #4
Source File: post_proc.py    From HorizonNet with MIT License
def vote(vec, tol):
    vec = np.sort(vec)
    n = np.arange(len(vec))[::-1]
    n = n[:, None] - n[None, :] + 1.0
    l = squareform(pdist(vec[:, None], 'minkowski', p=1) + 1e-9)

    invalid = (n < len(vec) * 0.4) | (l > tol)
    if (~invalid).sum() == 0 or len(vec) < tol:
        best_fit = np.median(vec)
        p_score = 0
    else:
        l[invalid] = 1e5
        n[invalid] = -1
        score = n
        max_idx = score.argmax()
        max_row = max_idx // len(vec)
        max_col = max_idx % len(vec)
        assert max_col > max_row
        best_fit = vec[max_row:max_col+1].mean()
        p_score = (max_col - max_row + 1) / len(vec)

    l1_score = np.abs(vec - best_fit).mean()

    return best_fit, p_score, l1_score 
Example #5
Source File: rsp_findpeaks.py    From NeuroKit with MIT License
def _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0.3):

    # Only consider those extrema that have a minimum vertical distance to
    # their direct neighbor, i.e., define outliers in absolute amplitude
    # difference between neighboring extrema.
    vertical_diff = np.abs(np.diff(rsp_cleaned[extrema]))
    median_diff = np.median(vertical_diff)
    min_diff = np.where(vertical_diff > (median_diff * amplitude_min))[0]
    extrema = extrema[min_diff]

    # Make sure that the alternation of peaks and troughs is unbroken. If
    # alternation of sign in extdiffs is broken, remove the extrema that
    # cause the breaks.
    amplitudes = rsp_cleaned[extrema]
    extdiffs = np.sign(np.diff(amplitudes))
    extdiffs = np.add(extdiffs[0:-1], extdiffs[1:])
    removeext = np.where(extdiffs != 0)[0] + 1
    extrema = np.delete(extrema, removeext)
    amplitudes = np.delete(amplitudes, removeext)

    return extrema, amplitudes 
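
The first filtering step can be traced on a toy series of vertical differences (a minimal sketch, independent of NeuroKit): extrema whose distance to their direct neighbor falls below amplitude_min times the median difference are dropped.

import numpy as np

vertical_diff = np.array([2.0, 2.2, 0.1, 1.9])  # one tiny peak-trough excursion
median_diff = np.median(vertical_diff)          # 1.95
keep = np.where(vertical_diff > median_diff * 0.3)[0]
print(keep)                                     # [0 1 3] -- the 0.1 excursion is rejected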
Example #6
Source File: filters.py    From pyfilter with MIT License
def test_ParallellFiltersAndStability(self):
        x, y = self.model.sample_path(50)

        shape = 3000

        linear = AffineProcess((f, g), (1., 1.), self.norm, self.norm)
        self.model.hidden = linear

        filt = SISR(self.model, 1000).set_nparallel(shape).initialize().longfilter(y)

        filtermeans = filt.result.filter_means

        x = filtermeans[:, :1]
        mape = ((x - filtermeans[:, 1:]) / x).abs()

        assert mape.median(0)[0].max() < 0.05 
Example #7
Source File: filters.py    From pyfilter with MIT License
def test_ParallelUnscented(self):
        x, y = self.model.sample_path(50)

        shape = 30

        linear = AffineProcess((f, g), (1., 1.), self.norm, self.norm)
        self.model.hidden = linear

        filt = SISR(self.model, 1000, proposal=Unscented()).set_nparallel(shape).initialize().longfilter(y)

        filtermeans = filt.result.filter_means

        x = filtermeans[:, :1]
        mape = ((x - filtermeans[:, 1:]) / x).abs()

        assert mape.median(0)[0].max() < 0.05 
Example #8
Source File: tedana.py    From me-ica with GNU Lesser General Public License v2.1
def makeadmask(cdat,min=True,getsum=False):

	nx,ny,nz,Ne,nt = cdat.shape

	mask = np.ones((nx,ny,nz),dtype=bool)

	if min:
		mask = cdat[:,:,:,:,:].prod(axis=-1).prod(-1)!=0
		return mask
	else:
		#Make a map of longest echo that a voxel can be sampled with,
		#with minimum value of map as X value of voxel that has median
		#value in the 1st echo. N.b. larger factor leads to bias to lower TEs
		emeans = cdat[:,:,:,:,:].mean(-1)
		medv = emeans[:,:,:,0] == stats.scoreatpercentile(
			emeans[:,:,:,0][emeans[:,:,:,0]!=0], 33, interpolation_method='higher')
		lthrs = np.squeeze(np.array([ emeans[:,:,:,ee][medv]/3 for ee in range(Ne) ]))
		if len(lthrs.shape)==1: lthrs = np.atleast_2d(lthrs).T
		lthrs = lthrs[:,lthrs.sum(0).argmax()]
		mthr = np.ones([nx,ny,nz,Ne])
		for ee in range(Ne): mthr[:,:,:,ee]*=lthrs[ee]
		mthr = np.abs(emeans[:,:,:,:])>mthr
		masksum = np.array(mthr,dtype=int).sum(-1)
		mask = masksum!=0
		if getsum: return mask,masksum
		else: return mask 
Example #9
Source File: predict_spending_rough.py    From kaggle-code with MIT License
def fill_and_adj_numeric(df):
	#pageviews has NAs; fill them with the column median (== 1 for this data)
	df.pageviews.fillna(df.pageviews.median(), inplace = True)

	df.hits.fillna(df.hits.median(), inplace = True)
	df.visits.fillna(df.visits.median(), inplace = True)

	#are boolean, fill NaN with zeros, add to categorical
	df.isTrueDirect.fillna(0, inplace = True)
	df.bounces.fillna(0, inplace = True)
	df.newVisits.fillna(0, inplace = True)
	df.visitNumber.fillna(1, inplace = True)

	for col in ['isTrueDirect', 'bounces', 'newVisits']:
		df[col] = df[col].astype(int)

	return df 
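
A minimal round-trip on a toy frame (an illustrative sketch; the values are made up, and the inplace fills above assume classic pandas copy semantics rather than copy-on-write):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    'pageviews': [2.0, np.nan], 'hits': [5.0, np.nan], 'visits': [1.0, np.nan],
    'isTrueDirect': [1.0, np.nan], 'bounces': [np.nan, 1.0],
    'newVisits': [np.nan, 1.0], 'visitNumber': [np.nan, 2.0]})
df = fill_and_adj_numeric(df)
print(df.pageviews.tolist())  # [2.0, 2.0] -- NaN replaced by the column median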
Example #10
Source File: offset_ui_tool.py    From TGC-Designer-Tools with Apache License 2.0
def drawNewLocation(ax, image_dict, result, image_scale, radio, sx, sy, event, ar):
    x_offset = 0.0
    y_offset = 0.0
    if sx is not None and sy is not None:
        x_offset = sx.val
        y_offset = sy.val

    vosm = np.copy(image_dict["Visible"])
    vosm = OSMTGC.addOSMToImage(result.ways, vosm, pc, image_scale, x_offset, y_offset)
    image_dict["Visible Golf"] = vosm

    hosm = np.copy(image_dict["Heightmap"]).astype('float32')
    hosm = np.clip(hosm, 0.0, 3.5*np.median( hosm[ hosm >= 0.0 ])) # Limit outlier pixels
    hosm = hosm / np.max(hosm)
    hosm = cv2.cvtColor(hosm, cv2.COLOR_GRAY2RGB)
    hosm = OSMTGC.addOSMToImage(result.ways, hosm, pc, image_scale, x_offset, y_offset)
    image_dict["Heightmap Golf"] = hosm

    # Always set to Visible Golf after drawing new golf features
    ax.imshow(image_dict["Visible Golf"], origin='lower')
    radio.set_active(1) 
Example #11
Source File: metrics.py    From vehicle_counting_tensorflow with MIT License
def compute_median_rank_at_k(tp_fp_list, k):
  """Computes MedianRank@k, where k is the top-scoring labels.

  Args:
    tp_fp_list: a list of numpy arrays; each numpy array corresponds to all
        detections on a single image, where the detections are sorted by score in
        descending order. Further, each numpy array element can have boolean or
        float values. True positive elements have either value >0.0 or True;
        any other value is considered false positive.
    k: number of top-scoring proposals to take.

  Returns:
    median_rank: median rank of all true positive proposals among top k by
      score.
  """
  ranks = []
  for i in range(len(tp_fp_list)):
    ranks.append(
        np.where(tp_fp_list[i][0:min(k, tp_fp_list[i].shape[0])] > 0)[0])
  concatenated_ranks = np.concatenate(ranks)
  return np.median(concatenated_ranks) 
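
A worked toy call (an illustrative sketch): true-positive ranks within the top k of each image are pooled before taking the median.

import numpy as np

tp_fp_list = [np.array([0.0, 1.0, 0.0, 1.0]),  # TPs at ranks 1 and 3; only rank 1 is within top k=3
              np.array([1.0, 0.0])]            # TP at rank 0
print(compute_median_rank_at_k(tp_fp_list, k=3))  # pooled ranks [1, 0] -> median 0.5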
Example #12
Source File: test_laplaceAlgorithms.py    From decompose with MIT License
def test_laplace_sample():
    """Test whether the mean and the variance of the samples are correct."""
    mu = np.array([-1, 0., 1.])
    beta = np.array([0.5, 1., 2.])
    nSamples = 1000000

    nParameters = mu.shape[0]
    parameters = {"mu": tf.constant(mu),
                  "beta": tf.constant(beta)}
    tfNSamples = tf.constant(nSamples)
    r = LaplaceAlgorithms.sample(parameters=parameters,
                                 nSamples=tfNSamples)

    with tf.Session() as sess:
        r = sess.run(r)

    assert(r.shape == (nSamples, nParameters))
    muHat = np.median(r, axis=0)
    assert(np.allclose(muHat, mu, atol=1e-1))
    betaHat = np.sqrt(np.var(r, axis=0)/2.)
    assert(np.allclose(betaHat, beta, atol=1e-1)) 
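
Using the sample median rather than the mean to recover mu is deliberate: the median is the maximum-likelihood location estimator for a Laplace distribution, and the scale follows from Var = 2 * beta**2. The same identities can be checked with NumPy alone (an illustrative sketch, independent of the TensorFlow code above):

import numpy as np

rng = np.random.default_rng(0)
r = rng.laplace(loc=1.0, scale=2.0, size=1_000_000)
print(np.median(r))              # ~1.0  (location mu)
print(np.sqrt(np.var(r) / 2.0))  # ~2.0  (scale beta, since Var = 2 * beta**2)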
Example #13
Source File: robust_fit.py    From yatsm with MIT License
def mad(resid, c=0.6745):
    """
    Returns Median-Absolute-Deviation (MAD) for residuals

    Args:
        resid (np.ndarray): residuals
        c (float): scale factor to get to ~standard normal (default: 0.6745)
                 (0.6745 is the 0.75 quantile of the standard normal, so
                 1 / 0.6745 ~= 1.4826)

    Returns:
        float: MAD 'robust' variance estimate

    Reference:
        http://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    # Return median absolute deviation adjusted sigma
    return np.median(np.fabs(resid - np.median(resid))) / c


# UTILITY FUNCTIONS
# np.any prevents nopython 
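
A quick check of the estimator's robustness (a minimal sketch, assuming mad from above is in scope): a single gross outlier barely moves the MAD-based sigma, while the sample standard deviation is inflated by it.

import numpy as np

resid = np.array([0.1, -0.2, 0.05, 3.0, -0.1])  # one gross outlier
print(mad(resid))     # ~0.22 -- robust scale estimate
print(np.std(resid))  # ~1.22 -- dominated by the outlier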
Example #14
Source File: kmeans.py    From keras-yolo3 with MIT License
def kmeans(self, boxes, k, dist=np.median):
        box_number = boxes.shape[0]
        distances = np.empty((box_number, k))
        last_nearest = np.zeros((box_number,))
        np.random.seed()
        clusters = boxes[np.random.choice(
            box_number, k, replace=False)]  # init k clusters
        while True:

            distances = 1 - self.iou(boxes, clusters)

            current_nearest = np.argmin(distances, axis=1)
            if (last_nearest == current_nearest).all():
                break  # clusters won't change
            for cluster in range(k):
                clusters[cluster] = dist(  # update clusters
                    boxes[current_nearest == cluster], axis=0)

            last_nearest = current_nearest

        return clusters 
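
Note the dist=np.median default in the update step: replacing the mean with the per-dimension median makes this a k-medians-style update that is less sensitive to outlier boxes. The difference on a toy cluster (an illustrative sketch):

import numpy as np

cluster_boxes = np.array([[10, 12], [11, 13], [50, 60]])  # one outlier box
print(np.median(cluster_boxes, axis=0))  # [11. 13.] -- the outlier has no pull
print(np.mean(cluster_boxes, axis=0))    # [23.67 28.33] -- dragged toward it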
Example #15
Source File: timeDomain.py    From HRV with MIT License
def timeDomain(NN):
    
    L = len(NN)    
    ANN = np.mean(NN)
    SDNN = np.std(NN)
    SDSD = np.std(np.diff(NN))    
    NN50 = len(np.where(np.diff(NN) > 0.05)[0])    
    pNN50 = NN50/L    
    NN20 = len(np.where(np.diff(NN) > 0.02)[0])
    pNN20 = NN20/L
    rMSSD = np.sqrt((1/L) * sum(np.diff(NN) ** 2))        
    MedianNN = np.median(NN)
    
    timeDomainFeats = {'ANN': ANN, 'SDNN': SDNN,
                       'SDSD': SDSD, 'NN50': NN50,
                       'pNN50': pNN50, 'NN20': NN20,
                       'pNN20': pNN20, 'rMSSD': rMSSD,
                       'MedianNN':MedianNN}
                       
    return timeDomainFeats 
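
A sanity check on synthetic RR intervals in seconds (an illustrative sketch; values are made up). Note that this implementation counts only positive successive differences for NN50/NN20, whereas the textbook definitions use absolute differences.

import numpy as np

NN = np.array([0.80, 0.82, 0.78, 0.85, 0.79])
feats = timeDomain(NN)
print(feats['MedianNN'])  # 0.8
print(feats['NN50'])      # 1 -- one successive increase above 50 ms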
Example #16
Source File: test_function_base.py    From recruit with Apache License 2.0
def test_axis_keyword(self):
        a3 = np.array([[2, 3],
                       [0, 1],
                       [6, 7],
                       [4, 5]])
        for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
            orig = a.copy()
            np.median(a, axis=None)
            for ax in range(a.ndim):
                np.median(a, axis=ax)
            assert_array_equal(a, orig)

        assert_allclose(np.median(a3, axis=0), [3,  4])
        assert_allclose(np.median(a3.T, axis=1), [3,  4])
        assert_allclose(np.median(a3), 3.5)
        assert_allclose(np.median(a3, axis=None), 3.5)
        assert_allclose(np.median(a3.T), 3.5) 
Example #17
Source File: nav_env.py    From DOTA_models with Apache License 2.0
def _debug_save_hardness(self, seed):
    out_path = os.path.join(self.logdir, '{:s}_{:d}_hardness.png'.format(self.building_name, seed))
    batch_size = 4000
    rng = np.random.RandomState(0)
    start_node_ids, end_node_ids, dists, pred_maps, paths, hardnesss, gt_dists = \
      rng_next_goal_rejection_sampling(
          None, batch_size, self.task.gtG, rng, self.task_params.max_dist,
          self.task_params.min_dist, self.task_params.max_dist,
          self.task.sampling_distribution, self.task.target_distribution,
          self.task.nodes, self.task_params.n_ori, self.task_params.step_size,
          self.task.distribution_bins, self.task.rejection_sampling_M)
    bins = self.task.distribution_bins 
    n_bins = self.task.n_bins
    with plt.style.context('ggplot'):
      fig, axes = utils.subplot(plt, (1,2), (10,10))
      ax = axes[0]
      _ = ax.hist(hardnesss, bins=bins, weights=np.ones_like(hardnesss)/len(hardnesss))
      ax.plot(bins[:-1]+0.5/n_bins, self.task.target_distribution, 'g')
      ax.plot(bins[:-1]+0.5/n_bins, self.task.sampling_distribution, 'b')
      ax.grid('on')
      
      ax = axes[1]
      _ = ax.hist(gt_dists, bins=np.arange(self.task_params.max_dist+1))
      ax.grid('on')
      ax.set_title('Mean: {:0.2f}, Median: {:0.2f}'.format(np.mean(gt_dists),
                                                           np.median(gt_dists)))
      with fu.fopen(out_path, 'w') as f:
        fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0) 
Example #18
Source File: bag_of_characters.py    From sato with Apache License 2.0
def extract_bag_of_characters_features(data, n_val):
    
    characters_to_check = [ '['+  c + ']' for c in string.printable if c not in ( '\n', '\\', '\v', '\r', '\t', '^' )] + ['[\\\\]', '[\^]']
    
    f = OrderedDict()

    f['n_values'] = n_val
    data_no_null = data.dropna()
    all_value_features = OrderedDict()

    all_value_features['length'] = data_no_null.apply(len)

    for c in characters_to_check:
        all_value_features['n_{}'.format(c)] = data_no_null.str.count(c)
        
    for value_feature_name, value_features in all_value_features.items():
        f['{}-agg-any'.format(value_feature_name)] = any(value_features)
        f['{}-agg-all'.format(value_feature_name)] = all(value_features)
        f['{}-agg-mean'.format(value_feature_name)] = np.mean(value_features)
        f['{}-agg-var'.format(value_feature_name)] = np.var(value_features)
        f['{}-agg-min'.format(value_feature_name)] = np.min(value_features)
        f['{}-agg-max'.format(value_feature_name)] = np.max(value_features)
        f['{}-agg-median'.format(value_feature_name)] = np.median(value_features)
        f['{}-agg-sum'.format(value_feature_name)] = np.sum(value_features)
        f['{}-agg-kurtosis'.format(value_feature_name)] = kurtosis(value_features)
        f['{}-agg-skewness'.format(value_feature_name)] = skew(value_features)

    n_none = data.size - data_no_null.size - len([ e for e in data if e == ''])
    f['none-agg-has'] = n_none > 0
    f['none-agg-percent'] = n_none / len(data)
    f['none-agg-num'] = n_none
    f['none-agg-all'] = (n_none == len(data))
    #print(len(f))
    return f 
Example #19
Source File: tasks.py    From seizure-prediction with MIT License
def make_csv_for_target_predictions(target, predictions):
    return ['%s_test_segment_%.4d.mat,%.10f' % (target, i+1, p) for i, p in enumerate(predictions)]


# Wrapper to return both mean and median-combined predictions in csv submission format 
Example #20
Source File: test_GaussianAnalytic.py    From differential-privacy-library with MIT License
def test_zero_median_prob(self):
        self.mech.set_sensitivity(1).set_epsilon_delta(0.5, 0.1)
        vals = []

        for i in range(20000):
            vals.append(self.mech.randomise(0.5))

        median = float(np.median(vals))
        self.assertAlmostEqual(np.abs(median), 0.5, delta=0.1) 
Example #21
Source File: evaluate.py    From spleeter with MIT License
def _compile_metrics(metrics_output_directory):
    """ Compiles metrics from given directory and returns
    results as dict.

    :param metrics_output_directory: Directory to get metrics from.
    :returns: Compiled metrics as dict.
    """
    songs = glob(join(metrics_output_directory, 'test/*.json'))
    index = pd.MultiIndex.from_tuples(
        product(_INSTRUMENTS, _METRICS),
        names=['instrument', 'metric'])
    pd.DataFrame([], index=['config1', 'config2'], columns=index)
    metrics = {
        instrument: {k: [] for k in _METRICS}
        for instrument in _INSTRUMENTS}
    for song in songs:
        with open(song, 'r') as stream:
            data = json.load(stream)
        for target in data['targets']:
            instrument = target['name']
            for metric in _METRICS:
                sdr_med = np.median([
                    frame['metrics'][metric]
                    for frame in target['frames']
                    if not np.isnan(frame['metrics'][metric])])
                metrics[instrument][metric].append(sdr_med)
    return metrics 
Example #22
Source File: tasks.py    From seizure-prediction with MIT License
def train(classifier, training_data, quiet=False):
    X_train = training_data.X_train
    y_train = training_data.y_train
    if not quiet: print('Training ...', end=' ')
    start = time.get_seconds()
    classifier.fit(X_train, y_train)
    if not quiet: print('%ds' % (time.get_seconds() - start))


# Make predictions, and then combine the N predictions if using windows using mean and median.
# Returns (mean_predictions, median_predictions, raw_predictions) 
Example #23
Source File: document.py    From web-document-scanner with MIT License
def auto_canny(self, image, sigma=0.33):
        v = np.median(image)
    
        lower = int(max(0, (1.0 - sigma) * v))
        upper = int(min(255, (1.0 + sigma) * v))
        edged = cv2.Canny(image, lower, upper)
    
        # return the edged image
        return edged

    # http://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/ 
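
The threshold arithmetic can be checked without OpenCV (a minimal sketch): with the default sigma=0.33, the Canny band spans roughly +/-33% around the median pixel intensity.

import numpy as np

image = np.array([[50, 60], [70, 200]], dtype=np.uint8)
v = np.median(image)                     # 65.0
lower = int(max(0, (1.0 - 0.33) * v))    # 43
upper = int(min(255, (1.0 + 0.33) * v))  # 86
print(lower, upper)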
Example #24
Source File: ext_median_abs_dev.py    From feets with MIT License
def fit(self, magnitude):
        median = np.median(magnitude)
        devs = abs(magnitude - median)
        return {"MedianAbsDev": np.median(devs)} 
Example #25
Source File: ext_percent_difference_flux_percentile.py    From feets with MIT License
def fit(self, magnitude):
        median_data = np.median(magnitude)

        sorted_data = np.sort(magnitude)
        lc_length = len(sorted_data)
        F_5_index = int(math.ceil(0.05 * lc_length))
        F_95_index = int(math.ceil(0.95 * lc_length))
        F_5_95 = sorted_data[F_95_index] - sorted_data[F_5_index]

        percent_difference = F_5_95 / median_data

        return {"PercentDifferenceFluxPercentile": percent_difference} 
Example #26
Source File: ext_median_brp.py    From feets with MIT License
def fit(self, magnitude):
        median = np.median(magnitude)
        amplitude = (np.max(magnitude) - np.min(magnitude)) / 10
        n = len(magnitude)

        count = np.sum(
            np.logical_and(
                magnitude < median + amplitude, magnitude > median - amplitude
            )
        )

        return {"MedianBRP": float(count) / n} 
Example #27
Source File: ext_percent_amplitude.py    From feets with MIT License
def fit(self, magnitude):
        median_data = np.median(magnitude)
        distance_median = np.abs(magnitude - median_data)
        max_distance = np.max(distance_median)

        percent_amplitude = max_distance / median_data

        return {"PercentAmplitude": percent_amplitude} 
Example #28
Source File: ext_amplitude.py    From feets with MIT License
def _median_min_max_5p(self, magnitude):
        N = len(magnitude)
        sorted_mag = np.sort(magnitude)

        max5p = np.median(sorted_mag[-int(math.ceil(0.05 * N)) :])
        min5p = np.median(sorted_mag[0 : int(math.ceil(0.05 * N))])

        return min5p, max5p 
Example #29
Source File: evaluate.py    From spleeter with MIT License
def entrypoint(arguments, params):
    """ Command entrypoint.

    :param arguments: Command line parsed argument as argparse.Namespace.
    :param params: Deserialized JSON configuration file provided in CLI args.
    """
    # Parse and check musdb directory.
    musdb_root_directory = arguments.mus_dir
    if not exists(musdb_root_directory):
        raise IOError(f'musdb directory {musdb_root_directory} not found')
    # Separate musdb sources.
    audio_output_directory = _separate_evaluation_dataset(
        arguments,
        musdb_root_directory,
        params)
    # Compute metrics with musdb.
    metrics_output_directory = _compute_musdb_metrics(
        arguments,
        musdb_root_directory,
        audio_output_directory)
    # Compute and pretty print median metrics.
    metrics = _compile_metrics(metrics_output_directory)
    for instrument, inst_metrics in metrics.items():
        get_logger().info('%s:', instrument)
        for metric, value in inst_metrics.items():
            get_logger().info('%s: %s', metric, f'{np.median(value):.3f}')

    return metrics 
Example #30
Source File: nilearn.py    From NiBetaSeries with MIT License
def is_outlier(points, thresh=3.5):
    """
    Returns a boolean array with True if points are outliers and False
    otherwise.

    modified from nipype:
    https://github.com/nipy/nipype/blob/b62d80/nipype/algorithms/confounds.py#L1129

    Parameters
    ----------
    points: nparray
        a numobservations-by-numdimensions numpy array of observations
    thresh: float
        the modified z-score to use as a threshold. Observations with
        a modified z-score (based on the median absolute deviation) greater
        than this value will be classified as outliers.

    Returns
    -------
        A boolean mask of length numobservations.

    .. note:: References
        Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
        Handle Outliers", The ASQC Basic References in Quality Control:
        Statistical Techniques, Edward F. Mykytka, Ph.D., Editor.
    """
    import numpy as np

    if len(points.shape) == 1:
        points = points[:, None]
    median = np.median(points, axis=0)
    diff = np.sum((points - median)**2, axis=-1)
    diff = np.sqrt(diff)
    med_abs_deviation = np.median(diff)

    modified_z_score = 0.6745 * diff / med_abs_deviation

    return modified_z_score > thresh
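
A quick demonstration (an illustrative sketch, assuming is_outlier from above is in scope): a single extreme value injected into Gaussian noise is flagged while the bulk is not.

import numpy as np

rng = np.random.default_rng(0)
points = np.concatenate([rng.normal(0.0, 1.0, 100), [15.0]])
mask = is_outlier(points)
print(mask[-1], mask[:-1].sum())  # True, with few (often zero) false alarms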