Python scipy.stats.mannwhitneyu() Examples

The following are 25 code examples of scipy.stats.mannwhitneyu(), drawn from open-source projects. You can go to the original project or source file by following the links above each example, or check out the other available functions and classes of the scipy.stats module.
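As a quick orientation before the project examples, here is a minimal sketch of a typical call; the toy arrays are made up for illustration, and only mannwhitneyu itself comes from scipy.stats.

import numpy as np
from scipy.stats import mannwhitneyu

# Two small, made-up samples to compare.
a = np.array([1.2, 3.4, 2.2, 5.1, 0.7])
b = np.array([2.9, 4.8, 3.6, 6.0, 5.5])

# Two-sided test; many of the examples below pass `alternative` explicitly.
stat, pvalue = mannwhitneyu(a, b, alternative='two-sided')
print('U = {:.1f}, p = {:.4f}'.format(stat, pvalue))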
Example #1
Source File: monocyte_macrophage.py    From scanorama with MIT License
def diff_expr(A, B, genes, permute_cutoff, verbose=True):

    p_vals = []
    for idx, gene in enumerate(genes):
        if sum(A[:, idx]) == 0 and sum(B[:, idx]) == 0:
            p_vals.append(1.)
            continue
        u, p = mannwhitneyu(A[:, idx], B[:, idx])
        p_vals.append(p)

    de_genes = []
    for idx, gene in enumerate(genes):
        if p_vals[idx] < permute_cutoff:
            if verbose:
                print('{}\t{}'.format(gene, p_vals[idx]))
            de_genes.append(gene)
    return de_genes 
Example #2
Source File: basenji_test_reps.py    From basenji with Apache License 2.0
def stat_tests(ref_cors, exp_cors, alternative):
  _, mwp = mannwhitneyu(ref_cors, exp_cors, alternative=alternative)
  tt, tp = ttest_ind(ref_cors, exp_cors)

  if alternative == 'less':
    if tt > 0:
      tp = 1 - (1-tp)/2
    else:
      tp /= 2
  elif alternative == 'greater':
    if tt <= 0:
      tp /= 2
    else:
      tp = 1 - (1-tp)/2

  return mwp, tp

Example #3
Source File: compare_genomes.py    From mCaller with MIT License
def compare_by_position(bed1,bed2,xmfa):
    pos_dict = {}

    for i,bed in enumerate([bed1,bed2]):
        pos_dict[i] = {}
        with open(bed,'r') as fi:
            for line in fi:
                #2  1892198 1892199 TCMMTMTTMMM 0.5 -   16
                csome,start,end,motif,perc_meth,strand,num_reads,probabilities = tuple(line.split('\t'))
                pos_dict[i][(csome,start,end,strand)] = ((perc_meth,num_reads),np.asarray([float(p) for p in probabilities.strip().split(',')]))

    for pos in pos_dict[0]:
        if pos in pos_dict[1]:
            # compare the per-read probability arrays at this position between the two samples
            try:
                u,pval = mannwhitneyu(pos_dict[0][pos][1],pos_dict[1][pos][1],alternative='two-sided')
            except ValueError:
                u,pval = 'none','identical'
            u2,pval2 = ranksums(pos_dict[0][pos][1],pos_dict[1][pos][1])
            try:
                t,pval3 = ttest_ind(pos_dict[0][pos][1],pos_dict[1][pos][1])
            except:
                t,pval3 = 'none','missing df'
            d,pval4 = ks_2samp(pos_dict[0][pos][1],pos_dict[1][pos][1])
            if pval4 < 0.9:
                print(pos, pos_dict[0][pos][0], pos_dict[1][pos][0], pval, pval2, pval3, pval4)
Example #4
Source File: fMRI_conn_vs_memory_score.py    From mmvt with GNU General Public License v3.0
def run_sandya_code():
    import scipy.stats  # importing scipy alone does not load the stats submodule
    import scipy as spy
    san_path = '/home/npeled/Documents/memory_task/code/'
    ROI_values_dyn = np.load(op.join(san_path, 'ROI_values_dyn102.npy'))
    subject_groups = np.load(op.join(san_path, 'subject_groups.npy'))

    # disturbed = 1, preserved = 0
    disturbed_inds = np.array(np.where(subject_groups == 1))
    preserved_inds = np.array(np.where(subject_groups == 0))

    # left is 0, right is 1
    laterality = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    # Separate by contralateral and ipsilateral - ipsi is left, contra is right
    # Mann-Whitney tests between groups - p<0.0125 - bonferroni correction for 4 ROIs
    # spy.mannwhitneyu(x,y)
    ROI_values_dyn_disturbed = np.squeeze(ROI_values_dyn[:, disturbed_inds])
    ROI_values_dyn_preserved = np.squeeze(ROI_values_dyn[:, preserved_inds])
    mann_whitney_results_dyn = np.zeros((ROI_values_dyn.shape[0], 2), 'float')

    for i in range(ROI_values_dyn.shape[0]):
        mann_whitney_results_dyn[i, :] = spy.stats.mannwhitneyu(ROI_values_dyn_disturbed[i, :],
                                                                ROI_values_dyn_preserved[i, :]).pvalue

    print(mann_whitney_results_dyn) 
Example #5
Source File: test_stats.py    From scprep with GNU General Public License v3.0
def test_u_statistic():
    X = data.generate_positive_sparse_matrix(shape=(500, 3), seed=42, poisson_mean=0.2)
    Y = data.generate_positive_sparse_matrix(shape=(500, 3), seed=42, poisson_mean=0.3)
    u_stat = [
        stats.mannwhitneyu(X[:, i], Y[:, i], alternative="two-sided")[0]
        for i in range(X.shape[1])
    ]

    def test_fun(X):
        return scprep.stats.rank_sum_statistic(
            scprep.select.select_rows(X, idx=np.arange(500)),
            scprep.select.select_rows(X, idx=np.arange(500, 1000)),
        )

    matrix.test_all_matrix_types(
        np.vstack([X, Y]),
        utils.assert_transform_equals,
        Y=u_stat,
        transform=test_fun,
        check=utils.assert_all_close,
    ) 
Example #6
Source File: test_stats.py    From GraphicDesignPatternByPython with MIT License
def test_mannwhitneyu_no_correct_one_sided(self):
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
                                    alternative='less')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
                                    alternative='greater')
        u3, p3 = stats.mannwhitneyu(self.X, self.Y, False,
                                    alternative='greater')
        u4, p4 = stats.mannwhitneyu(self.Y, self.X, False,
                                    alternative='less')

        assert_equal(p1, p2)
        assert_equal(p3, p4)
        assert_(p1 != p3)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_equal(u3, 498)
        assert_equal(u4, 102)
        assert_approx_equal(p1, 0.999955905990004, significant=self.significant)
        assert_approx_equal(p3, 4.40940099958089e-05, significant=self.significant) 
Example #7
Source File: test_stats.py    From GraphicDesignPatternByPython with MIT License
def test_mannwhitneyu_default(self):
        # The default value for alternative is None
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning,
                       "Calling `mannwhitneyu` without .*`alternative`")
            u1, p1 = stats.mannwhitneyu(self.X, self.Y)
            u2, p2 = stats.mannwhitneyu(self.Y, self.X)
            u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative=None)

        assert_equal(p1, p2)
        assert_equal(p1, p3)
        assert_equal(u1, 102)
        assert_equal(u2, 102)
        assert_equal(u3, 102)
        assert_approx_equal(p1, 4.5941632666275e-05,
                            significant=self.significant) 
Example #8
Source File: test_stats.py    From GraphicDesignPatternByPython with MIT License
def test_mannwhitneyu_one_sided(self):
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='less')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='greater')
        u3, p3 = stats.mannwhitneyu(self.X, self.Y, alternative='greater')
        u4, p4 = stats.mannwhitneyu(self.Y, self.X, alternative='less')

        assert_equal(p1, p2)
        assert_equal(p3, p4)
        assert_(p1 != p3)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_equal(u3, 498)
        assert_equal(u4, 102)
        assert_approx_equal(p1, 0.999957683256589, significant=self.significant)
        assert_approx_equal(p3, 4.5941632666275e-05, significant=self.significant) 
Example #9
Source File: ROCalyzer.py    From biskit with GNU General Public License v3.0
def pfisher( pvalues ):
    """
    Combine independent P-values into one according to

    Fisher, R. A. (1948) Combining independent tests of significance.
    American Statistician, vol. 2, issue 5, page 30.

    ('Fisher method' or 'inverse ChiSquare method') See also book:
    Walter W. Piegorsch, A. John Bailer: Analyzing Environmental Data.
    Wiley 2005

    @param pvalues: list of independent P-values
    @type  pvalues: [ float ]
    @return: P-value 
    @rtype: float
    """
    ## stats.mannwhitneyu's minimal P is ~ stats.zprob( 8.2 );
    ## everything below becomes 0., which is not handled by the Fisher test
    clipped = N.clip( pvalues, 1.0e-16, 1.0 )

    x2 = -2 * N.sum( N.log( clipped ) )

    return stats.chisqprob( x2, 2*len(pvalues) )


Example #10
Source File: ROCalyzer.py    From biskit with GNU General Public License v3.0
def utest( self, score ):
        """
        Gives the Mann-Whitney U test probability that the score is
        random.  See:

        Mason & Graham (2002) Areas beneath the relative operating
        characteristics (ROC) and relative operating levels (ROL)
        curves: Statistical significance and interpretation

        Note (1): P-values below ~1e-16 are reported as 0.0.
        See zprob() in Biskit.Statistics.stats!

        Note (2): the P-value does not distinguish between positive
        and negative deviations from random -- a ROC area of 0.1 will
        get the same P-value as a ROC area of 0.9.

        @param score: the score predicted for each item
        @type  score: [ float ]

        @return: 1-tailed P-value
        @rtype: float
        """
        sample1 = N.compress( self.positives, score )
        sample1 = sample1[-1::-1]  # invert order

        sample2 = N.compress( N.logical_not( self.positives ), score )
        sample2 = sample2[-1::-1]  # invert order

        sample1 = sample1.tolist()
        sample2 = sample2.tolist()

        p = stats.mannwhitneyu( sample1, sample2 )
        return p[1] 
Example #11
Source File: DataSeparate.py    From FAE with GNU General Public License v3.0
def _CompareNumetricFeatures(self, array1, array2):
        description = {}
        _, description['p-value'] = mannwhitneyu(array1, array2)
        description['method'] = 'Mann-Whitney'
        description['description'] = ['{:.2f}±{:.2f}'.format(np.mean(array1), np.std(array1)),
                                      '{:.2f}±{:.2f}'.format(np.mean(array2), np.std(array2))]
        return description 
Example #12
Source File: fMRI_conn_vs_memory_score.py    From mmvt with GNU General Public License v3.0
def run_stat(res, disturbed_inds, preserved_inds):
    mann_whitney_results = {pc:None for pc in res.keys()}
    for pc, dFCs in res.items():
        subjects_num, labels_num = dFCs.shape
        for label_ind in range(labels_num):
            test_res = mannwhitneyu(dFCs[disturbed_inds, label_ind], dFCs[preserved_inds, label_ind])
            if mann_whitney_results[pc] is None:
                mann_whitney_results[pc] = np.zeros(labels_num)
            mann_whitney_results[pc][label_ind] = test_res.pvalue
    return mann_whitney_results 
Example #13
Source File: test_single_external_libs.py    From diffxpy with BSD 3-Clause "New" or "Revised" License
def test_rank_ref(self, n_cells: int = 2000, n_genes: int = 100):
        """
        Test if de.test.rank_test() generates the same p-value distribution as the scipy Mann-Whitney U test.

        :param n_cells: Number of cells to simulate (number of observations per test).
        :param n_genes: Number of genes to simulate (number of tests).
        """
        logging.getLogger("tensorflow").setLevel(logging.ERROR)
        logging.getLogger("batchglm").setLevel(logging.WARNING)
        logging.getLogger("diffxpy").setLevel(logging.INFO)

        np.random.seed(1)
        sim = self._prepare_data(n_cells=n_cells, n_genes=n_genes)
        test = de.test.rank_test(
            data=sim.input_data,
            grouping="condition",
            sample_description=sim.sample_description
        )

        # Run scipy Mann-Whitney U tests as a reference.
        conds = np.unique(sim.sample_description["condition"].values)
        ind_a = np.where(sim.sample_description["condition"] == conds[0])[0]
        ind_b = np.where(sim.sample_description["condition"] == conds[1])[0]
        scipy_pvals = np.array([
            stats.mannwhitneyu(
                x=sim.x[ind_a, i],
                y=sim.x[ind_b, i],
                use_continuity=True,
                alternative="two-sided"
            ).pvalue
            for i in range(sim.x.shape[1])
        ])
        self._eval(test=test, ref_pvals=scipy_pvals)
        return True 
Example #14
Source File: viz_utils.py    From cpae with MIT License
def str_mann_whitney(data1, data2):
    s = "# datapoints {}, {} ; ".format(len(data1), len(data2))
    U, p = mannwhitneyu(data1, data2, alternative='two-sided')
    s += "U = {:0.2f} ; p = {:0.2E}".format(U, p)
    return s 
Example #15
Source File: atlas3.py    From ssbio with MIT License
def compute_skew_stats(intra, inter):
    """Returns two dictionaries reporting (skew, skew_pval) for all groups"""
    # Intra (within a group) stats
    intra_skew = {}
    for k, v in intra.items():
        skew = st.skew(v)
        try:
            skew_zstat, skew_pval = st.skewtest(v)
        except ValueError:  # if sample size too small
            skew_zstat, skew_pval = (0, 1)
        intra_skew[k] = (skew, skew_zstat, skew_pval)

    # Inter (between groups) stats
    inter_skew = {}
    for k, v in inter.items():
        # Inter skew stats
        skew_sep = st.skew(v.flatten())
        try:
            skew_sep_zstat, skew_sep_pval = st.skewtest(v.flatten())
        except ValueError:
            skew_sep_zstat, skew_sep_pval = (0, 1)
        inter_skew['-'.join(k)] = (skew_sep, skew_sep_zstat, skew_sep_pval)

        # Significance of difference between intra and inter distributions
        for intra_key in k:
            try:
                separation_zstat, separation_pval = mannwhitneyu(intra[intra_key],
                                                                 v.flatten(),
                                                                 alternative='less')
            except ValueError:  # All numbers are identical in mannwhitneyu
                separation_zstat, separation_pval = (0, 1)
            inter_skew['{}<{}'.format(intra_key, '-'.join(k))] = (separation_zstat, separation_pval)

    return intra_skew, inter_skew 
Example #16
Source File: ptest.py    From pregel with MIT License
def _compare_distributions(dist1, dist2):
    '''Compare samples from two general distributions to determine whether they are
    likely to have been drawn from the same distribution.
    Returns True if the two lists of samples are likely to come from the same distribution.'''

    _, pvalue = mannwhitneyu(dist1, dist2)

    if (pvalue > 0.05):
        # Likely to be drawn from same distribution
        return True

    else:
        return False 
Example #17
Source File: columnar_tests.py    From drifter_ml with MIT License
def mann_whitney_u_similar_distribution(self, column,
                                            pvalue_threshold=0.05,
                                            num_rounds=3):
        p_value = permutation_test(
            self.new_data[column],
            self.historical_data[column],
            method="approximate",
            num_rounds=num_rounds,
            func=lambda x, y: stats.mannwhitneyu(x, y).statistic,
            seed=0)

        if p_value < pvalue_threshold:
            return False
        return True 
Example #18
Source File: test_stats.py    From GraphicDesignPatternByPython with MIT License
def test_mannwhitneyu_result_attributes(self):
        # test for namedtuple attribute results
        attributes = ('statistic', 'pvalue')
        res = stats.mannwhitneyu(self.X, self.Y, alternative="less")
        check_named_results(res, attributes) 
Example #19
Source File: test_stats.py    From GraphicDesignPatternByPython with MIT License
def test_mannwhitneyu_ones(self):
        x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1.])

        y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                      2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                      1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                      2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                      2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                      2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                      1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                      1., 1., 1., 1.])

        # p-value verified with matlab and R to 5 significant digits
        assert_array_almost_equal(stats.stats.mannwhitneyu(x, y,
                                                           alternative='less'),
                                  (16980.5, 2.8214327656317373e-005),
                                  decimal=12) 
Example #20
Source File: test_stats.py    From GraphicDesignPatternByPython with MIT License
def test_mannwhitneyu_no_correct_two_sided(self):
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, False,
                                    alternative='two-sided')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, False,
                                    alternative='two-sided')

        assert_equal(p1, p2)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_approx_equal(p1, 8.81880199916178e-05,
                            significant=self.significant) 
Example #21
Source File: test_stats.py    From GraphicDesignPatternByPython with MIT License
def test_mannwhitneyu_two_sided(self):
        u1, p1 = stats.mannwhitneyu(self.X, self.Y, alternative='two-sided')
        u2, p2 = stats.mannwhitneyu(self.Y, self.X, alternative='two-sided')

        assert_equal(p1, p2)
        assert_equal(u1, 498)
        assert_equal(u2, 102)
        assert_approx_equal(p1, 9.188326533255e-05,
                            significant=self.significant) 
Example #22
Source File: utils.py    From dl-eeg-review with MIT License
def run_mannwhitneyu(df, condition_col, conditions, value_col='acc_diff',
                     min_n_obs=10, plot=False):
    """Run Mann-Whitney rank-sum test.

    Args:
        df (pd.DataFrame): dataframe where each row is a paper.
        condition_col (str): name of column to use as condition.
        conditions (list): list of two strings containing the values of the
            condition to compare.

    Keyword Args:
        value_col (str): name of column to use as the numerical value to run the
            test on.
        min_n_obs (int): minimum number of observations in each sample in order
            to run the test.
        plot (bool): if True, also draw a violin plot of the two conditions.

    Returns:
        (dict): test name ('mannwhitneyu'), p-value, U statistic and the
            matplotlib figure (or None if no plot was made).
    """
    assert len(conditions) == 2, '`conditions` must be of length 2, got {}'.format(
        len(conditions))
    data1 = df[df[condition_col] == conditions[0]][value_col]
    data2 = df[df[condition_col] == conditions[1]][value_col]

    if len(data1) >= min_n_obs and len(data2) >= min_n_obs:
        stat, p = mannwhitneyu(data1, data2)
    else:
        stat, p = np.nan, np.nan
        print('Not enough observations in each sample ({} and {}).'.format(
            len(data1), len(data2)))

    if plot:
        fig, ax = plt.subplots()
        sns.violinplot(
            data=df[df[condition_col].isin(conditions)], x=condition_col, 
            y=value_col, ax=ax)
        ax.set_title('Mann-Whitney for {} vs. {}\n(pvalue={:0.4f})'.format(
            condition_col, value_col, p))
    else:
        fig = None

    return {'test': 'mannwhitneyu', 'pvalue': p, 'stat': stat, 'fig': fig} 
Example #23
Source File: effsize.py    From DABEST-python with BSD 3-Clause Clear License
def cliffs_delta(control, test):
    """
    Computes Cliff's delta for 2 samples.
    See https://en.wikipedia.org/wiki/Effect_size#Effect_size_for_ordinal_data

    Keywords
    --------
    control, test: numeric iterables.
        These can be lists, tuples, or arrays of numeric types.

    Returns
    -------
        A single numeric float.
    """
    import numpy as np
    from scipy.stats import mannwhitneyu

    # Convert to numpy arrays for speed.
    # NaNs are automatically dropped.
    if control.__class__ != np.ndarray:
        control = np.array(control)
    if test.__class__ != np.ndarray:
        test    = np.array(test)

    c = control[~np.isnan(control)]
    t = test[~np.isnan(test)]

    control_n = len(c)
    test_n = len(t)

    # Note the order of the control and test arrays.
    U, _ = mannwhitneyu(t, c, alternative='two-sided')
    cliffs_delta = ((2 * U) / (control_n * test_n)) - 1

    # more = 0
    # less = 0
    #
    # for i, c in enumerate(control):
    #     for j, t in enumerate(test):
    #         if t > c:
    #             more += 1
    #         elif t < c:
    #             less += 1
    #
    # cliffs_delta = (more - less) / (control_n * test_n)

    return cliffs_delta 
Example #24
Source File: numerical_comparison.py    From DIVE-backend with GNU General Public License v3.0
def get_valid_tests(equal_var, independent, normal, num_samples):
    '''
    Get valid tests given number of samples and statistical characterization of
    samples:

    Equal variance
    Independence
    Normality
    '''
    if num_samples == 1:
        valid_tests = {
            'chisquare': stats.chisquare,
            'power_divergence': stats.power_divergence,
            'kstest': stats.kstest
        }
        if normal:
            valid_tests['one_sample_ttest'] = stats.ttest_1samp

    elif num_samples == 2:
        if independent:
            valid_tests = {
                'mannwhitneyu': stats.mannwhitneyu,
                'kruskal': stats.kruskal,
                'ks_2samp': stats.ks_2samp
            }
            if normal:
                valid_tests['two_sample_ttest'] = stats.ttest_ind
                if equal_var:
                    valid_tests['f_oneway'] = stats.f_oneway
        else:
            valid_tests = {
                'two_sample_ks': stats.ks_2samp,
                'wilcoxon': stats.wilcoxon
            }
            if normal:
                valid_tests['two_sample_related_ttest'] = stats.ttest_rel

    elif num_samples >= 3:
        if independent:
            valid_tests = {
                'kruskal': stats.kruskal
            }
            if normal and equal_var:
                valid_tests['f_oneway'] = stats.f_oneway

        else:
            valid_tests = {
                'friedmanchisquare': stats.friedmanchisquare
            }

    return valid_tests 
Example #25
Source File: MannWhitneyU.py    From scattertext with Apache License 2.0
def get_score_df(self, correction_method=None):
        '''
        Computes Mann-Whitney corrected p-values and z-values. Falls back to a normal approximation when numerical limits are reached.

        :param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
         'fdr_bh' is recommended.
        :return: pd.DataFrame
        '''
        X = self._get_X().astype(np.float64)
        X = X / X.sum(axis=1)
        cat_X, ncat_X = self._get_cat_and_ncat(X)

        def normal_apx(u, x, y):
            # from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
            m_u = len(x) * len(y) / 2
            sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
            z = (u - m_u) / sigma_u
            return 2*norm.cdf(z)
        scores = []
        for i in range(cat_X.shape[1]):
            cat_list = cat_X.T[i].A1
            ncat_list = ncat_X.T[i].A1
            try:
                if cat_list.mean() > ncat_list.mean():
                    mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        # the result tuple is immutable, so keep the corrected p-value locally
                        pvalue = normal_apx(mw.statistic, cat_list, ncat_list)

                    scores.append({'mwu': mw.statistic, 'mwu_p': pvalue, 'mwu_z': norm.isf(float(pvalue)), 'valid':True})

                else:
                    mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        pvalue = normal_apx(mw.statistic, ncat_list, cat_list)

                    scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - pvalue, 'mwu_z': 1. - norm.isf(float(pvalue)), 'valid':True})
            except:
                scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid':False})

        score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
        if correction_method is not None:
            from statsmodels.stats.multitest import multipletests
            for method in ['mwu']:
                valid_pvals = score_df[score_df.valid].mwu_p
                valid_pvals_abs = np.min([valid_pvals, 1-valid_pvals], axis=0)
                valid_pvals_abs_corr = multipletests(valid_pvals_abs, method=correction_method)[1]
                score_df[method + '_p_corr'] = 0.5
                valid_pvals_abs_corr[valid_pvals > 0.5] = 1. - valid_pvals_abs_corr[valid_pvals > 0.5]
                valid_pvals_abs_corr[valid_pvals < 0.5] = valid_pvals_abs_corr[valid_pvals < 0.5]
                score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
                score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr'])
        return score_df