Python autograd.numpy.min() Examples

The following are 20 code examples of autograd.numpy.min(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module autograd.numpy, or try the search function.
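At a glance, autograd.numpy.min() behaves like numpy.min() but is differentiable: in reverse mode, the entire gradient flows to the minimizing element. A minimal sketch, assuming autograd is installed:

import autograd.numpy as np
from autograd import grad

def smallest(x):
    return np.min(x)

g = grad(smallest)
print(g(np.array([3.0, 1.0, 2.0])))  # [0. 1. 0.] -- all gradient goes to the argmin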
Example #1
Source File: util.py    From kernel-gof with MIT License
def bound_by_data(Z, Data):
    """
    Determine lower and upper bound for each dimension from the Data, and project 
    Z so that all points in Z live in the bounds.

    Z: m x d 
    Data: n x d

    Return a projected Z of size m x d.
    """
    m, d = Z.shape
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    LowMat = np.repeat(Low[np.newaxis, :], m, axis=0)
    UpMat = np.repeat(Up[np.newaxis, :], m, axis=0)

    Z = np.maximum(LowMat, Z)
    Z = np.minimum(UpMat, Z)
    return Z 
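A quick sanity check of the projection (a hedged sketch; np is autograd.numpy as in the source file, and the values are chosen for illustration):

Z = np.array([[2.0, -5.0], [0.5, 0.5]])
Data = np.array([[0.0, 0.0], [1.0, 1.0]])
print(bound_by_data(Z, Data))  # [[1.  0. ]
                               #  [0.5 0.5]] -- Z clipped to Data's per-dimension bounds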
Example #2
Source File: geometry.py    From AeroSandbox with MIT License
def get_thickness_at_chord_fraction_legacy(self, chord_fraction):
        # Returns the (interpolated) thickness at a given location(s). The location is specified by the chord fraction, as measured from the leading edge. Thickness is nondimensionalized by chord (i.e. this function returns t/c at a given x/c).
        chord = np.max(self.coordinates[:, 0]) - np.min(
            self.coordinates[:, 0])  # This should always be 1, but this is just coded for robustness.

        x = chord_fraction * chord + min(self.coordinates[:, 0])

        upperCoors = self.upper_coordinates()
        lowerCoors = self.lower_coordinates()

        y_upper_func = sp_interp.interp1d(x=upperCoors[:, 0], y=upperCoors[:, 1], copy=False, fill_value='extrapolate')
        y_lower_func = sp_interp.interp1d(x=lowerCoors[:, 0], y=lowerCoors[:, 1], copy=False, fill_value='extrapolate')

        y_upper = y_upper_func(x)
        y_lower = y_lower_func(x)

        thickness = np.maximum(y_upper - y_lower, 0)

        return thickness 
Example #3
Source File: geometry.py    From AeroSandbox with MIT License
def get_camber_at_chord_fraction_legacy(self, chord_fraction):
        # Returns the (interpolated) camber at a given location(s). The location is specified by the chord fraction, as measured from the leading edge. Camber is nondimensionalized by chord (i.e. this function returns camber/c at a given x/c).
        chord = np.max(self.coordinates[:, 0]) - np.min(
            self.coordinates[:, 0])  # This should always be 1, but this is just coded for robustness.

        x = chord_fraction * chord + min(self.coordinates[:, 0])

        upperCoors = self.upper_coordinates()
        lowerCoors = self.lower_coordinates()

        y_upper_func = sp_interp.interp1d(x=upperCoors[:, 0], y=upperCoors[:, 1], copy=False, fill_value='extrapolate')
        y_lower_func = sp_interp.interp1d(x=lowerCoors[:, 0], y=lowerCoors[:, 1], copy=False, fill_value='extrapolate')

        y_upper = y_upper_func(x)
        y_lower = y_lower_func(x)

        camber = (y_upper + y_lower) / 2

        return camber 
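A hedged usage sketch for the two legacy accessors above (assumes an AeroSandbox Airfoil instance; the NACA name is illustrative):

af = Airfoil(name="naca2412")
x_over_c = np.linspace(0, 1, 11)
t_over_c = af.get_thickness_at_chord_fraction_legacy(x_over_c)  # thickness distribution t/c
camber = af.get_camber_at_chord_fraction_legacy(x_over_c)       # camber distribution camber/c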
Example #4
Source File: data.py    From autograd with MIT License
def plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),
                cmap=matplotlib.cm.binary, vmin=None, vmax=None):
    """Images should be a (N_images x pixels) matrix."""
    N_images = images.shape[0]
    N_rows = (N_images - 1) // ims_per_row + 1
    pad_value = np.min(images.ravel())
    concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,
                             (digit_dimensions[1] + padding) * ims_per_row + padding), pad_value)
    for i in range(N_images):
        cur_image = np.reshape(images[i, :], digit_dimensions)
        row_ix = i // ims_per_row
        col_ix = i % ims_per_row
        row_start = padding + (padding + digit_dimensions[0]) * row_ix
        col_start = padding + (padding + digit_dimensions[1]) * col_ix
        concat_images[row_start: row_start + digit_dimensions[0],
                      col_start: col_start + digit_dimensions[1]] = cur_image
    cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    return cax 
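A hedged usage sketch, with random pixels standing in for MNIST digits (assumes plot_images and its matplotlib imports are in scope as in data.py):

import matplotlib.pyplot as plt
import autograd.numpy as np

fig, ax = plt.subplots()
images = np.random.rand(10, 28 * 28)  # (N_images x pixels) matrix
plot_images(images, ax)
plt.show()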
Example #5
Source File: test_systematic.py    From autograd with MIT License
def test_min():  stat_check(np.min) 
Example #6
Source File: goftest.py    From kernel-gof with MIT License
def power_criterion(p, dat, b, c, test_locs, reg=1e-2):
        k = kernel.KIMQ(b=b, c=c)
        return FSSD.power_criterion(p, dat, k, test_locs, reg)

    #@staticmethod
    #def optimize_auto_init(p, dat, J, **ops):
    #    """
    #    Optimize parameters by calling optimize_locs_widths(). Automatically 
    #    initialize the test locations and the Gaussian width.

    #    Return optimized locations, Gaussian width, optimization info
    #    """
    #    assert J>0
    #    # Use grid search to initialize the gwidth
    #    X = dat.data()
    #    n_gwidth_cand = 5
    #    gwidth_factors = 2.0**np.linspace(-3, 3, n_gwidth_cand) 
    #    med2 = util.meddistance(X, 1000)**2

    #    k = kernel.KGauss(med2*2)
    #    # fit a Gaussian to the data and draw to initialize V0
    #    V0 = util.fit_gaussian_draw(X, J, seed=829, reg=1e-6)
    #    list_gwidth = np.hstack( ( (med2)*gwidth_factors ) )
    #    besti, objs = GaussFSSD.grid_search_gwidth(p, dat, V0, list_gwidth)
    #    gwidth = list_gwidth[besti]
    #    assert util.is_real_num(gwidth), 'gwidth not real. Was %s'%str(gwidth)
    #    assert gwidth > 0, 'gwidth not positive. Was %.3g'%gwidth
    #    logging.info('After grid search, gwidth=%.3g'%gwidth)

        
    #    V_opt, gwidth_opt, info = GaussFSSD.optimize_locs_widths(p, dat,
    #            gwidth, V0, **ops) 

    #    # set the width bounds
    #    #fac_min = 5e-2
    #    #fac_max = 5e3
    #    #gwidth_lb = fac_min*med2
    #    #gwidth_ub = fac_max*med2
    #    #gwidth_opt = max(gwidth_lb, min(gwidth_opt, gwidth_ub))
    #    return V_opt, gwidth_opt, info 
Example #7
Source File: goftest.py    From kernel-gof with MIT License
def simulate_null_dist(eigs, J, n_simulate=2000, seed=7):
        """
        Simulate the null distribution using the spectrum of the covariance
        matrix of the U-statistic. The simulated statistic is n*FSSD^2 where
        FSSD is an unbiased estimator.

        - eigs: a numpy array of estimated eigenvalues of the covariance
          matrix. eigs is of length d*J, where d is the input dimension.
        - J: the number of test locations.

        Return a numpy array of simulated statistics.
        """
        d = old_div(len(eigs),J)
        assert d>0
        # draw at most d x J x block_size values at a time
        block_size = max(20, int(old_div(1000.0,(d*J))))
        fssds = np.zeros(n_simulate)
        from_ind = 0
        with util.NumpySeedContext(seed=seed):
            while from_ind < n_simulate:
                to_draw = min(block_size, n_simulate-from_ind)
                # draw chi^2(1) random variables as squared standard normals
                chi2 = np.random.randn(d*J, to_draw)**2

                # an array of length to_draw 
                sim_fssds = eigs.dot(chi2-1.0)
                # store 
                end_ind = from_ind+to_draw
                fssds[from_ind:end_ind] = sim_fssds
                from_ind = end_ind
        return fssds 
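A hedged usage sketch: with J = 3 test locations in d = 1 dimension, eigs has length d*J = 3, and the 95th percentile of the simulated statistics gives a 5%-level rejection threshold (eigenvalues fabricated for illustration; assumes the kernel-gof util module is in scope as in the source file):

eigs = np.array([0.5, 0.3, 0.2])
sims = simulate_null_dist(eigs, J=3, n_simulate=500, seed=7)
threshold = np.percentile(sims, 95)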
Example #8
Source File: plot.py    From kernel-gof with MIT License
def plot_runtime(ex, fname, func_xvalues, xlabel, func_title=None):
    results = glo.ex_load_result(ex, fname)
    value_accessor = lambda job_results: job_results['time_secs']
    vf_pval = np.vectorize(value_accessor)
    # each entry of results['job_results'] is a dictionary, e.g.
    # {'test_result': <dict from running perform_test(te)>, 'time_secs': ...}
    times = vf_pval(results['job_results'])
    repeats, _, n_methods = results['job_results'].shape
    time_avg = np.mean(times, axis=0)
    time_std = np.std(times, axis=0)

    xvalues = func_xvalues(results)

    #ns = np.array(results[xkey])
    #te_proportion = 1.0 - results['tr_proportion']
    #test_sizes = ns*te_proportion
    line_styles = func_plot_fmt_map()
    method_labels = get_func2label_map()
    
    func_names = [f.__name__ for f in results['method_job_funcs'] ]
    for i in range(n_methods):    
        te_proportion = 1.0 - results['tr_proportion']
        fmt = line_styles[func_names[i]]
        #plt.errorbar(ns*te_proportion, mean_rejs[:, i], std_pvals[:, i])
        method_label = method_labels[func_names[i]]
        plt.errorbar(xvalues, time_avg[:, i], yerr=time_std[:,i], fmt=fmt,
                label=method_label)
            
    ylabel = 'Time (s)'
    plt.ylabel(ylabel)
    plt.xlabel(xlabel)
    plt.xlim([np.min(xvalues), np.max(xvalues)])
    plt.xticks( xvalues, xvalues )
    plt.legend(loc='best')
    plt.gca().set_yscale('log')
    title = '%s. %d trials. '%( results['prob_label'],
            repeats ) if func_title is None else func_title(results)
    plt.title(title)
    #plt.grid()
    return results 
Example #9
Source File: util.py    From kernel-gof with MIT License
def meddistance(X, subsample=None, mean_on_fail=True):
    """
    Compute the median of pairwise distances (not distance squared) of points
    in the matrix.  Useful as a heuristic for setting Gaussian kernel's width.

    Parameters
    ----------
    X : n x d numpy array
    mean_on_fail: True/False. If True, use the mean when the median distance is 0.
        This can happen especially when the data are discrete, e.g., 0/1, and
        there are slightly more 0s than 1s; the median distance is then 0.

    Return
    ------
    median distance
    """
    if subsample is None:
        D = dist_matrix(X, X)
        Itri = np.tril_indices(D.shape[0], -1)
        Tri = D[Itri]
        med = np.median(Tri)
        if med <= 0:
            # use the mean
            return np.mean(Tri)
        return med

    else:
        assert subsample > 0
        rand_state = np.random.get_state()
        np.random.seed(9827)
        n = X.shape[0]
        ind = np.random.choice(n, min(subsample, n), replace=False)
        np.random.set_state(rand_state)
        # recurse exactly once, on the subsample
        return meddistance(X[ind, :], None, mean_on_fail) 
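A hedged usage sketch of the median heuristic (mirroring how the commented-out code in goftest.py above derives a Gaussian width):

X = np.random.randn(500, 3)
med = meddistance(X, subsample=200)
gwidth2 = med**2  # squared bandwidth, e.g. for kernel.KGauss(gwidth2)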
Example #10
Source File: util.py    From kernel-gof with MIT License
def constrain(val, min_val, max_val):
    return min(max_val, max(min_val, val)) 
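For example:

constrain(1.7, 0.0, 1.0)   # -> 1.0
constrain(-0.2, 0.0, 1.0)  # -> 0.0
constrain(0.4, 0.0, 1.0)   # -> 0.4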
Example #11
Source File: test_numpy.py    From autograd with MIT License
def test_min_axis_keepdims():
    def fun(x): return np.min(x, axis=1, keepdims=True)
    mat = npr.randn(3, 4, 5)
    check_grads(fun)(mat) 
Example #12
Source File: test_numpy.py    From autograd with MIT License
def test_min_axis():
    def fun(x): return np.min(x, axis=1)
    mat = npr.randn(3, 4, 5)
    check_grads(fun)(mat) 
Example #13
Source File: test_numpy.py    From autograd with MIT License
def test_min():
    def fun(x): return np.min(x)
    mat = npr.randn(10, 11)
    check_grads(fun)(mat) 
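The three tests above all follow the same pattern; a self-contained sketch of that pattern outside the test suite (check_grads raises if the analytic gradient disagrees with numerical differences):

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.test_util import check_grads

def fun(x):
    return np.min(x, axis=0)

check_grads(fun)(npr.randn(4, 5))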
Example #14
Source File: wavelet.py    From scarlet with MIT License
def get_starlet_shape(shape, lvl = None):
    """ Get the pad shape for a starlet transform
    """
    #Number of levels for the Starlet decomposition
    lvl_max = np.int(np.log2(np.min(shape[-2:])))
    if (lvl is None) or lvl > lvl_max:
        lvl = lvl_max
    return lvl 
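For instance, a 128 x 64 image supports at most log2(64) = 6 levels, so a larger request is capped:

lvl = get_starlet_shape((128, 64), lvl=10)  # -> 6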
Example #15
Source File: geometry.py    From AeroSandbox with MIT License
def cosspace(min=0, max=1, n_points=50):
    # Returns n_points cosine-spaced values from min to max, clustered near the endpoints.
    mean = (max + min) / 2
    amp = (max - min) / 2

    return mean + amp * np.cos(np.linspace(np.pi, 0, n_points)) 
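For example, with 5 points the spacing clusters toward both endpoints:

cosspace(0.0, 1.0, n_points=5)  # -> [0., 0.1464..., 0.5, 0.8535..., 1.]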
Example #16
Source File: geometry.py    From AeroSandbox with MIT License
def get_sharp_TE_airfoil(self):
        # Returns a version of the airfoil with a sharp trailing edge.

        upper_original_coors = self.upper_coordinates()  # Note: includes leading edge point, be careful about duplicates
        lower_original_coors = self.lower_coordinates()  # Note: includes leading edge point, be careful about duplicates

        # Find data about the TE

        # Get the scale factor
        x_mcl = self.mcl_coordinates[:, 0]
        x_max = np.max(x_mcl)
        x_min = np.min(x_mcl)
        scale_factor = (x_mcl - x_min) / (x_max - x_min)  # linear contraction

        # Do the contraction
        upper_minus_mcl_adjusted = self.upper_minus_mcl - self.upper_minus_mcl[-1, :] * np.expand_dims(scale_factor, 1)

        # Recreate coordinates
        upper_coordinates_adjusted = np.flipud(self.mcl_coordinates + upper_minus_mcl_adjusted)
        lower_coordinates_adjusted = self.mcl_coordinates - upper_minus_mcl_adjusted

        coordinates = np.vstack((
            upper_coordinates_adjusted[:-1, :],
            lower_coordinates_adjusted
        ))

        # Make a new airfoil with the coordinates
        name = self.name + ", with sharp TE"
        new_airfoil = Airfoil(name=name, coordinates=coordinates, repanel=False)

        return new_airfoil 
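A hedged usage sketch (assumes an AeroSandbox Airfoil; the NACA name is illustrative):

af = Airfoil(name="naca4412")
sharp = af.get_sharp_TE_airfoil()  # same airfoil, surfaces contracted to a sharp trailing edge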
Example #17
Source File: test_msprime.py    From momi2 with GNU General Public License v3.0
def my_t_test(labels, theoretical, observed, min_samples=25):

    assert theoretical.ndim == 1 and observed.ndim == 2
    assert len(theoretical) == observed.shape[
        0] and len(theoretical) == len(labels)

    n_observed = np.sum(observed > 0, axis=1)
    theoretical, observed = theoretical[
        n_observed > min_samples], observed[n_observed > min_samples, :]
    labels = np.array(list(map(str, labels)))[n_observed > min_samples]
    n_observed = n_observed[n_observed > min_samples]

    runs = observed.shape[1]
    observed_mean = np.mean(observed, axis=1)
    bias = observed_mean - theoretical
    variances = np.var(observed, axis=1)

    t_vals = bias / np.sqrt(variances) * np.sqrt(runs)

    # get the p-values
    abs_t_vals = np.abs(t_vals)
    p_vals = 2.0 * scipy.stats.t.sf(abs_t_vals, df=runs - 1)
    print("# labels, p-values, empirical-mean, theoretical-mean, nonzero-counts")
    toPrint = np.array([labels, p_vals, observed_mean,
                        theoretical, n_observed]).transpose()
    toPrint = toPrint[np.array(toPrint[:, 1], dtype='float').argsort()[
        ::-1]]  # reverse-sort by p-vals
    print(toPrint)

    print("Note p-values are for t-distribution, which may not be a good approximation to the true distribution")

    # p-values should be uniformly distributed
    # so then the min p-value should be beta distributed
    return scipy.stats.beta.cdf(np.min(p_vals), 1, len(p_vals)) 
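A hedged sketch of the expected shapes, with fabricated data (assumes numpy as np and scipy.stats imported, as in the test file):

labels = list(range(5))
theoretical = np.ones(5)
observed = np.random.poisson(1.0, size=(5, 200)).astype(float)
p = my_t_test(labels, theoretical, observed)  # overall p-value from the beta CDF of the min p-value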
Example #18
Source File: geometry.py    From AeroSandbox with MIT License
def get_bounding_cube(self):
        """ Finds the axis-aligned cube that encloses the airplane with the smallest size.
            Useful for plotting and getting a sense for the scale of a problem.
            
            Args:
                self.wings (iterable): All wings included in the analysis, each with
                    geometry given in x, y, z coordinates (meters).
            Returns:
                tuple: Tuple of 4 floats x, y, z, and s, where x, y, and z are the coordinates of the cube center,
                and s is half of the side length.
        """

        # Get vertices to enclose
        vertices = None
        for wing in self.wings:
            for xsec in wing.xsecs:
                if vertices is None:
                    vertices = xsec.xyz_le + wing.xyz_le
                else:
                    vertices = np.vstack((
                        vertices,
                        xsec.xyz_le + wing.xyz_le
                    ))
                vertices = np.vstack((
                    vertices,
                    xsec.xyz_te() + wing.xyz_le
                ))

                if wing.symmetric:
                    vertices = np.vstack((
                        vertices,
                        reflect_over_XZ_plane(xsec.xyz_le + wing.xyz_le)
                    ))
                    vertices = np.vstack((
                        vertices,
                        reflect_over_XZ_plane(xsec.xyz_te() + wing.xyz_le)
                    ))

        # Enclose them
        x_max = np.max(vertices[:, 0])
        y_max = np.max(vertices[:, 1])
        z_max = np.max(vertices[:, 2])
        x_min = np.min(vertices[:, 0])
        y_min = np.min(vertices[:, 1])
        z_min = np.min(vertices[:, 2])

        x = np.mean((x_max, x_min))
        y = np.mean((y_max, y_min))
        z = np.mean((z_max, z_min))
        s = 0.5 * np.max((
            x_max - x_min,
            y_max - y_min,
            z_max - z_min
        ))

        return x, y, z, s 
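A hedged usage sketch (assumes an AeroSandbox Airplane instance):

x, y, z, s = airplane.get_bounding_cube()
# the cube spans [x - s, x + s] x [y - s, y + s] x [z - s, z + s]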
Example #19
Source File: variational_smc.py    From variational-smc with MIT License
def sim_q(prop_params, model_params, y, smc_obj, rs, verbose=False):
    """
    Simulates a single sample from the VSMC approximation.

    Requires an SMC object with 2 member functions:
    -- sim_prop(t, x_{t-1}, y, prop_params, model_params, rs)
    -- log_weights(t, x_t, x_{t-1}, y, prop_params, model_params)
    """
    # Extract constants
    T = y.shape[0]
    Dx = smc_obj.Dx
    N = smc_obj.N

    # Initialize SMC
    X = np.zeros((N,T,Dx))
    logW = np.zeros(N)
    W = np.zeros((N,T))
    ESS = np.zeros(T)

    for t in range(T):
        # Resampling
        if t > 0:
            ancestors = resampling(W[:,t-1], rs)
            X[:,:t,:] = X[ancestors,:t,:]

        # Propagation
        X[:,t,:] = smc_obj.sim_prop(t, X[:,t-1,:], y, prop_params, model_params, rs)

        # Weighting
        logW = smc_obj.log_weights(t, X[:,t,:], X[:,t-1,:], y, prop_params, model_params)
        max_logW = np.max(logW)
        W[:,t] = np.exp(logW-max_logW)
        W[:,t] /= np.sum(W[:,t])
        ESS[t] = 1./np.sum(W[:,t]**2)

    # Sample from the empirical approximation
    bins = np.cumsum(W[:,-1])
    u = rs.rand()
    B = np.digitize(u,bins)

    if verbose:
        print('Mean ESS', np.mean(ESS)/N)
        print('Min ESS', np.min(ESS))
        
    return X[B,:,:] 
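A hedged, self-contained sketch of the interface sim_q expects. ToySMC and the multinomial resampling stand-in below are invented for illustration (the real variational-smc module supplies its own resampling function):

import autograd.numpy as np
import autograd.numpy.random as npr

def resampling(w, rs):
    # stand-in: multinomial resampling of ancestor indices
    return rs.choice(len(w), size=len(w), p=w)

class ToySMC:
    Dx = 1   # latent dimension
    N = 100  # number of particles

    def sim_prop(self, t, Xp, y, prop_params, model_params, rs):
        # Gaussian random-walk proposal
        return Xp + rs.randn(self.N, self.Dx)

    def log_weights(self, t, Xc, Xp, y, prop_params, model_params):
        # Gaussian observation likelihood around y[t]
        return -0.5 * np.sum((Xc - y[t])**2, axis=1)

rs = npr.RandomState(0)
y = np.zeros((10, 1))
sample = sim_q(None, None, y, ToySMC(), rs)  # one trajectory of shape (10, 1)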
Example #20
Source File: demography.py    From momi2 with GNU General Public License v3.0
def admixture_operator(n_node, p):
    # axis0=n_from_parent, axis1=der_from_parent, axis2=der_in_parent
    der_in_parent = np.tile(np.arange(n_node + 1), (n_node + 1, n_node + 1, 1))
    n_from_parent = np.transpose(der_in_parent, [2, 0, 1])
    der_from_parent = np.transpose(der_in_parent, [0, 2, 1])

    anc_in_parent = n_node - der_in_parent
    anc_from_parent = n_from_parent - der_from_parent

    x = comb(der_in_parent, der_from_parent) * comb(
        anc_in_parent, anc_from_parent) / comb(n_node, n_from_parent)
    # rearrange so axis0=1, axis1=der_in_parent, axis2=der_from_parent, axis3=n_from_parent
    x = np.transpose(x)
    x = np.reshape(x, [1] + list(x.shape))

    n = np.arange(n_node+1)
    B = comb(n_node, n)

    # the two arrays to convolve_sum_axes
    x1 = (x * B * ((1-p)**n) * (p**(n[::-1])))
    x2 = x[:, :, :, ::-1]

    # reduce array size; approximate low probability events with 0
    mu = n_node * (1-p)
    sigma = np.sqrt(n_node * p * (1-p))
    n_sd = 4
    lower = np.max((0, np.floor(mu - n_sd*sigma)))
    upper = np.min((n_node, np.ceil(mu + n_sd*sigma))) + 1
    lower, upper = int(lower), int(upper)

    ##x1 = x1[:,:,:upper,lower:upper]
    ##x2 = x2[:,:,:(n_node-lower+1),lower:upper]

    ret = convolve_sum_axes(x1, x2)
    # axis0=der_in_parent1, axis1=der_in_parent2, axis2=der_in_child
    ret = np.reshape(ret, ret.shape[1:])
    if ret.shape[2] < (n_node+1):
        ret = np.pad(ret, [(0,0),(0,0),(0,n_node+1-ret.shape[2])], "constant")
    return ret[:, :, :(n_node+1)]


#@memoize
#def _der_in_admixture_node(n_node):
#    '''
#    returns 4d-array, [n_from_parent1, der_in_child, der_in_parent1, der_in_parent2].
#    Used by Demography._admixture_prob_helper
#    '''
#    # axis0=n_from_parent, axis1=der_from_parent, axis2=der_in_parent
#    der_in_parent = np.tile(np.arange(n_node + 1), (n_node + 1, n_node + 1, 1))
#    n_from_parent = np.transpose(der_in_parent, [2, 0, 1])
#    der_from_parent = np.transpose(der_in_parent, [0, 2, 1])
#
#    anc_in_parent = n_node - der_in_parent
#    anc_from_parent = n_from_parent - der_from_parent
#
#    x = scipy.special.comb(der_in_parent, der_from_parent) * scipy.special.comb(
#        anc_in_parent, anc_from_parent) / scipy.special.comb(n_node, n_from_parent)
#
#    ret, labels = convolve_axes(
#        x, x[::-1, ...], [[c for c in 'ijk'], [c for c in 'ilm']], ['j', 'l'], 'n')
#    return np.einsum('%s->inkm' % ''.join(labels), ret[..., :(n_node + 1)])