Python numpy.min() Examples

The following are 30 code examples of numpy.min(), drawn from open-source projects. The source file and project for each example are noted above it. You may also want to check out all available functions/classes of the module numpy, or try the search function.
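Before the project examples, here is a minimal, self-contained sketch of the basic numpy.min() behavior (the array values are illustrative):

import numpy as np

a = np.array([[3, 1, 4],
              [1, 5, 9]])

print(np.min(a))                         # 1 -- minimum of the flattened array
print(np.min(a, axis=0))                 # [1 1 4] -- column-wise minima
print(np.min(a, axis=1))                 # [1 1] -- row-wise minima
print(np.min(a, axis=1, keepdims=True))  # [[1] [1]] -- reduced axis kept with size 1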
Example #1
Source File: quantization.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def collect(self, name, arr):
        """Callback function for collecting min and max values from an NDArray."""
        name = py_str(name)
        if self.include_layer is not None and not self.include_layer(name):
            return
        handle = ctypes.cast(arr, NDArrayHandle)
        arr = NDArray(handle, writable=False)
        min_range = ndarray.min(arr).asscalar()
        max_range = ndarray.max(arr).asscalar()
        if name in self.min_max_dict:
            cur_min_max = self.min_max_dict[name]
            self.min_max_dict[name] = (min(cur_min_max[0], min_range),
                                       max(cur_min_max[1], max_range))
        else:
            self.min_max_dict[name] = (min_range, max_range)
        if self.logger is not None:
            self.logger.info("Collecting layer %s output min_range=%f, max_range=%f"
                             % (name, min_range, max_range)) 
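The running (min, max) bookkeeping in this callback is independent of MXNet; a plain-NumPy sketch of the same pattern (the names here are hypothetical):

import numpy as np

min_max_dict = {}

def collect_min_max(name, arr):
    # Track the running (min, max) observed per layer name, as in the callback above.
    min_range, max_range = float(np.min(arr)), float(np.max(arr))
    if name in min_max_dict:
        cur = min_max_dict[name]
        min_max_dict[name] = (min(cur[0], min_range), max(cur[1], max_range))
    else:
        min_max_dict[name] = (min_range, max_range)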
Example #2
Source File: dataset_tool.py    From disentangling_conditional_gans with MIT License
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example #3
Source File: test_bayestar.py    From dustmaps with GNU General Public License v2.0
def test_equ_random_sample_scalar(self):
        """
        Test that a random sample of reddening at an arbitrary distance is
        actually from the set of possible reddening samples at that distance.
        Uses a single set of coordinates/distance as input.
        """
        for d in self._test_data:
            # Prepare coordinates (with random distances)
            l = d['l']*units.deg
            b = d['b']*units.deg
            dm = 3. + (25.-3.)*np.random.random()

            dist = 10.**(dm/5.-2.)
            c = coords.SkyCoord(l, b, distance=dist*units.kpc, frame='galactic')

            ebv_data = self._interp_ebv(d, dist)
            ebv_calc = self._bayestar(c, mode='random_sample')

            d_ebv = np.min(np.abs(ebv_data[:] - ebv_calc))

            np.testing.assert_allclose(d_ebv, 0., atol=0.001, rtol=0.0001) 
Example #4
Source File: dataset_tool.py    From disentangling_conditional_gans with MIT License
def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    images = images.reshape(-1, 28, 28)
    images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    
    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        rnd = np.random.RandomState(random_seed)
        for idx in range(num_images):
            tfr.add_image(images[rnd.randint(images.shape[0], size=3)])

#---------------------------------------------------------------------------- 
Example #5
Source File: dataset_tool.py    From disentangling_conditional_gans with MIT License
def create_mnist(tfrecord_dir, mnist_dir):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
        labels = np.frombuffer(file.read(), np.uint8, offset=8)
    images = images.reshape(-1, 1, 28, 28)
    images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0
    
    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Example #6
Source File: test_bayestar.py    From dustmaps with GNU General Public License v2.0
def test_equ_random_sample_nodist_vector(self):
        """
        Test that a random sample of the reddening vs. distance curve is drawn
        from the full set of samples. Uses vector of coordinates as input.
        """

        # Prepare coordinates
        l = [d['l']*units.deg for d in self._test_data]
        b = [d['b']*units.deg for d in self._test_data]

        c = coords.SkyCoord(l, b, frame='galactic')

        ebv_data = np.array([d['samples'] for d in self._test_data])
        ebv_calc = self._bayestar(c, mode='random_sample')

        d_ebv = np.min(np.abs(ebv_data[:,:,:] - ebv_calc[:,None,:]), axis=1)
        np.testing.assert_allclose(d_ebv, 0., atol=0.001, rtol=0.0001) 
Example #7
Source File: nav_env.py    From DOTA_models with Apache License 2.0
def get_optimal_action(self, current_node_ids, step_number):
    """Returns the optimal action from the current node."""
    goal_number = step_number // self.task_params.num_steps  # integer division; goal_number indexes dist_to_goal
    gtG = self.task.gtG
    a = np.zeros((len(current_node_ids), self.task_params.num_actions), dtype=np.int32)
    d_dict = self.episode.dist_to_goal[goal_number]
    for i, c in enumerate(current_node_ids):
      neigh = gtG.vertex(c).out_neighbours()
      neigh_edge = gtG.vertex(c).out_edges()
      ds = np.array([d_dict[i][int(x)] for x in neigh])
      ds_min = np.min(ds)
      for i_, e in enumerate(neigh_edge):
        if ds[i_] == ds_min:
          _ = gtG.ep['action'][e]
          a[i, _] = 1
    return a 
Example #8
Source File: labels.py    From neuropythy with GNU Affero General Public License v3.0
def cmap(self, data=None):
        '''
        lblidx.cmap() yields a colormap for the given label index object that assumes that the data
          being plotted will be rescaled such that label 0 is 0 and the highest label value in the
          label index is equal to 1.
        lblidx.cmap(data) yields a colormap that will correctly color the labels given in data if
          data is scaled such that its minimum and maximum value are 0 and 1.
        '''
        import matplotlib.colors
        from_list = matplotlib.colors.LinearSegmentedColormap.from_list
        if data is None: return self.colormap
        data = np.asarray(data).flatten()
        (vmin,vmax) = (np.min(data), np.max(data))
        ii  = np.argsort(self.ids)
        ids = np.asarray(self.ids)[ii]
        if vmin == vmax:
            (vmin,vmax,ii) = (vmin-0.5, vmax+0.5, vmin)
            clr = self.color_lookup(ii)
            return from_list('label1', [(0, clr), (1, clr)])
        q   = (ids >= vmin) & (ids <= vmax)
        ids = ids[q]
        clrs = self.color_lookup(ids)
        vals = (ids - vmin) / (vmax - vmin)
        return from_list('label%d' % len(vals), list(zip(vals, clrs))) 
Example #9
Source File: cgp.py    From cgp-cnn with MIT License
def neutral_mutation(self, mutation_rate=0.01):
        for n in range(self.net_info.node_num + self.net_info.out_num):
            t = self.gene[n][0]
            # mutation for type gene
            type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
            if not self.is_active[n] and np.random.rand() < mutation_rate and type_num > 1:
                self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
            # mutation for connection gene
            col = np.min((int(n / self.net_info.rows), self.net_info.cols))
            max_connect_id = col * self.net_info.rows + self.net_info.input_num
            min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
                if col - self.net_info.level_back >= 0 else 0
            in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
            for i in range(self.net_info.max_in_num):
                if (not self.is_active[n] or i >= in_num) and np.random.rand() < mutation_rate \
                        and max_connect_id - min_connect_id > 1:
                    self.gene[n][i+1] = self.__mutate(self.gene[n][i+1], min_connect_id, max_connect_id)

        self.check_active()
        return False 
Example #10
Source File: cgp.py    From cgp-cnn with MIT License
def mutation(self, mutation_rate=0.01):
        active_check = False

        for n in range(self.net_info.node_num + self.net_info.out_num):
            t = self.gene[n][0]
            # mutation for type gene
            type_num = self.net_info.func_type_num if n < self.net_info.node_num else self.net_info.out_type_num
            if np.random.rand() < mutation_rate and type_num > 1:
                self.gene[n][0] = self.__mutate(self.gene[n][0], 0, type_num)
                if self.is_active[n]:
                    active_check = True
            # mutation for connection gene
            col = np.min((int(n / self.net_info.rows), self.net_info.cols))
            max_connect_id = col * self.net_info.rows + self.net_info.input_num
            min_connect_id = (col - self.net_info.level_back) * self.net_info.rows + self.net_info.input_num \
                if col - self.net_info.level_back >= 0 else 0
            in_num = self.net_info.func_in_num[t] if n < self.net_info.node_num else self.net_info.out_in_num[t]
            for i in range(self.net_info.max_in_num):
                if np.random.rand() < mutation_rate and max_connect_id - min_connect_id > 1:
                    self.gene[n][i+1] = self.__mutate(self.gene[n][i+1], min_connect_id, max_connect_id)
                    if self.is_active[n] and i < in_num:
                        active_check = True

        self.check_active()
        return active_check 
Example #11
Source File: dataset.py    From Deep_VoiceChanger with MIT License
def wave2input_image(wave, window, pos=0, pad=0):
    # `sride`, `dif`, `height`, `bias` and `scale` are module-level constants defined elsewhere in dataset.py
    wave_image = np.hstack([wave[pos+i*sride:pos+(i+pad*2)*sride+dif].reshape(height+pad*2, sride) for i in range(256//sride)])[:,:254]
    wave_image *= window
    spectrum_image = np.fft.fft(wave_image, axis=1)
    input_image = np.abs(spectrum_image[:,:128].reshape(1, height+pad*2, 128), dtype=np.float32)

    np.clip(input_image, 1000, None, out=input_image)
    np.log(input_image, out=input_image)
    input_image += bias
    input_image /= scale

    if np.max(input_image) > 0.95:
        print('input image max bigger than 0.95', np.max(input_image))
    if np.min(input_image) < 0.05:
        print('input image min smaller than 0.05', np.min(input_image))

    return input_image 
Example #12
Source File: image.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def resize(im, short, max_size):
    """
    only resize input image to target size and return scale
    :param im: BGR image input by opencv
    :param short: one dimensional size (the short side)
    :param max_size: one dimensional max size (the long side)
    :return: resized image (numpy.ndarray) and scale (float)
    """
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(short) / float(im_size_min)
    # prevent bigger axis from being more than max_size:
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
    return im, im_scale 
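A hypothetical usage sketch of the function above, with random data standing in for a real OpenCV BGR image:

import cv2
import numpy as np

im = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # fake 480x640 BGR image
resized, scale = resize(im, short=600, max_size=1000)
# short side 480 scales by 600/480 = 1.25; long side 640*1.25 = 800 <= 1000, so the cap is not hit
print(resized.shape, scale)  # (600, 800, 3) 1.25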
Example #13
Source File: test_quantization.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_quantize_float32_to_int8():
    shape = rand_shape_nd(4)
    data = rand_ndarray(shape, 'default', dtype='float32')
    min_range = mx.nd.min(data)
    max_range = mx.nd.max(data)
    qdata, min_val, max_val = mx.nd.contrib.quantize(data, min_range, max_range, out_type='int8')
    data_np = data.asnumpy()
    min_range = min_range.asscalar()
    max_range = max_range.asscalar()
    real_range = np.maximum(np.abs(min_range), np.abs(max_range))
    quantized_range = 127.0
    scale = quantized_range / real_range
    assert qdata.dtype == np.int8
    assert min_val.dtype == np.float32
    assert max_val.dtype == np.float32
    assert same(min_val.asscalar(), -real_range)
    assert same(max_val.asscalar(), real_range)
    qdata_np = (np.sign(data_np) * np.minimum(np.abs(data_np) * scale + 0.5, quantized_range)).astype(np.int8)
    assert_almost_equal(qdata.asnumpy(), qdata_np, atol = 1) 
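The mapping this test exercises can be written in plain NumPy, independent of MXNet (a sketch of the symmetric int8 scheme, mirroring the qdata_np formula above):

import numpy as np

def quantize_int8(data):
    # Symmetric quantization: map [-real_range, real_range] onto [-127, 127].
    real_range = np.maximum(np.abs(np.min(data)), np.abs(np.max(data)))
    scale = 127.0 / real_range
    qdata = (np.sign(data) * np.minimum(np.abs(data) * scale + 0.5, 127.0)).astype(np.int8)
    return qdata, -real_range, real_range

x = np.random.uniform(-5, 5, size=(2, 3)).astype(np.float32)
qx, min_val, max_val = quantize_int8(x)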
Example #14
Source File: gradcam.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def grad_to_image(gradient):
    """Convert gradients of image obtained using `get_image_grad`
    into image. This shows parts of the image that is most strongly activating
    the output neurons."""
    gradient = gradient - gradient.min()
    gradient /= gradient.max()
    gradient = np.uint8(gradient * 255).transpose(1, 2, 0)
    gradient = gradient[..., ::-1]
    return gradient 
Example #15
Source File: gradcam.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def get_cam(imggrad, conv_out):
    """Compute CAM. Refer section 3 of https://arxiv.org/abs/1610.02391 for details"""
    weights = np.mean(imggrad, axis=(1, 2))
    cam = np.ones(conv_out.shape[1:], dtype=np.float32)
    for i, w in enumerate(weights):
        cam += w * conv_out[i, :, :]
    cam = cv2.resize(cam, (imggrad.shape[1], imggrad.shape[2]))
    cam = np.maximum(cam, 0)
    cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam)) 
    cam = np.uint8(cam * 255)
    return cam 
Example #16
Source File: gradcam.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def to_grayscale(cv2im):
    """Convert gradients to grayscale. This gives a saliency map."""
    # How strongly does each position activate the output
    grayscale_im = np.sum(np.abs(cv2im), axis=0)

    # Normalize between min and 99th percentile
    im_max = np.percentile(grayscale_im, 99)
    im_min = np.min(grayscale_im)
    grayscale_im = np.clip((grayscale_im - im_min) / (im_max - im_min), 0, 1)

    grayscale_im = np.expand_dims(grayscale_im, axis=0)
    return grayscale_im 
Example #17
Source File: DihedralAngleConstraints.py    From fullrmc with GNU Affero General Public License v3.0
def compute_standard_error(self, data):
        """
        Compute the standard error (StdErr) of data not satisfying constraint conditions.

        .. math::
            StdErr = \\sum \\limits_{i}^{C}
            ( \\theta_{i} - \\theta_{i}^{min} ) ^{2}
            \\int_{0}^{\\theta_{i}^{min}} \\delta(\\theta-\\theta_{i}) d \\theta
            +
            ( \\theta_{i} - \\theta_{i}^{max} ) ^{2}
            \\int_{\\theta_{i}^{max}}^{\\pi} \\delta(\\theta-\\theta_{i}) d \\theta

        Where:\n
        :math:`C` is the total number of defined improper angles constraints. \n
        :math:`\\theta_{i}^{min}` is the improper angle constraint lower limit set for constraint i. \n
        :math:`\\theta_{i}^{max}` is the improper angle constraint upper limit set for constraint i. \n
        :math:`\\theta_{i}` is the improper angle computed for constraint i. \n
        :math:`\\delta` is the Dirac delta function. \n
        :math:`\\int_{0}^{\\theta_{i}^{min}} \\delta(\\theta-\\theta_{i}) d \\theta`
        is equal to 1 if :math:`0 \\leqslant \\theta_{i} \\leqslant \\theta_{i}^{min}` and 0 elsewhere.\n
        :math:`\\int_{\\theta_{i}^{max}}^{\\pi} \\delta(\\theta-\\theta_{i}) d \\theta`
        is equal to 1 if :math:`\\theta_{i}^{max} \\leqslant \\theta_{i} \\leqslant \\pi` and 0 elsewhere.\n

        :Parameters:
            #. data (numpy.array): The constraint value data to compute standardError.

        :Returns:
            #. standardError (number): The calculated standardError of the constraint.
        """
        return FLOAT_TYPE( np.sum(data["reducedAngles"]**2) ) 
Example #18
Source File: profiler_ndarray.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_reduce():
    sample_num = 200

    def test_reduce_inner(numpy_reduce_func, nd_reduce_func):
        for i in range(sample_num):
            ndim = np.random.randint(1, 6)
            shape = np.random.randint(1, 11, size=ndim)
            axis_flags = np.random.randint(0, 2, size=ndim)
            axes = []
            for (axis, flag) in enumerate(axis_flags):
                if flag:
                    axes.append(axis)
            keepdims = np.random.randint(0, 2)
            dat = np.random.rand(*shape) - 0.5
            if 0 == len(axes):
                axes = tuple(range(ndim))
            else:
                axes = tuple(axes)
            numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)

            ndarray_ret = nd_reduce_func(mx.nd.array(dat), axis=axes, keepdims=keepdims)
            if type(ndarray_ret) is mx.ndarray.NDArray:
                ndarray_ret = ndarray_ret.asnumpy()
            assert (ndarray_ret.shape == numpy_ret.shape) or \
                   (ndarray_ret.shape == (1,) and numpy_ret.shape == ()), "nd:%s, numpy:%s" \
                                                         %(ndarray_ret.shape, numpy_ret.shape)
            err = np.square(ndarray_ret - numpy_ret).mean()
            assert err < 1E-4
    test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.sum),
                      mx.nd.sum)
    test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.max),
                      mx.nd.max)
    test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.min),
                      mx.nd.min) 
Example #19
Source File: gtf_utils.py    From models with MIT License
def gene_ends_update(self):
        for t in self.trans:
            self.start = min(self.start, np.min(t.exons))
            self.stop = max(self.stop, np.max(t.exons)) 
Example #20
Source File: dcgan.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def visual(title, X, name):
    assert len(X.shape) == 4
    X = X.transpose((0, 2, 3, 1))
    X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    buff = buff[:,:,::-1]
    plt.imshow(buff)
    plt.title(title)
    plt.savefig(name) 
Example #21
Source File: Constraint.py    From fullrmc with GNU Affero General Public License v3.0
def fit_scale_factor(self, experimentalData, modelData, dataWeights):
        """
        The best scale factor value is computed by a least-squares fit, i.e. by minimizing :math:`\\sum (E-sM)^{2}`.\n

        Where:
            #. :math:`E` is the experimental data.
            #. :math:`s` is the scale factor.
            #. :math:`M` is the model constraint data.

        This method doesn't allow specifying frames. It will target used frame
        only.

        :Parameters:
            #. experimentalData (numpy.ndarray): Experimental data.
            #. modelData (numpy.ndarray): Constraint model data.
            #. dataWeights (None, numpy.ndarray): Data points weights to
               compute the scale factor. If None is given, all data points
               will be considered as having the same weight.

        :Returns:
            #. scaleFactor (number): The new scale factor fit value.

        **NB**: This method won't update the internal scale factor value
        of the constraint. It always computes the best scale factor given
        experimental and atomic model data.
        """
        if dataWeights is None:
            SF = FLOAT_TYPE( np.sum(modelData*experimentalData)/np.sum(modelData**2) )
        else:
            SF = FLOAT_TYPE( np.sum(dataWeights*modelData*experimentalData)/np.sum(modelData**2) )
        SF = max(SF, self.__adjustScaleFactorMinimum)
        SF = min(SF, self.__adjustScaleFactorMaximum)
        return SF 
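For the unweighted case, the formula in the code is the closed-form least-squares solution: s = Σ(M·E)/Σ(M²) minimizes Σ(E − sM)². A quick numerical sanity check with synthetic data (FLOAT_TYPE and the min/max clamping omitted):

import numpy as np

M = np.random.rand(100)                        # model data
E = 2.5 * M + np.random.normal(0, 0.01, 100)   # experimental data ~ s*M plus noise
s = np.sum(M * E) / np.sum(M ** 2)             # closed-form scale factor, as in the code above
s_lstsq = np.linalg.lstsq(M[:, None], E, rcond=None)[0][0]
assert np.isclose(s, s_lstsq)                  # matches a through-the-origin least-squares fit
print(s)                                       # close to 2.5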
Example #22
Source File: test_quantization.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_get_optimal_thresholds():
    # Given an ndarray with elements following a uniform distribution, the optimal threshold
    # for quantizing the ndarray should be either abs(min(nd)) or abs(max(nd)).
    def get_threshold(nd):
        min_nd = mx.nd.min(nd)
        max_nd = mx.nd.max(nd)
        return mx.nd.maximum(mx.nd.abs(min_nd), mx.nd.abs(max_nd)).asnumpy()

    nd_dict = {'layer1': mx.nd.uniform(low=-10.532, high=11.3432, shape=(8, 3, 23, 23), dtype=np.float64)}
    expected_threshold = get_threshold(nd_dict['layer1'])
    th_dict = mx.contrib.quant._get_optimal_thresholds(nd_dict)
    assert 'layer1' in th_dict
    assert_almost_equal(np.array([th_dict['layer1'][1]]), expected_threshold, rtol=1e-2, atol=1e-4) 
Example #23
Source File: ptBEV.py    From PolarSeg with BSD 3-Clause "New" or "Revised" License
def nb_greedy_FPS(xyz,K):
    # Greedy farthest-point sampling: returns a boolean mask selecting K points of xyz.
    start_element = 0
    sample_num = xyz.shape[0]
    sum_vec = np.zeros((sample_num,1),dtype = np.float32)
    xyz_sq = xyz**2
    for j in range(sample_num):
        sum_vec[j,0] = np.sum(xyz_sq[j,:])
    pairwise_distance = sum_vec + np.transpose(sum_vec) - 2*np.dot(xyz, np.transpose(xyz))
    
    candidates_ind = np.zeros((sample_num,),dtype = np.bool_)
    candidates_ind[start_element] = True
    remain_ind = np.ones((sample_num,),dtype = np.bool_)
    remain_ind[start_element] = False
    all_ind = np.arange(sample_num)
    
    for i in range(1,K):
        if i == 1:
            min_remain_pt_dis = pairwise_distance[:,start_element]
            min_remain_pt_dis = min_remain_pt_dis[remain_ind]
        else:
            cur_dis = pairwise_distance[remain_ind,:]
            cur_dis = cur_dis[:,candidates_ind]
            min_remain_pt_dis = np.zeros((cur_dis.shape[0],),dtype = np.float32)
            for j in range(cur_dis.shape[0]):
                min_remain_pt_dis[j] = np.min(cur_dis[j,:])
        next_ind_in_remain = np.argmax(min_remain_pt_dis)
        next_ind = all_ind[remain_ind][next_ind_in_remain]
        candidates_ind[next_ind] = True
        remain_ind[next_ind] = False
        
    return candidates_ind 
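A minimal usage sketch, assuming the function above is in scope (the nb_ prefix suggests the original is Numba-compiled, but the body runs as plain NumPy; the point cloud here is random):

import numpy as np

xyz = np.random.rand(1000, 3).astype(np.float32)  # toy point cloud
mask = nb_greedy_FPS(xyz, 64)                     # boolean mask of the 64 selected points
sampled = xyz[mask]
print(sampled.shape)                              # (64, 3)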
Example #24
Source File: __init__.py    From neuropythy with GNU Affero General Public License v3.0
def test_mesh(self):
        '''
        test_mesh() ensures that many general mesh properties and methods are working.
        '''
        import neuropythy.geometry as geo
        logging.info('neuropythy: Testing meshes and properties...')
        # get a random subject's mesh
        sub  = ny.data['benson_winawer_2018'].subjects['S1204']
        hem  = sub.hemis[('lh','rh')[np.random.randint(2)]]
        msh  = hem.white_surface
        # few simple things
        self.assertEqual(msh.coordinates.shape[0], 3)
        self.assertEqual(msh.tess.faces.shape[0], 3)
        self.assertEqual(msh.tess.edges.shape[0], 2)
        self.assertEqual(msh.vertex_count, msh.coordinates.shape[1])
        # face areas and edge lengths should all be non-negative
        self.assertGreaterEqual(np.min(msh.face_areas), 0)
        self.assertGreaterEqual(np.min(msh.edge_lengths), 0)
        # test the properties
        self.assertTrue('blerg' in msh.with_prop(blerg=msh.prop('curvature')).properties)
        self.assertFalse('curvature' in msh.wout_prop('curvature').properties)
        self.assertEqual(msh.properties.row_count, msh.vertex_count)
        self.assertLessEqual(np.abs(np.mean(msh.prop('curvature'))), 0.1)
        # use the property interface to grab a fancy masked property
        v123_areas = msh.property('midgray_surface_area',
                                  mask=('inf-prf_visual_area', (1,2,3)),
                                  null=0)
        v123_area = np.sum(v123_areas)
        self.assertLessEqual(v123_area, 15000)
        self.assertGreaterEqual(v123_area, 500)
        (v1_ecc, v1_rad) = msh.property(['prf_eccentricity','prf_radius'],
                                        mask=('inf-prf_visual_area', 1),
                                        weights='prf_variance_explained',
                                        weight_min=0.1,
                                        clipped=0,
                                        null=np.nan)
        wh = np.isfinite(v1_ecc) & np.isfinite(v1_rad)
        # element [0,1] is the eccentricity/radius correlation ([0,0] is identically 1)
        self.assertGreater(np.corrcoef(v1_ecc[wh], v1_rad[wh])[0,1], 0.5) 
Example #25
Source File: core.py    From neuropythy with GNU Affero General Public License v3.0
def close_curves(*crvs, **kw):
    '''
    close_curves(crv1, crv2...) yields a single curve that merges all of the given list of curves
      together. The curves must be given in order, such that the i'th curve is connected
      to the (i+1)'th curve circularly to form a perimeter.

    The following optional parameters may be given:
      * grid may specify the number of grid-points to use in the initial search for a start-point
        (default: 16).
      * order may specify the order of the resulting curve; by default (None) uses the lowest order
        of all curves.
      * smoothing (None) the amount to smooth the points.
      * even_out (True) whether to even out the distances along the curve.
      * meta_data (None) an optional map of meta-data to give the spline representation.
    '''
    for k in six.iterkeys(kw):
        if k not in close_curves.default_options: raise ValueError('Unrecognized option: %s' % k)
    kw = {k:(kw[k] if k in kw else v) for (k,v) in six.iteritems(close_curves.default_options)}
    (grid, order) = (kw['grid'], kw['order'])
    crvs = [(crv if is_curve_spline(crv) else to_curve_spline(crv)).even_out() for crv in crvs]
    # find all intersections:
    isects = [curve_intersection(u,v, grid=grid)
              for (u,v) in zip(crvs, np.roll(crvs,-1))]
    # subsample curves
    crds = np.hstack([crv.subcurve(s1[1], s0[0]).coordinates[:,:-1]
                      for (crv,s0,s1) in zip(crvs, isects, np.roll(isects,1,0))])
    kw['order'] = np.min([crv.order for crv in crvs]) if order is None else order
    kw = {k:v for (k,v) in six.iteritems(kw)
          if v is not None and k in ('order','smoothing','even_out','meta_data')}
    return curve_spline(crds, periodic=True, **kw) 
Example #26
Source File: labels.py    From neuropythy with GNU Affero General Public License v3.0
def colormap(entries):
        '''
        lblidx.colormap is a colormap appropriate for use with data that has been scaled to run from
        0 at lblidx.vmin to 1 at lblidx.vmax.
        '''
        import matplotlib.colors
        from_list = matplotlib.colors.LinearSegmentedColormap.from_list
        ids = np.asarray([e.id for e in entries])
        ii  = np.argsort(ids)
        ids = ids[ii]
        clrs = np.asarray([e.color for e in entries])[ii]
        (vmin,vmax) = [f(ids) for f in (np.min, np.max)]
        vals = (ids - vmin) / (vmax - vmin)
        return from_list('label%d' % len(vals), list(zip(vals, clrs))) 
Example #27
Source File: labels.py    From neuropythy with GNU Affero General Public License v3.0
def vmin(ids):
        '''
        lblidx.vmin is the minimum value of a label identifier in the given label index.
        '''
        return np.min(ids) 
Example #28
Source File: data_provider_test.py    From DOTA_models with Apache License 2.0
def test_preprocessed_image_values_are_in_range(self):
    image_shape = (5, 4, 3)
    fake_image = np.random.randint(low=0, high=255, size=image_shape)
    image_tf = data_provider.preprocess_image(fake_image)

    with self.test_session() as sess:
      image_np = sess.run(image_tf)

    self.assertEqual(image_np.shape, image_shape)
    min_value, max_value = np.min(image_np), np.max(image_np)
    self.assertTrue((-1.28 < min_value) and (min_value < 1.27))
    self.assertTrue((-1.28 < max_value) and (max_value < 1.27)) 
Example #29
Source File: monitoring.py    From neural-pipeline with MIT License
def update_losses(self, losses: {}) -> None:
        def on_loss(name: str, values: np.ndarray, string) -> None:
            string.append(" {}: [{:4f}, {:4f}, {:4f}];".format(name, np.min(values), np.mean(values), np.max(values)))

        res_string = self.ResStr("Epoch: [{}];".format(self.epoch_num))
        self._iterate_by_losses(losses, lambda m, v: on_loss(m, v, res_string))
        print(res_string) 
Example #30
Source File: plotting.py    From cat-bbs with MIT License
def _line_to_xy(self, line_x, line_y, limit_y_min=None, limit_y_max=None):
        point_every = max(1, int(len(line_x) / self.nb_points_max))
        points_x = []
        points_y = []
        curr_sum = 0
        counter = 0
        last_idx = len(line_x) - 1
        for i in range(len(line_x)):
            batch_idx = line_x[i]
            if batch_idx > self.start_batch_idx:
                curr_sum += line_y[i]
                counter += 1
                if counter >= point_every or i == last_idx:
                    points_x.append(batch_idx)
                    y = curr_sum / counter
                    if limit_y_min is not None and limit_y_max is not None:
                        y = np.clip(y, limit_y_min, limit_y_max)
                    elif limit_y_min is not None:
                        y = max(y, limit_y_min)
                    elif limit_y_max is not None:
                        y = min(y, limit_y_max)
                    points_y.append(y)
                    counter = 0
                    curr_sum = 0

        return points_x, points_y