Python random.uniform() Examples

The following are code examples for showing how to use random.uniform(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: weibo-login   Author: littlepinecone   File: login.py    GNU General Public License v3.0 6 votes vote down vote up
def path_generate(a):
    pos = {'1': [32, 32],
           '2': [128, 32],
           '3': [32, 128],
           '4': [128, 128]}
    path = []
    t0 = (int(round(time.time() * 1000)))
    t00 = 0
    for j in range(0, 3):
        for i in range(0, 7):
            x = pos[a[j]][0] + i * (pos[a[j + 1]][0] - pos[a[j]][0]) / 6 + int(random.uniform(1, 3))

            y = pos[a[j]][1] + i * (pos[a[j + 1]][1] - pos[a[j]][1]) / 6 + int(random.uniform(2, 3))

            t = 30 * int(random.uniform(1, 2))
            t00 += t

            path0 = [x, y, t00]
            path.append(path0)
    path[0][2] = t0
    # print path
    return path 
Example 2
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: opencv.py    Apache License 2.0 6 votes vote down vote up
def random_size_crop(src, size, min_area=0.25, ratio=(3.0/4.0, 4.0/3.0)):
    """Randomly crop src with size. Randomize area and aspect ratio.

    Tries up to 10 times to find a crop whose area is in
    [min_area, 1.0] * (image area) and whose aspect ratio lies in
    `ratio`; falls back to `random_crop` if no attempt fits.
    Returns (cropped, (x0, y0, new_w, new_h)).
    """
    h, w, _ = src.shape
    area = w * h
    for _ in range(10):
        new_area = random.uniform(min_area, 1.0) * area
        new_ratio = random.uniform(*ratio)

        # BUG FIX: a side length is sqrt(area * ratio) (resp. / ratio).
        # The original multiplied the raw area by the ratio, producing
        # area-sized "widths" that virtually always exceeded the image,
        # so every attempt failed and the fallback always ran.
        new_w = int(round((new_area * new_ratio) ** 0.5))
        new_h = int(round((new_area / new_ratio) ** 0.5))

        # swap orientation half the time
        if random.uniform(0., 1.) < 0.5:
            new_w, new_h = new_h, new_w

        if new_w > w or new_h > h:
            continue

        x0 = random.randint(0, w - new_w)
        y0 = random.randint(0, h - new_h)

        out = fixed_crop(src, x0, y0, new_w, new_h, size)
        return out, (x0, y0, new_w, new_h)

    return random_crop(src, size)
Example 3
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: train_cgan.py    Apache License 2.0 6 votes vote down vote up
def query(self, images):
    """Return a batch drawn from the history pool.

    While the pool is filling, every incoming image is stored and passed
    through.  Once full, each image is either passed through or (with
    probability 0.5) swapped against a randomly chosen stored image.
    """
    if self.pool_size == 0:
        return images
    selected = []
    for image in images:
        image = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
        if self.num_imgs < self.pool_size:
            # pool not yet full: store and emit the new image
            self.num_imgs += 1
            self.images.append(image)
            selected.append(image)
        else:
            if random.uniform(0, 1) > 0.5:
                # swap: emit a stored image, keep the new one in the pool
                random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                stored = self.images[random_id].copy()
                self.images[random_id] = image
                selected.append(stored)
            else:
                selected.append(image)
    batch = selected[0].copyto(images.context)
    for image in selected[1:]:
        batch = nd.concat(batch, image.copyto(images.context), dim=0)
    return batch
Example 4
Project: tf-cnn-lstm-ocr-captcha   Author: Luonic   File: ImageAugmenter.py    MIT License 6 votes vote down vote up
def apply_motion_blur(image, kernel_size, strength=1.0):
    """Blur `image` along a random direction to simulate camera motion."""
    # start from a horizontal line kernel normalised to sum to 1
    kernel = np.zeros((kernel_size, kernel_size))
    kernel[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
    kernel = kernel / kernel_size

    # spin the line to a random angle in degrees
    # NOTE(review): `rotate` comes from the enclosing module's imports
    angle = np.random.uniform(0, 360)
    kernel = rotate(kernel, angle)
    kernel *= strength

    # convolve; depth -1 keeps the source image depth
    return cv2.filter2D(image, -1, kernel)
Example 5
Project: pepperon.ai   Author: JonWiggins   File: utils.py    MIT License 6 votes vote down vote up
def random_unit_vector(dimensions, seed=None):
    """
    Return a random unit vector with the given number of dimensions.

    Components are drawn as standard normals via the Box-Muller
    transform and the vector is then scaled to unit length, which makes
    the direction uniformly distributed on the sphere.

    Fixes over the original: `if seed:` silently ignored ``seed=0``
    (now ``is not None``), and ``math.log`` could receive 0.0 from
    ``random.uniform(0, 1)`` (now uses ``1 - random.random()`` which
    lies in (0, 1]).

    :param dimensions: desired number of dimensions (positive int)
    :param seed: nullable seed for the `random` module

    :return: list of floats with Euclidean norm 1
    """
    if seed is not None:
        random.seed(seed)

    raw = []
    magnitude = 0.0
    for _ in range(dimensions):
        # 1 - random() is in (0, 1], so log() never sees zero
        uniform1 = 1.0 - random.random()
        uniform2 = random.uniform(0, 1)
        component = math.sqrt(-2 * math.log(uniform1)) * math.cos(2 * math.pi * uniform2)
        magnitude += component ** 2
        raw.append(component)

    magnitude = math.sqrt(magnitude)
    return [element / magnitude for element in raw]
Example 6
Project: bigquerylayers   Author: smandaric   File: retry.py    GNU General Public License v3.0 6 votes vote down vote up
def exponential_sleep_generator(initial, maximum, multiplier=_DEFAULT_DELAY_MULTIPLIER):
    """Yield sleep intervals for truncated exponential back-off.

    Each interval is drawn uniformly from [0, 2 * delay] — full jitter
    that averages out to `delay` — and clamped to `maximum`; the base
    delay then grows geometrically.

    .. _Truncated Exponential Back-off:
        https://cloud.google.com/storage/docs/exponential-backoff

    Args:
        initial (float): The minimum amount of time to delay. Must be
            greater than 0.
        maximum (float): The maximum amount of time to delay.
        multiplier (float): Growth factor applied after each yield.

    Yields:
        float: successive sleep intervals.
    """
    delay = initial
    while True:
        yield min(random.uniform(0.0, delay * 2.0), maximum)
        delay *= multiplier
Example 7
Project: Manga-colorization---cycle-gan   Author: OValery16   File: image_pool.py    Mozilla Public License 2.0 6 votes vote down vote up
def query(self, images):
    """Return a batch drawn from the image history buffer.

    While the buffer is filling, incoming images are stored and passed
    through.  Once full, each image is either passed through or (with
    probability 0.5) exchanged for a randomly chosen buffered image.
    """
    if self.pool_size == 0:
        return images
    out = []
    for image in images:
        image = torch.unsqueeze(image.data, 0)
        if self.num_imgs < self.pool_size:
            # buffer not full yet: store and emit the new image
            self.num_imgs += 1
            self.images.append(image)
            out.append(image)
        else:
            if random.uniform(0, 1) > 0.5:
                # swap: emit a buffered image, keep the new one
                idx = random.randint(0, self.pool_size - 1)  # randint is inclusive
                stored = self.images[idx].clone()
                self.images[idx] = image
                out.append(stored)
            else:
                out.append(image)
    return torch.cat(out, 0)
Example 8
Project: robosuite   Author: StanfordVL   File: demo_sampler_wrapper.py    MIT License 6 votes vote down vote up
def sample(self):
        """
        This is the core sampling method. Samples a state from a
        demonstration, in accordance with the configuration.
        """

        # chooses a sampling scheme randomly based on the mixing ratios
        seed = random.uniform(0, 1)
        ratio = np.cumsum(self.scheme_ratios)
        ratio = ratio > seed
        for i, v in enumerate(ratio):
            if v:
                break

        sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
        return sample_method() 
Example 9
Project: 3D_immersion_TL   Author: ptabriz   File: mesh_helpers.py    GNU General Public License v2.0 6 votes vote down vote up
def bmesh_face_points_random(f, num_points=1, margin=0.05):
    import random
    from random import uniform
    uniform_args = 0.0 + margin, 1.0 - margin

    # for pradictable results
    random.seed(f.index)

    vecs = [v.co for v in f.verts]

    for i in range(num_points):
        u1 = uniform(*uniform_args)
        u2 = uniform(*uniform_args)
        u_tot = u1 + u2

        if u_tot > 1.0:
            u1 = 1.0 - u1
            u2 = 1.0 - u2

        side1 = vecs[1] - vecs[0]
        side2 = vecs[2] - vecs[0]

        yield vecs[0] + u1 * side1 + u2 * side2 
Example 10
Project: BayesRate   Author: schnitzler-j   File: prior.py    MIT License 6 votes vote down vote up
def update_time(times, prior_shift, UP, LO):
    """Propose an updated vector of shift times (MCMC move).

    Picks one interior index at random and redraws its time uniformly
    within a window bounded by its neighbours (or UP/LO at the ends)
    and, when supplied, by `prior_shift`.

    NOTE(review): relies on a module-level `sort` (numpy-style star
    import) and updates through `new_times`, which aliases `times`.
    """
    times = list(sort(times))
    times.reverse()
    new_times = times  # alias, NOT a copy (original behaviour)
    # interior index: int(uniform(1.5, len-1.5)) truncates to [1, len-2]
    ind = int(random.uniform(1.5, len(times) - 1.5))
    up = times[ind - 1]
    lo = times[ind + 1]
    if ind - 1 == 0:
        up = UP
    if ind + 1 == len(times):
        lo = LO

    if len(prior_shift) > 1:
        j = ind - 1
        new_times[ind] = random.uniform(
            max(new_times[ind] - .5, lo + .5, prior_shift[j * 2 - 1]),
            min(new_times[ind] + .5, up - .5, prior_shift[j * 2 - 2]))
    else:
        new_times[ind] = random.uniform(
            max(new_times[ind] - .5, lo + .5, .5),
            min(new_times[ind] + .5, up - .5, max(times) - .5))

    times = list(sort(new_times))
    return times
Example 11
Project: controller   Author: deis   File: __init__.py    MIT License 6 votes vote down vote up
def fake_responses(request, context):
    """Mock HTTP handler: set a random canned status on `context`.

    The 404 entry is repeated to skew the outcome toward Not Found.
    Sleeps 1-4 seconds to simulate network jitter, then returns the
    body text.
    """
    canned = [
        # increasing the chance of 404
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'Not Found', 'status_code': 404},
        {'text': 'OK', 'status_code': 200},
        {'text': 'Gateway timeout', 'status_code': 504},
        {'text': 'Bad gateway', 'status_code': 502},
    ]
    random.shuffle(canned)
    chosen = canned.pop()

    context.status_code = chosen['status_code']
    context.reason = chosen['text']
    # sleep jitter: random float in [1.0, 4.0]
    time.sleep(random.uniform(1, 4))
    return chosen['text']
Example 12
Project: controller   Author: deis   File: mock.py    MIT License 6 votes vote down vote up
def add_cleanup_pod(url):
    """Register `url` in the cleanup pod list with a jittered deadline."""
    # variance lets a pod outlive (or undercut) the grace period
    jitter = random.uniform(0.1, 1.5)
    grace = round(settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS * jitter)

    # record this pod's jittered cleanup deadline
    pods = cache.get('cleanup_pods', {})
    pods[url] = datetime.utcnow() + timedelta(seconds=grace)
    cache.set('cleanup_pods', pods)

    # stamp the pod object itself with the un-jittered grace deadline
    pod = cache.get(url)
    grace = settings.KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS
    deadline = datetime.utcnow() + timedelta(seconds=grace)
    pod['metadata']['deletionTimestamp'] = str(deadline.strftime(MockSchedulerClient.DATETIME_FORMAT))
    cache.set(url, pod)
Example 13
Project: pcfg-sampling   Author: wilkeraziz   File: generalisedSampling.py    Apache License 2.0 6 votes vote down vote up
def __init__(self, forest, inside_node, omega=lambda edge: edge.log_prob):
        """

        :param forest: an acyclic hypergraph
        :param inside_node: a dictionary mapping nodes to their inside weights.
        :param omega: a function that returns the weight of an edge.
            By default we return the edge's log probability, but omega
            can be used in situations where we must compute a function of that weight, for example,
            when we want to convert from a semiring to another,
            or when we want to compute a uniform probability based on assingments of the slice variables.
        """

        self.forest = forest
        self.inside_node = inside_node
        self.inside_edge = dict()  # cache for the inside weight of edges
        self.omega = omega 
Example 14
Project: torch-toolbox   Author: PistonY   File: transforms.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_params(fov_range, anglex_ranges, angley_ranges, anglez_ranges, shear_ranges,
                   translate, scale_ranges, img_size):
        """Get parameters for ``perspective`` for a random perspective transform.

        Returns:
            sequence: params to be passed to the perspective transformation
        """
        fov = 90 + random.uniform(-fov_range, fov_range)
        anglex = random.uniform(anglex_ranges[0], anglex_ranges[1])
        angley = random.uniform(angley_ranges[0], angley_ranges[1])
        anglez = random.uniform(anglez_ranges[0], anglez_ranges[1])
        shear = random.uniform(shear_ranges[0], shear_ranges[1])

        max_dx = translate[0] * img_size[1]
        max_dy = translate[1] * img_size[0]
        translations = (np.round(random.uniform(-max_dx, max_dx)),
                        np.round(random.uniform(-max_dy, max_dy)))

        scale = (random.uniform(1 / scale_ranges[0], scale_ranges[0]),
                 random.uniform(1 / scale_ranges[1], scale_ranges[1]))

        return fov, anglex, angley, anglez, shear, translations, scale 
Example 15
Project: torch-toolbox   Author: PistonY   File: transforms.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def get_params(img, scale, ratio):

        img_h, img_w, img_c = img.shape

        s = random.uniform(*scale)
        # if you img_h != img_w you may need this.
        # r_1 = max(r_1, (img_h*s)/img_w)
        # r_2 = min(r_2, img_h / (img_w*s))
        r = random.uniform(*ratio)
        s = s * img_h * img_w
        w = int(math.sqrt(s / r))
        h = int(math.sqrt(s * r))
        left = random.randint(0, img_w - w)
        top = random.randint(0, img_h - h)

        return left, top, h, w, img_c 
Example 16
Project: django-rest-mock   Author: thomasjiangcy   File: factory.py    MIT License 6 votes vote down vote up
def handle_other_factory_method(attr, minimum, maximum):
    """
    Temporary static dispatcher for extra factory methods; once more
    methods exist this belongs in its own class or registry.

    `minimum`/`maximum` are string literals (parsed with
    ast.literal_eval) and default to 0 and 100 when falsy.
    """
    if attr == 'percentage':
        lo = ast.literal_eval(minimum) if minimum else 0
        hi = ast.literal_eval(maximum) if maximum else 100
        return random.uniform(lo, hi)

    # If `attr` isn't specified above, we need to raise an error
    raise ValueError('`%s` isn\'t a valid factory method.' % attr)
Example 17
Project: slidoc   Author: mitotic   File: sdproxy.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def uniform(self, seedValue=None):
    """Advance the named LCG sequence and return a float in [0, 1)."""
    label = seedValue or ''
    if label not in self.sequences:
        raise Exception('Random number generator not initialized properly:'+str(label))
    # linear congruential step: x_{k+1} = (a * x_k + c) mod m
    next_state = (self.a * self.sequences[label] + self.c) % self.m
    self.sequences[label] = next_state
    # next_state < m by construction, so the quotient is always < 1
    return next_state / float(self.m)
Example 18
Project: slidoc   Author: mitotic   File: sdproxy.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def randomNumber(self, *args):
    """randomNumber(seedValue, min, max)

    Equally probable integer values between min and max (inclusive).
    If min is omitted: integers between 1 and max.
    If both omitted: value uniformly distributed in [0.0, 1.0).

    Fixes: the original body mixed tab and space indentation, which is
    a TabError under Python 3, and carried a stray semicolon.
    """
    if len(args) <= 1:
        return self.uniform(*args)
    if len(args) == 2:
        maxVal = args[1]
        minVal = 1
    else:
        maxVal = args[2]
        minVal = args[1]
    # floor-scale the generator output into [minVal, maxVal]; min() guards
    # the edge case where uniform() maps to maxVal + 1
    return min(maxVal, int(math.floor(minVal + (maxVal - minVal + 1) * self.uniform(args[0]))))
Example 19
Project: slidoc   Author: mitotic   File: sdproxy.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def randomTime():
    """Return a datetime at a random fraction (10%-105%) of the current epoch."""
    scaled_ms = sliauth.epoch_ms() * random.uniform(0.1, 1.05)
    return datetime.datetime.fromtimestamp(scaled_ms / 1000.)
Example 20
Project: Random-Erasing   Author: zhunzhong07   File: transforms.py    Apache License 2.0 5 votes vote down vote up
def __call__(self, img):
    """Random-erasing augmentation for a C x H x W tensor.

    With probability `self.probability`, tries up to 100 times to pick
    a patch whose area fraction lies in [sl, sh] and aspect ratio in
    [r1, 1/r1], then fills it in place with the per-channel means.
    Returns `img` either way.
    """
    if random.uniform(0, 1) > self.probability:
        return img

    for _ in range(100):
        area = img.size()[1] * img.size()[2]

        target_area = random.uniform(self.sl, self.sh) * area
        aspect_ratio = random.uniform(self.r1, 1 / self.r1)

        patch_h = int(round(math.sqrt(target_area * aspect_ratio)))
        patch_w = int(round(math.sqrt(target_area / aspect_ratio)))

        # patch must fit strictly inside the image
        if patch_w >= img.size()[2] or patch_h >= img.size()[1]:
            continue

        x1 = random.randint(0, img.size()[1] - patch_h)
        y1 = random.randint(0, img.size()[2] - patch_w)
        if img.size()[0] == 3:
            img[0, x1:x1 + patch_h, y1:y1 + patch_w] = self.mean[0]
            img[1, x1:x1 + patch_h, y1:y1 + patch_w] = self.mean[1]
            img[2, x1:x1 + patch_h, y1:y1 + patch_w] = self.mean[2]
        else:
            img[0, x1:x1 + patch_h, y1:y1 + patch_w] = self.mean[0]
        return img

    return img
Example 21
Project: Random-Erasing   Author: zhunzhong07   File: test_progress.py    Apache License 2.0 5 votes vote down vote up
def sleep():
    """Sleep roughly 10 ms with +/-10% random jitter."""
    base = 0.01
    time.sleep(base * (1 + random.uniform(-0.1, 0.1)))
Example 22
Project: deep-learning-note   Author: wdxtub   File: 49_word2vec.py    MIT License 5 votes vote down vote up
def discard(idx):
    """Word2vec subsampling test: True means drop this token occurrence.

    A token with corpus frequency f is kept with probability
    sqrt(1e-4 / f), so frequent tokens are discarded more often.
    """
    keep_prob = math.sqrt(1e-4 / counter[idx_to_token[idx]] * num_tokens)
    return random.uniform(0, 1) < 1 - keep_prob
Example 23
Project: pnp   Author: HazardDede   File: mocking.py    MIT License 5 votes vote down vote up
def read_retry(sensor, pin):  # pylint: disable=unused-argument
    """Read the sensor values (humidity, temperature)."""
    humidity = round(random.uniform(1, 100), 2)
    temperature = round(random.uniform(8, 36), 2)
    return humidity, temperature  # pragma: no cover
Example 24
Project: redrum   Author: Evidlo   File: redrum.py    MIT License 5 votes vote down vote up
def weighted_select(config, images, seen):
    # if unseen_only is true, only look at at unseen images
    if config.unseen_only:
        images = [image for image in images if image['id'] not in seen]

    if len(images) == 0:
        print("No images available.  Set `unseen_only` to False, increase `max_pages` or add more subreddits")
        sys.exit()

    total_redrum_score = sum([image['redrum_score'] for image in images])
    rand_score = random.uniform(0, total_redrum_score)
    for image in images:
        rand_score -= image['redrum_score']
        if rand_score <= 0:
            break

    print("Selected {0} ({1}) with score {2} out of {3} images".format(image['link'],
                                                                       image['subreddit'],
                                                                       image['redrum_score'],
                                                                       len(images)))
    print("The probability of selecting this image was {0}".format(image['redrum_score']/total_redrum_score))

    return image


# set wallpaper 
Example 25
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py    MIT License 5 votes vote down vote up
def __call__(self, sample):
        """Random sized crop for a segmentation pair.

        `sample` is a dict with PIL images under 'image' and 'label'.
        Tries up to 10 times to crop a region covering 45-100% of the
        area at an aspect ratio in [0.5, 2], then resizes both to
        (self.size, self.size); falls back to scale + center-crop if
        no attempt fits.
        """
        img = sample['image']
        mask = sample['label']
        assert img.size == mask.size
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            # target crop: random area fraction and random aspect ratio
            target_area = random.uniform(0.45, 1.0) * area
            aspect_ratio = random.uniform(0.5, 2)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            # swap orientation half the time
            if random.random() < 0.5:
                w, h = h, w

            if w <= img.size[0] and h <= img.size[1]:
                x1 = random.randint(0, img.size[0] - w)
                y1 = random.randint(0, img.size[1] - h)

                img = img.crop((x1, y1, x1 + w, y1 + h))
                mask = mask.crop((x1, y1, x1 + w, y1 + h))
                assert (img.size == (w, h))

                # bilinear for the image, nearest for the label mask so
                # class ids are not blended by interpolation
                img = img.resize((self.size, self.size), Image.BILINEAR)
                mask = mask.resize((self.size, self.size), Image.NEAREST)

                return {'image': img,
                        'label': mask}

        # Fallback
        scale = Scale(self.size)
        crop = CenterCrop(self.size)
        sample = crop(scale(sample))
        return sample
Example 26
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py    MIT License 5 votes vote down vote up
def __call__(self, sample):
    """Scale image and label by one random factor in [0.8, 2.5], then
    delegate to self.scale / self.crop."""
    img = sample['image']
    mask = sample['label']
    assert img.size == mask.size

    factor = random.uniform(0.8, 2.5)
    new_w = int(factor * img.size[0])
    new_h = int(factor * img.size[1])

    # bilinear for the image, nearest for the label mask
    img = img.resize((new_w, new_h), Image.BILINEAR)
    mask = mask.resize((new_w, new_h), Image.NEAREST)

    return self.crop(self.scale({'image': img, 'label': mask}))
Example 27
Project: DeepLab_v3_plus   Author: songdejia   File: transform.py    MIT License 5 votes vote down vote up
def __call__(self, sample):
    """Resize image and label by one random factor drawn from self.limit."""
    img, mask = sample['image'], sample['label']
    assert img.size == mask.size

    factor = random.uniform(self.limit[0], self.limit[1])
    size = (int(factor * img.size[0]), int(factor * img.size[1]))

    # bilinear for the image, nearest for the label mask
    return {'image': img.resize(size, Image.BILINEAR),
            'label': mask.resize(size, Image.NEAREST)}
Example 28
Project: synthetic-data-tutorial   Author: theodi   File: AbstractAttribute.py    MIT License 5 votes vote down vote up
def uniform_sampling_within_a_bin(self, bin_idx: int):
    """Draw a uniform sample from the distribution bin at `bin_idx`.

    - index == number of bins: the "missing value" bin -> NaN
    - categorical attribute: return the bin's value itself
    - last numeric bin: the right edge is absent, so extrapolate it by
      mirroring the width of the previous interval
    """
    num_bins = len(self.distribution_bins)
    if bin_idx == num_bins:
        return np.nan
    if self.is_categorical:
        return self.distribution_bins[bin_idx]
    if bin_idx < num_bins - 1:
        return uniform(self.distribution_bins[bin_idx], self.distribution_bins[bin_idx + 1])
    # last interval: synthesise the missing right edge
    prev_edge, left_edge = self.distribution_bins[-2:]
    return uniform(left_edge, 2 * left_edge - prev_edge)
Example 29
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0 5 votes vote down vote up
def _random_pad_proposal(self, label, height, width):
        """Generate random padding region.

        Proposes a padded canvas (x, y, w, h) at a random aspect ratio
        whose area lies within self.area_range * (height * width),
        retrying up to self.max_attempts times.  Returns
        (x, y, w, h, new_label) or () when disabled / no proposal found.
        """
        from math import sqrt
        if not self.enabled or height <= 0 or width <= 0:
            return ()
        min_area = self.area_range[0] * height * width
        max_area = self.area_range[1] * height * width
        for _ in range(self.max_attempts):
            ratio = random.uniform(*self.aspect_ratio_range)
            if ratio <= 0:
                continue
            # candidate canvas heights bracketing the target area range
            h = int(round(sqrt(min_area / ratio)))
            max_h = int(round(sqrt(max_area / ratio)))
            # grow h until the implied width covers the source image
            if round(h * ratio) < width:
                h = int((width + 0.499999) / ratio)
            # clamp: canvas must contain the image but stay under max area
            if h < height:
                h = height
            if h > max_h:
                h = max_h
            if h < max_h:
                h = random.randint(h, max_h)
            w = int(round(h * ratio))
            if (h - height) < 2 or (w - width) < 2:
                continue  # marginal padding is not helpful

            # place the original image at a random offset inside the canvas
            y = random.randint(0, max(0, h - height))
            x = random.randint(0, max(0, w - width))
            new_label = self._update_labels(label, (x, y, w, h), height, width)
            return (x, y, w, h, new_label)
        return ()
Example 30
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: image.py    Apache License 2.0 5 votes vote down vote up
def __call__(self, src):
    """Augmenter body: scale pixel values by a random brightness factor
    in [1 - brightness, 1 + brightness]; modifies and returns `src`."""
    factor = 1.0 + random.uniform(-self.brightness, self.brightness)
    src *= factor
    return src
Example 31
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: image.py    Apache License 2.0 5 votes vote down vote up
def __call__(self, src):
    """Augmenter body: randomly adjust saturation by blending `src`
    with its grayscale projection; modifies and returns `src`."""
    factor = 1.0 + random.uniform(-self.saturation, self.saturation)
    # luminance image via the stored coefficient vector
    gray = nd.sum(src * self.coef, axis=2, keepdims=True)
    gray *= (1.0 - factor)
    src *= factor
    src += gray
    return src
Example 32
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: image.py    Apache License 2.0 5 votes vote down vote up
def __call__(self, src):
    """Augmenter body: rotate the hue by a random angle.

    Uses the approximate linear transformation described in
    https://beesbuzz.biz/code/hsv_color_transforms.php
    """
    theta = random.uniform(-self.hue, self.hue) * np.pi
    u, w = np.cos(theta), np.sin(theta)
    # rotation about the Y axis in YIQ space
    rot = np.array([[1.0, 0.0, 0.0],
                    [0.0, u, -w],
                    [0.0, w, u]])
    transform = np.dot(np.dot(self.ityiq, rot), self.tyiq).T
    return nd.dot(src, nd.array(transform))
Example 33
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_sparse_operator.py    Apache License 2.0 5 votes vote down vote up
def gen_rsp_random_indices(shape, density=.5, force_indices=None):
    """Pick row indices for a row_sparse ndarray of the given shape.

    Each row is kept independently with probability `density` (capped
    at shape[0] rows total); `force_indices` entries are always
    included.  Returns a list of ints.
    """
    assert 0 <= density <= 1
    chosen = set()
    if force_indices is not None:
        chosen.update(int(val) for val in force_indices)
    # density within 1e-3 of zero means "no random rows at all"
    if len(shape) > 0 and not np.isclose(density, .0, rtol=1.e-3, atol=1.e-3, equal_nan=True):
        for row in range(shape[0]):
            if random.uniform(0, 1) <= density and len(chosen) < shape[0]:
                chosen.add(row)
    assert len(chosen) <= shape[0]
    return list(chosen)
Example 34
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_sparse_operator.py    Apache License 2.0 5 votes vote down vote up
def test_sparse_elementwise_sum():
    """Check sparse add_n forward/backward against a dense numpy sum.

    Fix: integer comparisons used `is` (identity), which emits
    SyntaxWarning on modern CPython and — for `pick_side is 0/1`, where
    `pick_side` is a numpy integer — was always False, so the random
    stype list was never actually extended.  Replaced with `==`.
    """
    def check_sparse_elementwise_sum_with_shape(stypes, shape, n):
        # forward
        inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
        out = mx.symbol.sparse.add_n(*inputs, name='esum')
        arr = []
        arr_grad = [mx.nd.empty(shape, stype=stype) for stype in stypes]
        densities = [0, 0.01, 0.5, 1.0]
        for stype in stypes:
            arr.append(rand_ndarray(shape, stype, densities[np.random.randint(0, len(densities))]))

        exec1 = out.bind(default_context(),
                         args=arr,
                         args_grad=arr_grad)
        exec1.forward(is_train=True)
        out1 = exec1.outputs[0].asnumpy()
        out = sum(a.asnumpy() for a in arr)
        assert_almost_equal(out, out1, atol=1e-5)

        out_grad = mx.nd.empty(shape)
        out_grad[:] = np.random.uniform(-10, 10, shape)
        # backward: the gradient of add_n w.r.t. every input is out_grad
        exec1.backward([out_grad])
        for a in arr_grad:
            assert_almost_equal(a.asnumpy(), out_grad.asnumpy(), atol=1e-5)

    all_stypes = ['default', 'csr', 'row_sparse']
    for dim in range(2, 4):
        shape = tuple(np.random.randint(5, 10, size=dim))
        rsp_test_cnt = np.random.randint(1, 9)
        check_sparse_elementwise_sum_with_shape(['row_sparse' for i in range(rsp_test_cnt)], shape, rsp_test_cnt)
        if dim == 2:
            check_sparse_elementwise_sum_with_shape(['default', 'csr', 'default'], shape, 3)
            test_len = np.random.randint(5, 10)
            # at least one default type
            stypes = ['default']
            for i in range(test_len):
                pick_side = np.random.randint(2)
                pick_type = np.random.randint(3)
                stypes = ([all_stypes[pick_type]] if pick_side == 0 else []) + stypes + ([all_stypes[pick_type]] if pick_side == 1 else [])
            check_sparse_elementwise_sum_with_shape(stypes, shape, test_len+1)
Example 35
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_sparse_operator.py    Apache License 2.0 5 votes vote down vote up
def test_mkldnn_sparse():
    """Exercise the race condition described in
    https://github.com/apache/incubator-mxnet/issues/10189 by mixing an
    MKLDNN convolution output with sparse and dense operators."""
    data = mx.nd.random.uniform(shape=(10, 10, 32, 32))
    conv_weight = mx.nd.random.uniform(shape=(10, 10, 3, 3))
    data = mx.nd.Convolution(data=data, weight=conv_weight, no_bias=True, kernel=(3, 3), num_filter=10)

    # all-zero row-sparse array, so subtracting it is an identity op
    rs_arr = mx.nd.sparse.row_sparse_array((mx.nd.zeros_like(data), np.arange(data.shape[0])))
    fc_weight = mx.nd.random.uniform(shape=(10, np.prod(data.shape[1:4])))
    fc_res = mx.nd.FullyConnected(data=data, weight=fc_weight, no_bias=True, num_hidden=10)
    diff = mx.nd.elemwise_sub(data, rs_arr)
    # reference result via numpy
    res1 = np.dot(mx.nd.flatten(diff).asnumpy(), fc_weight.asnumpy().T)
    print(res1 - fc_res.asnumpy())
    almost_equal(res1, fc_res.asnumpy())
Example 36
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: image.py    Apache License 2.0 5 votes vote down vote up
def imresize(src, w, h, interp=1):
    """Resize image with OpenCV.

    This is a duplicate of mxnet.image.imresize for name space consistency.

    Parameters
    ----------
    src : mxnet.nd.NDArray
        Source image.
    w : int, required
        Width of the resized image.
    h : int, required
        Height of the resized image.
    interp : int, optional, default='1'
        Interpolation method (default=cv2.INTER_LINEAR).

    Returns
    -------
    out : NDArray or list of NDArrays
        The resized image.

    Examples
    --------
    >>> import mxnet as mx
    >>> from gluoncv import data as gdata
    >>> img = mx.random.uniform(0, 255, (300, 300, 3)).astype('uint8')
    >>> print(img.shape)
    (300, 300, 3)
    >>> img = gdata.transforms.image.imresize(img, 200, 200)
    >>> print(img.shape)
    (200, 200, 3)
    """
    from mxnet.image.image import _get_interp_method as get_interp
    src_h, src_w, _ = src.shape
    chosen = get_interp(interp, (src_h, src_w, h, w))
    return mx.image.imresize(src, w, h, interp=chosen)
Example 37
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: block.py    Apache License 2.0 5 votes vote down vote up
def forward(self, x):
    """Random-erasing augmentation for an H x W x 3 array.

    With probability `self.probability`, overwrite one random rectangle
    of `x` (area fraction in [s_min, s_max], aspect ratio in
    [ratio, 1/ratio]) with `self.mean`.  Returns `x`, possibly modified
    in place.
    """
    # configuration validation: the four range parameters must be floats
    for value in (self.probability, self.s_min, self.s_max, self.ratio):
        if not isinstance(value, float):
            raise TypeError('Got inappropriate size arg')
    if not isinstance(self.mean, (int, tuple)):
        raise TypeError('Got inappropriate size arg')

    if random.uniform(0, 1) > self.probability:
        return x

    width, height, _ = x.shape
    target_area = random.uniform(self.s_min, self.s_max) * width * height
    aspect = random.uniform(self.ratio, 1 / self.ratio)
    w = int(round(math.sqrt(target_area * aspect)))
    h = int(round(math.sqrt(target_area / aspect)))
    if w < width and h < height:
        x1 = random.randint(0, width - w)
        y1 = random.randint(0, height - h)
        for channel in range(3):
            x[x1:x1 + w, y1:y1 + h, channel] = self.mean[channel]
    return x
Example 38
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_ball_gym_env.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _reset(self):
    """Reset the base environment, then spawn the ball at a random heading."""
    # Clear any stale ball handle before the base-class reset runs.
    self._ball_id = 0
    super(MinitaurBallGymEnv, self)._reset()
    # Sample a heading within +/- INIT_BALL_ANGLE at a fixed distance.
    theta = random.uniform(-INIT_BALL_ANGLE, INIT_BALL_ANGLE)
    self._init_ball_theta = theta
    self._init_ball_distance = INIT_BALL_DISTANCE
    distance = self._init_ball_distance
    self._ball_pos = [distance * math.cos(theta),
                      distance * math.sin(theta), 1]
    self._ball_id = self._pybullet_client.loadURDF(
        "%s/sphere_with_restitution.urdf" % self._urdf_root, self._ball_pos)
    return self._get_observation()
Example 39
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_control_step(self, env, lower_bound, upper_bound):
    """Draw a control time step uniformly from the bounds and apply it."""
    step = random.uniform(lower_bound, upper_bound)
    env.set_time_step(step)
    tf.logging.info("control step is: {}".format(step))
Example 40
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_masses(self, minitaur, lower_bound, upper_bound):
    """Scale base and leg masses by independent uniform random ratios."""
    base_ratio = random.uniform(lower_bound, upper_bound)
    new_base_mass = base_ratio * np.array(minitaur.GetBaseMassesFromURDF())
    minitaur.SetBaseMasses(new_base_mass)
    tf.logging.info("base mass is: {}".format(new_base_mass))

    leg_ratio = random.uniform(lower_bound, upper_bound)
    new_leg_masses = leg_ratio * np.array(minitaur.GetLegMassesFromURDF())
    minitaur.SetLegMasses(new_leg_masses)
    tf.logging.info("leg mass is: {}".format(new_leg_masses))
Example 41
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_inertia(self, minitaur, lower_bound, upper_bound):
    """Scale base and leg inertias by independent uniform random ratios."""
    base_scale = random.uniform(lower_bound, upper_bound)
    new_base_inertia = base_scale * np.array(minitaur.GetBaseInertiasFromURDF())
    minitaur.SetBaseInertias(new_base_inertia)
    tf.logging.info("base inertia is: {}".format(new_base_inertia))
    leg_scale = random.uniform(lower_bound, upper_bound)
    new_leg_inertia = leg_scale * np.array(minitaur.GetLegInertiasFromURDF())
    minitaur.SetLegInertias(new_leg_inertia)
    tf.logging.info("leg inertia is: {}".format(new_leg_inertia))
Example 42
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_latency(self, minitaur, lower_bound, upper_bound):
    """Draw a control latency uniformly from the bounds and apply it."""
    latency = random.uniform(lower_bound, upper_bound)
    minitaur.SetControlLatency(latency)
    tf.logging.info("control latency is: {}".format(latency))
Example 43
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_motor_friction(self, minitaur, lower_bound, upper_bound):
    """Draw a viscous motor damping value uniformly and apply it."""
    damping = random.uniform(lower_bound, upper_bound)
    minitaur.SetMotorViscousDamping(damping)
    tf.logging.info("motor friction is: {}".format(damping))
Example 44
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_contact_restitution(self, minitaur, lower_bound, upper_bound):
    """Draw a foot restitution coefficient uniformly and apply it."""
    restitution = random.uniform(lower_bound, upper_bound)
    minitaur.SetFootRestitution(restitution)
    tf.logging.info("foot restitution is: {}".format(restitution))
Example 45
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_contact_friction(self, minitaur, lower_bound, upper_bound):
    """Draw a foot friction coefficient uniformly and apply it."""
    friction = random.uniform(lower_bound, upper_bound)
    minitaur.SetFootFriction(friction)
    tf.logging.info("foot friction is: {}".format(friction))
Example 46
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_battery_level(self, minitaur, lower_bound, upper_bound):
    """Draw a battery voltage uniformly and apply it."""
    voltage = random.uniform(lower_bound, upper_bound)
    minitaur.SetBatteryVoltage(voltage)
    tf.logging.info("battery voltage is: {}".format(voltage))
Example 47
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer_from_config.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_motor_strength(self, minitaur, lower_bound, upper_bound):
    """Draw an independent strength ratio per motor and apply them all."""
    count = minitaur.num_motors
    ratios = np.random.uniform([lower_bound] * count, [upper_bound] * count)
    minitaur.SetMotorStrengthRatios(ratios)
    tf.logging.info(
        "motor strength is: {}".format(ratios))
Example 48
Project: soccer-matlab   Author: utra-robosoccer   File: minitaur_env_randomizer.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def _randomize_minitaur(self, minitaur):
    """Randomize various physical properties of minitaur.

    It randomizes the mass/inertia of the base, mass/inertia of the legs,
    friction coefficient of the feet, the battery voltage and the motor damping
    at each reset() of the environment.

    Args:
      minitaur: the Minitaur instance in minitaur_gym_env environment.
    """
    # Base mass: scale by a ratio drawn from the configured error range.
    base_mass = minitaur.GetBaseMassFromURDF()
    err_lo, err_hi = self._minitaur_base_mass_err_range
    minitaur.SetBaseMass(random.uniform(base_mass * (1.0 + err_lo),
                                        base_mass * (1.0 + err_hi)))

    # Leg masses: every link gets its own independent uniform draw within
    # the leg-mass error range.
    leg_masses = np.array(minitaur.GetLegMassesFromURDF())
    leg_lower = leg_masses * (1.0 + self._minitaur_leg_mass_err_range[0])
    leg_upper = leg_masses * (1.0 + self._minitaur_leg_mass_err_range[1])
    minitaur.SetLegMasses([np.random.uniform(leg_lower[k], leg_upper[k])
                           for k in range(len(leg_masses))])

    # Battery voltage, motor damping and foot friction come from
    # module-level ranges.
    minitaur.SetBatteryVoltage(
        random.uniform(BATTERY_VOLTAGE_RANGE[0], BATTERY_VOLTAGE_RANGE[1]))
    minitaur.SetMotorViscousDamping(
        random.uniform(MOTOR_VISCOUS_DAMPING_RANGE[0],
                       MOTOR_VISCOUS_DAMPING_RANGE[1]))
    minitaur.SetFootFriction(
        random.uniform(MINITAUR_LEG_FRICTION[0], MINITAUR_LEG_FRICTION[1]))
Example 49
Project: cloud-profiler-python   Author: GoogleCloudPlatform   File: backoff.py    Apache License 2.0 5 votes vote down vote up
def next_backoff(self, error=None):
    """Calculates the backoff duration for a failed request.

    Prefers a server-specified retry delay embedded in an HttpError;
    otherwise falls back to randomized exponential backoff.

    Args:
      error: The exception that caused the failure.

    Returns:
      A float representing the desired backoff duration in seconds.
    """
    try:
      if isinstance(error, googleapiclient.errors.HttpError):
        payload = json.loads(error.content.decode('utf-8'))
        for detail in payload.get('error', {}).get('details', []):
          if 'retryDelay' not in detail:
            continue
          delay = duration_pb2.Duration()
          json_format.Parse(json.dumps(detail['retryDelay']), delay)
          return delay.seconds + float(delay.nanos) / _NANOS_PER_SEC
    # It's safe to catch BaseException because this runs in a daemon thread.
    except BaseException as e:
      logger.error(
          'Failed to extract server-specified backoff duration '
          '(will use exponential backoff): %s', str(e))

    # Exponential backoff with jitter: sample within the current envelope,
    # then grow the envelope for the next failure (capped at the maximum).
    duration = random.uniform(0, self._current_envelope_sec)
    self._current_envelope_sec = min(
        self._max_envelope_sec, self._current_envelope_sec * self._multiplier)
    return duration
Example 50
Project: pyalcs   Author: ParrotPrediction   File: genetic_algorithms.py    MIT License 5 votes vote down vote up
def _weighted_random_choice(choices: Dict):
    max = sum(choices.values())
    pick = random.uniform(0, max)
    current = 0

    for key, value in choices.items():
        current += value
        if current > pick:
            return key 
Example 51
Project: Graphvy   Author: anbarief   File: graphvy.py    MIT License 5 votes vote down vote up
def import_data(self, obj):
    """Load nodes from data.npy, place each at a random position on the
    canvas, and connect every node to the nodes named as its neighbors."""
    dataset = list(numpy.load("data.npy"))
    created = []
    for record in dataset:
        # Random placement on a 2x-sized (1600 x 1200) canvas.
        position = [random.uniform(0, 2*800), random.uniform(0, 2*600)]
        node = Node(None, record['name'], position,
                    color=(202/255, 204/255, 206/255, 0.9))
        self.graph.nodes.append(node)
        self.add_widget(node)
        created.append((node, record['neighbor']))
    # Wire up edges once every node widget exists.
    for node, neighbor_names in created:
        for other, _ in created:
            if other.visual_text in neighbor_names:
                node.connect_node(other)
Example 52
Project: BayesRate   Author: schnitzler-j   File: prior.py    MIT License 5 votes vote down vote up
def wiener_process(m, s, lowest, greatest):  # NORMAL VARIATION
    """Draw from Normal(m, s); if the draw escapes [lowest, greatest],
    replace it with a uniform draw between the violated bound and m."""
    draw = random.normalvariate(m, s)
    if draw < lowest:
        draw = random.uniform(lowest, m)
    if draw > greatest:
        draw = random.uniform(m, greatest)
    return draw
Example 53
Project: BayesRate   Author: schnitzler-j   File: prior.py    MIT License 5 votes vote down vote up
def update_BOTHVAR(lrates, mrates, kA, zA, mod_rates):
    """Propose updated speciation/extinction rates plus k and z parameters,
    keeping the extinction proposal bounded by the new speciation rate."""
    new_lrates = [update_parameter(lrates[0], .25, .001, 5, mod_rates)]
    new_mrates = [update_parameter(mrates[0], .25, 0, new_lrates[0], mod_rates)]
    kN = update_parameter(kA, .1, .001, 5, mod_rates)
    zN = update_parameter(zA, random.uniform(.1, 10), .001, 50, mod_rates)
    return kN, zN, new_lrates, new_mrates
Example 54
Project: BayesRate   Author: schnitzler-j   File: prior.py    MIT License 5 votes vote down vote up
def initial_params(root, categ, fixed_times, prior_shift):
	"""Draw starting shift times and birth/death rates for `categ` rate
	categories on a tree of age `root`.

	Returns (times, rates, mrates, std_r, std_t, kA, zA); `times` is sorted
	in decreasing order, beginning at `root` and ending at 0.
	"""
	times=list()
	rates=list()
	mrates=list()
	times.append(root)
	# One shift time per category: the first is pinned at 0; later ones come
	# from the user prior when supplied, else uniformly in (1, root * 0.33).
	for i in range(categ):
		if i==0: times.append(0)
		elif len(prior_shift)>1: times.append(random.uniform(prior_shift[i*2-1], prior_shift[i*2-2]))
		else: times.append(random.uniform(1,root*.33))
		#min(root*.33,max(1., random.lognormvariate(2,2)))) # random.uniform(lo, up))

	# Per category: speciation rate in (0, 1) and an extinction rate drawn
	# below it (clamped at 0).
	for i in range(categ):
		#if i==0: times.append(0)
		#elif len(prior_shift)>1: times.append(random.uniform(prior_shift[i-1], prior_shift[i]))
		#else: times.append(min(root*.33,max(1., random.lognormvariate(2,2)))) # random.uniform(lo, up))
		lrate=random.random() #max(0.0001, fabs(random.gammavariate(.5, 1)))
		mrate=max(0, random.uniform(0, lrate-.1))
		rates.append(lrate)
		mrates.append(mrate)
	# User-fixed shift times replace the sampled ones, bracketed by root and 0.
	if sum(fixed_times) != 0: 
		lim=root,  0. 
		times = fixed_times + list(lim)
	# NOTE(review): `sort` is not defined in this block — presumably it comes
	# from a star import (numpy); confirm. Net effect: times sorted descending.
	times=list(sort(times))
	times.reverse()
	std_t=std_r=.1
	kA, zA = random.uniform(.0001, .2), random.uniform(1, 10)
	return times, rates, mrates, std_r, std_t, kA, zA
Example 55
Project: pcfg-sampling   Author: wilkeraziz   File: generalisedSampling.py    Apache License 2.0 5 votes vote down vote up
def select(self, parent):
        """
        select method, draws a random edge with respect to the Inside weight distribution
        """
        # self.iq = dict()
        incoming = self.forest.get(parent, frozenset())

        if not incoming:
            raise ValueError('I cannot sample an incoming edge to a terminal node')

        # the inside weight of the parent node
        ip = self.inside_node[parent]

        # select an edge randomly with respect to the distribution of the edges
        # threshold for selecting an edge
        # NOTE(review): the draw is u ~ U(0, exp(ip)) mapped back to log space;
        # exp(ip) may over/underflow for extreme log weights — confirm the
        # expected range of ip.
        threshold = np.log(random.uniform(0, np.exp(ip)))

        # Accumulate edge inside weights in log space until the running total
        # passes the threshold; logaddexp keeps the sum numerically stable.
        acc = -float("inf")
        for e in incoming:
            # acc = math.log(math.exp(acc) + math.exp(self.get_edge_inside(e)))
            acc = np.logaddexp(acc, self.get_edge_inside(e))
            if acc > threshold:
                return e

        # if there is not yet an edge returned for some rare rounding error,
        # return the last edge, hence that is the edge closest to the threshold
        # (relies on the loop variable `e` remaining bound after the for loop).
        return e
Example 56
Project: tsn-pytorch   Author: yjxiong   File: transforms.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def __call__(self, img_group):
        """Apply one shared random sized crop to every image in img_group and
        resize each crop to (self.size, self.size); fall back to
        scale-then-random-crop when no proposal fits.
        """
        # Propose up to 10 random (area, aspect-ratio) crops based on the
        # first frame's dimensions (all frames are assumed the same size —
        # TODO confirm).
        for attempt in range(10):
            area = img_group[0].size[0] * img_group[0].size[1]
            target_area = random.uniform(0.08, 1.0) * area
            aspect_ratio = random.uniform(3. / 4, 4. / 3)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            # Swap orientation half the time.
            if random.random() < 0.5:
                w, h = h, w

            if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
                x1 = random.randint(0, img_group[0].size[0] - w)
                y1 = random.randint(0, img_group[0].size[1] - h)
                found = True
                break
        else:
            # for/else: this branch runs only when no proposal fit in 10 tries.
            found = False
            x1 = 0
            y1 = 0

        if found:
            # Same crop window for every frame, then square resize.
            out_group = list()
            for img in img_group:
                img = img.crop((x1, y1, x1 + w, y1 + h))
                assert(img.size == (w, h))
                out_group.append(img.resize((self.size, self.size), self.interpolation))
            return out_group
        else:
            # Fallback
            scale = GroupScale(self.size, interpolation=self.interpolation)
            crop = GroupRandomCrop(self.size)
            return crop(scale(img_group))
Example 57
Project: torch-toolbox   Author: PistonY   File: transforms.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (CV Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        height, width = img.shape[0], img.shape[1]
        area = height * width

        # Try up to ten random (area, aspect-ratio) proposals; keep the
        # first one that fits inside the image.
        for _ in range(10):
            target_area = random.uniform(*scale) * area
            aspect = random.uniform(*ratio)

            w = int(round(math.sqrt(target_area * aspect)))
            h = int(round(math.sqrt(target_area / aspect)))

            if w <= width and h <= height:
                i = random.randint(0, height - h)
                j = random.randint(0, width - w)
                return i, j, h, w

        # Fallback to central crop
        in_ratio = width / height
        if in_ratio < min(ratio):
            w = width
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = height
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = width
            h = height
        i = (height - h) // 2
        j = (width - w) // 2
        return i, j, h, w
Example 58
Project: torch-toolbox   Author: PistonY   File: transforms.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are same as that of __init__.

        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        ops = []

        # For each enabled jitter, draw a factor from its range and bind it
        # into a Lambda (defaults pin the per-op factor and function).
        specs = ((brightness, F.adjust_brightness),
                 (contrast, F.adjust_contrast),
                 (saturation, F.adjust_saturation),
                 (hue, F.adjust_hue))
        for bounds, adjust in specs:
            if bounds is not None:
                factor = random.uniform(bounds[0], bounds[1])
                ops.append(Lambda(lambda img, fn=adjust, f=factor: fn(img, f)))

        # Apply the selected adjustments in a random order.
        random.shuffle(ops)
        return Compose(ops)
Example 59
Project: torch-toolbox   Author: PistonY   File: transforms.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_params(degrees):
        """Get parameters for ``rotate`` for a random rotation.

        Returns:
            sequence: params to be passed to ``rotate`` for random rotation.
        """
        low, high = degrees[0], degrees[1]
        return random.uniform(low, high)
Example 60
Project: torch-toolbox   Author: PistonY   File: transforms.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_params(degrees, translate, scale_ranges, shears, img_size):
        """Get parameters for affine transformation

        Returns:
            sequence: params to be passed to the affine transformation
        """
        angle = random.uniform(degrees[0], degrees[1])

        # Translation is expressed as a fraction of image size, rounded to
        # whole pixels; disabled components default to identity values.
        if translate is None:
            translations = (0, 0)
        else:
            max_dx = translate[0] * img_size[1]
            max_dy = translate[1] * img_size[0]
            translations = (np.round(random.uniform(-max_dx, max_dx)),
                            np.round(random.uniform(-max_dy, max_dy)))

        scale = 1.0 if scale_ranges is None else random.uniform(
            scale_ranges[0], scale_ranges[1])
        shear = 0.0 if shears is None else random.uniform(shears[0], shears[1])

        return angle, translations, scale, shear
Example 61
Project: torch-toolbox   Author: PistonY   File: transforms.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def get_params(mean, std):
        """Get parameters for gaussian noise
        Returns:
            sequence: params to be passed to the affine transformation
        """
        # NOTE(review): std is drawn from [-std, std], so the sampled value can
        # be negative — presumably downstream uses its magnitude; confirm.
        sampled_mean = random.uniform(-mean, mean)
        sampled_std = random.uniform(-std, std)

        return sampled_mean, sampled_std
Example 62
Project: 1-tk1-zener-learner   Author: mlennox   File: expand.py    MIT License 5 votes vote down vote up
def rotate_image(img, rotation):
    """Rotate img by a random angle of at most `rotation` degrees.

    The rotation magnitude is a uniform draw raised to the 4th power, which
    skews sampled angles toward zero (small rotations are more likely);
    the direction is chosen at random.
    """
    factor = math.pow(random.uniform(0.0, 1.0), 4)
    direction = (1, -1)[random.random() > 0.5]
    angle = int(math.floor(rotation * factor * direction))
    return img.rotate(angle)


# crop the image to a square that bounds the image using largest bounding-box dimension
# and then resize the image to the size desired for the neural net training 
Example 63
Project: Distributed-AutoDeepLab   Author: HankKung   File: custom_transforms.py    Apache License 2.0 5 votes vote down vote up
def __call__(self, sample):
        """Rotate image and label by the same random angle in
        [-degree, degree]: bilinear for the image, nearest for the mask."""
        angle = random.uniform(-1 * self.degree, self.degree)
        rotated_img = sample['image'].rotate(angle, Image.BILINEAR)
        rotated_mask = sample['label'].rotate(angle, Image.NEAREST)

        return {'image': rotated_img,
                'label': rotated_mask}
Example 64
Project: pytorch-mri-segmentation-3D   Author: Achilleas   File: train_deeplab2D.py    MIT License 5 votes vote down vote up
def get_data_from_chunk_v2(chunk):
    """Load a batch of 2-D FLAIR slices (and WMH ground truth) listed in
    `chunk`, apply one shared random scale plus a per-slice random flip, and
    return tensors ready for training.

    Returns:
        images: FloatTensor of shape (batch, 1, dim, dim).
        labels: list of four label batches resized to the network output size.
    """

    main_folder_path = '../../Data/MS2017a/'
    scans_folder_path = main_folder_path + 'scans/'

    img_type_path = 'pre/FLAIR.nii.gz'
    gt_type_path = 'wmh.nii.gz'

    # One random scale shared by the whole batch; dim is the scaled side.
    scale = random.uniform(0.5, 1.3)
    dim = int(scale*321)

    images = np.zeros((dim,dim, 1,len(chunk)))
    gt = np.zeros((dim,dim,1,len(chunk)))
    for i, piece in enumerate(chunk):
        print(os.path.join(main_folder_path, piece))
        img_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece))
        # Same flip draw is reused for the image and its ground truth below.
        flip_p = random.uniform(0, 1)

        img_temp = cv2.resize(img_temp,(321,321)).astype(float)
        img_temp = img_temp.reshape([321, 321, 1])

        img_temp = scale_im(img_temp,scale)
        img_temp = flip(img_temp,flip_p)
        images[:,:,0,i] = img_temp

        # Ground-truth path mirrors the image path with wmh naming.
        piece_gt = piece.replace('slices', 'gt_slices').replace('FLAIR', 'wmh')
        gt_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece_gt), makebin = onlyLesions)
        gt_temp = cv2.resize(gt_temp,(321,321) , interpolation = cv2.INTER_NEAREST)
        gt_temp = gt_temp.reshape([321,321, 1])
        gt_temp = scale_gt(gt_temp,scale)
        gt_temp = flip(gt_temp,flip_p)

        gt[:,:,0,i] = gt_temp
        # Network output size for this input scale (same for all slices).
        a = outS(321*scale)

    labels = [resize_label_batch(gt,i) for i in [a,a,a,a]]

    #from dim1 x dim2 x 1 x batch -> batch x 1 x dim1 x dim2
    images = images.transpose((3,2,0,1))
    images = torch.from_numpy(images).float()
    return images, labels
Example 65
Project: pytorch-mri-segmentation-3D   Author: Achilleas   File: train_deeplab3D.py    MIT License 5 votes vote down vote up
def get_data_from_chunk_v2(chunk):
    """Load a batch of FLAIR slices (and WMH ground truth) listed in `chunk`,
    apply one shared random scale plus a per-slice random flip, and return
    tensors ready for training.

    Returns:
        images: FloatTensor of shape (batch, 1, dim, dim).
        labels: list of four label batches resized to the network output size.
    """

    main_folder_path = '../../Data/MS2017a/'
    scans_folder_path = main_folder_path + 'scans/'

    img_type_path = 'pre/FLAIR.nii.gz'
    gt_type_path = 'wmh.nii.gz'

    # One random scale shared by the whole batch; dim is the scaled side.
    scale = random.uniform(0.5, 1.3)
    dim = int(scale*321)

    images = np.zeros((dim,dim, 1,len(chunk)))
    gt = np.zeros((dim,dim,1,len(chunk)))
    for i, piece in enumerate(chunk):
        print(os.path.join(main_folder_path, piece))
        img_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece))
        # Same flip draw is reused for the image and its ground truth below.
        flip_p = random.uniform(0, 1)

        img_temp = cv2.resize(img_temp,(321,321)).astype(float)
        img_temp = img_temp.reshape([321, 321, 1])

        img_temp = scale_im(img_temp,scale)
        img_temp = flip(img_temp,flip_p)
        images[:,:,0,i] = img_temp

        # Ground-truth path mirrors the image path with wmh naming.
        piece_gt = piece.replace('slices', 'gt_slices').replace('FLAIR', 'wmh')
        gt_temp = PP.numpyFromScan(os.path.join(main_folder_path, piece_gt), makebin = onlyLesions)
        gt_temp = cv2.resize(gt_temp,(321,321) , interpolation = cv2.INTER_NEAREST)
        gt_temp = gt_temp.reshape([321,321, 1])
        gt_temp = scale_gt(gt_temp,scale)
        gt_temp = flip(gt_temp,flip_p)

        gt[:,:,0,i] = gt_temp
        # Network output size for this input scale (same for all slices).
        a = outS(321*scale)

    labels = [resize_label_batch(gt,i) for i in [a,a,a,a]]

    #from dim1 x dim2 x 1 x batch -> batch x 1 x dim1 x dim2
    images = images.transpose((3,2,0,1))
    images = torch.from_numpy(images).float()
    return images, labels
Example 66
Project: pytorch-mri-segmentation-3D   Author: Achilleas   File: PP.py    MIT License 5 votes vote down vote up
def getCenterPixelPatch(patch_size, img_list, locs_lesion, locs_other, 
							onlyLesions, main_folder_path, postfix, with_priv = False):
	"""Sample one cubic training patch (and its ground truth) whose center
	class is drawn at random: lesion, "other", or background, each with
	roughly one-third probability (when onlyLesions, "other" collapses into
	background).

	Returns (patch_img, patch_gt, patch_gif), each 1 x d x d x d; patch_gif
	is None unless with_priv is True.
	"""
	b = random.uniform(0.5, 3.5)
	#segm class = 1
	if b < 1.5:
		loc_str = locs_lesion[randint(0, len(locs_lesion) - 1)].rstrip()
	#segm class = 2
	elif b > 1.5 and b < 2.5 and (not onlyLesions):
		loc_str = locs_other[randint(0,len(locs_other) - 1)].rstrip()
	#segm class = 3
	else:
		loc_str = getBackgroundLoc(patch_size, img_list, onlyLesions, main_folder_path)

	#extract patch given folder number, location of top left edge and patch size
	#---------------------------------------------------------------------------
	folder_num_str, x, y, z = parseLocStr(loc_str)
	img_type_path = 'pre/FLAIR' + postfix + '.nii.gz'
	gt_type_path = 'wmh' + postfix + '.nii.gz'

	#read the file
	img_np = numpyFromScan(os.path.join(main_folder_path, 'scans', folder_num_str, img_type_path))
	gt_np = numpyFromScan(os.path.join(main_folder_path, 'scans', folder_num_str, gt_type_path), makebin = onlyLesions)

	#extract the patch (cube of side patch_size anchored at (x, y, z))
	patch_img_np = img_np[x:x+patch_size, y:y+patch_size, z:z+patch_size, :]
	patch_gt_np = gt_np[x:x+patch_size, y:y+patch_size, z:z+patch_size, :]
	
	#reshape to 1 x dim1 x dim2 x dim3
	patch_img_np = patch_img_np.transpose((3,0,1,2))
	patch_gt_np = patch_gt_np.transpose((3,0,1,2))

	#optionally also return the matching GIF parcellation patch
	if with_priv:
		gif_type_path = 'parcellation' + postfix + '.nii.gz'
		gif_np = numpyFromScan(os.path.join(main_folder_path, 'gifs', folder_num_str, gif_type_path))
		patch_gif_np = gif_np[x:x+patch_size, y:y+patch_size, z:z+patch_size, :]
		patch_gif_np = patch_gif_np.transpose((3,0,1,2))
		
		return patch_img_np, patch_gt_np, patch_gif_np
	return patch_img_np, patch_gt_np, None
Example 67
Project: openhatch   Author: campbe13   File: __init__.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def download_delay(self):
        """Return the configured delay, jittered by +/-50% when
        randomization is enabled."""
        if not self.randomize_delay:
            return self.delay
        return random.uniform(0.5 * self.delay, 1.5 * self.delay)
Example 68
Project: PSMNet   Author: JiaRenChang   File: preprocess.py    MIT License 5 votes vote down vote up
def __call__(self, img):
        """Blend img toward its grayscale version by a random factor
        drawn from [0, self.var]."""
        grayscale = Grayscale()(img)
        blend = random.uniform(0, self.var)
        return img.lerp(grayscale, blend)
Example 69
Project: PSMNet   Author: JiaRenChang   File: preprocess.py    MIT License 5 votes vote down vote up
def __call__(self, img):
        gs = img.new().resize_as_(img).zero_()
        alpha = random.uniform(0, self.var)
        return img.lerp(gs, alpha) 
Example 70
Project: PSMNet   Author: JiaRenChang   File: preprocess.py    MIT License 5 votes vote down vote up
def __call__(self, img):
        """Reduce contrast by blending img toward a constant image holding
        its grayscale mean, with a factor drawn from [0, self.var]."""
        grayscale = Grayscale()(img)
        grayscale.fill_(grayscale.mean())
        blend = random.uniform(0, self.var)
        return img.lerp(grayscale, blend)
Example 71
Project: PSMNet   Author: JiaRenChang   File: preprocess.py    MIT License 5 votes vote down vote up
def __call__(self, img):
        """Desaturate img by blending it toward its grayscale version with
        a factor drawn from [0, self.var]."""
        grayscale = Grayscale()(img)
        blend = random.uniform(0, self.var)
        return img.lerp(grayscale, blend)
Example 72
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: detection.py    Apache License 2.0 4 votes vote down vote up
def _random_crop_proposal(self, label, height, width):
        """Propose a cropping area.

        Samples up to `self.max_attempts` (aspect-ratio, height) proposals
        constrained by `self.area_range` and `self.aspect_ratio_range` and
        returns the first that satisfies the overlap constraints as
        (x, y, w, h, new_label); returns an empty tuple when none qualifies.
        """
        from math import sqrt

        # Degenerate image or augmentation disabled: no proposal.
        if not self.enabled or height <= 0 or width <= 0:
            return ()
        min_area = self.area_range[0] * height * width
        max_area = self.area_range[1] * height * width
        for _ in range(self.max_attempts):
            ratio = random.uniform(*self.aspect_ratio_range)
            if ratio <= 0:
                continue
            # h bounds derived from the area bounds at this aspect ratio.
            h = int(round(sqrt(min_area / ratio)))
            max_h = int(round(sqrt(max_area / ratio)))
            if round(max_h * ratio) > width:
                # find smallest max_h satifying round(max_h * ratio) <= width
                max_h = int((width + 0.4999999) / ratio)
            if max_h > height:
                max_h = height
            if h > max_h:
                h = max_h
            if h < max_h:
                # generate random h in range [h, max_h]
                h = random.randint(h, max_h)
            w = int(round(h * ratio))
            assert w <= width

            # trying to fix rounding problems
            area = w * h
            if area < min_area:
                h += 1
                w = int(round(h * ratio))
                area = w * h
            if area > max_area:
                h -= 1
                w = int(round(h * ratio))
                area = w * h
            # Give up on this proposal if rounding pushed it out of bounds.
            if not (min_area <= area <= max_area and 0 <= w <= width and 0 <= h <= height):
                continue

            # Random placement of the (w, h) window inside the image.
            y = random.randint(0, max(0, height - h))
            x = random.randint(0, max(0, width - w))
            if self._check_satisfy_constraints(label, x, y, x + w, y + h, width, height):
                new_label = self._update_labels(label, (x, y, w, h), height, width)
                if new_label is not None:
                    return (x, y, w, h, new_label)
        return ()
Example 73
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: image.py    Apache License 2.0 4 votes vote down vote up
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop src with size. Randomize area and aspect ratio.

    Parameters
    ----------
    src : NDArray
        Input image
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.
    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.

    """
    h, w, _ = src.shape
    src_area = h * w

    # Accept the deprecated `min_area` keyword as an alias for `area`.
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."

    # A scalar area means "at least this fraction", implicit maximum of 1.0.
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    # Up to ten random (area, aspect-ratio) proposals; keep the first that
    # fits inside the source image.
    for _ in range(10):
        target_area = random.uniform(area[0], area[1]) * src_area
        new_ratio = random.uniform(*ratio)

        new_w = int(round(np.sqrt(target_area * new_ratio)))
        new_h = int(round(np.sqrt(target_area / new_ratio)))

        # Swap orientation half the time.
        if random.random() < 0.5:
            new_h, new_w = new_w, new_h

        if new_w <= w and new_h <= h:
            x0 = random.randint(0, w - new_w)
            y0 = random.randint(0, h - new_h)

            out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
            return out, (x0, y0, new_w, new_h)

    # fall back to center_crop
    return center_crop(src, size, interp)
Example 74
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: test_sparse_operator.py    Apache License 2.0 4 votes vote down vote up
def test_contrib_sparse_embedding():
    ''' test sparse embedding operator '''
    def check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, weight_stype):
        # init executor: build the symbol graph and bind it once; gradient is
        # requested only for the weight (the integer indices get no gradient).
        data = mx.sym.Variable("data")
        weight = mx.sym.Variable("embed_weight", stype=weight_stype)
        embed = mx.sym.contrib.SparseEmbedding(data=data, weight=weight, input_dim=in_dim,
                                               output_dim=out_dim, deterministic=deterministic,
                                               name="embed")
        grad_req = {'data': 'null', 'embed_weight': 'write'}
        exe_test = embed.simple_bind(default_context(), grad_req=grad_req, data=(batch,))
        arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
        grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
        # init data: random indices plus a dense one-hot encoding of them, so
        # the expected forward/backward results can be computed below with
        # plain numpy matrix products.
        np_data = np.random.randint(low=0, high=in_dim, size=batch)
        np_onehot = np.zeros((batch, in_dim)).astype(np.float32)
        np_onehot[np.arange(batch), np_data] = 1.0
        arg_map["data"][:] = np_data
        # init grad: random output gradient to feed into backward()
        np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
        grad = mx.nd.zeros(np_grad.shape)
        grad[:] = np_grad
        # weight
        weight = arg_map["embed_weight"]
        for density in densities:
            # update weight based on density (re-randomized per density so the
            # same bound executor is exercised with sparse and dense contents)
            weight[:] = rand_ndarray(weight.shape, weight_stype, density=density)
            # check forward: embedding lookup must equal one_hot . weight
            exe_test.forward(is_train=True)
            assert_almost_equal(exe_test.outputs[0].asnumpy(), np.dot(np_onehot, weight.asnumpy()), atol=1e-4)
            # check backward: weight gradient must equal one_hot^T . out_grad
            exe_test.backward([grad])
            assert_almost_equal(grad_map["embed_weight"].asnumpy(), np.dot(np_onehot.T, grad.asnumpy()), atol=1e-4)
            # run twice to check if the result is deterministic when passing "deterministic=True" to SparseEmbedding
            if deterministic:
                grad_ref = grad_map["embed_weight"].asnumpy()
                exe_test.backward([grad])
                assert_almost_equal(grad_map["embed_weight"].asnumpy(), grad_ref, atol=0, rtol=0)

    densities = [0, 0.5, 1]
    in_dim = 50
    out_dim = 3
    batch = 8
    stypes = ['default', 'row_sparse']
    deterministics = [True, False]
    for stype in stypes:
        for deterministic in deterministics:
            # NOTE(review): the check is invoked twice per configuration —
            # presumably to exercise a repeated run of the whole setup; confirm
            # the duplication is intentional and not a paste error.
            check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, stype)
            check_sparse_embedding(in_dim, out_dim, batch, densities, deterministic, stype) 
Example 75
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: image.py    Apache License 2.0 4 votes vote down vote up
def random_expand(src, max_ratio=4, fill=0, keep_ratio=True):
    """Random expand original image with borders, this is identical to placing
    the original image on a larger canvas.

    Parameters
    ----------
    src : mxnet.nd.NDArray
        The original image with HWC format.
    max_ratio : int or float
        Maximum ratio of the output image on both direction(vertical and horizontal)
    fill : int or float or array-like
        The value(s) for padded borders. If `fill` is numerical type, RGB channels
        will be padded with single value. Otherwise `fill` must have same length
        as image channels, which resulted in padding with per-channel values.
    keep_ratio : bool
        If `True`, will keep output image the same aspect ratio as input.

    Returns
    -------
    mxnet.nd.NDArray
        Augmented image.
    tuple
        Tuple of (offset_x, offset_y, new_width, new_height)

    """
    # A ratio of 1 (or less) means no expansion at all: return untouched.
    if max_ratio <= 1:
        return src, (0, 0, src.shape[1], src.shape[0])

    height, width, channels = src.shape
    expand_x = random.uniform(1, max_ratio)
    expand_y = expand_x if keep_ratio else random.uniform(1, max_ratio)

    canvas_h, canvas_w = int(height * expand_y), int(width * expand_x)
    # pick where the original image lands on the enlarged canvas
    offset_y = random.randint(0, canvas_h - height)
    offset_x = random.randint(0, canvas_w - width)

    # build the canvas: single fill value, or one value per channel
    if isinstance(fill, numeric_types):
        canvas = nd.full(shape=(canvas_h, canvas_w, channels), val=fill, dtype=src.dtype)
    else:
        pad_values = nd.array(fill, dtype=src.dtype, ctx=src.context)
        if pad_values.size != channels:
            raise ValueError("Channel and fill size mismatch, {} vs {}".format(channels, pad_values.size))
        canvas = nd.tile(pad_values.reshape((1, channels)),
                         reps=(canvas_h * canvas_w, 1)).reshape((canvas_h, canvas_w, channels))

    # paste the source image at the sampled offset
    canvas[offset_y:offset_y + height, offset_x:offset_x + width, :] = src
    return canvas, (offset_x, offset_y, canvas_w, canvas_h)
Example 76
Project: soccer-matlab   Author: utra-robosoccer   File: boxstack_pybullet_sim.py    BSD 2-Clause "Simplified" License 4 votes vote down vote up
def Reset(self, reload_urdf=False):
    """Reset the minitaur to its initial states.

    Args:
      reload_urdf: Whether to reload the urdf file. If not, Reset() just place
        the minitaur back to its starting position.
    """
    self.m_actions_taken_since_reset=0
    # per-axis jitter applied to every cube's start position each reset, and
    # the cube half-size used for stacking
    # NOTE(review): boxHalfExtents presumably matches cube_small.urdf (5 cm
    # cube, i.e. 0.025 m half extent) — confirm.
    xPosRange=0.025
    yPosRange=0.025
    boxHalfExtents = 0.025
    
    if reload_urdf:
      # Read the current debug camera and re-aim it at a fixed distance of 0.3.
      # NOTE(review): cameraDistance is read but only the fixed 0.3 is used —
      # confirm the variable is intentionally unused.
      camInfo = self._pybullet_client.getDebugVisualizerCamera()
      cameraDistance=camInfo[10]
      print("cameraYaw=",camInfo[8])
      print("cameraPitch=",camInfo[9])
      print("camtarget=",camInfo[11])
      print("projectionMatrix=",camInfo[3])
      self._pybullet_client.resetDebugVisualizerCamera(cameraDistance=0.3, cameraYaw=camInfo[8], cameraPitch=camInfo[9],cameraTargetPosition=camInfo[11])

      plane = self._pybullet_client.loadURDF("plane.urdf")
      texUid = self._pybullet_client.loadTexture("checker_blue.png")
      self._pybullet_client.changeVisualShape(plane,-1, textureUniqueId = texUid)
      
      
      self._numObjects=4 #random number?
      
      
      self._cubes=[]
      
      red=[0.97,0.25,0.25,1]
      green=[0.41,0.68,0.31,1]
      yellow=[0.92,0.73,0,1]
      blue=[0,0.55,0.81,1]
      colors=[red,green,yellow,blue]
      
      # Spawn the cubes in a vertical stack at the origin; per-episode pose
      # randomization happens in the loop at the bottom of this method.
      for i in range (self._numObjects):
          pos=[0,0,boxHalfExtents + i*2*boxHalfExtents]
          orn = self._pybullet_client.getQuaternionFromEuler([0,0,0])
          orn=[0,0,0,1]
          cube = self._pybullet_client.loadURDF("cube_small.urdf",pos,orn)
          self._pybullet_client.changeVisualShape(cube,-1,rgbaColor=colors[i])
          self._cubes.append(cube)
    
      self._pybullet_client.setGravity(0, 0, -10)
      # Cache the fully-loaded world so later resets can restore it instead of
      # re-loading every URDF.
      self.stateId = self._pybullet_client.saveState()
    else:
      # NOTE(review): self.stateId is only assigned in the reload_urdf branch;
      # the first call must pass reload_urdf=True or this raises
      # AttributeError — confirm the caller guarantees that.
      if (self.stateId>=0):
        self._pybullet_client.restoreState(self.stateId)
    # Re-randomize each cube's x/y position and yaw while keeping the original
    # stacking height per cube (index preserves the stack order).
    index=0
    for i in self._cubes:
      posX = random.uniform(-xPosRange,xPosRange)
      posY = random.uniform(-yPosRange,yPosRange)
      yaw = random.uniform(-math.pi,math.pi)
      pos=[posX,posY,boxHalfExtents + index*2*boxHalfExtents]
      index+=1
      orn = self._pybullet_client.getQuaternionFromEuler([0,0,yaw])
      self._pybullet_client.resetBasePositionAndOrientation(i,pos,orn) 
Example 77
Project: pepperon.ai   Author: JonWiggins   File: cluster.py    MIT License 4 votes vote down vote up
def kmeans_pp(points, center_count, distance_function=euclidian_distance):
    """
    Clusters based on the kmeans++ algorithm

    :param points: all of the datapoints to cluster
    :param center_count: the number of clusters to produce
    :param distance_function: a function that will compare two datapoints

    :return: a dictionary that maps centers to the points in its cluster
    """
    # Bug fixes vs. original:
    #  - seed with the first point itself; `set(points[0])` exploded the point
    #    into its elements, and `set` has no .append() so the loop crashed
    #  - accumulate each element's own probability mass, not the whole list
    #    (`counter += distances_to_centers` was a float += list TypeError)
    #  - enumerate(points) when pairing indexes with elements in the final loop

    # start with any arbitrary center
    centers = [points[0]]

    while len(centers) < center_count:

        # sample the next center with probability proportional to the squared
        # distance to the nearest existing center (the kmeans++ D^2 weighting)
        decider_value = random.uniform(0.0, 1.0)

        distances_to_centers = [
            pow(get_distance_to_center(element, centers, distance_function), 2)
            for element in points
        ]

        # normalize the distances into a probability distribution
        total = sum(distances_to_centers)
        distances_to_centers = [element / total for element in distances_to_centers]

        # walk the CDF until it crosses the uniform draw
        counter = 0.0
        for index, element in enumerate(distances_to_centers):
            counter += element

            if counter >= decider_value:
                centers.append(points[index])
                break

    # make a dict that maps a center to the points in its cluster
    to_return = {}

    for center in centers:
        to_add = set()
        for element in points:
            if get_nearest_center(element, centers, distance_function) == center:
                to_add.add(element)
        to_return[center] = to_add

    return to_return
Example 78
Project: Dumb-Cogs   Author: irdumbs   File: noflippedtables.py    MIT License 4 votes vote down vote up
async def scrutinize_messages(self, message):
    """Watch a channel for flipped tables and politely unflip them.

    Counts table-flip emoticons in *message*, cancels flips that the author
    already unflipped, waits a short random delay, then posts the unflipped
    tables back to the channel.

    Fixes vs. original:
      * declared ``async`` — the body uses ``await``, so a plain ``def`` is a
        SyntaxError (the coroutine marker was evidently lost).
      * the bot-author guard mixed tabs and spaces, which is a TabError in
        Python 3; indentation is now uniform.
    """
    channel = message.channel
    user = message.author
    # never react to other bots
    if hasattr(user, 'bot') and user.bot is True:
        return
    if channel.id not in self.flippedTables:
        self.flippedTables[channel.id] = {}
    # ┬─┬ ┬┬ ┻┻ ┻━┻ ┬───┬ ┻━┻ will leave 3 tables left flipped
    # count flipped tables
    for m in re.finditer('┻━*┻|┬─*┬', message.content):
        t = m.group()
        if '┻' in t and not (message.author.id == self.bot.user.id and self.settings["BOT_EXEMPT"]):
            # a freshly flipped table: start/advance its pending counter
            if t in self.flippedTables[channel.id]:
                self.flippedTables[channel.id][t] += 1
            else:
                self.flippedTables[channel.id][t] = 1
                if not self.settings["ALL_TABLES"]:
                    break
        else:
            # an upright table cancels one pending flip of the same width
            f = t.replace('┬','┻').replace('─','━')
            if f in self.flippedTables[channel.id]:
                if self.flippedTables[channel.id][f] <= 0:
                    del self.flippedTables[channel.id][f]
                else:
                    self.flippedTables[channel.id][f] -= 1
    # wait random time. some tables may be unflipped by now.
    await asyncio.sleep(randfloat(0,1.5))
    tables = ""

    deleteTables = []
    # unflip tables in self.flippedTables[channel.id]
    for t, n in self.flippedTables[channel.id].items():
        unflipped = t.replace('┻','┬').replace('━','─') + " ノ( ゜-゜ノ)" + "\n"
        for i in range(0,n):
            tables += unflipped
            # in case being processed in parallel
            self.flippedTables[channel.id][t] -= 1
        deleteTables.append(t)
    for t in deleteTables:
        del self.flippedTables[channel.id][t]
    if tables != "":
        await self.bot.send_message(channel, tables)
Example 79
Project: CapsAttnNet   Author: rstager   File: gen_images.py    MIT License 4 votes vote down vote up
def generator(width_height=(28, 28), object_scale=0.5,
              width_shift_range=0.25, height_shift_range=0.25,
              scale_range=1, rotate_range=0,
              count=1,
              objects = default_objects):
    """Generate images and labels in a keras data generator style. Image contains simple objects
    in a random pose.

    Args:
        width_height (tuple(width,height)     : The size of the generated images.
        object_scale (float)        : The size objects contained in the images as fraction of image width
        width_shift_range(float)    : The range of random shift in object x position as fraction of image width
        height_shift_range(float)   : The range of random shift in object y position as fraction of image height
        scale_range(float)          : The range of scales of the objects. Objects are scaled (from x to 1)
        rotate_range(float)         : The range of rotation in degrees
        objects (list)              : A description of objects that can be contained in the images.

    Returns:
        generator                   :  The keras data generator. First argument is the image. Second is
                                       the pose (class,x offset,y offset, scale, rotation)
    """
    viewer = Viewer(*width_height)
    # infinite generator: each iteration renders one fresh image
    while 1:
        # drop last frame's geometry before building the next scene
        viewer.geoms=[]
        y_truth=[]
        for i in range(count):
            # pick a random object class to draw
            cls=random.randrange(len(objects))
            obj=[]
            # assemble the object from its primitive parts
            # NOTE(review): objects[cls][2] is presumably a list of
            # (glyph, x, y, scale, rotation_degrees) part tuples, and
            # objects[cls][1] a per-class scale — confirm against
            # default_objects.
            for g,x,y,s,r in objects[cls][2]:
                r*=(np.pi/180)
                # glyph code selects the primitive: B=box, T=triangle, C=circle
                if g in 'B':
                    geom = FilledPolygon([(-0.5,-0.5), (0.5,-0.5), (0.5,0.5), (-0.5,0.5)])
                elif g in 'T':
                    geom = FilledPolygon(([(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5)]))
                elif g in 'C':
                    geom=make_circle(radius=1, res=30)
                geom.add_attr(Transform(translation=(x, y), rotation=r, scale=s))
                geom.add_attr(Transform(scale=(objects[cls][1],objects[cls][1])))
                geom.set_color(.8,.6,.4)
                obj.append(geom)

            # sample the object pose: shift, scale and rotation (radians)
            x= random.uniform(-width_shift_range,width_shift_range)
            y= random.uniform(-height_shift_range,height_shift_range)
            s= random.uniform(scale_range,1)
            r=random.uniform(-rotate_range,rotate_range)*(np.pi/180)
            # overall pixel scale: sampled scale times object size in pixels
            ss=s*object_scale * width_height[0]

            # place the composed object on the canvas; shifts are fractions of
            # the image size, centered at (0.5, 0.5)
            geom= Compound(obj)
            geom.add_attr(Transform(translation=((x + 0.5)*width_height[0], (y+0.5)*width_height[1]),
                                    rotation=r,scale=(ss,ss)))
            viewer.add_geom(geom)
            # ground-truth pose label for this object
            y_truth.append((cls,x,y,s,r))

        img=viewer.render(return_rgb_array = True)
        yield (img, np.array(y_truth)) 
Example 80
Project: BayesRate   Author: schnitzler-j   File: prior.py    MIT License 4 votes vote down vote up
def update_prior(root, up, lo, times, lrates, mrates, rep, lam_r, lam_m, bd, prior_shift):
    """Return the log-priors of a birth-death model state.

    :param root: root age; shift times get a uniform prior over [0, root]
    :param up: upper bound allowed for a shift time
    :param lo: lower bound allowed for a shift time
    :param times: boundaries of the rate intervals (first/last are fixed)
    :param lrates: speciation (lambda) rates, one per interval
    :param mrates: extinction fractions, one per interval, expected in [0, 1)
    :param rep: unused (kept for interface compatibility)
    :param lam_r: 0 for a uniform prior on lrates, otherwise the mean of an
        exponential prior (the rate used is 1/lam_r)
    :param lam_m: (a, b) Beta hyper-parameters for mrates, or a list of such
        pairs (one per rate); malformed values fall back to a uniform prior
    :param bd: 1 to evaluate the extinction prior, anything else skips it
    :param prior_shift: optional per-shift (lower, upper) bounds on times
    :return: tuple (sum log prior times, sum log prior lrates, sum log prior mrates)

    Bug fixes vs. original:
      * lam_r was re-inverted on every loop iteration (lam_r = 1./lam_r inside
        the for loop), so the exponential rate flipped back and forth between
        lam_r and 1/lam_r across rates; it is now inverted exactly once.
      * the un-normalized Beta log-density used (b-1)*log(m) instead of
        (b-1)*log(1-m); the author's own reference comment
        (m**(a-1)*(1-m)**(b-1)) shows the intended form.
    """
    prior_l, prior_m, prior_t = list(), list(), list()

    # UNIFORM PRIOR on the internal shift times (endpoints excluded)
    if len(times) > 2:
        for i in range(1, len(times) - 1):
            if times[i] <= lo or times[i] >= up:
                prior_t.append(-inf)
            else:
                prior_t.append(log(1. / root))  # UNIFORM over [0, root]

            # optional per-shift window constraint
            if len(prior_shift) > 1:
                j = i - 1
                if times[i] <= prior_shift[j * 2 - 1] and times[i] >= prior_shift[j * 2 - 2]:
                    pass
                else:
                    prior_t.append(-inf)
    else:
        prior_t.append(0)

    # speciation rates: uniform on (0, 5] when lam_r == 0, else exponential
    if lam_r != 0:
        # invert once, outside the loop (see docstring bug-fix note)
        lam_r = 1. / lam_r
    for i in lrates:
        if lam_r == 0:
            if i <= 0 or i > 5:
                prior_l.append(-inf)
            else:
                prior_l.append(log(1. / 5))     # UNIFORM
        else:
            prior_l.append(log(lam_r) - lam_r * i)  # EXPONENTIAL

    if bd == 1:
        for i in range(0, len(mrates)):
            # out-of-range extinction fraction forces the prior to -inf
            if mrates[i] >= 1 or mrates[i] < 0: prior_m.append(-inf)
            try:
                # one (a, b) pair per rate, or a single shared pair
                if len(lam_m) > 2: lam = lam_m[i]
                else: lam = lam_m
                a = float(lam[0])
                b = float(lam[1])
                if b > 0:                         # UN-NORMALIZED BETA
                    prior_m.append((a - 1) * log(mrates[i]) + (b - 1) * log(1 - mrates[i]))
                else:
                    l = -log(1 - .95) / a
                    prior_m.append(log(l) - l * mrates[i])    # EXPONENTIAL (semi-PB)
            except:
                # malformed hyper-parameters (or log of a non-positive value):
                # deliberate best-effort fallback to a flat prior
                lam = lam_m
                prior_m.append(log(1.))  # UNIFORM

    return sum(prior_t), sum(prior_l), sum(prior_m)