Python imgaug.seed() Examples

The following are 30 code examples of imgaug.seed(). You can go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module imgaug, or try the search function.
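Before the examples, a minimal sketch of what imgaug.seed() does: it seeds imgaug's global random number generator, so augmenters that rely on it produce repeatable results across runs. The Fliplr augmenter, the dummy image, and the seed value below are arbitrary choices for illustration only.

import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa

# dummy, asymmetric test image (left half white) so that a flip is visible
image = np.zeros((64, 64, 3), dtype=np.uint8)
image[:, :32, :] = 255

aug = iaa.Fliplr(0.5)  # flip horizontally with probability 0.5

ia.seed(1)
first = aug.augment_images([image] * 8)

ia.seed(1)  # re-seeding with the same value reproduces the same augmentations
second = aug.augment_images([image] * 8)

assert all(np.array_equal(a, b) for a, b in zip(first, second))

Several of the test snippets below combine ia.seed() with Python's and NumPy's own seeding (see set_seed in Example #9), because the code under test draws from all three sources of randomness.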
Example #1
Source File: check_seed.py    From ViolenceDetection with Apache License 2.0
def main():
    img = data.astronaut()
    img = misc.imresize(img, (64, 64))
    aug = iaa.Fliplr(0.5)
    unseeded1 = aug.draw_grid(img, cols=8, rows=1)
    unseeded2 = aug.draw_grid(img, cols=8, rows=1)

    ia.seed(1000)
    seeded1 = aug.draw_grid(img, cols=8, rows=1)
    seeded2 = aug.draw_grid(img, cols=8, rows=1)

    ia.seed(1000)
    reseeded1 = aug.draw_grid(img, cols=8, rows=1)
    reseeded2 = aug.draw_grid(img, cols=8, rows=1)

    ia.seed(1001)
    reseeded3 = aug.draw_grid(img, cols=8, rows=1)
    reseeded4 = aug.draw_grid(img, cols=8, rows=1)

    all_rows = np.vstack([unseeded1, unseeded2, seeded1, seeded2, reseeded1, reseeded2, reseeded3, reseeded4])
    misc.imshow(all_rows) 
Example #2
Source File: test_serialization.py    From albumentations with MIT License
def test_lambda_serialization(image, mask, albumentations_bboxes, keypoints, seed, p):
    def vflip_image(image, **kwargs):
        return F.vflip(image)

    def vflip_mask(mask, **kwargs):
        return F.vflip(mask)

    def vflip_bbox(bbox, **kwargs):
        return F.bbox_vflip(bbox, **kwargs)

    def vflip_keypoint(keypoint, **kwargs):
        return F.keypoint_vflip(keypoint, **kwargs)

    aug = A.Lambda(name="vflip", image=vflip_image, mask=vflip_mask, bbox=vflip_bbox, keypoint=vflip_keypoint, p=p)

    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug, lambda_transforms={"vflip": aug})
    set_seed(seed)
    aug_data = aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask, bboxes=albumentations_bboxes, keypoints=keypoints)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"]) 
Example #3
Source File: generate_documentation_images.py    From ViolenceDetection with Apache License 2.0
def chapter_augmenters_coarsedropout():
    aug = iaa.CoarseDropout(0.02, size_percent=0.5)
    run_and_save_augseq(
        "coarsedropout.jpg", aug,
        [ia.quokka(size=(128, 128)) for _ in range(8)], cols=4, rows=2,
        quality=75
    )

    aug = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.02, 0.25))
    run_and_save_augseq(
        "coarsedropout_both_uniform.jpg", aug,
        [ia.quokka(size=(128, 128)) for _ in range(8)], cols=4, rows=2,
        quality=75,
        seed=2
    )

    aug = iaa.CoarseDropout(0.02, size_percent=0.15, per_channel=0.5)
    run_and_save_augseq(
        "coarsedropout_per_channel.jpg", aug,
        [ia.quokka(size=(128, 128)) for _ in range(8)], cols=4, rows=2,
        quality=75,
        seed=2
    ) 
Example #4
Source File: test_serialization.py    From albumentations with MIT License
def test_transform_pipeline_serialization_with_keypoints(seed, image, keypoints, keypoint_format, labels):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose([A.RandomRotate90(), A.OneOf([A.HorizontalFlip(p=0.5), A.VerticalFlip(p=0.5)])]),
                A.Compose([A.Rotate(p=0.5), A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1)]),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ],
        keypoint_params={"format": keypoint_format, "label_fields": ["labels"]},
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, keypoints=keypoints, labels=labels)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints, labels=labels)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"]) 
Example #5
Source File: test_serialization.py    From albumentations with MIT License
def test_transform_pipeline_serialization(seed, image, mask):
    aug = A.Compose(
        [
            A.OneOrOther(
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1024), height=512, width=512, p=1),
                        A.OneOf(
                            [
                                A.RandomSizedCrop(min_max_height=(256, 512), height=384, width=384, p=0.5),
                                A.RandomSizedCrop(min_max_height=(256, 512), height=512, width=512, p=0.5),
                            ]
                        ),
                    ]
                ),
                A.Compose(
                    [
                        A.Resize(1024, 1024),
                        A.RandomSizedCrop(min_max_height=(256, 1025), height=256, width=256, p=1),
                        A.OneOf([A.HueSaturationValue(p=0.5), A.RGBShift(p=0.7)], p=1),
                    ]
                ),
            ),
            A.HorizontalFlip(p=1),
            A.RandomBrightnessContrast(p=0.5),
        ]
    )
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"]) 
Example #6
Source File: generate_documentation_images.py    From ViolenceDetection with Apache License 2.0
def chapter_augmenters_sometimes():
    aug = iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=2.0))
    run_and_save_augseq(
        "sometimes.jpg", aug,
        [ia.quokka(size=(64, 64)) for _ in range(16)], cols=8, rows=2,
        seed=2
    )

    aug = iaa.Sometimes(
        0.5,
        iaa.GaussianBlur(sigma=2.0),
        iaa.Sequential([iaa.Affine(rotate=45), iaa.Sharpen(alpha=1.0)])
    )
    run_and_save_augseq(
        "sometimes_if_else.jpg", aug,
        [ia.quokka(size=(64, 64)) for _ in range(16)], cols=8, rows=2
    ) 
Example #7
Source File: generate_documentation_images.py    From ViolenceDetection with Apache License 2.0
def run_and_save_augseq(filename, augseq, images, cols, rows, quality=75, seed=1):
    ia.seed(seed)
    # augseq may be a single seq (applied to all images) or a list (one seq per
    # image).
    # use type() here instead of isinstance, because otherwise Sequential is
    # also interpreted as a list
    if type(augseq) == list:
        # one augmenter per image specified
        assert len(augseq) == len(images)
        images_aug = [augseq[i].augment_image(images[i]) for i in range(len(images))]
    else:
    # calling augment_image() N times (once per image) is critical here to
        # get a random order in Sequential
        images_aug = [augseq.augment_image(images[i]) for i in range(len(images))]
    save(
        "overview_of_augmenters",
        filename,
        grid(images_aug, cols=cols, rows=rows),
        quality=quality
    ) 
Example #8
Source File: test_serialization.py    From albumentations with MIT License
def test_augmentations_serialization_to_file_with_custom_parameters(
    augmentation_cls, params, p, seed, image, mask, always_apply, data_format
):
    with patch("builtins.open", OpenMock()):
        aug = augmentation_cls(p=p, always_apply=always_apply, **params)
        filepath = "serialized.{}".format(data_format)
        A.save(aug, filepath, data_format=data_format)
        deserialized_aug = A.load(filepath, data_format=data_format)
        set_seed(seed)
        aug_data = aug(image=image, mask=mask)
        set_seed(seed)
        deserialized_aug_data = deserialized_aug(image=image, mask=mask)
        assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
        assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"]) 
Example #9
Source File: test_serialization.py    From albumentations with MIT License
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed) 
Example #10
Source File: test_serialization.py    From albumentations with MIT License
def test_augmentations_for_bboxes_serialization(
    augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply
):
    aug = augmentation_cls(p=p, always_apply=always_apply, **params)
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, bboxes=albumentations_bboxes)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"]) 
Example #11
Source File: test_serialization.py    From albumentations with MIT License
def test_imgaug_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):
    aug = augmentation_cls(p=p, always_apply=always_apply, **params)
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    ia.seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    ia.seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"]) 
Example #12
Source File: test_serialization.py    From albumentations with MIT License
def test_imgaug_augmentations_for_bboxes_serialization(
    augmentation_cls, params, p, seed, image, albumentations_bboxes, always_apply
):
    aug = augmentation_cls(p=p, always_apply=always_apply, **params)
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    ia.seed(seed)
    aug_data = aug(image=image, bboxes=albumentations_bboxes)
    set_seed(seed)
    ia.seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, bboxes=albumentations_bboxes)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["bboxes"], deserialized_aug_data["bboxes"]) 
Example #13
Source File: test_serialization.py    From albumentations with MIT License
def test_imgaug_augmentations_for_keypoints_serialization(
    augmentation_cls, params, p, seed, image, keypoints, always_apply
):
    aug = augmentation_cls(p=p, always_apply=always_apply, **params)
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    ia.seed(seed)
    aug_data = aug(image=image, keypoints=keypoints)
    set_seed(seed)
    ia.seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, keypoints=keypoints)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["keypoints"], deserialized_aug_data["keypoints"]) 
Example #14
Source File: test_serialization.py    From albumentations with MIT License
def test_image_only_crop_around_bbox_augmentation_serialization(p, seed, image, always_apply):
    aug = A.RandomCropNearBBox(p=p, always_apply=always_apply, max_part_shift=0.15)
    annotations = {"image": image, "cropping_bbox": [-59, 77, 177, 231]}
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(**annotations)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(**annotations)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"]) 
Example #15
Source File: test_serialization.py    From albumentations with MIT License
def test_additional_targets_for_image_only_serialization(augmentation_cls, params, image, seed):
    aug = A.Compose([augmentation_cls(always_apply=True, **params)], additional_targets={"image2": "image"})
    image2 = image.copy()

    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, image2=image2)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, image2=image2)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["image2"], deserialized_aug_data["image2"]) 
Example #16
Source File: train.py    From self-driving-truck with MIT License
def __init__(self, val, queue_size, augseq, nb_workers, threaded=False):
        self.queue = multiprocessing.Queue(queue_size)
        self.workers = []
        for i in range(nb_workers):
            seed = random.randint(0, 10**6)
            augseq_worker = augseq.deepcopy()
            if threaded:
                worker = threading.Thread(target=self._load_batches, args=(val, self.queue, augseq_worker, None))
            else:
                worker = multiprocessing.Process(target=self._load_batches, args=(val, self.queue, augseq_worker, seed))
            worker.daemon = True
            worker.start()
            self.workers.append(worker) 
Example #17
Source File: train.py    From self-driving-truck with MIT License
def _load_batches(self, val, queue, augseq_worker, seed):
        if seed is not None:  # seed only workers that received an explicit seed (threaded workers pass None)
            random.seed(seed)
            np.random.seed(seed)
            augseq_worker.reseed(seed)
            ia.seed(seed)
        memory = replay_memory.ReplayMemory.create_instance_reinforced(val=val)

        while True:
            batch = load_random_batch(memory, augseq_worker, BATCH_SIZE)
            queue.put(pickle.dumps(batch, protocol=-1)) 
Example #18
Source File: batching.py    From self-driving-truck with MIT License
def __init__(self, dataset, dataset_autogen, queue_size, augseq, nb_workers, threaded=False):
        self.queue = multiprocessing.Queue(queue_size)
        self.workers = []
        for i in range(nb_workers):
            seed = random.randint(0, 10**6)
            augseq_worker = augseq.deepcopy()
            if threaded:
                worker = threading.Thread(target=self._load_batches, args=(dataset, dataset_autogen, self.queue, augseq_worker, None))
            else:
                worker = multiprocessing.Process(target=self._load_batches, args=(dataset, dataset_autogen, self.queue, augseq_worker, seed))
            worker.daemon = True
            worker.start()
            self.workers.append(worker) 
Example #19
Source File: batching.py    From self-driving-truck with MIT License
def _load_batches(self, dataset, dataset_autogen, queue, augseq_worker, seed):
        if seed is not None:
            random.seed(seed)
            np.random.seed(seed)
            augseq_worker.reseed(seed)
            ia.seed(seed)

        while True:
            batch = create_batch(dataset, dataset_autogen, augseq_worker)
            queue.put(pickle.dumps(batch, protocol=-1)) 
Example #20
Source File: batching.py    From self-driving-truck with MIT License
def __init__(self, batch_loader, queue_size, nb_workers, threaded=False):
        self.queue = multiprocessing.Queue(queue_size)
        self.workers = []
        self.exit_signal = multiprocessing.Event()
        for i in range(nb_workers):
            seed = random.randint(1, 10**6)
            if threaded:
                worker = threading.Thread(target=self._load_batches, args=(batch_loader, self.queue, self.exit_signal, None))
            else:
                worker = multiprocessing.Process(target=self._load_batches, args=(batch_loader, self.queue, self.exit_signal, seed))
            worker.daemon = True
            worker.start()
            self.workers.append(worker) 
Example #21
Source File: test_serialization.py    From albumentations with MIT License
def test_augmentations_serialization_with_custom_parameters(
    augmentation_cls, params, p, seed, image, mask, always_apply
):
    aug = augmentation_cls(p=p, always_apply=always_apply, **params)
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"]) 
Example #22
Source File: test_serialization.py    From albumentations with MIT License
def test_augmentations_serialization(augmentation_cls, params, p, seed, image, mask, always_apply):
    aug = augmentation_cls(p=p, always_apply=always_apply, **params)
    serialized_aug = A.to_dict(aug)
    deserialized_aug = A.from_dict(serialized_aug)
    set_seed(seed)
    aug_data = aug(image=image, mask=mask)
    set_seed(seed)
    deserialized_aug_data = deserialized_aug(image=image, mask=mask)
    assert np.array_equal(aug_data["image"], deserialized_aug_data["image"])
    assert np.array_equal(aug_data["mask"], deserialized_aug_data["mask"]) 
Example #23
Source File: test_imgaug.py    From imgaug with MIT License
def test_seed(mock_seed):
    ia.seed(10017)
    mock_seed.assert_called_once_with(10017) 
Example #24
Source File: test.py    From ViolenceDetection with Apache License 2.0
def reseed(seed=0):
    ia.seed(seed)
    np.random.seed(seed)
    random.seed(seed) 
Example #25
Source File: generate_documentation_images.py    From ViolenceDetection with Apache License 2.0
def chapter_examples_bounding_boxes_projection():
    import imgaug as ia
    from imgaug import augmenters as iaa

    ia.seed(1)

    # Define image with two bounding boxes
    image = ia.quokka(size=(256, 256))
    bbs = ia.BoundingBoxesOnImage([
        ia.BoundingBox(x1=25, x2=75, y1=25, y2=75),
        ia.BoundingBox(x1=100, x2=150, y1=25, y2=75)
    ], shape=image.shape)

    # Rescale image and bounding boxes
    image_rescaled = ia.imresize_single_image(image, (512, 512))
    bbs_rescaled = bbs.on(image_rescaled)

    # Draw image before/after rescaling and with rescaled bounding boxes
    image_bbs = bbs.draw_on_image(image, thickness=2)
    image_rescaled_bbs = bbs_rescaled.draw_on_image(image_rescaled, thickness=2)

    # ------------

    save(
        "examples_bounding_boxes",
        "projection.jpg",
        grid([image_bbs, image_rescaled_bbs], cols=2, rows=1),
        quality=75
    ) 
Example #26
Source File: data_agumentation.py    From ICPR_TextDection with GNU General Public License v3.0
def data_agumentation(img, gt_bbox, operation_obj, txts=None, save_flag=None):
    ia.seed(int((time.time() * 1000) % 100000))
    shape = np.shape(gt_bbox)
    [h, w, _] = np.shape(img)
    if shape[1] == 8:
        bboxes = np.reshape(gt_bbox, [-1, 4, 2])
    else:
        bboxes = gt_bbox
    keypoints_on_images = []
    keypoints_imgaug_obj = []
    # print bboxes
    # print np.shape(bboxes)
    for key_points in bboxes:
        # print key_points
        for key_point in key_points:
            keypoints_imgaug_obj.append(ia.Keypoint(x=key_point[0], y=key_point[1]))
    keypoints_on_images.append(ia.KeypointsOnImage(keypoints_imgaug_obj, shape=img.shape))

    seq_det = operation_obj.to_deterministic()

    img_aug = seq_det.augment_image(img)
    key_points_aug = seq_det.augment_keypoints(keypoints_on_images)
    key_points_after = []
    for idx, (keypoints_before, keypoints_after) in enumerate(zip(keypoints_on_images, key_points_aug)):
        for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
            keypoint.x = keypoint.x if keypoint.x < w else w
            keypoint.x = keypoint.x if keypoint.x > 0 else 0
            keypoint.y = keypoint.y if keypoint.y < h else h
            keypoint.y = keypoint.y if keypoint.y > 0 else 0
            key_points_after.append([keypoint.x, keypoint.y])
    # print np.shape(key_points_after)
    key_points_after = np.reshape(key_points_after, [-1, 4, 2])
    if save_flag:
        save_gt_file('./rotated_10.txt', np.reshape(key_points_after, [-1, 8]), txts=txts)
        cv2.imwrite('./rotated_10.png', img_aug)
        vis_img_bbox('./rotated_10.png', './rotated_10.txt')
    return img_aug, np.asarray(key_points_after, np.float32) 
Example #27
Source File: test_imgaug.py    From imgaug with MIT License
def test_new_random_state__use_seed(mock_rng):
    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.simplefilter("always")

        _ = ia.new_random_state(seed=1)

    mock_rng.assert_called_once_with(1)
    assert len(caught_warnings) == 1
    assert "is deprecated" in str(caught_warnings[-1].message) 
Example #28
Source File: test_imgaug.py    From imgaug with MIT License
def test_new_random_state__induce_fully_random(mock_rng):
    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.simplefilter("always")

        _ = ia.new_random_state(seed=None, fully_random=True)

    assert mock_rng.create_fully_random.call_count == 1
    assert len(caught_warnings) == 1
    assert "is deprecated" in str(caught_warnings[-1].message) 
Example #29
Source File: test_imgaug.py    From imgaug with MIT License
def test_new_random_state__induce_pseudo_random(mock_rng):
    with warnings.catch_warnings(record=True) as caught_warnings:
        warnings.simplefilter("always")

        _ = ia.new_random_state(seed=None, fully_random=False)

    assert mock_rng.create_pseudo_random_.call_count == 1
    assert len(caught_warnings) == 1
    assert "is deprecated" in str(caught_warnings[-1].message) 
Example #30
Source File: generate_documentation_images.py    From ViolenceDetection with Apache License 2.0
def chapter_parameters_discrete():
    ia.seed(1)

    # -----------------------
    # Binomial
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Binomial(0.5),
        iap.Binomial(0.9)
    ]
    gridarr = draw_distributions_grid(params, rows=1)
    save(
        "parameters",
        "continuous_binomial.jpg",
        gridarr,
        quality=PARAMETER_DEFAULT_QUALITY
    )

    # -----------------------
    # DiscreteUniform
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.DiscreteUniform(0, 10),
        iap.DiscreteUniform(-10, 10),
        iap.DiscreteUniform([-10, -9, -8, -7], 10),
        iap.DiscreteUniform((-10, -7), 10)
    ]
    gridarr = draw_distributions_grid(params)
    save(
        "parameters",
        "continuous_discreteuniform.jpg",
        gridarr,
        quality=PARAMETER_DEFAULT_QUALITY
    )

    # -----------------------
    # Poisson
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Poisson(1),
        iap.Poisson(2.5),
        iap.Poisson((1, 2.5)),
        iap.RandomSign(iap.Poisson(2.5))
    ]
    gridarr = draw_distributions_grid(params)
    save(
        "parameters",
        "continuous_poisson.jpg",
        gridarr,
        quality=PARAMETER_DEFAULT_QUALITY
    )