Java Code Examples for reactor.core.scheduler.Scheduler#dispose()

The following examples show how to use reactor.core.scheduler.Scheduler#dispose(). They are extracted from open source projects; the source file, the project it comes from, and its license are noted above each example.
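Before the project-specific examples, here is a minimal, self-contained sketch of the pattern most of these tests follow: create a dedicated Scheduler, run work on it, and call dispose() in a finally block so its threads are released even when an assertion fails. This sketch is not taken from any of the projects below, and the class name SchedulerDisposeSketch is made up for illustration. Note that a disposed Scheduler rejects newly submitted tasks, which Example 4 below relies on.

import java.time.Duration;

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class SchedulerDisposeSketch {

    public static void main(String[] args) {
        // A dedicated single-threaded scheduler; its worker thread stays alive until disposed.
        Scheduler scheduler = Schedulers.newSingle("sketch");
        try {
            Flux.range(1, 3)
                .publishOn(scheduler) // switch downstream execution to the scheduler's thread
                .doOnNext(i -> System.out.println(Thread.currentThread().getName() + " -> " + i))
                .blockLast(Duration.ofSeconds(5));
        }
        finally {
            // Release the scheduler's resources; tasks scheduled afterwards are rejected.
            scheduler.dispose();
        }
    }
}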
Example 1
Source File: SimpleFifoPoolTest.java    From reactor-pool with Apache License 2.0
@Test
@Tag("loops")
void acquireReleaseRaceWithMinSize_loop() {
    final Scheduler racer = Schedulers.fromExecutorService(Executors.newFixedThreadPool(2));
    AtomicInteger newCount = new AtomicInteger();
    try {
        PoolConfig<PoolableTest> testConfig = from(Mono.fromCallable(() -> new PoolableTest(newCount.getAndIncrement())))
                .sizeBetween(4, 5)
                .buildConfig();
        SimpleFifoPool<PoolableTest> pool = new SimpleFifoPool<>(testConfig);

        for (int i = 0; i < 100; i++) {
            RaceTestUtils.race(() -> pool.acquire().block().release().block(),
                    () -> pool.acquire().block().release().block(),
                    racer);
        }
        //we expect that only 4 elements were created
        assertThat(newCount).as("elements created in total").hasValue(4);
    }
    finally {
        racer.dispose();
    }
}
 
Example 2
Source File: FluxProcessorTest.java    From reactor-core with Apache License 2.0
@Test
public void testSubmitSession() throws Exception {
	FluxIdentityProcessor<Integer> processor = EmitterProcessor.create();
	AtomicInteger count = new AtomicInteger();
	CountDownLatch latch = new CountDownLatch(1);
	Scheduler scheduler = Schedulers.parallel();
	processor.publishOn(scheduler)
	         .delaySubscription(Duration.ofMillis(1000))
	         .limitRate(1)
	         .subscribe(d -> {
		         count.incrementAndGet();
		         latch.countDown();
	         });

	FluxSink<Integer> session = processor.sink();
	session.next(1);
	//System.out.println(emission);
	session.complete();

	latch.await(5, TimeUnit.SECONDS);
	Assert.assertTrue("latch : " + count, count.get() == 1);
	scheduler.dispose();
}
 
Example 3
Source File: FluxTests.java    From reactor-core with Apache License 2.0
@Test
public void consistentMultithreadingWithPartition() throws InterruptedException {
	Scheduler supplier1 = Schedulers.newParallel("groupByPool", 2);
	Scheduler supplier2 = Schedulers.newParallel("partitionPool", 5);

	CountDownLatch latch = new CountDownLatch(10);

	/*Disposable c = */Flux.range(1, 10)
	                     .groupBy(n -> n % 2 == 0)
	                     .flatMap(stream -> stream.publishOn(supplier1)
	                                            .log("groupBy-" + stream.key()))
	                     .parallel(5)
	                     .runOn(supplier2)
	                     .sequential()
	                     .publishOn(asyncGroup)
	                     .log("join")
	                     .subscribe(t -> {
		                   latch.countDown();
	                   });


	latch.await(30, TimeUnit.SECONDS);
	assertThat("Not totally dispatched: " + latch.getCount(), latch.getCount() == 0);
	supplier1.dispose();
	supplier2.dispose();
}
 
Example 4
Source File: FluxBufferTimeoutTest.java    From reactor-core with Apache License 2.0
@Test
public void rejectedOnNextLeadsToOnError() {
	Scheduler scheduler = Schedulers.newSingle("rejectedOnNextLeadsToOnError");
	scheduler.dispose();

	StepVerifier.create(Flux.just(1, 2, 3)
	                        .bufferTimeout(4, Duration.ofMillis(500), scheduler))
	            .expectError(RejectedExecutionException.class)
	            .verify(Duration.ofSeconds(1));
}
 
Example 5
Source File: MonoRetryWhenTest.java    From reactor-core with Apache License 2.0
@Test
public void monoRetryBackoffRetriesOnGivenScheduler() {
	//the monoRetryBackoffWithGivenScheduler above is not suitable to verify the retry scheduler,
	// as VTS is akin to immediate() and doesn't really change the Thread
	Scheduler backoffScheduler = Schedulers.newSingle("backoffScheduler");
	String main = Thread.currentThread().getName();
	final IllegalStateException exception = new IllegalStateException("boom");
	List<String> threadNames = new ArrayList<>(4);
	try {
		StepVerifier.create(Mono.error(exception)
		                        .doOnError(e -> threadNames.add(Thread.currentThread().getName().replaceFirst("-\\d+", "")))
		                        .retryWhen(Retry.backoff(2, Duration.ofMillis(10))
				                        .maxBackoff(Duration.ofMillis(100))
				                        .jitter(0.5d)
				                        .scheduler(backoffScheduler)
		                        )
		)
		            .expectErrorSatisfies(e -> assertThat(e).isInstanceOf(IllegalStateException.class)
		                                                    .hasMessage("Retries exhausted: 2/2")
		                                                    .hasCause(exception))
		            .verify(Duration.ofMillis(200));

		assertThat(threadNames)
				.as("retry runs on backoffScheduler")
				.containsExactly(main, "backoffScheduler", "backoffScheduler");
	}
	finally {
		backoffScheduler.dispose();
	}
}
 
Example 6
Source File: FluxSpecTests.java    From reactor-core with Apache License 2.0
@Test
public void whenProcessorIsStreamed() {
//	"When a processor is streamed"
//	given: "a source composable and an async downstream"
	FluxIdentityProcessor<Integer> source = Processors.replayAll();
	Scheduler scheduler = Schedulers.newParallel("test", 2);

	try {
		Mono<List<Integer>> res = source.subscribeOn(scheduler)
		                                .delaySubscription(Duration.ofMillis(1L))
		                                .log("streamed")
		                                .map(it -> it * 2)
		                                .buffer()
		                                .publishNext();

		res.subscribe();

//	when: "the source accepts a value"
		source.onNext(1);
		source.onNext(2);
		source.onNext(3);
		source.onNext(4);
		source.onComplete();

//	then: "the res is passed on"
		assertThat(res.block()).containsExactly(2, 4, 6, 8);
	}
	finally {
		scheduler.dispose();
	}
}
 
Example 7
Source File: FluxRetryWhenTest.java    From reactor-core with Apache License 2.0
@Test
public void fluxRetryBackoffRetriesOnGivenScheduler() {
	//the fluxRetryBackoffWithSpecificScheduler above is not suitable to verify the retry scheduler, as VTS is akin to immediate()
	//and doesn't really change the Thread
	Scheduler backoffScheduler = Schedulers.newSingle("backoffScheduler");
	String main = Thread.currentThread().getName();
	final IllegalStateException exception = new IllegalStateException("boom");
	List<String> threadNames = new ArrayList<>(4);
	try {
		StepVerifier.create(Flux.concat(Flux.range(0, 2), Flux.error(exception))
		                        .doOnError(e -> threadNames.add(Thread.currentThread().getName().replaceAll("-\\d+", "")))
		                        .retryWhen(Retry
				                        .backoff(2, Duration.ofMillis(10))
				                        .maxBackoff(Duration.ofMillis(100))
				                        .jitter(0.5d)
				                        .scheduler(backoffScheduler)
		                        )
		)
		            .expectNext(0, 1, 0, 1, 0, 1)
		            .expectErrorSatisfies(e -> assertThat(e).isInstanceOf(IllegalStateException.class)
		                                                    .hasMessage("Retries exhausted: 2/2")
		                                                    .hasCause(exception))
		            .verify(Duration.ofMillis(200));

		assertThat(threadNames)
				.as("retry runs on backoffScheduler")
				.containsExactly(main, "backoffScheduler", "backoffScheduler");
	}
	finally {
		backoffScheduler.dispose();
	}
}
 
Example 8
Source File: SimpleFifoPoolTest.java    From reactor-pool with Apache License 2.0
@SuppressWarnings("FutureReturnValueIgnored")
void allocatedReleasedOrAbortedIfCancelRequestRace(int round, AtomicInteger newCount, AtomicInteger releasedCount, boolean cancelFirst) throws InterruptedException {
    Scheduler scheduler = Schedulers.newParallel("poolable test allocator");
    final ExecutorService executorService = Executors.newFixedThreadPool(2);

    try {

        PoolConfig<PoolableTest> testConfig = poolableTestConfig(0, 1,
                Mono.defer(() -> Mono.delay(Duration.ofMillis(50)).thenReturn(new PoolableTest(newCount.incrementAndGet())))
                    .subscribeOn(scheduler),
                pt -> releasedCount.incrementAndGet());
        SimpleFifoPool<PoolableTest> pool = new SimpleFifoPool<>(testConfig);

        //acquire the only element and capture the subscription, don't request just yet
        CountDownLatch latch = new CountDownLatch(1);
        final BaseSubscriber<PooledRef<PoolableTest>> baseSubscriber = new BaseSubscriber<PooledRef<PoolableTest>>() {
            @Override
            protected void hookOnSubscribe(Subscription subscription) {
                //don't request
                latch.countDown();
            }
        };
        pool.acquire().subscribe(baseSubscriber);
        latch.await();

        if (cancelFirst) {
            executorService.submit(baseSubscriber::cancel);
            executorService.submit(baseSubscriber::requestUnbounded);
        }
        else {
            executorService.submit(baseSubscriber::requestUnbounded);
            executorService.submit(baseSubscriber::cancel);
        }

        //release due to cancel is async, give it ample time
        await().atMost(200, TimeUnit.MILLISECONDS).with().pollInterval(10, TimeUnit.MILLISECONDS)
               .untilAsserted(() -> assertThat(releasedCount)
                       .as("released vs created in round " + round + (cancelFirst? " (cancel first)" : " (request first)"))
                       .hasValue(newCount.get()));
    }
    finally {
        scheduler.dispose();
        executorService.shutdownNow();
    }
}
 
Example 9
Source File: SimpleFifoPoolTest.java    From reactor-pool with Apache License 2.0
void defaultThreadDeliveringWhenNoElementsAndFullAndRaceDrain(int round, AtomicInteger releaserWins, AtomicInteger borrowerWins) throws InterruptedException {
    AtomicReference<String> threadName = new AtomicReference<>();
    AtomicInteger newCount = new AtomicInteger();
    Scheduler acquire1Scheduler = Schedulers.newSingle("acquire1");
    Scheduler racerReleaseScheduler = Schedulers.fromExecutorService(
            Executors.newSingleThreadScheduledExecutor((r -> new Thread(r,"racerRelease"))));
    Scheduler racerAcquireScheduler = Schedulers.fromExecutorService(
            Executors.newSingleThreadScheduledExecutor((r -> new Thread(r,"racerAcquire"))));
    Scheduler allocatorScheduler = Schedulers.newParallel("poolable test allocator");

    try {
        PoolConfig<PoolableTest> testConfig = poolableTestConfig(1, 1,
                Mono.fromCallable(() -> new PoolableTest(newCount.getAndIncrement()))
                    .subscribeOn(allocatorScheduler));

        SimpleFifoPool<PoolableTest> pool = new SimpleFifoPool<>(testConfig);

        //the pool is started with one element and has capacity for 1.
        //we first acquire that element so that the next acquire will wait for a release
        PooledRef<PoolableTest> uniqueSlot = pool.acquire().block();
        assertThat(uniqueSlot).isNotNull();

        //we prepare next acquire
        Mono<PoolableTest> borrower = Mono.fromDirect(pool.withPoolable(Mono::just));
        CountDownLatch latch = new CountDownLatch(3);

        //we actually perform the acquire from its dedicated thread, capturing the thread on which the element will actually get delivered
        acquire1Scheduler.schedule(() -> borrower.subscribe(v -> threadName.set(Thread.currentThread().getName())
                , e -> latch.countDown(), latch::countDown));

        //in parallel, we'll both attempt concurrent acquire AND release the unique element (each on their dedicated threads)
        racerAcquireScheduler.schedule(() -> {
            pool.acquire().block();
            latch.countDown();
        }, 100, TimeUnit.MILLISECONDS);
        racerReleaseScheduler.schedule(() -> {
            uniqueSlot.release().block();
            latch.countDown();
        }, 100, TimeUnit.MILLISECONDS);

        assertThat(latch.await(1, TimeUnit.SECONDS)).as("1s").isTrue();

        assertThat(newCount).as("created 1 poolable in round " + round).hasValue(1);

        //we expect that sometimes the race will let the second borrower thread drain, which would mean the first borrower
        //gets the element delivered from the racerAcquire thread. The rest of the time it gets drained by racerRelease.
        if (threadName.get().startsWith("racerRelease")) releaserWins.incrementAndGet();
        else if (threadName.get().startsWith("racerAcquire")) borrowerWins.incrementAndGet();
        else System.out.println(threadName.get());
    }
    finally {
        acquire1Scheduler.dispose();
        racerAcquireScheduler.dispose();
        racerReleaseScheduler.dispose();
        allocatorScheduler.dispose();
    }
}
 
Example 10
Source File: SimpleLifoPoolTest.java    From reactor-pool with Apache License 2.0
void consistentThreadDeliveringWhenNoElementsAndFullAndRaceDrain(int i) throws InterruptedException {
    Scheduler allocatorScheduler = Schedulers.newParallel("poolable test allocator");
    Scheduler deliveryScheduler = Schedulers.newSingle("delivery");
    Scheduler acquire1Scheduler = Schedulers.newSingle("acquire1");
    Scheduler racerScheduler = Schedulers.fromExecutorService(Executors.newFixedThreadPool(2, r -> new Thread(r, "racer")));
    try {
        AtomicReference<String> threadName = new AtomicReference<>();
        AtomicInteger newCount = new AtomicInteger();

        PoolConfig<PoolableTest> testConfig = poolableTestConfig(1, 1,
                Mono.fromCallable(() -> new PoolableTest(newCount.getAndIncrement()))
                    .subscribeOn(allocatorScheduler),
                deliveryScheduler);
        SimpleLifoPool<PoolableTest> pool = new SimpleLifoPool<>(testConfig);

        //the pool is started with one element and has capacity for 1.
        //we first acquire that element so that the next acquire will wait for a release
        PooledRef<PoolableTest> uniqueSlot = pool.acquire().block();
        assertThat(uniqueSlot).isNotNull();

        //we prepare two more acquires
        Mono<PooledRef<PoolableTest>> firstBorrower = pool.acquire();
        Mono<PooledRef<PoolableTest>> secondBorrower = pool.acquire();

        CountDownLatch latch = new CountDownLatch(1);

        //we'll enqueue a first acquire from a first thread
        //in parallel, we'll race a second acquire AND release the unique element (each on their dedicated threads)
        //we expect the release might sometimes win, which would mean acquire 1 gets served; mostly we want to verify the delivery thread though
        acquire1Scheduler.schedule(() -> firstBorrower.subscribe(v -> threadName.compareAndSet(null, Thread.currentThread().getName())
                , e -> latch.countDown(), latch::countDown));
        RaceTestUtils.race(() -> secondBorrower.subscribe(v -> threadName.compareAndSet(null, Thread.currentThread().getName())
                , e -> latch.countDown(), latch::countDown),
                uniqueSlot.release()::block);

        latch.await(1, TimeUnit.SECONDS);

        //we expect that, consistently, the poolable is delivered on a `delivery` thread
        assertThat(threadName.get()).as("round #" + i).startsWith("delivery-");

        //we expect that only 1 element was created
        assertThat(newCount).as("elements created in round " + i).hasValue(1);
    }
    finally {
        allocatorScheduler.dispose();
        deliveryScheduler.dispose();
        racerScheduler.dispose();
        acquire1Scheduler.dispose();
    }
}
 
Example 11
Source File: SimpleLifoPoolTest.java    From reactor-pool with Apache License 2.0
void consistentThreadDeliveringWhenNoElementsAndFullAndRaceDrain(int i) throws InterruptedException {
    Scheduler allocatorScheduler = Schedulers.newParallel("poolable test allocator");
    Scheduler deliveryScheduler = Schedulers.newSingle("delivery");
    Scheduler acquire1Scheduler = Schedulers.newSingle("acquire1");
    Scheduler racerScheduler = Schedulers.fromExecutorService(
            Executors.newFixedThreadPool(2, (r -> new Thread(r,"racer"))));

    try {
        AtomicReference<String> threadName = new AtomicReference<>();
        AtomicInteger newCount = new AtomicInteger();


        PoolConfig<PoolableTest> testConfig = poolableTestConfig(1, 1,
                Mono.fromCallable(() -> new PoolableTest(newCount.getAndIncrement()))
                    .subscribeOn(allocatorScheduler),
                deliveryScheduler);
        SimpleLifoPool<PoolableTest> pool = new SimpleLifoPool<>(testConfig);

        //the pool is started with one element and has capacity for 1.
        //we first acquire that element so that the next acquire will wait for a release
        PooledRef<PoolableTest> uniqueSlot = pool.acquire().block();
        assertThat(uniqueSlot).isNotNull();

        //we prepare next acquire
        Mono<PoolableTest> firstBorrower = Mono.fromDirect(pool.withPoolable(Mono::just));
        Mono<PoolableTest> otherBorrower = Mono.fromDirect(pool.withPoolable(Mono::just));

        CountDownLatch latch = new CountDownLatch(3);

        //we actually perform the acquire from its dedicated thread, capturing the thread on which the element will actually get delivered
        acquire1Scheduler.schedule(() -> firstBorrower.subscribe(v -> threadName.set(Thread.currentThread().getName())
                , e -> latch.countDown(), latch::countDown));

        //in parallel, we'll race a second acquire AND release the unique element (each on their dedicated threads)
        //since this is LIFO, we expect that if the release loses the race, it will serve acquire1
        RaceTestUtils.race(
                () -> otherBorrower.subscribe(v -> threadName.set(Thread.currentThread().getName())
                        , e -> latch.countDown(), latch::countDown),
                () -> {
                    uniqueSlot.release().block();
                    latch.countDown();
                },
                racerScheduler);
        latch.await(1, TimeUnit.SECONDS);

        //we expect that, consistently, the poolable is delivered on a `delivery` thread
        assertThat(threadName.get()).as("round #" + i).startsWith("delivery-");

        //2 elements MIGHT be created if the first acquire wins (since we're in auto-release mode)
        assertThat(newCount.get()).as("1 or 2 elements created in round " + i).isIn(1, 2);
    }
    finally {
        allocatorScheduler.dispose();
        deliveryScheduler.dispose();
        acquire1Scheduler.dispose();
        racerScheduler.dispose();
    }
}
 
Example 12
Source File: WebsocketTest.java    From reactor-netty with Apache License 2.0
@Test
public void testIssue821() throws Exception {
	Scheduler scheduler = Schedulers.newSingle("ws");
	CountDownLatch latch = new CountDownLatch(1);
	AtomicReference<Throwable> error = new AtomicReference<>();
	httpServer = HttpServer.create()
	                       .port(0)
	                       .route(r -> r.ws("/ws", (in, out) -> {
	                           scheduler.schedule(() ->
	                               out.sendString(Mono.just("scheduled"))
	                                  .then()
	                                  .subscribe(
	                                          null,
	                                          t -> {
	                                              error.set(t);
	                                              latch.countDown();
	                                          },
	                                          null)
	                           , 500, TimeUnit.MILLISECONDS);
	                           return out.sendString(Mono.just("test"));
	                       }))
	                       .wiretap(true)
	                       .bindNow();

	String res =
			HttpClient.create()
			          .port(httpServer.port())
			          .wiretap(true)
			          .websocket()
			          .uri("/ws")
			          .receive()
			          .asString()
			          .blockLast();

	assertThat(res).isNotNull()
	               .isEqualTo("test");

	assertThat(latch.await(30, TimeUnit.SECONDS)).isTrue();

	assertThat(error.get()).isNotNull()
	                       .isInstanceOf(AbortedException.class);

	scheduler.dispose();
}
 
Example 13
Source File: ParallelMergeOrderedTest.java    From reactor-core with Apache License 2.0
@Test
public void reorderingByIndex() {
	final int LOOPS = 100;
	final int PARALLELISM = 2;
	final List<Integer> ordered = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);

	int notShuffled = 0;
	for (int i = 0; i < LOOPS; i++) {
		final Scheduler SCHEDULER = Schedulers.newParallel("test", PARALLELISM);
		final List<Integer> disordered = Collections.synchronizedList(new ArrayList<>());

		List<Integer> reordered = Flux.fromIterable(ordered)
		                         .hide()
		                         .index()
		                         .parallel(PARALLELISM)
		                         .runOn(SCHEDULER)
		                         .doOnNext(t2 -> disordered.add(t2.getT2()))
		                         .ordered(Comparator.comparing(Tuple2::getT1))
		                         .map(Tuple2::getT2)
		                         .collectList()
		                         .block();

		SCHEDULER.dispose();

		assertThat(reordered).containsExactlyElementsOf(ordered);
		assertThat(disordered).containsExactlyInAnyOrderElementsOf(ordered);

		try {
			assertThat(disordered).doesNotContainSequence(ordered);
			System.out.println("parallel shuffled the collection into " + disordered);
			break;
		}
		catch (AssertionError e) {
			notShuffled++;
		}
	}
	if (notShuffled > 0) {
		System.out.println("not shuffled loops: " + notShuffled);
	}

	assertThat(LOOPS - notShuffled)
			.as("at least one run shuffled")
			.isGreaterThan(0);
}
 
Example 14
Source File: FluxTests.java    From reactor-core with Apache License 2.0
/**
 * <pre>
 *                 forkStream
 *                 /        \      < - - - int
 *                v          v
 * persistenceStream        computationStream
 *                 \        /      < - - - List< String >
 *                  v      v
 *                 joinStream      < - - - String
 *                 splitStream
 *             observedSplitStream
 * </pre>
 * @throws Exception for convenience
 */
@Test(timeout = TIMEOUT)
public void multiplexUsingDispatchersAndSplit() throws Exception {

	final FluxIdentityProcessor<Integer> forkEmitterProcessor = Processors.multicast();

	final FluxIdentityProcessor<Integer> computationEmitterProcessor = Processors.more().multicast(false);

	Scheduler computation = Schedulers.newSingle("computation");
	Scheduler persistence = Schedulers.newSingle("persistence");
	Scheduler forkJoin = Schedulers.newParallel("forkJoin", 2);

	final Flux<List<String>> computationStream =
			computationEmitterProcessor.publishOn(computation)
			                      .map(i -> {
				                      final List<String> list = new ArrayList<>(i);
				                      for (int j = 0; j < i; j++) {
					                      list.add("i" + j);
				                      }
				                      return list;
			                      })
			                      .doOnNext(ls -> println("Computed: ", ls))
			                      .log("computation");

	final FluxIdentityProcessor<Integer> persistenceEmitterProcessor = Processors.more().multicast(false);

	final Flux<List<String>> persistenceStream =
			persistenceEmitterProcessor.publishOn(persistence)
			                      .doOnNext(i -> println("Persisted: ", i))
			                      .map(i -> Collections.singletonList("done" + i))
			                      .log("persistence");

	Flux<Integer> forkStream = forkEmitterProcessor.publishOn(forkJoin)
	                                             .log("fork");

	forkStream.subscribe(computationEmitterProcessor);
	forkStream.subscribe(persistenceEmitterProcessor);

	final Flux<List<String>> joinStream = Flux.zip(computationStream, persistenceStream, (a, b) -> Arrays.asList(a, b))
	                                                .publishOn(forkJoin)
	                                                .map(listOfLists -> {
		                                               listOfLists.get(0)
		                                                          .addAll(listOfLists.get(1));
		                                               return listOfLists.get(0);
	                                               })
	                                                .log("join");

	final Semaphore doneSemaphore = new Semaphore(0);

	final MonoProcessor<List<String>> listPromise = joinStream.flatMap(Flux::fromIterable)
	                                                 .log("resultStream")
	                                                 .collectList()
	                                                 .doOnTerminate(doneSemaphore::release)
	                                                 .toProcessor();
	listPromise.subscribe();

	forkEmitterProcessor.onNext(1);
	forkEmitterProcessor.onNext(2);
	forkEmitterProcessor.onNext(3);
	forkEmitterProcessor.onComplete();

	List<String> res = listPromise.block(Duration.ofSeconds(5));
	assertEquals(Arrays.asList("i0", "done1", "i0", "i1", "done2", "i0", "i1", "i2", "done3"), res);

	forkJoin.dispose();
	persistence.dispose();
	computation.dispose();
}
 
Example 15
Source File: FluxSubscribeOnTest.java    From reactor-core with Apache License 2.0
@Test
public void gh507() {
	Scheduler s = Schedulers.newSingle("subscribe");
	Scheduler s2 = Schedulers.newParallel("receive");
	AtomicBoolean interrupted = new AtomicBoolean();
	AtomicBoolean timedOut = new AtomicBoolean();

	try {
		Flux.from((Publisher<String>) subscriber -> {
			subscriber.onSubscribe(new Subscription() {
				private int totalCount;

				@Override
				public void request(long n) {
					for (int i = 0; i < n; i++) {
						if (totalCount++ < 317) {
							subscriber.onNext(String.valueOf(totalCount));
						}
						else {
							subscriber.onComplete();
						}
					}
				}

				@Override
				public void cancel() {
					// do nothing
				}
			});
		})
		    .subscribeOn(s)
		    .limitRate(10)
		    .doOnNext(d -> {
			    CountDownLatch latch = new CountDownLatch(1);
			    Mono.fromCallable(() -> d)
			        .subscribeOn(s2)
			        .doFinally(it -> latch.countDown())
			        .subscribe();

			    try {
				    if (!latch.await(5, TimeUnit.SECONDS)) {
				    	timedOut.set(true);
				    }
			    }
			    catch (InterruptedException e) {
				    interrupted.set(true);
			    }
		    })
		    .blockLast(Duration.ofSeconds(2));

		assertThat(interrupted).as("interrupted").isFalse();
		assertThat(timedOut).as("latch timeout").isFalse();
	}
	finally {
		s.dispose();
		s2.dispose();
	}
}
 
Example 16
Source File: HooksTest.java    From reactor-core with Apache License 2.0
@Test
public void parallelModeFused() {
	Hooks.onOperatorDebug();

	Hooks.onEachOperator(p -> {
		System.out.println(Scannable.from(p).stepName());
		return p;
	});

	Flux<Integer> source = Mono.just(1)
	                           .flux()
	                           .repeat(999)
	                           .publish()
	                           .autoConnect();
	int ncpu = Math.max(8,
			Runtime.getRuntime()
			       .availableProcessors());

	Scheduler scheduler = Schedulers.newParallel("test", ncpu);

	try {
		Flux<Integer> result = ParallelFlux.from(source, ncpu)
		                                   .runOn(scheduler)
		                                   .map(v -> v + 1)
		                                   .log("test", Level.INFO, true, SignalType.ON_SUBSCRIBE)
		                                   .sequential();

		AssertSubscriber<Integer> ts = AssertSubscriber.create();

		result.subscribe(ts);

		ts.await(Duration.ofSeconds(10));

		ts.assertSubscribed()
		  .assertValueCount(1000)
		  .assertComplete()
		  .assertNoError();
	}
	finally {
		scheduler.dispose();
	}
}