Java Code Examples for org.reactivestreams.Publisher#subscribe()
The following examples show how to use org.reactivestreams.Publisher#subscribe().
These examples are extracted from open source projects.
Each example lists the project and source file it was taken from.
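Before the project examples, here is a minimal, self-contained sketch of the basic subscribe() contract (a hypothetical example, not taken from any of the projects below; it assumes reactor-core is on the classpath so that Flux can supply a Publisher, and the class name SubscribeSketch is made up): the Subscriber passed to Publisher#subscribe() first receives onSubscribe, and no onNext signals arrive until demand is requested on the Subscription.

import org.reactivestreams.Publisher;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;

import reactor.core.publisher.Flux;

public class SubscribeSketch {

    public static void main(String[] args) {
        // Flux implements org.reactivestreams.Publisher, so it can stand in for any Publisher here
        Publisher<Integer> publisher = Flux.just(1, 2, 3);

        publisher.subscribe(new Subscriber<Integer>() {

            @Override
            public void onSubscribe(Subscription s) {
                // onSubscribe is always signalled first; no elements flow until demand is requested
                s.request(Long.MAX_VALUE); // unbounded demand, acceptable for a small finite source
            }

            @Override
            public void onNext(Integer value) {
                System.out.println("received: " + value);
            }

            @Override
            public void onError(Throwable t) {
                t.printStackTrace();
            }

            @Override
            public void onComplete() {
                System.out.println("done");
            }
        });
    }
}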
Example 1
Source Project: reactor-core | File: FluxSampleTimeout.java | License: Apache License 2.0

@Override
public void onNext(T t) {
    long idx = INDEX.incrementAndGet(this);

    if (!Operators.set(OTHER, this, Operators.emptySubscription())) {
        return;
    }

    Publisher<U> p;

    try {
        p = Objects.requireNonNull(throttler.apply(t),
                "throttler returned a null publisher");
    }
    catch (Throwable e) {
        onError(Operators.onOperatorError(s, e, t, ctx));
        return;
    }

    SampleTimeoutOther<T, U> os = new SampleTimeoutOther<>(this, t, idx);

    if (Operators.replace(OTHER, this, os)) {
        p.subscribe(os);
    }
}
Example 2
Source Project: smallrye-reactive-streams-operators | File: APITest.java | License: Apache License 2.0

@Test
public void testBuildingSubscriberFromSpec() throws ExecutionException, InterruptedException {
    Processor<ByteBuffer, MyDomainObject> parser = createParser();
    CompletionSubscriber<ByteBuffer, List<MyDomainObject>> subscriber = ReactiveStreams.<ByteBuffer> builder()
            .via(parser)
            .toList()
            .build();
    CompletionStage<List<MyDomainObject>> result = subscriber.getCompletion();

    List<MyDomainObject> domainObjects = Arrays.asList(
            new MyDomainObject("Clement", "Neo"),
            new MyDomainObject("Tintin", "Milou"));
    Publisher<ByteBuffer> publisher = ReactiveStreams.fromIterable(domainObjects)
            .map(obj -> String.format("%s,%s\n", obj.field1, obj.field2))
            .map(line -> ByteBuffer.wrap(line.getBytes()))
            .buildRs();

    publisher.subscribe(subscriber);

    List<MyDomainObject> objects = result.toCompletableFuture().get();
    assertThat(objects.toString()).contains("Clement => Neo", "Tintin => Milou");
}
Example 3
Source Project: reactor-core | File: FluxFirstEmitting.java | License: Apache License 2.0

void subscribe(Publisher<? extends T>[] sources, int n, CoreSubscriber<? super T> actual) {
    FirstEmittingSubscriber<T>[] a = subscribers;

    for (int i = 0; i < n; i++) {
        a[i] = new FirstEmittingSubscriber<>(actual, this, i);
    }

    actual.onSubscribe(this);

    for (int i = 0; i < n; i++) {
        if (cancelled || wip != Integer.MIN_VALUE) {
            return;
        }

        Publisher<? extends T> p = sources[i];

        if (p == null) {
            if (WIP.compareAndSet(this, Integer.MIN_VALUE, -1)) {
                actual.onError(new NullPointerException("The " + i + " th Publisher source is null"));
            }
            return;
        }

        p.subscribe(a[i]);
    }
}
Example 4
Source Project: reactor-core | File: FluxSampleFirst.java | License: Apache License 2.0

@Override
public void onNext(T t) {
    if (!gate) {
        gate = true;

        if (wip == 0 && WIP.compareAndSet(this, 0, 1)) {
            actual.onNext(t);
            if (WIP.decrementAndGet(this) != 0) {
                handleTermination();
                return;
            }
        }
        else {
            return;
        }

        Publisher<U> p;

        try {
            p = Objects.requireNonNull(throttler.apply(t),
                    "The throttler returned a null publisher");
        }
        catch (Throwable e) {
            Operators.terminate(S, this);
            error(Operators.onOperatorError(null, e, t, ctx));
            return;
        }

        SampleFirstOther<U> other = new SampleFirstOther<>(this);

        if (Operators.replace(OTHER, this, other)) {
            p.subscribe(other);
        }
    }
    else {
        Operators.onDiscard(t, ctx);
    }
}
Example 5
Source Project: cxf | File: AbstractReactiveInvoker.java | License: Apache License 2.0

protected boolean isStreamingSubscriberUsed(Publisher<?> publisher, AsyncResponse asyncResponse,
        Message inMessage) {
    if (isUseStreamingSubscriberIfPossible() && isJsonResponse(inMessage)) {
        publisher.subscribe(new JsonStreamingAsyncSubscriber<>(asyncResponse));
        return true;
    } else {
        return false;
    }
}
Example 6
Source Project: cyclops | File: ConcurrentFlatMapper.java | License: Apache License 2.0

public void onNext(T t) {
    if (!running)
        return;
    try {
        Publisher<? extends R> next = mapper.apply(t);
        ActiveSubscriber inner = new ActiveSubscriber();
        queueUpdater.getAndUpdate(this, q -> q.plus(inner));
        next.subscribe(inner);
    } catch (Throwable e) {
        onError.accept(e);
    }
}
Example 7
Source Project: reactor-core | File: FluxSwitchOnFirst.java | License: Apache License 2.0

@Override
public void onComplete() {
    // read of the first should occur before the read of inner since otherwise
    // first may be nulled while the previous read has shown that inner is still
    // null hence double invocation of transformer occurs
    final T f = this.first;
    final CoreSubscriber<? super T> i = this.inner;

    if (this.done || i == Operators.EMPTY_SUBSCRIBER) {
        return;
    }

    this.done = true;

    if (f == null && i == null) {
        final Publisher<? extends R> result;
        final CoreSubscriber<? super R> o = outer;

        try {
            result = Objects.requireNonNull(
                    this.transformer.apply(Signal.complete(o.currentContext()), this),
                    "The transformer returned a null value"
            );
        }
        catch (Throwable e) {
            this.done = true;
            Operators.error(o, Operators.onOperatorError(this.s, e, null, o.currentContext()));
            return;
        }

        result.subscribe(o);
        return;
    }

    drain();
}
Example 8
Source Project: reactive-streams-commons | File: PublisherRangePerf.java | License: Apache License 2.0

@Benchmark
public void createNew(Blackhole bh) {
    Publisher<Integer> p = new PublisherRange(0, count);

    bh.consume(p);

    p.subscribe(new PerfSubscriber(bh));
}
Example 9
Source Project: reactor-core | File: OnDiscardShouldNotLeakTest.java | License: Apache License 2.0

@Test
public void ensureNoLeaksPopulatedQueueAndRacingCancelAndOnError() {
    Assumptions.assumeThat(discardScenario.subscriptionsNumber).isOne();
    for (int i = 0; i < 10000; i++) {
        tracker.reset();
        TestPublisher<Tracked> testPublisher = TestPublisher.createNoncompliant(
                TestPublisher.Violation.DEFER_CANCELLATION, TestPublisher.Violation.REQUEST_OVERFLOW);
        @SuppressWarnings("unchecked")
        Publisher<Tracked> source = discardScenario.producePublisherFromSources(testPublisher);

        if (conditional) {
            if (source instanceof Flux) {
                source = ((Flux<Tracked>) source).filter(t -> true);
            }
            else {
                source = ((Mono<Tracked>) source).filter(t -> true);
            }
        }

        Scannable scannable = Scannable.from(source);
        Integer prefetch = scannable.scan(Scannable.Attr.PREFETCH);

        Assumptions.assumeThat(prefetch).isNotZero();

        AssertSubscriber<Tracked> assertSubscriber =
                new AssertSubscriber<>(Operators.enableOnDiscard(null, Tracked::safeRelease), 0);
        if (fused) {
            assertSubscriber.requestedFusionMode(Fuseable.ANY);
        }
        source.subscribe(assertSubscriber);

        testPublisher.next(tracker.track(1));
        testPublisher.next(tracker.track(2));
        testPublisher.next(tracker.track(3));
        testPublisher.next(tracker.track(4));

        RaceTestUtils.race(
                assertSubscriber::cancel,
                () -> testPublisher.error(new RuntimeException("test")),
                scheduler);

        List<Tracked> values = assertSubscriber.values();
        values.forEach(Tracked::release);

        if (assertSubscriber.isTerminated()) {
            // has a chance to error with rejected exception
            assertSubscriber.assertError();
        }

        tracker.assertNoLeaks();
    }
}
Example 10
Source Project: reactive-streams-commons | File: PublisherRetryPerf.java | License: Apache License 2.0

@Benchmark
public void createNew(Blackhole bh) {
    Publisher<Integer> p = createSource();

    bh.consume(p);

    p.subscribe(new PerfSubscriber(bh));
}
Example 11
Source Project: smallrye-mutiny | File: MultiConcatOp.java | License: Apache License 2.0

@Override
public void onCompletion() {
    if (wip.getAndIncrement() == 0) {
        Publisher<? extends T>[] a = upstreams;
        do {
            if (isCancelled()) {
                return;
            }

            int i = index;
            if (i == a.length) {
                Throwable last = Subscriptions.markFailureAsTerminated(failure);
                if (last != null) {
                    downstream.onFailure(last);
                } else {
                    downstream.onCompletion();
                }
                return;
            }

            Publisher<? extends T> p = a[i];

            if (p == null) {
                downstream.onFailure(
                        new NullPointerException("Source Publisher at currentIndex " + i + " is null"));
                return;
            }

            long c = produced;
            if (c != 0L) {
                produced = 0L;
                emitted(c);
            }

            p.subscribe(Infrastructure.onMultiSubscription(p, this));

            if (isCancelled()) {
                return;
            }

            index = ++i;
        } while (wip.decrementAndGet() != 0);
    }
}
Example 12
Source Project: reactive-streams-commons | File: PublisherConcatIterablePerf.java | License: Apache License 2.0

@Benchmark
public void createNew2(Blackhole bh) {
    Publisher<Integer> p = createSource2();

    bh.consume(p);

    p.subscribe(new PerfSubscriber(bh));
}
Example 13
Source Project: reactor-core | File: OnDiscardShouldNotLeakTest.java | License: Apache License 2.0

@Test
public void ensureNoLeaksPopulatedQueueAndRacingCancelAndOnNext() {
    Assumptions.assumeThat(discardScenario.subscriptionsNumber).isOne();
    for (int i = 0; i < 10000; i++) {
        tracker.reset();
        TestPublisher<Tracked> testPublisher = TestPublisher.createNoncompliant(
                TestPublisher.Violation.DEFER_CANCELLATION, TestPublisher.Violation.REQUEST_OVERFLOW);
        Publisher<Tracked> source = discardScenario.producePublisherFromSources(testPublisher);

        if (conditional) {
            if (source instanceof Flux) {
                source = ((Flux<Tracked>) source).filter(t -> true);
            }
            else {
                source = ((Mono<Tracked>) source).filter(t -> true);
            }
        }

        Scannable scannable = Scannable.from(source);
        Integer prefetch = scannable.scan(Scannable.Attr.PREFETCH);

        Assumptions.assumeThat(prefetch).isNotZero();

        AssertSubscriber<Tracked> assertSubscriber =
                new AssertSubscriber<>(Operators.enableOnDiscard(null, Tracked::safeRelease), 0);
        if (fused) {
            assertSubscriber.requestedFusionMode(Fuseable.ANY);
        }
        source.subscribe(assertSubscriber);

        testPublisher.next(tracker.track(1));
        testPublisher.next(tracker.track(2));

        Tracked value3 = tracker.track(3);
        Tracked value4 = tracker.track(4);
        Tracked value5 = tracker.track(5);

        RaceTestUtils.race(assertSubscriber::cancel, () -> {
            testPublisher.next(value3);
            testPublisher.next(value4);
            testPublisher.next(value5);
        }, scheduler);

        List<Tracked> values = assertSubscriber.values();
        values.forEach(Tracked::release);

        tracker.assertNoLeaks();
    }
}
Example 14
Source Project: spring-analysis-note | File: ZeroDemandResponse.java | License: MIT License

@Override
public Mono<Void> writeWith(Publisher<? extends DataBuffer> body) {
    body.subscribe(this.writeSubscriber);
    return Mono.never();
}
Example 15
Source Project: rsocket-java | File: RSocketRequesterTest.java | License: Apache License 2.0

@ParameterizedTest
@MethodSource("requestNInteractions")
public void ensuresThatNoOpsMustHappenUntilFirstRequestN(
        FrameType frameType, BiFunction<ClientSocketRule, Payload, Publisher<Payload>> interaction) {
    Payload payload1 = ByteBufPayload.create("abc1");
    Publisher<Payload> interaction1 = interaction.apply(rule, payload1);

    Payload payload2 = ByteBufPayload.create("abc2");
    Publisher<Payload> interaction2 = interaction.apply(rule, payload2);

    Assertions.assertThat(rule.connection.getSent()).isEmpty();

    AssertSubscriber<Payload> assertSubscriber1 = AssertSubscriber.create(0);
    interaction1.subscribe(assertSubscriber1);
    AssertSubscriber<Payload> assertSubscriber2 = AssertSubscriber.create(0);
    interaction2.subscribe(assertSubscriber2);
    assertSubscriber1.assertNotTerminated().assertNoError();
    assertSubscriber2.assertNotTerminated().assertNoError();

    // even though we subscribed, nothing should happen until the first requestN
    Assertions.assertThat(rule.connection.getSent()).isEmpty();

    // first request on the second interaction to ensure that stream id issuing on the first request
    assertSubscriber2.request(1);

    Assertions.assertThat(rule.connection.getSent())
        .hasSize(1)
        .first()
        .matches(bb -> frameType(bb) == frameType)
        .matches(
            bb -> FrameHeaderCodec.streamId(bb) == 1,
            "Expected to have stream ID {1} but got {"
                + FrameHeaderCodec.streamId(rule.connection.getSent().iterator().next())
                + "}")
        .matches(
            bb -> {
                switch (frameType) {
                    case REQUEST_RESPONSE:
                        return ByteBufUtil.equals(
                            RequestResponseFrameCodec.data(bb),
                            Unpooled.wrappedBuffer("abc2".getBytes()));
                    case REQUEST_STREAM:
                        return ByteBufUtil.equals(
                            RequestStreamFrameCodec.data(bb),
                            Unpooled.wrappedBuffer("abc2".getBytes()));
                    case REQUEST_CHANNEL:
                        return ByteBufUtil.equals(
                            RequestChannelFrameCodec.data(bb),
                            Unpooled.wrappedBuffer("abc2".getBytes()));
                }
                return false;
            })
        .matches(ReferenceCounted::release);

    rule.connection.clearSendReceiveBuffers();

    assertSubscriber1.request(1);
    Assertions.assertThat(rule.connection.getSent())
        .hasSize(1)
        .first()
        .matches(bb -> frameType(bb) == frameType)
        .matches(
            bb -> FrameHeaderCodec.streamId(bb) == 3,
            "Expected to have stream ID {1} but got {"
                + FrameHeaderCodec.streamId(rule.connection.getSent().iterator().next())
                + "}")
        .matches(
            bb -> {
                switch (frameType) {
                    case REQUEST_RESPONSE:
                        return ByteBufUtil.equals(
                            RequestResponseFrameCodec.data(bb),
                            Unpooled.wrappedBuffer("abc1".getBytes()));
                    case REQUEST_STREAM:
                        return ByteBufUtil.equals(
                            RequestStreamFrameCodec.data(bb),
                            Unpooled.wrappedBuffer("abc1".getBytes()));
                    case REQUEST_CHANNEL:
                        return ByteBufUtil.equals(
                            RequestChannelFrameCodec.data(bb),
                            Unpooled.wrappedBuffer("abc1".getBytes()));
                }
                return false;
            })
        .matches(ReferenceCounted::release);
}
Example 16
Source Project: rsocket-java | File: RSocketRequesterTest.java | License: Apache License 2.0

@ParameterizedTest
@MethodSource("encodeDecodePayloadCases")
public void verifiesThatFrameWithNoMetadataHasDecodedCorrectlyIntoPayload(
        FrameType frameType, int framesCnt, int responsesCnt) {
    ByteBufAllocator allocator = rule.alloc();
    AssertSubscriber<Payload> assertSubscriber = AssertSubscriber.create(responsesCnt);
    TestPublisher<Payload> testPublisher = TestPublisher.create();

    Publisher<Payload> response;

    switch (frameType) {
        case REQUEST_FNF:
            response = testPublisher.mono().flatMap(p -> rule.socket.fireAndForget(p).then(Mono.empty()));
            break;
        case REQUEST_RESPONSE:
            response = testPublisher.mono().flatMap(p -> rule.socket.requestResponse(p));
            break;
        case REQUEST_STREAM:
            response = testPublisher.mono().flatMapMany(p -> rule.socket.requestStream(p));
            break;
        case REQUEST_CHANNEL:
            response = rule.socket.requestChannel(testPublisher.flux());
            break;
        default:
            throw new UnsupportedOperationException("illegal case");
    }

    response.subscribe(assertSubscriber);
    testPublisher.next(ByteBufPayload.create("d"));

    int streamId = rule.getStreamIdForRequestType(frameType);

    if (responsesCnt > 0) {
        for (int i = 0; i < responsesCnt - 1; i++) {
            rule.connection.addToReceivedBuffer(
                PayloadFrameCodec.encode(
                    allocator, streamId, false, false, true, null,
                    Unpooled.wrappedBuffer(("rd" + (i + 1)).getBytes())));
        }

        rule.connection.addToReceivedBuffer(
            PayloadFrameCodec.encode(
                allocator, streamId, false, true, true, null,
                Unpooled.wrappedBuffer(("rd" + responsesCnt).getBytes())));
    }

    if (framesCnt > 1) {
        rule.connection.addToReceivedBuffer(
            RequestNFrameCodec.encode(allocator, streamId, framesCnt));
    }

    for (int i = 1; i < framesCnt; i++) {
        testPublisher.next(ByteBufPayload.create("d" + i));
    }

    Assertions.assertThat(rule.connection.getSent())
        .describedAs(
            "Interaction Type :[%s]. Expected to observe %s frames sent", frameType, framesCnt)
        .hasSize(framesCnt)
        .allMatch(bb -> !FrameHeaderCodec.hasMetadata(bb))
        .allMatch(ByteBuf::release);

    Assertions.assertThat(assertSubscriber.isTerminated())
        .describedAs("Interaction Type :[%s]. Expected to be terminated", frameType)
        .isTrue();

    Assertions.assertThat(assertSubscriber.values())
        .describedAs(
            "Interaction Type :[%s]. Expected to observe %s frames received", frameType, responsesCnt)
        .hasSize(responsesCnt)
        .allMatch(p -> !p.hasMetadata())
        .allMatch(p -> p.release());

    rule.assertHasNoLeaks();
    rule.connection.clearSendReceiveBuffers();
}
Example 17
Source Project: RHub | File: AkkaProcProxy.java | License: Apache License 2.0

@Override
public Removable addUpstream(Publisher publisher) {
    publisher.subscribe(new Subscriber() {
        Subscription s;

        @Override
        public void onSubscribe(final Subscription subscription) {
            s = subscription;
            subscriptions.put(publisher, s);
            s.request(cnt.get());
        }

        @Override
        public void onNext(Object o) {
            proc.onNext(o);
            cnt.decrementAndGet();
            if (cnt.compareAndSet(0, mat.settings().maxInputBufferSize())) {
                try {
                    //todo super hack:
                    // Akka internal subscriber request chunks of data
                    // it might happen that we call onNext where internal subscriber did not
                    // request the next chunk. this is why we wait a little and try to use the
                    // same ratio as internals
                    Thread.sleep(33);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
                s.request(mat.settings().maxInputBufferSize());
            }
        }

        @Override
        public void onError(Throwable t) {
            if (tePolicy.equals(WRAP)) {
                proc.onNext(new Event.ErrorEvent(t));
                cnt.decrementAndGet();
                cnt.compareAndSet(0, mat.settings().maxInputBufferSize());
            } else if (tePolicy.equals(PASS)) {
                proc.onError(t);
            }
        }

        @Override
        public void onComplete() {
            if (tePolicy.equals(WRAP)) {
                proc.onNext(Event.COMPLETE);
                cnt.decrementAndGet();
                cnt.compareAndSet(0, mat.settings().maxInputBufferSize());
            } else if (tePolicy.equals(PASS)) {
                proc.onComplete();
            }
        }
    });
    return () -> removeUpstream(publisher);
}
Example 18
Source Project: reactive-ipc-jvm | File: ChannelToConnectionBridge.java | License: Apache License 2.0

@Override
public void write(final ChannelHandlerContext ctx, Object msg, final ChannelPromise promise)
        throws Exception {
    if (msg instanceof Publisher) {
        @SuppressWarnings("unchecked")
        final Publisher<W> data = (Publisher<W>) msg;
        data.subscribe(new Subscriber<W>() {

            // TODO: Needs to be fixed to wire all futures to the promise of the Publisher write.
            private ChannelFuture lastWriteFuture;

            @Override
            public void onSubscribe(Subscription s) {
                s.request(Long.MAX_VALUE); // TODO: Backpressure
            }

            @Override
            public void onNext(W w) {
                lastWriteFuture = ctx.channel().write(w);
            }

            @Override
            public void onError(Throwable t) {
                onTerminate();
            }

            @Override
            public void onComplete() {
                onTerminate();
            }

            private void onTerminate() {
                ctx.channel().flush();
                lastWriteFuture.addListener(new ChannelFutureListener() {
                    @Override
                    public void operationComplete(ChannelFuture future) throws Exception {
                        if (future.isSuccess()) {
                            promise.trySuccess();
                        } else {
                            promise.tryFailure(future.cause());
                        }
                    }
                });
            }
        });
    } else {
        super.write(ctx, msg, promise);
    }
}
Example 19
Source Project: reactor-core | File: FluxMergeSequential.java | License: Apache License 2.0

@Override
public void onNext(T t) {
    Publisher<? extends R> publisher;

    try {
        publisher = Objects.requireNonNull(mapper.apply(t), "publisher");
    }
    catch (Throwable ex) {
        onError(Operators.onOperatorError(s, ex, t, actual.currentContext()));
        return;
    }

    MergeSequentialInner<R> inner = new MergeSequentialInner<>(this, prefetch);

    if (cancelled) {
        return;
    }

    if (!subscribers.offer(inner)) {
        int badSize = subscribers.size();
        inner.cancel();
        drainAndCancel();
        onError(Operators.onOperatorError(s,
                new IllegalStateException("Too many subscribers for "
                        + "fluxMergeSequential on item: " + t + "; subscribers: " + badSize),
                t, actual.currentContext()));
        return;
    }

    if (cancelled) {
        return;
    }

    publisher.subscribe(inner);

    if (cancelled) {
        inner.cancel();
        drainAndCancel();
    }
}
Example 20
Source Project: sourcerer | File: EventStoreEsjcEventRepositoryTest.java | License: MIT License

@Test(expected = RuntimeException.class)
public void errorNotPropagatedWhenCancelHandlerThrows() throws IOException {
    // This is not a nice behavior, but test is to confirm root cause of issue seen live, where
    // a subscription dies and does not recover as we time out trying to stop the subscription
    // that we're currently handling an error for!
    String streamId = "test-stream";

    when(reader.readValue((byte[]) any())).thenReturn(new Object());

    // Subscribe call not yet mocked, ensures we don't call subscribe until we subscribe
    // to the Flux
    Publisher<EventSubscriptionUpdate<Event>> publisher = repository.getStreamPublisher(streamId, null);

    // Set up subscription - should trigger a call to underlying subscribe
    CatchUpSubscription catchUpSubscription = mock(CatchUpSubscription.class);
    when(eventStore.subscribeToStreamFrom(
            anyString(),
            any(Long.class),
            any(CatchUpSubscriptionSettings.class),
            any(CatchUpSubscriptionListener.class),
            any(UserCredentials.class)))
            .thenReturn(catchUpSubscription);

    // Hook up fake listener, checking that we're getting notified
    AtomicInteger seenEvents = new AtomicInteger(0);
    AtomicReference<Throwable> seenError = new AtomicReference<>(null);
    AtomicBoolean seenStop = new AtomicBoolean(false);

    publisher.subscribe(Subscribers.bounded(
            100,
            event -> seenEvents.incrementAndGet(),
            seenError::set,
            () -> seenStop.set(true)));

    ArgumentCaptor<CatchUpSubscriptionListener> listenerCaptor =
            ArgumentCaptor.forClass(CatchUpSubscriptionListener.class);
    verify(eventStore, times(1)).subscribeToStreamFrom(
            eq("pref-" + streamId),
            eq(null),
            any(CatchUpSubscriptionSettings.class),
            listenerCaptor.capture());
    CatchUpSubscriptionListener listener = listenerCaptor.getValue();

    Mockito
            .doThrow(new RuntimeException("bad stuff on close"))
            .when(catchUpSubscription).stop();

    listener.onClose(
            catchUpSubscription,
            SubscriptionDropReason.CatchUpError,
            new RuntimeException("bad things happen"));

    Assert.assertEquals(0, seenEvents.get());
    Assert.assertNull(seenError.get());
}