akka.stream.javadsl.Keep Java Examples

The following examples show how to use akka.stream.javadsl.Keep. Keep supplies the combinators Keep.left(), Keep.right(), Keep.both(), and Keep.none(), which select which materialized value to retain when two stream stages are combined via viaMat, toMat, or joinMat. All examples are taken from open-source projects; the source file and project are named above each example.
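For orientation, here is a minimal, self-contained sketch of the four combinators. It is not taken from any of the projects below; the class and variable names are illustrative, and it uses the pre-Akka-2.6 ActorMaterializer API that the examples below also use:

import java.util.List;
import java.util.concurrent.CompletionStage;

import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.japi.Pair;
import akka.stream.ActorMaterializer;
import akka.stream.javadsl.Keep;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;

public final class KeepDemo {

    public static void main(final String[] args) {
        final ActorSystem system = ActorSystem.create("keep-demo");
        final ActorMaterializer mat = ActorMaterializer.create(system);

        final Source<Integer, NotUsed> numbers = Source.range(1, 3);
        final Sink<Integer, CompletionStage<List<Integer>>> collect = Sink.seq();

        // Keep.left(): keep the upstream's materialized value (here NotUsed)
        final NotUsed left = numbers.toMat(collect, Keep.left()).run(mat);

        // Keep.right(): keep the downstream's materialized value
        final CompletionStage<List<Integer>> right = numbers.toMat(collect, Keep.right()).run(mat);

        // Keep.both(): keep both, combined into an akka.japi.Pair
        final Pair<NotUsed, CompletionStage<List<Integer>>> both =
                numbers.toMat(collect, Keep.both()).run(mat);

        // Keep.none(): discard both; the graph materializes NotUsed
        final NotUsed none = numbers.toMat(collect, Keep.none()).run(mat);

        right.thenAccept(list -> {
            System.out.println(list); // prints [1, 2, 3]
            system.terminate();
        });
    }
}

Every example below is a variation on this choice: the combinator passed to viaMat, toMat, joinMat, or fromSinkAndSourceMat decides which handle (a queue, a kill switch, a completion stage, a test probe, ...) run(...) hands back.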
Example #1
Source File: Kata3SearchAkkaStream.java    From ditto-examples with Eclipse Public License 2.0
@Test
public void part1CreateAkkaSearchQuery() {
    final ActorSystem system = ActorSystem.create("thing-search");
    try {

        final String filter = "or(eq(attributes/counter,1), eq(attributes/counter,2))";


        // TODO create Akka source of publisher with above filter
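        // (kata exercise: the source must be implemented before the verification below can run)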
        final Source<List<Thing>, NotUsed> things = null;


        // Verify Results
        things.flatMapConcat(Source::from)
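                // Keep.right() retains the CompletionStage<List<Thing>> materialized by Sink.seq()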
                .toMat(Sink.seq(), Keep.right())
                .run(ActorMaterializer.create(system))
                .thenAccept(t -> Assertions.assertThat(t).containsAnyOf(thing1, thing2).doesNotContain(thing3))
                .toCompletableFuture()
                .join();
    } finally {
        system.terminate();
    }
}
 
Example #2
Source File: AbstractBackgroundStreamingActorWithConfigWithStatusReport.java    From ditto with Eclipse Public License 2.0
private void restartStream() {
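    // Keep.right() keeps the UniqueKillSwitch of KillSwitches.single();
    // Keep.both() then pairs it with Sink.ignore()'s CompletionStage<Done>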
    shutdownKillSwitch();

    final Pair<UniqueKillSwitch, CompletionStage<Done>> materializedValues =
            getSource().viaMat(KillSwitches.single(), Keep.right())
                    .toMat(Sink.ignore(), Keep.both())
                    .run(materializer);

    killSwitch = materializedValues.first();

    materializedValues.second()
            .<Void>handle((result, error) -> {
                final String description = String.format("Stream terminated. Result=<%s> Error=<%s>",
                        result, error);
                log.info(description);
                getSelf().tell(new StreamTerminated(description), getSelf());
                return null;
            });
}
 
Example #3
Source File: HttpPublisherActor.java    From ditto with Eclipse Public License 2.0
@SuppressWarnings("unused")
private HttpPublisherActor(final Connection connection, final HttpPushFactory factory) {
    super(connection);
    this.factory = factory;

    final ActorSystem system = getContext().getSystem();
    final ConnectionConfig connectionConfig =
            DittoConnectivityConfig.of(DefaultScopedConfig.dittoScoped(system.settings().config()))
                    .getConnectionConfig();
    config = connectionConfig.getHttpPushConfig();

    materializer = ActorMaterializer.create(getContext());
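    // Keep.left() twice: keep only the SourceQueue of Source.queue, ignoring the
    // materialized values of the HTTP flow and of Sink.foreach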
    sourceQueue =
            Source.<Pair<HttpRequest, HttpPushContext>>queue(config.getMaxQueueSize(), OverflowStrategy.dropNew())
                    .viaMat(factory.createFlow(system, log), Keep.left())
                    .toMat(Sink.foreach(this::processResponse), Keep.left())
                    .run(materializer);
}
 
Example #4
Source File: EnforcementFlow.java    From ditto with Eclipse Public License 2.0
private Source<Entry<Enforcer>, NotUsed> readCachedEnforcer(final Metadata metadata,
        final EntityIdWithResourceType policyId, final int iteration) {

    final Source<Entry<Enforcer>, ?> lazySource = Source.lazily(() -> {
        final CompletionStage<Source<Entry<Enforcer>, NotUsed>> enforcerFuture = policyEnforcerCache.get(policyId)
                .thenApply(optionalEnforcerEntry -> {
                    if (shouldReloadCache(optionalEnforcerEntry.orElse(null), metadata, iteration)) {
                        // invalid entry; invalidate and retry after delay
                        policyEnforcerCache.invalidate(policyId);
                        return readCachedEnforcer(metadata, policyId, iteration + 1)
                                .initialDelay(cacheRetryDelay);
                    } else {
                        return optionalEnforcerEntry.map(Source::single)
                                .orElse(ENFORCER_NONEXISTENT);
                    }
                })
                .exceptionally(error -> {
                    log.error("Failed to read policyEnforcerCache", error);
                    return ENFORCER_NONEXISTENT;
                });

        return Source.fromSourceCompletionStage(enforcerFuture);
    });

    // Keep.none() discards the materialized value, fixing the source's type to NotUsed
    return lazySource.viaMat(Flow.create(), Keep.none());
}
 
Example #5
Source File: EnforcementFlow.java    From ditto with Eclipse Public License 2.0
private Source<SudoRetrieveThingResponse, NotUsed> sudoRetrieveThing(final ThingId thingId) {
    final SudoRetrieveThing command =
            SudoRetrieveThing.withOriginalSchemaVersion(thingId, DittoHeaders.empty());
    final CompletionStage<Source<SudoRetrieveThingResponse, NotUsed>> responseFuture =
            // using default thread-pool for asking Things shard region
            Patterns.ask(thingsShardRegion, command, thingsTimeout)
                    .handle((response, error) -> {
                        if (response instanceof SudoRetrieveThingResponse) {
                            return Source.single((SudoRetrieveThingResponse) response);
                        } else {
                            if (error != null) {
                                log.error("Failed " + command, error);
                            } else if (!(response instanceof ThingNotAccessibleException)) {
                                log.error("Unexpected response for <{}>: <{}>", command, response);
                            }
                            return Source.empty();
                        }
                    });

    // Keep.none() again normalizes the materialized value to NotUsed
    return Source.fromSourceCompletionStage(responseFuture)
            .viaMat(Flow.create(), Keep.none());
}
 
Example #6
Source File: ThingsSseRouteBuilderTest.java    From ditto with Eclipse Public License 2.0
private static void replySourceRef(final TestProbe testProbe, final Source<?, ?> source) {
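    // Keep.right() keeps the CompletionStage of the SourceRef materialized by StreamRefs.sourceRef()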
    testProbe.reply(
            source.toMat(StreamRefs.sourceRef(), Keep.right())
                    .run(ActorMaterializer.create(actorSystem))
                    .toCompletableFuture()
                    .join()
    );
}
 
Example #7
Source File: AkkaHubProxy.java    From RHub with Apache License 2.0
@Override
public Removable addUpstream(Source<Object, NotUsed> publisher) {
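    // Keep.right() keeps the UniqueKillSwitch materialized by busFlow
    // (joined with KillSwitches.singleBidi() in the constructor, see Example #8)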
    UniqueKillSwitch killSwitch =
            publisher.viaMat(busFlow, Keep.right())
                    .to(Sink.ignore())
                    .run(mat);
    subscriptions.put(publisher, killSwitch);
    return () -> AkkaHubProxy.this.removeUpstream(publisher);
}
 
Example #8
Source File: AkkaHubProxy.java    From RHub with Apache License 2.0
public AkkaHubProxy(ActorMaterializer mat) {
    this.mat = mat;
    //  Obtain a Sink and Source which will publish and receive from the "bus" respectively.
    Pair<Sink<Object, NotUsed>, Source<Object, NotUsed>> sinkAndSource =
            MergeHub.of(Object.class, 16)
                    .toMat(BroadcastHub.of(Object.class, 256), Keep.both())
                    .run(mat);

    Sink<Object, NotUsed> sink = sinkAndSource.first();
    source = sinkAndSource.second().takeWhile((Predicate<Object>) o -> o != Done.getInstance());
    //source.runWith(Sink.ignore(), mat);
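    // joinMat(..., Keep.right()) keeps the UniqueKillSwitch of KillSwitches.singleBidi()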
    busFlow = Flow.fromSinkAndSource(sink, source)
            .joinMat(KillSwitches.singleBidi(), Keep.right());
}
 
Example #9
Source File: HttpPushFactoryTest.java    From ditto with Eclipse Public License 2.0
private Pair<SourceQueueWithComplete<HttpRequest>, SinkQueueWithCancel<Try<HttpResponse>>> newSourceSinkQueues(
        final HttpPushFactory underTest) {

    // Keep.left() keeps the SourceQueueWithComplete; Keep.both() pairs it with Sink.queue()'s SinkQueueWithCancel
    return Source.<HttpRequest>queue(10, OverflowStrategy.dropNew())
            .map(r -> Pair.create(r, null))
            .viaMat(underTest.createFlow(actorSystem, actorSystem.log()), Keep.left())
            .map(Pair::first)
            .toMat(Sink.queue(), Keep.both())
            .run(mat);
}
 
Example #10
Source File: KafkaPublisherActor.java    From ditto with Eclipse Public License 2.0
private ActorRef createInternalKafkaProducer(final KafkaConnectionFactory factory,
        final BiFunction<Done, Throwable, Done> completionOrFailureHandler) {

    // Keep.both() pairs the ActorRef of Source.actorRef with the sink's CompletionStage<Done>
    final Pair<ActorRef, CompletionStage<Done>> materializedFlowedValues =
            Source.<ProducerMessage.Envelope<String, String, PassThrough>>actorRef(100,
                    OverflowStrategy.dropHead())
                    .via(factory.newFlow())
                    .toMat(KafkaPublisherActor.publishSuccessSink(), Keep.both())
                    .run(ActorMaterializer.create(getContext()));
    materializedFlowedValues.second().handleAsync(completionOrFailureHandler);
    return materializedFlowedValues.first();
}
 
Example #11
Source File: MessageMappingProcessorActor.java    From ditto with Eclipse Public License 2.0
private SourceQueue<ExternalMessage> materializeInboundStream(final int processorPoolSize) {
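    // Keep.left() keeps the SourceQueue of Source.queue; Sink.foreach's CompletionStage is dropped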
    return Source.<ExternalMessage>queue(getBufferSize(), OverflowStrategy.dropNew())
            // parallelize potentially CPU-intensive payload mapping on this actor's dispatcher
            .mapAsync(processorPoolSize, externalMessage -> CompletableFuture.supplyAsync(
                    () -> mapInboundMessage(externalMessage),
                    getContext().getDispatcher())
            )
            .flatMapConcat(signalSource -> signalSource)
            .toMat(Sink.foreach(this::handleIncomingMappedSignal), Keep.left())
            .run(materializer);
}
 
Example #12
Source File: SearchUpdaterStream.java    From ditto with Eclipse Public License 2.0
/**
 * Start a perpetual search updater stream killed only by the kill-switch.
 *
 * @param actorRefFactory where to create actors for this stream.
 * @return kill-switch to terminate the stream.
 */
public KillSwitch start(final ActorRefFactory actorRefFactory) {
    final Source<Source<AbstractWriteModel, NotUsed>, NotUsed> restartSource = createRestartSource();
    final Sink<Source<AbstractWriteModel, NotUsed>, NotUsed> restartSink = createRestartSink();
    final ActorMaterializer actorMaterializer = ActorMaterializer.create(actorRefFactory);
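    // keep only the kill switch: Keep.right() of viaMat, then Keep.left() of toMat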
    return restartSource.viaMat(KillSwitches.single(), Keep.right())
            .toMat(restartSink, Keep.left())
            .run(actorMaterializer);
}
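
The returned KillSwitch lets a caller stop the stream from outside via shutdown() or abort(Throwable).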
 
Example #13
Source File: PolicyEventForwarder.java    From ditto with Eclipse Public License 2.0
private void restartPolicyReferenceTagStream() {
    terminateStream();
    final ActorRef self = getSelf();
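    // Keep.right() keeps the UniqueKillSwitch, stored so that terminateStream() can stop this stream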
    killSwitch = Source.repeat(Control.DUMP_POLICY_REVISIONS)
            .delay(interval, DelayOverflowStrategy.backpressure())
            .withAttributes(Attributes.inputBuffer(1, 1))
            .viaMat(KillSwitches.single(), Keep.right())
            .mapAsync(1, message ->
                    PatternsCS.ask(self, message, ASK_SELF_TIMEOUT).exceptionally(Function.identity()))
            .flatMapConcat(this::mapDumpResult)
            .to(Sink.actorRef(self, Control.STREAM_COMPLETED))
            .run(materializer);
}
 
Example #14
Source File: TransistorTest.java    From ditto with Eclipse Public License 2.0
/**
 * Connect a transistor to two test sources (collector and base) and a test sink (emitter).
 */
@Before
public void init() {
    system = ActorSystem.create();
    final Source<Integer, TestPublisher.Probe<Integer>> collectorSource = TestSource.probe(system);
    final Source<Integer, TestPublisher.Probe<Integer>> baseSource = TestSource.probe(system);
    final Sink<Integer, TestSubscriber.Probe<Integer>> emitterSink = TestSink.probe(system);
    final Transistor<Integer> underTest = Transistor.of();

    final Graph<SourceShape<Integer>, Pair<TestPublisher.Probe<Integer>, TestPublisher.Probe<Integer>>>
            collectorGateTransistor =
            GraphDSL$.MODULE$.create3(
                    collectorSource, baseSource, underTest,
                    (collector, base, notUsed) -> Pair.create(collector, base),
                    (builder, collectorShape, baseShape, transistorShape) -> {
                        builder.from(collectorShape.out()).toInlet(transistorShape.in0());
                        builder.from(baseShape.out()).toInlet(transistorShape.in1());
                        return SourceShape.of(transistorShape.out());
                    });

    final Pair<Pair<TestPublisher.Probe<Integer>, TestPublisher.Probe<Integer>>, TestSubscriber.Probe<Integer>> m =
            Source.fromGraph(collectorGateTransistor)
                    .toMat(emitterSink, Keep.both())
                    .run(ActorMaterializer.create(system));

    collector = m.first().first();
    base = m.first().second();
    emitter = m.second();
}
 
Example #15
Source File: MergeSortedAsPairTest.java    From ditto with Eclipse Public License 2.0
private void materializeTestProbes() {
    final Pair<Pair<TestPublisher.Probe<Integer>, TestPublisher.Probe<Integer>>,
            TestSubscriber.Probe<Pair<Integer, Integer>>>
            probes =
            mergeSortedAsPairWithMat(TestSource.probe(system), TestSource.probe(system))
                    .toMat(TestSink.probe(system), Keep.both())
                    .run(mat);
    source1Probe = probes.first().first();
    source2Probe = probes.first().second();
    sinkProbe = probes.second();
}
 
Example #16
Source File: HttpFlow.java    From ts-reaktive with MIT License
private Flow<ByteString, ByteString, CompletionStage<HttpResponse>> createFlow(HttpMethod method, Uri uri, Option<ContentType> contentType, Predicate<HttpResponse> isSuccess, HttpHeader... headers) {
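    // Keep.both() pairs the sink's Publisher with the source's Subscriber so that
    // mapMaterializedValue below can wire them into the HTTP request and response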
    Sink<ByteString, Publisher<ByteString>> in = Sink.asPublisher(AsPublisher.WITH_FANOUT); // akka internally recreates this twice, on some errors...
    Source<ByteString, Subscriber<ByteString>> out = Source.asSubscriber();
    
    return Flow.fromSinkAndSourceMat(in, out, Keep.both()).mapMaterializedValue(pair -> {
        RequestEntity entity;
        if (contentType.isDefined()) {
            Source<ByteString, NotUsed> inReader = Source.fromPublisher(pair.first());
            entity = HttpEntities.createChunked(contentType.get(), inReader);
        } else {
            entity = HttpEntities.EMPTY;
        }
        HttpRequest rq = HttpRequest.create().withMethod(method).withUri(uri).addHeaders(Arrays.asList(headers)).withEntity(entity);
        
        return http.singleRequest(rq).thenApply(resp -> {
            if (isSuccess.test(resp)) {
                resp.entity().getDataBytes()
                    .runWith(Sink.fromSubscriber(pair.second()), materializer);
            } else {
                log.info("Http responded error: {} for request {}", resp, rq);
                resp.discardEntityBytes(materializer);
                pair.second().onError(new IllegalStateException("Unsuccessful HTTP response: " + resp + " for " + rq));
            }
            return resp;
        }).exceptionally(x -> {
            Throwable cause = (x instanceof CompletionException) ? x.getCause() : x;
            if (!(cause instanceof IllegalStateException)) {
                log.info("Could not make http request " + rq, cause);
            }
            pair.second().onError(cause);
            throw (cause instanceof RuntimeException) ? (RuntimeException) cause : new RuntimeException(cause);
        });
    });
}
 
Example #17
Source File: EnforcerActor.java    From ditto with Eclipse Public License 2.0
/**
 * Create the sink that defines the behavior of this enforcer actor by creating enforcement tasks for incoming
 * messages.
 *
 * @param enforcementProviders a set of {@link EnforcementProvider}s.
 * @param preEnforcer a function executed before actual enforcement, may be {@code null}.
 * @return a {@link Sink} of {@link Contextual} messages that materializes a {@link CompletionStage} of {@link Done}.
 */
@SuppressWarnings("unchecked") // due to GraphDSL usage
private Sink<Contextual<WithDittoHeaders>, CompletionStage<Done>> assembleSink(
        final Set<EnforcementProvider<?>> enforcementProviders,
        @Nullable final PreEnforcer preEnforcer,
        final ActorRef enforcementScheduler) {

    final PreEnforcer preEnforcerStep =
            preEnforcer != null ? preEnforcer : CompletableFuture::completedStage;
    final Graph<FlowShape<Contextual<WithDittoHeaders>, EnforcementTask>, NotUsed> enforcerFlow =
            GraphDSL.create(
                    Broadcast.<Contextual<WithDittoHeaders>>create(enforcementProviders.size()),
                    Merge.<EnforcementTask>create(enforcementProviders.size(), true),
                    (notUsed1, notUsed2) -> notUsed1,
                    (builder, bcast, merge) -> {
                        final ArrayList<EnforcementProvider<?>> providers = new ArrayList<>(enforcementProviders);
                        for (int i = 0; i < providers.size(); i++) {
                            builder.from(bcast.out(i))
                                    .via(builder.add(providers.get(i).createEnforcementTask(preEnforcerStep)))
                                    .toInlet(merge.in(i));
                        }

                        return FlowShape.of(bcast.in(), merge.out());
                    });

    return Flow.<Contextual<WithDittoHeaders>>create()
            .via(enforcerFlow)
            // Keep.right() keeps Sink.foreach's CompletionStage<Done> as the sink's materialized value
            .toMat(Sink.foreach(task -> enforcementScheduler.tell(task, ActorRef.noSender())), Keep.right());
}
 
Example #18
Source File: Main.java    From ari-proxy with GNU Affero General Public License v3.0
private static void runAriEventProcessor(
		Config serviceConfig,
		ActorSystem system,
		ActorRef callContextProvider,
		ActorRef metricsService,
		Runnable applicationReplacedHandler) {
	// see: https://doc.akka.io/docs/akka/2.5.8/java/stream/stream-error.html#delayed-restarts-with-a-backoff-stage
	final Flow<Message, Message, NotUsed> restartWebsocketFlow = RestartFlow.withBackoff(
			Duration.ofSeconds(3), // min backoff
			Duration.ofSeconds(30), // max backoff
			0.2, // adds 20% "noise" to vary the intervals slightly
			() -> createWebsocketFlow(system, serviceConfig.getString(WEBSOCKET_URI))
	);

	// Keep.right() keeps the restart flow's NotUsed; Source.maybe()'s CompletableFuture is dropped
	final Source<Message, NotUsed> source = Source.<Message>maybe().viaMat(restartWebsocketFlow, Keep.right());

	final ProducerSettings<String, String> producerSettings = ProducerSettings
			.create(system, new StringSerializer(), new StringSerializer())
			.withBootstrapServers(serviceConfig.getConfig(KAFKA).getString(BOOTSTRAP_SERVERS));

	final Sink<ProducerRecord<String, String>, NotUsed> sink = Producer
			.plainSink(producerSettings)
			.mapMaterializedValue(done -> NotUsed.getInstance());

	final Run processingPipeline = WebsocketMessageToProducerRecordTranslator.eventProcessing()
			.on(system)
			.withHandler(applicationReplacedHandler)
			.withCallContextProvider(callContextProvider)
			.withMetricsService(metricsService)
			.from(source)
			.to(sink);

	Match(Try.of(() -> processingPipeline.run())).of(
			Case($Success($()), mat -> run(() -> system.log().debug("Successfully started ari event processor."))),
			Case($Failure($(instanceOf(KafkaException.class))), err -> run(() -> {
				system.log().error(err, "Failed to start ari event processor.");
				System.exit(-1);
			}))
	);
}
 
Example #19
Source File: DataImporter.java    From tutorials with MIT License
private Sink<Double, CompletionStage<Done>> storeAverages() {
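    // Keep.right() keeps Sink.ignore()'s CompletionStage<Done>, which completes once all saves have finished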
    return Flow.of(Double.class)
            .mapAsyncUnordered(4, averageRepository::save)
            .toMat(Sink.ignore(), Keep.right());
}
 
Example #20
Source File: AkkaHubProxy.java    From RHub with Apache License 2.0
@Override
public void emit(Object event) {
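    // the UniqueKillSwitch materialized by busFlow (Keep.right) is discarded here;
    // each emit() runs a short-lived stream into the hub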
    Source.single(event).viaMat(busFlow, Keep.right())
            .to(Sink.ignore())
            .run(mat);
}
 
Example #21
Source File: AmqpConnectorsTest.java    From rabbitmq-mock with Apache License 2.0
@Test
public void publishAndConsumeRpcWithoutAutoAck() throws Exception {

    final String queueName = "amqp-conn-it-spec-rpc-queue-" + System.currentTimeMillis();
    final QueueDeclaration queueDeclaration = QueueDeclaration.create(queueName);

    final List<String> input = Arrays.asList("one", "two", "three", "four", "five");

    final Flow<WriteMessage, CommittableReadResult, CompletionStage<String>> amqpRpcFlow =
        AmqpRpcFlow.committableFlow(
            AmqpWriteSettings.create(connectionProvider)
                .withRoutingKey(queueName)
                .withDeclaration(queueDeclaration),
            10,
            1);
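    // Keep.right() keeps the RPC flow's CompletionStage<String>;
    // Keep.both() pairs it with the TestSink probe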
    Pair<CompletionStage<String>, TestSubscriber.Probe<ReadResult>> result =
        Source.from(input)
            .map(ByteString::fromString)
            .map(bytes -> WriteMessage.create(bytes))
            .viaMat(amqpRpcFlow, Keep.right())
            .mapAsync(1, cm -> cm.ack().thenApply(unused -> cm.message()))
            .toMat(TestSink.probe(system), Keep.both())
            .run(materializer);

    result.first().toCompletableFuture().get(5, TimeUnit.SECONDS);

    Sink<WriteMessage, CompletionStage<Done>> amqpSink =
        AmqpSink.createReplyTo(AmqpReplyToSinkSettings.create(connectionProvider));

    final Source<ReadResult, NotUsed> amqpSource =
        AmqpSource.atMostOnceSource(
            NamedQueueSourceSettings.create(connectionProvider, queueName)
                .withDeclaration(queueDeclaration),
            1);

    UniqueKillSwitch sourceToSink =
        amqpSource
            .viaMat(KillSwitches.single(), Keep.right())
            .map(b -> WriteMessage.create(b.bytes()).withProperties(b.properties()))
            .to(amqpSink)
            .run(materializer);

    List<ReadResult> probeResult =
        JavaConverters.seqAsJavaListConverter(
            result.second().toStrict(Duration.create(5, TimeUnit.SECONDS)))
            .asJava();
    assertEquals(
        probeResult.stream().map(s -> s.bytes().utf8String()).collect(Collectors.toList()), input);
    sourceToSink.shutdown();
}
 
Example #22
Source File: ThingsSseRouteBuilder.java    From ditto with Eclipse Public License 2.0
private Route createSseRoute(final RequestContext ctx, final CompletionStage<DittoHeaders> dittoHeadersStage,
        final Map<String, String> parameters) {

    @Nullable final String filterString = parameters.get(PARAM_FILTER);
    final List<String> namespaces = getNamespaces(parameters.get(PARAM_NAMESPACES));
    final List<ThingId> targetThingIds = getThingIds(parameters.get(ThingsParameter.IDS.toString()));
    @Nullable final JsonFieldSelector fields = getFieldSelector(parameters.get(ThingsParameter.FIELDS.toString()));
    @Nullable final JsonFieldSelector extraFields = getFieldSelector(parameters.get(PARAM_EXTRA_FIELDS));
    final SignalEnrichmentFacade facade =
            signalEnrichmentProvider == null ? null : signalEnrichmentProvider.getFacade(ctx.getRequest());

    final CompletionStage<Source<ServerSentEvent, NotUsed>> sseSourceStage =
            dittoHeadersStage.thenApply(dittoHeaders -> {

                sseAuthorizationEnforcer.checkAuthorization(ctx, dittoHeaders);

                if (filterString != null) {
                    // will throw an InvalidRqlExpressionException if the RQL expression was not valid:
                    queryFilterCriteriaFactory.filterCriteria(filterString, dittoHeaders);
                }

                final Source<SessionedJsonifiable, ActorRef> publisherSource =
                        Source.actorPublisher(EventAndResponsePublisher.props(10));

                return publisherSource.mapMaterializedValue(
                        publisherActor -> {
                            final String requestCorrelationId = dittoHeaders.getCorrelationId()
                                    .orElseThrow(() -> new IllegalStateException(
                                            "Expected correlation-id in SSE DittoHeaders: " + dittoHeaders));

                            final CharSequence connectionCorrelationId = StreamingSessionIdentifier.of(
                                    requestCorrelationId, UUID.randomUUID().toString());

                            final JsonSchemaVersion jsonSchemaVersion = dittoHeaders.getSchemaVersion()
                                    .orElse(JsonSchemaVersion.LATEST);
                            sseConnectionSupervisor.supervise(publisherActor, connectionCorrelationId,
                                    dittoHeaders);
                            streamingActor.tell(
                                    new Connect(publisherActor, connectionCorrelationId, STREAMING_TYPE_SSE,
                                            jsonSchemaVersion, null),
                                    null);
                            final StartStreaming startStreaming =
                                    StartStreaming.getBuilder(StreamingType.EVENTS, connectionCorrelationId,
                                            dittoHeaders.getAuthorizationContext())
                                            .withNamespaces(namespaces)
                                            .withFilter(filterString)
                                            .withExtraFields(extraFields)
                                            .build();
                            streamingActor.tell(startStreaming, null);
                            return NotUsed.getInstance();
                        })
                        .mapAsync(streamingConfig.getParallelism(), jsonifiable ->
                                postprocess(jsonifiable, facade, targetThingIds, namespaces, fields))
                        .mapConcat(jsonObjects -> jsonObjects)
                        .map(jsonValue -> {
                            THINGS_SSE_COUNTER.increment();
                            return ServerSentEvent.create(jsonValue.toString());
                        })
                        .log("SSE " + PATH_THINGS)
                        // sniffer shouldn't sniff heartbeats
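                        // Keep.none() discards the sniffer flow's materialized value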
                        .viaMat(eventSniffer.toAsyncFlow(ctx.getRequest()), Keep.none())
                        .keepAlive(Duration.ofSeconds(1), ServerSentEvent::heartbeat);
            });

    return completeOKWithFuture(sseSourceStage, EventStreamMarshalling.toEventStream());
}
 
Example #23
Source File: SearchSourceTest.java    From ditto with Eclipse Public License 2.0
private SourceRef<Object> materializeSourceProbe() {
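    // Keep.both() pairs the TestPublisher probe with the CompletionStage<SourceRef<Object>>
    // materialized by StreamRefs.sourceRef()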
    final Pair<TestPublisher.Probe<Object>, CompletionStage<SourceRef<Object>>> materializedValues =
            TestSource.probe(actorSystem).toMat(StreamRefs.sourceRef(), Keep.both()).run(materializer);
    sourceProbe = materializedValues.first();
    return materializedValues.second().toCompletableFuture().join();
}
 
Example #24
Source File: AriCommandResponseKafkaProcessor.java    From ari-proxy with GNU Affero General Public License v3.0
private static ActorMaterializer run(
		ActorSystem system,
		CommandResponseHandler commandResponseHandler,
		ActorRef callContextProvider,
		ActorRef metricsService,
		Source<ConsumerRecord<String, String>, NotUsed> source,
		Sink<ProducerRecord<String, String>, NotUsed> sink) {

	final Function<Throwable, Directive> decider = t -> {
		system.log().error(t, "Error in some stage; restarting stream ...");
		return Supervision.restart();
	};

	final Config serviceConfig = ConfigFactory.load().getConfig(SERVICE);
	final String stasisApp = serviceConfig.getString(STASIS_APP);

	final Config kafkaConfig = serviceConfig.getConfig(KAFKA);
	final String commandsTopic = kafkaConfig.getString(COMMANDS_TOPIC);
	final String eventsAndResponsesTopic = kafkaConfig.getString(EVENTS_AND_RESPONSES_TOPIC);

	final Config restConfig = serviceConfig.getConfig(REST);
	final String restUri = restConfig.getString(URI);
	final String restUser = restConfig.getString(USER);
	final String restPassword = restConfig.getString(PASSWORD);

	final ActorMaterializer materializer = ActorMaterializer.create(
			ActorMaterializerSettings.create(system).withSupervisionStrategy(decider),
			system);

	source
			.log(">>>   ARI COMMAND", ConsumerRecord::value).withAttributes(LOG_LEVELS)
			.map(AriCommandResponseKafkaProcessor::unmarshallAriCommandEnvelope)
			.map(msgEnvelope -> {
				AriCommandResponseProcessing
						.registerCallContext(callContextProvider, msgEnvelope.getCallContext(), msgEnvelope.getAriCommand())
						.getOrElseThrow(t -> t).run();
				return Tuple.of(
						msgEnvelope.getAriCommand(),
						new CallContextAndCommandId(msgEnvelope.getCallContext(), msgEnvelope.getCommandId())
				);
			})
			.map(ariCommandAndContext -> ariCommandAndContext
					.map1(cmd -> toHttpRequest(cmd, restUri, restUser, restPassword)))
			.mapAsync(1, requestAndContext -> commandResponseHandler.apply(requestAndContext)
					.thenApply(response -> Tuple.of(response, requestAndContext._2)))
			.wireTap(Sink.foreach(gatherMetrics(metricsService, stasisApp)))
			.mapAsync(1, rawHttpResponseAndContext -> toAriResponse(rawHttpResponseAndContext, materializer))
			.map(ariResponseAndContext -> envelopeAriResponse(ariResponseAndContext._1, ariResponseAndContext._2,
					commandsTopic))
			.map(ariMessageEnvelopeAndContext -> ariMessageEnvelopeAndContext
					.map1(AriCommandResponseKafkaProcessor::marshallAriMessageEnvelope))
			.map(ariResponseStringAndContext -> new ProducerRecord<>(
					eventsAndResponsesTopic,
					ariResponseStringAndContext._2.getCallContext(),
					ariResponseStringAndContext._1)
			)
			.log(">>>   ARI RESPONSE", ProducerRecord::value).withAttributes(LOG_LEVELS)
			.toMat(sink, Keep.none())
			.run(materializer);

	return materializer;
}