akka.stream.javadsl.Sink Java Examples

The following examples show how to use akka.stream.javadsl.Sink. Each example is taken from an open-source project; the project, source file, and license are noted above the code.
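
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name and values are illustrative) of the Sink variants that recur throughout this page: Sink.seq(), Sink.head(), Sink.ignore() and Sink.foreach().

import java.util.List;
import java.util.concurrent.CompletionStage;

import akka.Done;
import akka.actor.ActorSystem;
import akka.stream.ActorMaterializer;
import akka.stream.Materializer;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;

public class SinkBasics {
    public static void main(String[] args) {
        final ActorSystem system = ActorSystem.create("sink-basics");
        final Materializer materializer = ActorMaterializer.create(system);

        // Sink.seq() collects every element into a List, materialized as a CompletionStage.
        final CompletionStage<List<Integer>> all =
                Source.range(1, 3).runWith(Sink.seq(), materializer);

        // Sink.head() completes with the first element and cancels the rest of the stream.
        final CompletionStage<Integer> first =
                Source.range(1, 3).runWith(Sink.head(), materializer);

        // Sink.ignore() drains the stream, materializing Done once it completes.
        final CompletionStage<Done> drained =
                Source.range(1, 3).runWith(Sink.ignore(), materializer);

        // Sink.foreach() runs a side effect for every element.
        Source.range(1, 3).runWith(Sink.foreach(i -> System.out.println(i)), materializer);

        all.thenAccept(list -> System.out.println("seq: " + list));
        first.thenAccept(head -> System.out.println("head: " + head));
        drained.thenRun(system::terminate);
    }
}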
Example #1
Source File: IndexInitializer.java    From ditto with Eclipse Public License 2.0
private CompletionStage<Done> createNonExistingIndices(final String collectionName,
        final List<Index> indices) {
    if (indices.isEmpty()) {
        LOGGER.warn("No indices are defined, thus no indices are created.");
        return CompletableFuture.completedFuture(Done.getInstance());
    }
    return indexOperations.getIndicesExceptDefaultIndex(collectionName)
            .flatMapConcat(
                    existingIndices -> {
                        LOGGER.info("Create non-existing indices: Existing indices are: {}", existingIndices);
                        final List<Index> indicesToCreate = excludeIndices(indices, existingIndices);
                        LOGGER.info("Indices to create are: {}", indicesToCreate);
                        return createIndices(collectionName, indicesToCreate);
                    })
            .runWith(Sink.ignore(), materializer);
}
 
Example #2
Source File: S3Backup.java    From ts-reaktive with MIT License
private Receive startBackup(long offset) {
    query
        .eventsByTag(tag, NoOffset.getInstance())
        // create backups of max [N] elements, or at least every [T] on activity
        // FIXME write a stage that, instead of buffering each chunk into memory, creates sub-streams instead.
        .groupedWithin(eventChunkSize, eventChunkDuration)
        .filter(list -> list.size() > 0)
        .mapAsync(4, list -> s3.store(tag, Vector.ofAll(list)).thenApply(done -> list.get(list.size() - 1).offset()))
        .runWith(Sink.actorRefWithAck(self(), "init", "ack", "done", Failure::new), materializer);
    
    return ReceiveBuilder.create()
        .matchEquals("init", msg -> sender().tell("ack", self()))
        .match(Long.class, l -> pipe(s3.saveOffset(l).thenApply(done -> "ack"), context().dispatcher()).to(sender()))
        .match(Failure.class, msg -> {
            log.error("Stream failed, rethrowing", msg.cause());
            throw new RuntimeException(msg.cause());
        })
        .matchEquals("done", msg -> { throw new IllegalStateException("eventsByTag completed, this should not happen. Killing actor, hoping for restart"); })
        .build();
}
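
The "init"/"ack"/"done" strings above are the handshake of Sink.actorRefWithAck: the sink first sends the init message, then one stream element at a time, waiting for the ack message before emitting the next, and finally sends the completion message, or a Failure built by the supplied factory. A minimal receiving actor compatible with that contract could look like the following sketch (assuming akka.actor.Status.Failure; the class name is illustrative):

import akka.actor.AbstractActor;
import akka.actor.Status.Failure;

// Counterpart sketch for Sink.actorRefWithAck(ref, "init", "ack", "done", Failure::new).
class AckingActor extends AbstractActor {
    @Override
    public Receive createReceive() {
        return receiveBuilder()
                .matchEquals("init", msg -> sender().tell("ack", self())) // stream is starting
                .matchEquals("done", msg -> context().stop(self()))       // stream completed
                .match(Failure.class, failure -> context().stop(self()))  // stream failed
                .matchAny(element -> sender().tell("ack", self()))        // ack each element
                .build();
    }
}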
 
Example #3
Source File: SearchActorIT.java    From ditto with Eclipse Public License 2.0
private void insertTestThings() {
    final Thing baseThing = ThingsModelFactory.newThingBuilder()
            .setId(ThingId.of("thing", "00"))
            .setRevision(1234L)
            .setPermissions(AUTH_CONTEXT.getFirstAuthorizationSubject().orElseThrow(AssertionError::new),
                    Permission.READ)
            .setAttribute(JsonPointer.of("x"), JsonValue.of(5))
            .build();

    final Thing irrelevantThing = baseThing.toBuilder().removeAllAttributes().build();

    writePersistence.writeThingWithAcl(template(baseThing, 0, "a"))
            .concat(writePersistence.writeThingWithAcl(template(baseThing, 1, "b")))
            .concat(writePersistence.writeThingWithAcl(template(baseThing, 2, "a")))
            .concat(writePersistence.writeThingWithAcl(template(baseThing, 3, "b")))
            .concat(writePersistence.writeThingWithAcl(template(baseThing, 4, "c")))
            .concat(writePersistence.writeThingWithAcl(template(irrelevantThing, 5, "c")))
            .runWith(Sink.ignore(), materializer)
            .toCompletableFuture()
            .join();
}
 
Example #4
Source File: MongoReadJournalIT.java    From ditto with Eclipse Public License 2.0
@Test
public void extractJournalPidsFromEventsAndNotSnapshots() {
    insert("test_journal", new Document().append("pid", "pid3").append("to", 2L));
    insert("test_journal", new Document().append("pid", "pid4").append("to", 2L));
    insert("test_journal", new Document().append("pid", "pid1").append("to", 1L));
    insert("test_journal", new Document().append("pid", "pid2").append("to", 1L));
    insert("test_snaps", new Document().append("pid", "pid5").append("sn", 3L));
    insert("test_snaps", new Document().append("pid", "pid6").append("sn", 4L));

    final List<String> pids =
            readJournal.getJournalPids(2, Duration.ZERO, materializer)
                    .runWith(Sink.seq(), materializer)
                    .toCompletableFuture().join();

    assertThat(pids).containsExactly("pid1", "pid2", "pid3", "pid4");
}
 
Example #5
Source File: S3Restore.java    From ts-reaktive with MIT License
private void startRestore() {
    s3
    .list(tag)
    // skip over entries until the one BEFORE the entry where startTime >= offset (since the one before may have been only partially restored)
    .via(dropUntilNext(l -> S3.getStartInstant(l).toEpochMilli() >= offset, true))
    .flatMapConcat(entry -> s3.loadEvents(entry.key().substring(entry.key().lastIndexOf("/") + 1)))
    .mapAsync(maxInFlight, e -> {
        log.debug("Replaying {}:{}", e.getPersistenceId(), e.getSequenceNr());
        return ask(shardRegion, e, timeout);
    })
    .map(resp -> {
        log.debug("Responded {}", resp);
        return (Long) resp;
    })
    // only save one lastOffset update per minute, and only the lowest one
    .conflate((Long l1, Long l2) -> l1 < l2 ? l1 : l2)
    .runWith(Sink.actorRefWithAck(self(), "init", "ack", "done", Failure::new), materializer);
}
 
Example #6
Source File: WebsocketMessageToProducerRecordTranslatorITCase.java    From ari-proxy with GNU Affero General Public License v3.0
@Test
void verifyProcessingPipelineWorksAsExpectedForBogusMessages() {

	final TestKit catchAllProbe = new TestKit(system);

	final Source<Message, NotUsed> source = Source.single(new Strict("invalid message from ws"));
	final Sink<ProducerRecord<String, String>, NotUsed> sink = Sink.actorRef(catchAllProbe.getRef(), new ProducerRecord<String, String>("none", "completed"));

	WebsocketMessageToProducerRecordTranslator.eventProcessing()
			.on(system)
			.withHandler(() -> catchAllProbe.getRef().tell("Application replaced", catchAllProbe.getRef()))
			.withCallContextProvider(catchAllProbe.getRef())
			.withMetricsService(catchAllProbe.getRef())
			.from(source)
			.to(sink)
			.run();

	final ProducerRecord<String, String> completeMsg = catchAllProbe.expectMsgClass(ProducerRecord.class);
	assertThat(completeMsg.topic(), is("none"));
	assertThat(completeMsg.value(), is("completed"));
}
 
Example #7
Source File: SearchUpdaterStream.java    From ditto with Eclipse Public License 2.0
private Sink<Source<AbstractWriteModel, NotUsed>, NotUsed> createRestartSink() {
    final StreamConfig streamConfig = searchConfig.getStreamConfig();
    final PersistenceStreamConfig persistenceConfig = streamConfig.getPersistenceConfig();

    final int parallelism = persistenceConfig.getParallelism();
    final int maxBulkSize = persistenceConfig.getMaxBulkSize();
    final Duration writeInterval = streamConfig.getWriteInterval();
    final Sink<Source<AbstractWriteModel, NotUsed>, NotUsed> sink =
            mongoSearchUpdaterFlow.start(parallelism, maxBulkSize, writeInterval)
                    .via(bulkWriteResultAckFlow.start())
                    .log("SearchUpdaterStream/BulkWriteResult")
                    .withAttributes(Attributes.logLevels(
                            Attributes.logLevelInfo(),
                            Attributes.logLevelWarning(),
                            Attributes.logLevelError()))
                    .to(Sink.ignore());

    final ExponentialBackOffConfig backOffConfig = persistenceConfig.getExponentialBackOffConfig();

    return RestartSink.withBackoff(backOffConfig.getMin(), backOffConfig.getMax(), backOffConfig.getRandomFactor(),
            () -> sink);
}
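
RestartSink.withBackoff takes the minimum backoff, the maximum backoff, a random jitter factor and a factory that supplies the sink blueprint for each (re)materialization. A stand-alone sketch of the same pattern (durations and names are made up; assumes an Akka version with the java.time.Duration overload):

final Sink<String, NotUsed> resilientSink = RestartSink.withBackoff(
        Duration.ofSeconds(1),   // minBackoff before the first restart
        Duration.ofSeconds(30),  // cap for the exponentially growing backoff
        0.2,                     // randomFactor: jitter so restarts don't synchronize
        () -> Sink.foreach(element -> System.out.println(element)));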
 
Example #8
Source File: StatsRoute.java    From ditto with Eclipse Public License 2.0
private Route handleSudoCountThingsPerRequest(final RequestContext ctx, final SudoCountThings command) {
    final CompletableFuture<HttpResponse> httpResponseFuture = new CompletableFuture<>();

    Source.single(command)
            .to(Sink.actorRef(createHttpPerRequestActor(ctx, httpResponseFuture),
                    AbstractHttpRequestActor.COMPLETE_MESSAGE))
            .run(materializer);

    final CompletionStage<HttpResponse> allThingsCountHttpResponse = Source.fromCompletionStage(httpResponseFuture)
            .flatMapConcat(httpResponse -> httpResponse.entity().getDataBytes())
            .fold(ByteString.empty(), ByteString::concat)
            .map(ByteString::utf8String)
            .map(Integer::valueOf)
            .map(count -> JsonObject.newBuilder().set("allThingsCount", count).build())
            .map(jsonObject -> HttpResponse.create()
                    .withEntity(ContentTypes.APPLICATION_JSON, ByteString.fromString(jsonObject.toString()))
                    .withStatus(HttpStatusCode.OK.toInt()))
            .runWith(Sink.head(), materializer);

    return completeWithFuture(allThingsCountHttpResponse);
}
 
Example #9
Source File: AriCommandResponseKafkaProcessorTest.java    From ari-proxy with GNU Affero General Public License v3.0
@Test
void properlyHandleInvalidCommandMessage() {
	final TestKit kafkaProducer = new TestKit(system);
	final TestKit metricsService = new TestKit(system);
	final TestKit callContextProvider = new TestKit(system);

	final ConsumerRecord<String, String> consumerRecord = new ConsumerRecord<>("topic", 0, 0,
			"key", "NOT JSON");
	final Source<ConsumerRecord<String, String>, NotUsed> source = Source.single(consumerRecord);
	final Sink<ProducerRecord<String, String>, NotUsed> sink = Sink.<ProducerRecord<String, String>>ignore()
			.mapMaterializedValue(q -> NotUsed.getInstance());

	AriCommandResponseKafkaProcessor.commandResponseProcessing()
			.on(system)
			.withHandler(requestAndContext -> Http.get(system).singleRequest(requestAndContext._1))
			.withCallContextProvider(callContextProvider.getRef())
			.withMetricsService(metricsService.getRef())
			.from(source)
			.to(sink)
			.run();

	kafkaProducer.expectNoMsg(Duration.apply(250, TimeUnit.MILLISECONDS));
}
 
Example #10
Source File: StreamIT.java    From lagom-example with Apache License 2.0
@Test
public void helloStream() throws Exception {
    // It is important to concat our source with a maybe: that keeps the connection from being closed once
    // we've finished feeding our elements in. We then take 3 from the response stream, which ensures the
    // connection does get closed once we've received the 3 elements.
    Source<String, ?> response = await(streamService.stream().invoke(
            Source.from(Arrays.asList("a", "b", "c"))
                    .concat(Source.maybe())));
    List<String> messages = await(response.take(3).runWith(Sink.seq(), mat));
    assertEquals(Arrays.asList("Hello, a!", "Hello, b!", "Hello, c!"), messages);
}
 
Example #11
Source File: SearchActor.java    From ditto with Eclipse Public License 2.0
private <T extends Command> void executeCount(final T countCommand,
        final Function<T, Query> queryParseFunction,
        final boolean isSudo) {
    final DittoHeaders dittoHeaders = countCommand.getDittoHeaders();
    final Optional<String> correlationIdOpt = dittoHeaders.getCorrelationId();
    LogUtil.enhanceLogWithCorrelationId(log, correlationIdOpt);
    log.info("Processing CountThings command: {}", countCommand);
    final JsonSchemaVersion version = countCommand.getImplementedSchemaVersion();

    final String queryType = "count";

    final StartedTimer countTimer = startNewTimer(version, queryType);

    final StartedTimer queryParsingTimer = countTimer.startNewSegment(QUERY_PARSING_SEGMENT_NAME);

    final ActorRef sender = getSender();

    final Source<Object, ?> replySource = createQuerySource(queryParseFunction, countCommand)
            .flatMapConcat(query -> {
                LogUtil.enhanceLogWithCorrelationId(log, correlationIdOpt);
                stopTimer(queryParsingTimer);
                final StartedTimer databaseAccessTimer =
                        countTimer.startNewSegment(DATABASE_ACCESS_SEGMENT_NAME);

                final Source<Long, NotUsed> countResultSource = isSudo
                        ? searchPersistence.sudoCount(query)
                        : searchPersistence.count(query,
                        countCommand.getDittoHeaders().getAuthorizationContext().getAuthorizationSubjectIds());

                return processSearchPersistenceResult(countResultSource, dittoHeaders)
                        .via(Flow.fromFunction(result -> {
                            stopTimer(databaseAccessTimer);
                            return result;
                        }))
                        .map(count -> CountThingsResponse.of(count, dittoHeaders));
            })
            .via(stopTimerAndHandleError(countTimer, countCommand));

    Patterns.pipe(replySource.runWith(Sink.head(), materializer), getContext().dispatcher()).to(sender);
}
 
Example #12
Source File: CsvProtocolSpec.java    From ts-reaktive with MIT License
private <T> List<T> parse(Protocol<CsvEvent, T> proto, String input)
    throws InterruptedException, ExecutionException, TimeoutException {
    List<T> output = Source.single(input)
        .via(new CsvParser(CsvSettings.RFC4180))
        .via(ProtocolReader.of(proto))
        .runWith(Sink.seq(), materializer)
        .toCompletableFuture()
        .get(1, TimeUnit.SECONDS);
    return output;
}
 
Example #13
Source File: ResumeSourceTest.java    From ditto with Eclipse Public License 2.0
@Before
public void init() {
    system = ActorSystem.create();
    mat = ActorMaterializer.create(system);

    rematerializeSource();

    // materialize sink once - it never fails.
    final Sink<Integer, TestSubscriber.Probe<Integer>> sink = TestSink.probe(system);
    final Pair<TestSubscriber.Probe<Integer>, Sink<Integer, NotUsed>> sinkPair = sink.preMaterialize(mat);
    sinkProbe = sinkPair.first();
    testSink = sinkPair.second();
}
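
Once the sink is pre-materialized like this, tests can drive demand and assert on elements through the probe. A hypothetical continuation of the fixture above:

// Sketch: run a source into the pre-materialized sink, then assert via the probe.
Source.range(1, 3).runWith(testSink, mat);
sinkProbe.request(3)
        .expectNext(1)
        .expectNext(2)
        .expectNext(3)
        .expectComplete();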
 
Example #14
Source File: AkkaProcProxy.java    From RHub with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
protected Publisher hide(Processor processor) {
    return (Publisher) Source
            .fromPublisher(processor)
            .runWith(Sink.asPublisher(AsPublisher.WITH_FANOUT), mat);
}
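
Sink.asPublisher materializes the stream as a Reactive Streams Publisher; AsPublisher.WITH_FANOUT allows several downstream Subscribers at the cost of running at the pace of the slowest one. A minimal round-trip sketch (names are illustrative):

// Expose a stream as a Publisher, then consume it again as a Source.
final Publisher<String> publisher = Source.from(Arrays.asList("x", "y"))
        .runWith(Sink.asPublisher(AsPublisher.WITH_FANOUT), materializer);
Source.fromPublisher(publisher)
        .runWith(Sink.foreach(s -> System.out.println(s)), materializer);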
 
Example #15
Source File: EventSniffer.java    From ditto with Eclipse Public License 2.0
/**
 * Create an async flow for event sniffing.
 *
 * @param request the HTTP request that started the event stream.
 * @return flow to pass events through with a wiretap attached over an async barrier to the sink for sniffed events.
 */
default Flow<T, T, NotUsed> toAsyncFlow(final HttpRequest request) {
    return Flow.<T>create().wireTap(
            Flow.<T>create()
                    .async()
                    .to(Sink.lazyInitAsync(() -> CompletableFuture.completedFuture(
                            createSink(request)))));
}
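
Sink.lazyInitAsync defers creation of the inner sink until the first element arrives, so no sniffer sink is materialized for streams that never emit anything. A minimal sketch (hypothetical names; later Akka versions supersede this factory with Sink.lazyCompletionStageSink):

// The inner sink is only created once the first element arrives.
final Sink<String, ?> lazySink = Sink.lazyInitAsync(
        () -> CompletableFuture.completedFuture(Sink.foreach(s -> System.out.println(s))));
Source.single("first element").runWith(lazySink, materializer);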
 
Example #16
Source File: DataCenterForwarder.java    From ts-reaktive with MIT License
private Sink<EventEnvelope,NotUsed> filteredDataCenterSink() {
    log.debug("filteredDataCenterSink()");
    return Flow.<EventEnvelope>create()
        .mapAsync(parallelism, e -> {
            return visibilityRepo.isVisibleTo(dataCenter, e.persistenceId()).thenApply(v -> {
                log.debug("Visibility of {}: {}", e, v);
                return Tuple.of(e,v);});
        })
        .filter(t -> t._2)
        .map(t -> t._1)
        .via(dataCenter.uploadFlow())
        .map(EventDelivered::new)
        .to(Sink.actorRef(self(), new Failure(new IllegalStateException("Remote datacenter closed connection"))));
}
 
Example #17
Source File: ThingsJournalTestHelper.java    From ditto with Eclipse Public License 2.0
private <T> List<T> runBlockingWithReturn(final Source<T, NotUsed> publisher) {
    final CompletionStage<List<T>> done = publisher.runWith(Sink.seq(), mat);
    try {
        return done.toCompletableFuture().get(WAIT_TIMEOUT, TimeUnit.SECONDS);
    } catch (final InterruptedException | ExecutionException | TimeoutException e) {
        throw new IllegalStateException(e);
    }
}
 
Example #18
Source File: KafkaPublisherActor.java    From ditto with Eclipse Public License 2.0
private static Sink<ProducerMessage.Results<String, String, PassThrough>, CompletionStage<Done>> publishSuccessSink() {

    // basically, we don't know if the 'publish' will succeed or fail. We would need to write our own
    // GraphStage actor for Kafka and MQTT, since alpakka doesn't provide this useful information for us.
    return Sink.foreach(results -> {
        final ConnectionMonitor connectionMonitor = results.passThrough().connectionMonitor;
        connectionMonitor.success(results.passThrough().externalMessage);
    });
}
 
Example #19
Source File: SearchActor.java    From ditto with Eclipse Public License 2.0
private void stream(final StreamThings streamThings) {
    log.withCorrelationId(streamThings)
            .info("Processing StreamThings command: {}", streamThings);
    final JsonSchemaVersion version = streamThings.getImplementedSchemaVersion();
    final String queryType = "query"; // same as queryThings
    final StartedTimer searchTimer = startNewTimer(version, queryType);
    final StartedTimer queryParsingTimer = searchTimer.startNewSegment(QUERY_PARSING_SEGMENT_NAME);
    final ActorRef sender = getSender();
    final Set<String> namespaces = streamThings.getNamespaces().orElse(null);
    final Source<Optional<ThingsSearchCursor>, NotUsed> cursorSource =
            ThingsSearchCursor.extractCursor(streamThings);
    final Source<SourceRef<String>, NotUsed> sourceRefSource = cursorSource.flatMapConcat(cursor -> {
        cursor.ifPresent(c -> c.logCursorCorrelationId(log, streamThings));
        return createQuerySource(queryParser::parse, streamThings).flatMapConcat(parsedQuery -> {
            final Query query = ThingsSearchCursor.adjust(cursor, parsedQuery, queryParser.getCriteriaFactory());
            stopTimer(queryParsingTimer);
            searchTimer.startNewSegment(DATABASE_ACCESS_SEGMENT_NAME); // segment stopped by stopTimerAndHandleError
            final List<String> subjectIds = streamThings.getDittoHeaders().getAuthorizationSubjects();
            final CompletionStage<SourceRef<String>> sourceRefFuture =
                    searchPersistence.findAllUnlimited(query, subjectIds, namespaces)
                            .map(ThingId::toString) // for serialization???
                            .runWith(StreamRefs.sourceRef(), materializer);
            return Source.fromCompletionStage(sourceRefFuture);
        });
    });
    final Source<Object, NotUsed> replySourceWithErrorHandling =
            sourceRefSource.via(stopTimerAndHandleError(searchTimer, streamThings));

    Patterns.pipe(replySourceWithErrorHandling.runWith(Sink.head(), materializer), getContext().dispatcher())
            .to(sender);
}
 
Example #20
Source File: AkkaHttpClientService.java    From mutual-tls-ssl with Apache License 2.0
private String extractBody(HttpResponse httpResponse) {
    return httpResponse.entity()
            .getDataBytes()
            .fold(ByteString.empty(), ByteString::concat)
            .map(ByteString::utf8String)
            .runWith(Sink.head(), actorSystem)
            .toCompletableFuture()
            .join();
}
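
Note that this snippet passes the ActorSystem where the earlier examples pass an ActorMaterializer: since Akka 2.6 the run and runWith overloads also accept a ClassicActorSystemProvider and use the system-wide materializer, so no separate materializer needs to be created.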
 
Example #21
Source File: AkkaHubProxy.java    From RHub with Apache License 2.0
public AkkaHubProxy(ActorMaterializer mat) {
    this.mat = mat;
    //  Obtain a Sink and Source which will publish and receive from the "bus" respectively.
    Pair<Sink<Object, NotUsed>, Source<Object, NotUsed>> sinkAndSource =
            MergeHub.of(Object.class, 16)
                    .toMat(BroadcastHub.of(Object.class, 256), Keep.both())
                    .run(mat);

    Sink<Object, NotUsed> sink = sinkAndSource.first();
    source = sinkAndSource.second().takeWhile((Predicate<Object>) o -> o != Done.getInstance());
    //source.runWith(Sink.ignore(), mat);
    busFlow = Flow.fromSinkAndSource(sink, source)
            .joinMat(KillSwitches.singleBidi(), Keep.right());
}
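
Everything written to the MergeHub sink is fanned out to all subscribers of the BroadcastHub source that are attached at that moment. A hypothetical use of the bus built above, assuming the sinkAndSource pair is kept around:

// Publish into the hub; every attached consumer receives the element.
Source.single("hello").runWith(sinkAndSource.first(), mat);
sinkAndSource.second().runWith(Sink.foreach(o -> System.out.println(o)), mat);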
 
Example #22
Source File: HttpFlow.java    From ts-reaktive with MIT License
private Flow<ByteString, ByteString, CompletionStage<HttpResponse>> createFlow(HttpMethod method, Uri uri, Option<ContentType> contentType, Predicate<HttpResponse> isSuccess, HttpHeader... headers) {
    Sink<ByteString, Publisher<ByteString>> in = Sink.asPublisher(AsPublisher.WITH_FANOUT); // akka internally recreates this twice, on some errors...
    Source<ByteString, Subscriber<ByteString>> out = Source.asSubscriber();
    
    return Flow.fromSinkAndSourceMat(in, out, Keep.both()).mapMaterializedValue(pair -> {
        RequestEntity entity;
        if (contentType.isDefined()) {
            Source<ByteString, NotUsed> inReader = Source.fromPublisher(pair.first());
            entity = HttpEntities.createChunked(contentType.get(), inReader);
        } else {
            entity = HttpEntities.EMPTY;
        }
        HttpRequest rq = HttpRequest.create().withMethod(method).withUri(uri).addHeaders(Arrays.asList(headers)).withEntity(entity);
        
        return http.singleRequest(rq).thenApply(resp -> {
            if (isSuccess.test(resp)) {
                resp.entity().getDataBytes()
                    .runWith(Sink.fromSubscriber(pair.second()), materializer);
            } else {
                log.info("Http responded error: {} for request {}", resp, rq);
                resp.discardEntityBytes(materializer);
                pair.second().onError(new IllegalStateException("Unsuccessful HTTP response: " + resp + " for " + rq));
            }
            return resp;
        }).exceptionally(x -> {
            Throwable cause = (x instanceof CompletionException) ? x.getCause() : x;
            if (!(cause instanceof IllegalStateException)) {
                log.info("Could not make http request " + rq, cause);
            }
            pair.second().onError(cause);
            throw (cause instanceof RuntimeException) ? (RuntimeException) x : new RuntimeException(cause);
        });
    });
}
 
Example #23
Source File: WebSocketRoute.java    From ditto with Eclipse Public License 2.0
private Graph<SinkShape<Either<StreamControlMessage, Signal>>, ?> getStreamControlOrSignalSink() {

    return Sink.foreach(either -> {
        final Object streamControlMessageOrSignal = either.isLeft() ? either.left().get() : either.right().get();
        streamingActor.tell(streamControlMessageOrSignal, ActorRef.noSender());
    });
}
 
Example #24
Source File: MongoTimestampPersistenceIT.java    From ditto with Eclipse Public License 2.0
@Test
public void ensureCollectionIsCapped() throws Exception {
    final MongoCollection<Document> collection =
            syncPersistence.getCollection().runWith(Sink.head(), materializer).toCompletableFuture().get();

    runBlocking(syncPersistence.setTimestamp(Instant.now()));
    runBlocking(syncPersistence.setTimestamp(Instant.now()));

    assertThat(runBlocking(Source.fromPublisher(collection.count()))).containsExactly(1L);
}
 
Example #25
Source File: AkkaHubProxy.java    From RHub with Apache License 2.0
@Override
public Removable addUpstream(Source<Object, NotUsed> publisher) {
    UniqueKillSwitch killSwitch =
            publisher.viaMat(busFlow, Keep.right())
                    .to(Sink.ignore())
                    .run(mat);
    subscriptions.put(publisher, killSwitch);
    return () -> AkkaHubProxy.this.removeUpstream(publisher);
}
 
Example #26
Source File: StatsRoute.java    From ditto with Eclipse Public License 2.0
private Route handleDevOpsPerRequest(final RequestContext ctx,
        final Source<ByteString, ?> payloadSource,
        final Function<String, DevOpsCommand<?>> requestJsonToCommandFunction) {
    final CompletableFuture<HttpResponse> httpResponseFuture = new CompletableFuture<>();

    payloadSource
            .fold(ByteString.empty(), ByteString::concat)
            .map(ByteString::utf8String)
            .map(requestJsonToCommandFunction)
            .to(Sink.actorRef(createHttpPerRequestActor(ctx, httpResponseFuture),
                    AbstractHttpRequestActor.COMPLETE_MESSAGE))
            .run(materializer);

    return completeWithFuture(httpResponseFuture);
}
 
Example #27
Source File: HttpHelper.java    From netty-reactive-streams with Apache License 2.0
public StreamedHttpResponse createStreamedResponse(HttpVersion version, List<String> body, long contentLength) {
    List<HttpContent> content = new ArrayList<>();
    for (String chunk: body) {
        content.add(new DefaultHttpContent(Unpooled.copiedBuffer(chunk, Charset.forName("utf-8"))));
    }
    Publisher<HttpContent> publisher = Source.from(content).runWith(Sink.<HttpContent>asPublisher(AsPublisher.WITH_FANOUT), materializer);
    StreamedHttpResponse response = new DefaultStreamedHttpResponse(version, HttpResponseStatus.OK, publisher);
    HttpUtil.setContentLength(response, contentLength);
    return response;
}
 
Example #28
Source File: ThingsSearchCursorTest.java    From ditto with Eclipse Public License 2.0
@Test
public void encodeAndDecodeAreInverse() {
    final ThingsSearchCursor input = randomCursor();
    final ThingsSearchCursor decoded =
            ThingsSearchCursor.decode(input.encode(), materializer)
                    .runWith(Sink.head(), materializer)
                    .toCompletableFuture().join();

    assertThat(decoded).isEqualTo(input);
}
 
Example #29
Source File: S3.java    From ts-reaktive with MIT License
/**
 * Loads the last known written offset from S3, or returns 0 if not found.
 */
public CompletionStage<Long> loadOffset() {
    return download("_lastOffset")
            .reduce((bs1, bs2) -> bs1.concat(bs2))
            .map(bs -> Long.parseLong(bs.utf8String()))
            .recoverWith(new PFBuilder<Throwable, Source<Long, NotUsed>>()
                    .matchAny(x -> Source.single(0L)) // not found -> start at 0
                    .build())
            .runWith(Sink.head(), materializer);
}
 
Example #30
Source File: AkkaProcProxy.java    From RHub with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
protected <T> Publisher<T> filter(Processor processor, final Class<T> filterClass) {
    Source src = Source.fromPublisher(processor)
            .filter(o -> filterClass.isAssignableFrom(o.getClass()));
    return (Publisher<T>) src.runWith(Sink.asPublisher(AsPublisher.WITH_FANOUT), mat);
}