io.pravega.client.stream.Stream Java Examples

The following examples show how to use io.pravega.client.stream.Stream. Each example lists the source file it was taken from and the open-source project it belongs to.
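All of the snippets below operate on a Stream handle, which is simply a (scope, stream name) pair. As a quick orientation, here is a minimal sketch of the usual ways to obtain one; the scope and stream names are placeholders:

import io.pravega.client.stream.Stream;
import io.pravega.client.stream.impl.StreamImpl;

// Factory methods on the Stream interface.
Stream byParts = Stream.of("myScope", "myStream");
Stream byScopedName = Stream.of("myScope/myStream");

// The concrete implementation used throughout the Pravega codebase.
Stream byImpl = new StreamImpl("myScope", "myStream");

// Accessors that appear in many of the examples below.
String scope = byParts.getScope();        // "myScope"
String name = byParts.getStreamName();    // "myStream"
String scoped = byParts.getScopedName();  // "myScope/myStream"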
Example #1
Source File: PeriodicWatermarking.java    From pravega with Apache License 2.0
@VisibleForTesting
public PeriodicWatermarking(StreamMetadataStore streamMetadataStore, BucketStore bucketStore, 
                            Function<Stream, WatermarkClient> watermarkClientSupplier, ScheduledExecutorService executor) {
    this.streamMetadataStore = streamMetadataStore;
    this.bucketStore = bucketStore;
    this.executor = executor;
    this.watermarkClientCache = CacheBuilder.newBuilder()
                                            .maximumSize(MAX_CACHE_SIZE)
                                            .expireAfterAccess(10, TimeUnit.MINUTES)
                                            .removalListener((RemovalListener<Stream, WatermarkClient>) notification -> {
                                                notification.getValue().client.close();
                                            })
                                            .build(new CacheLoader<Stream, WatermarkClient>() {
                                                @ParametersAreNonnullByDefault
                                                @Override
                                                public WatermarkClient load(final Stream stream) {
                                                    return watermarkClientSupplier.apply(stream);
                                                }
                                            });
}
 
Example #2
Source File: BatchClientImplTest.java    From pravega with Apache License 2.0
@Test(timeout = 5000)
public void testGetSegmentsWithStreamCut() throws Exception {

    PravegaNodeUri location = new PravegaNodeUri("localhost", 0);
    MockConnectionFactoryImpl connectionFactory = getMockConnectionFactory(location);
    MockController mockController = new MockController(location.getEndpoint(), location.getPort(), connectionFactory, false);
    Stream stream = createStream(SCOPE, STREAM, 3, mockController);
    BatchClientFactoryImpl client = new BatchClientFactoryImpl(mockController, connectionFactory);

    Iterator<SegmentRange> boundedSegments = client.getSegments(stream, getStreamCut(5L, 0, 1, 2), getStreamCut(15L, 0, 1, 2)).getIterator();
    assertTrue(boundedSegments.hasNext());
    assertEquals(0L, boundedSegments.next().asImpl().getSegment().getSegmentId());
    assertTrue(boundedSegments.hasNext());
    assertEquals(1L, boundedSegments.next().asImpl().getSegment().getSegmentId());
    assertTrue(boundedSegments.hasNext());
    assertEquals(2L, boundedSegments.next().asImpl().getSegment().getSegmentId());
    assertFalse(boundedSegments.hasNext());
}
 
Example #3
Source File: MockStreamManager.java    From pravega with Apache License 2.0
@Override
public Iterator<Stream> listStreams(String scopeName) {
    AsyncIterator<Stream> asyncIterator = controller.listStreams(scopeName);
    return new Iterator<Stream>() {
        private Stream next;

        private void load() {
            // Fetch a new element only if the cached one has been consumed;
            // otherwise a hasNext()/next() pair would skip every other element.
            if (next == null) {
                next = asyncIterator.getNext().join();
            }
        }

        @Override
        public boolean hasNext() {
            load();
            return next != null;
        }

        @Override
        public Stream next() {
            load();
            if (next == null) {
                throw new NoSuchElementException();
            }
            Stream result = next;
            next = null;
            return result;
        }
    };
}
 
Example #4
Source File: EventStreamWriterImpl.java    From pravega with Apache License 2.0
EventStreamWriterImpl(Stream stream, String writerId, Controller controller, SegmentOutputStreamFactory outputStreamFactory,
                      Serializer<Type> serializer, EventWriterConfig config, ExecutorService retransmitPool,
                      ScheduledExecutorService internalExecutor) {
    this.writerId = writerId;
    this.stream = Preconditions.checkNotNull(stream);
    this.controller = Preconditions.checkNotNull(controller);
    this.segmentSealedCallBack = this::handleLogSealed;
    this.tokenProvider = DelegationTokenProviderFactory.create(this.controller, this.stream.getScope(), this.stream.getStreamName());
    this.selector = new SegmentSelector(stream, controller, outputStreamFactory, config, tokenProvider);
    this.serializer = Preconditions.checkNotNull(serializer);
    this.config = config;
    this.retransmitPool = Preconditions.checkNotNull(retransmitPool);
    this.pinger = new Pinger(config.getTransactionTimeoutTime(), stream, controller, internalExecutor);
    List<PendingEvent> failedEvents = selector.refreshSegmentEventWriters(segmentSealedCallBack);
    assert failedEvents.isEmpty() : "There should not be any events to have failed";
    if (config.isAutomaticallyNoteTime()) {
        //See: https://github.com/pravega/pravega/issues/4218
        internalExecutor.scheduleWithFixedDelay(() -> noteTimeInternal(System.currentTimeMillis()), 5, 5, TimeUnit.SECONDS);
    }
}
 
Example #5
Source File: MockController.java    From pravega with Apache License 2.0
@Override
@Synchronized
public AsyncIterator<Stream> listStreams(String scopeName) {
    Set<Stream> collect = createdScopes.get(scopeName)
                                       .stream()
                                       .filter(s -> !s.getStreamName().startsWith(NameUtils.INTERNAL_NAME_PREFIX))
                                       .collect(Collectors.toSet());
    return new AsyncIterator<Stream>() {
        Object lock = new Object();
        @GuardedBy("lock")
        Iterator<Stream> iterator = collect.iterator();
        @Override
        public CompletableFuture<Stream> getNext() {
            Stream next;
            synchronized (lock) {
                if (!iterator.hasNext()) {
                    next = null;
                } else {
                    next = iterator.next();
                }
            }

            return CompletableFuture.completedFuture(next);
        }
    };
}
 
Example #6
Source File: ReaderGroupStateTest.java    From pravega with Apache License 2.0
@Test
public void getStreamCutsForCompletedCheckpointMultipleScope() {

    // Begin checkpoint.
    Map<Segment, Long> offsetMap = getOffsetMap(asList("scope1", "scope2"), asList("s1", "s2"), 0L);
    CheckpointState chkPointState = readerState.getCheckpointState();
    chkPointState.beginNewCheckpoint("chk1", ImmutableSet.of("r1"), offsetMap);
    chkPointState.readerCheckpointed("chk1", "r1", getOffsetMap(asList("scope1", "scope2"), asList("s1", "s2"), 99L));

    Map<Stream, StreamCut> expectedStreamCuts = getStreamCutMap(asList("scope1", "scope2"), asList("s1", "s2"), 99L);

    // invoke and verify.
    Optional<Map<Stream, StreamCut>> streamCuts = readerState.getStreamCutsForCompletedCheckpoint("chk1");
    assertTrue(streamCuts.isPresent());
    assertEquals(expectedStreamCuts, streamCuts.get());
}
 
Example #7
Source File: StreamCutsCli.java    From pravega-samples with Apache License 2.0
private void doBoundedSummingOfStreamValues(String prefix, int exampleNumDays, StreamCutsExample example) throws IOException {
    output("For which day number do you want to sum up values?%n");
    int dayNumber = askForIntInput(prefix, 0, exampleNumDays);

    Map<Stream, List<StreamCut>> streamDayStreamCuts = new LinkedHashMap<>();
    for (String streamName: example.getMyStreamNames()) {
        final SimpleEntry<Integer, Integer> eventIndexesForDay = example.getStreamEventIndexesForDay(streamName, dayNumber);

        // Due to randomization, there could be streams with no events for a given day.
        if (eventIndexesForDay == null){
            continue;
        }
        output("[Stream %s] Indexes to bound day%s events: %s%n", streamName, dayNumber, eventIndexesForDay.toString());

        // Get the StreamCuts that define the event boundaries for the given day in this stream.
        final List<StreamCut> myStreamCuts = example.createStreamCutsByIndexFor(streamName, eventIndexesForDay.getKey(),
                eventIndexesForDay.getValue());
        streamDayStreamCuts.put(Stream.of(scope, streamName), myStreamCuts);
    }

    // Next, we demonstrate the capabilities of StreamCuts by enabling readers to perform bounded reads.
    output("Now, look! We can sum up values from bounded slices of multiple Streams:%n%n");
    output("Result from summing all the values belonging to day%s is: %s!%n", dayNumber,
            example.sumBoundedStreams(streamDayStreamCuts));
}
 
Example #8
Source File: ControllerImpl.java    From pravega with Apache License 2.0
@Override
public CompletableFuture<TxnSegments> createTransaction(final Stream stream, final long lease) {
    Exceptions.checkNotClosed(closed.get(), this);
    Preconditions.checkNotNull(stream, "stream");
    long traceId = LoggerHelpers.traceEnter(log, "createTransaction", stream, lease);

    final CompletableFuture<CreateTxnResponse> result = this.retryConfig.runAsync(() -> {
        RPCAsyncCallback<CreateTxnResponse> callback = new RPCAsyncCallback<>(traceId, "createTransaction", stream, lease);
        client.withDeadlineAfter(timeoutMillis, TimeUnit.MILLISECONDS).createTransaction(
                CreateTxnRequest.newBuilder()
                        .setStreamInfo(ModelHelper.createStreamInfo(stream.getScope(), stream.getStreamName()))
                        .setLease(lease)
                        .build(),
                callback);
        return callback.getFuture();
    }, this.executor);
    return result.thenApply(this::convert)
            .whenComplete((x, e) -> {
                if (e != null) {
                    log.warn("createTransaction on stream {} failed: ", stream.getStreamName(), e);
                }
                LoggerHelpers.traceLeave(log, "createTransaction", traceId);
            });
}
 
Example #9
Source File: EventProcessorGroupImpl.java    From pravega with Apache License 2.0
void initialize() throws CheckpointStoreException {

    try {
        checkpointStore.addReaderGroup(actorSystem.getProcess(), eventProcessorConfig.getConfig().getReaderGroupName());
    } catch (CheckpointStoreException e) {
        if (!e.getType().equals(CheckpointStoreException.Type.NodeExists)) {
            throw e;
        } else {
            log.warn("reader group {} exists", eventProcessorConfig.getConfig().getReaderGroupName());
        }
    }

    // Continue creating the reader group only if adding it to the checkpoint store succeeded.
    readerGroup = createIfNotExists(
            actorSystem.readerGroupManager,
            eventProcessorConfig.getConfig().getReaderGroupName(),
            ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                             .stream(Stream.of(actorSystem.getScope(), eventProcessorConfig.getConfig().getStreamName())).build());

    createEventProcessors(eventProcessorConfig.getConfig().getEventProcessorCount() - eventProcessorMap.values().size());
}
 
Example #10
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
@Test
public void testValidWriterModeExactlyOnce() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name").withWriterMode(PravegaWriterMode.EXACTLY_ONCE)
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    assertNotNull(sink);
}
 
Example #11
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
@Test (expected = ValidationException.class)
public void testMissingFormatDefinition() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name")
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    fail("table factory validation failed");
}
 
Example #12
Source File: StreamCutsExample.java    From pravega-samples with Apache License 2.0
/**
 * A good use-case for {@link StreamCut}s is to allow efficient batch processing of data events within specific
 * boundaries (e.g., computing the mean of the temperature values in 1986). Instead of ingesting all the data and
 * forcing the reader to discard irrelevant events, {@link StreamCut}s help readers read only the events that are
 * relevant to a particular task. To that end, this method uses the Pravega {@link BatchClientFactory} to read from
 * multiple {@link Stream}s within the specific ranges passed as input, and then sums up all the values contained
 * in the events read.
 *
 * @param streamCuts Map that defines the slices to read from a set of {@link Stream}s.
 * @return Sum of all the values of time series data belonging to {@link Stream}s and bounded by {@link StreamCut}s.
 */
public int sumBoundedStreams(Map<Stream, List<StreamCut>> streamCuts) {
    int totalSumValuesInDay = 0;
    try (BatchClientFactory batchClient = BatchClientFactory.withScope(scope, ClientConfig.builder().controllerURI(controllerURI).build())) {
        for (Stream myStream: streamCuts.keySet()) {

            // Get the cuts for this stream that will bound the number of events to read.
            final StreamCut startStreamCut = streamCuts.get(myStream).get(0);
            final StreamCut endStreamCut = streamCuts.get(myStream).get(1);

            // Then, we get the segment ranges according to the StreamCuts.
            StreamSegmentsIterator segments = batchClient.getSegments(myStream, startStreamCut, endStreamCut);
            List<SegmentRange> ranges = Lists.newArrayList(segments.getIterator());

            // We basically sum up all the values of events within the ranges.
            for (SegmentRange range: ranges) {
                List<String> eventData = Lists.newArrayList(batchClient.readSegment(range, new JavaSerializer<>()));
                totalSumValuesInDay += eventData.stream().map(s -> s.split(eventSeparator)[2]).mapToInt(Integer::valueOf).sum();
            }
        }
    }
    return totalSumValuesInDay;
}
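For orientation, a minimal sketch of how this method might be driven, assuming an example instance and a start/end StreamCut pair per stream (the stream name and the cuts below are placeholders, mirroring Example #7):

// Hypothetical usage: sum the events between two cuts on one stream.
Map<Stream, List<StreamCut>> slices = new HashMap<>();
slices.put(Stream.of("myScope", "sensorStream"), Arrays.asList(startCut, endCut));
int sum = example.sumBoundedStreams(slices);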
 
Example #13
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
@Test (expected = ValidationException.class)
public void testMissingSchemaDefinition() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name")
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    fail("missing schema validation failed");
}
 
Example #14
Source File: EventStreamReaderImpl.java    From pravega with Apache License 2.0
EventStreamReaderImpl(SegmentInputStreamFactory inputStreamFactory,
        SegmentMetadataClientFactory metadataClientFactory, Serializer<Type> deserializer,
        ReaderGroupStateManager groupState, Orderer orderer, Supplier<Long> clock, ReaderConfig config, 
        ImmutableMap<Stream, WatermarkReaderImpl> waterMarkReaders, Controller controller) {
    this.deserializer = deserializer;
    this.inputStreamFactory = inputStreamFactory;
    this.metadataClientFactory = metadataClientFactory;
    this.groupState = groupState;
    this.orderer = orderer;
    this.clock = clock;
    this.config = config;
    this.waterMarkReaders = waterMarkReaders;
    this.closed = false;
    this.controller = controller;
    this.segmentsWithData = new Semaphore(0);
}
 
Example #15
Source File: LocalController.java    From pravega with Apache License 2.0
@Override
public CompletableFuture<Boolean> startScale(final Stream stream,
                                              final List<Long> sealedSegments,
                                              final Map<Double, Double> newKeyRanges) {
    return startScaleInternal(stream, sealedSegments, newKeyRanges)
            .thenApply(x -> {
                switch (x.getStatus()) {
                case FAILURE:
                    throw new ControllerFailureException("Failed to scale stream: " + stream);
                case PRECONDITION_FAILED:
                    return false;
                case STARTED:
                    return true;
                default:
                    throw new ControllerFailureException("Unknown return status scaling stream "
                            + stream + " " + x.getStatus());
                }
            });
}
 
Example #16
Source File: BatchClientFactoryImpl.java    From pravega with Apache License 2.0
private StreamSegmentsIterator getStreamSegmentInfo(final Stream stream, final StreamCut startStreamCut, final StreamCut endStreamCut) {
    log.debug("Start stream cut: {}, End stream cut: {}", startStreamCut, endStreamCut);
    StreamSegmentsInfoImpl.validateStreamCuts(startStreamCut, endStreamCut);

    StreamSegmentSuccessors segments = getAndHandleExceptions(controller.getSegments(startStreamCut, endStreamCut),
            RuntimeException::new);
    final SortedSet<Segment> segmentSet = new TreeSet<>(segments.getSegments());
    final DelegationTokenProvider tokenProvider = DelegationTokenProviderFactory
            .create(segments.getDelegationToken(), controller, stream.getScope(), stream.getStreamName());
    log.debug("List of Segments between the start and end stream cuts : {}", segmentSet);

    Iterator<SegmentRange> iterator = Iterators.transform(segmentSet.iterator(),
            s -> getSegmentRange(s, startStreamCut, endStreamCut, tokenProvider));
    return StreamSegmentsInfoImpl.builder().segmentRangeIterator(iterator)
                                 .startStreamCut(startStreamCut)
                                 .endStreamCut(endStreamCut).build();
}
 
Example #17
Source File: BucketServiceTest.java    From pravega with Apache License 2.0
@Test(timeout = 10000)
public void testRetentionService() {
    Map<Integer, BucketService> bucketServices = retentionService.getBucketServices();

    assertNotNull(bucketServices);
    assertEquals(3, bucketServices.size());
    assertTrue(retentionService.takeBucketOwnership(0, hostId, executor).join());
    assertTrue(retentionService.takeBucketOwnership(1, hostId, executor).join());
    assertTrue(retentionService.takeBucketOwnership(2, hostId, executor).join());
    AssertExtensions.assertThrows("", () -> retentionService.takeBucketOwnership(3, hostId, executor).join(),
            e -> e instanceof IllegalArgumentException);
    retentionService.tryTakeOwnership(0).join();

    String scope = "scope";
    String streamName = "stream";
    Stream stream = new StreamImpl(scope, streamName);
    
    bucketStore.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor).join();

    // verify that at least one of the buckets got the notification
    int bucketId = BucketStore.getBucket(scope, streamName, 3);
    Set<String> streams = bucketStore.getStreamsForBucket(BucketStore.ServiceType.RetentionService, bucketId, executor).join();
    
    BucketService bucketService = bucketServices.get(bucketId);
    AtomicBoolean added = new AtomicBoolean(false);
    RetryHelper.loopWithDelay(() -> !added.get(), () -> CompletableFuture.completedFuture(null)
            .thenAccept(x -> added.set(bucketService.getKnownStreams().size() > 0)), Duration.ofSeconds(1).toMillis(), executor).join();
    assertTrue(bucketService.getKnownStreams().contains(stream));

    bucketStore.removeStreamFromBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor).join();
    AtomicBoolean removed = new AtomicBoolean(false);
    RetryHelper.loopWithDelay(() -> !removed.get(), () -> CompletableFuture.completedFuture(null)
            .thenAccept(x -> removed.set(bucketService.getKnownStreams().size() == 0)), Duration.ofSeconds(1).toMillis(), executor).join();
    assertEquals(0, bucketService.getKnownStreams().size());
}
 
Example #18
Source File: PeriodicWatermarking.java    From pravega with Apache License 2.0
@VisibleForTesting
WatermarkClient(Stream stream, SynchronizerClientFactory clientFactory) {
    this.client = clientFactory.createRevisionedStreamClient(
            NameUtils.getMarkStreamForStream(stream.getStreamName()), 
            new WatermarkSerializer(), SynchronizerConfig.builder().build());
    this.inactiveWriters = new ConcurrentHashMap<>();
}
 
Example #19
Source File: BatchClientTest.java    From pravega with Apache License 2.0
protected void listAndReadSegmentsUsingBatchClient(String scopeName, String streamName, ClientConfig config)
        throws InterruptedException, ExecutionException {
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scopeName, config);
    createTestStreamWithEvents(clientFactory);
    log.info("Done creating test event stream with test events");

    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(scopeName, config);

    // List out all the segments in the stream.
    ArrayList<SegmentRange> segments = Lists.newArrayList(
            batchClient.getSegments(Stream.of(scopeName, streamName), null, null).getIterator());
    assertEquals("Expected number of segments", 6, segments.size());

    // Batch read all events from stream.
    List<String> batchEventList = new ArrayList<>();
    segments.forEach(segInfo -> {
        @Cleanup
        SegmentIterator<String> segmentIterator = batchClient.readSegment(segInfo, serializer);
        batchEventList.addAll(Lists.newArrayList(segmentIterator));
    });
    assertEquals("Event count", 9, batchEventList.size());

    // Read from a given offset.
    Segment seg0 = new Segment(scopeName, streamName, 0);
    SegmentRange seg0Info = SegmentRangeImpl.builder().segment(seg0).startOffset(60).endOffset(90).build();
    @Cleanup
    SegmentIterator<String> seg0Iterator = batchClient.readSegment(seg0Info, serializer);
    ArrayList<String> dataAtOffset = Lists.newArrayList(seg0Iterator);
    assertEquals(1, dataAtOffset.size());
    assertEquals(DATA_OF_SIZE_30, dataAtOffset.get(0));
}
 
Example #20
Source File: CheckpointImpl.java    From pravega with Apache License 2.0
CheckpointImpl(String name, Map<Segment, Long> segmentPositions) {
    this.name = name;
    Map<Stream, ImmutableMap.Builder<Segment, Long>> streamPositions = new HashMap<>();
    for (Entry<Segment, Long> position : segmentPositions.entrySet()) {
        streamPositions.computeIfAbsent(position.getKey().getStream(),
                                        k -> new ImmutableMap.Builder<Segment, Long>())
                       .put(position);
    }
    ImmutableMap.Builder<Stream, StreamCut> positionBuilder = ImmutableMap.builder();
    for (Entry<Stream, Builder<Segment, Long>> streamPosition : streamPositions.entrySet()) {
        positionBuilder.put(streamPosition.getKey(),
                            new StreamCutImpl(streamPosition.getKey(), streamPosition.getValue().build()));
    }
    this.positions = positionBuilder.build();
}
 
Example #21
Source File: EventStreamReaderImpl.java    From pravega with Apache License 2.0
@Override
public TimeWindow getCurrentTimeWindow(Stream stream) {
    if (getConfig().isDisableTimeWindows()) {
        return new TimeWindow(null, null);
    }
    WatermarkReaderImpl tracker = waterMarkReaders.get(stream);
    if (tracker == null) {
        throw new IllegalArgumentException("Reader is not subscribed to stream: " + stream);
    } else {
        return tracker.getTimeWindow();
    }
}
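A hedged sketch of how an application might poll this through the public EventStreamReader API (the reader and stream below are placeholders; the bounds are null while no watermark has been established yet):

// Hypothetical usage: track event-time progress while reading.
TimeWindow window = reader.getCurrentTimeWindow(Stream.of("myScope", "myStream"));
if (window.getLowerTimeBound() != null && window.getUpperTimeBound() != null) {
    long uncertainty = window.getUpperTimeBound() - window.getLowerTimeBound();
    log.info("Event time is in [{}, {}] (uncertainty {} ms)",
            window.getLowerTimeBound(), window.getUpperTimeBound(), uncertainty);
}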
 
Example #22
Source File: LocalController.java    From pravega with Apache License 2.0
@Override
public CompletableFuture<Void> commitTransaction(Stream stream, final String writerId, final Long timestamp, UUID txnId) {
    long time = Optional.ofNullable(timestamp).orElse(Long.MIN_VALUE);
    return controller
            .commitTransaction(stream.getScope(), stream.getStreamName(), txnId, writerId, time)
            .thenApply(x -> null);
}
 
Example #23
Source File: ReaderGroupState.java    From pravega with Apache License 2.0
@Synchronized
Optional<Map<Stream, StreamCut>> getStreamCutsForCompletedCheckpoint(final String checkpointId) {
    final Optional<Map<Segment, Long>> positionMap = Optional.ofNullable(checkpointState.getPositionsForCompletedCheckpoint(checkpointId));

    return positionMap.map(map -> map.entrySet().stream()
                                     .collect(groupingBy(o -> o.getKey().getStream(),
                                                         collectingAndThen(toMap(Entry::getKey, Entry::getValue),
                                                                           x -> new StreamCutImpl(x.keySet().stream().findAny().get().getStream(), x)))));
}
 
Example #24
Source File: StreamCutImpl.java    From pravega with Apache License 2.0
private void read00(RevisionDataInput revisionDataInput, StreamCutBuilder builder) throws IOException {
    Stream stream = Stream.of(revisionDataInput.readUTF());
    builder.stream(stream);
    Map<Segment, Long> map = revisionDataInput.readMap(in -> new Segment(stream.getScope(),
                                                                         stream.getStreamName(), in.readCompactLong()),
                                                       in -> in.readCompactLong());
    builder.positions(map);
}
 
Example #25
Source File: ReaderGroupManagerImpl.java    From pravega with Apache License 2.0
private Stream createStreamHelper(String streamName, StreamConfiguration config) {
    getAndHandleExceptions(controller.createStream(scope, streamName, StreamConfiguration.builder()
                                                                      .scalingPolicy(config.getScalingPolicy())
                                                                      .build()),
                           RuntimeException::new);
    return new StreamImpl(scope, streamName);
}
 
Example #26
Source File: ControllerGrpcListStreamsTest.java    From pravega with Apache License 2.0
private Set<Stream> listStreams(ClientConfig clientConfig, String scopeName) {
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    assertNotNull(streamManager);

    Iterator<Stream> streamsIter = streamManager.listStreams(scopeName);
    Set<Stream> streams = new HashSet<>();
    streamsIter.forEachRemaining(s -> streams.add(s));
    return streams;
}
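A minimal sketch of invoking this helper, assuming a reachable controller endpoint (the URI and scope below are placeholders):

ClientConfig clientConfig = ClientConfig.builder()
        .controllerURI(URI.create("tcp://localhost:9090"))
        .build();
Set<Stream> streams = listStreams(clientConfig, "scope1");
streams.forEach(s -> System.out.println(s.getScopedName()));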
 
Example #27
Source File: StreamCutImpl.java    From pravega with Apache License 2.0
private void read10(RevisionDataInput revisionDataInput, StreamCutBuilder builder) throws IOException {
    Stream stream = Stream.of(revisionDataInput.readUTF());
    builder.stream(stream);
    Map<Segment, Long> map = revisionDataInput.readMap(in -> new Segment(stream.getScope(),
                                                                         stream.getStreamName(), in.readCompactLong()),
                                                       RevisionDataInput::readCompactSignedLong);
    builder.positions(map);
}
 
Example #28
Source File: ControllerGrpcListStreamsTest.java    From pravega with Apache License 2.0
@Test
public void testListStreamsReturnsAuthorizedStreamsOnly() {
    // Arrange
    Map<String, String> passwordInputFileEntries = new HashMap<>();
    passwordInputFileEntries.put("admin", "*,READ_UPDATE");
    passwordInputFileEntries.put("user", "scope1,READ;scope1/stream1,READ");

    @Cleanup
    ClusterWrapper cluster = new ClusterWrapper(true, "secret",
            600, this.preparePasswordInputFileEntries(passwordInputFileEntries), 4);

    cluster.initialize();
    String scopeName = "scope1";

    this.createStreams(ClientConfig.builder()
                    .controllerURI(URI.create(cluster.controllerUri()))
                    .credentials(new DefaultCredentials("1111_aaaa", "admin"))
                    .build(),
            scopeName,
            Arrays.asList("stream1", "stream2", "stream3"));

    // Act
    Set<Stream> streams = listStreams(ClientConfig.builder()
            .controllerURI(URI.create(cluster.controllerUri()))
            .credentials(new DefaultCredentials("1111_aaaa", "user"))
            .build(), scopeName);

    // Assert
    assertEquals(1, streams.size());
}
 
Example #29
Source File: Utils.java    From pravega-samples with Apache License 2.0
/**
 * Creates a Pravega stream with a given configuration.
 *
 * @param pravegaConfig the Pravega configuration.
 * @param streamName the stream name (qualified or unqualified).
 * @param streamConfig the stream configuration (scaling policy, retention policy).
 * @return the resolved (fully-qualified) stream.
 */
public static Stream createStream(PravegaConfig pravegaConfig, String streamName, StreamConfiguration streamConfig) {
    // resolve the qualified name of the stream
    Stream stream = pravegaConfig.resolve(streamName);

    try(StreamManager streamManager = StreamManager.create(pravegaConfig.getClientConfig())) {
        // create the requested scope (if necessary)
        streamManager.createScope(stream.getScope());

        // create the requested stream based on the given stream configuration
        streamManager.createStream(stream.getScope(), stream.getStreamName(), streamConfig);
    }

    return stream;
}
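A short sketch of calling this helper, assuming the Flink connector's default configuration and a fixed scaling policy (all names below are illustrative):

// Hypothetical usage: create "examples/my-stream" with 3 fixed segments.
PravegaConfig pravegaConfig = PravegaConfig.fromDefaults().withDefaultScope("examples");
StreamConfiguration streamConfig = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(3))
        .build();
Stream stream = Utils.createStream(pravegaConfig, "my-stream", streamConfig);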
 
Example #30
Source File: MaxTravellersPerDestination.java    From pravega-samples with Apache License 2.0
@Override
public void handleRequest() {

    TableSchema tableSchema = TripRecord.getTableSchema();

    FlinkPravegaJsonTableSource source = FlinkPravegaJsonTableSource.builder()
            .forStream(Stream.of(getScope(), getStream()).getScopedName())
            .withPravegaConfig(getPravegaConfig())
            .failOnMissingField(true)
            .withRowtimeAttribute("dropOffTime", new ExistingField("dropOffTime"), new BoundedOutOfOrderTimestamps(30000L))
            .withSchema(tableSchema)
            .withReaderGroupScope(getScope())
            .build();

    StreamExecutionEnvironment env = getStreamExecutionEnvironment();

    // create a TableEnvironment
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    tEnv.registerTableSource("TaxiRide", source);

    String fields = "passengerCount, dropOffTime, destLocationZone";

    Table noOfTravelersPerDest = tEnv
            .scan("TaxiRide")
            .select(fields)
            .window(Tumble.over("1.hour").on("dropOffTime").as("w"))
            .groupBy("destLocationZone, w")
            .select("destLocationZone, w.start AS start, w.end AS end, count(passengerCount) AS cnt");

    tEnv.toAppendStream(noOfTravelersPerDest, Row.class).print();

    try {
        env.execute("Max-Travellers-Per-Destination");
    } catch (Exception e) {
        log.error("Application Failed", e);
    }
}