Java Code Examples for io.pravega.client.stream.Stream#of()

The following examples show how to use io.pravega.client.stream.Stream#of(). The examples are drawn from open-source projects; each one notes its source file, originating project, and license.
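As a minimal orientation before the project examples, the sketch below shows the two overloads of Stream.of() that appear throughout this page: one taking a separate scope and stream name, and one taking a single scoped name of the form "scope/stream". The scope and stream names used here are placeholders, not taken from any of the projects below.

import io.pravega.client.stream.Stream;

public class StreamOfExample {
    public static void main(String[] args) {
        // Build a Stream handle from a separate scope and stream name (placeholder names).
        Stream byParts = Stream.of("myScope", "myStream");

        // Build the equivalent handle from a single scoped name of the form "scope/stream".
        Stream byScopedName = Stream.of("myScope/myStream");

        // Both handles expose the accessors used in the examples below.
        System.out.println(byParts.getScopedName());        // myScope/myStream
        System.out.println(byScopedName.getScope());        // myScope
        System.out.println(byScopedName.getStreamName());   // myStream
    }
}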
Example 1
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
/**
 * Scope should be supplied either through {@link PravegaConfig} or {@link Pravega.TableSourceReaderBuilder}.
 */
@Test (expected = IllegalStateException.class)
public void testMissingRGScopeFail() {

    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSourceReaderBuilder()
            .forStream(stream);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();

    FlinkPravegaTableFactoryBase tableFactoryBase = new FlinkPravegaStreamTableSourceFactory();
    tableFactoryBase.createFlinkPravegaTableSource(propertiesMap);
    fail("scope validation failed");
}
 
Example 2
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
/**
 * Rowtime attribute should be of type TIMESTAMP.
 */
@Test (expected = ValidationException.class)
public void testWrongRowTimeAttributeType() {
    final Schema schema = new Schema()
            .field("name", DataTypes.STRING())
            .field("age", DataTypes.INT())
            .rowtime(new Rowtime()
                    .timestampsFromField("age")
                    .watermarksFromStrategy(new BoundedOutOfOrderTimestamps(30000L)));
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);
    pravega.tableSourceReaderBuilder()
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);
    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(schema)
            .inAppendMode();
    final Map<String, String> propertiesMap = testDesc.toProperties();
    FlinkPravegaTableFactoryBase tableFactoryBase = new FlinkPravegaStreamTableSourceFactory();
    tableFactoryBase.createFlinkPravegaTableSource(propertiesMap);
    fail("Schema validation failed");
}
 
Example 3
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
@Test
public void testValidWriterModeAtleastOnce() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name").withWriterMode(PravegaWriterMode.ATLEAST_ONCE)
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    assertNotNull(sink);
}
 
Example 4
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
@Test (expected = ValidationException.class)
public void testMissingFormatDefinition() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name")
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    fail("table factory validation failed");
}
 
Example 5
Source File: ReaderGroupImplTest.java    From pravega with Apache License 2.0
private StreamCut getStreamCut(String streamName, long offset, int... segmentNumbers) {
    ImmutableMap.Builder<Segment, Long> builder = ImmutableMap.<Segment, Long>builder();
    Arrays.stream(segmentNumbers).forEach(seg -> {
        builder.put(new Segment(SCOPE, streamName, seg), offset);
    });

    return new StreamCutImpl(Stream.of(SCOPE, streamName), builder.build());
}
 
Example 6
Source File: AbstractEndToEndTest.java    From pravega with Apache License 2.0
protected void scaleStream(final String streamName, final Map<Double, Double> keyRanges) throws Exception {
    Stream stream = Stream.of(SCOPE, streamName);
    Controller controller = controllerWrapper.getController();
    List<Long> currentSegments = controller.getCurrentSegments(SCOPE, streamName).join().getSegments()
                                           .stream().map(Segment::getSegmentId).collect(Collectors.toList());
    assertTrue(controller.scaleStream(stream, currentSegments, keyRanges, executorService()).getFuture().get());
}
 
Example 7
Source File: BatchClientImplTest.java    From pravega with Apache License 2.0
private StreamCut getStreamCut(long offset, int... segments) {
    final Map<Segment, Long> positionMap = Arrays.stream(segments).boxed()
                                                 .collect(Collectors.toMap(s -> new Segment("scope", STREAM, s),
                                                         s -> offset));

    return new StreamCutImpl(Stream.of("scope", STREAM), positionMap);
}
 
Example 8
Source File: MockController.java    From pravega with Apache License 2.0
@Override
public CompletableFuture<StreamSegmentsWithPredecessors> getSuccessors(Segment segment) {
    final Stream segmentStream = Stream.of(segment.getScopedStreamName());
    final CompletableFuture<StreamSegmentsWithPredecessors> result = new CompletableFuture<>();
    if (!createdStreams.containsKey(segmentStream)) {
        result.completeExceptionally(new RuntimeException("Stream is deleted"));
    } else {
        result.complete(new StreamSegmentsWithPredecessors(Collections.emptyMap(), ""));
    }
    return result;
}
 
Example 9
Source File: BatchClientTest.java    From pravega with Apache License 2.0
@Test(timeout = 50000)
@SuppressWarnings("deprecation")
public void testBatchClientWithStreamTruncation() throws InterruptedException, ExecutionException {
    StreamManager streamManager = StreamManager.create(clientConfig);

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    createTestStreamWithEvents(clientFactory);
    log.info("Done creating a test stream with test events");

    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Done creating batch client factory");

    // 1. Create a StreamCut after 2 events (offset = 2 * 30 = 60).
    StreamCut streamCut60L = new StreamCutImpl(Stream.of(SCOPE, STREAM),
            ImmutableMap.of(new Segment(SCOPE, STREAM, 0), 60L));
    // 2. Truncate stream.
    assertTrue("truncate stream", controllerWrapper.getController().truncateStream(SCOPE, STREAM, streamCut60L).join());
    // 3a. Fetch segments using StreamCut.UNBOUNDED.
    ArrayList<SegmentRange> segmentsPostTruncation1 = Lists.newArrayList(batchClient.getSegments(Stream.of(SCOPE, STREAM), StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator());
    // 3b. Fetch segments using the getStreamInfo() API.
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, STREAM);
    ArrayList<SegmentRange> segmentsPostTruncation2 = Lists.newArrayList(batchClient.getSegments(Stream.of(SCOPE, STREAM), streamInfo.getHeadStreamCut(), streamInfo.getTailStreamCut()).getIterator());
    // Validate results.
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation1);
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation2);
}
 
Example 10
Source File: FlinkPravegaOutputFormatITCase.java    From flink-connectors with Apache License 2.0
/**
 * Verifies the following using DataSet API:
 *  - writes data into Pravega using {@link FlinkPravegaOutputFormat}.
 *  - reads data from Pravega using {@link FlinkPravegaInputFormat}.
 */
@Test
public void testPravegaOutputFormat() throws Exception {

    Stream stream = Stream.of(SETUP_UTILS.getScope(), "outputFormatDataSet");
    SETUP_UTILS.createTestStream(stream.getStreamName(), 1);

    PravegaConfig pravegaConfig = SETUP_UTILS.getPravegaConfig();

    FlinkPravegaOutputFormat<Integer> flinkPravegaOutputFormat = FlinkPravegaOutputFormat.<Integer>builder()
            .withEventRouter(router -> "fixedKey")
            .withSerializationSchema(new IntegerSerializationSchema())
            .withPravegaConfig(pravegaConfig)
            .forStream(stream)
            .build();

    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    Collection<Integer> inputData = Arrays.asList(10, 20);
    env.fromCollection(inputData)
            .output(flinkPravegaOutputFormat);
    env.execute("write");

    DataSet<Integer> integers = env.createInput(
            FlinkPravegaInputFormat.<Integer>builder()
                    .forStream(stream)
                    .withPravegaConfig(SETUP_UTILS.getPravegaConfig())
                    .withDeserializationSchema(new IntegerDeserializationSchema())
                    .build(),
            BasicTypeInfo.INT_TYPE_INFO
    );

    // verify that all events were read
    Assert.assertEquals(2, integers.collect().size());
}
 
Example 11
Source File: AbstractHandler.java    From pravega-samples with Apache License 2.0
public void createStream() {
    Stream taxiStream = Stream.of(getScope(), getStream());
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(URI.create(getControllerUri())).build();

    StreamConfiguration streamConfiguration = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(DEFAULT_NO_SEGMENTS))
            .build();

    Helper helper = new Helper();
    helper.createStream(taxiStream, clientConfig, streamConfiguration);
}
 
Example 12
Source File: FlinkPravegaTableSourceTest.java    From flink-connectors with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testTableSourceDescriptorWithWatermark() {
    final String cityName = "fruitName";
    final String total = "count";
    final String eventTime = "eventTime";
    final String controllerUri = "tcp://localhost:9090";
    final String streamName = "test";
    final String scopeName = "test";

    Stream stream = Stream.of(scopeName, streamName);
    PravegaConfig pravegaConfig = PravegaConfig.fromDefaults()
            .withControllerURI(URI.create(controllerUri))
            .withDefaultScope(scopeName);

    // construct table source using descriptors and table source factory
    Pravega pravega = new Pravega();
    pravega.tableSourceReaderBuilder()
            .withTimestampAssigner(new MyAssigner())
            .withReaderGroupScope(stream.getScope())
            .forStream(stream)
            .withPravegaConfig(pravegaConfig);

    final TableSchema tableSchema = TableSchema.builder()
            .field(cityName, DataTypes.STRING())
            .field(total, DataTypes.INT())
            .field(eventTime, DataTypes.TIMESTAMP(3))
            .build();

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(new Json().failOnMissingField(false))
            .withSchema(
                    new Schema()
                            .field(cityName, DataTypes.STRING())
                            .field(total, DataTypes.INT())
                            .field(eventTime, DataTypes.TIMESTAMP(3))
                            .rowtime(new Rowtime()
                                    .timestampsFromSource()
                                    .watermarksFromSource()
                            ))
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSource<?> actualSource = TableFactoryService.find(StreamTableSourceFactory.class, propertiesMap)
            .createStreamTableSource(propertiesMap);
    assertNotNull(actualSource);
    TableSourceValidation.validateTableSource(actualSource, tableSchema);
}
 
Example 13
Source File: SerializationTest.java    From pravega with Apache License 2.0
@Test
public void testStream() {
    Stream stream = Stream.of("foo/bar");
    assertEquals("foo/bar", stream.getScopedName());  
}
 
Example 14
Source File: FlinkPravegaTableSourceTest.java    From flink-connectors with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testTableSourceDescriptor() {
    final String cityName = "fruitName";
    final String total = "count";
    final String eventTime = "eventTime";
    final String procTime = "procTime";
    final String controllerUri = "tcp://localhost:9090";
    final long delay = 3000L;
    final String streamName = "test";
    final String scopeName = "test";

    final TableSchema tableSchema = TableSchema.builder()
            .field(cityName, DataTypes.STRING())
            .field(total, DataTypes.BIGINT())
            .field(eventTime, DataTypes.TIMESTAMP(3))
            .field(procTime, DataTypes.TIMESTAMP(3))
            .build();

    Stream stream = Stream.of(scopeName, streamName);
    PravegaConfig pravegaConfig = PravegaConfig.fromDefaults()
            .withControllerURI(URI.create(controllerUri))
            .withDefaultScope(scopeName);

    // construct table source using descriptors and table source factory
    Pravega pravega = new Pravega();
    pravega.tableSourceReaderBuilder()
            .withReaderGroupScope(stream.getScope())
            .forStream(stream)
            .withPravegaConfig(pravegaConfig);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(new Json().failOnMissingField(false))
            .withSchema(
                    new Schema()
                            .field(cityName, DataTypes.STRING())
                            .field(total, DataTypes.BIGINT())
                            .field(eventTime, DataTypes.TIMESTAMP(3))
                                .rowtime(new Rowtime()
                                            .timestampsFromField(eventTime)
                                            .watermarksFromStrategy(new BoundedOutOfOrderTimestamps(delay))
                                        )
                            .field(procTime, DataTypes.TIMESTAMP(3)).proctime())
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSource<?> actualSource = TableFactoryService.find(StreamTableSourceFactory.class, propertiesMap)
            .createStreamTableSource(propertiesMap);
    assertNotNull(actualSource);
    TableSourceValidation.validateTableSource(actualSource, tableSchema);
}
 
Example 15
Source File: FlinkTableITCase.java    From flink-connectors with Apache License 2.0
@Test
public void testStreamTableSinkUsingDescriptor() throws Exception {

    // create a Pravega stream for test purposes
    Stream stream = Stream.of(setupUtils.getScope(), "testStreamTableSinkUsingDescriptor");
    this.setupUtils.createTestStream(stream.getStreamName(), 1);

    // create a Flink Table environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment().setParallelism(1);
    StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env,
            EnvironmentSettings.newInstance()
                    // watermark is only supported in blink planner
                    .useBlinkPlanner()
                    .inStreamingMode()
                    .build());

    Table table = tableEnv.fromDataStream(env.fromCollection(SAMPLES));

    Pravega pravega = new Pravega();
    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("category")
            .forStream(stream)
            .withPravegaConfig(setupUtils.getPravegaConfig());

    ConnectTableDescriptor desc = tableEnv.connect(pravega)
            .withFormat(new Json().failOnMissingField(true))
            .withSchema(new Schema()
                    .field("category", DataTypes.STRING())
                    .field("value", DataTypes.INT()))
            .inAppendMode();
    desc.createTemporaryTable("test");

    final Map<String, String> propertiesMap = desc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);

    String tablePath = tableEnv.getCurrentDatabase() + "." + "PravegaSink";

    ConnectorCatalogTable<?, ?> connectorCatalogTable = ConnectorCatalogTable.sink(sink, false);

    tableEnv.getCatalog(tableEnv.getCurrentCatalog()).get().createTable(
            ObjectPath.fromString(tablePath),
            connectorCatalogTable, false);

    table.insertInto("PravegaSink");
    env.execute();
}
 
Example 16
Source File: ReaderCheckpointHookTest.java    From flink-connectors with Apache License 2.0
private StreamCut getStreamCut(String streamName, long offset) {
    ImmutableMap<Segment, Long> positions = ImmutableMap.<Segment, Long>builder().put(new Segment(SCOPE,
            streamName, 0), offset).build();
    return new StreamCutImpl(Stream.of(SCOPE, streamName), positions);
}
 
Example 17
Source File: FlinkTableITCase.java    From flink-connectors with Apache License 2.0
@Test
public void testStreamTableSinkUsingDescriptorForAvro() throws Exception {

    // create a Pravega stream for test purposes
    Stream stream = Stream.of(setupUtils.getScope(), "testStreamTableSinkUsingDescriptorForAvro");
    this.setupUtils.createTestStream(stream.getStreamName(), 1);

    // create a Flink Table environment
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment().setParallelism(1);
    StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env,
            EnvironmentSettings.newInstance()
                    // watermark is only supported in blink planner
                    .useBlinkPlanner()
                    .inStreamingMode()
                    .build());

    Table table = tableEnv.fromDataStream(env.fromCollection(SAMPLES));

    Pravega pravega = new Pravega();
    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("category")
            .forStream(stream)
            .withPravegaConfig(setupUtils.getPravegaConfig());

    Avro avro = new Avro();
    String avroSchema =  "{" +
            "  \"type\": \"record\"," +
            "  \"name\": \"test\"," +
            "  \"fields\" : [" +
            "    {\"name\": \"category\", \"type\": \"string\"}," +
            "    {\"name\": \"value\", \"type\": \"int\"}" +
            "  ]" +
            "}";
    avro.avroSchema(avroSchema);

    ConnectTableDescriptor desc = tableEnv.connect(pravega)
            .withFormat(avro)
            .withSchema(new Schema()
                    .field("category", DataTypes.STRING())
                    .field("value", DataTypes.INT()))
            .inAppendMode();
    desc.createTemporaryTable("test");

    final Map<String, String> propertiesMap = desc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);

    String tablePath = tableEnv.getCurrentDatabase() + "." + "PravegaSink";

    ConnectorCatalogTable<?, ?> connectorCatalogTable = ConnectorCatalogTable.sink(sink, false);

    tableEnv.getCatalog(tableEnv.getCurrentCatalog()).get().createTable(
            ObjectPath.fromString(tablePath),
            connectorCatalogTable, false);

    table.insertInto("PravegaSink");
    env.execute();
}
 
Example 18
Source File: BatchClientSimpleTest.java    From pravega with Apache License 2.0
/**
 * This test verifies the basic functionality of {@link BatchClientFactory}, including stream metadata checks, segment
 * counts, parallel segment reads and reads with offsets using stream cuts.
 */
@Test
@SuppressWarnings("deprecation")
public void batchClientSimpleTest() {
    final int totalEvents = RG_PARALLELISM * 100;
    final int offsetEvents = RG_PARALLELISM * 20;
    final int batchIterations = 4;
    final Stream stream = Stream.of(SCOPE, STREAM);
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);

    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(),
                                                                        connectionFactory.getInternalExecutor());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Invoking batchClientSimpleTest test with Controller URI: {}", controllerURI);
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints()
                                                                  .stream(SCOPE + "/" + STREAM).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);

    log.info("Writing events to stream");
    // Write events to the Stream.
    writeEvents(clientFactory, STREAM, totalEvents);

    // Instantiate readers to consume from Stream up to truncatedEvents.
    List<CompletableFuture<Integer>> futures = readEventFutures(clientFactory, READER_GROUP, RG_PARALLELISM, offsetEvents);
    Futures.allOf(futures).join();

    // Create a stream cut on the specified offset position.
    Checkpoint cp = readerGroup.initiateCheckpoint("batchClientCheckpoint", executor).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();

    // Instantiate the batch client and assert it provides correct stream info.
    log.debug("Creating batch client.");
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, stream.getStreamName());
    log.debug("Validating stream metadata fields.");
    assertEquals("Expected Stream name: ", STREAM, streamInfo.getStreamName());
    assertEquals("Expected Scope name: ", SCOPE, streamInfo.getScope());

    // Test that we can read events from parallel segments from an offset onwards.
    log.debug("Reading events from stream cut onwards in parallel.");
    List<SegmentRange> ranges = Lists.newArrayList(batchClient.getSegments(stream, streamCut, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", totalEvents - offsetEvents, readFromRanges(ranges, batchClient));

    // Emulate the behavior of Hadoop client: i) Get tail of Stream, ii) Read from current point until tail, iii) repeat.
    log.debug("Reading in batch iterations.");
    StreamCut currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    int readEvents = 0;
    for (int i = 0; i < batchIterations; i++) {
        writeEvents(clientFactory, STREAM, totalEvents);

        // Read all the existing events in parallel segments from the previous tail to the current one.
        ranges = Lists.newArrayList(batchClient.getSegments(stream, currentTailStreamCut, StreamCut.UNBOUNDED).getIterator());
        assertEquals("Expected number of segments: ", RG_PARALLELISM, ranges.size());
        readEvents += readFromRanges(ranges, batchClient);
        log.debug("Events read in parallel so far: {}.", readEvents);
        currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    }

    assertEquals("Expected events read: .", totalEvents * batchIterations, readEvents);

    // Truncate the stream in first place.
    log.debug("Truncating stream at event {}.", offsetEvents);
    assertTrue(controller.truncateStream(SCOPE, STREAM, streamCut).join());

    // Test the batch client when we select to start reading a Stream from a truncation point.
    StreamCut initialPosition = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getHeadStreamCut();
    List<SegmentRange> newRanges = Lists.newArrayList(batchClient.getSegments(stream, initialPosition, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", (totalEvents - offsetEvents) + totalEvents * batchIterations,
                readFromRanges(newRanges, batchClient));
    log.debug("Events correctly read from Stream: simple batch client test passed.");
}
 
Example 19
Source File: EndToEndReaderGroupTest.java    From pravega with Apache License 2.0
@Test(timeout = 30000)
public void testReaderOfflineWithSilentCheckpoint() throws Exception {
    final Stream stream = Stream.of(SCOPE, STREAM);
    final String group = "group";

    @Cleanup("shutdown")
    InlineExecutor backgroundExecutor = new InlineExecutor();

    createScope(SCOPE);
    createStream(SCOPE, STREAM, ScalingPolicy.fixed(1));

    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, serializer,
                                                                       EventWriterConfig.builder().build());
    //Prep the stream with data.
    //1.Write events with event size of 30
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(1)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(2)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(3)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(4)).join();

    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerURI);
    groupManager.createReaderGroup(group, ReaderGroupConfig
            .builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(1000)
            .stream(stream)
            .build());

    ReaderGroup readerGroup = groupManager.getReaderGroup(group);

    //Create a reader
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", group, serializer,
                                                                  ReaderConfig.builder().build());

    //2. Read an event.
    readAndVerify(reader, 1);

    //3. Trigger a checkpoint and verify it is completed.
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("chk1", backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    EventRead<String> data = reader.readNextEvent(15000);
    assertTrue(data.isCheckpoint());
    readAndVerify(reader, 2);
    assertTrue("Checkpointing should complete successfully", Futures.await(checkpoint));

    //4. GenerateStreamCuts and validate the offset of stream cut.
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    data = reader.readNextEvent(15000);
    assertTrue("StreamCut generation should complete successfully", Futures.await(sc));
    // Expected segment 0 offset is 60L, since 2 events are read.
    Map<Segment, Long> expectedOffsetMap = ImmutableMap.of(getSegment(0, 0), 60L);
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals("StreamCut pointing ot offset 30L expected", new StreamCutImpl(stream, expectedOffsetMap),
                 scMap.get(stream));

    //5. Invoke readerOffline with last position as null. The newer readers should start reading
    //from the last checkpointed position
    readerGroup.readerOffline("readerId", null);
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId", group, serializer,
                                                                   ReaderConfig.builder().build());
    readAndVerify(reader1, 2);
}
 
Example 20
Source File: BoundedStreamReaderTest.java    From pravega with Apache License 2.0
private void scaleStream(final String streamName, final Map<Double, Double> keyRanges) throws Exception {
    Stream stream = Stream.of(SCOPE, streamName);
    Controller controller = controllerWrapper.getController();
    assertTrue(controller.scaleStream(stream, Collections.singletonList(0L), keyRanges, executor).getFuture().get());
}