org.apache.pulsar.client.api.CompressionType Java Examples

The following examples show how to use org.apache.pulsar.client.api.CompressionType. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: CompressionCodecProvider.java    From pulsar with Apache License 2.0 (6 votes)
/**
 * Converts a wire-protocol compression type into its client-API equivalent.
 *
 * @param compressionType the compression type as carried on the Pulsar wire protocol
 * @return the matching client-API {@link CompressionType}
 * @throws IllegalArgumentException if the wire-protocol value has no client-API mapping
 */
public static CompressionType convertFromWireProtocol(PulsarApi.CompressionType compressionType) {
    switch (compressionType) {
    case NONE:
        return CompressionType.NONE;
    case LZ4:
        return CompressionType.LZ4;
    case ZLIB:
        return CompressionType.ZLIB;
    case ZSTD:
        return CompressionType.ZSTD;
    case SNAPPY:
        return CompressionType.SNAPPY;

    default:
        // IllegalArgumentException is a RuntimeException subtype, so existing callers
        // catching RuntimeException still work; including the value aids debugging.
        throw new IllegalArgumentException("Invalid compression type: " + compressionType);
    }
}
 
Example #2
Source File: LogAppender.java    From pulsar with Apache License 2.0 6 votes vote down vote up
/**
 * Starts the appender: transitions to STARTING, creates the Pulsar producer
 * that publishes log entries to the configured log topic, then marks STARTED.
 *
 * @throws RuntimeException wrapping any failure while creating the producer
 */
@Override
public void start() {
    state = State.STARTING;
    try {
        this.producer = pulsarClient.newProducer()
                .topic(logTopic)
                .compressionType(CompressionType.LZ4)
                .enableBatching(true)
                .batchingMaxPublishDelay(100, TimeUnit.MILLISECONDS)
                .blockIfQueueFull(false)
                .property("function", fqn)
                .create();
    } catch (Exception cause) {
        throw new RuntimeException("Error starting LogTopic Producer", cause);
    }
    state = State.STARTED;
}
 
Example #3
Source File: CompressionCodecProvider.java    From pulsar with Apache License 2.0 6 votes vote down vote up
/**
 * Converts a client-API compression type into its wire-protocol equivalent.
 *
 * @param compressionType the client-API compression type
 * @return the matching wire-protocol {@link PulsarApi.CompressionType}
 * @throws IllegalArgumentException if the client-API value has no wire-protocol mapping
 */
public static PulsarApi.CompressionType convertToWireProtocol(CompressionType compressionType) {
    switch (compressionType) {
    case NONE:
        return PulsarApi.CompressionType.NONE;
    case LZ4:
        return PulsarApi.CompressionType.LZ4;
    case ZLIB:
        return PulsarApi.CompressionType.ZLIB;
    case ZSTD:
        return PulsarApi.CompressionType.ZSTD;
    case SNAPPY:
        return PulsarApi.CompressionType.SNAPPY;

    default:
        // IllegalArgumentException is a RuntimeException subtype, so existing callers
        // catching RuntimeException still work; including the value aids debugging.
        throw new IllegalArgumentException("Invalid compression type: " + compressionType);
    }
}
 
Example #4
Source File: PulsarKafkaProducerTest.java    From pulsar with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a PulsarKafkaProducer created with no explicit tuning falls back
 * to the expected defaults: no compression, blocking when the queue is full,
 * 1000 max pending messages, 1 ms max batch publish delay, 1000 max batched messages.
 */
@Test
public void testPulsarKafkaProducerWithDefaultConfig() throws Exception {
    // https://kafka.apache.org/08/documentation.html#producerconfigs
    Properties properties = new Properties();
    properties.put(BROKER_URL, "http://localhost:8080/");

    ProducerConfig config = new ProducerConfig(properties);
    PulsarKafkaProducer<byte[], byte[]> producer = new PulsarKafkaProducer<>(config);
    ProducerBuilderImpl<byte[]> producerBuilder = (ProducerBuilderImpl<byte[]>) producer.getPulsarProducerBuilder();
    // The builder does not expose its configuration, so read it reflectively.
    Field field = ProducerBuilderImpl.class.getDeclaredField("conf");
    field.setAccessible(true);
    ProducerConfigurationData conf = (ProducerConfigurationData) field.get(producerBuilder);
    // (Removed stray System.out.println debug output — assertions below cover the value.)
    assertEquals(conf.getCompressionType(), CompressionType.NONE);
    assertEquals(conf.isBlockIfQueueFull(), true);
    assertEquals(conf.getMaxPendingMessages(), 1000);
    assertEquals(conf.getBatchingMaxPublishDelayMicros(), TimeUnit.MILLISECONDS.toMicros(1));
    assertEquals(conf.getBatchingMaxMessages(), 1000);
}
 
Example #5
Source File: PulsarKafkaProducerTest.java    From pulsar with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that Kafka producer properties are translated to the matching Pulsar
 * producer configuration: gzip maps to ZLIB, a -1 enqueue timeout means "block
 * when full", and the queue/batch sizes and batch delay are carried over.
 */
@Test
public void testPulsarKafkaProducer() throws Exception {
    // https://kafka.apache.org/08/documentation.html#producerconfigs
    Properties props = new Properties();
    props.put(BROKER_URL, "http://localhost:8080/");
    props.put(CLIENT_ID, "test");
    props.put(COMPRESSION_CODEC, "gzip"); // expected to map to CompressionType.ZLIB
    props.put(QUEUE_ENQUEUE_TIMEOUT_MS, "-1"); // -1 => block when the queue is full
    props.put(QUEUE_BUFFERING_MAX_MESSAGES, "6000"); // max pending messages
    props.put(QUEUE_BUFFERING_MAX_MS, "100"); // batch publish delay
    props.put(BATCH_NUM_MESSAGES, "500"); // max messages per batch

    PulsarKafkaProducer<byte[], byte[]> kafkaProducer =
            new PulsarKafkaProducer<>(new ProducerConfig(props));
    ProducerBuilderImpl<byte[]> builder =
            (ProducerBuilderImpl<byte[]>) kafkaProducer.getPulsarProducerBuilder();

    // The builder keeps its configuration private; read it via reflection.
    Field confField = ProducerBuilderImpl.class.getDeclaredField("conf");
    confField.setAccessible(true);
    ProducerConfigurationData conf = (ProducerConfigurationData) confField.get(builder);

    assertEquals(conf.getCompressionType(), CompressionType.ZLIB);
    assertEquals(conf.isBlockIfQueueFull(), true);
    assertEquals(conf.getMaxPendingMessages(), 6000);
    assertEquals(conf.getBatchingMaxPublishDelayMicros(), TimeUnit.MILLISECONDS.toMicros(100));
    assertEquals(conf.getBatchingMaxMessages(), 500);
}
 
Example #6
Source File: PulsarSink.java    From pulsar with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the Pulsar producer this sink uses to publish function results.
 *
 * @param client       the Pulsar client to create the producer from
 * @param topic        destination topic
 * @param producerName optional producer name; skipped when {@code null}
 * @param schema       schema used to serialize outgoing messages
 * @return the created producer, carrying this sink's {@code properties}
 * @throws PulsarClientException if the producer cannot be created
 */
public Producer<T> createProducer(PulsarClient client, String topic, String producerName, Schema<T> schema)
        throws PulsarClientException {
    ProducerBuilder<T> builder = client.newProducer(schema)
            .topic(topic)
            // Infinite send timeout: prevents a potential deadlock with a consumer
            // that is blocked because of unacked messages.
            .sendTimeout(0, TimeUnit.SECONDS)
            .blockIfQueueFull(true)
            .enableBatching(true)
            .batchingMaxPublishDelay(10, TimeUnit.MILLISECONDS)
            .compressionType(CompressionType.LZ4)
            .messageRoutingMode(MessageRoutingMode.CustomPartition)
            .messageRouter(FunctionResultRouter.of())
            .hashingScheme(HashingScheme.Murmur3_32Hash);
    if (producerName != null) {
        builder = builder.producerName(producerName);
    }
    return builder.properties(properties).create();
}
 
Example #7
Source File: UtilsTest.java    From pulsar with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a {@code Record<byte[]>} test fixture, optionally attaching an
 * encryption context populated with two named keys plus batch/compression metadata.
 */
private Record<byte[]> createRecord(byte[] data, String algo, String[] keyNames, byte[][] keyValues, byte[] param,
        Map<String, String> metadata1, Map<String, String> metadata2, int batchSize, int compressionMsgSize,
        Map<String, String> properties, boolean isEncryption) {
    EncryptionContext encryptionCtx = null;
    if (isEncryption) {
        // Assemble the two encryption keys first, then the context itself.
        EncryptionKey firstKey = new EncryptionKey();
        firstKey.setKeyValue(keyValues[0]);
        firstKey.setMetadata(metadata1);

        EncryptionKey secondKey = new EncryptionKey();
        secondKey.setKeyValue(keyValues[1]);
        secondKey.setMetadata(metadata2);

        Map<String, EncryptionKey> keyMap = Maps.newHashMap();
        keyMap.put(keyNames[0], firstKey);
        keyMap.put(keyNames[1], secondKey);

        encryptionCtx = new EncryptionContext();
        encryptionCtx.setAlgorithm(algo);
        encryptionCtx.setBatchSize(Optional.of(batchSize));
        encryptionCtx.setCompressionType(CompressionType.LZ4);
        encryptionCtx.setUncompressedMessageSize(compressionMsgSize);
        encryptionCtx.setKeys(keyMap);
        encryptionCtx.setParam(param);
    }
    return new RecordImpl(data, properties, Optional.ofNullable(encryptionCtx));
}
 
Example #8
Source File: ProducerUnitTest.java    From tutorials with MIT License 5 votes vote down vote up
/**
 * Minimal Pulsar producer example: creates a client and an LZ4-compressed
 * producer, publishes five test messages, then closes the client.
 */
public static void main(String[] args) throws IOException {
    // Create a Pulsar client instance. A single instance can be shared across many
    // producers and consumers within the same application.
    PulsarClient client = PulsarClient.builder()
            .serviceUrl(SERVICE_URL)
            .build();

    // Configure producer specific settings
    Producer<byte[]> producer = client.newProducer()
            // Set the topic
            .topic(TOPIC_NAME)
            // Enable compression
            .compressionType(CompressionType.LZ4)
            .create();

    // Once the producer is created, it can be used for the entire application life-cycle
    System.out.println("Created producer for the topic "+TOPIC_NAME);

    // Send 5 test messages. rangeClosed(1, 5) yields 1..5 inclusive — the
    // original range(1, 5) only sent 4 messages, contradicting the intent.
    IntStream.rangeClosed(1, 5).forEach(i -> {
        String content = String.format("hi-pulsar-%d", i);

        // Build a message object
        Message<byte[]> msg = MessageBuilder.create()
                .setContent(content.getBytes())
                .build();

        // Send each message and log message content and ID when successfully received
        try {
            MessageId msgId = producer.send(msg);

            System.out.println("Published message '"+content+"' with the ID "+msgId);
        } catch (PulsarClientException e) {
            System.out.println(e.getMessage());
        }
    });

    client.close();
}
 
Example #9
Source File: BatchMessageTest.java    From pulsar with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that a payload larger than the allowed message size is rejected with
 * InvalidMessageException, and that normal-sized sends still succeed afterwards.
 */
@Test(dataProvider = "codecAndContainerBuilder")
public void testSendOverSizeMessage(CompressionType compressionType, BatcherBuilder builder) throws Exception {

    final int numMsgs = 10;
    final String topicName = "persistent://prop/ns-abc/testSendOverSizeMessage-" + UUID.randomUUID();

    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName)
            .batchingMaxPublishDelay(1, TimeUnit.MILLISECONDS)
            .batchingMaxMessages(2)
            .enableBatching(true)
            .compressionType(compressionType)
            .batcherBuilder(builder)
            .create();

    // The original test passed silently if send() never threw; track the
    // rejection explicitly and assert it happened.
    boolean overSizeRejected = false;
    try {
        producer.send(new byte[1024 * 1024 * 10]);
    } catch (PulsarClientException e) {
        overSizeRejected = true;
        assertTrue(e instanceof PulsarClientException.InvalidMessageException);
    }
    assertTrue(overSizeRejected, "Expected the oversize message to be rejected");

    // Normal-sized messages must still go through after the failed send.
    for (int i = 0; i < numMsgs; i++) {
        producer.send(new byte[1024]);
    }

    producer.close();

}
 
Example #10
Source File: BatchMessageTest.java    From pulsar with Apache License 2.0 5 votes vote down vote up
/**
 * Supplies every combination of compression codec (NONE, LZ4, ZLIB) and batch
 * container builder (DEFAULT, KEY_BASED), grouped by container builder.
 */
@DataProvider(name = "codecAndContainerBuilder")
public Object[][] codecAndContainerBuilderProvider() {
    CompressionType[] codecs = { CompressionType.NONE, CompressionType.LZ4, CompressionType.ZLIB };
    BatcherBuilder[] containers = { BatcherBuilder.DEFAULT, BatcherBuilder.KEY_BASED };

    Object[][] combinations = new Object[codecs.length * containers.length][];
    int index = 0;
    for (BatcherBuilder container : containers) {
        for (CompressionType codec : codecs) {
            combinations[index++] = new Object[] { codec, container };
        }
    }
    return combinations;
}
 
Example #11
Source File: PersistentTopicE2ETest.java    From pulsar with Apache License 2.0 5 votes vote down vote up
/**
 * End-to-end round trip with the given compression codec: publish 10 messages
 * without batching and verify each is received with its original payload.
 */
@Test(dataProvider = "codec")
public void testCompression(CompressionType compressionType) throws Exception {
    final String topicName = "persistent://prop/ns-abc/topic0" + compressionType;
    final int numMessages = 10;

    // 1. producer connect
    Producer<byte[]> producer = pulsarClient.newProducer()
        .topic(topicName)
        .enableBatching(false)
        .messageRoutingMode(MessageRoutingMode.SinglePartition)
        .compressionType(compressionType)
        .create();
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName("my-sub").subscribe();

    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get();
    assertNotNull(topicRef);
    assertEquals(topicRef.getProducers().size(), 1);

    // 2. publish, then confirm every payload survives the compression round trip
    for (int i = 0; i < numMessages; i++) {
        producer.send(("my-message-" + i).getBytes());
    }

    for (int i = 0; i < numMessages; i++) {
        Message<byte[]> received = consumer.receive(5, TimeUnit.SECONDS);
        assertNotNull(received);
        assertEquals(received.getData(), ("my-message-" + i).getBytes());
    }

    // 3. producer disconnect
    producer.close();
    consumer.close();
}
 
Example #12
Source File: CompactionTest.java    From pulsar with Apache License 2.0 5 votes vote down vote up
/**
 * Compaction of an LZ4-compressed, non-batched topic must keep only the latest
 * value per key: key1 -> my-message-1 and key2 -> my-message-3 (my-message-2
 * for key2 is superseded and must be dropped).
 */
@Test
public void testCompactCompressedNoBatch() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // Subscribe before publishing anything so the subscription retains all messages.
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
        .readCompacted(true).subscribe().close();

    try (Producer<byte[]> producer = pulsarClient.newProducer().topic(topic)
            .compressionType(CompressionType.LZ4).enableBatching(false).create()) {
        producer.newMessage().key("key1").value("my-message-1".getBytes()).sendAsync();
        producer.newMessage().key("key2").value("my-message-2".getBytes()).sendAsync();
        // Final synchronous send: returns only after this message is persisted.
        producer.newMessage().key("key2").value("my-message-3".getBytes()).send();
    }

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic)
            .subscriptionName("sub1").readCompacted(true).subscribe()){
        Message<byte[]> firstMessage = consumer.receive();
        Assert.assertEquals(firstMessage.getKey(), "key1");
        Assert.assertEquals(new String(firstMessage.getData()), "my-message-1");

        Message<byte[]> secondMessage = consumer.receive();
        Assert.assertEquals(secondMessage.getKey(), "key2");
        Assert.assertEquals(new String(secondMessage.getData()), "my-message-3");
    }
}
 
Example #13
Source File: AsyncProducerTutorial.java    From pulsar-java-tutorial with Apache License 2.0 5 votes vote down vote up
/**
 * Asynchronous producer example: creates a producer via createAsync and
 * publishes ten messages, logging each acknowledged message ID.
 */
public static void main(String[] args) throws PulsarClientException {
    PulsarClient client = PulsarClient.builder()
            .serviceUrl(SERVICE_URL)
            .build();

    log.info("Created a client for the Pulsar cluster running at {}", SERVICE_URL);

    client.newProducer()
            .topic(TOPIC_NAME)
            .compressionType(CompressionType.LZ4)
            .createAsync()
            .thenAccept(producer -> {
                log.info("Producer created asynchronously for the topic {}", TOPIC_NAME);

                // Send 10 messages with varying content
                IntStream.range(1, 11).forEach(i -> {
                    byte[] msgContent = String.format("hello-pulsar-%d", i).getBytes();
                    // Build a fresh message per send: the original reused one
                    // MessageBuilder, letting setContent race with async sends
                    // still in flight.
                    Message<byte[]> msg = MessageBuilder.create()
                            .setContent(msgContent)
                            .build();
                    producer.sendAsync(msg)
                            .handle((msgId, e) -> {
                                if (e != null) {
                                    // Send failed: msgId is null here, so only report
                                    // the error (the original logged success and then
                                    // NPE'd on msgId.toByteArray()).
                                    e.printStackTrace();
                                } else {
                                    log.info("Successfully produced message with ID {}",
                                            new String(msgId.toByteArray()));
                                }
                                return null;
                            });
                });
            })
            .exceptionally(e -> {
                log.error(e.toString());
                return null;
            });
}
 
Example #14
Source File: ProducerTutorial.java    From pulsar-java-tutorial with Apache License 2.0 5 votes vote down vote up
/**
 * Basic producer tutorial: creates a client and an LZ4-compressed producer,
 * publishes ten test messages, then closes the client.
 */
public static void main(String[] args) throws IOException {
    // A single PulsarClient instance can be shared by many producers and
    // consumers within the same application.
    PulsarClient client = PulsarClient.builder()
            .serviceUrl(SERVICE_URL)
            .build();

    // Producer-specific settings: the destination topic and LZ4 compression.
    Producer<byte[]> producer = client.newProducer()
            .topic(TOPIC_NAME)
            .compressionType(CompressionType.LZ4)
            .create();

    // The producer can be reused for the entire application life-cycle.
    log.info("Created producer for the topic {}", TOPIC_NAME);

    // Publish 10 test messages, logging the content and ID of each on success.
    IntStream.range(1, 11).forEach(i -> {
        String content = String.format("hello-pulsar-%d", i);
        Message<byte[]> msg = MessageBuilder.create()
                .setContent(content.getBytes())
                .build();
        try {
            MessageId msgId = producer.send(msg);
            log.info("Published message '{}' with the ID {}", content, msgId);
        } catch (PulsarClientException e) {
            log.error(e.getMessage());
        }
    });

    client.close();
}
 
Example #15
Source File: MessageRecordUtils.java    From kop with Apache License 2.0 5 votes vote down vote up
/**
 * Serializes a Pulsar {@code MessageImpl} into a wire-format buffer
 * (metadata + payload, CRC32C checksum), filling in metadata fields that are
 * required on the wire but may be absent on the in-memory message.
 *
 * @param message must be a {@link MessageImpl}; enforced by {@code checkArgument}
 * @return a ByteBuf with the serialized metadata and payload
 */
public static ByteBuf messageToByteBuf(Message<byte[]> message) {
    checkArgument(message instanceof MessageImpl);

    MessageImpl<byte[]> msg = (MessageImpl<byte[]>) message;
    MessageMetadata.Builder msgMetadataBuilder = msg.getMessageBuilder();
    ByteBuf payload = msg.getDataBuffer();

    // filled in required fields
    if (!msgMetadataBuilder.hasSequenceId()) {
        // -1 used when no sequence id was assigned — TODO confirm the broker-side semantics
        msgMetadataBuilder.setSequenceId(-1);
    }
    if (!msgMetadataBuilder.hasPublishTime()) {
        msgMetadataBuilder.setPublishTime(clock.millis());
    }
    if (!msgMetadataBuilder.hasProducerName()) {
        msgMetadataBuilder.setProducerName(FAKE_KOP_PRODUCER_NAME);
    }

    // Always mark the payload as uncompressed on the wire.
    msgMetadataBuilder.setCompression(
        CompressionCodecProvider.convertToWireProtocol(CompressionType.NONE));
    msgMetadataBuilder.setUncompressedSize(payload.readableBytes());
    MessageMetadata msgMetadata = msgMetadataBuilder.build();

    ByteBuf buf = Commands.serializeMetadataAndPayload(ChecksumType.Crc32c, msgMetadata, payload);

    // Return builder and metadata to their object pools only AFTER serialization:
    // serializeMetadataAndPayload has copied everything it needs into buf.
    msgMetadataBuilder.recycle();
    msgMetadata.recycle();

    return buf;
}
 
Example #16
Source File: SchedulerManagerTest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Per-test fixture: builds a SchedulerManager wired entirely against mocks — a
 * stubbed Pulsar client/producer chain whose sends complete immediately with
 * {@code MessageId.earliest}, plus mocked runtime, metadata, membership and
 * leader collaborators.
 */
@BeforeMethod
public void setup() {
    WorkerConfig workerConfig = new WorkerConfig();
    workerConfig.setWorkerId("worker-1");
    workerConfig.setFunctionRuntimeFactoryClassName(ThreadRuntimeFactory.class.getName());
    workerConfig.setFunctionRuntimeFactoryConfigs(
            ObjectMapperFactory.getThreadLocal().convertValue(
                    new ThreadRuntimeFactoryConfig().setThreadGroupName("test"), Map.class));
    workerConfig.setPulsarServiceUrl("pulsar://localhost:6650");
    workerConfig.setStateStorageServiceUrl("foo");
    workerConfig.setFunctionAssignmentTopicName("assignments");
    workerConfig.setSchedulerClassName(RoundRobinScheduler.class.getName());
    // No retries: assignment-write failures surface immediately in tests.
    workerConfig.setAssignmentWriteMaxRetries(0);

    // Producer mock: newMessage() returns a fluent message stub whose
    // sendAsync() resolves instantly with MessageId.earliest.
    producer = mock(Producer.class);
    completableFuture = spy(new CompletableFuture<>());
    completableFuture.complete(MessageId.earliest);
    //byte[] bytes = any();
    message = mock(TypedMessageBuilder.class);
    when(producer.newMessage()).thenReturn(message);
    when(message.key(anyString())).thenReturn(message);
    when(message.value(any())).thenReturn(message);
    when(message.sendAsync()).thenReturn(completableFuture);

    // ProducerBuilder stub: every fluent configuration call returns the builder
    // itself, and createAsync() yields the mocked producer.
    ProducerBuilder<byte[]> builder = mock(ProducerBuilder.class);
    when(builder.topic(anyString())).thenReturn(builder);
    when(builder.producerName(anyString())).thenReturn(builder);
    when(builder.enableBatching(anyBoolean())).thenReturn(builder);
    when(builder.blockIfQueueFull(anyBoolean())).thenReturn(builder);
    when(builder.compressionType(any(CompressionType.class))).thenReturn(builder);
    when(builder.sendTimeout(anyInt(), any(TimeUnit.class))).thenReturn(builder);

    when(builder.createAsync()).thenReturn(CompletableFuture.completedFuture(producer));

    PulsarClient pulsarClient = mock(PulsarClient.class);
    when(pulsarClient.newProducer()).thenReturn(builder);

    // Real single-thread executor plus spied/mocked collaborators injected
    // into the SchedulerManager under test.
    this.executor = Executors
            .newSingleThreadScheduledExecutor(new DefaultThreadFactory("worker-test"));
    errorNotifier = spy(ErrorNotifier.getDefaultImpl());
    schedulerManager = spy(new SchedulerManager(workerConfig, pulsarClient, null, errorNotifier));
    functionRuntimeManager = mock(FunctionRuntimeManager.class);
    functionMetaDataManager = mock(FunctionMetaDataManager.class);
    membershipManager = mock(MembershipManager.class);
    leaderService = mock(LeaderService.class);
    schedulerManager.setFunctionMetaDataManager(functionMetaDataManager);
    schedulerManager.setFunctionRuntimeManager(functionRuntimeManager);
    schedulerManager.setMembershipManager(membershipManager);
    schedulerManager.setLeaderService(leaderService);
}
 
Example #17
Source File: CompressionCodecProvider.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Looks up the codec implementation registered for a wire-protocol compression type.
 *
 * @param type the wire-protocol compression type
 * @return the codec registered for {@code type} in the {@code codecs} map
 */
public static CompressionCodec getCompressionCodec(PulsarApi.CompressionType type) {
    final CompressionCodec codec = codecs.get(type);
    return codec;
}
 
Example #18
Source File: CompressionCodecProvider.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Looks up the codec implementation for a client-API compression type by first
 * mapping it to its wire-protocol equivalent.
 *
 * @param type the client-API compression type
 * @return the codec registered for the corresponding wire-protocol type
 */
public static CompressionCodec getCompressionCodec(CompressionType type) {
    final PulsarApi.CompressionType wireType = convertToWireProtocol(type);
    return codecs.get(wireType);
}
 
Example #19
Source File: CompactionTest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Compaction of an LZ4-compressed topic with batching: all three messages fit
 * in one batch (batchingMaxMessages = 3, effectively-infinite publish delay),
 * and compaction must still retain only the latest value per key
 * (key1 -> my-message-1, key2 -> my-message-3).
 */
@Test
public void testCompactCompressedBatching() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
        .readCompacted(true).subscribe().close();

    // 1-hour publish delay means the batch is only flushed once 3 messages
    // are queued, forcing all sends into a single compressed batch.
    try (Producer<byte[]> producer = pulsarClient.newProducer().topic(topic)
            .compressionType(CompressionType.LZ4)
            .maxPendingMessages(3)
            .enableBatching(true)
            .batchingMaxMessages(3)
            .batchingMaxPublishDelay(1, TimeUnit.HOURS).create()) {
        producer.newMessage()
                .key("key1")
                .value("my-message-1".getBytes())
                .sendAsync();
        producer.newMessage()
                .key("key2")
                .value("my-message-2".getBytes())
                .sendAsync();
        producer.newMessage()
                .key("key2")
                .value("my-message-3".getBytes())
                .send();
    }

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // After compaction only the latest value per key should remain.
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic)
            .subscriptionName("sub1").readCompacted(true).subscribe()){
        Message<byte[]> message1 = consumer.receive();
        Assert.assertEquals(message1.getKey(), "key1");
        Assert.assertEquals(new String(message1.getData()), "my-message-1");

        Message<byte[]> message2 = consumer.receive();
        Assert.assertEquals(message2.getKey(), "key2");
        Assert.assertEquals(new String(message2.getData()), "my-message-3");
    }
}
 
Example #20
Source File: PersistentTopicE2ETest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Supplies the compression codecs exercised by codec-parameterized tests:
 * NONE, LZ4 and ZLIB.
 */
@DataProvider(name = "codec")
public Object[][] codecProvider() {
    return new Object[][] {
            { CompressionType.NONE },
            { CompressionType.LZ4 },
            { CompressionType.ZLIB },
    };
}
 
Example #21
Source File: CompactionTest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Compaction of a topic that is both encrypted and LZ4-compressed, with
 * batching: the compactor cannot decrypt the batch payload, so (unlike the
 * plain compressed case) ALL three messages must survive compaction —
 * including the superseded my-message-2 for key2.
 */
@Test
public void testCompactEncryptedAndCompressedBatching() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
        .readCompacted(true).subscribe().close();

    // 1-hour publish delay forces all three sends into one encrypted batch.
    try (Producer<byte[]> producer = pulsarClient.newProducer().topic(topic)
            .addEncryptionKey("client-ecdsa.pem").cryptoKeyReader(new EncKeyReader())
            .compressionType(CompressionType.LZ4)
            .maxPendingMessages(3)
            .enableBatching(true)
            .batchingMaxMessages(3)
            .batchingMaxPublishDelay(1, TimeUnit.HOURS).create()) {
        producer.newMessage()
                .key("key1")
                .value("my-message-1".getBytes())
                .sendAsync();
        producer.newMessage()
                .key("key2")
                .value("my-message-2".getBytes())
                .sendAsync();
        producer.newMessage()
                .key("key2")
                .value("my-message-3".getBytes())
                .send();
    }

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // with encryption, all messages are passed through compaction as it doesn't
    // have the keys to decrypt the batch payload
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic)
            .subscriptionName("sub1").cryptoKeyReader(new EncKeyReader())
            .readCompacted(true).subscribe()){
        Message<byte[]> message1 = consumer.receive();
        Assert.assertEquals(message1.getKey(), "key1");
        Assert.assertEquals(new String(message1.getData()), "my-message-1");

        Message<byte[]> message2 = consumer.receive();
        Assert.assertEquals(message2.getKey(), "key2");
        Assert.assertEquals(new String(message2.getData()), "my-message-2");

        Message<byte[]> message3 = consumer.receive();
        Assert.assertEquals(message3.getKey(), "key2");
        Assert.assertEquals(new String(message3.getData()), "my-message-3");
    }
}
 
Example #22
Source File: CompactionTest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Compaction of an encrypted, LZ4-compressed topic WITHOUT batching: each
 * message is individually addressable by key, so compaction keeps only the
 * latest value per key (key1 -> my-message-1, key2 -> my-message-3) —
 * contrast with the batched encrypted case, where all messages pass through.
 */
@Test
public void testCompactEncryptedAndCompressedNoBatch() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
        .readCompacted(true).subscribe().close();

    try (Producer<byte[]> producer = pulsarClient.newProducer().topic(topic)
            .addEncryptionKey("client-ecdsa.pem").cryptoKeyReader(new EncKeyReader())
            .compressionType(CompressionType.LZ4)
            .enableBatching(false).create()) {
        producer.newMessage()
                .key("key1")
                .value("my-message-1".getBytes())
                .sendAsync();
        producer.newMessage()
                .key("key2")
                .value("my-message-2".getBytes())
                .sendAsync();
        producer.newMessage()
                .key("key2")
                .value("my-message-3".getBytes())
                .send();
    }

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // Check that messages after compaction have same ids
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic)
            .subscriptionName("sub1").cryptoKeyReader(new EncKeyReader())
            .readCompacted(true).subscribe()){
        Message<byte[]> message1 = consumer.receive();
        Assert.assertEquals(message1.getKey(), "key1");
        Assert.assertEquals(new String(message1.getData()), "my-message-1");

        Message<byte[]> message2 = consumer.receive();
        Assert.assertEquals(message2.getKey(), "key2");
        Assert.assertEquals(new String(message2.getData()), "my-message-3");
    }
}
 
Example #23
Source File: PulsarComponentConfiguration.java    From camel-spring-boot with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the compression type configured for this component.
 *
 * @return the configured {@link CompressionType}
 *         (may be {@code null} if never set — field declared elsewhere; verify)
 */
public CompressionType getCompressionType() {
    return compressionType;
}
 
Example #24
Source File: CompactionTest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that an empty-payload (tombstone) message deletes its key during
 * compaction when messages are LZ4-compressed, both for non-batched and
 * batched producers, and across batch boundaries. Expected survivors after
 * compaction: key0 (never deleted) and key4 (deleted, then resurrected).
 */
@Test
public void testEmptyPayloadDeletesWhenCompressed() throws Exception {
    String topic = "persistent://my-property/use/my-ns/my-topic1";

    // subscribe before sending anything, so that we get all messages
    pulsarClient.newConsumer().topic(topic).subscriptionName("sub1")
        .readCompacted(true).subscribe().close();

    // Two producers: one non-batched, one batched (3 messages per batch,
    // effectively-infinite publish delay so only size triggers a flush).
    try (Producer<byte[]> producerNormal = pulsarClient.newProducer()
             .topic(topic)
             .enableBatching(false)
             .compressionType(CompressionType.LZ4)
             .create();
         Producer<byte[]> producerBatch = pulsarClient.newProducer()
             .topic(topic)
             .maxPendingMessages(3)
             .enableBatching(true)
             .compressionType(CompressionType.LZ4)
             .batchingMaxMessages(3)
             .batchingMaxPublishDelay(1, TimeUnit.HOURS)
             .create()) {

        // key0 persists through it all
        producerNormal.newMessage()
                .key("key0")
                .value("my-message-0".getBytes()).send();

        // key1 is added but then deleted
        producerNormal.newMessage()
                .key("key1")
                .value("my-message-1".getBytes()).send();

        // Empty payload = tombstone for key1.
        producerNormal.newMessage()
                .key("key1").send();

        // key2 is added but deleted in same batch
        producerBatch.newMessage()
                .key("key2")
                .value("my-message-2".getBytes()).sendAsync();
        producerBatch.newMessage()
                .key("key3")
                .value("my-message-3".getBytes()).sendAsync();
        producerBatch.newMessage()
                .key("key2").send();

        // key3 is added in previous batch, deleted in this batch
        producerBatch.newMessage()
                .key("key3")
                .sendAsync();
        producerBatch.newMessage()
                .key("key4")
                .value("my-message-3".getBytes())
                .sendAsync();
        producerBatch.newMessage()
                .key("key4")
                .send();

        // key4 is added, deleted, then resurrected
        producerNormal.newMessage()
                .key("key4")
                .value("my-message-4".getBytes())
                .send();
    }

    // compact the topic
    Compactor compactor = new TwoPhaseCompactor(conf, pulsarClient, bk, compactionScheduler);
    compactor.compact(topic).get();

    // Only key0 and the resurrected key4 should remain after compaction.
    try (Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topic)
            .subscriptionName("sub1").readCompacted(true).subscribe()){
        Message<byte[]> message1 = consumer.receive();
        Assert.assertEquals(message1.getKey(), "key0");
        Assert.assertEquals(new String(message1.getData()), "my-message-0");

        Message<byte[]> message2 = consumer.receive();
        Assert.assertEquals(message2.getKey(), "key4");
        Assert.assertEquals(new String(message2.getData()), "my-message-4");
    }
}
 
Example #25
Source File: UtilsTest.java    From pulsar with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that a Pulsar {@code Record} serialized to a FlatBuffers {@code Message} by
 * {@code Utils.serializeRecordToFlatBuffer} round-trips losslessly: payload bytes,
 * message properties and (when enabled) the full encryption context — key names,
 * key values, per-key metadata, algorithm, param bytes, batch info and compression type.
 *
 * @param isEncryption whether the record carries an encryption context (from the
 *                     "encryption" data provider)
 * @throws Exception on any serialization failure
 */
@Test(dataProvider="encryption")
public void testFbSerialization(boolean isEncryption) throws Exception {

    // Fixed inputs shared by every iteration.
    final String[] keyNames = { "key1", "key2" };
    final String param = "param";
    final String algo = "algo";
    int batchSize = 10;
    int compressionMsgSize = 10;

    for (int k = 0; k < 5; k++) {
        // Payload length scales with k (k=0 yields an empty payload) so both empty
        // and multi-hundred-KB messages are exercised.
        String payloadString = RandomStringUtils.random(142342 * k, String.valueOf(System.currentTimeMillis()));
        final String key1Value = payloadString + "test1";
        final String key2Value = payloadString + "test2";
        final byte[][] keyValues = { key1Value.getBytes(), key2Value.getBytes() };
        byte[] data = payloadString.getBytes();
        Map<String, String> properties = Maps.newHashMap();
        properties.put("prop1", payloadString);
        Map<String, String> metadata1 = Maps.newHashMap();
        metadata1.put("version", "v1");
        metadata1.put("ckms", "cmks-1");
        Map<String, String> metadata2 = Maps.newHashMap();
        metadata2.put("version", "v2");
        metadata2.put("ckms", "cmks-2");
        Record<byte[]> record = createRecord(data, algo, keyNames, keyValues, param.getBytes(), metadata1,
                metadata2, batchSize, compressionMsgSize, properties, isEncryption);

        // Serialize, then re-parse through the generated FlatBuffers API.
        ByteBuffer flatBuffer = Utils.serializeRecordToFlatBuffer(record);
        Message kinesisJsonResponse = Message.getRootAsMessage(flatBuffer);

        // Payload must round-trip byte-for-byte.
        byte[] fbPayloadBytes = new byte[kinesisJsonResponse.payloadLength()];
        kinesisJsonResponse.payloadAsByteBuffer().get(fbPayloadBytes);
        assertEquals(fbPayloadBytes, data);

        if (isEncryption) {
            // Rebuild key values and per-key metadata from the deserialized context.
            org.apache.pulsar.io.kinesis.fbs.EncryptionCtx encryptionCtxDeser = kinesisJsonResponse.encryptionCtx();
            byte compressionType = encryptionCtxDeser.compressionType();
            int fbBatchSize = encryptionCtxDeser.batchSize();
            boolean isBatchMessage = encryptionCtxDeser.isBatchMessage(); // typo fixed: was "isBathcMessage"
            int fbCompressionMsgSize = encryptionCtxDeser.uncompressedMessageSize();
            int totalKeys = encryptionCtxDeser.keysLength();
            Map<String, Map<String, String>> fbKeyMetadataResult = Maps.newHashMap();
            Map<String, byte[]> fbKeyValueResult = Maps.newHashMap();
            for (int i = 0; i < encryptionCtxDeser.keysLength(); i++) {
                org.apache.pulsar.io.kinesis.fbs.EncryptionKey encryptionKey = encryptionCtxDeser.keys(i);
                String keyName = encryptionKey.key();
                byte[] keyValueBytes = new byte[encryptionKey.valueLength()];
                encryptionKey.valueAsByteBuffer().get(keyValueBytes);
                fbKeyValueResult.put(keyName, keyValueBytes);
                Map<String, String> fbMetadata = Maps.newHashMap();
                for (int j = 0; j < encryptionKey.metadataLength(); j++) {
                    KeyValue encMtdata = encryptionKey.metadata(j);
                    fbMetadata.put(encMtdata.key(), encMtdata.value());
                }
                fbKeyMetadataResult.put(keyName, fbMetadata);
            }
            byte[] paramBytes = new byte[encryptionCtxDeser.paramLength()];
            encryptionCtxDeser.paramAsByteBuffer().get(paramBytes);

            // TestNG assertEquals takes (actual, expected); order normalized so
            // failure messages read correctly.
            assertEquals(totalKeys, 2);
            assertEquals(fbBatchSize, batchSize);
            assertTrue(isBatchMessage);
            assertEquals(fbCompressionMsgSize, compressionMsgSize);
            assertEquals(fbKeyValueResult.get(keyNames[0]), keyValues[0]);
            assertEquals(fbKeyValueResult.get(keyNames[1]), keyValues[1]);
            assertEquals(fbKeyMetadataResult.get(keyNames[0]), metadata1);
            assertEquals(fbKeyMetadataResult.get(keyNames[1]), metadata2);
            assertEquals(compressionType, org.apache.pulsar.io.kinesis.fbs.CompressionType.LZ4);
            assertEquals(paramBytes, param.getBytes());
            assertEquals(encryptionCtxDeser.algo(), algo);
        }

        // Message properties must round-trip as well.
        Map<String, String> fbproperties = Maps.newHashMap();
        for (int i = 0; i < kinesisJsonResponse.propertiesLength(); i++) {
            KeyValue property = kinesisJsonResponse.properties(i);
            fbproperties.put(property.key(), property.value());
        }
        assertEquals(fbproperties, properties);

    }
}
 
Example #26
Source File: ProducerHandler.java — From the Apache Pulsar project (Apache License 2.0)
/**
 * Builds a {@link ProducerBuilder} configured from this handler's websocket query
 * parameters ({@code queryParams}). Batching is disabled by default and the builder
 * never blocks when the pending-message queue is full, so the server thread stays
 * responsive; individual query parameters selectively override producer settings.
 *
 * @param client the Pulsar client used to create the builder
 * @return a producer builder reflecting the requested query parameters
 * @throws NumberFormatException    if a numeric parameter has a non-numeric value
 * @throws IllegalArgumentException if an enum-valued parameter has an unknown value
 */
private ProducerBuilder<byte[]> getProducerBuilder(PulsarClient client) {
    ProducerBuilder<byte[]> builder = client.newProducer()
        .enableBatching(false)
        .messageRoutingMode(MessageRoutingMode.SinglePartition);

    // Set to false to prevent the server thread from being blocked if a lot of messages are pending.
    builder.blockIfQueueFull(false);

    if (queryParams.containsKey("producerName")) {
        builder.producerName(queryParams.get("producerName"));
    }

    if (queryParams.containsKey("initialSequenceId")) {
        // BUGFIX: previously parsed the literal string "initialSequenceId" (which always
        // threw NumberFormatException); parse the parameter's value instead.
        builder.initialSequenceId(Long.parseLong(queryParams.get("initialSequenceId")));
    }

    if (queryParams.containsKey("hashingScheme")) {
        builder.hashingScheme(HashingScheme.valueOf(queryParams.get("hashingScheme")));
    }

    if (queryParams.containsKey("sendTimeoutMillis")) {
        builder.sendTimeout(Integer.parseInt(queryParams.get("sendTimeoutMillis")), TimeUnit.MILLISECONDS);
    }

    if (queryParams.containsKey("batchingEnabled")) {
        builder.enableBatching(Boolean.parseBoolean(queryParams.get("batchingEnabled")));
    }

    if (queryParams.containsKey("batchingMaxMessages")) {
        builder.batchingMaxMessages(Integer.parseInt(queryParams.get("batchingMaxMessages")));
    }

    if (queryParams.containsKey("maxPendingMessages")) {
        builder.maxPendingMessages(Integer.parseInt(queryParams.get("maxPendingMessages")));
    }

    if (queryParams.containsKey("batchingMaxPublishDelay")) {
        builder.batchingMaxPublishDelay(Integer.parseInt(queryParams.get("batchingMaxPublishDelay")),
                TimeUnit.MILLISECONDS);
    }

    if (queryParams.containsKey("messageRoutingMode")) {
        // Validate before valueOf so the client gets a descriptive error, not a bare
        // IllegalArgumentException from the enum.
        checkArgument(
                Enums.getIfPresent(MessageRoutingMode.class, queryParams.get("messageRoutingMode")).isPresent(),
                "Invalid messageRoutingMode %s", queryParams.get("messageRoutingMode"));
        MessageRoutingMode routingMode = MessageRoutingMode.valueOf(queryParams.get("messageRoutingMode"));
        // CustomPartition requires a router implementation the websocket proxy cannot supply.
        if (!MessageRoutingMode.CustomPartition.equals(routingMode)) {
            builder.messageRoutingMode(routingMode);
        }
    }

    if (queryParams.containsKey("compressionType")) {
        checkArgument(Enums.getIfPresent(CompressionType.class, queryParams.get("compressionType")).isPresent(),
                "Invalid compressionType %s", queryParams.get("compressionType"));
        builder.compressionType(CompressionType.valueOf(queryParams.get("compressionType")));
    }

    return builder;
}
 
Example #27
Source File: PulsarKafkaProducer.java — From the Apache Pulsar project (Apache License 2.0)
/**
 * Creates a Pulsar-backed drop-in replacement for the Kafka 0.8 producer, translating
 * the Kafka {@link ProducerConfig} settings into their Pulsar equivalents: broker list
 * becomes the Pulsar service URL, {@code queue.enqueue.timeout.ms == -1} maps to
 * block-if-queue-full, {@code gzip}/{@code snappy} codecs map to ZLIB/SNAPPY
 * compression, and the queue/batch/timeout sizes carry over when explicitly set.
 *
 * @param config the Kafka producer configuration to translate
 * @throws NullPointerException     if the key or value serializer class is null
 * @throws IllegalArgumentException if the Pulsar client cannot be created from the
 *                                  broker list / properties
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public PulsarKafkaProducer(ProducerConfig config) {
    super((kafka.producer.Producer) null);
    partitioner = config.partitionerClass() != null
            ? newInstance(config.partitionerClass(), Partitioner.class, config.props())
            : new DefaultPartitioner(config.props());
    // kafka-config returns default serializer if client doesn't configure it
    checkNotNull(config.keySerializerClass(), "key-serializer class can't be null");
    checkNotNull(config.serializerClass(), "value-serializer class can't be null");
    keySerializer = newInstance(config.keySerializerClass(), Encoder.class, config.props());
    valueSerializer = newInstance(config.serializerClass(), Encoder.class, config.props());

    Properties properties = config.props() != null && config.props().props() != null ? config.props().props()
            : new Properties();
    String serviceUrl = config.brokerList();
    try {
        client = PulsarClientKafkaConfig.getClientBuilder(properties).serviceUrl(serviceUrl).build();
    } catch (PulsarClientException e) {
        // Preserve the cause so connection failures remain diagnosable.
        throw new IllegalArgumentException(
                "Failed to create pulsar-client using url = " + serviceUrl + ", properties = " + properties, e);
    }
    pulsarProducerBuilder = client.newProducer();

    // doc: https://kafka.apache.org/08/documentation.html#producerapi
    // api-doc:
    // https://github.com/apache/kafka/blob/0.8.2.2/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java

    // queue.enqueue.timeout.ms: The amount of time to block before dropping messages when running in async mode and
    // the buffer has reached queue.buffering.max.messages. If set to 0 events will be enqueued immediately or
    // dropped if the queue is full (the producer send call will never block). If set to -1 the producer will block
    // indefinitely and never willingly drop a send.
    boolean blockIfQueueFull = config.queueEnqueueTimeoutMs() == -1;
    // This parameter specifies whether the messages are sent asynchronously in a background thread. Valid values
    // are (1) async for asynchronous send and (2) sync for synchronous send. By setting the producer to async we
    // allow batching together of requests (which is great for throughput) but open the possibility of a failure of
    // the client machine dropping unsent data.
    isSendAsync = "async".equalsIgnoreCase(config.producerType());
    CompressionType compressionType = CompressionType.NONE;
    // Valid values are "none", "gzip" and "snappy"; Pulsar has no gzip codec, so gzip
    // maps to the closest equivalent, ZLIB.
    if ("gzip".equals(config.compressionCodec().name())) {
        compressionType = CompressionType.ZLIB;
    } else if ("snappy".equals(config.compressionCodec().name())) {
        compressionType = CompressionType.SNAPPY;
    }
    long batchDelayMs = config.queueBufferingMaxMs();

    // Only apply sizes/timeouts the user explicitly configured, so Pulsar's own
    // defaults win over Kafka's defaults when a key is absent.
    if (properties.containsKey(KAFKA_KEY_MAX_QUEUE_BUFFERING_MESSAGES)) {
        pulsarProducerBuilder.maxPendingMessages(config.queueBufferingMaxMessages());
    }
    if (properties.containsKey(KAFKA_KEY_MAX_BATCH_MESSAGES)) {
        pulsarProducerBuilder.batchingMaxMessages(config.batchNumMessages());
    }
    if (properties.containsKey(KAFKA_KEY_MAX_QUEUE_BUFFERING_TIME_MS)) {
        pulsarProducerBuilder.batchingMaxPublishDelay(batchDelayMs, TimeUnit.MILLISECONDS);
    }
    if (properties.containsKey(KAFKA_KEY_REQUEST_TIMEOUT_MS)) {
        pulsarProducerBuilder.sendTimeout(config.requestTimeoutMs(), TimeUnit.MILLISECONDS);
    }

    pulsarProducerBuilder.blockIfQueueFull(blockIfQueueFull).compressionType(compressionType);

}
 
Example #28
Source File: PulsarBlockChainEventBroadcaster.java — From the eventeum project (Apache License 2.0)
/**
 * Creates a Pulsar producer on the given topic with LZ4 compression enabled.
 *
 * @param topic the topic the producer publishes to
 * @return the newly created producer
 * @throws PulsarClientException if the producer cannot be created
 */
protected Producer<byte[]> createProducer(String topic) throws PulsarClientException {
    ProducerBuilder<byte[]> producerBuilder = client.newProducer();
    producerBuilder.topic(topic);
    producerBuilder.compressionType(CompressionType.LZ4);
    return producerBuilder.create();
}
 
Example #29
Source File: PulsarComponentConfiguration.java — From the camel-spring-boot project (Apache License 2.0)
/**
 * Sets the compression type applied to messages published by producers
 * created from this component configuration.
 *
 * @param compressionType the Pulsar {@code CompressionType} to use
 */
public void setCompressionType(CompressionType compressionType) {
    this.compressionType = compressionType;
}