kafka.serializer.Decoder Java Examples
The following examples show how to use
kafka.serializer.Decoder.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: ConsumerConnector.java From pulsar with Apache License 2.0 | 6 votes |
public <K, V> Map<String, List<PulsarKafkaStream<K, V>>> createMessageStreamsByFilter(TopicFilter topicFilter, Map<String, Integer> topicCountMap, Decoder<K> keyDecoder, Decoder<V> valueDecoder) { Map<String, List<PulsarKafkaStream<K, V>>> streams = Maps.newHashMap(); topicCountMap.forEach((topic, count) -> { try { Consumer<byte[]> consumer = consumerBuilder.topic(topic).subscribe(); resetOffsets(consumer, strategy); log.info("Creating stream for {}-{} with config {}", topic, groupId, consumerBuilder.toString()); for (int i = 0; i < count; i++) { PulsarKafkaStream<K, V> stream = new PulsarKafkaStream<>(keyDecoder, valueDecoder, consumer, isAutoCommit, clientId); // if multiple thread-count present then client expects multiple streams reading from the same // topic. so, create multiple stream using the same consumer streams.computeIfAbsent(topic, key -> Lists.newArrayList()).add(stream); topicStreams.add(stream); } } catch (PulsarClientException e) { log.error("Failed to subscribe on topic {} with group-id {}, {}", topic, groupId, e.getMessage(), e); throw new RuntimeException("Failed to subscribe on topic " + topic, e); } }); return streams; }
Example #2
Source File: PulsarMessageAndMetadata.java From pulsar with Apache License 2.0 | 5 votes |
/**
 * Binds a consumed Pulsar {@link Message} to its Kafka-style metadata (topic, partition,
 * offset), the decoders that produced the typed values, and the decoded key/value pair.
 */
public PulsarMessageAndMetadata(String topic, int partition, Message rawMessage, long offset,
        Decoder<K> keyDecoder, Decoder<V> valueDecoder, K key, V value) {
    super(topic, partition, rawMessage, offset, keyDecoder, valueDecoder);
    // Duplicate what the superclass holds so this class can expose the fields directly.
    this.key = key;
    this.value = value;
    this.keyDecoder = keyDecoder;
    this.valueDecoder = valueDecoder;
    this.topic = topic;
    this.partition = partition;
    this.offset = offset;
}
Example #3
Source File: PulsarKafkaStream.java From pulsar with Apache License 2.0 | 5 votes |
public PulsarKafkaStream(Decoder<K> keyDecoder, Decoder<V> valueDecoder, Consumer<byte[]> consumer, boolean isAutoCommit, String clientId) { this.keyDeSerializer = Optional.ofNullable(keyDecoder); this.valueDeSerializer = Optional.ofNullable(valueDecoder); this.iterator = new ConsumerIterator<>(consumer, receivedMessages, keyDeSerializer, valueDeSerializer, isAutoCommit); }
Example #4
Source File: ConsumerIterator.java From pulsar with Apache License 2.0 | 5 votes |
/**
 * Iterator over messages drained from a Pulsar consumer into a shared queue.
 *
 * @param consumer         the Pulsar consumer messages are received from
 * @param receivedMessages queue shared with the stream that buffers raw messages
 * @param keyDeSerializer  optional decoder for message keys
 * @param valueDeSerializer optional decoder for message payloads
 * @param isAutoCommit     whether offsets are acknowledged automatically
 */
public ConsumerIterator(Consumer<byte[]> consumer, ConcurrentLinkedQueue<Message<byte[]>> receivedMessages,
        Optional<Decoder<K>> keyDeSerializer, Optional<Decoder<V>> valueDeSerializer, boolean isAutoCommit) {
    this.isAutoCommit = isAutoCommit;
    this.keyDeSerializer = keyDeSerializer;
    this.valueDeSerializer = valueDeSerializer;
    this.receivedMessages = receivedMessages;
    this.consumer = consumer;
}
Example #5
Source File: PulsarMessageAndMetadata.java From pulsar with Apache License 2.0 | 4 votes |
/** Returns the decoder used to deserialize message payloads. */
@Override
public Decoder<V> valueDecoder() {
    return valueDecoder;
}
Example #6
Source File: PulsarMessageAndMetadata.java From pulsar with Apache License 2.0 | 4 votes |
/** Returns the decoder used to deserialize message keys. */
@Override
public Decoder<K> keyDecoder() {
    return keyDecoder;
}
Example #7
Source File: ConsumerConnector.java From pulsar with Apache License 2.0 | 4 votes |
/**
 * Creates message streams for the requested topics.
 *
 * <p>Pure delegation: the filter-based overload ignores its filter argument here, so
 * {@code null} is passed through.
 */
public <K, V> Map<String, List<PulsarKafkaStream<K, V>>> createMessageStreams(Map<String, Integer> topicCountMap,
        Decoder<K> keyDecoder, Decoder<V> valueDecoder) {
    return createMessageStreamsByFilter(null, topicCountMap, keyDecoder, valueDecoder);
}
Example #8
Source File: ConsumerConnector.java From pulsar with Apache License 2.0 | 4 votes |
/**
 * Not supported by the Pulsar adaptor.
 *
 * @throws UnsupportedOperationException always
 */
public <K, V> List<PulsarKafkaStream<K, V>> createMessageStreamsByFilter(TopicFilter topicFilter, int arg1,
        Decoder<K> keyDecoder, Decoder<V> valueDecoder) {
    throw new UnsupportedOperationException("method not supported");
}
Example #9
Source File: KafkaMessageReceiverPool.java From message-queue-client-framework with Apache License 2.0 | 2 votes |
/**
 * Initializes the receiver pool: sizes the pool to the topic's partition count, wires the
 * optional retry handler, builds the Kafka consumer connector, instantiates the key/value
 * decoders reflectively, and submits one receiver thread per stream.
 */
@Override
public synchronized void init() {
    String topic = destination.getDestinationName();
    int partitionCount = getReceiver().getPartitionCount(topic);
    // Never run more threads than partitions; 0 means "use the partition count".
    if (poolSize == 0 || poolSize > partitionCount) {
        setPoolSize(partitionCount);
    }
    if (retryCount > 0) {
        receiverRetry = new KafkaMessageReceiverRetry<MessageAndMetadata<K, V>>(topic, retryCount, messageAdapter);
    }
    this.threadFactory = new KafkaPoolThreadFactory(tagger + "-" + topic);
    this.pool = Executors.newFixedThreadPool(poolSize, threadFactory);
    logger.info("Message receiver pool initializing. poolSize : " + poolSize + " config : " + props);
    consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, poolSize);
    VerifiableProperties verifiableProps = new VerifiableProperties(props);
    // Decoder classes are configured by the user, so they can only be built reflectively;
    // the casts are unavoidable but checked against the configured class.
    @SuppressWarnings("unchecked")
    Decoder<K> keyDecoder = (Decoder<K>) RefleTool.newInstance(keyDecoderClass, verifiableProps);
    @SuppressWarnings("unchecked")
    Decoder<V> valDecoder = (Decoder<V>) RefleTool.newInstance(valDecoderClass, verifiableProps);
    Map<String, List<KafkaStream<K, V>>> streamsByTopic =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valDecoder);
    // One dedicated receiver thread per stream.
    for (final KafkaStream<K, V> stream : streamsByTopic.get(topic)) {
        pool.submit(new ReceiverThread(stream, messageAdapter));
    }
    logger.info("Message receiver pool initialized.");
    running.set(true);
}
Example #10
Source File: KafkaMessageReceiverImpl.java From message-queue-client-framework with Apache License 2.0 | 2 votes |
/**
 * Reads up to {@code readOffset} messages from the given topic partition, starting at
 * {@code beginOffset}, and decodes each payload into a value object.
 *
 * @param topic       topic to read from
 * @param partition   partition id within the topic
 * @param beginOffset offset of the first message to read
 * @param readOffset  number of messages to read; must be greater than 0
 * @return decoded values; empty if no partition leader was found after 3 attempts
 * @throws IllegalArgumentException if {@code readOffset} is not positive
 */
@Override
public synchronized List<V> receive(String topic, int partition, long beginOffset, long readOffset) {
    if (readOffset <= 0) {
        throw new IllegalArgumentException("read offset must be greater than 0");
    }
    List<V> messages = new ArrayList<V>();
    // Retry leader discovery a few times before giving up with an empty result.
    boolean leaderFound = false;
    for (int i = 0; i < 3; i++) {
        if (checkLeader(topic, partition, beginOffset)) {
            leaderFound = true;
            break;
        }
    }
    if (!leaderFound) {
        return messages;
    }
    // Hoisted out of the loop: the original built a fresh Decoder via reflection for every
    // message, which is loop-invariant. Assumes the decoder is reusable across messages —
    // standard for kafka.serializer.Decoder implementations, but confirm for custom ones.
    @SuppressWarnings("unchecked")
    Decoder<V> decoder = (Decoder<V>) RefleTool.newInstance(pool.getValDecoderClass(), props);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
        long currentOffset = messageAndOffset.offset();
        // Stop once readOffset messages past beginOffset have been consumed.
        if (currentOffset > beginOffset + readOffset - 1) {
            break;
        }
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        messages.add(decoder.fromBytes(bytes));
    }
    return messages;
}