com.microsoft.azure.eventhubs.EventData Java Examples
The following examples show how to use
com.microsoft.azure.eventhubs.EventData.
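Before the project examples, here is a minimal, self-contained sketch of the two sides of the EventData API that the snippets below rely on: building an event to send (EventData.create plus application properties) and reading a received event (body bytes plus broker-assigned system properties). The class name and the "origin" property key are illustrative placeholders, not taken from any of the projects below; actually sending or receiving through an EventHubClient is shown in the examples that follow.

import com.microsoft.azure.eventhubs.EventData;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class EventDataSketch {

    // Build an event to send: body bytes plus optional application properties,
    // as the producer examples below do before calling send()/sendSync().
    static EventData buildOutgoingEvent(String body) {
        EventData eventData = EventData.create(body.getBytes(StandardCharsets.UTF_8));
        // "origin" is an arbitrary application property used only for illustration.
        eventData.getProperties().put("origin", "sketch");
        return eventData;
    }

    // Read a received event: the payload comes from getBytes(), while broker-assigned
    // metadata (offset, sequence number, enqueued time) comes from getSystemProperties(),
    // mirroring what the consumer examples below copy into attributes.
    static Map<String, String> describeReceivedEvent(EventData eventData) {
        final Map<String, String> attributes = new HashMap<>();
        attributes.put("body", new String(eventData.getBytes(), StandardCharsets.UTF_8));
        final EventData.SystemProperties systemProperties = eventData.getSystemProperties();
        if (systemProperties != null) {
            attributes.put("offset", systemProperties.getOffset());
            attributes.put("sequence", String.valueOf(systemProperties.getSequenceNumber()));
            attributes.put("enqueued", String.valueOf(systemProperties.getEnqueuedTime()));
        }
        return attributes;
    }
}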
Example #1
Source File: PutAzureEventHub.java From localization_nifi with Apache License 2.0

protected void sendMessage(final byte[] buffer) throws ProcessException {
    final EventHubClient sender = senderQueue.poll();
    if (null != sender) {
        try {
            sender.sendSync(new EventData(buffer));
        } catch (final ServiceBusException sbe) {
            throw new ProcessException("Caught exception trying to send message to eventbus", sbe);
        } finally {
            senderQueue.offer(sender);
        }
    } else {
        throw new ProcessException("No EventHubClients are configured for sending");
    }
}
Example #2
Source File: ConsumeAzureEventHub.java From nifi with Apache License 2.0

@Override
public void onEvents(PartitionContext context, Iterable<EventData> messages) throws Exception {
    final ProcessSession session = processSessionFactory.createSession();
    try {
        final StopWatch stopWatch = new StopWatch(true);

        if (readerFactory != null && writerFactory != null) {
            writeRecords(context, messages, session, stopWatch);
        } else {
            writeFlowFiles(context, messages, session, stopWatch);
        }

        // Commit NiFi first.
        session.commit();
        // If creating an Event Hub checkpoint failed, then the same message can be retrieved again.
        context.checkpoint();

    } catch (Exception e) {
        getLogger().error("Unable to fully process received message due to " + e, e);
        // FlowFiles that are already committed will not be rolled back.
        session.rollback();
    }
}
Example #3
Source File: ConsumeAzureEventHub.java From nifi with Apache License 2.0

private void writeFlowFiles(PartitionContext context, Iterable<EventData> messages, ProcessSession session, StopWatch stopWatch) {
    final String eventHubName = context.getEventHubPath();
    final String partitionId = context.getPartitionId();
    final String consumerGroup = context.getConsumerGroupName();
    messages.forEach(eventData -> {
        FlowFile flowFile = session.create();

        final Map<String, String> attributes = new HashMap<>();
        putEventHubAttributes(attributes, eventHubName, partitionId, eventData);
        flowFile = session.putAllAttributes(flowFile, attributes);

        flowFile = session.write(flowFile, out -> {
            out.write(eventData.getBytes());
        });

        transferTo(REL_SUCCESS, session, stopWatch, eventHubName, partitionId, consumerGroup, flowFile);
    });
}
Example #4
Source File: GetAzureEventHubTest.java From nifi with Apache License 2.0

@Override
protected Iterable<EventData> receiveEvents(final ProcessContext context, final String partitionId) throws ProcessException {
    if (nullReceive) {
        return null;
    }
    if (getReceiverThrow) {
        throw new ProcessException("Could not create receiver");
    }
    final LinkedList<EventData> receivedEvents = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        EventData eventData = EventData.create(String.format("test event number: %d", i).getBytes());
        if (received) {
            HashMap<String, Object> properties = new HashMap<>();
            properties.put(AmqpConstants.PARTITION_KEY_ANNOTATION_NAME, PARTITION_KEY_VALUE);
            properties.put(AmqpConstants.OFFSET_ANNOTATION_NAME, OFFSET_VALUE);
            properties.put(AmqpConstants.SEQUENCE_NUMBER_ANNOTATION_NAME, SEQUENCE_NUMBER_VALUE);
            properties.put(AmqpConstants.ENQUEUED_TIME_UTC_ANNOTATION_NAME, ENQUEUED_TIME_VALUE);
            SystemProperties systemProperties = new SystemProperties(properties);
            eventData.setSystemProperties(systemProperties);
        }
        receivedEvents.add(eventData);
    }
    return receivedEvents;
}
Example #5
Source File: TestConsumeAzureEventHub.java From nifi with Apache License 2.0

@Test
public void testReceiveOne() throws Exception {
    final Iterable<EventData> eventDataList = Arrays.asList(EventData.create("one".getBytes(StandardCharsets.UTF_8)));

    eventProcessor.onEvents(partitionContext, eventDataList);

    processSession.assertCommitted();
    final List<MockFlowFile> flowFiles = processSession.getFlowFilesForRelationship(ConsumeAzureEventHub.REL_SUCCESS);
    assertEquals(1, flowFiles.size());
    final MockFlowFile msg1 = flowFiles.get(0);
    msg1.assertContentEquals("one");
    msg1.assertAttributeEquals("eventhub.name", "eventhub-name");
    msg1.assertAttributeEquals("eventhub.partition", "partition-id");

    final List<ProvenanceEventRecord> provenanceEvents = sharedState.getProvenanceEvents();
    assertEquals(1, provenanceEvents.size());
    final ProvenanceEventRecord provenanceEvent1 = provenanceEvents.get(0);
    assertEquals(ProvenanceEventType.RECEIVE, provenanceEvent1.getEventType());
    assertEquals("amqps://namespace.servicebus.windows.net/" +
            "eventhub-name/ConsumerGroups/consumer-group/Partitions/partition-id", provenanceEvent1.getTransitUri());
}
Example #6
Source File: TestConsumeAzureEventHub.java From nifi with Apache License 2.0

@Test
public void testReceiveTwo() throws Exception {
    final Iterable<EventData> eventDataList = Arrays.asList(
            EventData.create("one".getBytes(StandardCharsets.UTF_8)),
            EventData.create("two".getBytes(StandardCharsets.UTF_8))
    );

    eventProcessor.onEvents(partitionContext, eventDataList);

    processSession.assertCommitted();
    final List<MockFlowFile> flowFiles = processSession.getFlowFilesForRelationship(ConsumeAzureEventHub.REL_SUCCESS);
    assertEquals(2, flowFiles.size());
    final MockFlowFile msg1 = flowFiles.get(0);
    msg1.assertContentEquals("one");
    final MockFlowFile msg2 = flowFiles.get(1);
    msg2.assertContentEquals("two");

    final List<ProvenanceEventRecord> provenanceEvents = sharedState.getProvenanceEvents();
    assertEquals(2, provenanceEvents.size());
}
Example #7
Source File: TestConsumeAzureEventHub.java From nifi with Apache License 2.0

@Test
public void testCheckpointFailure() throws Exception {
    final Iterable<EventData> eventDataList = Arrays.asList(
            EventData.create("one".getBytes(StandardCharsets.UTF_8)),
            EventData.create("two".getBytes(StandardCharsets.UTF_8))
    );
    doThrow(new RuntimeException("Failed to create a checkpoint.")).when(partitionContext).checkpoint();

    eventProcessor.onEvents(partitionContext, eventDataList);

    // Even if it fails to create a checkpoint, these FlowFiles are already committed.
    processSession.assertCommitted();
    final List<MockFlowFile> flowFiles = processSession.getFlowFilesForRelationship(ConsumeAzureEventHub.REL_SUCCESS);
    assertEquals(2, flowFiles.size());
    final MockFlowFile msg1 = flowFiles.get(0);
    msg1.assertContentEquals("one");
    final MockFlowFile msg2 = flowFiles.get(1);
    msg2.assertContentEquals("two");

    final List<ProvenanceEventRecord> provenanceEvents = sharedState.getProvenanceEvents();
    assertEquals(2, provenanceEvents.size());
}
Example #8
Source File: EventHubSystemProducer.java From samza with Apache License 2.0

protected EventData createEventData(String streamId, OutgoingMessageEnvelope envelope) {
    Optional<Interceptor> interceptor = Optional.ofNullable(interceptors.getOrDefault(streamId, null));
    byte[] eventValue = (byte[]) envelope.getMessage();
    if (interceptor.isPresent()) {
        eventValue = interceptor.get().intercept(eventValue);
    }

    EventData eventData = new EventDataImpl(eventValue);

    eventData.getProperties().put(PRODUCE_TIMESTAMP, Long.toString(System.currentTimeMillis()));

    if (config.getSendKeyInEventProperties(systemName)) {
        String keyValue = "";
        if (envelope.getKey() != null) {
            keyValue = (envelope.getKey() instanceof byte[]) ? new String((byte[]) envelope.getKey())
                    : envelope.getKey().toString();
        }
        eventData.getProperties().put(KEY, keyValue);
    }
    return eventData;
}
Example #9
Source File: EventHubSystemProducer.java From samza with Apache License 2.0

private CompletableFuture<Void> sendToEventHub(String streamId, EventData eventData, Object partitionKey,
        EventHubClient eventHubClient) {
    if (PartitioningMethod.ROUND_ROBIN.equals(partitioningMethod)) {
        return eventHubClient.send(eventData);
    } else if (PartitioningMethod.EVENT_HUB_HASHING.equals(partitioningMethod)) {
        if (partitionKey == null) {
            throw new SamzaException("Partition key cannot be null for EventHub hashing");
        }
        return eventHubClient.send(eventData, convertPartitionKeyToString(partitionKey));
    } else if (PartitioningMethod.PARTITION_KEY_AS_PARTITION.equals(partitioningMethod)) {
        if (!(partitionKey instanceof Integer)) {
            String msg = "Partition key should be of type Integer";
            throw new SamzaException(msg);
        }

        Integer numPartition = streamPartitionSenders.get(streamId).size();
        Integer destinationPartition = (Integer) partitionKey % numPartition;

        PartitionSender sender = streamPartitionSenders.get(streamId).get(destinationPartition);
        return sender.send(eventData);
    } else {
        throw new SamzaException("Unknown partitioning method " + partitioningMethod);
    }
}
Example #10
Source File: PutAzureEventHubTest.java From nifi with Apache License 2.0

@Test
public void testMessageIsSentWithPartitioningKeyIfSpecifiedAndPopulated() {
    MockedEventhubClientMockPutAzureEventHub processor = new PutAzureEventHubTest.MockedEventhubClientMockPutAzureEventHub();
    MockitoAnnotations.initMocks(processor);

    EventHubClient eventHubClient = processor.getEventHubClient();
    when(eventHubClient.send(any(EventData.class), anyString()))
            .thenReturn(CompletableFuture.completedFuture(null));
    when(eventHubClient.send(any(EventData.class)))
            .thenThrow(new RuntimeException("Partition-key-less method called despite key is defined and required."));

    testRunner = TestRunners.newTestRunner(processor);
    setUpStandardTestConfig();
    testRunner.setProperty(PutAzureEventHub.PARTITIONING_KEY_ATTRIBUTE_NAME, TEST_PARTITIONING_KEY_ATTRIBUTE_NAME);

    MockFlowFile flowFile = new MockFlowFile(1234);
    flowFile.putAttributes(ImmutableMap.of(TEST_PARTITIONING_KEY_ATTRIBUTE_NAME, TEST_PARTITIONING_KEY));
    testRunner.enqueue(flowFile);
    testRunner.run(1, true);

    Mockito.verify(eventHubClient).send(any(EventData.class), eq(TEST_PARTITIONING_KEY));
}
Example #11
Source File: GetAzureEventHubTest.java From localization_nifi with Apache License 2.0

@Override
protected Iterable<EventData> receiveEvents(final ProcessContext context, final String partitionId) throws ProcessException {
    if (nullReceive) {
        return null;
    }
    if (getReceiverThrow) {
        throw new ProcessException("Could not create receiver");
    }
    final LinkedList<EventData> receivedEvents = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final EventData eventData = new EventData(String.format("test event number: %d", i).getBytes());
        Whitebox.setInternalState(eventData, "isReceivedEvent", true);
        Whitebox.setInternalState(eventData, "partitionKey", "0");
        Whitebox.setInternalState(eventData, "offset", "100");
        Whitebox.setInternalState(eventData, "sequenceNumber", 13L);
        Whitebox.setInternalState(eventData, "enqueuedTime", Instant.now().minus(100L, ChronoUnit.SECONDS));
        receivedEvents.add(eventData);
    }
    return receivedEvents;
}
Example #12
Source File: PutAzureEventHubTest.java From nifi with Apache License 2.0

@Test
public void testAllAttributesAreLiftedToProperties() {
    MockedEventhubClientMockPutAzureEventHub processor = new PutAzureEventHubTest.MockedEventhubClientMockPutAzureEventHub();
    MockitoAnnotations.initMocks(processor);

    EventHubClient eventHubClient = processor.getEventHubClient();
    when(eventHubClient.send(any(EventData.class)))
            .thenReturn(CompletableFuture.completedFuture(null));

    testRunner = TestRunners.newTestRunner(processor);
    setUpStandardTestConfig();

    MockFlowFile flowFile = new MockFlowFile(1234);
    ImmutableMap<String, String> demoAttributes = ImmutableMap.of("A", "a", "B", "b", "D", "d", "C", "c");
    flowFile.putAttributes(demoAttributes);
    testRunner.enqueue(flowFile);
    testRunner.run(1, true);

    ArgumentCaptor<EventData> eventDataCaptor = ArgumentCaptor.forClass(EventData.class);
    Mockito.verify(eventHubClient).send(eventDataCaptor.capture());

    EventData event = eventDataCaptor.getValue();
    assertTrue(event.getProperties().entrySet().containsAll(demoAttributes.entrySet()));
}
Example #13
Source File: TestConsumeAzureEventHub.java From nifi with Apache License 2.0

@Test
public void testReceiveAllRecordFailure() throws Exception {
    final List<EventData> eventDataList = Collections.singletonList(
            EventData.create("one".getBytes(StandardCharsets.UTF_8))
    );

    setupRecordReader(eventDataList, 0, null);

    setupRecordWriter();

    eventProcessor.onEvents(partitionContext, eventDataList);

    processSession.assertCommitted();
    final List<MockFlowFile> flowFiles = processSession.getFlowFilesForRelationship(ConsumeAzureEventHub.REL_SUCCESS);
    assertEquals(0, flowFiles.size());

    final List<MockFlowFile> failedFFs = processSession.getFlowFilesForRelationship(ConsumeAzureEventHub.REL_PARSE_FAILURE);
    assertEquals(1, failedFFs.size());
    final MockFlowFile failed1 = failedFFs.get(0);
    failed1.assertContentEquals("one");
    failed1.assertAttributeEquals("eventhub.name", "eventhub-name");
    failed1.assertAttributeEquals("eventhub.partition", "partition-id");

    final List<ProvenanceEventRecord> provenanceEvents = sharedState.getProvenanceEvents();
    assertEquals(1, provenanceEvents.size());
    final ProvenanceEventRecord provenanceEvent1 = provenanceEvents.get(0);
    assertEquals(ProvenanceEventType.RECEIVE, provenanceEvent1.getEventType());
    assertEquals("amqps://namespace.servicebus.windows.net/" +
            "eventhub-name/ConsumerGroups/consumer-group/Partitions/partition-id", provenanceEvent1.getTransitUri());
}
Example #14
Source File: TestConsumeAzureEventHub.java From nifi with Apache License 2.0

@Test
public void testReceiveRecords() throws Exception {
    final List<EventData> eventDataList = Arrays.asList(
            EventData.create("one".getBytes(StandardCharsets.UTF_8)),
            EventData.create("two".getBytes(StandardCharsets.UTF_8))
    );

    setupRecordReader(eventDataList);

    setupRecordWriter();

    eventProcessor.onEvents(partitionContext, eventDataList);

    processSession.assertCommitted();
    final List<MockFlowFile> flowFiles = processSession.getFlowFilesForRelationship(ConsumeAzureEventHub.REL_SUCCESS);
    assertEquals(1, flowFiles.size());
    final MockFlowFile ff1 = flowFiles.get(0);
    ff1.assertContentEquals("onetwo");
    ff1.assertAttributeEquals("eventhub.name", "eventhub-name");
    ff1.assertAttributeEquals("eventhub.partition", "partition-id");

    final List<ProvenanceEventRecord> provenanceEvents = sharedState.getProvenanceEvents();
    assertEquals(1, provenanceEvents.size());
    final ProvenanceEventRecord provenanceEvent1 = provenanceEvents.get(0);
    assertEquals(ProvenanceEventType.RECEIVE, provenanceEvent1.getEventType());
    assertEquals("amqps://namespace.servicebus.windows.net/" +
            "eventhub-name/ConsumerGroups/consumer-group/Partitions/partition-id", provenanceEvent1.getTransitUri());
}
Example #15
Source File: TestConsumeAzureEventHub.java From nifi with Apache License 2.0

private void setupRecordReader(List<EventData> eventDataList, int throwExceptionAt, String writeFailureWith)
        throws MalformedRecordException, IOException, SchemaNotFoundException {
    final RecordReaderFactory readerFactory = mock(RecordReaderFactory.class);
    processor.setReaderFactory(readerFactory);
    final RecordReader reader = mock(RecordReader.class);
    when(readerFactory.createRecordReader(anyMap(), any(), anyLong(), any())).thenReturn(reader);
    final List<Record> recordList = eventDataList.stream()
            .map(eventData -> toRecord(new String(eventData.getBytes())))
            .collect(Collectors.toList());

    // Add null to indicate the end of records.
    final Function<List<Record>, List<Record>> addEndRecord = rs -> rs.stream()
            // If the record is simulated to throw an exception when writing, do not add a null record to avoid messing up indices.
            .flatMap(r -> r.getAsString("value").equals(writeFailureWith) ? Stream.of(r) : Stream.of(r, null))
            .collect(Collectors.toList());

    final List<Record> recordSetList = addEndRecord.apply(recordList);
    final Record[] records = recordSetList.toArray(new Record[recordSetList.size()]);

    switch (throwExceptionAt) {
        case -1:
            when(reader.nextRecord())
                    .thenReturn(records[0], Arrays.copyOfRange(records, 1, records.length));
            break;
        case 0:
            when(reader.nextRecord())
                    .thenThrow(new MalformedRecordException("Simulating Record parse failure."))
                    .thenReturn(records[0], Arrays.copyOfRange(records, 1, records.length));
            break;
        default:
            final List<Record> recordList1 = addEndRecord.apply(recordList.subList(0, throwExceptionAt));
            final List<Record> recordList2 = addEndRecord.apply(recordList.subList(throwExceptionAt + 1, recordList.size()));
            final Record[] records1 = recordList1.toArray(new Record[recordList1.size()]);
            final Record[] records2 = recordList2.toArray(new Record[recordList2.size()]);
            when(reader.nextRecord())
                    .thenReturn(records1[0], Arrays.copyOfRange(records1, 1, records1.length))
                    .thenThrow(new MalformedRecordException("Simulating Record parse failure."))
                    .thenReturn(records2[0], Arrays.copyOfRange(records2, 1, records2.length));
    }
}
Example #16
Source File: PutAzureEventHubTest.java From nifi with Apache License 2.0

@Test
public void testMessageIsSentWithoutPartitioningKeyIfNotSpecifiedOrNotPopulated() {
    MockedEventhubClientMockPutAzureEventHub processor = new PutAzureEventHubTest.MockedEventhubClientMockPutAzureEventHub();
    MockitoAnnotations.initMocks(processor);

    EventHubClient eventHubClient = processor.getEventHubClient();
    when(eventHubClient.send(any(EventData.class), anyString()))
            .thenThrow(new RuntimeException("Partition-key-full method called despite key is Not required or not populated."));
    when(eventHubClient.send(any(EventData.class)))
            .thenReturn(CompletableFuture.completedFuture(null));

    testRunner = TestRunners.newTestRunner(processor);
    setUpStandardTestConfig();

    MockFlowFile flowFile = new MockFlowFile(1234);
    flowFile.putAttributes(ImmutableMap.of(TEST_PARTITIONING_KEY_ATTRIBUTE_NAME, TEST_PARTITIONING_KEY));

    // Key not specified
    testRunner.enqueue(flowFile);
    testRunner.run(1, true);

    Mockito.verify(eventHubClient, never()).send(any(EventData.class), eq(TEST_PARTITIONING_KEY));
    Mockito.verify(eventHubClient).send(any(EventData.class));

    // Key wanted but not available
    testRunner.setProperty(PutAzureEventHub.PARTITIONING_KEY_ATTRIBUTE_NAME, "Non-existing-attribute");

    testRunner.enqueue(flowFile);
    testRunner.run(1, true);

    Mockito.verify(eventHubClient, never()).send(any(EventData.class), eq(TEST_PARTITIONING_KEY));
    Mockito.verify(eventHubClient, times(2)).send(any(EventData.class));
}
Example #17
Source File: ActionsEventProcessor.java From remote-monitoring-services-java with MIT License

@Override
public void onEvents(PartitionContext context, Iterable<EventData> events) throws Exception {
    for (EventData eventData : events) {
        String data = new String(eventData.getBytes(), "UTF8");
        List<AsaAlarmApiModel> alarms = AlarmParser.parseAlarmList(data);
        actionManager.executeAsync(alarms).toCompletableFuture().get();
    }
    context.checkpoint().get();
}
Example #18
Source File: ConsumeAzureEventHub.java From nifi with Apache License 2.0

private void putEventHubAttributes(Map<String, String> attributes, String eventHubName, String partitionId, EventData eventData) {
    final EventData.SystemProperties systemProperties = eventData.getSystemProperties();
    if (null != systemProperties) {
        attributes.put("eventhub.enqueued.timestamp", String.valueOf(systemProperties.getEnqueuedTime()));
        attributes.put("eventhub.offset", systemProperties.getOffset());
        attributes.put("eventhub.sequence", String.valueOf(systemProperties.getSequenceNumber()));
    }

    attributes.put("eventhub.name", eventHubName);
    attributes.put("eventhub.partition", partitionId);
}
Example #19
Source File: PutAzureEventHubTest.java From nifi with Apache License 2.0

@Test
public void testBatchProcessesUptoMaximum() {
    MockedEventhubClientMockPutAzureEventHub processor = new PutAzureEventHubTest.MockedEventhubClientMockPutAzureEventHub();
    MockitoAnnotations.initMocks(processor);

    EventHubClient eventHubClient = processor.getEventHubClient();
    CompletableFuture<Void> failedFuture = new CompletableFuture<Void>();
    failedFuture.completeExceptionally(new IllegalArgumentException());

    when(eventHubClient.send(any(EventData.class)))
            .thenReturn(failedFuture)
            .thenReturn(CompletableFuture.completedFuture(null));

    testRunner = TestRunners.newTestRunner(processor);
    setUpStandardTestConfig();

    List<MockFlowFile> flowFiles = Arrays.asList(new MockFlowFile(1), new MockFlowFile(2), new MockFlowFile(3),
            new MockFlowFile(4), new MockFlowFile(5), new MockFlowFile(6));
    flowFiles.stream().forEachOrdered(ff -> testRunner.enqueue(ff));

    testRunner.setProperty(PutAzureEventHub.MAX_BATCH_SIZE, "4");
    testRunner.run(1, true);

    Mockito.verify(eventHubClient, times(4)).send(any(EventData.class));
    testRunner.assertTransferCount(PutAzureEventHub.REL_SUCCESS, 3);
    testRunner.assertTransferCount(PutAzureEventHub.REL_FAILURE, 1);
}
Example #20
Source File: PutAzureEventHub.java From nifi with Apache License 2.0

/**
 * @param buffer Block of data to be sent as the message body. The entire array is used. See Event Hubs limits for body size.
 * @param partitioningKey A hint for the Event Hub message broker on how to distribute messages consistently amongst multiple partitions.
 * @param userProperties A key-value set of custom information that is attached in the user-defined properties part of the message.
 * @return future object for referencing a success/failure of this message sending.
 * @throws ProcessException
 *
 * @see <a href="https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-quotas">Event Hubs Quotas</a>
 */
protected CompletableFuture<Void> sendMessage(final byte[] buffer, String partitioningKey, Map<String, Object> userProperties) throws ProcessException {

    final EventHubClient sender = senderQueue.poll();
    if (sender == null) {
        throw new ProcessException("No EventHubClients are configured for sending");
    }

    // Create message with properties
    final EventData eventData = EventData.create(buffer);
    final Map<String, Object> properties = eventData.getProperties();
    if (userProperties != null && properties != null) {
        properties.putAll(userProperties);
    }

    // Send with optional partition key
    final CompletableFuture<Void> eventFuture;
    if (StringUtils.isNotBlank(partitioningKey)) {
        eventFuture = sender.send(eventData, partitioningKey);
    } else {
        eventFuture = sender.send(eventData);
    }

    senderQueue.offer(sender);

    return eventFuture;
}
Example #21
Source File: EventHubProcessor.java From datacollector with Apache License 2.0

@Override
public void onEvents(PartitionContext context, Iterable<EventData> messages) {
    BatchContext batchContext = pushSourceContext.startBatch();
    final EventData[] lastEventData = new EventData[]{null};

    if (messages != null) {
        messages.forEach(eventData -> {
            lastEventData[0] = eventData;
            List<Record> records = new ArrayList<>();
            String requestId = System.currentTimeMillis() + "." + counter.getAndIncrement();
            try (DataParser parser = dataParserFactory.getParser(requestId, eventData.getBytes())) {
                Record parsedRecord = parser.parse();
                while (parsedRecord != null) {
                    records.add(parsedRecord);
                    parsedRecord = parser.parse();
                }
                for (Record record : records) {
                    batchContext.getBatchMaker().addRecord(record);
                }
            } catch (Exception ex) {
                errorQueue.offer(ex);
                LOG.warn("Error while processing request payload from: {}", ex.toString(), ex);
            }
        });
    }

    if (pushSourceContext.processBatch(batchContext) && lastEventData[0] != null && !pushSourceContext.isPreview()) {
        context.checkpoint(lastEventData[0]);
    }
}
Example #22
Source File: EventHubConsoleConsumer.java From samza with Apache License 2.0

private static Object handleComplete(PartitionReceiver receiver, Iterable<EventData> records, Throwable throwable) {
    for (EventData record : records) {
        System.out.println(
                String.format("Partition %s, Event %s", receiver.getPartitionId(), new String(record.getBytes())));
    }
    receiver.receive(10).handle((r, t) -> handleComplete(receiver, r, t));
    return null;
}
Example #23
Source File: MockEventData.java From samza with Apache License 2.0

public static List<EventData> generateEventData(int numEvents) {
    Random rand = new Random(System.currentTimeMillis());
    List<EventData> result = new ArrayList<>();
    for (int i = 0; i < numEvents; i++) {
        String key = "key_" + rand.nextInt();
        String message = "message:" + rand.nextInt();
        String offset = "offset_" + i;
        EventData eventData = new MockEventData(message.getBytes(Charset.defaultCharset()), key, offset);
        result.add(eventData);
    }
    return result;
}
Example #24
Source File: TestEventHubSystemAdmin.java From samza with Apache License 2.0

@Test
public void testStartpointResolverShouldResolveTheStartpointTimestampToCorrectOffset() throws EventHubException {
    // Initialize variables required for testing.
    EventHubSystemAdmin mockEventHubSystemAdmin = Mockito.mock(EventHubSystemAdmin.class);
    EventHubConfig eventHubConfig = Mockito.mock(EventHubConfig.class);
    SystemStreamPartition systemStreamPartition = new SystemStreamPartition("test-system", "test-stream", new Partition(0));
    String mockedOffsetToReturn = "100";

    // Setup the mock variables.
    EventHubClientManager mockEventHubClientManager = Mockito.mock(EventHubClientManager.class);
    EventHubClient mockEventHubClient = Mockito.mock(EventHubClient.class);
    PartitionReceiver mockPartitionReceiver = Mockito.mock(PartitionReceiver.class);
    EventData mockEventData = Mockito.mock(EventData.class);
    EventData.SystemProperties mockSystemProperties = Mockito.mock(EventData.SystemProperties.class);

    // Configure the mock variables to return the appropriate values.
    Mockito.when(mockEventHubSystemAdmin.getOrCreateStreamEventHubClient("test-stream")).thenReturn(mockEventHubClientManager);
    Mockito.when(mockEventHubClientManager.getEventHubClient()).thenReturn(mockEventHubClient);
    Mockito.when(mockEventHubClient.createReceiverSync(Mockito.anyString(), Mockito.anyString(), Mockito.any())).thenReturn(mockPartitionReceiver);
    Mockito.when(mockPartitionReceiver.receiveSync(1)).thenReturn(Arrays.asList(mockEventData));
    Mockito.when(mockEventData.getSystemProperties()).thenReturn(mockSystemProperties);
    Mockito.when(mockSystemProperties.getOffset()).thenReturn(mockedOffsetToReturn);

    // Test the Offset resolver.
    EventHubSamzaOffsetResolver resolver = new EventHubSamzaOffsetResolver(mockEventHubSystemAdmin, eventHubConfig);
    String resolvedOffset = resolver.visit(systemStreamPartition, new StartpointTimestamp(100L));
    Assert.assertEquals(mockedOffsetToReturn, resolvedOffset);
}
Example #25
Source File: ITestEventHubSystemProducer.java From samza with Apache License 2.0

private void receiveMessages(PartitionReceiver receiver, int numMessages) throws EventHubException {
    int count = 0;
    while (count < numMessages) {
        Iterable<EventData> messages = receiver.receiveSync(100);
        if (messages == null) {
            break;
        }
        for (EventData data : messages) {
            count++;
            LOG.info("Data" + new String(data.getBytes()));
        }
    }
}
Example #26
Source File: EventHubSystemConsumer.java From samza with Apache License 2.0

@Override
public void onReceive(Iterable<EventData> events) {
    if (events != null) {
        events.forEach(event -> {
            byte[] eventDataBody = event.getBytes();
            if (interceptor != null) {
                eventDataBody = interceptor.intercept(eventDataBody);
            }
            String offset = event.getSystemProperties().getOffset();
            Object partitionKey = event.getSystemProperties().getPartitionKey();
            if (partitionKey == null) {
                partitionKey = event.getProperties().get(EventHubSystemProducer.KEY);
            }
            try {
                updateMetrics(event);

                // note that the partition key can be null
                put(ssp, new EventHubIncomingMessageEnvelope(ssp, offset, partitionKey, eventDataBody, event));
            } catch (InterruptedException e) {
                String msg = String.format("Interrupted while adding the event from ssp %s to dispatch queue.", ssp);
                LOG.error(msg, e);
                throw new SamzaException(msg, e);
            }

            // Cache latest checkpoint
            streamPartitionOffsets.put(ssp, offset);
        });
    }
}
Example #27
Source File: EventHubSystemConsumer.java From samza with Apache License 2.0

private void updateMetrics(EventData event) {
    int eventDataLength = event.getBytes() == null ? 0 : event.getBytes().length;
    eventReadRate.inc();
    aggEventReadRate.inc();
    eventByteReadRate.inc(eventDataLength);
    aggEventByteReadRate.inc(eventDataLength);

    long latencyMs = Duration.between(event.getSystemProperties().getEnqueuedTime(), Instant.now()).toMillis();
    readLatency.update(latencyMs);
    aggConsumptionLagMs.update(latencyMs);
}
Example #28
Source File: GetAzureEventHub.java From nifi with Apache License 2.0

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final BlockingQueue<String> partitionIds = this.partitionNames;
    final String partitionId = partitionIds.poll();
    if (partitionId == null) {
        getLogger().debug("No partitions available");
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    try {

        final Iterable<EventData> receivedEvents = receiveEvents(context, partitionId);
        if (receivedEvents == null) {
            return;
        }

        for (final EventData eventData : receivedEvents) {
            if (null != eventData) {

                final Map<String, String> attributes = new HashMap<>();
                FlowFile flowFile = session.create();
                final EventData.SystemProperties systemProperties = eventData.getSystemProperties();

                if (null != systemProperties) {
                    attributes.put("eventhub.enqueued.timestamp", String.valueOf(systemProperties.getEnqueuedTime()));
                    attributes.put("eventhub.offset", systemProperties.getOffset());
                    attributes.put("eventhub.sequence", String.valueOf(systemProperties.getSequenceNumber()));
                }

                attributes.put("eventhub.name", context.getProperty(EVENT_HUB_NAME).getValue());
                attributes.put("eventhub.partition", partitionId);

                flowFile = session.putAllAttributes(flowFile, attributes);
                flowFile = session.write(flowFile, out -> {
                    out.write(eventData.getBytes());
                });

                session.transfer(flowFile, REL_SUCCESS);

                final String namespace = context.getProperty(NAMESPACE).getValue();
                final String eventHubName = context.getProperty(EVENT_HUB_NAME).getValue();
                final String consumerGroup = context.getProperty(CONSUMER_GROUP).getValue();
                final String serviceBusEndPoint = context.getProperty(SERVICE_BUS_ENDPOINT).getValue();
                final String transitUri = "amqps://" + namespace + serviceBusEndPoint + "/" + eventHubName
                        + "/ConsumerGroups/" + consumerGroup + "/Partitions/" + partitionId;
                session.getProvenanceReporter().receive(flowFile, transitUri, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
            }
        }
    } finally {
        partitionIds.offer(partitionId);
    }
}
Example #29
Source File: GetAzureEventHub.java From localization_nifi with Apache License 2.0

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final BlockingQueue<String> partitionIds = this.partitionNames;
    final String partitionId = partitionIds.poll();
    if (partitionId == null) {
        getLogger().debug("No partitions available");
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);
    try {

        final Iterable<EventData> receivedEvents = receiveEvents(context, partitionId);
        if (receivedEvents == null) {
            return;
        }

        for (final EventData eventData : receivedEvents) {
            if (null != eventData) {

                final Map<String, String> attributes = new HashMap<>();
                FlowFile flowFile = session.create();
                EventData.SystemProperties systemProperties = eventData.getSystemProperties();

                if (null != systemProperties) {
                    attributes.put("eventhub.enqueued.timestamp", String.valueOf(eventData.getSystemProperties().getEnqueuedTime()));
                    attributes.put("eventhub.offset", eventData.getSystemProperties().getOffset());
                    attributes.put("eventhub.sequence", String.valueOf(eventData.getSystemProperties().getSequenceNumber()));
                }

                attributes.put("eventhub.name", context.getProperty(EVENT_HUB_NAME).getValue());
                attributes.put("eventhub.partition", partitionId);

                flowFile = session.putAllAttributes(flowFile, attributes);
                flowFile = session.write(flowFile, out -> {
                    out.write(eventData.getBody());
                });

                session.transfer(flowFile, REL_SUCCESS);

                final String namespace = context.getProperty(NAMESPACE).getValue();
                final String eventHubName = context.getProperty(EVENT_HUB_NAME).getValue();
                final String consumerGroup = context.getProperty(CONSUMER_GROUP).getValue();
                final String transitUri = "amqps://" + namespace + ".servicebus.windows.net" + "/" + eventHubName
                        + "/ConsumerGroups/" + consumerGroup + "/Partitions/" + partitionId;
                session.getProvenanceReporter().receive(flowFile, transitUri, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
            }
        }
    } finally {
        partitionIds.offer(partitionId);
    }
}
Example #30
Source File: EventHubSystemProducer.java From samza with Apache License 2.0

@Override
public synchronized CompletableFuture<Void> sendAsync(String source, OutgoingMessageEnvelope envelope) {
    LOG.debug(String.format("Trying to send %s", envelope));
    if (!isStarted) {
        throw new SamzaException("Trying to call send before the producer is started.");
    }
    if (!isInitialized) {
        // lazy initialization on the first send
        init();
    }

    String streamId = config.getStreamId(envelope.getSystemStream().getStream());
    if (!perStreamEventHubClientManagers.containsKey(streamId)) {
        String msg = String.format("Trying to send event to a destination {%s} that is not registered.", streamId);
        throw new SamzaException(msg);
    }

    EventData eventData = createEventData(streamId, envelope);
    // SAMZA-1654: waiting for the client library to expose the API to calculate the exact size of the AMQP message
    // https://github.com/Azure/azure-event-hubs-java/issues/305
    int eventDataLength = eventData.getBytes() == null ? 0 : eventData.getBytes().length;

    // If the maxMessageSize is less than zero, there is no message size restriction.
    if (this.maxMessageSize > 0 && eventDataLength > this.maxMessageSize) {
        LOG.info("Received a message with size {} > maxMessageSize configured {}, skipping it",
                eventDataLength, this.maxMessageSize);
        eventSkipRate.get(streamId).inc();
        aggEventSkipRate.inc();
        return CompletableFuture.completedFuture(null);
    }

    eventWriteRate.get(streamId).inc();
    aggEventWriteRate.inc();
    eventByteWriteRate.get(streamId).inc(eventDataLength);
    aggEventByteWriteRate.inc(eventDataLength);
    EventHubClientManager ehClient = perStreamEventHubClientManagers.get(streamId);

    // Async send call
    return sendToEventHub(streamId, eventData, getEnvelopePartitionId(envelope), ehClient.getEventHubClient());
}