com.google.pubsub.v1.PullResponse Java Examples

The following examples show how to use com.google.pubsub.v1.PullResponse. Each example is taken from an open-source project; the source file and license are noted above it.
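Before the project-specific examples, here is a minimal sketch of a synchronous pull that produces a PullResponse. It assumes application default credentials and uses hypothetical "my-project-id" / "my-subscription-id" identifiers; the same pullCallable().call(...) pattern appears in Examples #11, #20, and #29 below.

import com.google.cloud.pubsub.v1.stub.GrpcSubscriberStub;
import com.google.cloud.pubsub.v1.stub.SubscriberStub;
import com.google.cloud.pubsub.v1.stub.SubscriberStubSettings;
import com.google.pubsub.v1.ProjectSubscriptionName;
import com.google.pubsub.v1.PullRequest;
import com.google.pubsub.v1.PullResponse;
import com.google.pubsub.v1.ReceivedMessage;

public class PullResponseSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical identifiers; replace with your own project and subscription.
    String subscriptionName =
        ProjectSubscriptionName.format("my-project-id", "my-subscription-id");

    SubscriberStubSettings settings = SubscriberStubSettings.newBuilder().build();
    try (SubscriberStub subscriber = GrpcSubscriberStub.create(settings)) {
      PullRequest pullRequest =
          PullRequest.newBuilder()
              .setSubscription(subscriptionName)
              .setMaxMessages(10)
              .build();

      // The PullResponse carries the batch of ReceivedMessage entries returned by the pull.
      PullResponse pullResponse = subscriber.pullCallable().call(pullRequest);
      for (ReceivedMessage message : pullResponse.getReceivedMessagesList()) {
        System.out.println(message.getMessage().getData().toStringUtf8());
      }
    }
  }
}
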
Example #1
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/** Tests that the correct partition is assigned when the partition scheme is "hash_value". */
@Test
public void testPollWithPartitionSchemeHashValue() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.HASH_VALUE.toString());
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          KAFKA_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example #2
Source File: SubscriberService.java    From kafka-pubsub-emulator with Apache License 2.0
@Override
public void pull(PullRequest request, StreamObserver<PullResponse> responseObserver) {
  logger.atFine().log("Pulling messages %s", request);
  SubscriptionManager subscriptionManager = subscriptions.get(request.getSubscription());
  if (subscriptionManager == null) {
    String message = request.getSubscription() + " is not a valid Subscription";
    logger.atWarning().log(message);
    responseObserver.onError(Status.NOT_FOUND.withDescription(message).asException());
  } else {
    PullResponse response =
        PullResponse.newBuilder()
            .addAllReceivedMessages(
                buildReceivedMessageList(
                    request.getSubscription(),
                    subscriptionManager.pull(
                        request.getMaxMessages(), request.getReturnImmediately())))
            .build();
    logger.atFine().log("Returning %d messages", response.getReceivedMessagesCount());
    responseObserver.onNext(response);
    responseObserver.onCompleted();
  }
}
 
Example #3
Source File: SubscriberServiceTest.java    From kafka-pubsub-emulator with Apache License 2.0
@Test
public void pull() {
  List<PubsubMessage> messages =
      Arrays.asList(
          PubsubMessage.newBuilder()
              .setMessageId("0-0")
              .setData(ByteString.copyFromUtf8("hello"))
              .build(),
          PubsubMessage.newBuilder()
              .setMessageId("0-1")
              .setData(ByteString.copyFromUtf8("world"))
              .build());
  when(mockSubscriptionManager3.pull(100, false)).thenReturn(messages);

  PullRequest request =
      PullRequest.newBuilder()
          .setSubscription(TestHelpers.PROJECT2_SUBSCRIPTION3)
          .setMaxMessages(100)
          .build();
  PullResponse response = blockingStub.pull(request);
  assertThat(
      response.getReceivedMessagesList(),
      Matchers.contains(
          ReceivedMessage.newBuilder().setAckId("0-0").setMessage(messages.get(0)).build(),
          ReceivedMessage.newBuilder().setAckId("0-1").setMessage(messages.get(1)).build()));
}
 
Example #4
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/** Tests that no partition is assigned when the partition scheme is "kafka_partitioner". */
@Test
public void testPollWithPartitionSchemeKafkaPartitioner() throws Exception {
  props.put(
          CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
          CloudPubSubSourceConnector.PartitionScheme.KAFKA_PARTITIONER.toString());
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
          new SourceRecord(
                  null,
                  null,
                  KAFKA_TOPIC,
                  null,
                  Schema.OPTIONAL_STRING_SCHEMA,
                  null,
                  Schema.BYTES_SCHEMA,
                  KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
  assertNull(result.get(0).kafkaPartition());
}
 
Example #5
Source File: PubSubSubscriberTemplate.java    From spring-cloud-gcp with Apache License 2.0
/**
 * Pulls messages asynchronously, on demand, using the provided pull request.
 *
 * @param pullRequest pull request containing the subscription name
 * @return the ListenableFuture for the asynchronous execution, returning
 * the list of {@link AcknowledgeablePubsubMessage} containing the ack ID, subscription
 * and acknowledger
 */
private ListenableFuture<List<AcknowledgeablePubsubMessage>> pullAsync(PullRequest pullRequest) {
	Assert.notNull(pullRequest, "The pull request can't be null.");

	ApiFuture<PullResponse> pullFuture = this.subscriberStub.pullCallable().futureCall(pullRequest);

	final SettableListenableFuture<List<AcknowledgeablePubsubMessage>> settableFuture = new SettableListenableFuture<>();
	ApiFutures.addCallback(pullFuture, new ApiFutureCallback<PullResponse>() {

		@Override
		public void onFailure(Throwable throwable) {
			settableFuture.setException(throwable);
		}

		@Override
		public void onSuccess(PullResponse pullResponse) {
			List<AcknowledgeablePubsubMessage> result = toAcknowledgeablePubsubMessageList(
					pullResponse.getReceivedMessagesList(), pullRequest.getSubscription());

			settableFuture.set(result);
		}

	}, asyncPullExecutor);

	return settableFuture;
}
 
Example #6
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub have attributes that match {@link
 * #KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE} and {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageTimestampAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE, KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
          new SourceRecord(
                  null,
                  null,
                  KAFKA_TOPIC,
                  0,
                  Schema.OPTIONAL_STRING_SCHEMA,
                  KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
                  Schema.BYTES_SCHEMA,
                  KAFKA_VALUE, Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE));
  assertRecordsEqual(expected, result.get(0));
}
 
Example #7
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do have an attribute that matches {@link
 * #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMessageKeyAttribute() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example #8
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message(s) retrieved from Cloud Pub/Sub do not have an attribute that matches
 * {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithNoMessageKeyAttribute() throws Exception {
  task.start(props);
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected, result.get(0));
}
 
Example #9
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests that when a call to ackMessages() fails, the message is not redelivered to Kafka if
 * the message is received again by Cloud Pub/Sub. Also tests that ack ids are added properly if
 * the ack id has not been seen before.
 */
@Test
public void testPollWithDuplicateReceivedMessages() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm1).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  assertEquals(1, result.size());
  ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
  stubbedPullResponse =
      PullResponse.newBuilder().addReceivedMessages(0, rm1).addReceivedMessages(1, rm2).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  result = task.poll();
  assertEquals(1, result.size());
}
 
Example #10
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests that when ackMessages() succeeds and the subsequent call to poll() returns no messages,
 * ackMessages() is not invoked again because there should be no outstanding acks.
 */
@Test
public void testPollInRegularCase() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm1).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  assertEquals(1, result.size());
  task.commitRecord(result.get(0));
  stubbedPullResponse = PullResponse.newBuilder().build();
  SettableApiFuture<Empty> goodFuture = SettableApiFuture.create();
  goodFuture.set(Empty.getDefaultInstance());
  when(subscriber.ackMessages(any(AcknowledgeRequest.class))).thenReturn(goodFuture);
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  result = task.poll();
  assertEquals(0, result.size());
  result = task.poll();
  assertEquals(0, result.size());
  verify(subscriber, times(1)).ackMessages(any(AcknowledgeRequest.class));
}
 
Example #11
Source File: PubsubHelper.java    From flink with Apache License 2.0
public List<ReceivedMessage> pullMessages(String projectId, String subscriptionId, int maxNumberOfMessages) throws Exception {
	SubscriberStubSettings subscriberStubSettings =
		SubscriberStubSettings.newBuilder()
			.setTransportChannelProvider(channelProvider)
			.setCredentialsProvider(NoCredentialsProvider.create())
			.build();
	try (SubscriberStub subscriber = GrpcSubscriberStub.create(subscriberStubSettings)) {
		// String projectId = "my-project-id";
		// String subscriptionId = "my-subscription-id";
		// int numOfMessages = 10;   // max number of messages to be pulled
		String subscriptionName = ProjectSubscriptionName.format(projectId, subscriptionId);
		PullRequest pullRequest =
			PullRequest.newBuilder()
				.setMaxMessages(maxNumberOfMessages)
				.setReturnImmediately(false) // wait for messages instead of returning immediately when none are available
				.setSubscription(subscriptionName)
				.build();

		// use pullCallable().futureCall to asynchronously perform this operation
		PullResponse pullResponse = subscriber.pullCallable().call(pullRequest);
		List<String> ackIds = new ArrayList<>();
		for (ReceivedMessage message : pullResponse.getReceivedMessagesList()) {
			// handle received message
			// ...
			ackIds.add(message.getAckId());
		}
		// acknowledge received messages
		AcknowledgeRequest acknowledgeRequest =
			AcknowledgeRequest.newBuilder()
				.setSubscription(subscriptionName)
				.addAllAckIds(ackIds)
				.build();
		// use acknowledgeCallable().futureCall to asynchronously perform this operation
		subscriber.acknowledgeCallable().call(acknowledgeRequest);
		return pullResponse.getReceivedMessagesList();
	}
}
 
Example #12
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message retrieved from Cloud Pub/Sub has several attributes, including
 * one that matches {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}.
 */
@Test
public void testPollWithMultipleAttributes() throws Exception {
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put("attribute1", "attribute_value1");
  attributes.put("attribute2", "attribute_value2");
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());
  Schema expectedSchema =
      SchemaBuilder.struct()
          .field(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD, Schema.BYTES_SCHEMA)
          .field("attribute1", Schema.STRING_SCHEMA)
          .field("attribute2", Schema.STRING_SCHEMA)
          .build();
  Struct expectedValue = new Struct(expectedSchema)
                             .put(ConnectorUtils.KAFKA_MESSAGE_CPS_BODY_FIELD, KAFKA_VALUE)
                             .put("attribute1", "attribute_value1")
                             .put("attribute2", "attribute_value2");
  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          expectedSchema,
          expectedValue);
  assertRecordsEqual(expected, result.get(0));
}
 
Example #13
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests when the message retrieved from Cloud Pub/Sub has several attributes, including
 * one that matches {@link #KAFKA_MESSAGE_KEY_ATTRIBUTE}, and uses Kafka record headers to store them.
 */
@Test
public void testPollWithMultipleAttributesAndRecordHeaders() throws Exception {
  props.put(CloudPubSubSourceConnector.USE_KAFKA_HEADERS, "true");
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  attributes.put("attribute1", "attribute_value1");
  attributes.put("attribute2", "attribute_value2");
  ReceivedMessage rm = createReceivedMessage(ACK_ID1, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().addReceivedMessages(rm).build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(1, result.size());

  ConnectHeaders headers = new ConnectHeaders();
  headers.addString("attribute1", "attribute_value1");
  headers.addString("attribute2", "attribute_value2");

  SourceRecord expected =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE,
          Long.parseLong(KAFKA_MESSAGE_TIMESTAMP_ATTRIBUTE_VALUE),
          headers);
  assertRecordsEqual(expected, result.get(0));
}
 
Example #14
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/** Tests when no messages are received from the Cloud Pub/Sub PullResponse. */
@Test
public void testPollCaseWithNoMessages() throws Exception {
  task.start(props);
  PullResponse stubbedPullResponse = PullResponse.newBuilder().build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  assertEquals(0, task.poll().size());
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
}
 
Example #15
Source File: PubSubSubscriberTemplateTests.java    From spring-cloud-gcp with Apache License 2.0
@Test
public void testPullNextAsync_NoMessages() throws InterruptedException, ExecutionException, TimeoutException {
	when(this.pullApiFuture.get()).thenReturn(PullResponse.newBuilder().build());

	ListenableFuture<PubsubMessage> asyncResult = this.pubSubSubscriberTemplate.pullNextAsync("sub2");

	PubsubMessage message = asyncResult.get(10L, TimeUnit.SECONDS);
	assertThat(asyncResult.isDone()).isTrue();

	assertThat(message).isNull();

	verify(this.subscriberFactory).createPullRequest("sub2", 1, true);
	verify(this.pubSubSubscriberTemplate, never()).ack(any());
}
 
Example #16
Source File: PubSubSubscriberTemplateTests.java    From spring-cloud-gcp with Apache License 2.0
@Test
public void testPullNext_NoMessages() {
	when(this.pullCallable.call(any(PullRequest.class))).thenReturn(PullResponse.newBuilder().build());

	PubsubMessage message = this.pubSubSubscriberTemplate.pullNext("sub2");

	assertThat(message).isNull();

	verify(this.subscriberFactory).createPullRequest("sub2", 1, true);
	verify(this.pubSubSubscriberTemplate, never()).ack(any());
}
 
Example #17
Source File: PubSubSubscriberTemplateTests.java    From spring-cloud-gcp with Apache License 2.0
@Test
public void testPullAndAckAsync_NoMessages() throws InterruptedException, ExecutionException, TimeoutException {
	when(this.pullApiFuture.get()).thenReturn(PullResponse.newBuilder().build());

	ListenableFuture<List<PubsubMessage>> asyncResult = this.pubSubSubscriberTemplate.pullAndAckAsync(
			"sub2", 1, true);

	List<PubsubMessage> result = asyncResult.get(10L, TimeUnit.SECONDS);
	assertThat(asyncResult.isDone()).isTrue();

	assertThat(result.size()).isEqualTo(0);

	verify(this.pubSubSubscriberTemplate, never()).ack(any());
}
 
Example #18
Source File: PubSubSubscriberTemplateTests.java    From spring-cloud-gcp with Apache License 2.0
@Test
public void testPullAndAck_NoMessages() {
	when(this.pullCallable.call(any(PullRequest.class))).thenReturn(PullResponse.newBuilder().build());

	List<PubsubMessage> result = this.pubSubSubscriberTemplate.pullAndAck(
			"sub2", 1, true);

	assertThat(result.size()).isEqualTo(0);

	verify(this.pubSubSubscriberTemplate, never()).ack(any());
}
 
Example #19
Source File: PubSubSubscriberTemplate.java    From spring-cloud-gcp with Apache License 2.0
/**
 * Pulls messages synchronously, on demand, using the provided pull request.
 *
 * @param pullRequest pull request containing the subscription name
 * @return the list of {@link AcknowledgeablePubsubMessage} containing the ack ID, subscription
 * and acknowledger
 */
private List<AcknowledgeablePubsubMessage> pull(PullRequest pullRequest) {
	Assert.notNull(pullRequest, "The pull request can't be null.");

	PullResponse pullResponse = this.subscriberStub.pullCallable().call(pullRequest);
	return toAcknowledgeablePubsubMessageList(
			pullResponse.getReceivedMessagesList(),
			pullRequest.getSubscription());
}
 
Example #20
Source File: PubSubSampleApplicationTests.java    From spring-cloud-gcp with Apache License 2.0
private List<String> getMessagesFromSubscription(String subscriptionName) {
	String projectSubscriptionName = ProjectSubscriptionName.format(
			projectName, subscriptionName);

	PullRequest pullRequest = PullRequest.newBuilder()
			.setReturnImmediately(true)
			.setMaxMessages(10)
			.setSubscription(projectSubscriptionName)
			.build();

	PullResponse pullResponse = subscriptionAdminClient.getStub().pullCallable().call(pullRequest);
	return pullResponse.getReceivedMessagesList().stream()
			.map((message) -> message.getMessage().getData().toStringUtf8())
			.collect(Collectors.toList());
}
 
Example #21
Source File: SubscriberServiceTest.java    From kafka-pubsub-emulator with Apache License 2.0
@Test
public void pull_emptyList() {
  when(mockSubscriptionManager3.pull(100, false)).thenReturn(Collections.emptyList());

  PullRequest request =
      PullRequest.newBuilder()
          .setSubscription(TestHelpers.PROJECT2_SUBSCRIPTION3)
          .setMaxMessages(100)
          .build();
  PullResponse response = blockingStub.pull(request);

  assertThat(response.getReceivedMessagesList(), Matchers.empty());
}
 
Example #22
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests that the correct partition is assigned when the partition scheme is "round_robin". The
 * test submits an appropriate number of messages to poll() so that all partitions
 * in the round robin are hit once.
 */
@Test
public void testPollWithPartitionSchemeRoundRobin() throws Exception {
  task.start(props);
  ReceivedMessage rm1 = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm2 = createReceivedMessage(ACK_ID2, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm3 = createReceivedMessage(ACK_ID3, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage rm4 = createReceivedMessage(ACK_ID4, CPS_MESSAGE, new HashMap<String, String>());
  PullResponse stubbedPullResponse =
      PullResponse.newBuilder()
          .addReceivedMessages(0, rm1)
          .addReceivedMessages(1, rm2)
          .addReceivedMessages(2, rm3)
          .addReceivedMessages(3, rm4)
          .build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(4, result.size());
  SourceRecord expected1 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected2 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          1,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected3 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          2,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expected4 =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  assertRecordsEqual(expected1, result.get(0));
  assertRecordsEqual(expected2, result.get(1));
  assertRecordsEqual(expected3, result.get(2));
  assertRecordsEqual(expected4, result.get(3));
}
 
Example #23
Source File: PubsubGrpcClient.java    From beam with Apache License 2.0
@Override
public List<IncomingMessage> pull(
    long requestTimeMsSinceEpoch,
    SubscriptionPath subscription,
    int batchSize,
    boolean returnImmediately)
    throws IOException {
  PullRequest request =
      PullRequest.newBuilder()
          .setSubscription(subscription.getPath())
          .setReturnImmediately(returnImmediately)
          .setMaxMessages(batchSize)
          .build();
  PullResponse response = subscriberStub().pull(request);
  if (response.getReceivedMessagesCount() == 0) {
    return ImmutableList.of();
  }
  List<IncomingMessage> incomingMessages = new ArrayList<>(response.getReceivedMessagesCount());
  for (ReceivedMessage message : response.getReceivedMessagesList()) {
    PubsubMessage pubsubMessage = message.getMessage();
    @Nullable Map<String, String> attributes = pubsubMessage.getAttributes();

    // Timestamp.
    String pubsubTimestampString = null;
    Timestamp timestampProto = pubsubMessage.getPublishTime();
    if (timestampProto != null) {
      pubsubTimestampString =
          String.valueOf(timestampProto.getSeconds() + timestampProto.getNanos() / 1000L);
    }
    long timestampMsSinceEpoch =
        extractTimestamp(timestampAttribute, pubsubTimestampString, attributes);

    // Ack id.
    String ackId = message.getAckId();
    checkState(!Strings.isNullOrEmpty(ackId));

    // Record id, if any.
    @Nullable String recordId = null;
    if (idAttribute != null && attributes != null) {
      recordId = attributes.get(idAttribute);
    }
    if (Strings.isNullOrEmpty(recordId)) {
      // Fall back to the Pubsub provided message id.
      recordId = pubsubMessage.getMessageId();
    }

    incomingMessages.add(
        IncomingMessage.of(
            pubsubMessage, timestampMsSinceEpoch, requestTimeMsSinceEpoch, ackId, recordId));
  }
  return incomingMessages;
}
 
Example #24
Source File: PubsubGrpcClientTest.java    From beam with Apache License 2.0
@Test
public void pullOneMessage() throws IOException {
  String expectedSubscription = SUBSCRIPTION.getPath();
  final PullRequest expectedRequest =
      PullRequest.newBuilder()
          .setSubscription(expectedSubscription)
          .setReturnImmediately(true)
          .setMaxMessages(10)
          .build();
  Timestamp timestamp =
      Timestamp.newBuilder()
          .setSeconds(PUB_TIME / 1000)
          .setNanos((int) (PUB_TIME % 1000) * 1000)
          .build();
  PubsubMessage expectedPubsubMessage =
      PubsubMessage.newBuilder()
          .setMessageId(MESSAGE_ID)
          .setData(ByteString.copyFrom(DATA.getBytes(StandardCharsets.UTF_8)))
          .setPublishTime(timestamp)
          .putAllAttributes(ATTRIBUTES)
          .putAllAttributes(
              ImmutableMap.of(
                  TIMESTAMP_ATTRIBUTE, String.valueOf(MESSAGE_TIME), ID_ATTRIBUTE, RECORD_ID))
          .build();
  ReceivedMessage expectedReceivedMessage =
      ReceivedMessage.newBuilder().setMessage(expectedPubsubMessage).setAckId(ACK_ID).build();
  final PullResponse response =
      PullResponse.newBuilder()
          .addAllReceivedMessages(ImmutableList.of(expectedReceivedMessage))
          .build();

  final List<PullRequest> requestsReceived = new ArrayList<>();
  SubscriberImplBase subscriberImplBase =
      new SubscriberImplBase() {
        @Override
        public void pull(PullRequest request, StreamObserver<PullResponse> responseObserver) {
          requestsReceived.add(request);
          responseObserver.onNext(response);
          responseObserver.onCompleted();
        }
      };
  Server server =
      InProcessServerBuilder.forName(channelName).addService(subscriberImplBase).build().start();
  try {
    List<IncomingMessage> actualMessages = client.pull(REQ_TIME, SUBSCRIPTION, 10, true);
    assertEquals(1, actualMessages.size());
    IncomingMessage actualMessage = actualMessages.get(0);
    assertEquals(ACK_ID, actualMessage.ackId());
    assertEquals(DATA, actualMessage.message().getData().toStringUtf8());
    assertEquals(RECORD_ID, actualMessage.recordId());
    assertEquals(REQ_TIME, actualMessage.requestTimeMsSinceEpoch());
    assertEquals(MESSAGE_TIME, actualMessage.timestampMsSinceEpoch());
    assertEquals(expectedRequest, Iterables.getOnlyElement(requestsReceived));
  } finally {
    server.shutdownNow();
  }
}
 
Example #25
Source File: ConsumeGCPubSub.java    From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    if (subscriber == null) {

        if (storedException.get() != null) {
            getLogger().error("Failed to create Google Cloud PubSub subscriber due to {}", new Object[]{storedException.get()});
        } else {
            getLogger().error("Google Cloud PubSub Subscriber was not properly created. Yielding the processor...");
        }

        context.yield();
        return;
    }

    final PullResponse pullResponse = subscriber.pullCallable().call(pullRequest);
    final List<String> ackIds = new ArrayList<>();

    for (ReceivedMessage message : pullResponse.getReceivedMessagesList()) {
        if (message.hasMessage()) {
            FlowFile flowFile = session.create();

            final Map<String, String> attributes = new HashMap<>();
            ackIds.add(message.getAckId());

            attributes.put(ACK_ID_ATTRIBUTE, message.getAckId());
            attributes.put(SERIALIZED_SIZE_ATTRIBUTE, String.valueOf(message.getSerializedSize()));
            attributes.put(MESSAGE_ID_ATTRIBUTE, message.getMessage().getMessageId());
            attributes.put(MSG_ATTRIBUTES_COUNT_ATTRIBUTE, String.valueOf(message.getMessage().getAttributesCount()));
            attributes.put(MSG_PUBLISH_TIME_ATTRIBUTE, String.valueOf(message.getMessage().getPublishTime().getSeconds()));
            attributes.putAll(message.getMessage().getAttributesMap());

            flowFile = session.putAllAttributes(flowFile, attributes);
            flowFile = session.write(flowFile, out -> out.write(message.getMessage().getData().toByteArray()));

            session.transfer(flowFile, REL_SUCCESS);
            session.getProvenanceReporter().receive(flowFile, getSubscriptionName(context));
        }
    }

    if (!ackIds.isEmpty()) {
        AcknowledgeRequest acknowledgeRequest = AcknowledgeRequest.newBuilder()
                .addAllAckIds(ackIds)
                .setSubscription(getSubscriptionName(context))
                .build();
        subscriber.acknowledgeCallable().call(acknowledgeRequest);
    }
}
 
Example #26
Source File: CloudPubSubSourceTaskTest.java    From pubsub with Apache License 2.0
/**
 * Tests that the correct partition is assigned when the partition scheme is "hash_key". The test
 * has two cases, one where a key does exist and one where it does not.
 */
@Test
public void testPollWithPartitionSchemeHashKey() throws Exception {
  props.put(
      CloudPubSubSourceConnector.KAFKA_PARTITION_SCHEME_CONFIG,
      CloudPubSubSourceConnector.PartitionScheme.HASH_KEY.toString());
  task.start(props);
  Map<String, String> attributes = new HashMap<>();
  attributes.put(KAFKA_MESSAGE_KEY_ATTRIBUTE, KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE);
  ReceivedMessage withoutKey = createReceivedMessage(ACK_ID1, CPS_MESSAGE, new HashMap<String, String>());
  ReceivedMessage withKey = createReceivedMessage(ACK_ID2, CPS_MESSAGE, attributes);
  PullResponse stubbedPullResponse =
      PullResponse.newBuilder()
          .addReceivedMessages(0, withKey)
          .addReceivedMessages(1, withoutKey)
          .build();
  when(subscriber.pull(any(PullRequest.class)).get()).thenReturn(stubbedPullResponse);
  List<SourceRecord> result = task.poll();
  verify(subscriber, never()).ackMessages(any(AcknowledgeRequest.class));
  assertEquals(2, result.size());
  SourceRecord expectedForMessageWithKey =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE.hashCode() % Integer.parseInt(KAFKA_PARTITIONS),
          Schema.OPTIONAL_STRING_SCHEMA,
          KAFKA_MESSAGE_KEY_ATTRIBUTE_VALUE,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);
  SourceRecord expectedForMessageWithoutKey =
      new SourceRecord(
          null,
          null,
          KAFKA_TOPIC,
          0,
          Schema.OPTIONAL_STRING_SCHEMA,
          null,
          Schema.BYTES_SCHEMA,
          KAFKA_VALUE);

  assertRecordsEqual(expectedForMessageWithKey, result.get(0));
  assertArrayEquals((byte[])expectedForMessageWithoutKey.value(), (byte[])result.get(1).value());
}
 
Example #27
Source File: CloudPubSubRoundRobinSubscriber.java    From pubsub with Apache License 2.0
@Override
public ApiFuture<PullResponse> pull(PullRequest request) {
  currentSubscriberIndex = (currentSubscriberIndex + 1) % subscribers.size();
  return subscribers.get(currentSubscriberIndex).pull(request);
}
 
Example #28
Source File: CloudPubSubGRPCSubscriber.java    From pubsub with Apache License 2.0
public ApiFuture<PullResponse> pull(PullRequest request) {
  if (System.currentTimeMillis() > nextSubscriberResetTime) {
    makeSubscriber();
  }
  return subscriber.pullCallable().futureCall(request);
}
 
Example #29
Source File: Poller.java    From spanner-event-exporter with Apache License 2.0
private String getLastProcessedTimestamp() {

    String timestamp = "";
    try {
      final SubscriberStubSettings subscriberStubSettings =
          SubscriberStubSettings.newBuilder()
              .setTransportChannelProvider(
                  SubscriberStubSettings.defaultGrpcTransportProviderBuilder()
                      .setMaxInboundMessageSize(20 << 20) // 20MB
                      .build())
              .build();

      try (SubscriberStub subscriber = GrpcSubscriberStub.create(subscriberStubSettings)) {
        final String subscriptionName = ProjectSubscriptionName.format(PROJECT_ID, tableName);
        final PullRequest pullRequest =
            PullRequest.newBuilder()
                .setMaxMessages(1)
                .setReturnImmediately(true)
                .setSubscription(subscriptionName)
                .build();

        final PullResponse pullResponse = subscriber.pullCallable().call(pullRequest);
        final DatumReader<GenericRecord> datumReader =
            new GenericDatumReader<GenericRecord>(avroSchema);

        for (ReceivedMessage message : pullResponse.getReceivedMessagesList()) {
          final JsonDecoder decoder =
              DecoderFactory.get()
                  .jsonDecoder(avroSchema, message.getMessage().getData().newInput());

          final GenericRecord record = datumReader.read(null, decoder);
          timestamp = record.get("Timestamp").toString();

          log.debug("---------------- Got Timestamp: " + timestamp);
        }
      }
    } catch (IOException e) {
      log.error("Could not get last processed timestamp from pub / sub", e);

      // If we cannot find a previously processed timestamp, we will default
      // to the one present in the config file.
      return startingTimestamp;
    }

    return timestamp;
  }
 
Example #30
Source File: CloudPubSubSubscriber.java    From pubsub with Apache License 2.0
public ApiFuture<PullResponse> pull(PullRequest request);