org.springframework.integration.IntegrationMessageHeaderAccessor Java Examples

The following examples show how to use org.springframework.integration.IntegrationMessageHeaderAccessor. They are drawn from several open-source projects; the originating project and license are noted above each example.
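Before the project examples, here is a minimal, self-contained sketch of the two usual ways to work with the class: setting well-known headers through its String constants while building a message, and reading them back through an accessor instance. The class name, payload, and header values below are illustrative only, not taken from any of the projects that follow.

import org.springframework.integration.IntegrationMessageHeaderAccessor;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.messaging.Message;

public class HeaderAccessorSketch {

	public static void main(String[] args) {
		// Set well-known headers via the accessor's constants while building
		// a message. (Payload and header values are illustrative.)
		Message<String> message = MessageBuilder.withPayload("payload")
				.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID, "corr-1")
				.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 1)
				.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 2)
				.build();

		// Read the same headers back through a typed accessor.
		IntegrationMessageHeaderAccessor accessor = new IntegrationMessageHeaderAccessor(message);
		System.out.println(accessor.getCorrelationId());  // corr-1
		System.out.println(accessor.getSequenceNumber()); // 1
		System.out.println(accessor.getSequenceSize());   // 2
	}
}

The examples below use the same constants (ACKNOWLEDGMENT_CALLBACK, SEQUENCE_NUMBER, SEQUENCE_SIZE, CORRELATION_ID, SOURCE_DATA, DELIVERY_ATTEMPT) in real integration code.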
Example #1
Source File: PubSubMessageSource.java    From spring-cloud-gcp with Apache License 2.0
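This source method wraps a received Pub/Sub message for Spring Integration: its attributes are mapped to message headers, the original message is stored under GcpPubSubHeaders.ORIGINAL_MESSAGE, and a PubSubAcknowledgmentCallback is registered under ACKNOWLEDGMENT_CALLBACK so downstream components can acknowledge according to the configured AckMode.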
/**
 * Applies header customizations and acknowledges the message, if necessary.
 * <p>{@link AckMode#AUTO} and {@link AckMode#AUTO_ACK} result in automatic acking on
 * success. {@link AckMode#AUTO} results in automatic nacking on failure.
 * @param message source Pub/Sub message.
 * @return {@link Message} wrapper containing the original message.
 */
private AbstractIntegrationMessageBuilder<?> processMessage(ConvertedAcknowledgeablePubsubMessage<?> message) {
	if (message == null) {
		return null;
	}

	Map<String, Object> messageHeaders =
			this.headerMapper.toHeaders(message.getPubsubMessage().getAttributesMap());

	messageHeaders.put(GcpPubSubHeaders.ORIGINAL_MESSAGE, message);
	messageHeaders.put(IntegrationMessageHeaderAccessor.ACKNOWLEDGMENT_CALLBACK,
				new PubSubAcknowledgmentCallback(message, this.ackMode));

	return getMessageBuilderFactory()
			.withPayload(message.getPayload())
			.copyHeaders(messageHeaders);
}
 
Example #2
Source File: PubSubMessageSourceTests.java    From spring-cloud-gcp with Apache License 2.0
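This test verifies that with AckMode.MANUAL the ACKNOWLEDGMENT_CALLBACK header is populated but the message is not acked until the callback is invoked explicitly.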
@Test
@SuppressWarnings("unchecked")
public void doReceive_manualAckModeAppliesAcknowledgmentHeaderAndDoesNotAck() {

	PubSubMessageSource pubSubMessageSource = new PubSubMessageSource(
			this.mockPubSubSubscriberOperations, "sub1");
	pubSubMessageSource.setMaxFetchSize(1);
	pubSubMessageSource.setPayloadType(String.class);
	pubSubMessageSource.setAckMode(AckMode.MANUAL);

	MessageBuilder<String> message = (MessageBuilder<String>) pubSubMessageSource.doReceive(1);

	assertThat(message).isNotNull();

	assertThat(message.getPayload()).isEqualTo("msg1");
	AcknowledgmentCallback callback = (AcknowledgmentCallback) message.getHeaders()
			.get(IntegrationMessageHeaderAccessor.ACKNOWLEDGMENT_CALLBACK);
	assertThat(callback).isNotNull();
	assertThat(callback.isAcknowledged()).isFalse();
	verify(this.msg1, times(0)).ack();

	callback.acknowledge(AcknowledgmentCallback.Status.ACCEPT);
	verify(this.msg1, times(1)).ack();
	assertThat(callback.isAcknowledged()).isTrue();
}
 
Example #3
Source File: RepublishUnitTests.java    From spring-cloud-stream-binder-rabbit with Apache License 2.0
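This test feeds an ErrorMessage carrying the failed AMQP message under the SOURCE_DATA header to the binder's error handler, and verifies that when the dead-letter exchange cannot be declared the handler gives up on republishing: the passive declaration is attempted only once across two failures, and nothing is published.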
@Test
public void testBadRepublishSetting() throws IOException {
	ConnectionFactory cf = mock(ConnectionFactory.class);
	Connection conn = mock(Connection.class);
	given(cf.createConnection()).willReturn(conn);
	Channel channel = mock(Channel.class);
	given(channel.isOpen()).willReturn(true);
	given(channel.exchangeDeclarePassive("DLX")).willThrow(new IOException());
	given(conn.createChannel(false)).willReturn(channel);
	RabbitProperties props = new RabbitProperties();
	RabbitMessageChannelBinder binder = new RabbitMessageChannelBinder(cf, props, null);
	RabbitConsumerProperties extension = new RabbitConsumerProperties();
	ExtendedConsumerProperties<RabbitConsumerProperties> bindingProps =
			new ExtendedConsumerProperties<RabbitConsumerProperties>(extension);
	MessageHandler handler = binder.getErrorMessageHandler(mock(ConsumerDestination.class), "foo", bindingProps);
	ErrorMessage message = new ErrorMessage(new RuntimeException("test"),
			Collections.singletonMap(IntegrationMessageHeaderAccessor.SOURCE_DATA,
					new Message("foo".getBytes(), new MessageProperties())));
	handler.handleMessage(message);
	handler.handleMessage(message);
	verify(channel, times(1)).exchangeDeclarePassive("DLX");
	verify(channel, never()).basicPublish(any(), any(), eq(false), any(), any());
}
 
Example #4
Source File: SplitterProcessorIntegrationTests.java    From spring-cloud-stream-app-starters with Apache License 2.0
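This test splits a two-line file into per-line messages and asserts that the SEQUENCE_NUMBER header is not populated on the output.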
@Test
public void test() throws Exception {
	assertThat(this.splitter, instanceOf(FileSplitter.class));
	assertSame(this.splitter, TestUtils.getPropertyValue(this.consumer, "handler"));
	File file = new File(System.getProperty("java.io.tmpdir") + File.separator + "splitter.proc.test");
	try (FileOutputStream fos = new FileOutputStream(file)) {
		fos.write("hello\nworld\n".getBytes());
	}
	this.channels.input().send(new GenericMessage<>(file));
	Message<?> m = this.collector.forChannel(this.channels.output()).poll(10, TimeUnit.SECONDS);
	assertNotNull(m);
	assertNull(m.getHeaders().get(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER));
	assertThat(m, hasPayload("hello"));
	assertThat(this.collector.forChannel(this.channels.output()), receivesPayloadThat(is("world")));
	file.delete();
}
 
Example #5
Source File: DemoApplication.java    From spring-and-kafka with Apache License 2.0
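A Spring Integration Java DSL flow that sends messages to Kafka through an outbound channel adapter, using the SEQUENCE_NUMBER header as the Kafka message key.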
@Bean(name = OUTBOUND_ID)
IntegrationFlow producer() {

    log.info("starting producer flow..");

    return flowDefinition -> {
        Consumer<KafkaProducerMessageHandlerSpec.ProducerMetadataSpec> producerMetadataSpecConsumer =
                (KafkaProducerMessageHandlerSpec.ProducerMetadataSpec metadata) ->
                        metadata.async(true)
                                .batchNumMessages(10)
                                .valueClassType(String.class)
                                .<String>valueEncoder(String::getBytes);

        KafkaProducerMessageHandlerSpec messageHandlerSpec =
                Kafka.outboundChannelAdapter(props -> props.put("queue.buffering.max.ms", "15000"))
                        .messageKey(m -> m.getHeaders().get(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER))
                        .addProducer(this.kafkaConfig.getTopic(), this.kafkaConfig.getBrokerAddress(), producerMetadataSpecConsumer);
        flowDefinition
                .handle(messageHandlerSpec);
    };
}
 
Example #6
Source File: BinderHeaderMapper.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
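This utility removes the headers that must never be propagated to Kafka record headers, including the framework-managed DELIVERY_ATTEMPT header.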
/**
 * Remove the "never" headers (headers that must never be mapped).
 * @param headers the headers from which to remove the never headers.
 * @since 3.0.2
 */
public static void removeNeverHeaders(Headers headers) {
	headers.remove(MessageHeaders.ID);
	headers.remove(MessageHeaders.TIMESTAMP);
	headers.remove(IntegrationMessageHeaderAccessor.DELIVERY_ATTEMPT);
	headers.remove(BinderHeaders.NATIVE_HEADERS_PRESENT);
}
 
Example #7
Source File: PollableConsumerTests.java    From spring-cloud-stream with Apache License 2.0
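This test injects a mock AcknowledgmentCallback through the ACKNOWLEDGMENT_CALLBACK header and verifies that a RequeueCurrentMessageException thrown from the handler exhausts both attempts and requeues the message via the callback.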
@Test
public void testRequeue() {
	TestChannelBinder binder = createBinder();
	MessageConverterConfigurer configurer = this.context
			.getBean(MessageConverterConfigurer.class);

	DefaultPollableMessageSource pollableSource = new DefaultPollableMessageSource(
			this.messageConverter);
	configurer.configurePolledMessageSource(pollableSource, "foo");
	AcknowledgmentCallback callback = mock(AcknowledgmentCallback.class);
	pollableSource.addInterceptor(new ChannelInterceptor() {

		@Override
		public Message<?> preSend(Message<?> message, MessageChannel channel) {
			return MessageBuilder.fromMessage(message)
					.setHeader(
							IntegrationMessageHeaderAccessor.ACKNOWLEDGMENT_CALLBACK,
							callback)
					.build();
		}

	});
	ExtendedConsumerProperties<Object> properties = new ExtendedConsumerProperties<>(null);
	properties.setMaxAttempts(2);
	properties.setBackOffInitialInterval(0);
	binder.bindPollableConsumer("foo", "bar", pollableSource, properties);
	final AtomicInteger count = new AtomicInteger();
	try {
		assertThat(pollableSource.poll(received -> {
			count.incrementAndGet();
			throw new RequeueCurrentMessageException("test retry");
		})).isTrue();
	}
	catch (Exception e) {
		// no op
	}
	assertThat(count.get()).isEqualTo(2);
	verify(callback).acknowledge(Status.REQUEUE);
}
 
Example #8
Source File: PollableConsumerTests.java    From spring-cloud-stream with Apache License 2.0
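A variant of the previous test: with retries disabled (maxAttempts of 1), the RequeueCurrentMessageException is thrown from the error channel's subscriber instead, and the message is still requeued through the injected callback.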
@Test
public void testRequeueFromErrorFlow() {
	TestChannelBinder binder = createBinder();
	MessageConverterConfigurer configurer = this.context
			.getBean(MessageConverterConfigurer.class);

	DefaultPollableMessageSource pollableSource = new DefaultPollableMessageSource(
			this.messageConverter);
	configurer.configurePolledMessageSource(pollableSource, "foo");
	AcknowledgmentCallback callback = mock(AcknowledgmentCallback.class);
	pollableSource.addInterceptor(new ChannelInterceptor() {

		@Override
		public Message<?> preSend(Message<?> message, MessageChannel channel) {
			return MessageBuilder.fromMessage(message)
					.setHeader(
							IntegrationMessageHeaderAccessor.ACKNOWLEDGMENT_CALLBACK,
							callback)
					.build();
		}

	});
	ExtendedConsumerProperties<Object> properties = new ExtendedConsumerProperties<>(null);
	properties.setMaxAttempts(1);
	binder.bindPollableConsumer("foo", "bar", pollableSource, properties);
	SubscribableChannel errorChannel = new DirectChannel();
	errorChannel.subscribe(msg -> {
		throw new RequeueCurrentMessageException((Throwable) msg.getPayload());
	});
	pollableSource.setErrorChannel(errorChannel);
	try {
		pollableSource.poll(received -> {
			throw new RuntimeException("test requeue from error flow");
		});
	}
	catch (Exception e) {
		// no op
	}
	verify(callback).acknowledge(Status.REQUEUE);
}
 
Example #9
Source File: OrderEntryProducerConfiguration.java    From event-based-shopping-system with MIT License
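The same outbound-adapter pattern as Example #5, with the configuration lambdas extracted into local variables; the Kafka message key is again derived from the SEQUENCE_NUMBER header.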
@Bean(name = OUTBOUND_ID)
public IntegrationFlow producer() {

	log.info("starting producer flow..");

	return flowDefinition -> {
		Consumer<KafkaProducerMessageHandlerSpec.ProducerMetadataSpec> producerMetadataSpecConsumer = (
				KafkaProducerMessageHandlerSpec.ProducerMetadataSpec metadata) -> metadata
				.async(true).batchNumMessages(5)
				.valueClassType(String.class);

		Consumer<PropertiesBuilder> producerProperties = props -> props
				.put("queue.buffering.max.ms", "15000");
		Function<Message<Object>, ?> messageKey = m -> m.getHeaders().get(
				IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER);
		KafkaProducerMessageHandlerSpec outboundChannelAdapter = Kafka
				.outboundChannelAdapter(producerProperties);
		String topic = this.kafkaConfig.getTopic();
		String brokerAddress = this.kafkaConfig.getBrokerAddress();

		KafkaProducerMessageHandlerSpec messageHandlerSpec = outboundChannelAdapter
				.messageKey(messageKey).addProducer(topic, brokerAddress,
						producerMetadataSpecConsumer);

		flowDefinition.handle(messageHandlerSpec);
	};
}
 
Example #10
Source File: ActivatorImpl.java    From tutorials with MIT License
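A minimal message handler that reads the standard timestamp header through an IntegrationMessageHeaderAccessor instance rather than from the raw header map.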
@Override
public void handleMessage(Message<File> input) {
	File filePayload = input.getPayload();
	IntegrationMessageHeaderAccessor accessor = new IntegrationMessageHeaderAccessor(input);
	Logger.getAnonymousLogger().info("The file size " + filePayload.length());
	Logger.getAnonymousLogger().info("The time of the message " + accessor.getTimestamp());
}
 
Example #11
Source File: RocketMQMessageSource.java    From spring-cloud-alibaba with Apache License 2.0
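This pollable source pulls a single message from one of the consumer's RocketMQ queues and, as in the Pub/Sub example above, attaches an acknowledgment callback under the ACKNOWLEDGMENT_CALLBACK header before returning the message to the framework.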
@Override
protected synchronized Object doReceive() {
	if (messageQueueChooser.getMessageQueues() == null
			|| messageQueueChooser.getMessageQueues().size() == 0) {
		return null;
	}
	try {
		int count = 0;
		while (count < messageQueueChooser.getMessageQueues().size()) {
			MessageQueue messageQueue;
			synchronized (this.consumerMonitor) {
				messageQueue = messageQueueChooser.choose();
				messageQueueChooser.increment();
			}

			long offset = consumer.fetchConsumeOffset(messageQueue,
					rocketMQConsumerProperties.getExtension().isFromStore());

			log.debug("topic='{}', group='{}', messageQueue='{}', offset now='{}'",
					this.topic, this.group, messageQueue, offset);

			PullResult pullResult;
			if (messageSelector != null) {
				pullResult = consumer.pull(messageQueue, messageSelector, offset, 1);
			}
			else {
				pullResult = consumer.pull(messageQueue, (String) null, offset, 1);
			}

			if (pullResult.getPullStatus() == PullStatus.FOUND) {
				List<MessageExt> messageExtList = pullResult.getMsgFoundList();

				Message message = RocketMQUtil
						.convertToSpringMessage(messageExtList.get(0));

				AcknowledgmentCallback ackCallback = this.ackCallbackFactory
						.createCallback(new RocketMQAckInfo(messageQueue, pullResult,
								consumer, offset));

				Message messageResult = MessageBuilder.fromMessage(message).setHeader(
						IntegrationMessageHeaderAccessor.ACKNOWLEDGMENT_CALLBACK,
						ackCallback).build();
				return messageResult;
			}
			else {
				log.debug("messageQueue='{}' PullResult='{}' with topic `{}`",
						messageQueueChooser.getMessageQueues(),
						pullResult.getPullStatus(), topic);
			}
			count++;
		}
	}
	catch (Exception e) {
		log.error("Consumer pull error: " + e.getMessage(), e);
	}
	return null;
}
 
Example #12
Source File: KafkaBinderTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
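This partitioning test sets CORRELATION_ID, SEQUENCE_NUMBER, and SEQUENCE_SIZE on one of the outbound messages via the accessor's constants and asserts that the three payloads are distributed across the three partitioned consumers.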
@Test
@SuppressWarnings("unchecked")
public void testPartitionedModuleSpELWithRawMode() throws Exception {
	Binder binder = getBinder();
	ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
	properties.setPartitionKeyExpression(
			spelExpressionParser.parseExpression("payload[0]"));
	properties.setPartitionSelectorExpression(
			spelExpressionParser.parseExpression("hashCode()"));
	properties.setPartitionCount(6);
	properties.setHeaderMode(HeaderMode.none);

	DirectChannel output = createBindableChannel("output",
			createProducerBindingProperties(properties));
	output.setBeanName("test.output");
	Binding<MessageChannel> outputBinding = binder.bindProducer("part.raw.0", output,
			properties);
	try {
		Object endpoint = extractEndpoint(outputBinding);
		assertThat(getEndpointRouting(endpoint))
				.contains(getExpectedRoutingBaseDestination("part.raw.0", "test")
						+ "-' + headers['partition']");
	}
	catch (UnsupportedOperationException ignored) {
	}

	ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
	consumerProperties.setConcurrency(2);
	consumerProperties.setInstanceIndex(0);
	consumerProperties.setInstanceCount(3);
	consumerProperties.setPartitioned(true);
	consumerProperties.setHeaderMode(HeaderMode.none);
	consumerProperties.getExtension().setAutoRebalanceEnabled(false);
	QueueChannel input0 = new QueueChannel();
	input0.setBeanName("test.input0S");
	Binding<MessageChannel> input0Binding = binder.bindConsumer("part.raw.0", "test",
			input0, consumerProperties);
	consumerProperties.setInstanceIndex(1);
	QueueChannel input1 = new QueueChannel();
	input1.setBeanName("test.input1S");
	Binding<MessageChannel> input1Binding = binder.bindConsumer("part.raw.0", "test",
			input1, consumerProperties);
	consumerProperties.setInstanceIndex(2);
	QueueChannel input2 = new QueueChannel();
	input2.setBeanName("test.input2S");
	Binding<MessageChannel> input2Binding = binder.bindConsumer("part.raw.0", "test",
			input2, consumerProperties);

	Message<byte[]> message2 = org.springframework.integration.support.MessageBuilder
			.withPayload(new byte[] { 2 })
			.setHeader(IntegrationMessageHeaderAccessor.CORRELATION_ID,
					"kafkaBinderTestCommonsDelegate")
			.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_NUMBER, 42)
			.setHeader(IntegrationMessageHeaderAccessor.SEQUENCE_SIZE, 43).build();
	output.send(message2);
	output.send(new GenericMessage<>(new byte[] { 1 }));
	output.send(new GenericMessage<>(new byte[] { 0 }));
	Message<?> receive0 = receive(input0);
	assertThat(receive0).isNotNull();
	Message<?> receive1 = receive(input1);
	assertThat(receive1).isNotNull();
	Message<?> receive2 = receive(input2);
	assertThat(receive2).isNotNull();
	assertThat(Arrays.asList(((byte[]) receive0.getPayload())[0],
			((byte[]) receive1.getPayload())[0], ((byte[]) receive2.getPayload())[0]))
					.containsExactlyInAnyOrder((byte) 0, (byte) 1, (byte) 2);
	input0Binding.unbind();
	input1Binding.unbind();
	input2Binding.unbind();
	outputBinding.unbind();
}
 
Example #13
Source File: RabbitMessageChannelBinder.java    From spring-cloud-stream-binder-rabbit with Apache License 2.0
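When creating the producer endpoint, this binder excludes the internal partition, SOURCE_DATA, and DELIVERY_ATTEMPT headers from the set of headers mapped onto outbound AMQP messages.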
@Override
protected MessageHandler createProducerMessageHandler(
		final ProducerDestination producerDestination,
		ExtendedProducerProperties<RabbitProducerProperties> producerProperties,
		MessageChannel errorChannel) {
	Assert.state(
			!HeaderMode.embeddedHeaders.equals(producerProperties.getHeaderMode()),
			"the RabbitMQ binder does not support embedded headers since RabbitMQ supports headers natively");
	String prefix = producerProperties.getExtension().getPrefix();
	String exchangeName = producerDestination.getName();
	String destination = StringUtils.isEmpty(prefix) ? exchangeName
			: exchangeName.substring(prefix.length());
	final AmqpOutboundEndpoint endpoint = new AmqpOutboundEndpoint(
			buildRabbitTemplate(producerProperties.getExtension(),
					errorChannel != null));
	endpoint.setExchangeName(producerDestination.getName());
	RabbitProducerProperties extendedProperties = producerProperties.getExtension();
	boolean expressionInterceptorNeeded = expressionInterceptorNeeded(
			extendedProperties);
	Expression routingKeyExpression = extendedProperties.getRoutingKeyExpression();
	if (!producerProperties.isPartitioned()) {
		if (routingKeyExpression == null) {
			endpoint.setRoutingKey(destination);
		}
		else {
			if (expressionInterceptorNeeded) {
				endpoint.setRoutingKeyExpressionString("headers['"
						+ RabbitExpressionEvaluatingInterceptor.ROUTING_KEY_HEADER
						+ "']");
			}
			else {
				endpoint.setRoutingKeyExpression(routingKeyExpression);
			}
		}
	}
	else {
		if (routingKeyExpression == null) {
			endpoint.setRoutingKeyExpression(
					buildPartitionRoutingExpression(destination, false));
		}
		else {
			if (expressionInterceptorNeeded) {
				endpoint.setRoutingKeyExpression(
						buildPartitionRoutingExpression("headers['"
								+ RabbitExpressionEvaluatingInterceptor.ROUTING_KEY_HEADER
								+ "']", true));
			}
			else {
				endpoint.setRoutingKeyExpression(buildPartitionRoutingExpression(
						routingKeyExpression.getExpressionString(), true));
			}
		}
	}
	if (extendedProperties.getDelayExpression() != null) {
		if (expressionInterceptorNeeded) {
			endpoint.setDelayExpressionString("headers['"
					+ RabbitExpressionEvaluatingInterceptor.DELAY_HEADER + "']");
		}
		else {
			endpoint.setDelayExpression(extendedProperties.getDelayExpression());
		}
	}
	DefaultAmqpHeaderMapper mapper = DefaultAmqpHeaderMapper.outboundMapper();
	List<String> headerPatterns = new ArrayList<>(extendedProperties.getHeaderPatterns().length + 3);
	headerPatterns.add("!" + BinderHeaders.PARTITION_HEADER);
	headerPatterns.add("!" + IntegrationMessageHeaderAccessor.SOURCE_DATA);
	headerPatterns.add("!" + IntegrationMessageHeaderAccessor.DELIVERY_ATTEMPT);
	headerPatterns.addAll(Arrays.asList(extendedProperties.getHeaderPatterns()));
	mapper.setRequestHeaderNames(
			headerPatterns.toArray(new String[headerPatterns.size()]));
	endpoint.setHeaderMapper(mapper);
	endpoint.setDefaultDeliveryMode(extendedProperties.getDeliveryMode());
	endpoint.setBeanFactory(this.getBeanFactory());
	if (errorChannel != null) {
		checkConnectionFactoryIsErrorCapable();
		endpoint.setReturnChannel(errorChannel);
		endpoint.setConfirmNackChannel(errorChannel);
		String ackChannelBeanName = StringUtils
				.hasText(extendedProperties.getConfirmAckChannel())
						? extendedProperties.getConfirmAckChannel()
						: IntegrationContextUtils.NULL_CHANNEL_BEAN_NAME;
		if (!ackChannelBeanName.equals(IntegrationContextUtils.NULL_CHANNEL_BEAN_NAME)
				&& !getApplicationContext().containsBean(ackChannelBeanName)) {
			GenericApplicationContext context = (GenericApplicationContext) getApplicationContext();
			context.registerBean(ackChannelBeanName, DirectChannel.class,
					() -> new DirectChannel());
		}
		endpoint.setConfirmAckChannelName(ackChannelBeanName);
		endpoint.setConfirmCorrelationExpressionString("#root");
		endpoint.setErrorMessageStrategy(new DefaultErrorMessageStrategy());
	}
	endpoint.setHeadersMappedLast(true);
	return endpoint;
}