org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler Java Examples

The following examples show how to use org.apache.flink.streaming.connectors.elasticsearch.util.NoOpFailureHandler. Each example is taken from an open-source project; the source file and project are noted above the code.
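Despite its name, NoOpFailureHandler is the connector's fail-fast default: its onFailure callback simply rethrows the failure, so a single failed action request fails the sink and triggers job recovery. As a minimal sketch of that behavior (the class name FailFastHandler and the imports are ours for illustration; the real handler lives in the util package named above):

import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.elasticsearch.action.ActionRequest;

// Equivalent of the fail-fast behavior: rethrow and let the job fail over.
public class FailFastHandler implements ActionRequestFailureHandler {
	@Override
	public void onFailure(
			ActionRequest action,
			Throwable failure,
			int restStatusCode,
			RequestIndexer indexer) throws Throwable {
		throw failure; // no retry, no drop; the sink fails with this exception
	}
}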
Example #1
Source File: ElasticsearchUpsertTableSinkFactoryBase.java    From flink with Apache License 2.0
private ActionRequestFailureHandler getFailureHandler(DescriptorProperties descriptorProperties) {
	final String failureHandler = descriptorProperties
		.getOptionalString(CONNECTOR_FAILURE_HANDLER)
		.orElse(DEFAULT_FAILURE_HANDLER);
	switch (failureHandler) {
		case CONNECTOR_FAILURE_HANDLER_VALUE_FAIL:
			return new NoOpFailureHandler();
		case CONNECTOR_FAILURE_HANDLER_VALUE_IGNORE:
			return new IgnoringFailureHandler();
		case CONNECTOR_FAILURE_HANDLER_VALUE_RETRY:
			return new RetryRejectedExecutionFailureHandler();
		case CONNECTOR_FAILURE_HANDLER_VALUE_CUSTOM:
			final Class<? extends ActionRequestFailureHandler> clazz = descriptorProperties
				.getClass(CONNECTOR_FAILURE_HANDLER_CLASS, ActionRequestFailureHandler.class);
			return InstantiationUtil.instantiate(clazz);
		default:
			throw new IllegalArgumentException("Unknown failure handler.");
	}
}
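The handler returned here is selected by the connector's failure-handler property. A hedged sketch of how these properties might be set (the exact key and value strings below are assumptions based on Flink's ElasticsearchValidator; verify them against your Flink version):

// Assumed keys/values (per org.apache.flink.table.descriptors.ElasticsearchValidator):
//   connector.failure-handler       = fail | ignore | retry-rejected | custom
//   connector.failure-handler-class = fully qualified ActionRequestFailureHandler
// Note that "fail" maps to NoOpFailureHandler, i.e. the sink fails on the first error.
DescriptorProperties props = new DescriptorProperties();
props.putString("connector.failure-handler", "custom");
props.putString("connector.failure-handler-class", "com.example.MyFailureHandler"); // hypothetical class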
 
Example #2
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/**
 * Verifies that the collection given to the sink is not modified.
 */
@Test
public void testCollectionArgumentNotModified() {
	Map<String, String> userConfig = new HashMap<>();
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_DELAY, "1");
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_ENABLE, "true");
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_RETRIES, "1");
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_BACKOFF_TYPE, "CONSTANT");
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_INTERVAL_MS, "1");
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
	userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_SIZE_MB, "1");

	new DummyElasticsearchSink<>(
		Collections.unmodifiableMap(userConfig),
		new SimpleSinkFunction<String>(),
		new NoOpFailureHandler());
}
 
Example #3
Source File: Elasticsearch7DynamicSinkTest.java    From flink with Apache License 2.0
@Test
public void testDefaultConfig() {
	final TableSchema schema = createTestSchema();
	Configuration configuration = new Configuration();
	configuration.setString(ElasticsearchOptions.INDEX_OPTION.key(), INDEX);
	configuration.setString(ElasticsearchOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
	configuration.setString(ElasticsearchOptions.HOSTS_OPTION.key(), SCHEMA + "://" + HOSTNAME + ":" + PORT);

	BuilderProvider provider = new BuilderProvider();
	final Elasticsearch7DynamicSink testSink = new Elasticsearch7DynamicSink(
		new DummyEncodingFormat(),
		new Elasticsearch7Configuration(configuration, this.getClass().getClassLoader()),
		schema,
		provider
	);

	testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();

	verify(provider.builderSpy).setFailureHandler(new NoOpFailureHandler());
	verify(provider.builderSpy).setBulkFlushBackoff(false);
	verify(provider.builderSpy).setBulkFlushInterval(1000);
	verify(provider.builderSpy).setBulkFlushMaxActions(1000);
	verify(provider.builderSpy).setBulkFlushMaxSizeMb(2);
	verify(provider.builderSpy).setRestClientFactory(new Elasticsearch7DynamicSink.DefaultRestClientFactory(null));
	verify(provider.sinkSpy, never()).disableFlushOnCheckpoint();
}
 
Example #4
Source File: Elasticsearch6DynamicSinkTest.java    From flink with Apache License 2.0
@Test
public void testDefaultConfig() {
	final TableSchema schema = createTestSchema();
	Configuration configuration = new Configuration();
	configuration.setString(ElasticsearchOptions.INDEX_OPTION.key(), INDEX);
	configuration.setString(ElasticsearchOptions.DOCUMENT_TYPE_OPTION.key(), DOC_TYPE);
	configuration.setString(ElasticsearchOptions.HOSTS_OPTION.key(), SCHEMA + "://" + HOSTNAME + ":" + PORT);

	BuilderProvider provider = new BuilderProvider();
	final Elasticsearch6DynamicSink testSink = new Elasticsearch6DynamicSink(
		new DummyEncodingFormat(),
		new Elasticsearch6Configuration(configuration, this.getClass().getClassLoader()),
		schema,
		provider
	);

	testSink.getSinkRuntimeProvider(new MockSinkContext()).createSinkFunction();

	verify(provider.builderSpy).setFailureHandler(new NoOpFailureHandler());
	verify(provider.builderSpy).setBulkFlushBackoff(false);
	verify(provider.builderSpy).setBulkFlushInterval(1000);
	verify(provider.builderSpy).setBulkFlushMaxActions(1000);
	verify(provider.builderSpy).setBulkFlushMaxSizeMb(2);
	verify(provider.builderSpy).setRestClientFactory(new Elasticsearch6DynamicSink.DefaultRestClientFactory(null));
	verify(provider.sinkSpy, never()).disableFlushOnCheckpoint();
}
 
Example #5
Source File: ActionRequestFailureHandlerUtil.java    From alchemy with Apache License 2.0
public static ActionRequestFailureHandler createFailureHandler(String failureHandler) {
    if (failureHandler == null || failureHandler.trim().length() == 0) {
        return new NoOpFailureHandler();
    }
    FailureHandler handler = FailureHandler.valueOf(failureHandler.toUpperCase());
    switch (handler) {
        case IGNORE:
            return new IgnoreFailureHandler();
        case RETRYREJECTED:
            return new RetryRejectedExecutionFailureHandler();
        default:
            return new NoOpFailureHandler();
    }
}
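A hypothetical call site for this helper: a null or blank name falls back to the fail-fast NoOpFailureHandler, and other names are upper-cased before the FailureHandler enum lookup, so matching is case-insensitive:

// Hypothetical usage of the helper above.
ActionRequestFailureHandler failFast = ActionRequestFailureHandlerUtil.createFailureHandler(null);
ActionRequestFailureHandler retrying = ActionRequestFailureHandlerUtil.createFailureHandler("retryRejected");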
 
Example #6
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/** Tests that any bulk failure in the listener callbacks is rethrown on an immediately following checkpoint. */
@Test
public void testBulkFailureRethrownOnCheckpoint() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and let the whole bulk request fail
	sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));
	testHarness.processElement(new StreamRecord<>("msg"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request
	sink.manualBulkRequestWithAllPendingRequests();

	try {
		testHarness.snapshot(1L, 1000L);
	} catch (Exception e) {
		// the snapshot should have failed with the bulk request failure
		Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for bulk request"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example #7
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/** Tests that any bulk failure in the listener callbacks is rethrown on an immediately following invoke call. */
@Test
public void testBulkFailureRethrownOnInvoke() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and let the whole bulk request fail
	sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));
	testHarness.processElement(new StreamRecord<>("msg"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request
	sink.manualBulkRequestWithAllPendingRequests();

	try {
		testHarness.processElement(new StreamRecord<>("next msg"));
	} catch (Exception e) {
		// the invoke should have failed with the bulk request failure
		Assert.assertTrue(e.getCause().getMessage().contains("artificial failure for bulk request"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example #8
Source File: ElasticsearchSink.java    From flink with Apache License 2.0
/**
 * Creates a new {@code ElasticsearchSink} that connects to the cluster using a {@link TransportClient}.
 *
 * @param userConfig The map of user settings that are used when constructing the {@link TransportClient} and {@link BulkProcessor}
 * @param transportAddresses The addresses of Elasticsearch nodes to which to connect using a {@link TransportClient}
 * @param elasticsearchSinkFunction This is used to generate multiple {@link ActionRequest} from the incoming element
 */
public ElasticsearchSink(
	Map<String, String> userConfig,
	List<InetSocketAddress> transportAddresses,
	ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {

	this(userConfig, transportAddresses, elasticsearchSinkFunction, new NoOpFailureHandler());
}
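To override the fail-fast default, call the four-argument constructor that this convenience constructor delegates to. A sketch, assuming the usual java.net/java.util imports and a user-defined ElasticsearchSinkFunction<String> named mySinkFunction:

// Sketch: pass a handler explicitly instead of the NoOpFailureHandler default.
List<InetSocketAddress> addresses = Collections.singletonList(
	new InetSocketAddress("127.0.0.1", 9300));
ElasticsearchSink<String> sink = new ElasticsearchSink<>(
	new HashMap<String, String>(), addresses, mySinkFunction,
	new RetryRejectedExecutionFailureHandler());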
 
Example #9
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/** Tests that any item failure in the listener callbacks is rethrown on an immediately following checkpoint. */
@Test
public void testItemFailureRethrownOnCheckpoint() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and its mock item failures
	sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList(new Exception("artificial failure for record")));
	testHarness.processElement(new StreamRecord<>("msg"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request
	sink.manualBulkRequestWithAllPendingRequests();

	try {
		testHarness.snapshot(1L, 1000L);
	} catch (Exception e) {
		// the snapshot should have failed with the failure
		Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for record"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example #10
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/** Tests that any item failure in the listener callbacks is rethrown on an immediately following invoke call. */
@Test
public void testItemFailureRethrownOnInvoke() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and its mock item failures
	sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList(new Exception("artificial failure for record")));
	testHarness.processElement(new StreamRecord<>("msg"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request
	sink.manualBulkRequestWithAllPendingRequests();

	try {
		testHarness.processElement(new StreamRecord<>("next msg"));
	} catch (Exception e) {
		// the invoke should have failed with the failure
		Assert.assertTrue(e.getCause().getMessage().contains("artificial failure for record"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example #11
Source File: ElasticsearchTest.java    From flink with Apache License 2.0
@Override
public List<Descriptor> descriptors() {
	final Descriptor minimumDesc =
		new Elasticsearch()
			.version("6")
			.host("localhost", 1234, "http")
			.index("MyIndex")
			.documentType("MyType");

	final Descriptor maximumDesc =
		new Elasticsearch()
			.version("6")
			.host("host1", 1234, "https")
			.host("host2", 1234, "https")
			.index("MyIndex")
			.documentType("MyType")
			.keyDelimiter("#")
			.keyNullLiteral("")
			.bulkFlushBackoffExponential()
			.bulkFlushBackoffDelay(123L)
			.bulkFlushBackoffMaxRetries(3)
			.bulkFlushInterval(100L)
			.bulkFlushMaxActions(1000)
			.bulkFlushMaxSize("12 MB")
			.failureHandlerRetryRejected()
			.connectionMaxRetryTimeout(100)
			.connectionPathPrefix("/myapp");

	final Descriptor customDesc =
		new Elasticsearch()
			.version("6")
			.host("localhost", 1234, "http")
			.index("MyIndex")
			.documentType("MyType")
			.disableFlushOnCheckpoint()
			.failureHandlerCustom(NoOpFailureHandler.class);

	return Arrays.asList(minimumDesc, maximumDesc, customDesc);
}
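One practical note on failureHandlerCustom: as Example #1 shows, the configured class is instantiated reflectively via InstantiationUtil, so a custom handler needs a public no-argument constructor. A hypothetical handler with drop-on-failure semantics (the class name is ours for illustration; Flink ships IgnoringFailureHandler for exactly this, as Example #1 also shows):

// Hypothetical custom handler: swallow the failure and drop the record.
public class DropOnFailureHandler implements ActionRequestFailureHandler {
	@Override
	public void onFailure(
			ActionRequest action,
			Throwable failure,
			int restStatusCode,
			RequestIndexer indexer) {
		// intentionally a no-op in the literal sense: the failed request is dropped
	}
}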
 
Example #12
Source File: ElasticsearchSink.java    From flink with Apache License 2.0
/**
 * Creates a new {@code ElasticsearchSink} that connects to the cluster using a {@link TransportClient}.
 *
 * @param userConfig The map of user settings that are passed when constructing the {@link TransportClient} and {@link BulkProcessor}
 * @param transportAddresses The addresses of Elasticsearch nodes to which to connect using a {@link TransportClient}
 * @param elasticsearchSinkFunction This is used to generate multiple {@link ActionRequest} from the incoming element
 */
public ElasticsearchSink(
	Map<String, String> userConfig,
	List<InetSocketAddress> transportAddresses,
	org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction<T> elasticsearchSinkFunction) {

	this(userConfig, transportAddresses, elasticsearchSinkFunction, new NoOpFailureHandler());
}
 
Example #13
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/**
 * Tests that any item failure in the listener callbacks due to flushing on an immediately following checkpoint
 * is rethrown; we set a timeout because the test will not finish if the logic is broken.
 */
@Test(timeout = 5000)
public void testItemFailureRethrownOnCheckpointAfterFlush() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and its mock item failures

	List<Exception> mockResponsesList = new ArrayList<>(2);
	mockResponsesList.add(null); // the first request in a bulk will succeed
	mockResponsesList.add(new Exception("artificial failure for record")); // the second request in a bulk will fail
	sink.setMockItemFailuresListForNextBulkItemResponses(mockResponsesList);

	testHarness.processElement(new StreamRecord<>("msg-1"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request (1 request only, thus should succeed)
	sink.manualBulkRequestWithAllPendingRequests();

	// setup the requests to be flushed in the snapshot
	testHarness.processElement(new StreamRecord<>("msg-2"));
	testHarness.processElement(new StreamRecord<>("msg-3"));
	verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));

	CheckedThread snapshotThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			testHarness.snapshot(1L, 1000L);
		}
	};
	snapshotThread.start();

	// the snapshot should eventually be blocked before snapshot triggers flushing
	while (snapshotThread.getState() != Thread.State.WAITING) {
		Thread.sleep(10);
	}

	// let the snapshot-triggered flush continue (2 records in the bulk, so the 2nd one should fail)
	sink.continueFlush();

	try {
		snapshotThread.sync();
	} catch (Exception e) {
		// the snapshot should have failed with the failure from the 2nd request
		Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for record"));

		// test succeeded
		return;
	}

	Assert.fail();
}