Java Code Examples for org.apache.flink.runtime.testutils.MiniClusterResource#after()

The following examples show how to use org.apache.flink.runtime.testutils.MiniClusterResource#after(). Each example is taken from an open-source project; the original project and source file are listed above the code.
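In all of the examples, after() is the counterpart of before(): before() starts the embedded Flink mini cluster and after() shuts it down again, which is why the call sits in a finally block. A minimal sketch of that lifecycle (the builder settings and the test body are placeholders, not taken from any particular example):

MiniClusterResource cluster = new MiniClusterResource(
	new MiniClusterResourceConfiguration.Builder()
		.setNumberTaskManagers(1)
		.setNumberSlotsPerTaskManager(1)
		.build());
cluster.before(); // start the mini cluster

try {
	// run the actual test against the cluster, e.g. using cluster.getClientConfiguration()
} finally {
	cluster.after(); // stop the cluster even if the test body throws
}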
Example 1
Source File: ManualExactlyOnceTest.java    From Flink-CEPplus with Apache License 2.0
public static void main(String[] args) throws Exception {
	final ParameterTool pt = ParameterTool.fromArgs(args);
	LOG.info("Starting exactly once test");

	final String streamName = "flink-test-" + UUID.randomUUID().toString();
	final String accessKey = pt.getRequired("accessKey");
	final String secretKey = pt.getRequired("secretKey");
	final String region = pt.getRequired("region");

	Properties configProps = new Properties();
	configProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, accessKey);
	configProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, secretKey);
	configProps.setProperty(AWSConfigConstants.AWS_REGION, region);
	AmazonKinesis client = AWSUtil.createKinesisClient(configProps);

	// create a stream for the test:
	client.createStream(streamName, 1);

	// wait until stream has been created
	DescribeStreamResult status = client.describeStream(streamName);
	LOG.info("status {}" , status);
	while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
		status = client.describeStream(streamName);
		LOG.info("Status of stream {}", status);
		Thread.sleep(1000);
	}

	final Configuration flinkConfig = new Configuration();
	flinkConfig.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "16m");
	flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

	MiniClusterResource flink = new MiniClusterResource(new MiniClusterResourceConfiguration.Builder()
		.setNumberTaskManagers(1)
		.setNumberSlotsPerTaskManager(8)
		.setConfiguration(flinkConfig)
		.build());
	flink.before();

	final int flinkPort = flink.getRestAddres().getPort();

	try {
		final AtomicReference<Throwable> producerError = new AtomicReference<>();
		Thread producerThread = KinesisEventsGeneratorProducerThread.create(
			TOTAL_EVENT_COUNT, 2,
			accessKey, secretKey, region, streamName,
			producerError, flinkPort, flinkConfig);
		producerThread.start();

		final AtomicReference<Throwable> consumerError = new AtomicReference<>();
		Thread consumerThread = ExactlyOnceValidatingConsumerThread.create(
			TOTAL_EVENT_COUNT, 200, 2, 500, 500,
			accessKey, secretKey, region, streamName,
			consumerError, flinkPort, flinkConfig);
		consumerThread.start();

		boolean deadlinePassed = false;
		long deadline = System.currentTimeMillis() + (1000 * 2 * 60); // wait at most for two minutes
		// wait until both producer and consumer finish, or an unexpected error is thrown
		while ((consumerThread.isAlive() || producerThread.isAlive()) &&
			(producerError.get() == null && consumerError.get() == null)) {
			Thread.sleep(1000);
			if (System.currentTimeMillis() >= deadline) {
				LOG.warn("Deadline passed");
				deadlinePassed = true;
				break; // enough waiting
			}
		}

		if (producerThread.isAlive()) {
			producerThread.interrupt();
		}

		if (consumerThread.isAlive()) {
			consumerThread.interrupt();
		}

		if (producerError.get() != null) {
			LOG.info("+++ TEST failed! +++");
			throw new RuntimeException("Producer failed", producerError.get());
		}
		if (consumerError.get() != null) {
			LOG.info("+++ TEST failed! +++");
			throw new RuntimeException("Consumer failed", consumerError.get());
		}

		if (!deadlinePassed) {
			LOG.info("+++ TEST passed! +++");
		} else {
			LOG.info("+++ TEST failed! +++");
		}

	} finally {
		client.deleteStream(streamName);
		client.shutdown();

		// stopping flink
		flink.after();
	}
}
 
Example 2
Source File: JarRunHandlerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testRunJar() throws Exception {
	Path uploadDir = TMP.newFolder().toPath();

	Path actualUploadDir = uploadDir.resolve("flink-web-upload");
	Files.createDirectory(actualUploadDir);

	Path emptyJar = actualUploadDir.resolve("empty.jar");
	Files.createFile(emptyJar);

	Configuration config = new Configuration();
	config.setString(WebOptions.UPLOAD_DIR, uploadDir.toString());

	MiniClusterResource clusterResource = new MiniClusterResource(
		new MiniClusterResourceConfiguration.Builder()
			.setConfiguration(config)
			.setNumberTaskManagers(1)
			.setNumberSlotsPerTaskManager(1)
			.build());
	clusterResource.before();

	try {
		Configuration clientConfig = clusterResource.getClientConfiguration();
		RestClient client = new RestClient(RestClientConfiguration.fromConfiguration(clientConfig), TestingUtils.defaultExecutor());

		try {
			JarRunHeaders headers = JarRunHeaders.getInstance();
			JarRunMessageParameters parameters = headers.getUnresolvedMessageParameters();
			parameters.jarIdPathParameter.resolve(emptyJar.getFileName().toString());

			String host = clientConfig.getString(RestOptions.ADDRESS);
			int port = clientConfig.getInteger(RestOptions.PORT);

			try {
				client.sendRequest(host, port, headers, parameters, new JarRunRequestBody())
					.get();
			} catch (Exception e) {
				Optional<RestClientException> expected = ExceptionUtils.findThrowable(e, RestClientException.class);
				if (expected.isPresent()) {
					// implies the job was actually submitted
					assertTrue(expected.get().getMessage().contains("ProgramInvocationException"));
					// original cause is preserved in stack trace
					assertThat(expected.get().getMessage(), containsString("ZipException"));
					// implies the jar was registered for the job graph (otherwise the jar name would not occur in the exception)
					// implies the jar was uploaded (otherwise the file would not be found at all)
					assertTrue(expected.get().getMessage().contains("empty.jar'. zip file is empty"));
				} else {
					throw e;
				}
			}
		} finally {
			client.shutdown(Time.milliseconds(10));
		}
	} finally {
		clusterResource.after();
	}
}
 
Example 3
Source File: ManualExactlyOnceTest.java    From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
	final ParameterTool pt = ParameterTool.fromArgs(args);
	LOG.info("Starting exactly once test");

	final String streamName = "flink-test-" + UUID.randomUUID().toString();
	final String accessKey = pt.getRequired("accessKey");
	final String secretKey = pt.getRequired("secretKey");
	final String region = pt.getRequired("region");

	Properties configProps = new Properties();
	configProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, accessKey);
	configProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, secretKey);
	configProps.setProperty(AWSConfigConstants.AWS_REGION, region);
	AmazonKinesis client = AWSUtil.createKinesisClient(configProps);

	// create a stream for the test:
	client.createStream(streamName, 1);

	// wait until stream has been created
	DescribeStreamResult status = client.describeStream(streamName);
	LOG.info("status {}" , status);
	while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
		status = client.describeStream(streamName);
		LOG.info("Status of stream {}", status);
		Thread.sleep(1000);
	}

	final Configuration flinkConfig = new Configuration();
	flinkConfig.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "16m");
	flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

	MiniClusterResource flink = new MiniClusterResource(new MiniClusterResourceConfiguration.Builder()
		.setNumberTaskManagers(1)
		.setNumberSlotsPerTaskManager(8)
		.setConfiguration(flinkConfig)
		.build());
	flink.before();

	final int flinkPort = flink.getRestAddres().getPort();

	try {
		final AtomicReference<Throwable> producerError = new AtomicReference<>();
		Thread producerThread = KinesisEventsGeneratorProducerThread.create(
			TOTAL_EVENT_COUNT, 2,
			accessKey, secretKey, region, streamName,
			producerError, flinkPort, flinkConfig);
		producerThread.start();

		final AtomicReference<Throwable> consumerError = new AtomicReference<>();
		Thread consumerThread = ExactlyOnceValidatingConsumerThread.create(
			TOTAL_EVENT_COUNT, 200, 2, 500, 500,
			accessKey, secretKey, region, streamName,
			consumerError, flinkPort, flinkConfig);
		consumerThread.start();

		boolean deadlinePassed = false;
		long deadline = System.currentTimeMillis() + (1000 * 2 * 60); // wait at most for two minutes
		// wait until both producer and consumer finish, or an unexpected error is thrown
		while ((consumerThread.isAlive() || producerThread.isAlive()) &&
			(producerError.get() == null && consumerError.get() == null)) {
			Thread.sleep(1000);
			if (System.currentTimeMillis() >= deadline) {
				LOG.warn("Deadline passed");
				deadlinePassed = true;
				break; // enough waiting
			}
		}

		if (producerThread.isAlive()) {
			producerThread.interrupt();
		}

		if (consumerThread.isAlive()) {
			consumerThread.interrupt();
		}

		if (producerError.get() != null) {
			LOG.info("+++ TEST failed! +++");
			throw new RuntimeException("Producer failed", producerError.get());
		}
		if (consumerError.get() != null) {
			LOG.info("+++ TEST failed! +++");
			throw new RuntimeException("Consumer failed", consumerError.get());
		}

		if (!deadlinePassed) {
			LOG.info("+++ TEST passed! +++");
		} else {
			LOG.info("+++ TEST failed! +++");
		}

	} finally {
		client.deleteStream(streamName);
		client.shutdown();

		// stopping flink
		flink.after();
	}
}
 
Example 4
Source File: JarRunHandlerTest.java    From flink with Apache License 2.0
@Test
public void testRunJar() throws Exception {
	Path uploadDir = TMP.newFolder().toPath();

	Path actualUploadDir = uploadDir.resolve("flink-web-upload");
	Files.createDirectory(actualUploadDir);

	Path emptyJar = actualUploadDir.resolve("empty.jar");
	Files.createFile(emptyJar);

	Configuration config = new Configuration();
	config.setString(WebOptions.UPLOAD_DIR, uploadDir.toString());

	MiniClusterResource clusterResource = new MiniClusterResource(
		new MiniClusterResourceConfiguration.Builder()
			.setConfiguration(config)
			.setNumberTaskManagers(1)
			.setNumberSlotsPerTaskManager(1)
			.build());
	clusterResource.before();

	try {
		Configuration clientConfig = clusterResource.getClientConfiguration();
		RestClient client = new RestClient(RestClientConfiguration.fromConfiguration(clientConfig), TestingUtils.defaultExecutor());

		try {
			JarRunHeaders headers = JarRunHeaders.getInstance();
			JarRunMessageParameters parameters = headers.getUnresolvedMessageParameters();
			parameters.jarIdPathParameter.resolve(emptyJar.getFileName().toString());

			String host = clientConfig.getString(RestOptions.ADDRESS);
			int port = clientConfig.getInteger(RestOptions.PORT);

			try {
				client.sendRequest(host, port, headers, parameters, new JarRunRequestBody())
					.get();
			} catch (Exception e) {
				Optional<RestClientException> expected = ExceptionUtils.findThrowable(e, RestClientException.class);
				if (expected.isPresent()) {
					// implies the job was actually submitted
					assertTrue(expected.get().getMessage().contains("ProgramInvocationException"));
					// original cause is preserved in stack trace
					assertThat(expected.get().getMessage(), containsString("ZipException: zip file is empty"));
					// implies the jar was registered for the job graph (otherwise the jar name would not occur in the exception)
					// implies the jar was uploaded (otherwise the file would not be found at all)
					assertTrue(expected.get().getMessage().contains("empty.jar"));
				} else {
					throw e;
				}
			}
		} finally {
			client.shutdown(Time.milliseconds(10));
		}
	} finally {
		clusterResource.after();
	}
}
 
Example 5
Source File: ManualExactlyOnceTest.java    From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
	final ParameterTool pt = ParameterTool.fromArgs(args);
	LOG.info("Starting exactly once test");

	final String streamName = "flink-test-" + UUID.randomUUID().toString();
	final String accessKey = pt.getRequired("accessKey");
	final String secretKey = pt.getRequired("secretKey");
	final String region = pt.getRequired("region");

	Properties configProps = new Properties();
	configProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, accessKey);
	configProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, secretKey);
	configProps.setProperty(AWSConfigConstants.AWS_REGION, region);
	AmazonKinesis client = AWSUtil.createKinesisClient(configProps);

	// create a stream for the test:
	client.createStream(streamName, 1);

	// wait until stream has been created
	DescribeStreamResult status = client.describeStream(streamName);
	LOG.info("status {}" , status);
	while (!status.getStreamDescription().getStreamStatus().equals("ACTIVE")) {
		status = client.describeStream(streamName);
		LOG.info("Status of stream {}", status);
		Thread.sleep(1000);
	}

	final Configuration flinkConfig = new Configuration();
	flinkConfig.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, MemorySize.parse("16m"));
	flinkConfig.setString(ConfigConstants.RESTART_STRATEGY_FIXED_DELAY_DELAY, "0 s");

	MiniClusterResource flink = new MiniClusterResource(new MiniClusterResourceConfiguration.Builder()
		.setNumberTaskManagers(1)
		.setNumberSlotsPerTaskManager(8)
		.setConfiguration(flinkConfig)
		.build());
	flink.before();

	final int flinkPort = flink.getRestAddres().getPort();

	try {
		final AtomicReference<Throwable> producerError = new AtomicReference<>();
		Thread producerThread = KinesisEventsGeneratorProducerThread.create(
			TOTAL_EVENT_COUNT, 2,
			accessKey, secretKey, region, streamName,
			producerError, flinkPort, flinkConfig);
		producerThread.start();

		final AtomicReference<Throwable> consumerError = new AtomicReference<>();
		Thread consumerThread = ExactlyOnceValidatingConsumerThread.create(
			TOTAL_EVENT_COUNT, 200, 2, 500, 500,
			accessKey, secretKey, region, streamName,
			consumerError, flinkPort, flinkConfig);
		consumerThread.start();

		boolean deadlinePassed = false;
		long deadline = System.currentTimeMillis() + (1000 * 2 * 60); // wait at most for two minutes
		// wait until both producer and consumer finish, or an unexpected error is thrown
		while ((consumerThread.isAlive() || producerThread.isAlive()) &&
			(producerError.get() == null && consumerError.get() == null)) {
			Thread.sleep(1000);
			if (System.currentTimeMillis() >= deadline) {
				LOG.warn("Deadline passed");
				deadlinePassed = true;
				break; // enough waiting
			}
		}

		if (producerThread.isAlive()) {
			producerThread.interrupt();
		}

		if (consumerThread.isAlive()) {
			consumerThread.interrupt();
		}

		if (producerError.get() != null) {
			LOG.info("+++ TEST failed! +++");
			throw new RuntimeException("Producer failed", producerError.get());
		}
		if (consumerError.get() != null) {
			LOG.info("+++ TEST failed! +++");
			throw new RuntimeException("Consumer failed", consumerError.get());
		}

		if (!deadlinePassed) {
			LOG.info("+++ TEST passed! +++");
		} else {
			LOG.info("+++ TEST failed! +++");
		}

	} finally {
		client.deleteStream(streamName);
		client.shutdown();

		// stopping flink
		flink.after();
	}
}
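The examples above call before() and after() explicitly because they either run in a plain main() method or want to control the cluster's lifetime themselves. Because MiniClusterResource extends JUnit's ExternalResource (its before()/after() map onto the rule lifecycle), it can also be registered as a rule, in which case JUnit invokes both methods automatically. A hedged sketch of that alternative, assuming a plain JUnit 4 test class (the class and test names are placeholders):

import org.apache.flink.runtime.testutils.MiniClusterResource;
import org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration;
import org.junit.ClassRule;
import org.junit.Test;

public class MiniClusterLifecycleTest {

	// JUnit calls before() when the class starts and after() once all tests have run
	@ClassRule
	public static final MiniClusterResource MINI_CLUSTER = new MiniClusterResource(
		new MiniClusterResourceConfiguration.Builder()
			.setNumberTaskManagers(1)
			.setNumberSlotsPerTaskManager(1)
			.build());

	@Test
	public void testAgainstRunningCluster() throws Exception {
		// the mini cluster is already running here; no explicit after() call is needed
	}
}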