Java Code Examples for org.apache.flink.util.OperatingSystem

The following examples show how to use org.apache.flink.util.OperatingSystem. These examples are extracted from open source projects; the source project, source file, and license are noted above each example where available.
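All of the snippets on this page query the operating system through a handful of static helpers on OperatingSystem (isWindows(), isLinux(), isMac(), isFreeBSD(), isSolaris()) or through its enum constants (e.g. OperatingSystem.WINDOWS in the LocalStandaloneKafkaResource example). The following minimal sketch summarizes that API surface; the class name, the printed messages, and the getCurrentOperatingSystem() accessor are illustrative assumptions rather than code taken from the examples below.

import org.apache.flink.util.OperatingSystem;

public class OsCheckSketch {

	public static void main(String[] args) {
		// Boolean helpers, used throughout the examples to guard platform-specific tests.
		if (OperatingSystem.isWindows()) {
			System.out.println("Running on Windows");
		} else if (OperatingSystem.isLinux() || OperatingSystem.isMac()
				|| OperatingSystem.isFreeBSD() || OperatingSystem.isSolaris()) {
			System.out.println("Running on a UNIX-like operating system");
		}

		// Enum constants can also be compared directly (accessor assumed, not shown in the examples).
		OperatingSystem current = OperatingSystem.getCurrentOperatingSystem();
		System.out.println("Detected operating system: " + current);
	}
}
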
Example 1
Source Project: flink   Source File: PythonDependencyInfoTest.java    License: Apache License 2.0
@Test
public void testParsePythonArchives() {
	// Skip this test on Windows as we cannot control the Windows drive letters.
	Assume.assumeFalse(OperatingSystem.isWindows());

	Configuration config = new Configuration();
	Map<String, String> pythonArchives = new HashMap<>();
	pythonArchives.put("python_archive_{SHA256_0}", "py27.zip");
	pythonArchives.put("python_archive_{SHA256_1}", "py37");
	config.set(PythonDependencyUtils.PYTHON_ARCHIVES, pythonArchives);
	PythonDependencyInfo dependencyInfo = PythonDependencyInfo.create(new PythonConfig(config), distributedCache);

	Map<String, String> expected = new HashMap<>();
	expected.put("/distributed_cache/file4", "py27.zip");
	expected.put("/distributed_cache/file5", "py37");
	assertEquals(expected, dependencyInfo.getArchives());
}
 
Example 2
Source Project: flink   Source File: EnvironmentInformation.java    License: Apache License 2.0
/**
 * Tries to retrieve the maximum number of open file handles. This method will only work on
 * UNIX-based operating systems with Sun/Oracle Java versions.
 * 
 * <p>If the number of max open file handles cannot be determined, this method returns {@code -1}.</p>
 * 
 * @return The limit of open file handles, or {@code -1}, if the limit could not be determined.
 */
public static long getOpenFileHandlesLimit() {
	if (OperatingSystem.isWindows()) { // getMaxFileDescriptorCount method is not available on Windows
		return -1L;
	}
	Class<?> sunBeanClass;
	try {
		sunBeanClass = Class.forName("com.sun.management.UnixOperatingSystemMXBean");
	}
	catch (ClassNotFoundException e) {
		return -1L;
	}
	
	try {
		Method fhLimitMethod = sunBeanClass.getMethod("getMaxFileDescriptorCount");
		Object result = fhLimitMethod.invoke(ManagementFactory.getOperatingSystemMXBean());
		return (Long) result;
	}
	catch (Throwable t) {
		LOG.warn("Unexpected error when accessing file handle limit", t);
		return -1L;
	}
}
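A minimal caller sketch, assuming an SLF4J logger named LOG at the call site (the logger and the log messages are illustrative): the -1 sentinel should be treated as "limit unknown".

long limit = EnvironmentInformation.getOpenFileHandlesLimit();
if (limit == -1L) {
	// could not be determined, e.g. on Windows or on non-Sun/Oracle JVMs
	LOG.info("The maximum number of open file descriptors could not be determined.");
} else {
	LOG.info("The maximum number of open file descriptors is {}.", limit);
}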
 
Example 3
Source Project: flink   Source File: PythonEnvironmentManagerUtils.java    License: Apache License 2.0
public static String getPythonUdfRunnerScript(
		String pythonExecutable,
		Map<String, String> environmentVariables) throws IOException {
	String runnerDir;
	if (environmentVariables.containsKey(PYFLINK_UDF_RUNNER_DIR)) {
		runnerDir = environmentVariables.get(PYFLINK_UDF_RUNNER_DIR);
	} else {
		String[] commands = new String[] { pythonExecutable, "-c", GET_RUNNER_DIR_SCRIPT};
		String out = execute(commands, environmentVariables, false);
		runnerDir = out.trim();
	}
	String runnerScriptPath;
	if (OperatingSystem.isWindows()) {
		runnerScriptPath = String.join(File.separator, runnerDir, PYFLINK_UDF_RUNNER_BAT);
	} else {
		runnerScriptPath = String.join(File.separator, runnerDir, PYFLINK_UDF_RUNNER_SH);
	}
	return runnerScriptPath;
}
 
Example 4
Source Project: Flink-CEPplus   Source File: EnvironmentInformation.java    License: Apache License 2.0
/**
 * Tries to retrieve the maximum number of open file handles. This method will only work on
 * UNIX-based operating systems with Sun/Oracle Java versions.
 * 
 * <p>If the number of max open file handles cannot be determined, this method returns {@code -1}.</p>
 * 
 * @return The limit of open file handles, or {@code -1}, if the limit could not be determined.
 */
public static long getOpenFileHandlesLimit() {
	if (OperatingSystem.isWindows()) { // getMaxFileDescriptorCount method is not available on Windows
		return -1L;
	}
	Class<?> sunBeanClass;
	try {
		sunBeanClass = Class.forName("com.sun.management.UnixOperatingSystemMXBean");
	}
	catch (ClassNotFoundException e) {
		return -1L;
	}
	
	try {
		Method fhLimitMethod = sunBeanClass.getMethod("getMaxFileDescriptorCount");
		Object result = fhLimitMethod.invoke(ManagementFactory.getOperatingSystemMXBean());
		return (Long) result;
	}
	catch (Throwable t) {
		LOG.warn("Unexpected error when accessing file handle limit", t);
		return -1L;
	}
}
 
Example 5
Source Project: flink   Source File: JvmExitOnFatalErrorTest.java    License: Apache License 2.0
@Test
	public void testExitJvmOnOutOfMemory() throws Exception {
		// this test works only on linux
		assumeTrue(OperatingSystem.isLinux());

		// to check what went wrong (when the test hangs) uncomment this line
//		ProcessEntryPoint.main(new String[0]);

		final KillOnFatalErrorProcess testProcess = new KillOnFatalErrorProcess();

		try {
			testProcess.startProcess();
			testProcess.waitFor();
		}
		finally {
			testProcess.destroy();
		}
	}
 
Example 6
Source Project: flink   Source File: BucketingSinkTest.java    License: Apache License 2.0
@BeforeClass
public static void createHDFS() throws IOException {
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());

	Configuration conf = new Configuration();

	File dataDir = tempFolder.newFolder();

	conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
	hdfsCluster = builder.build();

	dfs = hdfsCluster.getFileSystem();

	hdfsURI = "hdfs://"
		+ NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
		+ "/";
}
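The matching teardown is not shown on this page. A minimal sketch, assuming the static hdfsCluster field assigned above (the method name is illustrative), shuts the mini cluster down once the tests have finished:

@AfterClass
public static void destroyHDFS() {
	if (hdfsCluster != null) {
		hdfsCluster.shutdown();
	}
}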
 
Example 7
Source Project: flink   Source File: ContinuousFileProcessingTest.java    License: Apache License 2.0
@BeforeClass
public static void createHDFS() {
	Assume.assumeTrue("HDFS cluster cannot be start on Windows without extensions.", !OperatingSystem.isWindows());

	try {
		File hdfsDir = tempFolder.newFolder();

		org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
		hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.getAbsolutePath());
		hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

		MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
		hdfsCluster = builder.build();

		hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
		hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

	} catch (Throwable e) {
		e.printStackTrace();
		Assert.fail("Test failed " + e.getMessage());
	}
}
 
Example 8
Source Project: flink   Source File: PythonDependencyInfoTest.java    License: Apache License 2.0
@Test
public void testParsePythonRequirements() throws IOException {
	// Skip this test on Windows as we cannot control the Windows drive letters.
	Assume.assumeFalse(OperatingSystem.isWindows());

	Configuration config = new Configuration();
	config.set(PythonDependencyUtils.PYTHON_REQUIREMENTS_FILE, new HashMap<>());
	config.get(PythonDependencyUtils.PYTHON_REQUIREMENTS_FILE)
		.put(PythonDependencyUtils.FILE, "python_requirements_file_{SHA256}");
	PythonDependencyInfo dependencyInfo = PythonDependencyInfo.create(new PythonConfig(config), distributedCache);

	assertEquals("/distributed_cache/file2", dependencyInfo.getRequirementsFilePath().get());
	assertFalse(dependencyInfo.getRequirementsCacheDir().isPresent());

	config.get(PythonDependencyUtils.PYTHON_REQUIREMENTS_FILE)
		.put(PythonDependencyUtils.CACHE, "python_requirements_cache_{SHA256}");
	dependencyInfo = PythonDependencyInfo.create(new PythonConfig(config), distributedCache);

	assertEquals("/distributed_cache/file2", dependencyInfo.getRequirementsFilePath().get());
	assertEquals("/distributed_cache/file3", dependencyInfo.getRequirementsCacheDir().get());
}
 
Example 9
Source Project: flink   Source File: PythonEnvUtilsTest.java    License: Apache License 2.0
@Test
public void testSetPythonExecutable() throws IOException {
	Configuration config = new Configuration();

	PythonEnvUtils.PythonEnvironment env = preparePythonEnvironment(config, null, tmpDirPath);
	if (OperatingSystem.isWindows()) {
		Assert.assertEquals("python.exe", env.pythonExec);
	} else {
		Assert.assertEquals("python", env.pythonExec);
	}

	Map<String, String> systemEnv = new HashMap<>(System.getenv());
	systemEnv.put(PYFLINK_CLIENT_EXECUTABLE, "python3");
	CommonTestUtils.setEnv(systemEnv);
	try {
		env = preparePythonEnvironment(config, null, tmpDirPath);
		Assert.assertEquals("python3", env.pythonExec);
	} finally {
		systemEnv.remove(PYFLINK_CLIENT_EXECUTABLE);
		CommonTestUtils.setEnv(systemEnv);
	}

	config.set(PYTHON_CLIENT_EXECUTABLE, "/usr/bin/python");
	env = preparePythonEnvironment(config, null, tmpDirPath);
	Assert.assertEquals("/usr/bin/python", env.pythonExec);
}
 
Example 10
Source Project: flink   Source File: YARNHighAvailabilityITCase.java    License: Apache License 2.0
/**
 * Tests that Yarn will restart a killed {@link YarnSessionClusterEntrypoint} which will then resume
 * a persisted {@link JobGraph}.
 */
@Test
public void testKillYarnSessionClusterEntrypoint() throws Exception {
	runTest(() -> {
		assumeTrue(
			"This test kills processes via the pkill command. Thus, it only runs on Linux, Mac OS, Free BSD and Solaris.",
			OperatingSystem.isLinux() || OperatingSystem.isMac() || OperatingSystem.isFreeBSD() || OperatingSystem.isSolaris());

		final YarnClusterDescriptor yarnClusterDescriptor = setupYarnClusterDescriptor();
		final RestClusterClient<ApplicationId> restClusterClient = deploySessionCluster(yarnClusterDescriptor);

		try {
			final JobID jobId = submitJob(restClusterClient);
			final ApplicationId id = restClusterClient.getClusterId();

			waitUntilJobIsRunning(restClusterClient, jobId);

			killApplicationMaster(yarnClusterDescriptor.getYarnSessionClusterEntrypoint());
			waitForApplicationAttempt(id, 2);

			waitForJobTermination(restClusterClient, jobId);

			killApplicationAndWait(id);
		} finally {
			restClusterClient.close();
		}
	});
}
 
Example 11
Source Project: flink   Source File: GlobFilePathFilterTest.java    License: Apache License 2.0
@Test
public void testExcludeFilenameWithStart() {
	Assume.assumeTrue("Windows does not allow asterisks in file names.", !OperatingSystem.isWindows());

	GlobFilePathFilter matcher = new GlobFilePathFilter(
		Collections.singletonList("**"),
		Collections.singletonList("\\*"));

	assertTrue(matcher.filterPath(new Path("*")));
	assertFalse(matcher.filterPath(new Path("**")));
	assertFalse(matcher.filterPath(new Path("other.txt")));
}
 
Example 12
Source Project: Flink-CEPplus   Source File: GlobFilePathFilterTest.java    License: Apache License 2.0
@Test
public void testExcludeFilenameWithStart() {
	Assume.assumeTrue("Windows does not allow asterisks in file names.", !OperatingSystem.isWindows());

	GlobFilePathFilter matcher = new GlobFilePathFilter(
		Collections.singletonList("**"),
		Collections.singletonList("\\*"));

	assertTrue(matcher.filterPath(new Path("*")));
	assertFalse(matcher.filterPath(new Path("**")));
	assertFalse(matcher.filterPath(new Path("other.txt")));
}
 
Example 13
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue("HDFS cluster cannot be start on Windows without extensions.", !OperatingSystem.isWindows());

	final File tempDir = TEMP_DIR.newFolder();

	Configuration hdConf = new Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();

	hdfsRootPath = new Path("hdfs://" + hdfsCluster.getURI().getHost() + ":"
			+ hdfsCluster.getNameNodePort() + "/");
}
 
Example 14
Source Project: flink   Source File: BlobServerPutTest.java    License: Apache License 2.0
/**
 * Uploads a byte array to a server which cannot create any files via the {@link BlobServer}.
 * File transfers should fail.
 *
 * @param jobId
 * 		job id
 * @param blobType
 * 		whether the BLOB should become permanent or transient
 */
private void testPutBufferFails(@Nullable final JobID jobId, BlobKey.BlobType blobType)
		throws IOException {
	assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

	final Configuration config = new Configuration();
	config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

	File tempFileDir = null;
	try (BlobServer server = new BlobServer(config, new VoidBlobStore())) {

		server.start();

		// make sure the blob server cannot create any files in its storage dir
		tempFileDir = server.createTemporaryFilename().getParentFile().getParentFile();
		assertTrue(tempFileDir.setExecutable(true, false));
		assertTrue(tempFileDir.setReadable(true, false));
		assertTrue(tempFileDir.setWritable(false, false));

		byte[] data = new byte[2000000];
		rnd.nextBytes(data);

		// upload the file to the server directly
		exception.expect(AccessDeniedException.class);

		put(server, jobId, data, blobType);

	} finally {
		// set writable again to make sure we can remove the directory
		if (tempFileDir != null) {
			//noinspection ResultOfMethodCallIgnored
			tempFileDir.setWritable(true, false);
		}
	}
}
 
Example 15
Source Project: Flink-CEPplus   Source File: YarnFileStageTest.java    License: Apache License 2.0
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue(!OperatingSystem.isWindows());

	final File tempDir = CLASS_TEMP_DIR.newFolder();

	org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();
	hdfsRootPath = new Path(hdfsCluster.getURI());
}
 
Example 16
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue(!OperatingSystem.isWindows());

	final File tempDir = TEMP_DIR.newFolder();

	org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();
	hdfsRootPath = new Path(hdfsCluster.getURI());
}
 
Example 17
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue(!OperatingSystem.isWindows());

	final File tempDir = TEMP_DIR.newFolder();

	org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();
	hdfsRootPath = new Path(hdfsCluster.getURI());
}
 
Example 18
Source Project: Flink-CEPplus   Source File: SignalHandler.java    License: Apache License 2.0
/**
 * Register some signal handlers.
 *
 * @param LOG The slf4j logger
 */
public static void register(final Logger LOG) {
	synchronized (SignalHandler.class) {
		if (registered) {
			return;
		}
		registered = true;

		final String[] SIGNALS = OperatingSystem.isWindows()
			? new String[]{ "TERM", "INT"}
			: new String[]{ "TERM", "HUP", "INT" };
		
		StringBuilder bld = new StringBuilder();
		bld.append("Registered UNIX signal handlers for [");
		
		String separator = "";
		for (String signalName : SIGNALS) {
			try {
				new Handler(signalName, LOG);
				bld.append(separator);
				bld.append(signalName);
				separator = ", ";
			} catch (Exception e) {
				LOG.info("Error while registering signal handler", e);
			}
		}
		bld.append("]");
		LOG.info(bld.toString());
	}
}
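A minimal usage sketch (the entry-point class and its logger are illustrative assumptions): register(...) is typically invoked once, early in the process entry point, so that the handlers are installed before any work starts.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MyEntryPoint {

	private static final Logger LOG = LoggerFactory.getLogger(MyEntryPoint.class);

	public static void main(String[] args) {
		// Installs handlers for TERM/HUP/INT (TERM/INT only on Windows) exactly once.
		SignalHandler.register(LOG);
		// ... start the actual process ...
	}
}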
 
Example 19
Source Project: Flink-CEPplus   Source File: BlobCachePutTest.java    License: Apache License 2.0
/**
 * Uploads a byte array to a server which cannot create any files via the {@link
 * BlobCacheService}. File transfers should fail.
 *
 * @param jobId
 * 		job id
 * @param blobType
 * 		whether the BLOB should become permanent or transient
 */
private void testPutBufferFails(@Nullable final JobID jobId, BlobKey.BlobType blobType)
		throws IOException {
	assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

	final Configuration config = new Configuration();
	config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

	File tempFileDir = null;
	try (
		BlobServer server = new BlobServer(config, new VoidBlobStore());
		BlobCacheService cache = new BlobCacheService(config, new VoidBlobStore(), new InetSocketAddress("localhost", server.getPort())
		)) {

		server.start();

		// make sure the blob server cannot create any files in its storage dir
		tempFileDir = server.createTemporaryFilename().getParentFile().getParentFile();
		assertTrue(tempFileDir.setExecutable(true, false));
		assertTrue(tempFileDir.setReadable(true, false));
		assertTrue(tempFileDir.setWritable(false, false));

		byte[] data = new byte[2000000];
		rnd.nextBytes(data);

		// upload the file to the server via the cache
		exception.expect(IOException.class);
		exception.expectMessage("PUT operation failed: ");

		put(cache, jobId, data, blobType);

	} finally {
		// set writable again to make sure we can remove the directory
		if (tempFileDir != null) {
			//noinspection ResultOfMethodCallIgnored
			tempFileDir.setWritable(true, false);
		}
	}
}
 
Example 20
Source Project: Flink-CEPplus   Source File: BlobUtilsNonWritableTest.java    License: Apache License 2.0
@Before
public void before() throws IOException {
	assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

	// Prepare test directory
	blobUtilsTestDirectory = temporaryFolder.newFolder();
	assertTrue(blobUtilsTestDirectory.setExecutable(true, false));
	assertTrue(blobUtilsTestDirectory.setReadable(true, false));
	assertTrue(blobUtilsTestDirectory.setWritable(false, false));
}
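The directory must be made writable again afterwards so that the TemporaryFolder rule can delete it. A hedged cleanup sketch (the method name is illustrative), following the pattern used in the finally blocks of the BlobServerPutTest and BlobCachePutTest examples above:

@After
public void after() {
	if (blobUtilsTestDirectory != null) {
		//noinspection ResultOfMethodCallIgnored
		blobUtilsTestDirectory.setWritable(true, false);
	}
}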
 
Example 21
Source Project: Flink-CEPplus   Source File: BlobServerTest.java    License: Apache License 2.0
@Nonnull
private File createNonWritableDirectory() throws IOException {
	assumeFalse(OperatingSystem.isWindows()); //setWritable doesn't work on Windows.
	final File blobStorageDirectory = temporaryFolder.newFolder();
	assertTrue(blobStorageDirectory.setExecutable(true, false));
	assertTrue(blobStorageDirectory.setReadable(true, false));
	assertTrue(blobStorageDirectory.setWritable(false, false));
	return blobStorageDirectory;
}
 
Example 22
Source Project: Flink-CEPplus   Source File: BlobServerPutTest.java    License: Apache License 2.0
/**
 * Uploads a byte array to a server which cannot create any files via the {@link BlobServer}.
 * File transfers should fail.
 *
 * @param jobId
 * 		job id
 * @param blobType
 * 		whether the BLOB should become permanent or transient
 */
private void testPutBufferFails(@Nullable final JobID jobId, BlobKey.BlobType blobType)
		throws IOException {
	assumeTrue(!OperatingSystem.isWindows()); //setWritable doesn't work on Windows.

	final Configuration config = new Configuration();
	config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

	File tempFileDir = null;
	try (BlobServer server = new BlobServer(config, new VoidBlobStore())) {

		server.start();

		// make sure the blob server cannot create any files in its storage dir
		tempFileDir = server.createTemporaryFilename().getParentFile().getParentFile();
		assertTrue(tempFileDir.setExecutable(true, false));
		assertTrue(tempFileDir.setReadable(true, false));
		assertTrue(tempFileDir.setWritable(false, false));

		byte[] data = new byte[2000000];
		rnd.nextBytes(data);

		// upload the file to the server directly
		exception.expect(AccessDeniedException.class);

		put(server, jobId, data, blobType);

	} finally {
		// set writable again to make sure we can remove the directory
		if (tempFileDir != null) {
			//noinspection ResultOfMethodCallIgnored
			tempFileDir.setWritable(true, false);
		}
	}
}
 
Example 23
Source Project: Flink-CEPplus   Source File: BlockingShutdownTest.java    License: Apache License 2.0
@Test
public void testProcessShutdownBlocking() throws Exception {
	// this test works only on linux
	assumeTrue(OperatingSystem.isLinux());

	final File markerFile = new File(
			EnvironmentInformation.getTemporaryFileDirectory(), UUID.randomUUID() + ".marker");

	final BlockingShutdownProcess blockingProcess = 
			new BlockingShutdownProcess(markerFile.getAbsolutePath(), 0, false);

	try {
		blockingProcess.startProcess();
		long pid = blockingProcess.getProcessId();
		assertTrue("Cannot determine process ID", pid != -1);

		// wait for the marker file to appear, which means the process is up properly
		TestJvmProcess.waitForMarkerFile(markerFile, 30000);

		// send it a regular kill command (SIG_TERM)
		Process kill = Runtime.getRuntime().exec("kill " + pid);
		kill.waitFor();
		assertEquals("failed to send SIG_TERM to process", 0, kill.exitValue());

		// minimal delay until the Java process object notices that the process is gone
		// this will not let the test fail predictably if the process is actually in fact going away,
		// but it would create frequent failures. Not ideal, but the best we can do without
		// severely prolonging the test
		Thread.sleep(50);

		// the process should not go away by itself
		assertTrue("Test broken, process shutdown blocking does not work", blockingProcess.isAlive());
	}
	finally {
		blockingProcess.destroy();

		//noinspection ResultOfMethodCallIgnored
		markerFile.delete();
	}
}
 
Example 24
Source Project: Flink-CEPplus   Source File: YARNHighAvailabilityITCase.java    License: Apache License 2.0
/**
 * Tests that Yarn will restart a killed {@link YarnSessionClusterEntrypoint} which will then resume
 * a persisted {@link JobGraph}.
 */
@Test
public void testKillYarnSessionClusterEntrypoint() throws Exception {
	assumeTrue(
		"This test kills processes via the pkill command. Thus, it only runs on Linux, Mac OS, Free BSD and Solaris.",
		OperatingSystem.isLinux() || OperatingSystem.isMac() || OperatingSystem.isFreeBSD() || OperatingSystem.isSolaris());

	final YarnClusterDescriptor yarnClusterDescriptor = setupYarnClusterDescriptor();
	yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkShadedHadoopDir.listFiles()));

	final RestClusterClient<ApplicationId> restClusterClient = deploySessionCluster(yarnClusterDescriptor);

	try {
		final JobID jobId = submitJob(restClusterClient);
		final ApplicationId id = restClusterClient.getClusterId();

		waitUntilJobIsRunning(restClusterClient, jobId);

		killApplicationMaster(yarnClusterDescriptor.getYarnSessionClusterEntrypoint());
		waitForApplicationAttempt(id, 2);

		waitForJobTermination(restClusterClient, jobId);

		killApplicationAndWait(id);
	} finally {
		restClusterClient.shutdown();
	}
}
 
Example 25
Source Project: flink   Source File: LocalStandaloneKafkaResource.java    License: Apache License 2.0
LocalStandaloneKafkaResource(final String kafkaVersion, @Nullable Path logBackupDirectory) {
	OperatingSystemRestriction.forbid(
		String.format("The %s relies on UNIX utils and shell scripts.", getClass().getSimpleName()),
		OperatingSystem.WINDOWS);
	this.kafkaVersion = kafkaVersion;
	this.logBackupDirectory = logBackupDirectory;
}
 
Example 26
Source Project: flink   Source File: FsNegativeRunningJobsRegistryTest.java    License: Apache License 2.0
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue("HDFS cluster cannot be start on Windows without extensions.", !OperatingSystem.isWindows());

	final File tempDir = TEMP_DIR.newFolder();

	Configuration hdConf = new Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();

	hdfsRootPath = new Path("hdfs://" + hdfsCluster.getURI().getHost() + ":"
			+ hdfsCluster.getNameNodePort() + "/");
}
 
Example 27
Source Project: flink   Source File: YarnFileStageTest.java    License: Apache License 2.0
@BeforeClass
public static void createHDFS() throws Exception {
	Assume.assumeTrue(!OperatingSystem.isWindows());

	final File tempDir = CLASS_TEMP_DIR.newFolder();

	org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
	hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
	hdfsCluster = builder.build();
	hdfsRootPath = new Path(hdfsCluster.getURI());
}