org.apache.flink.runtime.util.HadoopUtils Java Examples

The following examples show how to use org.apache.flink.runtime.util.HadoopUtils. They are drawn from open-source projects; the line above each example names the source file, the project it comes from, and its license.
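Taken together, the examples exercise three entry points of HadoopUtils: isMinHadoopVersion(int, int) and isMaxHadoopVersion(int, int) for gating code on the Hadoop version, and getHadoopConfiguration(Configuration) for turning Flink configuration into a Hadoop Configuration. Here is a minimal sketch of that surface (the printed strings and the queried config key are illustrative assumptions, not part of the API):

import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.util.HadoopUtils;

public class HadoopUtilsSketch {

	public static void main(String[] args) {
		// Version gating: isMinHadoopVersion(2, 7) holds for Hadoop 2.7 and above;
		// isMaxHadoopVersion(2, 7) appears to hold for versions strictly below 2.7,
		// since the tests below use the two methods as complementary gates.
		if (HadoopUtils.isMinHadoopVersion(2, 7)) {
			System.out.println("FileSystem#truncate is available on this Hadoop version.");
		}

		// Configuration bridging: resolve a Hadoop Configuration from the Flink
		// configuration, environment variables, and classpath defaults.
		org.apache.hadoop.conf.Configuration hadoopConf =
				HadoopUtils.getHadoopConfiguration(new Configuration());
		System.out.println(hadoopConf.get("fs.defaultFS", "<unset>"));
	}
}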
Example #1
Source File: HadoopRecoverableFsDataOutputStream.java    From flink with Apache License 2.0
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		throw new IllegalStateException("Truncation is not available in hadoop version < 2.7 , You are on Hadoop " + VersionInfo.getVersion());
	}

	if (truncateHandle != null) {
		try {
			return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
		}
		catch (InvocationTargetException e) {
			ExceptionUtils.rethrowIOException(e.getTargetException());
		}
		catch (Throwable t) {
			throw new IOException(
					"Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
							"This is most likely a dependency conflict or class loading problem.");
		}
	}
	else {
		throw new IllegalStateException("Truncation handle has not been initialized");
	}
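	// Unreachable in practice: rethrowIOException always rethrows, but the compiler
	// cannot prove that, so a trailing return statement is still required.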
	return false;
}
 
Example #2
Source File: HadoopRecoverableFsDataOutputStream.java    From flink with Apache License 2.0
private static void ensureTruncateInitialized() throws FlinkRuntimeException {
	if (HadoopUtils.isMinHadoopVersion(2, 7) && truncateHandle == null) {
		Method truncateMethod;
		try {
			truncateMethod = FileSystem.class.getMethod("truncate", Path.class, long.class);
		}
		catch (NoSuchMethodException e) {
			throw new FlinkRuntimeException("Could not find a public truncate method on the Hadoop File System.");
		}

		if (!Modifier.isPublic(truncateMethod.getModifiers())) {
			throw new FlinkRuntimeException("The truncate method on the Hadoop File System is not public.");
		}

		truncateHandle = truncateMethod;
	}
}
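These two helpers are meant to be used together: resolve the reflective handle once, then invoke it. A hypothetical call site inside the same class (fs, targetFile, and expectedLength are illustrative names, not part of the original code):

ensureTruncateInitialized();                    // resolves FileSystem#truncate once
boolean complete = truncate(fs, targetFile, expectedLength);
if (!complete) {
	// HDFS may complete a truncate asynchronously; a caller would have to wait
	// until the file reaches the expected length before reopening it for append.
}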
 
Example #3
Source File: HadoopRecoverableWriter.java    From flink with Apache License 2.0
/**
 * Creates a new Recoverable writer.
 * @param fs The Hadoop file system on which the writer operates.
 */
public HadoopRecoverableWriter(org.apache.hadoop.fs.FileSystem fs) {
	this.fs = checkNotNull(fs);

	// This writer is only supported on a subset of file systems
	if (!"hdfs".equalsIgnoreCase(fs.getScheme())) {
		throw new UnsupportedOperationException(
				"Recoverable writers on Hadoop are only supported for HDFS");
	}

	// Part of the functionality depends on specific Hadoop versions. We check
	// these versions eagerly for better error messages.
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		LOG.warn("WARNING: You are running on hadoop version " + VersionInfo.getVersion() + "." +
				" If your RollingPolicy does not roll on every checkpoint/savepoint, the StreamingFileSink will throw an exception upon recovery.");
	}
}
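A hypothetical way to construct the writer; the URI and the FileSystem.get call are assumptions for illustration, not taken from the original class:

org.apache.hadoop.conf.Configuration hadoopConf =
		HadoopUtils.getHadoopConfiguration(new org.apache.flink.configuration.Configuration());
org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(
		java.net.URI.create("hdfs://namenode:8020/"), hadoopConf);
HadoopRecoverableWriter writer = new HadoopRecoverableWriter(fs); // throws for non-HDFS schemes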
 
Example #4
Source File: HadoopConfigLoadingTest.java    From flink with Apache License 2.0
@Test
public void loadFromLegacyConfigEntries() throws Exception {
	final String k1 = "shipmate";
	final String v1 = "smooth sailing";

	final String k2 = "pirate";
	final String v2 = "Arrg, yer scurvy dog!";

	final File file1 = tempFolder.newFile("core-site.xml");
	final File file2 = tempFolder.newFile("hdfs-site.xml");

	printConfig(file1, k1, v1);
	printConfig(file2, k2, v2);

	final Configuration cfg = new Configuration();
	cfg.setString(ConfigConstants.HDFS_DEFAULT_CONFIG, file1.getAbsolutePath());
	cfg.setString(ConfigConstants.HDFS_SITE_CONFIG, file2.getAbsolutePath());

	org.apache.hadoop.conf.Configuration hadoopConf = HadoopUtils.getHadoopConfiguration(cfg);

	// contains extra entries
	assertEquals(v1, hadoopConf.get(k1, null));
	assertEquals(v2, hadoopConf.get(k2, null));

	// also contains classpath defaults
	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
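The configuration-loading tests rely on a printConfig helper (and, later, a printConfigs variant taking a map of properties) that this page does not show. A plausible reconstruction, assuming the usual java.io classes, writes a minimal Hadoop-style XML file with a single property; treat it as an assumption rather than the original source:

private static void printConfig(File file, String key, String value) throws IOException {
	try (PrintStream out = new PrintStream(new FileOutputStream(file))) {
		out.println("<?xml version=\"1.0\"?>");
		out.println("<configuration>");
		out.println("\t<property>");
		out.println("\t\t<name>" + key + "</name>");
		out.println("\t\t<value>" + value + "</value>");
		out.println("\t</property>");
		out.println("</configuration>");
	}
}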
 
Example #5
Source File: ResourceInformationReflectorTest.java    From flink with Apache License 2.0
@Test
public void testSetAndGetExtendedResourcesWithoutYarnSupport() {
	assumeTrue(HadoopUtils.isMaxHadoopVersion(2, 10));

	final Resource resource = Resource.newInstance(100, 1);

	// Should do nothing without leading to failure.
	ResourceInformationReflector.INSTANCE.setResourceInformation(resource, RESOURCE_NAME, RESOURCE_VALUE);

	final Map<String, Long> externalResourcesResult = ResourceInformationReflector.INSTANCE.getExternalResources(resource);
	assertTrue(externalResourcesResult.isEmpty());
}
 
Example #6
Source File: ResourceInformationReflectorTest.java    From flink with Apache License 2.0
@Test
public void testDefaultTwoResourceTypeWithYarnSupport() {
	assumeTrue(HadoopUtils.isMinHadoopVersion(2, 10));

	final Resource resource = Resource.newInstance(100, 1);

	// make sure that Resource has at least two associated resources (cpu and memory)
	final Map<String, Long> resourcesResult = ResourceInformationReflector.INSTANCE.getAllResourceInfos(resource);
	assertThat(resourcesResult.size(), is(2));
}
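Note that this test and the previous one gate on complementary ranges: isMaxHadoopVersion(2, 10) appears to hold for Hadoop versions strictly below 2.10, while isMinHadoopVersion(2, 10) holds for 2.10 and above, so exactly one of the two tests runs against any given Hadoop dependency.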
 
Example #7
Source File: HadoopRecoverableWriter.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a new Recoverable writer.
 * @param fs The Hadoop file system on which the writer operates.
 */
public HadoopRecoverableWriter(org.apache.hadoop.fs.FileSystem fs) {
	this.fs = checkNotNull(fs);

	// This writer is only supported on a subset of file systems, and on
	// specific versions. We check these schemes and versions eagerly for
	// better error messages.
	if (!"hdfs".equalsIgnoreCase(fs.getScheme()) || !HadoopUtils.isMinHadoopVersion(2, 7)) {
		throw new UnsupportedOperationException(
				"Recoverable writers on Hadoop are only supported for HDFS and for Hadoop version 2.7 or newer");
	}
}
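Note how this variant differs from Example #3: here a pre-2.7 Hadoop version makes the constructor throw an UnsupportedOperationException outright, whereas the variant in Example #3 only logs a warning and defers any failure to recovery time.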
 
Example #8
Source File: HadoopConfigLoadingTest.java    From flink with Apache License 2.0
@Test
public void loadFromClasspathByDefault() {
	org.apache.hadoop.conf.Configuration hadoopConf =
			HadoopUtils.getHadoopConfiguration(new Configuration());

	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
 
Example #9
Source File: HadoopConfigLoadingTest.java    From flink with Apache License 2.0
@Test
public void loadFromHadoopConfEntry() throws Exception {
	final String k1 = "singing?";
	final String v1 = "rain!";

	final String k2 = "dancing?";
	final String v2 = "shower!";

	final File confDir = tempFolder.newFolder();

	final File file1 = new File(confDir, "core-site.xml");
	final File file2 = new File(confDir, "hdfs-site.xml");

	printConfig(file1, k1, v1);
	printConfig(file2, k2, v2);

	final Configuration cfg = new Configuration();
	cfg.setString(ConfigConstants.PATH_HADOOP_CONFIG, confDir.getAbsolutePath());

	org.apache.hadoop.conf.Configuration hadoopConf = HadoopUtils.getHadoopConfiguration(cfg);

	// contains extra entries
	assertEquals(v1, hadoopConf.get(k1, null));
	assertEquals(v2, hadoopConf.get(k2, null));

	// also contains classpath defaults
	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
 
Example #10
Source File: HadoopRecoverableWriter.java    From flink with Apache License 2.0
/**
 * Creates a new Recoverable writer.
 * @param fs The Hadoop file system on which the writer operates.
 */
public HadoopRecoverableWriter(org.apache.hadoop.fs.FileSystem fs) {
	this.fs = checkNotNull(fs);

	// This writer is only supported on a subset of file systems, and on
	// specific versions. We check these schemes and versions eagerly for
	// better error messages.
	if (!"hdfs".equalsIgnoreCase(fs.getScheme()) || !HadoopUtils.isMinHadoopVersion(2, 7)) {
		throw new UnsupportedOperationException(
				"Recoverable writers on Hadoop are only supported for HDFS and for Hadoop version 2.7 or newer");
	}
}
 
Example #11
Source File: HadoopConfigLoadingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void loadFromHadoopConfEntry() throws Exception {
	final String k1 = "singing?";
	final String v1 = "rain!";

	final String k2 = "dancing?";
	final String v2 = "shower!";

	final File confDir = tempFolder.newFolder();

	final File file1 = new File(confDir, "core-site.xml");
	final File file2 = new File(confDir, "hdfs-site.xml");

	printConfig(file1, k1, v1);
	printConfig(file2, k2, v2);

	final Configuration cfg = new Configuration();
	cfg.setString(ConfigConstants.PATH_HADOOP_CONFIG, confDir.getAbsolutePath());

	org.apache.hadoop.conf.Configuration hadoopConf = HadoopUtils.getHadoopConfiguration(cfg);

	// contains extra entries
	assertEquals(v1, hadoopConf.get(k1, null));
	assertEquals(v2, hadoopConf.get(k2, null));

	// also contains classpath defaults
	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
 
Example #12
Source File: HadoopConfigLoadingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void loadFromLegacyConfigEntries() throws Exception {
	final String k1 = "shipmate";
	final String v1 = "smooth sailing";

	final String k2 = "pirate";
	final String v2 = "Arrg, yer scurvy dog!";

	final File file1 = tempFolder.newFile("core-site.xml");
	final File file2 = tempFolder.newFile("hdfs-site.xml");

	printConfig(file1, k1, v1);
	printConfig(file2, k2, v2);

	final Configuration cfg = new Configuration();
	cfg.setString(ConfigConstants.HDFS_DEFAULT_CONFIG, file1.getAbsolutePath());
	cfg.setString(ConfigConstants.HDFS_SITE_CONFIG, file2.getAbsolutePath());

	org.apache.hadoop.conf.Configuration hadoopConf = HadoopUtils.getHadoopConfiguration(cfg);

	// contains extra entries
	assertEquals(v1, hadoopConf.get(k1, null));
	assertEquals(v2, hadoopConf.get(k2, null));

	// also contains classpath defaults
	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
 
Example #13
Source File: HadoopConfigLoadingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void loadFromClasspathByDefault() {
	org.apache.hadoop.conf.Configuration hadoopConf =
			HadoopUtils.getHadoopConfiguration(new Configuration());

	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
 
Example #14
Source File: HadoopRecoverableWriterOldHadoopWithNoTruncateSupportTest.java    From flink with Apache License 2.0
@BeforeClass
public static void testHadoopVersion() {
	Assume.assumeTrue(HadoopUtils.isMaxHadoopVersion(2, 7));
}
 
Example #15
Source File: TsFileOutputFormat.java    From incubator-iotdb with Apache License 2.0
@Override
public void configure(org.apache.flink.configuration.Configuration flinkConfiguration) {
	super.configure(flinkConfiguration);
	hadoopConf = HadoopUtils.getHadoopConfiguration(flinkConfiguration);
}
 
Example #16
Source File: HadoopConfigLoadingTest.java    From flink with Apache License 2.0
@Test
public void loadFromEnvVariables() throws Exception {
	final String k1 = "where?";
	final String v1 = "I'm on a boat";
	final String k2 = "when?";
	final String v2 = "midnight";
	final String k3 = "why?";
	final String v3 = "what do you think?";
	final String k4 = "which way?";
	final String v4 = "south, always south...";
	final String k5 = "how long?";
	final String v5 = "an eternity";
	final String k6 = "for real?";
	final String v6 = "quite so...";

	final File hadoopConfDir = tempFolder.newFolder();

	final File hadoopHome = tempFolder.newFolder();

	final File hadoopHomeConf = new File(hadoopHome, "conf");
	final File hadoopHomeEtc = new File(hadoopHome, "etc/hadoop");

	assertTrue(hadoopHomeConf.mkdirs());
	assertTrue(hadoopHomeEtc.mkdirs());

	final File file1 = new File(hadoopConfDir, "core-site.xml");
	final File file2 = new File(hadoopConfDir, "hdfs-site.xml");
	final File file3 = new File(hadoopHomeConf, "core-site.xml");
	final File file4 = new File(hadoopHomeConf, "hdfs-site.xml");
	final File file5 = new File(hadoopHomeEtc, "core-site.xml");
	final File file6 = new File(hadoopHomeEtc, "hdfs-site.xml");

	printConfig(file1, k1, v1);
	printConfig(file2, k2, v2);
	printConfig(file3, k3, v3);
	printConfig(file4, k4, v4);
	printConfig(file5, k5, v5);
	printConfig(file6, k6, v6);

	final org.apache.hadoop.conf.Configuration hadoopConf;

	final Map<String, String> originalEnv = System.getenv();
	final Map<String, String> newEnv = new HashMap<>(originalEnv);
	newEnv.put("HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath());
	newEnv.put("HADOOP_HOME", hadoopHome.getAbsolutePath());
	try {
		CommonTestUtils.setEnv(newEnv);
		hadoopConf = HadoopUtils.getHadoopConfiguration(new Configuration());
	}
	finally {
		CommonTestUtils.setEnv(originalEnv);
	}

	// contains extra entries
	assertEquals(v1, hadoopConf.get(k1, null));
	assertEquals(v2, hadoopConf.get(k2, null));
	assertEquals(v3, hadoopConf.get(k3, null));
	assertEquals(v4, hadoopConf.get(k4, null));
	assertEquals(v5, hadoopConf.get(k5, null));
	assertEquals(v6, hadoopConf.get(k6, null));

	// also contains classpath defaults
	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
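CommonTestUtils.setEnv is a Flink test utility that replaces the JVM's view of the process environment (reflectively, since Java offers no official API for this), which is why the original environment is captured up front and restored in the finally block.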
 
Example #17
Source File: HadoopConfigLoadingTest.java    From flink with Apache License 2.0
@Test
public void loadOverlappingConfig() throws Exception {
	final String k1 = "key1";
	final String k2 = "key2";
	final String k3 = "key3";
	final String k4 = "key4";
	final String k5 = "key5";

	final String v1 = "from HADOOP_CONF_DIR";
	final String v2 = "from Flink config `fs.hdfs.hadoopconf`";
	final String v3 = "from Flink config `fs.hdfs.hdfsdefault`";
	final String v4 = "from HADOOP_HOME/etc/hadoop";
	final String v5 = "from HADOOP_HOME/conf";

	final File hadoopConfDir = tempFolder.newFolder("hadoopConfDir");
	final File hadoopConfEntryDir = tempFolder.newFolder("hadoopConfEntryDir");
	final File legacyConfDir = tempFolder.newFolder("legacyConfDir");
	final File hadoopHome = tempFolder.newFolder("hadoopHome");

	final File hadoopHomeConf = new File(hadoopHome, "conf");
	final File hadoopHomeEtc = new File(hadoopHome, "etc/hadoop");

	assertTrue(hadoopHomeConf.mkdirs());
	assertTrue(hadoopHomeEtc.mkdirs());

	final File file1 = new File(hadoopConfDir, "core-site.xml");
	final File file2 = new File(hadoopConfEntryDir, "core-site.xml");
	final File file3 = new File(legacyConfDir, "core-site.xml");
	final File file4 = new File(hadoopHomeEtc, "core-site.xml");
	final File file5 = new File(hadoopHomeConf, "core-site.xml");

	printConfig(file1, k1, v1);

	Map<String, String> properties2 = new HashMap<>();
	properties2.put(k1, v2);
	properties2.put(k2, v2);
	printConfigs(file2, properties2);

	Map<String, String> properties3 = new HashMap<>();
	properties3.put(k1, v3);
	properties3.put(k2, v3);
	properties3.put(k3, v3);
	printConfigs(file3, properties3);

	Map<String, String> properties4 = new HashMap<>();
	properties4.put(k1, v4);
	properties4.put(k2, v4);
	properties4.put(k3, v4);
	properties4.put(k4, v4);
	printConfigs(file4, properties4);

	Map<String, String> properties5 = new HashMap<>();
	properties5.put(k1, v5);
	properties5.put(k2, v5);
	properties5.put(k3, v5);
	properties5.put(k4, v5);
	properties5.put(k5, v5);
	printConfigs(file5, properties5);

	final Configuration cfg = new Configuration();
	cfg.setString(ConfigConstants.PATH_HADOOP_CONFIG, hadoopConfEntryDir.getAbsolutePath());
	cfg.setString(ConfigConstants.HDFS_DEFAULT_CONFIG, file3.getAbsolutePath());

	final org.apache.hadoop.conf.Configuration hadoopConf;

	final Map<String, String> originalEnv = System.getenv();
	final Map<String, String> newEnv = new HashMap<>(originalEnv);
	newEnv.put("HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath());
	newEnv.put("HADOOP_HOME", hadoopHome.getAbsolutePath());
	try {
		CommonTestUtils.setEnv(newEnv);
		hadoopConf = HadoopUtils.getHadoopConfiguration(cfg);
	}
	finally {
		CommonTestUtils.setEnv(originalEnv);
	}

	// contains extra entries
	assertEquals(v1, hadoopConf.get(k1, null));
	assertEquals(v2, hadoopConf.get(k2, null));
	assertEquals(v3, hadoopConf.get(k3, null));
	assertEquals(v4, hadoopConf.get(k4, null));
	assertEquals(v5, hadoopConf.get(k5, null));

	// also contains classpath defaults
	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
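The assertions encode the precedence that getHadoopConfiguration applies when the same key appears in several sources, from highest to lowest: the HADOOP_CONF_DIR environment variable, the Flink option fs.hdfs.hadoopconf, the legacy option fs.hdfs.hdfsdefault, HADOOP_HOME/etc/hadoop, and finally HADOOP_HOME/conf.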
 
Example #18
Source File: HadoopRecoverableWriterTest.java    From flink with Apache License 2.0
@BeforeClass
public static void testHadoopVersion() {
	Assume.assumeTrue(HadoopUtils.isMinHadoopVersion(2, 7));
}
 
Example #19
Source File: TsFileInputFormat.java    From incubator-iotdb with Apache License 2.0
@Override
public void configure(Configuration flinkConfiguration) {
	super.configure(flinkConfiguration);
	hadoopConf = HadoopUtils.getHadoopConfiguration(flinkConfiguration);
}
 
Example #20
Source File: HadoopConfigLoadingTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void loadFromEnvVariables() throws Exception {
	final String k1 = "where?";
	final String v1 = "I'm on a boat";
	final String k2 = "when?";
	final String v2 = "midnight";
	final String k3 = "why?";
	final String v3 = "what do you think?";
	final String k4 = "which way?";
	final String v4 = "south, always south...";
	final String k5 = "how long?";
	final String v5 = "an eternity";
	final String k6 = "for real?";
	final String v6 = "quite so...";

	final File hadoopConfDir = tempFolder.newFolder();

	final File hadoopHome = tempFolder.newFolder();

	final File hadoopHomeConf = new File(hadoopHome, "conf");
	final File hadoopHomeEtc = new File(hadoopHome, "etc/hadoop");

	assertTrue(hadoopHomeConf.mkdirs());
	assertTrue(hadoopHomeEtc.mkdirs());

	final File file1 = new File(hadoopConfDir, "core-site.xml");
	final File file2 = new File(hadoopConfDir, "hdfs-site.xml");
	final File file3 = new File(hadoopHomeConf, "core-site.xml");
	final File file4 = new File(hadoopHomeConf, "hdfs-site.xml");
	final File file5 = new File(hadoopHomeEtc, "core-site.xml");
	final File file6 = new File(hadoopHomeEtc, "hdfs-site.xml");

	printConfig(file1, k1, v1);
	printConfig(file2, k2, v2);
	printConfig(file3, k3, v3);
	printConfig(file4, k4, v4);
	printConfig(file5, k5, v5);
	printConfig(file6, k6, v6);

	final org.apache.hadoop.conf.Configuration hadoopConf;

	final Map<String, String> originalEnv = System.getenv();
	final Map<String, String> newEnv = new HashMap<>(originalEnv);
	newEnv.put("HADOOP_CONF_DIR", hadoopConfDir.getAbsolutePath());
	newEnv.put("HADOOP_HOME", hadoopHome.getAbsolutePath());
	try {
		CommonTestUtils.setEnv(newEnv);
		hadoopConf = HadoopUtils.getHadoopConfiguration(new Configuration());
	}
	finally {
		CommonTestUtils.setEnv(originalEnv);
	}

	// contains extra entries
	assertEquals(v1, hadoopConf.get(k1, null));
	assertEquals(v2, hadoopConf.get(k2, null));
	assertEquals(v3, hadoopConf.get(k3, null));
	assertEquals(v4, hadoopConf.get(k4, null));
	assertEquals(v5, hadoopConf.get(k5, null));
	assertEquals(v6, hadoopConf.get(k6, null));

	// also contains classpath defaults
	assertEquals(IN_CP_CONFIG_VALUE, hadoopConf.get(IN_CP_CONFIG_KEY, null));
}
 
Example #21
Source File: HadoopRecoverableWriterTest.java    From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void testHadoopVersion() {
	Assume.assumeTrue(HadoopUtils.isMinHadoopVersion(2, 7));
}