Java Code Examples for org.apache.hadoop.conf.Configuration#unset()

The following examples show how to use org.apache.hadoop.conf.Configuration#unset(). These examples are extracted from open-source projects. You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also want to check out the right sidebar, which shows the related API usage.
Example 1
Source Project: hadoop   File: TestDFSUtil.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testEncryptionProbe() throws Throwable {
  // DFSUtil.isHDFSEncryptionEnabled() must report false when the key
  // provider URI is absent, empty, or whitespace-only, and true for any
  // non-blank value.
  Configuration conf = new Configuration(false);
  // Start from a clean slate: no provider key at all.
  conf.unset(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI);
  assertFalse("encryption enabled on no provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));
  // Empty string is not a usable provider URI.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
  assertFalse("encryption enabled on empty provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));
  // Neither is a whitespace-only value.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "\n\t\n");
  assertFalse("encryption enabled on whitespace provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));
  // Any non-blank URI is treated as a configured provider.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "http://hadoop.apache.org");
  assertTrue("encryption disabled on valid provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));

}
 
Example 2
Source Project: hadoop   File: TestNameNodeRpcServer.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testNamenodeRpcBindAny() throws IOException {
  Configuration conf = new HdfsConfiguration();

  // The name node in MiniDFSCluster only binds to 127.0.0.1.
  // We can set the bind address to 0.0.0.0 to make it listen
  // to all interfaces.
  conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    // The client RPC listener should reflect the wildcard bind host.
    assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
        .getClientRpcServer().getListenerAddress().getHostName());
  } finally {
    // Shut the cluster down even if the assertion fails.
    if (cluster != null) {
      cluster.shutdown();
    }
    // Reset the config
    conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
  }
}
 
Example 3
Source Project: hadoop   File: TestEncryptionZones.java    License: Apache License 2.0 6 votes vote down vote up
@Test(timeout = 120000)
public void testCreateEZWithNoProvider() throws Exception {
  // Unset the key provider and make sure EZ ops don't work
  final Configuration clusterConf = cluster.getConfiguration(0);
  clusterConf.unset(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI);
  // Restart so the NameNode picks up the provider-less configuration.
  cluster.restartNameNode(true);
  cluster.waitActive();
  final Path zone1 = new Path("/zone1");
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  try {
    // Creating an encryption zone must fail without a key provider.
    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
    fail("expected exception");
  } catch (IOException e) {
    assertExceptionContains("since no key provider is available", e);
  }
  // Restore the provider key so later operations/tests have one.
  final Path jksPath = new Path(testRootDir.toString(), "test.jks");
  clusterConf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri()
  );
  // Try listing EZs as well
  assertNumZones(0);
}
 
Example 4
@Test(timeout = 5000)
public void testTaskSpecificLogOptions() {
  // Verify that task-specific log properties are recognized only while the
  // log-level key is set, and that launch opts stay unmodified throughout.
  Configuration conf = new Configuration(false);
  conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LAUNCH_CMD_OPTS_LIST, "v1[0,2,5]");
  TaskSpecificLaunchCmdOption options;

  // Log level with a per-package override: two log params expected.
  conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL, "DEBUG;org.apache.tez=INFO");
  options = new TaskSpecificLaunchCmdOption(conf);
  assertTrue(options.hasModifiedLogProperties());
  assertFalse(options.hasModifiedTaskLaunchOpts());
  assertEquals(2, options.getTaskSpecificLogParams().length);

  // After unsetting the key, no modified log properties should be reported.
  conf.unset(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL);
  options = new TaskSpecificLaunchCmdOption(conf);
  assertFalse(options.hasModifiedLogProperties());
  assertFalse(options.hasModifiedTaskLaunchOpts());

  // A plain level with no override yields a single log param.
  conf.set(TezConfiguration.TEZ_TASK_SPECIFIC_LOG_LEVEL, "DEBUG");
  options = new TaskSpecificLaunchCmdOption(conf);
  assertTrue(options.hasModifiedLogProperties());
  assertFalse(options.hasModifiedTaskLaunchOpts());
  assertEquals(1, options.getTaskSpecificLogParams().length);
}
 
Example 5
Source Project: hadoop   File: TestSecurityUtil.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testGetAuthenticationMethod() {
  Configuration conf = new Configuration();
  // default is simple
  conf.unset(HADOOP_SECURITY_AUTHENTICATION);
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
  // simple
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
  assertEquals(SIMPLE, SecurityUtil.getAuthenticationMethod(conf));
  // kerberos
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  assertEquals(KERBEROS, SecurityUtil.getAuthenticationMethod(conf));
  // bad value
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kaboom");
  String error = null;
  try {
    SecurityUtil.getAuthenticationMethod(conf);
  } catch (Exception e) {
    // Capture the full toString so the exact type and message are checked.
    error = e.toString();
  }
  assertEquals("java.lang.IllegalArgumentException: " +
               "Invalid attribute value for " +
               HADOOP_SECURITY_AUTHENTICATION + " of kaboom", error);
}
 
Example 6
Source Project: big-c   File: TestDFSUtil.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testEncryptionProbe() throws Throwable {
  // DFSUtil.isHDFSEncryptionEnabled() must report false when the key
  // provider URI is absent, empty, or whitespace-only, and true for any
  // non-blank value.
  Configuration conf = new Configuration(false);
  // Start from a clean slate: no provider key at all.
  conf.unset(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI);
  assertFalse("encryption enabled on no provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));
  // Empty string is not a usable provider URI.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
  assertFalse("encryption enabled on empty provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));
  // Neither is a whitespace-only value.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "\n\t\n");
  assertFalse("encryption enabled on whitespace provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));
  // Any non-blank URI is treated as a configured provider.
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "http://hadoop.apache.org");
  assertTrue("encryption disabled on valid provider key",
      DFSUtil.isHDFSEncryptionEnabled(conf));

}
 
Example 7
Source Project: Bats   File: StramClientUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Get the RM webapp address. The configuration that is passed in should not be used by other threads while this
 * method is executing.
 * @param conf The configuration
 * @param sslEnabled Whether SSL is enabled or not
 * @param rmId If HA is enabled the resource manager id
 * @return The webapp socket address
 */
public static InetSocketAddress getRMWebAddress(Configuration conf, boolean sslEnabled, String rmId)
{
  boolean isHA = (rmId != null);
  if (isHA) {
    // NOTE(review): getYarnConfiguration presumably returns a derived copy,
    // so the RM_HA_ID set/unset below only touches that copy — confirm;
    // otherwise the javadoc's single-thread warning is load-bearing.
    conf = getYarnConfiguration(conf);
    conf.set(ConfigUtils.RM_HA_ID, rmId);
  }
  InetSocketAddress address;
  // Pick the HTTPS or HTTP webapp key depending on SSL.
  if (sslEnabled) {
    address = conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT);
  } else {
    address = conf.getSocketAddr(YarnConfiguration.RM_WEBAPP_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_PORT);
  }
  if (isHA) {
    // Remove the temporary HA id now that the address has been resolved.
    conf.unset(ConfigUtils.RM_HA_ID);
  }
  LOG.info("rm webapp address setting {}", address);
  LOG.debug("rm setting sources {}", conf.getPropertySources(YarnConfiguration.RM_WEBAPP_ADDRESS));
  InetSocketAddress resolvedSocketAddress = NetUtils.getConnectAddress(address);
  InetAddress resolved = resolvedSocketAddress.getAddress();
  // A wildcard/loopback address is useless to remote clients; substitute the
  // local canonical host name while keeping the original port.
  if (resolved == null || resolved.isAnyLocalAddress() || resolved.isLoopbackAddress()) {
    try {
      resolvedSocketAddress = InetSocketAddress.createUnresolved(InetAddress.getLocalHost().getCanonicalHostName(), address.getPort());
    } catch (UnknownHostException e) {
      //Ignore and fallback.
    }
  }
  return resolvedSocketAddress;
}
 
Example 8
/**
 * Stores the maximum number of input splits in the configuration, or clears
 * the setting entirely when {@code maxSplits} is null.
 *
 * @param implementingClass class whose name scopes the configuration key
 * @param config configuration to update; modified in place
 * @param maxSplits maximum split count, or null to remove the limit
 */
public static void setMaximumSplitCount(
    final Class<?> implementingClass,
    final Configuration config,
    final Integer maxSplits) {
  final String key = enumToConfKey(implementingClass, InputConfig.MAX_SPLITS);
  if (maxSplits == null) {
    config.unset(key);
  } else {
    config.set(key, String.valueOf(maxSplits));
  }
}
 
Example 9
Source Project: hadoop   File: NameNode.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Clone the supplied configuration but remove the shared edits dirs.
 *
 * @param conf Supplies the original configuration.
 * @return Cloned configuration without the shared edit dirs.
 * @throws IOException on failure to generate the configuration.
 */
private static Configuration getConfigurationWithoutSharedEdits(
    Configuration conf)
    throws IOException {
  // Resolve the effective edits directories, excluding the shared ones.
  List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);

  // Clone, drop the shared-edits key, and pin the edits dirs to the
  // comma-joined non-shared list computed above.
  Configuration clone = new Configuration(conf);
  clone.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  clone.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      Joiner.on(",").join(editsDirs));
  return clone;
}
 
Example 10
/**
 * Stores serialized data-type query options in the configuration, or clears
 * any previously stored value when {@code queryOptions} is null.
 *
 * @param implementingClass class whose name scopes the configuration key
 * @param config configuration to update; modified in place
 * @param queryOptions options to persist, or null to remove the entry
 */
public static void setDataTypeQueryOptions(
    final Class<?> implementingClass,
    final Configuration config,
    final DataTypeQueryOptions<?> queryOptions) {
  final String key =
      enumToConfKey(implementingClass, InputConfig.DATA_TYPE_QUERY_OPTIONS);
  if (queryOptions == null) {
    config.unset(key);
  } else {
    // Persist the options as a string-encoded binary blob.
    final byte[] binary = PersistenceUtils.toBinary(queryOptions);
    config.set(key, ByteArrayUtils.byteArrayToString(binary));
  }
}
 
Example 11
@Test
public void testTezBaseStagingPath() throws Exception {
  Configuration localConf = new Configuration();
  // Check if default works with localFS
  localConf.unset(TezConfiguration.TEZ_AM_STAGING_DIR);
  localConf.set("fs.defaultFS", "file:///");
  Path stageDir = TezCommonUtils.getTezBaseStagingPath(localConf);
  Assert.assertEquals(stageDir.toString(), "file:" + TezConfiguration.TEZ_AM_STAGING_DIR_DEFAULT);

  // check if user set something, indeed works
  // NOTE(review): this half intentionally uses the outer `conf` field (with
  // STAGE_DIR / RESOLVED_STAGE_DIR defined elsewhere in the class), not
  // localConf — confirm against the enclosing test class.
  conf.set(TezConfiguration.TEZ_AM_STAGING_DIR, STAGE_DIR);
  stageDir = TezCommonUtils.getTezBaseStagingPath(conf);
  Assert.assertEquals(stageDir.toString(), RESOLVED_STAGE_DIR);
}
 
Example 12
Source Project: big-c   File: TestNameNodeRecovery.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Create a test configuration that will exercise the initializeGenericKeys
 * code path.  This is a regression test for HDFS-4279.
 *
 * Configures an HA-style namespace ("ns1" with "nn1,nn2") with per-namenode
 * suffixed name/checkpoint directories, then removes the generic
 * (unsuffixed) keys so startup must resolve them via the suffixed variants.
 *
 * @param conf configuration to populate; modified in place
 * @throws IOException if a directory path cannot be canonicalized
 */
static void setupRecoveryTestConf(Configuration conf) throws IOException {
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
    "ns1"), "nn1,nn2");
  String baseDir = System.getProperty(
      MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
  File nameDir = new File(baseDir, "nameR");
  File secondaryDir = new File(baseDir, "namesecondaryR");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
      DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
      nameDir.getCanonicalPath());
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
      DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
      secondaryDir.getCanonicalPath());
  // Drop the generic keys so only the suffixed variants remain in effect.
  conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
  recreateEmptyDir(nameDir);
  recreateEmptyDir(secondaryDir);
}

/** Delete {@code dir} if present, then recreate it empty, failing loudly. */
private static void recreateEmptyDir(File dir) {
  FileUtils.deleteQuietly(dir);
  if (!dir.mkdirs()) {
    throw new RuntimeException("failed to make directory " +
      dir.getAbsolutePath());
  }
}
 
Example 13
Source Project: hadoop   File: TestSSLFactory.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testNoTrustStore() throws Exception {
  // A server-mode SSLFactory should initialize successfully even when the
  // client-certificate requirement key has been removed (no trust store).
  Configuration conf = createConfiguration(false, false);
  conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
  SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
  try {
    sslFactory.init();
  } finally {
    // Always release the factory's resources, even if init() throws.
    sslFactory.destroy();
  }
}
 
Example 14
/**
 * Stores serialized index query options in the configuration, or clears any
 * previously stored value when {@code queryOptions} is null.
 *
 * @param implementingClass class whose name scopes the configuration key
 * @param config configuration to update; modified in place
 * @param queryOptions options to persist, or null to remove the entry
 */
public static void setIndexQueryOptions(
    final Class<?> implementingClass,
    final Configuration config,
    final IndexQueryOptions queryOptions) {
  final String key =
      enumToConfKey(implementingClass, InputConfig.INDEX_QUERY_OPTIONS);
  if (queryOptions == null) {
    config.unset(key);
  } else {
    // Persist the options as a string-encoded binary blob.
    final byte[] binary = PersistenceUtils.toBinary(queryOptions);
    config.set(key, ByteArrayUtils.byteArrayToString(binary));
  }
}
 
Example 15
/**
 * Test the specified task java heap options.
 *
 * Builds an "original" (trace job) configuration and a "simulated"
 * (default) configuration, runs GridmixJob.configureTaskJVMOptions, and
 * asserts the simulated configuration ends up with the expected map,
 * reduce, and generic task heap options.  A null option means "leave the
 * key unset".
 */
@SuppressWarnings("deprecation")
private void testJavaHeapOptions(String mapOptions, 
    String reduceOptions, String taskOptions, String defaultMapOptions, 
    String defaultReduceOptions, String defaultTaskOptions, 
    String expectedMapOptions, String expectedReduceOptions, 
    String expectedTaskOptions) throws Exception {
  // Simulated config carries the defaults GridMix would apply.
  Configuration simulatedConf = buildHeapOptionsConf(defaultMapOptions,
      defaultReduceOptions, defaultTaskOptions);
  // Original config carries the options from the trace job.
  Configuration originalConf = buildHeapOptionsConf(mapOptions,
      reduceOptions, taskOptions);

  // configure the task jvm's heap options
  GridmixJob.configureTaskJVMOptions(originalConf, simulatedConf);
  
  assertEquals("Map heap options mismatch!", expectedMapOptions, 
               simulatedConf.get(MRJobConfig.MAP_JAVA_OPTS));
  assertEquals("Reduce heap options mismatch!", expectedReduceOptions, 
               simulatedConf.get(MRJobConfig.REDUCE_JAVA_OPTS));
  assertEquals("Task heap options mismatch!", expectedTaskOptions, 
               simulatedConf.get(JobConf.MAPRED_TASK_JAVA_OPTS));
}

/**
 * Builds a Configuration whose map/reduce/task java-opts keys are first
 * cleared (they may leak in from default resources on the test classpath)
 * and then set only for the non-null arguments.
 */
@SuppressWarnings("deprecation")
private static Configuration buildHeapOptionsConf(String mapOpts,
    String reduceOpts, String taskOpts) {
  Configuration conf = new Configuration();
  // reset the configuration parameters
  conf.unset(MRJobConfig.MAP_JAVA_OPTS);
  conf.unset(MRJobConfig.REDUCE_JAVA_OPTS);
  conf.unset(JobConf.MAPRED_TASK_JAVA_OPTS);
  if (mapOpts != null) {
    conf.set(MRJobConfig.MAP_JAVA_OPTS, mapOpts);
  }
  if (reduceOpts != null) {
    conf.set(MRJobConfig.REDUCE_JAVA_OPTS, reduceOpts);
  }
  if (taskOpts != null) {
    conf.set(JobConf.MAPRED_TASK_JAVA_OPTS, taskOpts);
  }
  return conf;
}
 
Example 16
Source Project: Hadoop-BAM   File: BAMInputFormat.java    License: MIT License 4 votes vote down vote up
/**
 * Reset traversal parameters so that all reads are included.
 * @param conf the Hadoop configuration to set properties on
 */
public static void unsetTraversalParameters(Configuration conf) {
	// Removing every traversal key restores the default behaviour of
	// including all reads for jobs that use this configuration.
	for (String property : new String[] {
			BOUNDED_TRAVERSAL_PROPERTY,
			INTERVALS_PROPERTY,
			TRAVERSE_UNPLACED_UNMAPPED_PROPERTY }) {
		conf.unset(property);
	}
}
 
Example 17
Source Project: big-c   File: TestEncryptedTransfer.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Opens a FileSystem using a copy of {@code conf} with data-transfer
 * encryption switched off.
 *
 * @param conf base configuration; not modified
 * @return a FileSystem backed by the unencrypted configuration
 * @throws IOException if the file system cannot be created
 */
private static FileSystem getFileSystem(Configuration conf) throws IOException {
  // Work on a copy so the caller's configuration is left untouched.
  final Configuration plainConf = new Configuration(conf);
  plainConf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, false);
  plainConf.unset(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  return FileSystem.get(plainConf);
}
 
Example 18
Source Project: mrgeo   File: DataProviderFactoryTest.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Resets MrGeo properties and seeds the preferred data-provider settings
 * for a test scenario.
 *
 * For the Hadoop configuration (skipped when {@code conf} is null) and the
 * MrGeo properties, a null value clears the corresponding keys while a
 * non-null value sets all of them to it.
 *
 * @param conf Hadoop configuration to update, or null to leave untouched
 * @param confVal value for the Hadoop preferred-provider keys, or null to unset
 * @param mrgeoVal value for the MrGeo per-type preferred properties, or null to remove
 * @param defMrgeoVal value for the MrGeo default preferred property, or null to remove
 */
private void setupPreferred(Configuration conf, String confVal,
    String mrgeoVal, String defMrgeoVal)
{
  MrGeoProperties.resetProperties();
  if (conf != null)
  {
    if (confVal == null)
    {
      conf.unset(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME);
      conf.unset(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME);
      conf.unset(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME);
    }
    else
    {
      conf.set(DataProviderFactory.PREFERRED_ADHOC_PROVIDER_NAME, confVal);
      conf.set(DataProviderFactory.PREFERRED_MRSIMAGE_PROVIDER_NAME, confVal);
      conf.set(DataProviderFactory.PREFERRED_VECTOR_PROVIDER_NAME, confVal);
    }
  }

  Properties mp = MrGeoProperties.getInstance();

  if (mrgeoVal == null)
  {
    mp.remove(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME);
    mp.remove(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME);
    mp.remove(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME);
  }
  else
  {
    mp.setProperty(DataProviderFactory.PREFERRED_ADHOC_PROPERTYNAME, mrgeoVal);
    mp.setProperty(DataProviderFactory.PREFERRED_MRSIMAGE_PROPERTYNAME, mrgeoVal);
    mp.setProperty(DataProviderFactory.PREFERRED_VECTOR_PROPERTYNAME, mrgeoVal);
  }

  if (defMrgeoVal == null)
  {
    mp.remove(DataProviderFactory.PREFERRED_PROPERTYNAME);
  }
  else
  {
    mp.setProperty(DataProviderFactory.PREFERRED_PROPERTYNAME, defMrgeoVal);
  }

}
 
Example 19
Source Project: phoenix   File: IndexUpgradeToolTest.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Points {@code conf} at Phoenix's connectionless quorum and scrubs any
 * leftover ZooKeeper connection settings.
 *
 * @param conf configuration to update; modified in place
 */
private void setupConfForConnectionlessQuery(Configuration conf) {
    conf.set(HConstants.ZOOKEEPER_QUORUM, PhoenixRuntime.CONNECTIONLESS);
    // Remove stale ZK details so they cannot override the quorum above.
    for (String zkKey : new String[] {
        HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.ZOOKEEPER_ZNODE_PARENT}) {
        conf.unset(zkKey);
    }
}
 
Example 20
Source Project: big-c   File: TokenCache.java    License: Apache License 2.0 2 votes vote down vote up
/**
 * Remove jobtoken referrals which don't make sense in the context
 * of the task execution.
 *
 * Unsets only the binary-credentials referral key; other credential
 * configuration is left untouched.
 *
 * @param conf job configuration to scrub; modified in place
 */
public static void cleanUpTokenReferral(Configuration conf) {
  conf.unset(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
}