Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNode#initializeGenericKeys()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNode#initializeGenericKeys(). These examples are extracted from open source projects. You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: hadoop   File: DFSZKFailoverController.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a DFSZKFailoverController for the local NameNode described by
 * {@code conf}, resolving its nameservice and namenode IDs first.
 *
 * @param conf the configuration of this node
 * @return a failover controller targeting the local NameNode
 * @throws HadoopIllegalArgumentException if HA is not enabled or the local
 *         namenode ID cannot be determined
 */
public static DFSZKFailoverController create(Configuration conf) {
  // Fold security-related settings in before reading any HA keys.
  Configuration nnConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(nnConf, nameserviceId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }

  String namenodeId = HAUtil.getNameNodeId(nnConf, nameserviceId);
  if (namenodeId == null) {
    throw new HadoopIllegalArgumentException(
        "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.");
  }

  // Rewrite nameservice/namenode-suffixed keys into their generic forms so
  // downstream code can read them without the suffixes.
  NameNode.initializeGenericKeys(nnConf, nameserviceId, namenodeId);
  DFSUtil.setGenericConf(nnConf, nameserviceId, namenodeId, ZKFC_CONF_KEYS);

  NNHAServiceTarget target =
      new NNHAServiceTarget(nnConf, nameserviceId, namenodeId);
  return new DFSZKFailoverController(nnConf, target);
}
 
Example 2
Source Project: hadoop   File: TestHAConfiguration.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testGetOtherNNHttpAddress() throws IOException {
  // Non-local addresses so neither side accidentally matches a local host.
  Configuration haConf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
  haConf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns1");

  // Mirrors what the NN itself does before the StandbyCheckpointer exists.
  NameNode.initializeGenericKeys(haConf, "ns1", "nn1");

  // No HTTP address was configured, so the 0.0.0.0 default should be
  // replaced by the host taken from the RPC configuration above.
  StandbyCheckpointer sc = new StandbyCheckpointer(haConf, fsn);
  URL expected = new URL("http", "1.2.3.2",
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, "");
  assertEquals(expected, sc.getActiveNNAddress());
}
 
Example 3
Source Project: big-c   File: DFSZKFailoverController.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a DFSZKFailoverController for the local NameNode described by
 * {@code conf}, resolving its nameservice and namenode IDs first.
 *
 * @param conf the configuration of this node
 * @return a failover controller targeting the local NameNode
 * @throws HadoopIllegalArgumentException if HA is not enabled or the local
 *         namenode ID cannot be determined
 */
public static DFSZKFailoverController create(Configuration conf) {
  // Fold security-related settings in before reading any HA keys.
  Configuration nnConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(nnConf, nameserviceId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }

  String namenodeId = HAUtil.getNameNodeId(nnConf, nameserviceId);
  if (namenodeId == null) {
    throw new HadoopIllegalArgumentException(
        "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.");
  }

  // Rewrite nameservice/namenode-suffixed keys into their generic forms so
  // downstream code can read them without the suffixes.
  NameNode.initializeGenericKeys(nnConf, nameserviceId, namenodeId);
  DFSUtil.setGenericConf(nnConf, nameserviceId, namenodeId, ZKFC_CONF_KEYS);

  NNHAServiceTarget target =
      new NNHAServiceTarget(nnConf, nameserviceId, namenodeId);
  return new DFSZKFailoverController(nnConf, target);
}
 
Example 4
Source Project: big-c   File: TestHAConfiguration.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testGetOtherNNHttpAddress() throws IOException {
  // Non-local addresses so neither side accidentally matches a local host.
  Configuration haConf = getHAConf("ns1", "1.2.3.1", "1.2.3.2");
  haConf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "ns1");

  // Mirrors what the NN itself does before the StandbyCheckpointer exists.
  NameNode.initializeGenericKeys(haConf, "ns1", "nn1");

  // No HTTP address was configured, so the 0.0.0.0 default should be
  // replaced by the host taken from the RPC configuration above.
  StandbyCheckpointer sc = new StandbyCheckpointer(haConf, fsn);
  URL expected = new URL("http", "1.2.3.2",
      DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, "");
  assertEquals(expected, sc.getActiveNNAddress());
}
 
Example 5
Source Project: hadoop   File: BootstrapStandby.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Parses this node's configuration, verifies that HA with shared edits is
 * enabled, and locates the other NameNode of the HA pair.
 *
 * Side effects: populates the instance fields nsId, nnId, otherNNId,
 * otherIpcAddr, otherHttpAddr, dirsToFormat, editUrisToFormat and
 * sharedEditsUris.
 *
 * @throws IOException if the other node's info-server address cannot be
 *         resolved to a URL
 * @throws HadoopIllegalArgumentException if HA or shared edits storage is
 *         not enabled for this namenode
 */
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  // Resolve nameservice/namenode-suffixed keys into their generic forms so
  // the reads below see this node's effective values.
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  // A wildcard host or port 0 cannot be dialed, so fail fast here.
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  // The IPC host is supplied as the default host for the info server in
  // case no explicit HTTP host is configured.
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  // Local storage locations used later by the bootstrap process.
  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Example 6
Source Project: hadoop   File: HAUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a Configuration object describing the peer NameNode of an HA pair.
 *
 * @param myConf the configuration of this node
 * @return the configuration of the other node in an HA setup
 */
public static Configuration getConfForOtherNode(
    Configuration myConf) {
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(myConf);
  String peerNnId = getNameNodeIdOfOtherNode(myConf, nameserviceId);

  // Copy this node's configuration and re-resolve the generic keys against
  // the peer's namenode ID so the peer's addresses become effective.
  Configuration peerConf = new Configuration(myConf);
  NameNode.initializeGenericKeys(peerConf, nameserviceId, peerNnId);
  return peerConf;
}
 
Example 7
Source Project: hadoop   File: TestDFSUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Ensure that fs.defaultFS is set in the configuration even if neither HA
 * nor Federation is enabled.
 *
 * Regression test for HDFS-3351.
 */
@Test
public void testConfModificationNoFederationOrHa() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  // Neither a nameservice nor a namenode ID is configured here.
  String nameserviceId = null;
  String namenodeId = null;

  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
  assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));

  NameNode.initializeGenericKeys(conf, nameserviceId, namenodeId);
  assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
}
 
Example 8
Source Project: big-c   File: BootstrapStandby.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Parses this node's configuration, verifies that HA with shared edits is
 * enabled, and locates the other NameNode of the HA pair.
 *
 * Side effects: populates the instance fields nsId, nnId, otherNNId,
 * otherIpcAddr, otherHttpAddr, dirsToFormat, editUrisToFormat and
 * sharedEditsUris.
 *
 * @throws IOException if the other node's info-server address cannot be
 *         resolved to a URL
 * @throws HadoopIllegalArgumentException if HA or shared edits storage is
 *         not enabled for this namenode
 */
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  // Resolve nameservice/namenode-suffixed keys into their generic forms so
  // the reads below see this node's effective values.
  NameNode.initializeGenericKeys(conf, nsId, nnId);

  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException(
      "Shared edits storage is not enabled for this namenode.");
  }
  
  Configuration otherNode = HAUtil.getConfForOtherNode(conf);
  otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
  otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
  // A wildcard host or port 0 cannot be dialed, so fail fast here.
  Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
      !otherIpcAddr.getAddress().isAnyLocalAddress(),
      "Could not determine valid IPC address for other NameNode (%s)" +
      ", got: %s", otherNNId, otherIpcAddr);

  final String scheme = DFSUtil.getHttpClientScheme(conf);
  // The IPC host is supplied as the default host for the info server in
  // case no explicit HTTP host is configured.
  otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
      otherIpcAddr.getHostName(), otherNode, scheme).toURL();

  // Local storage locations used later by the bootstrap process.
  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
      conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
 
Example 9
Source Project: big-c   File: HAUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a Configuration object describing the peer NameNode of an HA pair.
 *
 * @param myConf the configuration of this node
 * @return the configuration of the other node in an HA setup
 */
public static Configuration getConfForOtherNode(
    Configuration myConf) {
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(myConf);
  String peerNnId = getNameNodeIdOfOtherNode(myConf, nameserviceId);

  // Copy this node's configuration and re-resolve the generic keys against
  // the peer's namenode ID so the peer's addresses become effective.
  Configuration peerConf = new Configuration(myConf);
  NameNode.initializeGenericKeys(peerConf, nameserviceId, peerNnId);
  return peerConf;
}
 
Example 10
Source Project: big-c   File: TestDFSUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Ensure that fs.defaultFS is set in the configuration even if neither HA
 * nor Federation is enabled.
 *
 * Regression test for HDFS-3351.
 */
@Test
public void testConfModificationNoFederationOrHa() {
  final HdfsConfiguration conf = new HdfsConfiguration();
  // Neither a nameservice nor a namenode ID is configured here.
  String nameserviceId = null;
  String namenodeId = null;

  conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
  assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));

  NameNode.initializeGenericKeys(conf, nameserviceId, namenodeId);
  assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
}