Java Code Examples for org.apache.hadoop.hdfs.server.datanode.DataNode#createDataNode()

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DataNode#createDataNode(). The source project and license are noted above each example.
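For orientation, the pattern shared by all of the examples below is: build a Configuration, point dfs.datanode.data.dir at local storage, call DataNode.createDataNode(), and shut the node down when finished. Here is a minimal, self-contained sketch of that pattern; the storage path is illustrative, and it assumes a NameNode is already reachable via the fs.defaultFS setting on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class CreateDataNodeSketch {
  public static void main(String[] args) throws Exception {
    // Assumes core-site.xml/hdfs-site.xml on the classpath point
    // fs.defaultFS at a running NameNode.
    Configuration conf = new HdfsConfiguration();
    // Illustrative local storage directory; any writable path works.
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, "/tmp/dn-data");
    DataNode dn = null;
    try {
      // Instantiate and start the datanode daemon (no secure resources).
      dn = DataNode.createDataNode(new String[] {}, conf);
    } finally {
      if (dn != null) {
        dn.shutdown();
      }
    }
  }
}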
Example 1
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port 
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop,
    boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  Configuration newconf = new HdfsConfiguration(conf); // save cloned config
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, 
        addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY,
        addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort); 
  }
  DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  dataNodes.add(new DataNodeProperties(
      newDn, newconf, args, secureResources, newDn.getIpcPort()));
  numDataNodes++;
  return true;
}
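A hedged sketch of how a test might exercise restartDataNode(); the stopDataNode()/waitActive() pairing is standard MiniDFSCluster usage, but this exact fragment is an assumption, not taken from the source file.

// Hypothetical test fragment: restart datanode 0 on its original ports
// so clients holding cached block locations can reconnect.
MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
assertTrue(cluster.restartDataNode(dnprop, true /* keepPort */));
cluster.waitActive();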
 
Example 2
Source File: TestDatanodeConfig.java    From hadoop with Apache License 2.0
/**
 * Test that a data-node does not start if configuration specifies
 * incorrect URI scheme in data directory.
 * Test that a data-node starts if data directory is specified as
 * URI = "file:///path" or as a non URI path.
 */
@Test
public void testDataDirectories() throws IOException {
  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  // 1. Test an unsupported scheme. Only "file:" is supported.
  String dnDir = makeURI("shv", null, fileAsURI(dataDir).getPath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dnDir);
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
    fail();
  } catch(Exception e) {
    // expecting exception here
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
  }
  assertNull("Data-node startup should have failed.", dn);

  // 2. Test "file:" schema and no schema (path-only). Both should work.
  String dnDir1 = fileAsURI(dataDir).toString() + "1";
  String dnDir2 = makeURI("file", "localhost",
                  fileAsURI(dataDir).getPath() + "2");
  String dnDir3 = dataDir.getAbsolutePath() + "3";
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
              dnDir1 + "," + dnDir2 + "," + dnDir3);
  try {
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    assertTrue("Data-node should startup.", cluster.isDataNodeUp());
  } finally {
    if (cluster != null) {
      cluster.shutdownDataNodes();
    }
  }
}
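The makeURI() helper is defined elsewhere in TestDatanodeConfig and is not shown here; a plausible minimal reconstruction, inferred from its call sites rather than copied from the test, is below. fileAsURI() is the static helper from org.apache.hadoop.hdfs.server.common.Util that converts a local File into a file: URI.

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

// Hypothetical reconstruction: assemble "scheme://host/path" for the test.
private static String makeURI(String scheme, String host, String path)
    throws IOException {
  try {
    return new URI(scheme, host, path, null).toString();
  } catch (URISyntaxException e) {
    throw new IOException("Cannot create URI from " + scheme + ", " + host
        + ", " + path, e);
  }
}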
 
Example 3
Source File: TestHDFSServerPorts.java    From hadoop with Apache License 2.0
/**
 * Start the datanode.
 */
public DataNode startDataNode(int index, Configuration config) 
throws IOException {
  File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index);
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());

  String[] args = new String[] {};
  // createDataNode() will update the config with the ports the datanode bound to
  return DataNode.createDataNode(args, config);
}
 
Example 4
Source File: TestHDFSServerPorts.java    From hadoop with Apache License 2.0
/**
 * Check whether the datanode can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
  } catch(IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  } finally {
    if(dn != null) dn.shutdown();
  }
  return true;
}
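TestHDFSServerPorts uses this helper to verify port-conflict behavior. A hedged sketch of the calling pattern follows; nameNodeAddr is a hypothetical variable holding an address already bound by a running NameNode, and the assertions are assumptions about the surrounding test, not copied from it.

// Hypothetical caller: a datanode must refuse to start on a taken port.
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, nameNodeAddr); // already bound
assertFalse(canStartDataNode(conf));

// With ephemeral ports (0) every bind succeeds and the helper returns true.
conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0");
assertTrue(canStartDataNode(conf));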
 
Example 5
Source File: TestHDFSServerPorts.java    From RDFS with Apache License 2.0
/**
 * Start the data-node.
 */
public DataNode startDataNode(int index, Configuration config) 
throws IOException {
  String dataDir = System.getProperty("test.build.data");
  File dataNodeDir = new File(dataDir, "data-" + index);
  config.set("dfs.data.dir", dataNodeDir.getPath());

  String[] args = new String[] {};
  // createDataNode() will update the config with the ports the datanode bound to
  return DataNode.createDataNode(args, config);
}
 
Example 6
Source File: TestHDFSServerPorts.java    From RDFS with Apache License 2.0
/**
 * Check whether the data-node can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
  } catch(IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  }
  dn.shutdown();
  return true;
}
 
Example 7
Source File: TestDatanodeConfig.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testMemlockLimit() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  final long memlockLimit =
      NativeIO.POSIX.getCacheManipulator().getMemlockLimit();

  // Can't increase the memlock limit past the maximum.
  assumeTrue(memlockLimit != Long.MAX_VALUE);

  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
    makeURI("file", null, fileAsURI(dataDir).getPath()));
  long prevLimit = conf.
      getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
  DataNode dn = null;
  try {
    // Try starting the DN with limit configured to the ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit);
    dn = DataNode.createDataNode(new String[]{},  conf);
    dn.shutdown();
    dn = null;
    // Try starting the DN with a limit > ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit+1);
    try {
      dn = DataNode.createDataNode(new String[]{}, conf);
    } catch (RuntimeException e) {
      GenericTestUtils.assertExceptionContains(
          "more than the datanode's available RLIMIT_MEMLOCK", e);
    }
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        prevLimit);
  }
}
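For reference, the limit the test compares against can be probed directly; this fragment uses only calls that already appear in the test above.

// Query the effective RLIMIT_MEMLOCK through Hadoop's native bindings.
// getMemlockLimit() reports Long.MAX_VALUE when the limit is unlimited,
// which is why the test skips itself in that case.
if (NativeIO.isAvailable()) {
  long memlock = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
  System.out.println("RLIMIT_MEMLOCK = " + memlock + " bytes");
}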
 
Example 8
Source File: MiniDFSCluster.java    From RDFS with Apache License 2.0
/**
 * Add another cluster to the current cluster and start it. The datanodes'
 * configurations are refreshed so that they register with the new namenodes.
 */
public void addCluster(MiniDFSCluster cluster, boolean format)
    throws IOException, InterruptedException {
  if(!federation || !cluster.federation) {
    throw new IOException("Cannot handle non-federated cluster");
  }
  if (cluster.dataNodes.size() > this.dataNodes.size()) {
    throw new IOException("Cannot merge: new cluster has more datanodes the old one.");
  }
  LOG.info("Shutdown both clusters");
  this.shutdown(false);
  cluster.shutdown(false);
  this.numDataNodes = this.dataNodes.size();
  int nnIndex = nameNodes.length;
  int numNameNodes = nameNodes.length + cluster.nameNodes.length;
  NameNodeInfo[] newlist = new NameNodeInfo[numNameNodes];
  System.arraycopy(nameNodes, 0, newlist, 0, nameNodes.length);
  System.arraycopy(cluster.nameNodes, 0, newlist, nameNodes.length, 
      cluster.nameNodes.length);
  nameNodes = newlist;
  String newNameserviceIds = cluster.conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES);
  String nameserviceIds = conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES);
  nameserviceIds += "," + newNameserviceIds;
  conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES, nameserviceIds);
  int i;
  for (i = 0; i < nameNodes.length; i++) {
    NameNodeInfo nni = nameNodes[i];
    String nameserviceId = nni.conf.get(FSConstants.DFS_FEDERATION_NAMESERVICE_ID);
    initFederatedNamenodeAddress(nni.conf, nameserviceId, 0);
    if (i < nnIndex) {
      // Start with upgrade
      createFederatedNameNode(i, nni.conf, numDataNodes, false, format,
          StartupOption.UPGRADE, nameserviceId);
    } else {
      // Start with regular
      createFederatedNameNode(i, nni.conf, numDataNodes, false, format,
          null, nameserviceId);
    }
    for (int dnIndex = 0; dnIndex < dataNodes.size(); dnIndex++) {
      Configuration dstConf = dataNodes.get(dnIndex).conf;
      if (i >= nnIndex) {
        String dataStr = cluster.dataNodes.get(dnIndex).conf.get("dfs.data.dir");
        dstConf.set("dfs.merge.data.dir." + nameserviceId, dataStr);
      }
      String key = DFSUtil.getNameServiceIdKey(NameNode.DATANODE_PROTOCOL_ADDRESS, 
          nameserviceId);
      dstConf.set(key, nni.conf.get(key));
    }
  }
  // Restart the datanodes so they register with the merged set of namenodes
  for (i = 0; i < dataNodes.size(); i++) {
    DataNodeProperties dn = dataNodes.get(i);
    dn.conf.set(FSConstants.DFS_FEDERATION_NAMESERVICES, nameserviceIds);
    dn.datanode = DataNode.createDataNode(dn.dnArgs, dn.conf);
  }
  waitClusterUp();
}