Java Code Examples for org.apache.hadoop.hdfs.server.datanode.DataNode#shutdown()

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DataNode#shutdown(). The source file, originating project, and license are noted above each example.
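Before the individual examples, here is a minimal sketch of the pattern most of them follow: create a DataNode, then guarantee that shutdown() runs in a finally block so the node's IPC server and data-transfer threads are released even when the surrounding test fails. This sketch is illustrative and not taken from any of the projects below; the bare Configuration is an assumption.

// Illustrative sketch only: start a DataNode with empty arguments and
// release its resources even if the body throws.
Configuration conf = new Configuration();
DataNode dn = null;
try {
  dn = DataNode.createDataNode(new String[]{}, conf);
  // ... exercise the DataNode under test here ...
} finally {
  if (dn != null) {
    dn.shutdown();
  }
}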
Example 1
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
public synchronized DataNodeProperties stopDataNode(int i) {
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  DataNodeProperties dnprop = dataNodes.remove(i);
  DataNode dn = dnprop.datanode;
  LOG.info("MiniDFSCluster Stopping DataNode " +
                     dn.getDisplayName() +
                     " from a total of " + (dataNodes.size() + 1) + 
                     " datanodes.");
  dn.shutdown();
  numDataNodes--;
  return dnprop;
}
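Note the return value: in the MiniDFSCluster API the DataNodeProperties handle can later be handed back to restartDataNode(...) to bring the same node up again. A brief usage sketch (variable names are illustrative):

// Stop DataNode 0, exercise the degraded cluster, then restart the same node.
DataNodeProperties dnprop = cluster.stopDataNode(0);
// ... assert replication or pipeline-recovery behavior here ...
cluster.restartDataNode(dnprop);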
 
Example 2
Source File: TestHDFSServerPorts.java    From big-c with Apache License 2.0
/**
 * Check whether the datanode can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
  } catch(IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  } finally {
    if(dn != null) dn.shutdown();
  }
  return true;
}
 
Example 3
Source File: MiniDFSCluster.java    From hadoop-gpu with Apache License 2.0
public DataNodeProperties stopDataNode(int i) {
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  DataNodeProperties dnprop = dataNodes.remove(i);
  DataNode dn = dnprop.datanode;
  System.out.println("MiniDFSCluster Stopping DataNode " + 
                     dn.dnRegistration.getName() +
                     " from a total of " + (dataNodes.size() + 1) + 
                     " datanodes.");
  dn.shutdown();
  numDataNodes--;
  return dnprop;
}
 
Example 4
Source File: TestHDFSServerPorts.java    From hadoop-gpu with Apache License 2.0
/**
 * Check whether the data-node can be started.
 */
private boolean canStartDataNode(Configuration conf) throws IOException {
  DataNode dn = null;
  try {
    dn = DataNode.createDataNode(new String[]{}, conf);
  } catch(IOException e) {
    if (e instanceof java.net.BindException)
      return false;
    throw e;
  } finally {
    // Shut down in a finally block, guarding against a null DataNode,
    // so a failed start cannot leak the node or throw an NPE here.
    if (dn != null) dn.shutdown();
  }
  return true;
}
 
Example 5
Source File: TestEncryptedTransfer.java    From big-c with Apache License 2.0 (identical code appears in the hadoop project)
@Test
public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    setEncryptionConfigKeys(conf);
    
    // start up 4 DNs
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    
    FileSystem fs = getFileSystem(conf);
    
    // Create a file with replication 3, so its block is on 3 / 4 DNs.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    
    // Shut down one of the DNs holding a block replica.
    FSDataInputStream in = fs.open(TEST_PATH);
    List<LocatedBlock> locatedBlocks = DFSTestUtil.getAllBlocks(in);
    in.close();
    assertEquals(1, locatedBlocks.size());
    assertEquals(3, locatedBlocks.get(0).getLocations().length);
    DataNode dn = cluster.getDataNode(locatedBlocks.get(0).getLocations()[0].getIpcPort());
    dn.shutdown();
    
    // Reopen the file for append, which will need to add another DN to the
    // pipeline and in doing so trigger a block transfer.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    
    fs.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 6
Source File: MiniDFSCluster.java    From hadoop-gpu with Apache License 2.0
/**
 * Shutdown all DataNodes started by this class.  The NameNode
 * is left running so that new DataNodes may be started.
 */
public void shutdownDataNodes() {
  for (int i = dataNodes.size()-1; i >= 0; i--) {
    System.out.println("Shutting down DataNode " + i);
    DataNode dn = dataNodes.remove(i).datanode;
    dn.shutdown();
    numDataNodes--;
  }
}
 
Example 7
Source File: MiniDFSCluster.java    From big-c with Apache License 2.0 (identical code appears in the hadoop project)
/**
 * Shutdown all DataNodes started by this class.  The NameNode
 * is left running so that new DataNodes may be started.
 */
public void shutdownDataNodes() {
  for (int i = dataNodes.size()-1; i >= 0; i--) {
    LOG.info("Shutting down DataNode " + i);
    DataNode dn = dataNodes.remove(i).datanode;
    dn.shutdown();
    numDataNodes--;
  }
}
 
Example 8
Source File: TestWebUIMissingBlocks.java    From RDFS with Apache License 2.0
@Test
public void testDeletedFileMissingBlock() throws IOException {
  FSNamesystem namesystem = cluster.getNameNode().namesystem;
  String fileName = "/testMissingBlock";
  DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fileName),
      (long) FILE_SIZE, (short) 1, (long) 0);
  DatanodeInfo[] locations = namesystem
      .getBlockLocations(fileName, 0, Long.MAX_VALUE).get(0).getLocations();
  int port = locations[0].getPort();
  // Shutdown a datanode to get missing blocks.
  for (DataNode dn : cluster.getDataNodes()) {
    if (dn.getSelfAddr().getPort() == port) {
      dn.shutdown();
    }
  }
  // Wait for missing blocks.
  while (namesystem.getMissingBlocksCount() == 0) {
    try {
      System.out.println("No missing blocks yet");
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      throw new IOException(e);
    }
  }
  assertTrue(namesystem.getMissingBlocksCount() > 0);
  // Delete the file.
  ((DistributedFileSystem) cluster.getFileSystem()).getClient().delete(
      fileName, false);
  // Once we delete the file, there should be no missing blocks.
  assertEquals(0, namesystem.getMissingBlocksCount());
}
 
Example 9
Source File: MiniDFSCluster.java    From RDFS with Apache License 2.0
public DataNodeProperties stopDataNode(int i) {
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  DataNodeProperties dnprop = dataNodes.remove(i);
  DataNode dn = dnprop.datanode;
  System.out.println("MiniDFSCluster Stopping DataNode " + 
                     dn.getDatanodeInfo() +
                     " from a total of " + (dataNodes.size() + 1) + 
                     " datanodes.");
  dn.shutdown();
  numDataNodes--;
  return dnprop;
}
 
Example 10
Source File: TestHDFSServerPorts.java    From hadoop with Apache License 2.0 (identical code appears in the big-c, RDFS, and hadoop-gpu projects)
/**
 * Stop the datanode.
 */
public void stopDataNode(DataNode dn) {
  if (dn != null) {
    dn.shutdown();
  }
}
 
Example 11
Source File: TestDatanodeConfig.java    From hadoop with Apache License 2.0 (identical code appears in the big-c project)
@Test(timeout=60000)
public void testMemlockLimit() throws Exception {
  assumeTrue(NativeIO.isAvailable());
  final long memlockLimit =
      NativeIO.POSIX.getCacheManipulator().getMemlockLimit();

  // Can't increase the memlock limit past the maximum.
  assumeTrue(memlockLimit != Long.MAX_VALUE);

  File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
  Configuration conf = cluster.getConfiguration(0);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
    makeURI("file", null, fileAsURI(dataDir).getPath()));
  long prevLimit = conf.
      getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
  DataNode dn = null;
  try {
    // Try starting the DN with limit configured to the ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit);
    dn = DataNode.createDataNode(new String[]{},  conf);
    dn.shutdown();
    dn = null;
    // Try starting the DN with a limit > ulimit
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        memlockLimit+1);
    try {
      dn = DataNode.createDataNode(new String[]{}, conf);
    } catch (RuntimeException e) {
      GenericTestUtils.assertExceptionContains(
          "more than the datanode's available RLIMIT_MEMLOCK", e);
    }
  } finally {
    if (dn != null) {
      dn.shutdown();
    }
    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
        prevLimit);
  }
}
 
Example 12
Source File: TestDatanodeReport.java    From hadoop with Apache License 2.0
/**
 * This test exercises the different types of datanode report.
 */
@Test
public void testDatanodeReport() throws Exception {
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  MiniDFSCluster cluster = 
    new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
  try {
    //wait until the cluster is up
    cluster.waitActive();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final List<DataNode> datanodes = cluster.getDataNodes();
    final DFSClient client = cluster.getFileSystem().dfs;

    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
    assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
    assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);

    // bring down one datanode
    final DataNode last = datanodes.get(datanodes.size() - 1);
    LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
    last.shutdown();

    DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    while (nodeInfo.length != 1) {
      try {
        Thread.sleep(500);
      } catch (Exception e) {
      }
      nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    }

    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
    assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
    assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);

    Thread.sleep(5000);
    assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 13
Source File: TestHoodieLogFormatAppendFailure.java    From hudi with Apache License 2.0
@Test
@Timeout(60)
public void testFailedToGetAppendStreamFromHDFSNameNode()
    throws IOException, URISyntaxException, InterruptedException, TimeoutException {

  // Use an fs like LocalFileSystem, which does not support appends
  String uuid = UUID.randomUUID().toString();
  Path localPartitionPath = new Path("/tmp/");
  FileSystem fs = cluster.getFileSystem();
  Path testPath = new Path(localPartitionPath, uuid);
  fs.mkdirs(testPath);

  // Some data & append.
  List<IndexedRecord> records = SchemaTestUtil.generateTestRecords(0, 10);
  Map<HoodieLogBlock.HeaderMetadataType, String> header = new HashMap<>(2);
  header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, "100");
  header.put(HoodieLogBlock.HeaderMetadataType.SCHEMA, getSimpleSchema().toString());
  HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, header);

  Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(testPath)
      .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits.archive")
      .overBaseCommit("").withFs(fs).build();

  writer = writer.appendBlock(dataBlock);
  // get the current log file version to compare later
  int logFileVersion = writer.getLogFile().getLogVersion();
  Path logFilePath = writer.getLogFile().getPath();
  writer.close();

  // Wait for the file to be replicated 3 times
  DFSTestUtil.waitReplication(fs, logFilePath, (short) 3);
  // Shut down all DNs that have the last block location for the file
  LocatedBlocks lbs = cluster.getFileSystem().getClient().getNamenode()
      .getBlockLocations("/tmp/" + uuid + "/" + logFilePath.getName(), 0, Long.MAX_VALUE);
  List<DataNode> dnsOfCluster = cluster.getDataNodes();
  DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
  for (DataNode dn : dnsOfCluster) {
    for (DatanodeInfo loc : dnsWithLocations) {
      if (dn.getDatanodeId().equals(loc)) {
        dn.shutdown();
        cluster.stopDataNode(dn.getDisplayName());
        DFSTestUtil.waitForDatanodeDeath(dn);
      }
    }
  }
  // Wait for the replication of this file to go down to 0
  DFSTestUtil.waitReplication(fs, logFilePath, (short) 0);

  // Opening a new Writer right now will throw IOException. The code should handle this, rollover the logfile and
  // return a new writer with a bumped up logVersion
  writer = HoodieLogFormat.newWriterBuilder().onParentPath(testPath)
      .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits.archive")
      .overBaseCommit("").withFs(fs).build();
  // The log version should be different for this new writer
  assertNotEquals(writer.getLogFile().getLogVersion(), logFileVersion);
}
 
Example 14
Source File: TestDatanodeDeath2.java    From RDFS with Apache License 2.0
public void testBlockAbandoned() throws Exception {
  Callback newPipeline = new Callback() {
    @Override
    public void execute() {
        try {
          FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
          LocatedBlocks blocks =
            namesystem.getBlockLocations(FILE1, 0, 2* BLOCK_SIZE);
          List<LocatedBlock> blockList = blocks.getLocatedBlocks();
          String holder = ((DistributedFileSystem) fileSystem).getClient().clientName;

          // abandonBlock clears the targets of the INodeFileUnderConstruction
          namesystem.abandonBlock(
            blockList.get(blockList.size() - 1).getBlock(),
            FILE1,
            holder
          );

          // take down the datanode
          DataNode dataNode = cluster.getDataNodes().get(0);

          // get a new block for the same file which we exclude the node from
          Node excludedNode = cluster
            .getNameNode()
            .getNamesystem()
            .getDatanode(dataNode.getDNRegistrationForNS(
                cluster.getNameNode().getNamespaceID()));
          namesystem.getAdditionalBlock(
            FILE1, holder, Arrays.<Node>asList(excludedNode)
          );

          dataNode.shutdown();
        }
        catch (IOException e) {
          fail("exception: " + StringUtils.stringifyException(e));
        }
    }
  };

  runTestDatanodeRemovedFromPipeline(false, newPipeline);
}