Java Code Examples for org.apache.hadoop.fs.FileSystem.close()

The following are Java code examples showing how to use the close() method of the org.apache.hadoop.fs.FileSystem class.
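
Since FileSystem implements java.io.Closeable, the simplest way to guarantee that close() runs even when an operation throws is a try-with-resources block. The following is a minimal sketch of that pattern; the default Configuration and the /tmp/example path are placeholders, not values taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCloseExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // try-with-resources calls fs.close() automatically,
    // even if mkdirs() throws an IOException.
    try (FileSystem fs = FileSystem.get(conf)) {
      fs.mkdirs(new Path("/tmp/example")); // placeholder path
    }
  }
}

Note that FileSystem.get(conf) may return a cached instance shared by other callers in the same JVM, so closing it closes it for them as well; when that is a concern, FileSystem.newInstance(conf) returns a private instance that can be closed independently.
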
Example 1
Project: hadoop   File: TestHttpFSFileSystemLocalFileSystem.java
@Override
protected void testSetPermission() throws Exception {
  if (Path.WINDOWS) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foodir");
    fs.mkdirs(path);

    fs = getHttpFSFileSystem();
    FsPermission permission1 = new FsPermission(FsAction.READ_WRITE, FsAction.NONE, FsAction.NONE);
    fs.setPermission(path, permission1);
    fs.close();

    fs = FileSystem.get(getProxiedFSConf());
    FileStatus status1 = fs.getFileStatus(path);
    fs.close();
    FsPermission permission2 = status1.getPermission();
    Assert.assertEquals(permission2, permission1);

    // sticky bit not supported on Windows with local file system, so the
    // subclass skips that part of the test
  } else {
    super.testSetPermission();
  }
}
 
Example 2
Project: hadoop   File: TestDFSClientFailover.java
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);
  
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  
  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
 
Example 3
Project: hadoop   File: TestDFSShellGenericOptions.java
private void execute(String [] args, String namenode) {
  FsShell shell=new FsShell();
  FileSystem fs=null;
  try {
    ToolRunner.run(shell, args);
    fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    assertTrue("Directory does not get created", 
               fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs!=null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
 
Example 4
Project: hadoop   File: BaseTestHttpFSWith.java
private void testAppend() throws Exception {
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path path = new Path(getProxiedFSTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    fs.close();
    fs = getHttpFSFileSystem();
    os = fs.append(new Path(path.toUri().getPath()));
    os.write(2);
    os.close();
    fs.close();
    fs = FileSystem.get(getProxiedFSConf());
    InputStream is = fs.open(path);
    Assert.assertEquals(is.read(), 1);
    Assert.assertEquals(is.read(), 2);
    Assert.assertEquals(is.read(), -1);
    is.close();
    fs.close();
  }
}
 
Example 5
Project: Transwarp-Sample-Code   File: CreateDir.java
public static void main(String[] args) throws IOException {
    // Create an HDFS directory via the Java API
    String rootPath = "hdfs://nameservice1";
    Path p = new Path(rootPath + "/tmp/newDir3");

    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    // If Kerberos is not enabled, comment out the next two lines
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\hdfs.keytab");
    FileSystem fs = p.getFileSystem(conf);
    boolean b = fs.mkdirs(p);
    System.out.println(b);
    fs.close();
}
 
Example 6
Project: hadoop   File: TestPread.java
/**
 * Tests positional read in LocalFS.
 */
@Test
public void testPreadLocalFS() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem fileSys = FileSystem.getLocal(conf);
  try {
    Path file1 = new Path("build/test/data", "preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
  }
}
 
Example 7
Project: hadoop   File: BaseTestHttpFSWith.java
private void testListStatus() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path path = new Path(getProxiedFSTestDir(), "foo.txt");
  OutputStream os = fs.create(path);
  os.write(1);
  os.close();
  FileStatus status1 = fs.getFileStatus(path);
  fs.close();

  fs = getHttpFSFileSystem();
  FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
  fs.close();

  Assert.assertEquals(status2.getPermission(), status1.getPermission());
  Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
  Assert.assertEquals(status2.getReplication(), status1.getReplication());
  Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
  Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
  Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
  Assert.assertEquals(status2.getOwner(), status1.getOwner());
  Assert.assertEquals(status2.getGroup(), status1.getGroup());
  Assert.assertEquals(status2.getLen(), status1.getLen());

  FileStatus[] stati = fs.listStatus(path.getParent());
  Assert.assertEquals(stati.length, 1);
  Assert.assertEquals(stati[0].getPath().getName(), path.getName());
}
 
Example 8
Project: hadoop   File: TestModTime.java
/**
 * Regression test for HDFS-3864 - NN does not update internal file mtime for
 * OP_CLOSE when reading from the edit log.
 */
@Test
public void testModTimePersistsAfterRestart() throws IOException {
  final long sleepTime = 10; // 10 milliseconds
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();
    Path testPath = new Path("/test");
    
    // Open a file, and get its initial modification time.
    OutputStream out = fs.create(testPath);
    long initialModTime = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(initialModTime > 0);
    
    // Wait and then close the file. Ensure that the mod time goes up.
    ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime);
    out.close();
    long modTimeAfterClose = fs.getFileStatus(testPath).getModificationTime();
    assertTrue(modTimeAfterClose >= initialModTime + sleepTime);
    
    // Restart the NN, and make sure that the later mod time is still used.
    cluster.restartNameNode();
    long modTimeAfterRestart = fs.getFileStatus(testPath).getModificationTime();
    assertEquals(modTimeAfterClose, modTimeAfterRestart);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 9
Project: hadoop-oss   File: TestTraceUtils.java
/**
 * Test tracing the globber.  This is a regression test for HDFS-9187.
 */
@Test
public void testTracingGlobber() throws Exception {
  // Bypass the normal FileSystem object creation path by just creating an
  // instance of a subclass.
  FileSystem fs = new LocalFileSystem();
  fs.initialize(new URI("file:///"), new Configuration());
  fs.globStatus(new Path("/"));
  fs.close();
}
 
Example 10
Project: hadoop   File: TestFiDataTransferProtocol2.java
/**
 * 1. create files with dfs
 * 2. write MIN_N_PACKET to MAX_N_PACKET packets
 * 3. close file
 * 4. open the same file
 * 5. read the bytes and compare results
 */
private static void writeSeveralPackets(String methodName) throws IOException {
  final Random r = FiTestUtil.RANDOM.get();
  final int nPackets = FiTestUtil.nextRandomInt(MIN_N_PACKET, MAX_N_PACKET + 1);
  final int lastPacketSize = FiTestUtil.nextRandomInt(1, PACKET_SIZE + 1);
  final int size = (nPackets - 1)*PACKET_SIZE + lastPacketSize;

  FiTestUtil.LOG.info("size=" + size + ", nPackets=" + nPackets
      + ", lastPacketSize=" + lastPacketSize);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION + 2).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);

    final long seed = r.nextLong();
    final Random ran = new Random(seed);
    ran.nextBytes(bytes);
    out.write(bytes, 0, size);
    out.close();

    final FSDataInputStream in = dfs.open(p);
    int totalRead = 0;
    int nRead = 0;
    while ((nRead = in.read(toRead, totalRead, size - totalRead)) > 0) {
      totalRead += nRead;
    }
    Assert.assertEquals("Cannot read file.", size, totalRead);
    for (int i = 0; i < size; i++) {
      Assert.assertTrue("File content differ.", bytes[i] == toRead[i]);
    }
  }
  finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Example 11
Project: hadoop   File: TestAzureFileSystemErrorConditions.java
@Test
public void testAccessContainerWithWrongVersion() throws Exception {
  AzureNativeFileSystemStore store = new AzureNativeFileSystemStore();
  MockStorageInterface mockStorage = new MockStorageInterface();
  store.setAzureStorageInteractionLayer(mockStorage);
  FileSystem fs = new NativeAzureFileSystem(store);
  try {
    Configuration conf = new Configuration();
    AzureBlobStorageTestAccount.setMockAccountKey(conf);
    HashMap<String, String> metadata = new HashMap<String, String>();
    metadata.put(AzureNativeFileSystemStore.VERSION_METADATA_KEY,
        "2090-04-05"); // It's from the future!
    mockStorage.addPreExistingContainer(
        AzureBlobStorageTestAccount.getMockContainerUri(), metadata);

    boolean passed = false;
    try {
      fs.initialize(new URI(AzureBlobStorageTestAccount.MOCK_WASB_URI), conf);
      fs.listStatus(new Path("/"));
      passed = true;
    } catch (AzureException ex) {
      assertTrue("Unexpected exception message: " + ex,
          ex.getMessage().contains("unsupported version: 2090-04-05."));
    }
    assertFalse("Should've thrown an exception because of the wrong version.",
        passed);
  } finally {
    fs.close();
  }
}
 
Example 12
Project: hadoop   File: TestFsck.java
/** Test fsck with symlinks in the filesystem */
@Test
public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc = FileContext.getFileContext(
        cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short)3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (Exception e) {
      }
    }
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 13
Project: WIFIProbe   File: HDFSTool.java
/** Delete a file from HDFS. */
public static void deleteFromHdfs(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    fs.deleteOnExit(new Path(dst));
    fs.close();
}
 
Example 14
Project: WIFIProbe   File: HDFSTool.java
/** Rename a file. */
public static void renameFile(String origin, String newName) throws IOException{
    Configuration conf = new Configuration();
    String str = NodeConfig.HDFS_PATH+origin;
    String dst = NodeConfig.HDFS_PATH+newName;
    FileSystem fs = FileSystem.get(URI.create(str), conf);
    Path srcPath = new Path(str);
    Path dstPath = new Path(dst);
    fs.rename(srcPath, dstPath);
    fs.close();
}
 
Example 15
Project: hadoop   File: TestDatanodeRestart.java
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt) 
throws IOException {
  FSDataOutputStream out = null;
  FileSystem fs = cluster.getFileSystem();
  final Path src = new Path("/test.txt");
  try {
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    out = fs.create(src);
    out.write(writeBuf);
    out.hflush();
    DataNode dn = cluster.getDataNodes().get(0);
    for (FsVolumeSpi v : dataset(dn).getVolumes()) {
      final FsVolumeImpl volume = (FsVolumeImpl)v;
      File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
      File rbwDir = new File(currentDir, "rbw");
      for (File file : rbwDir.listFiles()) {
        if (isCorrupt && Block.isBlockFilename(file)) {
          new RandomAccessFile(file, "rw").setLength(fileLen-1); // corrupt
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);

    // check volumeMap: one rwr replica
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ReplicaMap replicas = dataset(dn).volumeMap;
    Assert.assertEquals(1, replicas.size(bpid));
    ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
    Assert.assertEquals(ReplicaState.RWR, replica.getState());
    if (isCorrupt) {
      Assert.assertEquals((fileLen-1)/512*512, replica.getNumBytes());
    } else {
      Assert.assertEquals(fileLen, replica.getNumBytes());
    }
    dataset(dn).invalidate(bpid, new Block[]{replica});
  } finally {
    IOUtils.closeStream(out);
    if (fs.exists(src)) {
      fs.delete(src, false);
    }
    fs.close();
  }      
}
 
Example 16
Project: hadoop   File: TestSecurityTokenEditLog.java
/**
 * Tests transaction logging in dfs.
 */
@Test
public void testEditLog() throws IOException {

  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;

  try {
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
      File dir = new File(it.next().getPath());
      System.out.println(dir);
    }
    
    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();

    // set small size of flush buffer
    editLog.setOutputBufferCapacity(2048);
  
    // Create threads and make them run transactions concurrently.
    Thread threadId[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
      Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
      threadId[i] = new Thread(trans, "TransactionThread-" + i);
      threadId[i].start();
    }

    // wait for all transactions to get over
    for (int i = 0; i < NUM_THREADS; i++) {
      try {
        threadId[i].join();
      } catch (InterruptedException e) {
        i--;      // retry 
      }
    } 
    
    editLog.close();
      
    // Verify that we can read in all the transactions that we have written.
    // If there were any corruptions, it is likely that the reading in
    // of these transactions will throw an exception.
    //
    namesystem.getDelegationTokenSecretManager().stopThreads();
    int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
    int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys
        + 2; // + 2 for BEGIN and END txns

    for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
      System.out.println("Verifying file: " + editFile);
      
      FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);        
      long numEdits = loader.loadFSEdits(
          new EditLogFileInputStream(editFile), 1);
      assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
    }
  } finally {
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Example 17
Project: hadoop   File: TestBackupNode.java
/**
 * Ensure that the backupnode will tail edits from the NN
 * and keep in sync, even while the NN rolls, checkpoints
 * occur, etc.
 */
@Test
public void testBackupNodeTailsEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  HAUtil.setAllowStandbyReads(conf, true);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  BackupNode backup = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf)
                                .numDataNodes(0).build();
    fileSys = cluster.getFileSystem();
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);
    
    BackupImage bnImage = (BackupImage) backup.getFSImage();
    testBNInSync(cluster, backup, 1);
    
    // Force a roll -- BN should roll with NN.
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    nnRpc.rollEditLog();
    assertEquals(bnImage.getEditLog().getCurSegmentTxId(),
        nn.getFSImage().getEditLog().getCurSegmentTxId());
    
    // BN should stay in sync after roll
    testBNInSync(cluster, backup, 2);
    
    long nnImageBefore =
      nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    // BN checkpoint
    backup.doCheckpoint();
    
    // NN should have received a new image
    long nnImageAfter =
      nn.getFSImage().getStorage().getMostRecentCheckpointTxId();
    
    assertTrue("nn should have received new checkpoint. before: " +
        nnImageBefore + " after: " + nnImageAfter,
        nnImageAfter > nnImageBefore);

    // BN should stay in sync after checkpoint
    testBNInSync(cluster, backup, 3);

    // Stop BN
    StorageDirectory sd = bnImage.getStorage().getStorageDir(0);
    backup.stop();
    backup = null;
    
    // When shutting down the BN, it shouldn't finalize logs that are
    // still open on the NN
    EditLogFile editsLog = FSImageTestUtil.findLatestEditsLog(sd);
    assertEquals(editsLog.getFirstTxId(),
        nn.getFSImage().getEditLog().getCurSegmentTxId());
    assertTrue("Should not have finalized " + editsLog,
        editsLog.isInProgress());
    
    // do some edits
    assertTrue(fileSys.mkdirs(new Path("/edit-while-bn-down")));
    
    // start a new backup node
    backup = startBackupNode(conf, StartupOption.BACKUP, 1);

    testBNInSync(cluster, backup, 4);
    assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
  } finally {
    LOG.info("Shutting down...");
    if (backup != null) backup.stop();
    if (fileSys != null) fileSys.close();
    if (cluster != null) cluster.shutdown();
  }
  
  assertStorageDirsMatch(cluster.getNameNode(), backup);
}
 
Example 18
Project: hadoop   File: TestFailureToReadEdits.java
/**
 * Test the following case:
 * 1. SBN is reading a finalized edits file when NFS disappears halfway
 *    through (or some intermittent error happens)
 * 2. SBN performs a checkpoint and uploads it to the NN
 * 3. NN receives a checkpoint that doesn't correspond to the end of any log
 *    segment
 * 4. Both NN and SBN should be able to restart at this point.
 * 
 * This is a regression test for HDFS-2766.
 */
@Test
public void testCheckpointStartingMidEditsFile() throws Exception {
  assertTrue(fs.mkdirs(new Path(TEST_DIR1)));
  
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  
  // Once the standby catches up, it should notice that it needs to
  // do a checkpoint and save one to its local directories.
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(0, 3));
  
  // It should also upload it back to the active.
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(0, 3));
  
  causeFailureOnEditLogRead();
  
  assertTrue(fs.mkdirs(new Path(TEST_DIR2)));
  assertTrue(fs.mkdirs(new Path(TEST_DIR3)));
  
  try {
    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
    fail("Standby fully caught up, but should not have been able to");
  } catch (HATestUtil.CouldNotCatchUpException e) {
    // Expected. The NN did not exit.
  }
  
  // 5 because we should get OP_START_LOG_SEGMENT and one successful OP_MKDIR
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(0, 3, 5));
  
  // It should also upload it back to the active.
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(0, 3, 5));

  // Restart the active NN
  cluster.restartNameNode(0);
  
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(0, 3, 5));
  
  FileSystem fs0 = null;
  try {
    // Make sure that when the active restarts, it loads all the edits.
    fs0 = FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),
        conf);
    
    assertTrue(fs0.exists(new Path(TEST_DIR1)));
    assertTrue(fs0.exists(new Path(TEST_DIR2)));
    assertTrue(fs0.exists(new Path(TEST_DIR3)));
  } finally {
    if (fs0 != null)
      fs0.close();
  }
}
 
Example 19
Project: hadoop   File: TestOverReplicatedBlocks.java
/**
 * The test verifies that replica for deletion is chosen on a node,
 * with the oldest heartbeat, when this heartbeat is larger than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 * 
 * Test waits until the tolerable heartbeat interval expires, and reduces
 * replication of the file. All replica deletions should be scheduled for the
 * last node. No replicas will actually be deleted, since last DN doesn't
 * send heartbeats. 
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replicas for deletion should be scheduled on lastDN.
    // And should not actually be deleted, because lastDN does not heartbeat.
    namesystem.readLock();
    Collection<Block> dnBlocks = 
      namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid + " should have been deleted",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for(BlockLocation location : locs)
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
  } finally {
    if(fs != null) fs.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Example 20
Project: hadoop   File: TestDFSUpgradeWithHA.java
/**
 * Make sure that an HA NN with NFS-based HA can successfully start and
 * upgrade.
 */
@Test
public void testNfsUpgrade() throws IOException, URISyntaxException {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(0)
        .build();
    
    File sharedDir = new File(cluster.getSharedEditsDir(0, 1));
    
    // No upgrade is in progress at the moment.
    checkClusterPreviousDirExistence(cluster, false);
    assertCTimesEqual(cluster);
    checkPreviousDirExistence(sharedDir, false);
    
    // Transition NN0 to active and do some FS ops.
    cluster.transitionToActive(0);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(new Path("/foo1")));
    
    // Do the upgrade. Shut down NN1 and then restart NN0 with the upgrade
    // flag.
    cluster.shutdownNameNode(1);
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.UPGRADE);
    cluster.restartNameNode(0, false);
    
    checkNnPreviousDirExistence(cluster, 0, true);
    checkNnPreviousDirExistence(cluster, 1, false);
    checkPreviousDirExistence(sharedDir, true);
    
    // NN0 should come up in the active state when given the -upgrade option,
    // so no need to transition it to active.
    assertTrue(fs.mkdirs(new Path("/foo2")));
    
    // Restart NN0 without the -upgrade flag, to make sure that works.
    cluster.getNameNodeInfos()[0].setStartOpt(StartupOption.REGULAR);
    cluster.restartNameNode(0, false);
    
    // Make sure we can still do FS ops after upgrading.
    cluster.transitionToActive(0);
    assertTrue(fs.mkdirs(new Path("/foo3")));
    
    // Now bootstrap the standby with the upgraded info.
    int rc = BootstrapStandby.run(
        new String[]{"-force"},
        cluster.getConfiguration(1));
    assertEquals(0, rc);
    
    // Now restart NN1 and make sure that we can do ops against that as well.
    cluster.restartNameNode(1);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    assertTrue(fs.mkdirs(new Path("/foo4")));
    
    assertCTimesEqual(cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}