Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#delete()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#delete(). Each example comes from the open-source project and source file named in the heading above it.
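
At its core, DistributedFileSystem#delete(Path f, boolean recursive) returns true if the deletion succeeded and false if the path does not exist; if recursive is false and the path is a non-empty directory, the call throws an IOException. The sketch below shows the bare call in isolation, assuming a reachable NameNode at a placeholder URI (hdfs://namenode:8020) and a hypothetical /tmp/example-dir path; the real examples that follow obtain their file system from a MiniDFSCluster or an application-level cluster service instead.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DeleteExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; adjust to your cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path target = new Path("/tmp/example-dir");
      // recursive = true removes the directory and everything beneath it;
      // the call returns false if the path did not exist.
      boolean deleted = dfs.delete(target, true);
      System.out.println("deleted: " + deleted);
    }
  }
}
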
Example 1
Source File: TestArchiveHdfsLogReaderAndWriter.java    From incubator-ratis with Apache License 2.0
@Test public void testSeek() throws IOException {
  String archiveLocation = location+"/testSeek";
  LogName logName = LogName.of("testSeek");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.delete(new Path(archiveLocation), true);
  ArchiveLogWriter writer = new ArchiveHdfsLogWriter(conf);
  writer.init(archiveLocation, logName);
  int k = 100;
  write(writer, 1, k);
  writer.close();
  ArchiveLogReader reader = new ArchiveHdfsLogReader(conf,
      LogServiceUtils.getArchiveLocationForLog(archiveLocation, logName));
  reader.seek(80);
  Assert.assertEquals(80, reader.getPosition());
  int count = 0;
  while (reader.next() != null) {
    count++;
  }
  Assert.assertEquals(20, count);
}
 
Example 2
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that dst1 parent cannot allow under it new files/directories 
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Example 3
Source File: TestHDFSFileContextMainOperations.java    From hadoop with Apache License 2.0
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that dst1 parent cannot allow under it new files/directories 
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Example 4
Source File: TestHDFSFileContextMainOperations.java    From big-c with Apache License 2.0
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that dst1 parent cannot allow under it new files/directories 
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Example 5
Source File: TestHDFSFileContextMainOperations.java    From big-c with Apache License 2.0
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set quota so that dst1 parent cannot allow under it new files/directories 
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, true, false, Rename.OVERWRITE);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Example 6
Source File: OozieJobsServiceImpl.java    From searchanalytics-bigdata with MIT License
private String setupHiveAddPartitionWorkflowApp() throws IOException {
	String userName = System.getProperty("user.name");
	String workFlowRoot = hadoopClusterService.getHDFSUri() + "/usr/"
			+ userName + "/oozie/wf-hive-add-partition";

	// put oozie app in hadoop
	DistributedFileSystem fs = hadoopClusterService.getFileSystem();
	Path workFlowRootPath = new Path(workFlowRoot);
	fs.delete(workFlowRootPath, true);

	File wfDir = new ClassPathResource("oozie/wf-hive-add-partition")
			.getFile();
	LOG.debug("wfdir: {}", wfDir.getAbsolutePath());
	FileUtil.copy(wfDir, fs, workFlowRootPath, false, new Configuration());
	FileUtil.copy(new ClassPathResource("hive/hive-site.xml").getFile(),
			fs, new Path(workFlowRoot), false, new Configuration());
	return workFlowRoot;
}
 
Example 7
Source File: ShellTest.java    From knox with Apache License 2.0
private void testPutGetScript(String script) throws IOException, URISyntaxException {
  setupLogging();
  DistributedFileSystem fileSystem = miniDFSCluster.getFileSystem();
  Path dir = new Path("/user/guest/example");
  fileSystem.delete(dir, true);
  fileSystem.mkdirs(dir, new FsPermission("777"));
  fileSystem.setOwner(dir, "guest", "users");
  Binding binding = new Binding();
  binding.setProperty("gateway", driver.getClusterUrl());
  URL readme = driver.getResourceUrl("README");
  File file = new File(readme.toURI());
  binding.setProperty("file", file.getAbsolutePath());
  GroovyShell shell = new GroovyShell(binding);
  shell.evaluate(driver.getResourceUrl(script).toURI());
  String status = (String) binding.getProperty("status");
  assertNotNull(status);
  String fetchedFile = (String) binding.getProperty("fetchedFile");
  assertNotNull(fetchedFile);
  assertThat(fetchedFile, containsString("README"));
}
 
Example 8
Source File: TestArchiveHdfsLogReaderAndWriter.java    From incubator-ratis with Apache License 2.0
@Test public void testRollingWriter() throws IOException {
  String archiveLocation = location+"/testRollingWriter";
  LogName logName = LogName.of("testRollingWriterLogName");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.delete(new Path(archiveLocation), true);
  ArchiveLogWriter writer = new ArchiveHdfsLogWriter(conf);
  writer.init(archiveLocation, logName);
  int k = 2;
  write(writer, 1, k);
  Assert.assertEquals(writer.getLastWrittenRecordId(), k);
  writer.rollWriter();
  String[] files = Arrays.stream(
      fs.listStatus(new Path(LogServiceUtils.getArchiveLocationForLog(archiveLocation, logName))))
      .map(fileStatus -> fileStatus.getPath().getName()).toArray(String[]::new);
  String[] expectedFiles = { logName.getName(), logName.getName() + "_recordId_" + k };
  Assert.assertArrayEquals(expectedFiles, files);
  ArchiveLogReader reader = new ArchiveHdfsLogReader(conf,
      LogServiceUtils.getArchiveLocationForLog(archiveLocation, logName));
  verifyRecords(reader, k);
  Assert.assertEquals(writer.getLastWrittenRecordId(), reader.getPosition());
  write(writer, k + 1, 2 * k);
  Assert.assertEquals(writer.getLastWrittenRecordId(), 2 * k);
  writer.close();
  reader = new ArchiveHdfsLogReader(conf,
      LogServiceUtils.getArchiveLocationForLog(archiveLocation, logName));
  verifyRecords(reader, 2 * k);

  files = ((ArchiveHdfsLogReader) reader).getFiles().stream()
      .map(fileStatus -> fileStatus.getPath().getName()).toArray(String[]::new);
  String[] expectedFiles1 =
      { logName.getName() + "_recordId_" + k, logName.getName() + "_recordId_" + 2 * k };
  Assert.assertArrayEquals(expectedFiles1, files);
  reader.close();
}
 
Example 9
Source File: HadoopClusterServiceTest.java    From searchanalytics-bigdata with MIT License
@Test
public void hdfsFileLoggerSinkAndTest() throws FileNotFoundException,
		IOException {

	List<Event> searchEvents = generateSearchAnalyticsDataService
			.getSearchEvents(11);

	DistributedFileSystem fs = hadoopClusterService.getFileSystem();

	// Write the events to a file
	Path outFile = new Path("/searchevents/event" + UUID.randomUUID());
	FSDataOutputStream out = fs.create(outFile, false);
	for (Event event : searchEvents) {
		String eventString = new String(event.getBody(), "UTF-8");
		System.out.println("Writing event string: " + eventString);
		out.writeUTF(eventString + System.lineSeparator());
	}
	out.flush();
	out.close();

	// check the data is there...with standard file
	FSDataInputStream input = fs.open(outFile);
	try (BufferedReader br = new BufferedReader(new InputStreamReader(
			input, "UTF-8"))) {
		String line = null;
		while ((line = br.readLine()) != null) {
			System.out.println("HDFS file line is:" + line);
		}
	}

	input.close();
	fs.delete(outFile, true);
}
 
Example 10
Source File: DistCpSync.java    From hadoop with Apache License 2.0
private static void deleteTargetTmpDir(DistributedFileSystem targetFs,
    Path tmpDir) {
  try {
    if (tmpDir != null) {
      targetFs.delete(tmpDir, true);
    }
  } catch (IOException e) {
    DistCp.LOG.error("Unable to cleanup tmp dir: " + tmpDir, e);
  }
}
 
Example 11
Source File: HiveSearchClicksServiceTest.java    From searchanalytics-bigdata with MIT License
@Before
public void prepareHive() {
	DistributedFileSystem fs = hadoopClusterService.getFileSystem();
	Path path = new Path("/searchevents");
	try {
		fs.delete(path, true);
		fs.mkdirs(path);
	} catch (IOException e) {
		e.printStackTrace();
		fail();
	}
	hiveSearchClicksService.setup();
}
 
Example 12
Source File: SparkStreamServiceTest.java    From searchanalytics-bigdata with MIT License
@Before
public void prepareHdfs() {
	DistributedFileSystem fs = hadoopClusterService.getFileSystem();
	Path path = new Path("/searchevents");
	Path sparkPath = new Path("/sparkcheckpoint");
	try {
		fs.delete(path, true);
		fs.delete(sparkPath, true);
		fs.mkdirs(path);
	} catch (IOException e) {
		e.printStackTrace();
		fail();
	}
}
 
Example 13
Source File: DistCpSync.java    From big-c with Apache License 2.0
private static void deleteTargetTmpDir(DistributedFileSystem targetFs,
    Path tmpDir) {
  try {
    if (tmpDir != null) {
      targetFs.delete(tmpDir, true);
    }
  } catch (IOException e) {
    DistCp.LOG.error("Unable to cleanup tmp dir: " + tmpDir, e);
  }
}
 
Example 14
Source File: SecureKnoxShellTest.java    From knox with Apache License 2.0
/**
 * Do the heavy lifting here.
 */
private void webhdfsPutGet() throws Exception {
  DistributedFileSystem fileSystem = miniDFSCluster.getFileSystem();
  Path dir = new Path("/user/guest/example");
  fileSystem.delete(dir, true);
  fileSystem.mkdirs(dir, new FsPermission("777"));
  fileSystem.setOwner(dir, "guest", "users");

  final File jaasFile = setupJaasConf(baseDir, keytab, hdfsPrincipal);

  final Binding binding = new Binding();

  binding.setProperty("jaasConf", jaasFile.getAbsolutePath());
  binding.setProperty("krb5conf", krb5conf);
  binding.setProperty("gateway", driver.getClusterUrl());

  URL readme = driver.getResourceUrl("README");
  File file = new File(readme.toURI());
  binding.setProperty("file", file.getAbsolutePath());

  final GroovyShell shell = new GroovyShell(binding);

  shell.evaluate(getResourceUrl(SCRIPT).toURI());

  String status = (String) binding.getProperty("status");
  assertNotNull(status);

  String fetchedFile = (String) binding.getProperty("fetchedFile");
  assertNotNull(fetchedFile);
  assertTrue(fetchedFile.contains("README"));
}
 
Example 15
Source File: OfflineEditsViewerHelper.java    From RDFS with Apache License 2.0
/**
 * Run file operations to create edits for all op codes
 * to be tested.
 */
private void runOperations() throws IOException {

  LOG.info("Creating edits by performing fs operations");
  // no instanceof check: if the cast fails it throws an exception, which is what we want
  DistributedFileSystem dfs =
    (DistributedFileSystem)cluster.getFileSystem();
  // OP_ADD 0, OP_SET_GENSTAMP 10
  Path pathFileCreate = new Path("/file_create");
  FSDataOutputStream s = dfs.create(pathFileCreate);
  // OP_CLOSE 9
  s.close();
  // OP_RENAME 1
  Path pathFileMoved = new Path("/file_moved");
  dfs.rename(pathFileCreate, pathFileMoved);
  // OP_DELETE 2
  dfs.delete(pathFileMoved, false);
  // OP_MKDIR 3
  Path pathDirectoryMkdir = new Path("/directory_mkdir");
  dfs.mkdirs(pathDirectoryMkdir);
  // OP_SET_REPLICATION 4
  s = dfs.create(pathFileCreate);
  s.close();
  dfs.setReplication(pathFileCreate, (short)1);
  // OP_SET_PERMISSIONS 7
  Short permission = 0777;
  dfs.setPermission(pathFileCreate, new FsPermission(permission));
  // OP_SET_OWNER 8
  dfs.setOwner(pathFileCreate, new String("newOwner"), null);
  // OP_CLOSE 9 see above
  // OP_SET_GENSTAMP 10 see above
  // OP_SET_NS_QUOTA 11 obsolete
  // OP_CLEAR_NS_QUOTA 12 obsolete
  // OP_TIMES 13
  long mtime = 1285195527000L; // Wed, 22 Sep 2010 22:45:27 GMT
  long atime = mtime;
  dfs.setTimes(pathFileCreate, mtime, atime);
  // OP_SET_QUOTA 14
  dfs.setQuota(pathDirectoryMkdir, 1000L, FSConstants.QUOTA_DONT_SET);
  // OP_CONCAT_DELETE 16
  Path   pathConcatTarget = new Path("/file_concat_target");
  Path[] pathConcatFiles  = new Path[2];
  pathConcatFiles[0]      = new Path("/file_concat_0");
  pathConcatFiles[1]      = new Path("/file_concat_1");

  long  length      = blockSize * 3; // multiple of blocksize for concat
  short replication = 1;
  long  seed        = 1;

  DFSTestUtil.createFile(dfs, pathConcatTarget, length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[0], length, replication, seed);
  DFSTestUtil.createFile(dfs, pathConcatFiles[1], length, replication, seed);
  dfs.concat(pathConcatTarget, pathConcatFiles, false);

  // sync to disk, otherwise we parse partial edits
  cluster.getNameNode().getFSImage().getEditLog().logSync();
  dfs.close();
}
 
Example 16
Source File: TestOfflineImageViewer.java    From big-c with Apache License 2.0
@BeforeClass
public static void createOriginalFSImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
        "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem hdfs = cluster.getFileSystem();

    // Create a reasonable namespace
    for (int i = 0; i < NUM_DIRS; i++) {
      Path dir = new Path("/dir" + i);
      hdfs.mkdirs(dir);
      writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
      for (int j = 0; j < FILES_PER_DIR; j++) {
        Path file = new Path(dir, "file" + j);
        FSDataOutputStream o = hdfs.create(file);
        o.write(23);
        o.close();

        writtenFiles.put(file.toString(),
            pathToFileEntry(hdfs, file.toString()));
      }
    }

    // Create an empty directory
    Path emptydir = new Path("/emptydir");
    hdfs.mkdirs(emptydir);
    writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));

    //Create a directory whose name should be escaped in XML
    Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
    hdfs.mkdirs(invalidXMLDir);

    // Get delegation tokens so we log the delegation token op
    Token<?>[] delegationTokens = hdfs
        .addDelegationTokens(TEST_RENEWER, null);
    for (Token<?> t : delegationTokens) {
      LOG.debug("got token " + t);
    }

    final Path snapshot = new Path("/snapshot");
    hdfs.mkdirs(snapshot);
    hdfs.allowSnapshot(snapshot);
    hdfs.mkdirs(new Path("/snapshot/1"));
    hdfs.delete(snapshot, true);

    // Set XAttrs so the fsimage contains XAttr ops
    final Path xattr = new Path("/xattr");
    hdfs.mkdirs(xattr);
    hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
    hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
    // OIV should be able to handle empty value XAttrs
    hdfs.setXAttr(xattr, "user.a3", null);
    writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));

    // Write results to the fsimage file
    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    hdfs.saveNamespace();

    // Determine location of fsimage file
    originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
        .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
    if (originalFsimage == null) {
      throw new RuntimeException("Didn't generate or can't find fsimage");
    }
    LOG.debug("original FS image file is " + originalFsimage);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Example 17
Source File: TestPendingReplication.java    From big-c with Apache License 2.0
/**
 * Test if BlockManager can correctly remove corresponding pending records
 * when a file is deleted
 * 
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    
    // 4. delete the file
    fs.delete(filePath, true);
    // retry at most 10 times, each time sleep for 1s. Note that 10s is much
    // less than the default pending record timeout (5~10min)
    int retries = 10; 
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);  // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
 
Example 18
Source File: TestINodeFile.java    From big-c with Apache License 2.0 4 votes vote down vote up
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 19
Source File: TestPendingReplication.java    From hadoop with Apache License 2.0
/**
 * Test if BlockManager can correctly remove corresponding pending records
 * when a file is deleted
 * 
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    
    // 4. delete the file
    fs.delete(filePath, true);
    // retry at most 10 times, each time sleep for 1s. Note that 10s is much
    // less than the default pending record timeout (5~10min)
    int retries = 10; 
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);  // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
 
Example 20
Source File: TestINodeFile.java    From hadoop with Apache License 2.0
@Test
public void testFilesInGetListingOps() throws Exception {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem hdfs = cluster.getFileSystem();
    final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

    hdfs.mkdirs(new Path("/tmp"));
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

    DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
        HdfsFileStatus.EMPTY_NAME, false);
    assertTrue(dl.getPartialListing().length == 3);

    String f2 = new String("f2");
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
    assertTrue(dl.getPartialListing().length == 1);

    INode f2INode = fsdir.getINode("/tmp/f2");
    String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
    dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
        false);
    assertTrue(dl.getPartialListing().length == 1);

    // Test the deleted startAfter file
    hdfs.delete(new Path("/tmp/f2"), false);
    try {
      dl = cluster.getNameNodeRpc().getListing("/tmp",
          f2InodePath.getBytes(), false);
      fail("Didn't get exception for the deleted startAfter token.");
    } catch (IOException e) {
      assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}