Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil.createFile()

The following are Java code examples showing how to use the createFile() method of the org.apache.hadoop.hdfs.DFSTestUtil class. All of the examples below come from the Hadoop project's own test suite.
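Before diving into the examples, here is a minimal, self-contained sketch of the two createFile() overloads that appear throughout this page: a short form taking the file length, replication factor, and a PRNG seed, and a longer form that additionally takes a buffer length and block size. The MiniDFSCluster setup, class name, paths, and sizes below are illustrative assumptions rather than code taken from any particular example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class CreateFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed setup: a small in-process MiniDFSCluster, as in most of the tests below.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();

      // Short overload: (fs, path, fileLen, replication, seed).
      DFSTestUtil.createFile(fs, new Path("/demo/simple.dat"), 1024L, (short) 3, 0L);

      // Long overload: (fs, path, bufferLen, fileLen, blockSize, replication, seed),
      // as used in the quota and read-only-storage examples on this page.
      DFSTestUtil.createFile(fs, new Path("/demo/multiblock.dat"),
          4096, 2L * 1024 * 1024, 1024 * 1024, (short) 3, 0L);
    } finally {
      cluster.shutdown();
    }
  }
}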
Example 1
Project: hadoop   File: TestHASafeMode.java
/**
 * Test case for entering safemode on the active namenode when it is already in startup safemode.
 * It is a regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
    .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
 
Example 2
Project: hadoop   File: TestDataNodeRollingUpgrade.java
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithFinalize() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    Path testFile1 = new Path("/" + GenericTestUtils.getMethodName() + ".01.dat");
    Path testFile2 = new Path("/" + GenericTestUtils.getMethodName() + ".02.dat");
    DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
    DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED);

    startRollingUpgrade();
    File blockFile = getBlockForFile(testFile2, true);
    File trashFile = getTrashFileForBlock(blockFile, false);
    deleteAndEnsureInTrash(testFile2, blockFile, trashFile);
    finalizeRollingUpgrade();

    // Ensure that the deleted file testFile2 stays deleted after finalize
    assertFalse(isTrashRootPresent());
    assert(!fs.exists(testFile2));
    assert(fs.exists(testFile1));

  } finally {
    shutdownCluster();
  }
}
 
Example 3
Project: hadoop   File: TestQuotaByStorageType.java
/**
 * Both the traditional space quota and the storage type quota for SSD are set
 * and not exceeded.
 */
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception {
  final Path foo = new Path(dir, "foo");
  dfs.mkdirs(foo);

  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, REPLICATION * BLOCKSIZE * 10);

  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  Path createdFile = new Path(foo, "created_file.data");
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
      fileLen, BLOCKSIZE, REPLICATION, seed);

  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());

  dfs.delete(createdFile, true);

  QuotaCounts cntAfterDelete = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(1, cntAfterDelete.getNameSpace());
  assertEquals(0, cntAfterDelete.getStorageSpace());

  // Validate the computeQuotaUsage()
  QuotaCounts counts = new QuotaCounts.Builder().build();
  fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true);
  assertEquals(fnode.dumpTreeRecursively().toString(), 1,
      counts.getNameSpace());
  assertEquals(fnode.dumpTreeRecursively().toString(), 0,
      counts.getStorageSpace());
}
 
Example 4
Project: hadoop   File: TestHDFSConcat.java
/**
 * make sure we update the quota correctly after concat
 */
@Test
public void testConcatWithQuotaDecrease() throws IOException {
  final short srcRepl = 3; // note this is different from REPL_FACTOR
  final int srcNum = 10;
  final Path foo = new Path("/foo");
  final Path[] srcs = new Path[srcNum];
  final Path target = new Path(foo, "target");
  DFSTestUtil.createFile(dfs, target, blockSize, REPL_FACTOR, 0L);

  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  for (int i = 0; i < srcNum; i++) {
    srcs[i] = new Path(foo, "src" + i);
    DFSTestUtil.createFile(dfs, srcs[i], blockSize * 2, srcRepl, 0L);
  }

  ContentSummary summary = dfs.getContentSummary(foo);
  Assert.assertEquals(11, summary.getFileCount());
  Assert.assertEquals(blockSize * REPL_FACTOR +
          blockSize * 2 * srcRepl * srcNum, summary.getSpaceConsumed());

  dfs.concat(target, srcs);
  summary = dfs.getContentSummary(foo);
  Assert.assertEquals(1, summary.getFileCount());
  Assert.assertEquals(
      blockSize * REPL_FACTOR + blockSize * 2 * REPL_FACTOR * srcNum,
      summary.getSpaceConsumed());
}
 
Example 5
Project: hadoop   File: TestINodeFile.java
/**
 * Check /.reserved path is reserved and cannot be created.
 */
@Test
public void testReservedFileNames() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    // First start a cluster with reserved file names check turned off
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    
    // Creation of directory or file with reserved path names is disallowed
    ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", false);
    ensureReservedFileNamesCannotBeCreated(fs, "/.reserved", false);
    Path reservedPath = new Path("/.reserved");
    
    // Loading of fsimage or editlog with /.reserved directory should fail
    // Mkdir the "/.reserved" reserved path with the reserved-path check turned off
    FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
    fs.mkdirs(reservedPath);
    assertTrue(fs.isDirectory(reservedPath));
    ensureReservedFileNamesCannotBeLoaded(cluster);

    // Loading of fsimage or editlog with /.reserved file should fail
    // Create the file "/.reserved" reserved path with the reserved-path check turned off
    FSDirectory.CHECK_RESERVED_FILE_NAMES = false;
    ensureClusterRestartSucceeds(cluster);
    fs.delete(reservedPath, true);
    DFSTestUtil.createFile(fs, reservedPath, 10, (short)1, 0L);
    assertTrue(!fs.isDirectory(reservedPath));
    ensureReservedFileNamesCannotBeLoaded(cluster);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 6
Project: hadoop   File: TestResolveHdfsSymlink.java
/**
 * Tests resolution of an hdfs symlink to the local file system.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFcResolveAfs() throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  FileContext fcLocal = FileContext.getLocalFSFileContext();
  FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem()
      .getUri());

  final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
  Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri()
      .toString(), new File(localTestRoot, "alpha").getAbsolutePath());
  DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16,
      (short) 1, 2);

  Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri()
      .toString(), localTestRoot);
  Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(),
      "/tmp/link");
  fcHdfs.createSymlink(linkTarget, hdfsLink, true);

  Path alphaHdfsPathViaLink = new Path(fcHdfs.getDefaultFileSystem().getUri()
      .toString()
      + "/tmp/link/alpha");

  Set<AbstractFileSystem> afsList = fcHdfs
      .resolveAbstractFileSystems(alphaHdfsPathViaLink);
  Assert.assertEquals(2, afsList.size());
  for (AbstractFileSystem afs : afsList) {
    if ((!afs.equals(fcHdfs.getDefaultFileSystem()))
        && (!afs.equals(fcLocal.getDefaultFileSystem()))) {
      Assert.fail("Failed to resolve AFS correctly");
    }
  }
}
 
Example 7
Project: hadoop   File: TestQuotaByStorageType.java
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOff() throws Exception {
  short replication = 1;
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      replication, seed);

  INode fnode = fsdir.getINode4Write(parent.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, currentSSDConsumed);

  // Create the 2nd file of size BLOCKSIZE under child directory and expect quota exceeded exception
  Path createdFile2 = new Path(child, "created_file2.data");
  long file2Len = BLOCKSIZE;

  try {
    DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, replication, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, currentSSDConsumed);
  }
}
 
Example 8
Project: hadoop   File: TestRetryCacheWithHA.java
@Override
void prepare() throws Exception {
  final Path targetPath = new Path(target);
  DFSTestUtil.createFile(dfs, targetPath, BlockSize, DataNodes, 0);
  for (int i = 0; i < srcPaths.length; i++) {
    DFSTestUtil.createFile(dfs, srcPaths[i], BlockSize, DataNodes, 0);
  }
  assertEquals(BlockSize, dfs.getFileStatus(targetPath).getLen());
}
 
Example 9
Project: hadoop   File: TestDistCpSync.java
private void initData3(Path dir) throws Exception {
  final Path test = new Path(dir, "test");
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  final Path f1 = new Path(test, "file");
  final Path f2 = new Path(foo, "file");
  final Path f3 = new Path(bar, "file");

  DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
  DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE * 2, DATA_NUM, 1L);
  DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE * 3, DATA_NUM, 2L);
}
 
Example 10
Project: hadoop   File: TestFSImageWithSnapshot.java
/**
 * Test fsimage loading while there is a file under construction.
 */
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));      
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Example 11
Project: hadoop   File: TestNamenodeRetryCache.java
/**
 * Test for append
 */
@Test
public void testAppend() throws Exception {
  String src = "/testNamenodeRetryCache/testAppend/src";
  resetCall();
  // Create a file with partial block
  DFSTestUtil.createFile(filesystem, new Path(src), 128, (short)1, 0L);
  
  // Retried append requests succeed
  newCall();
  LastBlockWithStatus b = nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  Assert.assertEquals(b, nnRpc.append(src, "holder",
      new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
  
  // non-retried call fails
  newCall();
  try {
    nnRpc.append(src, "holder",
        new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
    Assert.fail("testAppend - expected exception is not thrown");
  } catch (Exception e) {
    // Expected
  }
}
 
Example 12
Project: hadoop   File: TestNNMetricFilesInGetListingOps.java
/** create a file with a length of <code>fileLen</code> */
private void createFile(String fileName, long fileLen, short replicas) throws IOException {
  Path filePath = new Path(fileName);
  DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
}
 
Example 13
Project: hadoop   File: TestPendingReplication.java
/**
 * Test if BlockManager can correctly remove corresponding pending records
 * when a file is deleted
 * 
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    
    // 4. delete the file
    fs.delete(filePath, true);
    // retry at most 10 times, each time sleep for 1s. Note that 10s is much
    // less than the default pending record timeout (5~10min)
    int retries = 10; 
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);  // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
 
Example 14
Project: hadoop   File: TestSnapshot.java
@Override
void modify() throws Exception {
  DFSTestUtil.createFile(fs, file, fileLen,
      REPLICATION, seed);
}
 
Example 15
Project: hadoop   File: TestRenameWithSnapshots.java
/**
 * Rename a directory and delete a snapshot under the same snapshottable directory.
 */
@Test
public void testRenameDirAndDeleteSnapshot_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  
  final Path foo = new Path(dir2, "foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
  
  // take a snapshot on /test
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  
  // delete /test/dir2/foo/bar/file after snapshot s0, so that there is a 
  // snapshot copy recorded in bar
  hdfs.delete(file, true);
  
  // rename foo from dir2 to dir1
  final Path newfoo = new Path(dir1, foo.getName());
  hdfs.rename(foo, newfoo);
  
  final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
      "dir2/foo");
  assertTrue("the snapshot path " + foo_s0 + " should exist",
      hdfs.exists(foo_s0));
  
  // delete snapshot s0. The deletion will first go down through dir1, and 
  // find foo in the created list of dir1. Then it will use null as the prior
  // snapshot and continue the snapshot deletion process in the subtree of 
  // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
  // foo subtree.
  hdfs.deleteSnapshot(test, "s0");
  // check the internal state after deleting the snapshot
  assertFalse("after deleting s0, " + foo_s0 + " should not exist",
      hdfs.exists(foo_s0));
  INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
      .asDirectory();
  assertTrue("the diff list of " + dir2
      + " should be empty after deleting s0", dir2Node.getDiffs().asList()
      .isEmpty());
  
  assertTrue(hdfs.exists(newfoo));
  INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooRefNode instanceof INodeReference.DstReference);
  INodeDirectory fooNode = fooRefNode.asDirectory();
  // fooNode should be still INodeDirectory (With Snapshot) since we call
  // recordModification before the rename
  assertTrue(fooNode.isWithSnapshot());
  assertTrue(fooNode.getDiffs().asList().isEmpty());
  INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
      .get(0).asDirectory();
  // bar should also be INodeDirectory (With Snapshot), and both of its diff 
  // list and children list are empty 
  assertTrue(barNode.getDiffs().asList().isEmpty());
  assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
  
  restartClusterAndCheckImage(true);
}
 
Example 16
Project: hadoop   File: TestReadOnlySharedStorage.java
/**
 * Setup a {@link MiniDFSCluster}.
 * Create a block with both {@link State#NORMAL} and {@link State#READ_ONLY_SHARED} replicas.
 */
@Before
public void setup() throws IOException, InterruptedException {
  conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  
  Configuration[] overlays = new Configuration[NUM_DATANODES];
  for (int i = 0; i < overlays.length; i++) {
    overlays[i] = new Configuration();
    if (i == RO_NODE_INDEX) {
      overlays[i].setEnum(SimulatedFSDataset.CONFIG_PROPERTY_STATE, 
          i == RO_NODE_INDEX 
            ? READ_ONLY_SHARED
            : NORMAL);
    }
  }
  
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(NUM_DATANODES)
      .dataNodeConfOverlays(overlays)
      .build();
  fs = cluster.getFileSystem();
  blockManager = cluster.getNameNode().getNamesystem().getBlockManager();
  datanodeManager = blockManager.getDatanodeManager();
  client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()),
                         cluster.getConfiguration(0));
  
  for (int i = 0; i < NUM_DATANODES; i++) {
    DataNode dataNode = cluster.getDataNodes().get(i);
    validateStorageState(
        BlockManagerTestUtil.getStorageReportsForDatanode(
            datanodeManager.getDatanode(dataNode.getDatanodeId())),
            i == RO_NODE_INDEX 
              ? READ_ONLY_SHARED
              : NORMAL);
  }
  
  // Create a 1 block file
  DFSTestUtil.createFile(fs, PATH, BLOCK_SIZE, BLOCK_SIZE,
                         BLOCK_SIZE, (short) 1, seed);
  
  LocatedBlock locatedBlock = getLocatedBlock();
  extendedBlock = locatedBlock.getBlock();
  block = extendedBlock.getLocalBlock();
  
  assertThat(locatedBlock.getLocations().length, is(1));
  normalDataNode = locatedBlock.getLocations()[0];
  readOnlyDataNode = datanodeManager.getDatanode(cluster.getDataNodes().get(RO_NODE_INDEX).getDatanodeId());
  assertThat(normalDataNode, is(not(readOnlyDataNode)));
  
  validateNumberReplicas(1);
  
  // Inject the block into the datanode with READ_ONLY_SHARED storage 
  cluster.injectBlocks(0, RO_NODE_INDEX, Collections.singleton(block));
  
  // There should now be 2 *locations* for the block
  // Must wait until the NameNode has processed the block report for the injected blocks
  waitForLocations(2);
}
 
Example 17
Project: hadoop   File: TestFsDatasetCacheRevocation.java
/**
 * Test that when we have an uncache request, and the client refuses to release
 * the replica for a long time, we will un-mlock it.
 */
@Test(timeout=120000)
public void testRevocation() throws Exception {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  BlockReaderTestUtil.enableHdfsCachingTracing();
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  Configuration conf = getDefaultConf();
  // Set a really short revocation timeout.
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
  // Poll very often
  conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
  MiniDFSCluster cluster = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem dfs = cluster.getFileSystem();

  // Create and cache a file.
  final String TEST_FILE = "/test_file2";
  DFSTestUtil.createFile(dfs, new Path(TEST_FILE),
      BLOCK_SIZE, (short)1, 0xcafe);
  dfs.addCachePool(new CachePoolInfo("pool"));
  long cacheDirectiveId =
      dfs.addCacheDirective(new CacheDirectiveInfo.Builder().
          setPool("pool").setPath(new Path(TEST_FILE)).
          setReplication((short) 1).build());
  FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
  DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);

  // Mmap the file.
  FSDataInputStream in = dfs.open(new Path(TEST_FILE));
  ByteBuffer buf =
      in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));

  // Attempt to uncache file.  The file should get uncached.
  LOG.info("removing cache directive {}", cacheDirectiveId);
  dfs.removeCacheDirective(cacheDirectiveId);
  LOG.info("finished removing cache directive {}", cacheDirectiveId);
  Thread.sleep(1000);
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);

  // Cleanup
  in.releaseBuffer(buf);
  in.close();
  cluster.shutdown();
}
 
Example 18
Project: hadoop   File: TestBlocksWithNotEnoughRacks.java
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 19
Project: hadoop   File: TestHASafeMode.java
/**
 * Similar to {@link #testBlocksRemovedWhileInSafeMode()} except that
 * the OP_DELETE edits arrive at the SBN before the block deletion reports.
 * The tracking of safe blocks needs to properly account for the removal
 * of the blocks as well as the safe count. This is a regression test for
 * HDFS-2742.
 */
@Test
public void testBlocksRemovedWhileInSafeModeEditsArriveFirst() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil.createFile(fs, new Path("/test"), 10*BLOCK_SIZE, (short) 3, 1L);

  // Roll edit log so that, when the SBN restarts, it will load
  // the namespace during startup.
  nn0.getRpcServer().rollEditLog();
 
  banner("Restarting standby");
  restartStandby();
  
  // It will initially have all of the blocks necessary.
  String status = nn1.getNamesystem().getSafemode();
  assertTrue("Bad safemode status: '" + status + "'",
    status.startsWith(
      "Safe mode is ON. The reported blocks 10 has reached the threshold "
      + "0.9990 of total blocks 10. The number of live datanodes 3 has "
      + "reached the minimum number 0. In safe mode extension. "
      + "Safe mode will be turned off automatically"));

  // Delete those blocks while the SBN is in safe mode.
  // Immediately roll the edit log before the actual deletions are sent
  // to the DNs.
  banner("Removing the blocks without rolling the edit log");
  fs.delete(new Path("/test"), true);
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);

  // Should see removal of the blocks as well as their contribution to safe block count.
  assertSafeMode(nn1, 0, 0, 3, 0);

  
  banner("Triggering sending deletions to DNs and Deletion Reports");
  BlockManagerTestUtil.computeAllPendingWork(
      nn0.getNamesystem().getBlockManager());    
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();

  // No change in assertion status here, but some of the consistency checks
  // in safemode will fire here if we accidentally decrement safe block count
  // below 0.    
  assertSafeMode(nn1, 0, 0, 3, 0);
}
 
Example 20
Project: hadoop   File: TestDNFencing.java
@Test
public void testDnFencing() throws Exception {
  // Create a file with replication level 3.
  DFSTestUtil.createFile(fs, TEST_FILE_PATH, 30*SMALL_BLOCK, (short)3, 1L);
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_FILE_PATH);
  
  // Drop its replication count to 1, so it becomes over-replicated.
  // Then compute the invalidation of the extra blocks and trigger
  // heartbeats so the invalidations are flushed to the DNs.
  nn1.getRpcServer().setReplication(TEST_FILE, (short) 1);
  BlockManagerTestUtil.computeInvalidationWork(
      nn1.getNamesystem().getBlockManager());
  cluster.triggerHeartbeats();
  
  // Transition nn2 to active even though nn1 still thinks it's active.
  banner("Failing to NN2 but let NN1 continue to think it's active");
  NameNodeAdapter.abortEditLogs(nn1);
  NameNodeAdapter.enterSafeMode(nn1, false);
  cluster.transitionToActive(1);
  
  // Check that the standby picked up the replication change.
  assertEquals(1,
      nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());

  // Dump some info for debugging purposes.
  banner("NN2 Metadata immediately after failover");
  doMetasave(nn2);
  
  banner("Triggering heartbeats and block reports so that fencing is completed");
  cluster.triggerHeartbeats();
  cluster.triggerBlockReports();
  
  banner("Metadata after nodes have all block-reported");
  doMetasave(nn2);

  // Force a rescan of postponedMisreplicatedBlocks.
  BlockManager nn2BM = nn2.getNamesystem().getBlockManager();
  BlockManagerTestUtil.checkHeartbeat(nn2BM);
  BlockManagerTestUtil.rescanPostponedMisreplicatedBlocks(nn2BM);

  // The blocks should no longer be postponed.
  assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());
  
  // Wait for NN2 to enact its deletions (replication monitor has to run, etc)
  BlockManagerTestUtil.computeInvalidationWork(
      nn2.getNamesystem().getBlockManager());
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();
  assertEquals(0, nn2.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0, nn2.getNamesystem().getPendingReplicationBlocks());
  
  banner("Making sure the file is still readable");
  FileSystem fs2 = cluster.getFileSystem(1);
  DFSTestUtil.readFile(fs2, TEST_FILE_PATH);

  banner("Waiting for the actual block files to get deleted from DNs.");
  waitForTrueReplication(cluster, block, 1);
}