org.apache.hadoop.hdfs.DFSTestUtil Java Examples

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil, a utility class used throughout the HDFS test suite for creating files, waiting for replication, and reading data back. Each example is drawn from an open-source project; the source file and license are noted above the code.
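Before the examples, here is a minimal, self-contained sketch of the most common pattern: creating a file with DFSTestUtil against a MiniDFSCluster, waiting for replication, and reading it back. It is not taken from any of the projects below; the class name and file path are illustrative, and it assumes the hadoop-hdfs test artifacts are on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DFSTestUtilSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start a single-datanode in-process cluster for the test.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/sketch.dat");
      // Write a 1 KB file with replication 1 and a fixed random seed.
      DFSTestUtil.createFile(fs, file, 1024L, (short) 1, 0L);
      // Block until the requested replication factor is reached.
      DFSTestUtil.waitReplication(fs, file, (short) 1);
      // Read the file contents back as a String.
      String contents = DFSTestUtil.readFile(fs, file);
      System.out.println("Read " + contents.length() + " characters");
    } finally {
      cluster.shutdown();
    }
  }
}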
Example #1
Source File: TestScrLazyPersistFiles.java    From big-c with Apache License 2.0
public void doShortCircuitReadMetaFileCorruptionTest() throws IOException,
    InterruptedException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
  Path path2 = new Path("/" + METHOD_NAME + ".02.dat");

  final int SEED = 0xFADED;
  makeRandomTestFile(path1, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path1, RAM_DISK);

  // Create another file with a replica on RAM_DISK, which evicts the first.
  makeRandomTestFile(path2, BLOCK_SIZE, true, SEED);

  // Sleep for a short time to allow the lazy writer thread to do its job.
  Thread.sleep(3 * LAZY_WRITER_INTERVAL_SEC * 1000);
  triggerBlockReport();

  // Corrupt the lazy-persisted checksum file, and verify that checksum
  // verification catches it.
  ensureFileReplicasOnStorageType(path1, DEFAULT);
  File metaFile = cluster.getBlockMetadataFile(0,
      DFSTestUtil.getFirstBlock(fs, path1));
  MiniDFSCluster.corruptBlock(metaFile);
  exception.expect(ChecksumException.class);
  DFSTestUtil.readFileBuffer(fs, path1);
}
 
Example #2
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Similar to testRenameUCFileInSnapshot, but do the rename first and then
 * append to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction.
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  ((DFSOutputStream) out.getWrappedStream()).hsync(
      EnumSet.of(SyncFlag.UPDATE_LENGTH));

  // save namespace and restart
  restartClusterAndCheckImage(true);
}
 
Example #3
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testExceedsCapacity() throws Exception {
  // Create a giant file
  final Path fileName = new Path("/exceeds");
  final long fileLen = CACHE_CAPACITY * (NUM_DATANODES*2);
  int numCachedReplicas = (int) ((CACHE_CAPACITY*NUM_DATANODES)/BLOCK_SIZE);
  DFSTestUtil.createFile(dfs, fileName, fileLen, (short) NUM_DATANODES,
      0xFADED);
  dfs.addCachePool(new CachePoolInfo("pool"));
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool")
      .setPath(fileName).setReplication((short) 1).build());
  waitForCachedBlocks(namenode, -1, numCachedReplicas,
      "testExceeds:1");
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);

  // Try creating a file with giant-sized blocks that exceed cache capacity
  dfs.delete(fileName, false);
  DFSTestUtil.createFile(dfs, fileName, 4096, fileLen, CACHE_CAPACITY * 2,
      (short) 1, 0xFADED);
  checkPendingCachedEmpty(cluster);
  Thread.sleep(1000);
  checkPendingCachedEmpty(cluster);
}
 
Example #4
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test rename from a non-snapshottable dir to a snapshottable dir
 */
@Test (timeout=60000)
public void testRenameFromNonSDir2SDir() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, snap1);
  
  final Path newfoo = new Path(sdir2, "foo");
  hdfs.rename(foo, newfoo);
  
  INode fooNode = fsdir.getINode4Write(newfoo.toString());
  assertTrue(fooNode instanceof INodeDirectory);
}
 
Example #5
Source File: TestFsDatasetCache.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testPageRounder() throws Exception {
  // Write a small file
  Path fileName = new Path("/testPageRounder");
  final int smallBlocks = 512; // This should be smaller than the page size
  assertTrue("Page size should be greater than smallBlocks!",
      PAGE_SIZE > smallBlocks);
  final int numBlocks = 5;
  final int fileLen = smallBlocks * numBlocks;
  FSDataOutputStream out =
      fs.create(fileName, false, 4096, (short)1, smallBlocks);
  out.write(new byte[fileLen]);
  out.close();
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);
  // Cache the file and check the sizes match the page size
  setHeartbeatResponse(cacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(PAGE_SIZE * numBlocks, numBlocks, fsd);
  // Uncache and check that it decrements by the page size too
  setHeartbeatResponse(uncacheBlocks(locs));
  DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
}
 
Example #6
Source File: TestHASafeMode.java    From big-c with Apache License 2.0
/**
 * Test case for entering safe mode on the active namenode when it is already in
 * startup safe mode. It is a regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
    .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
 
Example #7
Source File: TestWebHDFSForHA.java    From hadoop with Apache License 2.0
@Test
public void testMultipleNamespacesConfigured() throws Exception {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();
    DFSTestUtil.addHAConfiguration(conf, LOGICAL_NAME + "remote");
    DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");

    fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
    Assert.assertEquals(2, fs.getResolvedNNAddr().length);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #8
Source File: TestDistCpSync.java    From hadoop with Apache License 2.0
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
    throws Exception {
  Assert.assertEquals(s.isDirectory(), t.isDirectory());
  if (compareName) {
    Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
  }
  if (!s.isDirectory()) {
    // verify the file content is the same
    byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
    byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
    Assert.assertArrayEquals(sbytes, tbytes);
  } else {
    FileStatus[] slist = dfs.listStatus(s.getPath());
    FileStatus[] tlist = dfs.listStatus(t.getPath());
    Assert.assertEquals(slist.length, tlist.length);
    for (int i = 0; i < slist.length; i++) {
      verifyCopy(slist[i], tlist[i], true);
    }
  }
}
 
Example #9
Source File: TestFsDatasetCache.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testUncacheUnknownBlock() throws Exception {
  // Create a file
  Path fileName = new Path("/testUncacheUnknownBlock");
  int fileLen = 4096;
  DFSTestUtil.createFile(fs, fileName, fileLen, (short)1, 0xFDFD);
  HdfsBlockLocation[] locs = (HdfsBlockLocation[])fs.getFileBlockLocations(
      fileName, 0, fileLen);

  // Try to uncache it without caching it first
  setHeartbeatResponse(uncacheBlocks(locs));

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return fsd.getNumBlocksFailedToUncache() > 0;
    }
  }, 100, 10000);
}
 
Example #10
Source File: TestSnapshotDeletion.java    From big-c with Apache License 2.0
/**
 * Deleting a directory that has a snapshottable descendant with snapshots must fail.
 */
@Test (timeout=300000)
public void testDeleteDirectoryWithSnapshot2() throws Exception {
  Path file0 = new Path(sub, "file0");
  Path file1 = new Path(sub, "file1");
  DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  
  Path subfile1 = new Path(subsub, "file0");
  Path subfile2 = new Path(subsub, "file1");
  DFSTestUtil.createFile(hdfs, subfile1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);

  // Allow snapshots for subsub, and create a snapshot of it
  hdfs.allowSnapshot(subsub);
  hdfs.createSnapshot(subsub, "s1");

  // Deleting the dir should fail while its descendant subsub has snapshots
  exception.expect(RemoteException.class);
  String error = subsub.toString()
      + " is snapshottable and already has snapshots";
  exception.expectMessage(error);
  hdfs.delete(dir, true);
}
 
Example #11
Source File: TestDatanodeRestart.java    From RDFS with Apache License 2.0
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new Configuration();
  conf.setLong("dfs.block.size", 1024L);
  conf.setInt("dfs.write.packet.size", 512);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8 * 1024);
    util.createFiles(fs, TopDir, (short) 3);
    util.waitReplication(fs, TopDir, (short) 3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
 
Example #12
Source File: TestMetricsTimeVaryingClasses.java    From RDFS with Apache License 2.0
public void testMetricsTimeVaryingClasses() throws Exception {
  metrics.bytesWrittenLatency.resetMinMax();
  metrics.bytesWrittenRate.resetMinMax();

  //writesFromLocalClient uses MetricsTimeVaryingInt
  assertEquals(metrics.writesFromLocalClient.getCurrentIntervalValue(),0);
  final long LONG_FILE_LEN = Integer.MAX_VALUE+1L;
  DFSTestUtil.createFile(fileSystem, new Path("/tmp.txt"),
      LONG_FILE_LEN, (short)1, 1L);

  //bytesWritten uses MetricsTimeVaryingLong
  assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  //bytesWrittenLatency uses MetricsTimeVaryingRate
  assertTrue(metrics.bytesWrittenLatency.getMaxTime()>0);
  assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
 
  //writesFromLocalClient uses MetricsTimeVaryingInt
  assertTrue(metrics.writesFromLocalClient.getCurrentIntervalValue()>0);
}
 
Example #13
Source File: TestHASafeMode.java    From hadoop with Apache License 2.0
/**
 * Tests the case where, while a standby is down, more blocks are
 * added to the namespace, but the edit log is not rolled. So, when the
 * standby starts up, it receives notification about the new blocks during
 * the safemode extension period.
 */
@Test
public void testBlocksAddedBeforeStandbyRestart() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil.createFile(fs, new Path("/test"), 3*BLOCK_SIZE, (short) 3, 1L);
  // Roll edit log so that, when the SBN restarts, it will load
  // the namespace during startup.
  nn0.getRpcServer().rollEditLog();

  banner("Creating some blocks that won't be in the edit log");
  DFSTestUtil.createFile(fs, new Path("/test2"), 5*BLOCK_SIZE, (short) 3, 1L);
  
  banner("Restarting standby");
  restartStandby();

  // We expect it not to be stuck in safemode, since those blocks
  // that are already visible to the SBN should be processed
  // in the initial block reports.
  assertSafeMode(nn1, 3, 3, 3, 0);

  banner("Waiting for standby to catch up to active namespace");
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  assertSafeMode(nn1, 8, 8, 3, 0);
}
 
Example #14
Source File: TestDistCpSync.java    From big-c with Apache License 2.0
private void verifyCopy(FileStatus s, FileStatus t, boolean compareName)
    throws Exception {
  Assert.assertEquals(s.isDirectory(), t.isDirectory());
  if (compareName) {
    Assert.assertEquals(s.getPath().getName(), t.getPath().getName());
  }
  if (!s.isDirectory()) {
    // verify the file content is the same
    byte[] sbytes = DFSTestUtil.readFileBuffer(dfs, s.getPath());
    byte[] tbytes = DFSTestUtil.readFileBuffer(dfs, t.getPath());
    Assert.assertArrayEquals(sbytes, tbytes);
  } else {
    FileStatus[] slist = dfs.listStatus(s.getPath());
    FileStatus[] tlist = dfs.listStatus(t.getPath());
    Assert.assertEquals(slist.length, tlist.length);
    for (int i = 0; i < slist.length; i++) {
      verifyCopy(slist[i], tlist[i], true);
    }
  }
}
 
Example #15
Source File: BaseTestHttpFSWith.java    From big-c with Apache License 2.0
private void testConcat() throws Exception {
  Configuration config = getProxiedFSConf();
  config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(config);
    fs.mkdirs(getProxiedFSTestDir());
    Path path1 = new Path("/test/foo.txt");
    Path path2 = new Path("/test/bar.txt");
    Path path3 = new Path("/test/derp.txt");
    DFSTestUtil.createFile(fs, path1, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path2, 1024, (short) 3, 0);
    DFSTestUtil.createFile(fs, path3, 1024, (short) 3, 0);
    fs.close();
    fs = getHttpFSFileSystem();
    fs.concat(path1, new Path[]{path2, path3});
    fs.close();
    fs = FileSystem.get(config);
    Assert.assertTrue(fs.exists(path1));
    Assert.assertFalse(fs.exists(path2));
    Assert.assertFalse(fs.exists(path3));
    fs.close();
  }
}
 
Example #16
Source File: TestDataNodeMetrics.java    From big-c with Apache License 2.0
@Test
public void testSendDataPacketMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int interval = 1;
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    // Create and read a 1 byte file
    Path tmpfile = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, tmpfile,
        (long)1, (short)1, 1L);
    DFSTestUtil.readFile(fs, tmpfile);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    // Expect 2 packets, 1 for the 1 byte read, 1 for the empty packet
    // signaling the end of the block
    assertCounter("SendDataPacketTransferNanosNumOps", (long)2, rb);
    assertCounter("SendDataPacketBlockedOnNetworkNanosNumOps", (long)2, rb);
    // Wait for at least 1 rollover
    Thread.sleep((interval + 1) * 1000);
    // Check that the sendPacket percentiles rolled to non-zero values
    String sec = interval + "s";
    assertQuantileGauges("SendDataPacketBlockedOnNetworkNanos" + sec, rb);
    assertQuantileGauges("SendDataPacketTransferNanos" + sec, rb);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example #17
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);
  
  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
 
Example #18
Source File: TestHost2NodesMap.java    From big-c with Apache License 2.0
@Test
public void testRemove() throws Exception {
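  // A descriptor that was never added to the map cannot be removed; removing
  // real nodes should make them unreachable via getDatanodeByHost.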
  DatanodeDescriptor nodeNotInMap =
    DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  assertFalse(map.remove(nodeNotInMap));
  
  assertTrue(map.remove(dataNodes[0]));
  assertTrue(map.getDatanodeByHost("1.1.1.1.")==null);
  assertTrue(map.getDatanodeByHost("2.2.2.2")==dataNodes[1]);
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  assertTrue(node==dataNodes[2] || node==dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));
  
  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
  
  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertNull(map.getDatanodeByHost("3.3.3.3"));
  
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  assertFalse(map.remove(dataNodes[1]));
}
 
Example #19
Source File: TestAuditLogs.java    From hadoop with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);

  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);

  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
 
Example #20
Source File: TestMRCJCFileInputFormat.java    From hadoop with Apache License 2.0
private void createInputs(FileSystem fs, Path inDir, String fileName)
    throws IOException, TimeoutException, InterruptedException {
  // create a multi-block file on hdfs
  Path path = new Path(inDir, fileName);
  final short replication = 2;
  DataOutputStream out = fs.create(path, true, 4096,
                                   replication, 512, null);
  for(int i=0; i < 1000; ++i) {
    out.writeChars("Hello\n");
  }
  out.close();
  System.out.println("Wrote file");
  DFSTestUtil.waitReplication(fs, path, replication);
}
 
Example #21
Source File: TestQuotaByStorageType.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify space consumed and remaining quota
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // append several blocks
  int appendLen = BLOCKSIZE * 2;
  DFSTestUtil.appendFile(dfs, createdFile1, appendLen);
  file1Len += appendLen;

  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
 
Example #22
Source File: TestTruncateQuotaUpdate.java    From hadoop with Apache License 2.0
@Override
public void prepare() throws Exception {
  // original size: 2.5 blocks
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
      REPLICATION, 0L);
  SnapshotTestHelper.createSnapshot(dfs, dir, "s1");

  // truncate to 1.5 block
  dfs.truncate(file, BLOCKSIZE + BLOCKSIZE / 2);
  TestFileTruncate.checkBlockRecovery(file, dfs);

  // append another 1 BLOCK
  DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
}
 
Example #23
Source File: TestNestedSnapshots.java    From hadoop with Apache License 2.0
@Test (timeout=300000)
public void testSnapshotName() throws Exception {
  final String dirStr = "/testSnapshotWithQuota/dir";
  final Path dir = new Path(dirStr);
  hdfs.mkdirs(dir, new FsPermission((short)0777));
  hdfs.allowSnapshot(dir);

  // set namespace quota
  final int NS_QUOTA = 6;
  hdfs.setQuota(dir, NS_QUOTA, HdfsConstants.QUOTA_DONT_SET);

  // create object to use up the quota.
  final Path foo = new Path(dir, "foo");
  final Path f1 = new Path(foo, "f1");
  DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
  {
    //create a snapshot with default snapshot name
    final Path snapshotPath = hdfs.createSnapshot(dir);

    //check snapshot path and the default snapshot name
    final String snapshotName = snapshotPath.getName(); 
    Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches(
        "s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
        snapshotName));
    final Path parent = snapshotPath.getParent();
    Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
    Assert.assertEquals(dir, parent.getParent());
  }
}
 
Example #24
Source File: TestQuotaByStorageType.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOff() throws Exception {
  short replication = 1;
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      replication, seed);

  INode fnode = fsdir.getINode4Write(parent.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, currentSSDConsumed);

  // Create the 2nd file of size BLOCKSIZE under child directory and expect quota exceeded exception
  Path createdFile2 = new Path(child, "created_file2.data");
  long file2Len = BLOCKSIZE;

  try {
    DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, replication, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
    currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
        .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
    assertEquals(file1Len, currentSSDConsumed);
  }
}
 
Example #25
Source File: TestSnapshotDiffReport.java    From big-c with Apache License 2.0
/**
 * After renaming a file/dir, deleting the ancestor dir of the rename target
 * should cause the file to be reported as deleted.
 */
@Test
public void testDiffReportWithRenameAndDelete() throws Exception {
  final Path root = new Path("/");
  final Path dir1 = new Path(root, "dir1");
  final Path dir2 = new Path(root, "dir2");
  final Path foo = new Path(dir1, "foo");
  final Path fileInFoo = new Path(foo, "file");
  final Path bar = new Path(dir2, "bar");
  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil
          .string2Bytes("dir2/bar/file")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("dir1/foo/file"),
          DFSUtil.string2Bytes("dir2/bar/file")));

  // delete bar
  hdfs.delete(bar, true);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  verifyDiffReport(root, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir1/foo/file")));
}
 
Example #26
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
@Test (timeout=60000)
public void testRenameTwiceInSnapshot() throws Exception {
  hdfs.mkdirs(sub1);
  hdfs.allowSnapshot(sub1);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(sub1, snap1);
  hdfs.rename(file1, file2);
  
  hdfs.createSnapshot(sub1, snap2);
  hdfs.rename(file2, file3);

  SnapshotDiffReport diffReport;
  
  // Query the diff report and make sure it looks as expected.
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, snap2);
  LOG.info("DiffList is " + diffReport.toString());
  List<DiffReportEntry> entries = diffReport.getDiffList();
  assertTrue(entries.size() == 2);
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
      file2.getName()));
  
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap2, "");
  LOG.info("DiffList is " + diffReport.toString());
  entries = diffReport.getDiffList();
  assertTrue(entries.size() == 2);
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file2.getName(),
      file3.getName()));
  
  diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
  LOG.info("DiffList is " + diffReport.toString());
  entries = diffReport.getDiffList();
  assertTrue(entries.size() == 2);
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
      file3.getName()));
}
 
Example #27
Source File: BlockReportTestBase.java    From big-c with Apache License 2.0
private ArrayList<Block> prepareForRide(final Path filePath,
                                        final String METHOD_NAME,
                                        long fileSize) throws IOException {
  LOG.info("Running test " + METHOD_NAME);

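  // Create a test file with random data, then collect its blocks from the
  // NameNode's reported block locations.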
  DFSTestUtil.createFile(fs, filePath, fileSize,
    REPL_FACTOR, rand.nextLong());

  return locatedToBlocks(cluster.getNameNodeRpc()
    .getBlockLocations(filePath.toString(), FILE_START,
      fileSize).getLocatedBlocks(), null);
}
 
Example #28
Source File: TestSnapshotDiffReport.java    From big-c with Apache License 2.0
/**
 * Nested renamed dir/file, where the withNameList in the WithCount node of the
 * parent directory is empty due to snapshot deletion. See HDFS-6996 for
 * details.
 */
@Test
public void testDiffReportWithRenameAndSnapshotDeletion() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  // rename /foo to /foo2
  final Path foo2 = new Path(root, "foo2");
  hdfs.rename(foo, foo2);
  // now /foo/bar becomes /foo2/bar
  final Path bar2 = new Path(foo2, "bar");

  // delete snapshot s0 so that the withNameList inside of the WithCount node
  // of foo becomes empty
  hdfs.deleteSnapshot(root, "s0");

  // create snapshot s1 and rename bar again
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  final Path bar3 = new Path(foo2, "bar-new");
  hdfs.rename(bar2, bar3);

  // we always put modification on the file before rename
  verifyDiffReport(root, "s1", "",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo2")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo2/bar"),
          DFSUtil.string2Bytes("foo2/bar-new")));
}
 
Example #29
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
@Override
void prepare() throws Exception {
  final Path targetPath = new Path(target);
  DFSTestUtil.createFile(dfs, targetPath, BlockSize, DataNodes, 0);
  for (int i = 0; i < srcPaths.length; i++) {
    DFSTestUtil.createFile(dfs, srcPaths[i], BlockSize, DataNodes, 0);
  }
  assertEquals(BlockSize, dfs.getFileStatus(targetPath).getLen());
}
 
Example #30
Source File: TestHostsFiles.java    From big-c with Apache License 2.0
@Test
public void testHostsIncludeForDeadCount() throws Exception {
  Configuration conf = getConf();

  // Configure include and exclude host files
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  StringBuilder includeHosts = new StringBuilder();
  includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
      .append("\n");
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    assertTrue(ns.getNumDeadDataNodes() == 2);
    assertTrue(ns.getNumLiveDataNodes() == 0);

    // Testing using MBeans
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}