Java Code Examples for org.apache.hadoop.fs.FileSystemTestHelper#createFile()

The following examples show how to use org.apache.hadoop.fs.FileSystemTestHelper#createFile(). They are drawn from the test suites of open-source projects; the source file and license are noted above each example.
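Before the project examples, here is a minimal, self-contained sketch of the three createFile() overloads they exercise. The local FileSystem, the /tmp scratch directory, and the CreateFileSketch class name are illustrative assumptions, not taken from any project below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;

public class CreateFileSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative setup: a local FileSystem and a scratch directory.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path("/tmp/createFileSketch");

    // Simplest overload: create a small file with default test contents.
    FileSystemTestHelper.createFile(fs, new Path(dir, "aFile"));

    // Sized overload: 2 blocks of 1024 bytes each.
    FileSystemTestHelper.createFile(fs, new Path(dir, "sized"), 2, 1024);

    // Explicit-data overload: write the deterministic pattern returned by
    // getFileData(), with a given block size and replication factor.
    byte[] data = FileSystemTestHelper.getFileData(2, 1024);
    FileSystemTestHelper.createFile(fs, new Path(dir, "bytes"), data,
        1024, (short) 1);

    fs.delete(dir, true);
    fs.close();
  }
}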
Example 1
Source File: BaseTestHttpFSWith.java    From hadoop with Apache License 2.0
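Here createFile() writes a two-block file from the byte pattern returned by getFileData(); the test then truncates the file to one block and verifies the remaining contents.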
private void testTruncate() throws Exception {
  if (!isLocalFS()) {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path file = new Path(getProxiedFSTestDir(), "foo.txt");
    final byte[] data = FileSystemTestHelper.getFileData(
        numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

    final int newLength = blockSize;

    boolean isReady = fs.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);

    FileStatus fileStatus = fs.getFileStatus(file);
    Assert.assertEquals(newLength, fileStatus.getLen());
    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

    fs.close();
  }
}
 
Example 2
Source File: ViewFileSystemBaseTest.java    From hadoop with Apache License 2.0
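In this ViewFileSystem fixture, createFile() seeds the target file system with a single file next to the directories the mount table will link to.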
@Before
public void setUp() throws Exception {
  initializeTargetTestRoot();
  
  // Make user and data dirs - we create links to them in the mount table
  fsTarget.mkdirs(new Path(targetTestRoot,"user"));
  fsTarget.mkdirs(new Path(targetTestRoot,"data"));
  fsTarget.mkdirs(new Path(targetTestRoot,"dir2"));
  fsTarget.mkdirs(new Path(targetTestRoot,"dir3"));
  FileSystemTestHelper.createFile(fsTarget, new Path(targetTestRoot,"aFile"));
  
  
  // Now we use the mount fs to set links to user and dir
  // in the test root
  
  // Set up the defaultMT in the config with our mount point links
  conf = ViewFileSystemTestSetup.createConfig();
  setupMountPoints();
  fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
}
 
Example 3
Source File: ViewFileSystemBaseTest.java    From hadoop with Apache License 2.0
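createFile() builds a ten-block file in the target file system so the test can compare the block locations reported through the view against those of the target.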
@Test
public void testGetBlockLocations() throws IOException {
  Path targetFilePath = new Path(targetTestRoot,"data/largeFile");
  FileSystemTestHelper.createFile(fsTarget, 
      targetFilePath, 10, 1024);
  Path viewFilePath = new Path("/data/largeFile");
  Assert.assertTrue("Created File should be type File",
      fsView.isFile(viewFilePath));
  BlockLocation[] viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240+100);
  Assert.assertEquals(SupportsBlocks ? 10 : 1, viewBL.length);
  BlockLocation[] targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240+100);
  compareBLs(viewBL, targetBL);
  
  
  // Same test, but now obtain the locations via the FileStatus parameter
  viewBL = fsView.getFileBlockLocations(
      fsView.getFileStatus(viewFilePath), 0, 10240+100);
  targetBL = fsTarget.getFileBlockLocations(
      fsTarget.getFileStatus(targetFilePath), 0, 10240+100);
  compareBLs(viewBL, targetBL);  
}
 
Example 4
Source File: TestSaslDataTransfer.java    From hadoop with Apache License 2.0
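createFile() materializes the file that is read back over DataTransferProtocol; the block-location checks then confirm each block is replicated on three hosts.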
/**
 * Tests DataTransferProtocol with the given client configuration.
 *
 * @param conf client configuration
 * @throws IOException if there is an I/O error
 */
private void doTest(HdfsConfiguration conf) throws IOException {
  fs = FileSystem.get(cluster.getURI(), conf);
  FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE);
  assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE),
    DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8"));
  BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0,
    Long.MAX_VALUE);
  assertNotNull(blockLocations);
  assertEquals(NUM_BLOCKS, blockLocations.length);
  for (BlockLocation blockLocation: blockLocations) {
    assertNotNull(blockLocation.getHosts());
    assertEquals(3, blockLocation.getHosts().length);
  }
}
 
Example 5
Source File: TestFsShellPermission.java    From hadoop with Apache License 2.0
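createFile() supplies contents for each non-directory entry before the test applies the requested permissions and ownership.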
private void createFiles(FileSystem fs, String topdir,
    FileEntry[] entries) throws IOException {
  for (FileEntry entry : entries) {
    String newPathStr = topdir + "/" + entry.getPath();
    Path newPath = new Path(newPathStr);
    if (entry.isDirectory()) {
      fs.mkdirs(newPath);
    } else {
      FileSystemTestHelper.createFile(fs, newPath);
    }
    fs.setPermission(newPath, new FsPermission(entry.getPermission()));
    fs.setOwner(newPath, entry.getOwner(), entry.getGroup());
  }
}
 
Example 6
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0
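createFile() generates the files whose blocks are cached one directive at a time while the test tracks cached-block counts at the NameNode and DataNodes.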
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
            (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);

  // Send a cache report referring to a bogus block.  It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo(pool));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i=0; i<numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
        (int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().
          setPath(new Path(paths.get(i))).
          setPool(pool).
          build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }

  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected*BLOCK_SIZE, totalUsed);

  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
    new CacheDirectiveIterator(nnRpc, null, Sampler.NEVER);
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
 
Example 7
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0
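The six-argument createFile() overload creates four two-block files with replication 3; caching /foo as a directory and then /foo/bar individually exercises the directive and pool statistics.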
@Test(timeout=120000)
public void testWaitForCachedReplicasInDirectory() throws Exception {
  // Create the pool
  final String pool = "friendlyPool";
  final CachePoolInfo poolInfo = new CachePoolInfo(pool);
  dfs.addCachePool(poolInfo);
  // Create some test files
  final List<Path> paths = new LinkedList<Path>();
  paths.add(new Path("/foo/bar"));
  paths.add(new Path("/foo/baz"));
  paths.add(new Path("/foo2/bar2"));
  paths.add(new Path("/foo2/baz2"));
  dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault());
  dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault());
  final int numBlocksPerFile = 2;
  for (Path path : paths) {
    FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile,
        (int)BLOCK_SIZE, (short)3, false);
  }
  waitForCachedBlocks(namenode, 0, 0,
      "testWaitForCachedReplicasInDirectory:0");

  // cache entire directory
  long id = dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().
          setPath(new Path("/foo")).
          setReplication((short)2).
          setPool(pool).
          build());
  waitForCachedBlocks(namenode, 4, 8,
      "testWaitForCachedReplicasInDirectory:1:blocks");
  // Verify that listDirectives gives the stats we want.
  waitForCacheDirectiveStats(dfs,
      4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
      2, 2,
      new CacheDirectiveInfo.Builder().
          setPath(new Path("/foo")).
          build(),
      "testWaitForCachedReplicasInDirectory:1:directive");
  waitForCachePoolStats(dfs,
      4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
      2, 2,
      poolInfo, "testWaitForCachedReplicasInDirectory:1:pool");

  long id2 = dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder().
          setPath(new Path("/foo/bar")).
          setReplication((short)4).
          setPool(pool).
          build());
  // wait for an additional 2 cached replicas to come up
  waitForCachedBlocks(namenode, 4, 10,
      "testWaitForCachedReplicasInDirectory:2:blocks");
  // the directory directive's stats are unchanged
  waitForCacheDirectiveStats(dfs,
      4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
      2, 2,
      new CacheDirectiveInfo.Builder().
          setPath(new Path("/foo")).
          build(),
      "testWaitForCachedReplicasInDirectory:2:directive-1");
  // verify /foo/bar's stats
  waitForCacheDirectiveStats(dfs,
      4 * numBlocksPerFile * BLOCK_SIZE,
      // only 3 because the file only has 3 replicas, not 4 as requested.
      3 * numBlocksPerFile * BLOCK_SIZE,
      1,
      // only 0 because the file can't be fully cached
      0,
      new CacheDirectiveInfo.Builder().
          setPath(new Path("/foo/bar")).
          build(),
      "testWaitForCachedReplicasInDirectory:2:directive-2");
  waitForCachePoolStats(dfs,
      (4+4) * numBlocksPerFile * BLOCK_SIZE,
      (4+3) * numBlocksPerFile * BLOCK_SIZE,
      3, 2,
      poolInfo, "testWaitForCachedReplicasInDirectory:2:pool");
  // remove and watch numCached go to 0
  dfs.removeCacheDirective(id);
  dfs.removeCacheDirective(id2);
  waitForCachedBlocks(namenode, 0, 0,
      "testWaitForCachedReplicasInDirectory:3:blocks");
  waitForCachePoolStats(dfs,
      0, 0,
      0, 0,
      poolInfo, "testWaitForCachedReplicasInDirectory:3:pool");
}
 
Example 8
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0
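Each file is created with replication 3 via the six-argument createFile() overload before the cache replication factor is stepped from 1 up to 3 and back down.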
/**
 * Tests stepping the cache replication factor up and down, checking the
 * number of cached replicas and blocks as well as the advertised locations.
 * @throws Exception
 */
@Test(timeout=120000)
public void testReplicationFactor() throws Exception {
  // Create the pool
  final String pool = "friendlyPool";
  dfs.addCachePool(new CachePoolInfo(pool));
  // Create some test files
  final List<Path> paths = new LinkedList<Path>();
  paths.add(new Path("/foo/bar"));
  paths.add(new Path("/foo/baz"));
  paths.add(new Path("/foo2/bar2"));
  paths.add(new Path("/foo2/baz2"));
  dfs.mkdir(new Path("/foo"), FsPermission.getDirDefault());
  dfs.mkdir(new Path("/foo2"), FsPermission.getDirDefault());
  final int numBlocksPerFile = 2;
  for (Path path : paths) {
    FileSystemTestHelper.createFile(dfs, path, numBlocksPerFile,
        (int)BLOCK_SIZE, (short)3, false);
  }
  waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:0");
  checkNumCachedReplicas(dfs, paths, 0, 0);
  // cache directory
  long id = dfs.addCacheDirective(
      new CacheDirectiveInfo.Builder().
        setPath(new Path("/foo")).
        setReplication((short)1).
        setPool(pool).
        build());
  waitForCachedBlocks(namenode, 4, 4, "testReplicationFactor:1");
  checkNumCachedReplicas(dfs, paths, 4, 4);
  // step up the replication factor
  for (int i=2; i<=3; i++) {
    dfs.modifyCacheDirective(
        new CacheDirectiveInfo.Builder().
        setId(id).
        setReplication((short)i).
        build());
    waitForCachedBlocks(namenode, 4, 4*i, "testReplicationFactor:2");
    checkNumCachedReplicas(dfs, paths, 4, 4*i);
  }
  // step it down
  for (int i=2; i>=1; i--) {
    dfs.modifyCacheDirective(
        new CacheDirectiveInfo.Builder().
        setId(id).
        setReplication((short)i).
        build());
    waitForCachedBlocks(namenode, 4, 4*i, "testReplicationFactor:3");
    checkNumCachedReplicas(dfs, paths, 4, 4*i);
  }
  // remove and watch numCached go to 0
  dfs.removeCacheDirective(id);
  waitForCachedBlocks(namenode, 0, 0, "testReplicationFactor:4");
  checkNumCachedReplicas(dfs, paths, 0, 0);
}
 