org.apache.hadoop.hdfs.protocol.DirectoryListing Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.DirectoryListing. The examples are drawn from open-source projects; the source file, project, and license are noted above each one.
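DirectoryListing represents one page of a directory's entries plus a cookie (the last returned name) for fetching the next page. As a primer before the examples, here is a minimal pagination sketch, assuming an already-open DFSClient; the listAll helper is hypothetical and for illustration only.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Minimal sketch: fetch a directory one page at a time, feeding the last
// returned name back in as the cookie for the next call.
static List<HdfsFileStatus> listAll(DFSClient client, String dir)
    throws IOException {
  List<HdfsFileStatus> all = new ArrayList<>();
  byte[] startAfter = HdfsFileStatus.EMPTY_NAME; // begin at the first entry
  DirectoryListing page;
  do {
    page = client.listPaths(dir, startAfter);
    for (HdfsFileStatus status : page.getPartialListing()) {
      all.add(status);
    }
    startAfter = page.getLastName(); // cookie for the next page
  } while (page.hasMore());
  return all;
}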
Example #1
Source File: RpcProgramNfs3.java    From hadoop with Apache License 2.0
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter entry can no longer be found.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie couldn't be found: "
        + new String(startAfter, Charset.forName("UTF-8"))
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
 
Example #2
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
@Override
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  GetListingRequestProto req = GetListingRequestProto.newBuilder()
      .setSrc(src)
      .setStartAfter(ByteString.copyFrom(startAfter))
      .setNeedLocation(needLocation).build();
  try {
    GetListingResponseProto result = rpcProxy.getListing(null, req);
    
    if (result.hasDirList()) {
      return PBHelper.convert(result.getDirList());
    }
    return null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #3
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public GetListingResponseProto getListing(RpcController controller,
    GetListingRequestProto req) throws ServiceException {
  try {
    DirectoryListing result = server.getListing(
        req.getSrc(), req.getStartAfter().toByteArray(),
        req.getNeedLocation());
    if (result != null) {
      return GetListingResponseProto.newBuilder().setDirList(
        PBHelper.convert(result)).build();
    } else {
      return VOID_GETLISTING_RESPONSE;
    }
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example #4
Source File: TestDFSUpgradeFromImage.java    From big-c with Apache License 2.0
static void recoverAllLeases(DFSClient dfs, 
    Path path) throws IOException {
  String pathStr = path.toString();
  HdfsFileStatus status = dfs.getFileInfo(pathStr);
  if (!status.isDir()) {
    dfs.recoverLease(pathStr);
    return;
  }
  byte prev[] = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing dirList;
  do {
    dirList = dfs.listPaths(pathStr, prev);
    HdfsFileStatus files[] = dirList.getPartialListing();
    for (HdfsFileStatus f : files) {
      recoverAllLeases(dfs, f.getFullPath(path));
    }
    prev = dirList.getLastName();
  } while (dirList.hasMore());
}
 
Example #5
Source File: RpcProgramNfs3.java    From big-c with Apache License 2.0
/**
 * Used by readdir and readdirplus to get dirents. It retries the listing if
 * the startAfter entry can no longer be found.
 */
private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath,
    byte[] startAfter) throws IOException {
  DirectoryListing dlisting;
  try {
    dlisting = dfsClient.listPaths(dirFileIdPath, startAfter);
  } catch (RemoteException e) {
    IOException io = e.unwrapRemoteException();
    if (!(io instanceof DirectoryListingStartAfterNotFoundException)) {
      throw io;
    }
    // This happens when startAfter was just deleted
    LOG.info("Cookie couldn't be found: "
        + new String(startAfter, Charset.forName("UTF-8"))
        + ", do listing from beginning");
    dlisting = dfsClient
        .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME);
  }
  return dlisting;
}
 
Example #6
Source File: TestDFSUpgradeFromImage.java    From hadoop with Apache License 2.0
static void recoverAllLeases(DFSClient dfs, 
    Path path) throws IOException {
  String pathStr = path.toString();
  HdfsFileStatus status = dfs.getFileInfo(pathStr);
  if (!status.isDir()) {
    dfs.recoverLease(pathStr);
    return;
  }
  byte prev[] = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing dirList;
  do {
    dirList = dfs.listPaths(pathStr, prev);
    HdfsFileStatus files[] = dirList.getPartialListing();
    for (HdfsFileStatus f : files) {
      recoverAllLeases(dfs, f.getFullPath(path));
    }
    prev = dirList.getLastName();
  } while (dirList.hasMore());
}
 
Example #7
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  GetListingRequestProto req = GetListingRequestProto.newBuilder()
      .setSrc(src)
      .setStartAfter(ByteString.copyFrom(startAfter))
      .setNeedLocation(needLocation).build();
  try {
    GetListingResponseProto result = rpcProxy.getListing(null, req);
    
    if (result.hasDirList()) {
      return PBHelper.convert(result.getDirList());
    }
    return null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #8
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public GetListingResponseProto getListing(RpcController controller,
    GetListingRequestProto req) throws ServiceException {
  try {
    DirectoryListing result = server.getListing(
        req.getSrc(), req.getStartAfter().toByteArray(),
        req.getNeedLocation());
    if (result != null) {
      return GetListingResponseProto.newBuilder().setDirList(
        PBHelper.convert(result)).build();
    } else {
      return VOID_GETLISTING_RESPONSE;
    }
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example #9
Source File: TerrapinUtil.java    From terrapin with Apache License 2.0
/**
 * Retrieves the list of files under {@code hdfsDir} using {@code hdfsClient}.
 */
public static List<HdfsFileStatus> getHdfsFileList(DFSClient hdfsClient,
                                                   String hdfsDir)
    throws IOException {
  List<HdfsFileStatus> fileList = Lists.newArrayList();
  // Build a list of files.
  DirectoryListing listing = null;
  // Note: the continuation cookie is round-tripped through a String using the
  // platform default charset; carrying the raw getLastName() bytes instead
  // avoids re-encoding issues with non-ASCII names.
  String continuation = "";
  while (true) {
    listing = hdfsClient.listPaths(hdfsDir, continuation.getBytes());
    for (HdfsFileStatus fileStatus : listing.getPartialListing()) {
      fileList.add(fileStatus);
    }
    // Go through the listing and paginate.
    if (!listing.hasMore()) {
      break;
    } else {
      continuation = new String(listing.getLastName());
    }
  }
  return fileList;
}
 
Example #10
Source File: FSDirStatAndListingOp.java    From big-c with Apache License 2.0
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    byte[] startAfter, boolean needLocation) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(srcArg);
  final String startAfterString = new String(startAfter, Charsets.UTF_8);
  final String src = fsd.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, true);

  // Get file name when startAfter is an INodePath
  if (FSDirectory.isReservedName(startAfterString)) {
    byte[][] startAfterComponents = FSDirectory
        .getPathComponentsForReservedPath(startAfterString);
    try {
      String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
      byte[][] regularPath = INode.getPathComponents(tmp);
      startAfter = regularPath[regularPath.length - 1];
    } catch (IOException e) {
      // Possibly the inode is deleted
      throw new DirectoryListingStartAfterNotFoundException(
          "Can't find startAfter " + startAfterString);
    }
  }

  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
    } else {
      fsd.checkTraverse(pc, iip);
    }
    isSuperUser = pc.isSuperUser();
  }
  return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
 
Example #11
Source File: TestStorageMover.java    From big-c with Apache License 2.0
void verifyFile(final Path file, final Byte expectedPolicyId)
    throws Exception {
  final Path parent = file.getParent();
  DirectoryListing children = dfs.getClient().listPaths(
      parent.toString(), HdfsFileStatus.EMPTY_NAME, true);
  for (HdfsFileStatus child : children.getPartialListing()) {
    if (child.getLocalName().equals(file.getName())) {
      verifyFile(parent, child, expectedPolicyId);
      return;
    }
  }
  Assert.fail("File " + file + " not found.");
}
 
Example #12
Source File: TestStorageMover.java    From big-c with Apache License 2.0
private void verifyRecursively(final Path parent,
    final HdfsFileStatus status) throws Exception {
  if (status.isDir()) {
    Path fullPath = parent == null ?
        new Path("/") : status.getFullPath(parent);
    DirectoryListing children = dfs.getClient().listPaths(
        fullPath.toString(), HdfsFileStatus.EMPTY_NAME, true);
    for (HdfsFileStatus child : children.getPartialListing()) {
      verifyRecursively(fullPath, child);
    }
  } else if (!status.isSymlink()) { // is file
    verifyFile(parent, status, null);
  }
}
 
Example #13
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Get a partial listing of the indicated directory.
 *
 * It is recommended to use HdfsFileStatus.EMPTY_NAME as startAfter
 * if the application wants to fetch a listing starting from
 * the first entry in the directory.
 *
 * @see ClientProtocol#getListing(String, byte[], boolean)
 */
public DirectoryListing listPaths(String src, byte[] startAfter,
    boolean needLocation) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("listPaths", src);
  try {
    return namenode.getListing(src, startAfter, needLocation);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
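A brief usage sketch of the method above, assuming an open DFSClient named client and a hypothetical /data directory: as the javadoc recommends, the listing starts from HdfsFileStatus.EMPTY_NAME, and passing needLocation as true asks the NameNode to attach block locations to each entry.

// Hedged usage sketch for listPaths(src, startAfter, needLocation).
DirectoryListing page = client.listPaths("/data", HdfsFileStatus.EMPTY_NAME, true);
for (HdfsFileStatus status : page.getPartialListing()) {
  // With needLocation == true the entries carry block locations
  // (HdfsLocatedFileStatus; see the PBHelper.convert examples below).
  System.out.println(status.getLocalName() + " len=" + status.getLen());
}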
 
Example #14
Source File: PBHelper.java    From big-c with Apache License 2.0
public static DirectoryListingProto convert(DirectoryListing d) {
  if (d == null)
    return null;
  return DirectoryListingProto.newBuilder().
      addAllPartialListing(Arrays.asList(
          PBHelper.convert(d.getPartialListing()))).
      setRemainingEntries(d.getRemainingEntries()).
      build();
}
 
Example #15
Source File: PBHelper.java    From big-c with Apache License 2.0
public static DirectoryListing convert(DirectoryListingProto dl) {
  if (dl == null)
    return null;
  List<HdfsFileStatusProto> partList = dl.getPartialListingList();
  return new DirectoryListing( 
      partList.isEmpty() ? new HdfsLocatedFileStatus[0] 
        : PBHelper.convert(
            partList.toArray(new HdfsFileStatusProto[partList.size()])),
      dl.getRemainingEntries());
}
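Together with the previous example this forms a protobuf round trip. A small sanity sketch with made-up values, assuming the conversions behave as shown above:

// Hypothetical round trip: DirectoryListing -> proto -> DirectoryListing.
DirectoryListing original = new DirectoryListing(new HdfsFileStatus[0], 7);
DirectoryListingProto proto = PBHelper.convert(original);
DirectoryListing copy = PBHelper.convert(proto);
assert copy.getRemainingEntries() == 7; // the remaining-entry count survives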
 
Example #16
Source File: NameNodeRpcServer.java    From big-c with Apache License 2.0
@Override // ClientProtocol
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws IOException {
  checkNNStartup();
  DirectoryListing files = namesystem.getListing(
      src, startAfter, needLocation);
  if (files != null) {
    metrics.incrGetListingOps();
    metrics.incrFilesInGetListingOps(files.getPartialListing().length);
  }
  return files;
}
 
Example #17
Source File: NamenodeWebHdfsMethods.java    From big-c with Apache License 2.0
private static DirectoryListing getDirectoryListing(final NamenodeProtocols np,
    final String p, byte[] startAfter) throws IOException {
  final DirectoryListing listing = np.getListing(p, startAfter, false);
  if (listing == null) { // the directory does not exist
    throw new FileNotFoundException("File " + p + " does not exist.");
  }
  return listing;
}
 
Example #18
Source File: FSDirStatAndListingOp.java    From big-c with Apache License 2.0
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
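The skipSize computation above decodes the result of ReadOnlyList.Util.binarySearch: a non-negative index means startAfter was found, so the listing resumes after it (index + 1), while a negative value encodes the insertion point as -(insertionPoint) - 1, which -skipSize - 1 recovers. A tiny illustration with made-up values:

// Decoding binarySearch results into a skip count (hypothetical values).
int hit = 2;                // startAfter found at index 2
int skipOnHit = hit + 1;    // resume after the match -> skip 3 entries
int miss = -4;              // not found; insertion point 3 encoded as -(3) - 1
int skipOnMiss = -miss - 1; // decode -> skip 3 entries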
 
Example #19
Source File: FSDirStatAndListingOp.java    From hadoop with Apache License 2.0
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
    byte[] startAfter, boolean needLocation) throws IOException {
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory
      .getPathComponentsForReservedPath(srcArg);
  final String startAfterString = new String(startAfter, Charsets.UTF_8);
  final String src = fsd.resolvePath(pc, srcArg, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, true);

  // Get file name when startAfter is an INodePath
  if (FSDirectory.isReservedName(startAfterString)) {
    byte[][] startAfterComponents = FSDirectory
        .getPathComponentsForReservedPath(startAfterString);
    try {
      String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
      byte[][] regularPath = INode.getPathComponents(tmp);
      startAfter = regularPath[regularPath.length - 1];
    } catch (IOException e) {
      // Possibly the inode is deleted
      throw new DirectoryListingStartAfterNotFoundException(
          "Can't find startAfter " + startAfterString);
    }
  }

  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
    } else {
      fsd.checkTraverse(pc, iip);
    }
    isSuperUser = pc.isSuperUser();
  }
  return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
 
Example #20
Source File: NameNodeRpcServer.java    From hadoop with Apache License 2.0
@Override // ClientProtocol
public DirectoryListing getListing(String src, byte[] startAfter,
    boolean needLocation) throws IOException {
  checkNNStartup();
  DirectoryListing files = namesystem.getListing(
      src, startAfter, needLocation);
  if (files != null) {
    metrics.incrGetListingOps();
    metrics.incrFilesInGetListingOps(files.getPartialListing().length);
  }
  return files;
}
 
Example #21
Source File: DFSClient.java    From big-c with Apache License 2.0
/**
 * Get a partial listing of the indicated directory.
 *
 * It is recommended to use HdfsFileStatus.EMPTY_NAME as startAfter
 * if the application wants to fetch a listing starting from
 * the first entry in the directory.
 *
 * @see ClientProtocol#getListing(String, byte[], boolean)
 */
public DirectoryListing listPaths(String src, byte[] startAfter,
    boolean needLocation) throws IOException {
  checkOpen();
  TraceScope scope = getPathTraceScope("listPaths", src);
  try {
    return namenode.getListing(src, startAfter, needLocation);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example #22
Source File: DistributedAvatarFileSystem.java    From RDFS with Apache License 2.0
public DirectoryListing getPartialListing(final String src,
    final byte[] startAfter) throws IOException {
  return (new ImmutableFSCaller<DirectoryListing>() {
    DirectoryListing call() throws IOException {
      return namenode.getPartialListing(src, startAfter);
    }
  }).callFS();
}
 
Example #23
Source File: FSDirStatAndListingOp.java    From hadoop with Apache License 2.0
/**
 * Get a listing of all the snapshots of a snapshottable directory
 */
private static DirectoryListing getSnapshotsListing(
    FSDirectory fsd, String src, byte[] startAfter)
    throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(
      src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
      "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

  final String dirPath = FSDirectory.normalizePath(src.substring(0,
      src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

  final INode node = fsd.getINode(dirPath);
  final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException(
        "Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
  int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
  final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
  for (int i = 0; i < numOfListing; i++) {
    Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
    listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
        BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
        false, INodesInPath.fromINode(sRoot));
  }
  return new DirectoryListing(
      listing, snapshots.size() - skipSize - numOfListing);
}
 
Example #24
Source File: HdfsManagerTest.java    From terrapin with Apache License 2.0
private void setupBaseDirListing(List<String> fileSets) throws IOException {
  HdfsFileStatus[] fsStatusList = new HdfsFileStatus[fileSets.size() + 1];
  fsStatusList[0] = buildHdfsStatus(Constants.HDFS_DATA_DIR + "/_distcp_XcndjkA", true, null);
  int i = 1;
  for (String fileSet : fileSets) {
    fsStatusList[i++] = buildHdfsStatus(Constants.HDFS_DATA_DIR + "/" + fileSet, true, null);
  }
  when(mockDfsClient.listPaths(eq(Constants.HDFS_DATA_DIR), any(byte[].class))).thenReturn(
          new DirectoryListing(fsStatusList, 0));
}
 
Example #25
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
void verifyFile(final Path file, final Byte expectedPolicyId)
    throws Exception {
  final Path parent = file.getParent();
  DirectoryListing children = dfs.getClient().listPaths(
      parent.toString(), HdfsFileStatus.EMPTY_NAME, true);
  for (HdfsFileStatus child : children.getPartialListing()) {
    if (child.getLocalName().equals(file.getName())) {
      verifyFile(parent, child, expectedPolicyId);
      return;
    }
  }
  Assert.fail("File " + file + " not found.");
}
 
Example #26
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
private void verifyRecursively(final Path parent,
    final HdfsFileStatus status) throws Exception {
  if (status.isDir()) {
    Path fullPath = parent == null ?
        new Path("/") : status.getFullPath(parent);
    DirectoryListing children = dfs.getClient().listPaths(
        fullPath.toString(), HdfsFileStatus.EMPTY_NAME, true);
    for (HdfsFileStatus child : children.getPartialListing()) {
      verifyRecursively(fullPath, child);
    }
  } else if (!status.isSymlink()) { // is file
    verifyFile(parent, status, null);
  }
}
 
Example #27
Source File: NamenodeWebHdfsMethods.java    From hadoop with Apache License 2.0
private static DirectoryListing getDirectoryListing(final NamenodeProtocols np,
    final String p, byte[] startAfter) throws IOException {
  final DirectoryListing listing = np.getListing(p, startAfter, false);
  if (listing == null) { // the directory does not exist
    throw new FileNotFoundException("File " + p + " does not exist.");
  }
  return listing;
}
 
Example #28
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static DirectoryListingProto convert(DirectoryListing d) {
  if (d == null)
    return null;
  return DirectoryListingProto.newBuilder().
      addAllPartialListing(Arrays.asList(
          PBHelper.convert(d.getPartialListing()))).
      setRemainingEntries(d.getRemainingEntries()).
      build();
}
 
Example #29
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static DirectoryListing convert(DirectoryListingProto dl) {
  if (dl == null)
    return null;
  List<HdfsFileStatusProto> partList = dl.getPartialListingList();
  return new DirectoryListing( 
      partList.isEmpty() ? new HdfsLocatedFileStatus[0] 
        : PBHelper.convert(
            partList.toArray(new HdfsFileStatusProto[partList.size()])),
      dl.getRemainingEntries());
}
 
Example #30
Source File: ControllerUtilTest.java    From terrapin with Apache License 2.0
public void testBuildIdealStateForHdfsDirHelper(boolean zkCompression,
                                                int numPartitions) throws Exception {
  String hdfsDir = Constants.HDFS_DATA_DIR + "/fileset";
  DFSClient dfsClient = mock(DFSClient.class);

  // Create two hosts in the cluster.
  List<BlockLocation> locations = ImmutableList.of(
          new BlockLocation(new String[]{"host1", "host2"},
                            new String[]{"host1", "host2"}, 0, 0));
  HdfsFileStatus[] fileStatuses = new HdfsFileStatus[numPartitions];
  for (int i = 0; i < numPartitions; ++i) {
    fileStatuses[i] = PowerMockito.mock(HdfsFileStatus.class);
    String localName = TerrapinUtil.formatPartitionName(i);
    when(fileStatuses[i].getLocalName()).thenReturn(localName);
    when(fileStatuses[i].getFullName(eq(hdfsDir))).thenReturn(hdfsDir + "/" + localName);
    when(fileStatuses[i].getLen()).thenReturn(1000L);
    BlockLocation[] locationArray = new BlockLocation[1];
    locations.subList(0, 1).toArray(locationArray);
    when(dfsClient.getBlockLocations(eq(fileStatuses[i].getFullName(hdfsDir)),
        anyLong(), anyLong())).thenReturn(locationArray);
  }

  when(dfsClient.listPaths(eq(hdfsDir), any(byte[].class))).thenReturn(new DirectoryListing(
      fileStatuses, 0));

  IdealState is = ControllerUtil.buildIdealStateForHdfsDir(dfsClient, hdfsDir, "resource",
      PartitionerType.CASCADING, 2, zkCompression);

  assertEquals(numPartitions, is.getNumPartitions());
  assertEquals("resource", is.getResourceName());
  for (int i = 0; i < numPartitions; ++i) {
    String partition;
    if (numPartitions > 1000 && !zkCompression) {
      partition = "resource_" + i;
    } else {
      partition = "resource$" + i;
    }
    assertEquals(Sets.newHashSet("host1", "host2"), is.getInstanceSet(partition));
  }
  assertEquals("OnlineOffline", is.getStateModelDefRef());
  if (zkCompression) {
    assertTrue(is.getRecord().getBooleanField("enableCompression", false));
  }
  assertEquals(IdealState.RebalanceMode.CUSTOMIZED, is.getRebalanceMode());
}