org.apache.hadoop.hdfs.protocol.CachePoolEntry Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.CachePoolEntry. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Lists all cache pools through the DFS client while failing over between
 * the two NameNodes on every other iteration, verifying each expected pool
 * name is returned exactly once despite the HA transitions.
 *
 * @param poolNames the set of pool names expected from the listing
 * @param active    index of the currently active NameNode (0 or 1)
 * @throws Exception if a listing or failover operation fails
 */
private void listCachePools(
    HashSet<String> poolNames, int active) throws Exception {
  // Copy constructor instead of clone(): idiomatic and needs no
  // unchecked cast, so the @SuppressWarnings annotation can go.
  HashSet<String> remaining = new HashSet<String>(poolNames);
  RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
  int poolCount = poolNames.size();
  for (int i = 0; i < poolCount; i++) {
    CachePoolEntry pool = pools.next();
    // Fixed local-variable typo: "pollName" -> "poolName".
    String poolName = pool.getInfo().getPoolName();
    assertTrue("The pool name should be expected", remaining.remove(poolName));
    if (i % 2 == 0) {
      // Swap active/standby mid-iteration to exercise the retry cache.
      int standby = active;
      active = (standby == 0) ? 1 : 0;
      cluster.transitionToStandby(standby);
      cluster.transitionToActive(active);
      cluster.waitActive(active);
    }
  }
  assertTrue("All pools must be found", remaining.isEmpty());
}
 
Example #2
Source File: CacheManager.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Returns one batch of cache pool entries for a paged listing, starting
 * strictly after {@code prevKey} and capped at maxListCachePoolsResponses.
 *
 * @param pc permission checker used to redact entries, or null
 * @param prevKey name of the last pool returned by the previous batch
 * @return the batch plus a flag indicating whether more pools remain
 */
public BatchedListEntries<CachePoolEntry>
    listCachePools(FSPermissionChecker pc, String prevKey) {
  assert namesystem.hasReadLock();
  final int initialCapacity = 16;
  ArrayList<CachePoolEntry> batch =
      new ArrayList<CachePoolEntry>(initialCapacity);
  // View of the pools whose names sort strictly after prevKey.
  SortedMap<String, CachePool> remainingPools =
      cachePools.tailMap(prevKey, false);
  int listed = 0;
  for (Entry<String, CachePool> mapping : remainingPools.entrySet()) {
    if (listed >= maxListCachePoolsResponses) {
      // Batch is full; signal that the client should ask again.
      return new BatchedListEntries<CachePoolEntry>(batch, true);
    }
    listed++;
    batch.add(mapping.getValue().getEntry(pc));
  }
  return new BatchedListEntries<CachePoolEntry>(batch, false);
}
 
Example #3
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Server-side RPC handler for ListCachePools: delegates to the server and
 * converts each returned entry into its protobuf representation.
 */
@Override
public ListCachePoolsResponseProto listCachePools(RpcController controller,
    ListCachePoolsRequestProto request) throws ServiceException {
  try {
    BatchedEntries<CachePoolEntry> batch =
        server.listCachePools(request.getPrevPoolName());
    ListCachePoolsResponseProto.Builder builder =
        ListCachePoolsResponseProto.newBuilder();
    builder.setHasMore(batch.hasMore());
    int total = batch.size();
    for (int idx = 0; idx < total; idx++) {
      builder.addEntries(PBHelper.convert(batch.get(idx)));
    }
    return builder.build();
  } catch (IOException e) {
    // RPC layer expects ServiceException wrapping the server failure.
    throw new ServiceException(e);
  }
}
 
Example #4
Source File: ClientNamenodeProtocolServerSideTranslatorPB.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * RPC translator for ListCachePools: forwards the request to the server and
 * copies every entry of the resulting batch into the protobuf response.
 */
@Override
public ListCachePoolsResponseProto listCachePools(RpcController controller,
    ListCachePoolsRequestProto request) throws ServiceException {
  try {
    BatchedEntries<CachePoolEntry> entries =
        server.listCachePools(request.getPrevPoolName());
    ListCachePoolsResponseProto.Builder response =
        ListCachePoolsResponseProto.newBuilder();
    response.setHasMore(entries.hasMore());
    for (int i = 0; i < entries.size(); i++) {
      response.addEntries(PBHelper.convert(entries.get(i)));
    }
    return response.build();
  } catch (IOException e) {
    // Wrap server-side IO failures for the RPC framework.
    throw new ServiceException(e);
  }
}
 
Example #5
Source File: CacheManager.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Produces the next batch of cache pool entries after {@code prevKey}.
 * At most maxListCachePoolsResponses entries are returned per call; each
 * entry is redacted according to the caller's permissions.
 *
 * @param pc permission checker for the calling user, or null
 * @param prevKey the last pool name seen by the client
 * @return a batch of entries together with a has-more indicator
 */
public BatchedListEntries<CachePoolEntry>
    listCachePools(FSPermissionChecker pc, String prevKey) {
  assert namesystem.hasReadLock();
  // Pre-size for the common case of small pool counts.
  ArrayList<CachePoolEntry> results = new ArrayList<CachePoolEntry>(16);
  int count = 0;
  for (Entry<String, CachePool> cur :
      cachePools.tailMap(prevKey, false).entrySet()) {
    if (count++ >= maxListCachePoolsResponses) {
      // Stop here and tell the client another round trip is needed.
      return new BatchedListEntries<CachePoolEntry>(results, true);
    }
    results.add(cur.getValue().getEntry(pc));
  }
  return new BatchedListEntries<CachePoolEntry>(results, false);
}
 
Example #6
Source File: CacheRegistry.java    From nnproxy with Apache License 2.0 6 votes vote down vote up
/**
 * Fetches every cache pool from the upstream NameNode by paging through
 * listCachePools with the last-seen pool name until an empty batch comes
 * back.
 *
 * @param upstream upstream NameNode handle providing the client protocol
 * @return all cache pool entries, in listing order
 * @throws IOException if an RPC to the upstream fails
 */
List<CachePoolEntry> getAllCachePools(UpstreamManager.Upstream upstream) throws IOException {
    List<CachePoolEntry> all = new ArrayList<>();
    String lastSeen = "";
    for (;;) {
        BatchedRemoteIterator.BatchedEntries<CachePoolEntry> batch =
                upstream.protocol.listCachePools(lastSeen);
        int n = batch.size();
        if (n == 0) {
            // Empty batch means the previous page was the last one.
            break;
        }
        for (int i = 0; i < n; i++) {
            CachePoolEntry entry = batch.get(i);
            // Advance the paging cursor to the latest pool name seen.
            lastSeen = entry.getInfo().getPoolName();
            all.add(entry);
        }
    }
    return all;
}
 
Example #7
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Iterates over all cache pools via the DFS client, triggering a NameNode
 * failover every other pool, and asserts that exactly the expected pool
 * names are observed.
 *
 * @param poolNames the pool names that must all appear in the listing
 * @param active    index of the currently active NameNode (0 or 1)
 * @throws Exception on listing or failover errors
 */
private void listCachePools(
    HashSet<String> poolNames, int active) throws Exception {
  // HashSet copy constructor replaces clone(): no unchecked cast, so
  // the @SuppressWarnings annotation is no longer needed.
  HashSet<String> unseen = new HashSet<String>(poolNames);
  RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
  int poolCount = poolNames.size();
  for (int i = 0; i < poolCount; i++) {
    CachePoolEntry pool = pools.next();
    // Typo fixed: local was misspelled "pollName".
    String poolName = pool.getInfo().getPoolName();
    assertTrue("The pool name should be expected", unseen.remove(poolName));
    if (i % 2 == 0) {
      // Flip active/standby to exercise retry-cache behavior across HA.
      int standby = active;
      active = (standby == 0) ? 1 : 0;
      cluster.transitionToStandby(standby);
      cluster.transitionToActive(active);
      cluster.waitActive(active);
    }
  }
  assertTrue("All pools must be found", unseen.isEmpty());
}
 
Example #8
Source File: CacheRegistry.java    From nnproxy with Apache License 2.0 5 votes vote down vote up
/**
 * Returns one page of the cached pool entries, containing the pools whose
 * names sort strictly after {@code prevKey}, capped at
 * maxListCachePoolsResponses.
 *
 * @param prevKey name of the last pool from the previous page
 * @return the page plus a flag saying whether more pools remain
 */
public BatchedRemoteIterator.BatchedListEntries<CachePoolEntry> listCachePools(String prevKey) {
    final int initialCapacity = 16;
    ArrayList<CachePoolEntry> page =
            new ArrayList<CachePoolEntry>(initialCapacity);
    // Everything after the client's paging cursor, in sorted order.
    SortedMap<String, CachePoolEntry> remaining = cachePools.tailMap(prevKey, false);
    int seen = 0;
    for (Map.Entry<String, CachePoolEntry> entry : remaining.entrySet()) {
        if (seen++ >= maxListCachePoolsResponses) {
            // Page limit reached; more data is available upstream.
            return new BatchedRemoteIterator.BatchedListEntries<>(page, true);
        }
        page.add(entry.getValue());
    }
    return new BatchedRemoteIterator.BatchedListEntries<>(page, false);
}
 
Example #9
Source File: TestCacheDirectives.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that listCachePools redacts pool details (owner, group, mode,
 * limit) for a user without READ permission on the pool, and reveals the
 * full details once that user is made the pool's owner.
 */
@Test(timeout=60000)
public void testListCachePoolPermissions() throws Exception {
  final UserGroupInformation myUser = UserGroupInformation
      .createRemoteUser("myuser");
  // A second DFS handle that acts as the unprivileged "myuser".
  final DistributedFileSystem myDfs = 
      (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
  final String poolName = "poolparty";
  // Mode 0700: only the creating user can read the pool.
  dfs.addCachePool(new CachePoolInfo(poolName)
      .setMode(new FsPermission((short)0700)));
  // Should only see partial info
  RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
  CachePoolInfo info = it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name", poolName, info.getPoolName());
  // All privileged fields must be redacted for the non-owner.
  assertNull("Unexpected owner name", info.getOwnerName());
  assertNull("Unexpected group name", info.getGroupName());
  assertNull("Unexpected mode", info.getMode());
  assertNull("Unexpected limit", info.getLimit());
  // Modify the pool so myuser is now the owner
  final long limit = 99;
  dfs.modifyCachePool(new CachePoolInfo(poolName)
      .setOwnerName(myUser.getShortUserName())
      .setLimit(limit));
  // Should see full info
  it = myDfs.listCachePools();
  info = it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name", poolName, info.getPoolName());
  assertEquals("Mismatched owner name", myUser.getShortUserName(),
      info.getOwnerName());
  assertNotNull("Expected group name", info.getGroupName());
  assertEquals("Mismatched mode", (short) 0700,
      info.getMode().toShort());
  assertEquals("Mismatched limit", limit, (long)info.getLimit());
}
 
Example #10
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Polls the NameNode until the cache pool listing is empty (the pool
 * removal has taken effect), giving up after CHECKTIMES attempts.
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  int attempts = 0;
  while (attempts < CHECKTIMES) {
    if (!dfs.listCachePools().hasNext()) {
      return true;
    }
    attempts++;
    // Give the NameNode a moment before the next poll.
    Thread.sleep(1000);
  }
  return false;
}
 
Example #11
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Polls the NameNode until the first listed cache pool reports the
 * expected limit of 99, giving up after CHECKTIMES attempts.
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int attempt = 0; attempt < CHECKTIMES; attempt++) {
    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    // Limit 99 is the value the modifyCachePool operation set.
    if (pools.hasNext() && 99 == pools.next().getInfo().getLimit()) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
 
Example #12
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Polls the NameNode until at least one cache pool is visible (the pool
 * add has taken effect), giving up after CHECKTIMES attempts.
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  int attempt = 0;
  do {
    if (dfs.listCachePools().hasNext()) {
      return true;
    }
    // Pause before retrying the listing.
    Thread.sleep(1000);
  } while (++attempt < CHECKTIMES);
  return false;
}
 
Example #13
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Client-side translator for listCachePools: builds the protobuf request,
 * issues the RPC, and converts remote failures back into IOExceptions.
 */
@Override
public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
    throws IOException {
  // Request construction cannot throw ServiceException, so it sits
  // outside the try block.
  ListCachePoolsRequestProto request = ListCachePoolsRequestProto
      .newBuilder()
      .setPrevPoolName(prevKey)
      .build();
  try {
    return new BatchedCachePoolEntries(rpcProxy.listCachePools(null, request));
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #14
Source File: CachePool.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Builds the caller-visible CachePoolEntry for this pool. Callers without
 * READ permission receive redacted pool info and empty statistics.
 *
 * @param pc permission checker for the calling user, or null to skip
 *          the permission check entirely
 * @return CachePoolEntry describing this pool at the caller's privilege
 *         level
 */
public CachePoolEntry getEntry(FSPermissionChecker pc) {
  // A null checker means "trusted caller": full visibility.
  boolean readable = (pc == null);
  if (!readable) {
    try {
      pc.checkPermission(this, FsAction.READ);
      readable = true;
    } catch (AccessControlException e) {
      readable = false;
    }
  }
  // Unprivileged callers see zeroed stats rather than real usage.
  CachePoolStats stats =
      readable ? getStats() : new CachePoolStats.Builder().build();
  return new CachePoolEntry(getInfo(readable), stats);
}
 
Example #15
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Checks the permission-based redaction of cache pool listings: a user
 * without READ access sees only the pool name; after being made owner,
 * the same user sees owner, group, mode, and limit.
 */
@Test(timeout=60000)
public void testListCachePoolPermissions() throws Exception {
  final UserGroupInformation myUser = UserGroupInformation
      .createRemoteUser("myuser");
  // Separate filesystem handle authenticated as the unprivileged user.
  final DistributedFileSystem myDfs = 
      (DistributedFileSystem)DFSTestUtil.getFileSystemAs(myUser, conf);
  final String poolName = "poolparty";
  // 0700 mode: readable only by the pool's owner.
  dfs.addCachePool(new CachePoolInfo(poolName)
      .setMode(new FsPermission((short)0700)));
  // Should only see partial info
  RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
  CachePoolInfo info = it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name", poolName, info.getPoolName());
  // Sensitive fields are null for a caller who cannot read the pool.
  assertNull("Unexpected owner name", info.getOwnerName());
  assertNull("Unexpected group name", info.getGroupName());
  assertNull("Unexpected mode", info.getMode());
  assertNull("Unexpected limit", info.getLimit());
  // Modify the pool so myuser is now the owner
  final long limit = 99;
  dfs.modifyCachePool(new CachePoolInfo(poolName)
      .setOwnerName(myUser.getShortUserName())
      .setLimit(limit));
  // Should see full info
  it = myDfs.listCachePools();
  info = it.next().getInfo();
  assertFalse(it.hasNext());
  assertEquals("Expected pool name", poolName, info.getPoolName());
  assertEquals("Mismatched owner name", myUser.getShortUserName(),
      info.getOwnerName());
  assertNotNull("Expected group name", info.getGroupName());
  assertEquals("Mismatched mode", (short) 0700,
      info.getMode().toShort());
  assertEquals("Mismatched limit", limit, (long)info.getLimit());
}
 
Example #16
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Waits until the single listed cache pool reports the expected limit of
 * 99, polling up to CHECKTIMES times before giving up.
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  int tries = CHECKTIMES;
  while (tries-- > 0) {
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    // 99 is the limit set by the modifyCachePool under test.
    if (it.hasNext() && it.next().getInfo().getLimit() == 99) {
      return true;
    }
    Thread.sleep(1000);
  }
  return false;
}
 
Example #17
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Waits until the cache pool listing is non-empty (the addCachePool under
 * test has taken effect), polling up to CHECKTIMES times.
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int remaining = CHECKTIMES; remaining > 0; remaining--) {
    if (dfs.listCachePools().hasNext()) {
      return true;
    }
    // Back off before the next poll.
    Thread.sleep(1000);
  }
  return false;
}
 
Example #18
Source File: CachePool.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a CachePoolEntry describing this pool, redacted according to the
 * caller's permissions: callers lacking READ access get minimal info and
 * empty stats.
 *
 * @param pc permission checker to validate the user's access, or null to
 *          bypass the check
 * @return entry for this pool at the caller's privilege level
 */
public CachePoolEntry getEntry(FSPermissionChecker pc) {
  boolean canRead = true;
  if (pc != null) {
    try {
      pc.checkPermission(this, FsAction.READ);
    } catch (AccessControlException e) {
      // Denied: fall back to the redacted view below.
      canRead = false;
    }
  }
  // Without READ access the caller gets freshly-built empty stats.
  CachePoolStats visibleStats =
      canRead ? getStats() : new CachePoolStats.Builder().build();
  return new CachePoolEntry(getInfo(canRead), visibleStats);
}
 
Example #19
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Translates listCachePools into the protobuf RPC: sends the previous
 * pool name as the paging cursor and unwraps remote ServiceExceptions.
 */
@Override
public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
    throws IOException {
  ListCachePoolsRequestProto.Builder req =
      ListCachePoolsRequestProto.newBuilder();
  req.setPrevPoolName(prevKey);
  try {
    return new BatchedCachePoolEntries(
        rpcProxy.listCachePools(null, req.build()));
  } catch (ServiceException e) {
    // Surface the underlying remote IOException to the caller.
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #20
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Waits for the cache pool listing to become empty (the pool removal has
 * been applied on the NameNode), polling up to CHECKTIMES times.
 */
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  for (int i = 1; i <= CHECKTIMES; i++) {
    RemoteIterator<CachePoolEntry> remaining = dfs.listCachePools();
    if (!remaining.hasNext()) {
      return true;
    }
    // Pause between polls to let the removal propagate.
    Thread.sleep(1000);
  }
  return false;
}
 
Example #21
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the i-th entry of the batched response, converted from its
 * protobuf form.
 */
@Override
public CachePoolEntry get(int i) {
  return PBHelper.convert(proto.getEntries(i));
}
 
Example #22
Source File: FSNDNCacheOp.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Resolves the caller's permission checker from the namesystem and
 * delegates the cache pool listing to the CacheManager.
 */
static BatchedListEntries<CachePoolEntry> listCachePools(
    FSNamesystem fsn, CacheManager cacheManager, String prevKey)
    throws IOException {
  return cacheManager.listCachePools(getFsPermissionChecker(fsn), prevKey);
}
 
Example #23
Source File: NameNodeRpcServer.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * ClientProtocol entry point for listing cache pools; a null continuation
 * key is normalized to "" (start from the beginning).
 */
@Override // ClientProtocol
public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
    throws IOException {
  checkNNStartup();
  final String startAfter = (prevKey == null) ? "" : prevKey;
  return namesystem.listCachePools(startAfter);
}
 
Example #24
Source File: CacheAdmin.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Executes the -listPools command: prints a table of cache pools —
 * optionally with per-pool statistics (-stats), optionally restricted to
 * a single named pool.
 *
 * @param conf cluster configuration used to reach the DFS
 * @param args remaining command-line arguments
 * @return 0 on success, 1 on bad arguments, 2 on an RPC failure
 */
@Override
public int run(Configuration conf, List<String> args) throws IOException {
  String poolFilter = StringUtils.popFirstNonOption(args);
  final boolean withStats = StringUtils.popOption("-stats", args);
  if (!args.isEmpty()) {
    System.err.print("Can't understand arguments: " +
      Joiner.on(" ").join(args) + "\n");
    System.err.println("Usage is " + getShortUsage());
    return 1;
  }
  DistributedFileSystem dfs = AdminHelper.getDFS(conf);
  TableListing.Builder tableBuilder = new TableListing.Builder().
      addField("NAME", Justification.LEFT).
      addField("OWNER", Justification.LEFT).
      addField("GROUP", Justification.LEFT).
      addField("MODE", Justification.LEFT).
      addField("LIMIT", Justification.RIGHT).
      addField("MAXTTL", Justification.RIGHT);
  if (withStats) {
    // Statistics columns are only shown when -stats was passed.
    tableBuilder.
        addField("BYTES_NEEDED", Justification.RIGHT).
        addField("BYTES_CACHED", Justification.RIGHT).
        addField("BYTES_OVERLIMIT", Justification.RIGHT).
        addField("FILES_NEEDED", Justification.RIGHT).
        addField("FILES_CACHED", Justification.RIGHT);
  }
  TableListing table = tableBuilder.build();
  int matches = 0;
  try {
    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    while (pools.hasNext()) {
      CachePoolEntry entry = pools.next();
      CachePoolInfo info = entry.getInfo();
      // Skip pools that don't match the optional name filter.
      if (poolFilter != null && !info.getPoolName().equals(poolFilter)) {
        continue;
      }
      LinkedList<String> row = new LinkedList<String>();
      row.add(info.getPoolName());
      row.add(info.getOwnerName());
      row.add(info.getGroupName());
      row.add(info.getMode() != null ? info.getMode().toString() : null);
      Long limit = info.getLimit();
      String limitText;
      if (limit != null && limit.equals(CachePoolInfo.LIMIT_UNLIMITED)) {
        limitText = "unlimited";
      } else {
        limitText = "" + limit;
      }
      row.add(limitText);
      Long maxTtl = info.getMaxRelativeExpiryMs();
      String maxTtlText = null;
      if (maxTtl != null) {
        maxTtlText = (maxTtl == CachePoolInfo.RELATIVE_EXPIRY_NEVER)
            ? "never"
            : DFSUtil.durationToString(maxTtl);
      }
      row.add(maxTtlText);
      if (withStats) {
        CachePoolStats stats = entry.getStats();
        row.add(Long.toString(stats.getBytesNeeded()));
        row.add(Long.toString(stats.getBytesCached()));
        row.add(Long.toString(stats.getBytesOverlimit()));
        row.add(Long.toString(stats.getFilesNeeded()));
        row.add(Long.toString(stats.getFilesCached()));
      }
      table.addRow(row.toArray(new String[row.size()]));
      ++matches;
      if (poolFilter != null) {
        // A name filter matches at most one pool; stop scanning.
        break;
      }
    }
  } catch (IOException e) {
    System.err.println(AdminHelper.prettifyException(e));
    return 2;
  }
  System.out.print(String.format("Found %d result%s.%n", matches,
      (matches == 1 ? "" : "s")));
  if (matches > 0) { 
    System.out.print(table);
  }
  // If list pools succeed, we return 0 (success exit code)
  return 0;
}
 
Example #25
Source File: PBHelper.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Serializes a CachePoolEntry (info plus stats) into its protobuf form.
 */
public static CachePoolEntryProto convert(CachePoolEntry entry) {
  return CachePoolEntryProto.newBuilder()
      .setInfo(PBHelper.convert(entry.getInfo()))
      .setStats(PBHelper.convert(entry.getStats()))
      .build();
}
 
Example #26
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0 4 votes vote down vote up
@Override
public CachePoolEntry get(int i) {
  CachePoolEntryProto elem = proto.getEntries(i);
  return PBHelper.convert(elem);
}
 
Example #27
Source File: PBHelper.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Deserializes a CachePoolEntryProto back into a CachePoolEntry.
 */
public static CachePoolEntry convert (CachePoolEntryProto proto) {
  return new CachePoolEntry(
      PBHelper.convert(proto.getInfo()),
      PBHelper.convert(proto.getStats()));
}
 
Example #28
Source File: PBHelper.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Rebuilds a CachePoolEntry from its wire (protobuf) representation.
 */
public static CachePoolEntry convert (CachePoolEntryProto proto) {
  CachePoolStats stats = PBHelper.convert(proto.getStats());
  CachePoolInfo info = PBHelper.convert(proto.getInfo());
  return new CachePoolEntry(info, stats);
}
 
Example #29
Source File: PBHelper.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Converts a CachePoolEntry into the protobuf message carrying its info
 * and statistics.
 */
public static CachePoolEntryProto convert(CachePoolEntry entry) {
  CachePoolEntryProto.Builder out = CachePoolEntryProto.newBuilder();
  out.setInfo(PBHelper.convert(entry.getInfo()));
  out.setStats(PBHelper.convert(entry.getStats()));
  return out.build();
}
 
Example #30
Source File: NameNodeRpcServer.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * ClientProtocol handler for listing cache pools. A null prevKey is
 * treated as a request for the first batch.
 */
@Override // ClientProtocol
public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
    throws IOException {
  checkNNStartup();
  return namesystem.listCachePools(prevKey == null ? "" : prevKey);
}