Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlocks

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlocks. These examples are extracted from open source projects; the originating project, source file, and license are noted above each example where available.
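All of these examples share one core pattern: ask the NameNode for a file's LocatedBlocks, then walk the per-block replica locations. Here is a minimal, self-contained sketch of that pattern, written against the Hadoop 2.x APIs that most of the examples below use; the path is a placeholder, the cast assumes fs.defaultFS points at an HDFS cluster, and error handling is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlocksDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/demo.dat");   // placeholder path
    DistributedFileSystem dfs =
        (DistributedFileSystem) file.getFileSystem(conf);

    // Ask the NameNode for every block of the file in [0, len).
    long len = dfs.getFileStatus(file).getLen();
    LocatedBlocks blocks =
        dfs.getClient().getLocatedBlocks(file.toUri().getPath(), 0, len);

    for (LocatedBlock b : blocks.getLocatedBlocks()) {
      System.out.println(b.getBlock() + " size=" + b.getBlockSize());
      for (DatanodeInfo dn : b.getLocations()) {
        System.out.println("  replica on " + dn.getHostName());
      }
    }
  }
}

The examples then branch out from this core: the protobuf translators move LocatedBlocks across the client/NameNode RPC boundary, the servlets use the returned locations to pick a datanode to serve a request, and the tests assert replica counts, storage types, or corruption state.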
Example 1
Source Project: hadoop    Source File: TestRetryCacheWithHA.java    License: Apache License 2.0
@Override
void prepare() throws Exception {
  final Path filePath = new Path(file);
  DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0);
  // append to the file and leave the last block under construction
  out = this.client.append(file, BlockSize, EnumSet.of(CreateFlag.APPEND),
      null, null);
  byte[] appendContent = new byte[100];
  new Random().nextBytes(appendContent);
  out.write(appendContent);
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  LocatedBlocks blks = dfs.getClient()
      .getLocatedBlocks(file, BlockSize + 1);
  assertEquals(1, blks.getLocatedBlocks().size());
  nodes = blks.get(0).getLocations();
  oldBlock = blks.get(0).getBlock();
  
  // Get a new generation stamp for the last block, as pipeline recovery does
  LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline(
      oldBlock, client.getClientName());
  newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
      oldBlock.getBlockId(), oldBlock.getNumBytes(), 
      newLbk.getBlock().getGenerationStamp());
}
 
Example 2
Source Project: RDFS    Source File: FileFixer.java    License: Apache License 2.0
/**
 * Returns the corrupt blocks in a file.
 **/
List<LocatedBlock> corruptBlocksInFile(
  DistributedFileSystem fs, String uriPath, FileStatus stat)
throws IOException {
  List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
  LocatedBlocks locatedBlocks = fs.getClient().namenode.getBlockLocations(
    uriPath, 0, stat.getLen());
  for (LocatedBlock b: locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt() || 
       (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      LOG.info("Adding bad block for file " + uriPath);
      corrupt.add(b);
    }
  }
  return corrupt;
}
 
Example 3
@Override
public GetBlockLocationsResponseProto getBlockLocations(
    RpcController controller, GetBlockLocationsRequestProto req)
    throws ServiceException {
  try {
    LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
        req.getLength());
    Builder builder = GetBlockLocationsResponseProto
        .newBuilder();
    if (b != null) {
      builder.setLocations(PBHelper.convert(b));
    }
    return builder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
 
Example 4
Source Project: hadoop    Source File: ClientNamenodeProtocolTranslatorPB.java    License: Apache License 2.0
@Override
public LocatedBlocks getBlockLocations(String src, long offset, long length)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  GetBlockLocationsRequestProto req = GetBlockLocationsRequestProto
      .newBuilder()
      .setSrc(src)
      .setOffset(offset)
      .setLength(length)
      .build();
  try {
    GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
        req);
    return resp.hasLocations() ? 
      PBHelper.convert(resp.getLocations()) : null;
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 5
Source Project: big-c    Source File: TestLazyPersistFiles.java    License: Apache License 2.0
/**
 * Delete a lazy-persist file that has been persisted to disk.
 * Both memory blocks and disk blocks are deleted.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testDeleteAfterPersist()
  throws Exception {
  startUpCluster(true, -1);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE, true);
  LocatedBlocks locatedBlocks = ensureFileReplicasOnStorageType(path, RAM_DISK);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  // Delete after persist
  client.delete(path.toString(), false);
  Assert.assertFalse(fs.exists(path));

  assertThat(verifyDeletedBlocks(locatedBlocks), is(true));

  verifyRamDiskJMXMetric("RamDiskBlocksLazyPersisted", 1);
  verifyRamDiskJMXMetric("RamDiskBytesLazyPersisted", BLOCK_SIZE);
}
 
Example 6
Source Project: hbase    Source File: TestBlockReorderMultiBlocks.java    License: Apache License 2.0
private void testFromDFS(DistributedFileSystem dfs, String src, int repCount, String localhost)
    throws Exception {
  // Multiple times as the order is random
  for (int i = 0; i < 10; i++) {
    LocatedBlocks l;
    // The NN gets the block list asynchronously, so we may need multiple tries to get the list
    final long max = System.currentTimeMillis() + 10000;
    boolean done;
    do {
      Assert.assertTrue("Can't get enouth replica.", System.currentTimeMillis() < max);
      l = getNamenode(dfs.getClient()).getBlockLocations(src, 0, 1);
      Assert.assertNotNull("Can't get block locations for " + src, l);
      Assert.assertNotNull(l.getLocatedBlocks());
      Assert.assertTrue(l.getLocatedBlocks().size() > 0);

      done = true;
      for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
        done = (l.get(y).getLocations().length == repCount);
      }
    } while (!done);

    for (int y = 0; y < l.getLocatedBlocks().size() && done; y++) {
      Assert.assertEquals(localhost, l.get(y).getLocations()[repCount - 1].getHostName());
    }
  }
}
 
Example 7
Source Project: hadoop-gpu    Source File: FileDataServlet.java    License: Apache License 2.0
/** Select a datanode to service this request.
 * Currently, this looks at no more than the first five blocks of a file,
 * selecting a datanode randomly from the most represented.
 */
private static DatanodeID pickSrcDatanode(FileStatus i,
    ClientProtocol nnproxy) throws IOException {
  // a race condition can happen by initializing a static member this way.
  // A proper fix should make JspHelper a singleton. Since it doesn't affect 
  // correctness, we leave it as is for now.
  if (jspHelper == null)
    jspHelper = new JspHelper();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      i.getPath().toUri().getPath(), 0, 1);
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    return jspHelper.randomNode();
  }
  return jspHelper.bestNode(blks.get(0));
}
 
Example 8
Source Project: hadoop    Source File: TestDFSClientRetries.java    License: Apache License 2.0
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  // Copy the good block, but claim its only replica is on a bogus datanode
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    },
    goodLocatedBlock.getStartOffset(),
    false);


  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null);
}
 
Example 9
Source Project: RDFS    Source File: DistributedAvatarFileSystem.java    License: Apache License 2.0
@Override
public LocatedBlock addBlock(final String src, final String clientName,
    final DatanodeInfo[] excludedNodes) throws IOException {
  return (new MutableFSCaller<LocatedBlock>() {
    @Override
    LocatedBlock call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0,
              info.getLen());
          // If at least one block exists.
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlock(src, clientName, excludedNodes);
    }

  }).callFS();
}
 
Example 10
Source Project: RDFS    Source File: DFSClient.java    License: Apache License 2.0
/**
 * Convert an HdfsFileStatus and its block locations to a LocatedFileStatus
 * @param stat an HdfsFileStatus
 * @param locs the file's block locations
 * @param src parent path in string representation
 * @return a LocatedFileStatus object
 */
private static LocatedFileStatus toLocatedFileStatus(
    HdfsFileStatus stat, LocatedBlocks locs, String src) {
  if (stat == null) {
    return null;
  }
  return new LocatedFileStatus(stat.getLen(),
      stat.isDir(), stat.getReplication(),
      stat.getBlockSize(), stat.getModificationTime(),
      stat.getAccessTime(),
      stat.getPermission(), stat.getOwner(), stat.getGroup(),
      stat.getFullPath(new Path(src)), // full path
      DFSUtil.locatedBlocks2Locations(locs));
}
 
Example 11
Source Project: big-c    Source File: TestStandbyIsHot.java    License: Apache License 2.0
static void waitForBlockLocations(final MiniDFSCluster cluster,
    final NameNode nn,
    final String path, final int expectedReplicas)
    throws Exception {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    
    @Override
    public Boolean get() {
      try {
        LocatedBlocks locs = NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
        DatanodeInfo[] dnis = locs.getLastLocatedBlock().getLocations();
        for (DatanodeInfo dni : dnis) {
          Assert.assertNotNull(dni);
        }
        int numReplicas = dnis.length;
        
        LOG.info("Got " + numReplicas + " locs: " + locs);
        if (numReplicas > expectedReplicas) {
          cluster.triggerDeletionReports();
        }
        cluster.triggerHeartbeats();
        return numReplicas == expectedReplicas;
      } catch (IOException e) {
        LOG.warn("No block locations yet: " + e.getMessage());
        return false;
      }
    }
  }, 500, 20000);
  
}
 
Example 12
Source Project: hadoop    Source File: DFSClient.java    License: Apache License 2.0
/**
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
    String src, long start, long length) 
    throws IOException {
  try {
    return namenode.getBlockLocations(src, start, length);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class);
  }
}
 
Example 13
Source Project: RDFS    Source File: TestBlockMissingException.java    License: Apache License 2.0
/**
 * Test DFS Raid
 */
public void testBlockMissingException() throws Exception {
  LOG.info("Test testBlockMissingException started.");
  long blockSize = 1024L;
  int numBlocks = 4;
  conf = new Configuration();
  try {
    dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfs.waitActive();
    fileSys = (DistributedFileSystem)dfs.getFileSystem();
    Path file1 = new Path("/user/dhruba/raidtest/file1");
    createOldFile(fileSys, file1, 1, numBlocks, blockSize);

    // extract block locations from File system. Wait till file is closed.
    LocatedBlocks locations = fileSys.dfs.namenode.getBlockLocations(
        file1.toString(), 0, numBlocks * blockSize);
    // remove block of file
    LOG.info("Remove first block of file");
    corruptBlock(file1, locations.get(0).getBlock());

    // validate that the system throws BlockMissingException
    validateFile(fileSys, file1);
  } finally {
    if (fileSys != null) fileSys.close();
    if (dfs != null) dfs.shutdown();
  }
  LOG.info("Test testBlockMissingException completed.");
}
 
Example 14
Source Project: hadoop    Source File: FileDataServlet.java    License: Apache License 2.0
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status, 
    UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
    throws IOException {
  String scheme = request.getScheme();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
  final Configuration conf = NameNodeHttpServer.getConfFromContext(
      getServletContext());
  final DatanodeID host = pickSrcDatanode(blks, status, conf);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = host.getHostName();
  } else {
    hostname = host.getIpAddr();
  }

  int port = "https".equals(scheme) ? host.getInfoSecurePort() : host
      .getInfoPort();

  String dtParam = "";
  if (dt != null) {
    dtParam = JspHelper.getDelegationTokenUrlParam(dt);
  }

  // Add namenode address to the url params
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
      getServletContext());
  String addr = nn.getNameNodeAddressHostPortString();
  String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
  
  return new URL(scheme, hostname, port,
      "/streamFile" + encodedPath + '?' +
      "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
      dtParam + addrParam);
}
 
Example 15
Source Project: hadoop    Source File: FileDataServlet.java    License: Apache License 2.0
/** Select a datanode to service this request.
 * Currently, this looks at no more than the first five blocks of a file,
 * selecting a datanode randomly from the most represented.
 * @param conf 
 */
private DatanodeID pickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i,
    Configuration conf) throws IOException {
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
        getServletContext());
    return NamenodeJspHelper.getRandomDatanode(nn);
  }
  return JspHelper.bestNode(blks, conf);
}
 
Example 16
Source Project: hadoop    Source File: PBHelper.java    License: Apache License 2.0
public static LocatedBlocks convert(LocatedBlocksProto lb) {
  return new LocatedBlocks(
      lb.getFileLength(), lb.getUnderConstruction(),
      PBHelper.convertLocatedBlock(lb.getBlocksList()),
      lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
      lb.getIsLastBlockComplete(),
      lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) :
          null);
}
 
Example 17
Source Project: hadoop    Source File: PBHelper.java    License: Apache License 2.0
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
  if (fs == null)
    return null;
  FileType fType = FileType.IS_FILE;
  if (fs.isDir()) {
    fType = FileType.IS_DIR;
  } else if (fs.isSymlink()) {
    fType = FileType.IS_SYMLINK;
  }

  HdfsFileStatusProto.Builder builder = 
   HdfsFileStatusProto.newBuilder().
    setLength(fs.getLen()).
    setFileType(fType).
    setBlockReplication(fs.getReplication()).
    setBlocksize(fs.getBlockSize()).
    setModificationTime(fs.getModificationTime()).
    setAccessTime(fs.getAccessTime()).
    setPermission(PBHelper.convert(fs.getPermission())).
    setOwner(fs.getOwner()).
    setGroup(fs.getGroup()).
    setFileId(fs.getFileId()).
    setChildrenNum(fs.getChildrenNum()).
    setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
    setStoragePolicy(fs.getStoragePolicy());
  if (fs.isSymlink())  {
    builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
  }
  if (fs.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
  }
  if (fs instanceof HdfsLocatedFileStatus) {
    final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
    LocatedBlocks locations = lfs.getBlockLocations();
    if (locations != null) {
      builder.setLocations(PBHelper.convert(locations));
    }
  }
  return builder.build();
}
 
Example 18
Source Project: big-c    Source File: TestDataNodeRollingUpgrade.java    License: Apache License 2.0
/** Test assumes that the file has a single block */
private File getBlockForFile(Path path, boolean exists) throws IOException {
  LocatedBlocks blocks = nn.getRpcServer().getBlockLocations(path.toString(),
      0, Long.MAX_VALUE);
  assertEquals("The test helper functions assume that each file has a single block",
               1, blocks.getLocatedBlocks().size());
  ExtendedBlock block = blocks.getLocatedBlocks().get(0).getBlock();
  BlockLocalPathInfo bInfo = dn0.getFSDataset().getBlockLocalPathInfo(block);
  File blockFile = new File(bInfo.getBlockPath());
  assertEquals(exists, blockFile.exists());
  return blockFile;
}
 
Example 19
Source Project: RDFS    Source File: TestRaidDfs.java    License: Apache License 2.0
private void corruptBlockAndValidate(Path srcFile, Path destPath,
  int[] listBlockNumToCorrupt, long blockSize, int numBlocks,
  MiniDFSCluster cluster)
throws IOException, InterruptedException {
  RaidDFSUtil.cleanUp(fileSys, srcFile.getParent());
  fileSys.mkdirs(srcFile.getParent());
  int repl = 1;
  long crc = createTestFilePartialLastBlock(fileSys, srcFile, repl,
                numBlocks, blockSize);
  long length = fileSys.getFileStatus(srcFile).getLen();

  if (codec.isDirRaid) {
    RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile.getParent()),
    destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
    false, repl, repl);
  } else {
    RaidNode.doRaid(conf, fileSys.getFileStatus(srcFile),
    destPath, codec, new RaidNode.Statistics(), RaidUtils.NULL_PROGRESSABLE,
    false, repl, repl);
  }

  // Corrupt the requested blocks of the file
  for (int blockNumToCorrupt : listBlockNumToCorrupt) {
    LOG.info("Corrupt block " + blockNumToCorrupt + " of file " + srcFile);
    LocatedBlocks locations = getBlockLocations(srcFile);
    corruptBlock(srcFile, locations.get(blockNumToCorrupt).getBlock(),
          NUM_DATANODES, true, cluster);
  }

  // Validate
  DistributedRaidFileSystem raidfs = getRaidFS();
  assertTrue(validateFile(raidfs, srcFile, length, crc));
}
 
Example 20
Source Project: big-c    Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override // ClientProtocol
public LocatedBlocks getBlockLocations(String src, 
                                        long offset, 
                                        long length) 
    throws IOException {
  checkNNStartup();
  metrics.incrGetBlockLocations();
  return namesystem.getBlockLocations(getClientMachine(), 
                                      src, offset, length);
}
 
Example 21
Source Project: hadoop-gpu    Source File: TestInterDatanodeProtocol.java    License: Apache License 2.0
public static LocatedBlock getLastLocatedBlock(
    ClientProtocol namenode, String src
) throws IOException {
  //get block info for the last block
  LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);

  return blocks.get(blocks.size() - 1);
}
 
Example 22
Source Project: hadoop    Source File: TestFileTruncate.java    License: Apache License 2.0
/**
 * The last block is truncated at mid. (non copy-on-truncate)
 * shutdown the datanodes immediately after truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Example 23
Source Project: RDFS    Source File: TestAvatarDataNodeRBW.java    License: Apache License 2.0
private boolean blocksReceived(int nBlocks, String fileName) throws IOException {
  AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  int blocks = lbks.locatedBlockCount();
  if (blocks != nBlocks)
    return false;
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    if (locs == null || locs.length == 0) {
      return false;
    }
  }
  return true;
}
 
Example 24
private void waitForBlockReplication(String filename, 
                                     ClientProtocol namenode,
                                     int expected, long maxWaitSec) 
                                     throws IOException {
  long start = System.currentTimeMillis();
  
  //wait for all the blocks to be replicated;
  LOG.info("Checking for block replication for " + filename);
  
  LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
  assertEquals(numBlocks, blocks.locatedBlockCount());
  
  for (int i = 0; i < numBlocks; ++i) {
    LOG.info("Checking for block:" + (i+1));
    while (true) { // Loop to check block i (usually once block 0 is done, all are done)
      blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
      assertEquals(numBlocks, blocks.locatedBlockCount());
      LocatedBlock block = blocks.get(i);
      int actual = block.getLocations().length;
      if ( actual == expected ) {
        LOG.info("Got enough replicas for " + (i+1) + "th block " + block.getBlock() +
            ", got " + actual + ".");
        break;
      }
      LOG.info("Not enough replicas for " + (i+1) + "th block " + block.getBlock() +
                             " yet. Expecting " + expected + ", got " + 
                             actual + ".");
    
      if (maxWaitSec > 0 && 
          (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timedout while waiting for all blocks to " +
                              " be replicated for " + filename);
      }
    
      try {
        Thread.sleep(500);
      } catch (InterruptedException ignored) {}
    }
  }
}
 
Example 25
Source Project: hadoop    Source File: LazyPersistTestCase.java    License: Apache License 2.0
protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks)
    throws IOException, InterruptedException {

  LOG.info("Verifying replica has no saved copy after deletion.");
  triggerBlockReport();

  while(
    DataNodeTestUtils.getPendingAsyncDeletions(cluster.getDataNodes().get(0))
      > 0L){
    Thread.sleep(1000);
  }

  final String bpid = cluster.getNamesystem().getBlockPoolId();
  List<? extends FsVolumeSpi> volumes =
    cluster.getDataNodes().get(0).getFSDataset().getVolumes();

  // Make sure deleted replica does not have a copy on either finalized dir of
  // transient volume or finalized dir of non-transient volume
  for (FsVolumeSpi v : volumes) {
    FsVolumeImpl volume = (FsVolumeImpl) v;
    File targetDir = (v.isTransientStorage()) ?
        volume.getBlockPoolSlice(bpid).getFinalizedDir() :
        volume.getBlockPoolSlice(bpid).getLazypersistDir();
    if (verifyBlockDeletedFromDir(targetDir, locatedBlocks) == false) {
      return false;
    }
  }
  return true;
}
 
Example 26
Source Project: hadoop    Source File: TestLazyPersistFiles.java    License: Apache License 2.0
/**
 * File partially fit in RamDisk after eviction.
 * RamDisk can fit 2 blocks. Write a file with 5 blocks.
 * Expect 2 or less blocks are on RamDisk and 3 or more on disk.
 * @throws IOException
 */
@Test
public void testFallbackToDiskPartial()
  throws IOException, InterruptedException {
  startUpCluster(true, 2);
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path path = new Path("/" + METHOD_NAME + ".dat");

  makeTestFile(path, BLOCK_SIZE * 5, true);

  // Sleep for a short time to allow the lazy writer thread to do its job
  Thread.sleep(6 * LAZY_WRITER_INTERVAL_SEC * 1000);

  triggerBlockReport();

  int numBlocksOnRamDisk = 0;
  int numBlocksOnDisk = 0;

  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
    client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] == RAM_DISK) {
      numBlocksOnRamDisk++;
    } else if (locatedBlock.getStorageTypes()[0] == DEFAULT) {
      numBlocksOnDisk++;
    }
  }

  // Since eviction is asynchronous, depending on the timing of eviction
  // wrt writes, we may get 2 or less blocks on RAM disk.
  assert(numBlocksOnRamDisk <= 2);
  assert(numBlocksOnDisk >= 3);
}
 
Example 27
Source Project: big-c    Source File: DFSClient.java    License: Apache License 2.0
@VisibleForTesting
public LocatedBlocks getLocatedBlocks(String src, long start, long length)
    throws IOException {
  TraceScope scope = getPathTraceScope("getBlockLocations", src);
  try {
    return callGetBlockLocations(namenode, src, start, length);
  } finally {
    scope.close();
  }
}
 
Example 28
Source Project: hadoop    Source File: TestFileCreation.java    License: Apache License 2.0
private void assertBlocks(BlockManager bm, LocatedBlocks lbs, 
    boolean exist) {
  for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
    if (exist) {
      assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
          getLocalBlock()) != null);
    } else {
      assertTrue(bm.getStoredBlock(locatedBlock.getBlock().
          getLocalBlock()) == null);
    }
  }
}