Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlocks#locatedBlockCount()

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlocks#locatedBlockCount(). The examples are drawn from several open-source Hadoop-based projects; the source file and originating project are noted above each example.
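locatedBlockCount() returns the number of LocatedBlock entries held by a LocatedBlocks object, i.e. how many blocks of the requested file range the NameNode reported. As a quick orientation, the sketch below shows the typical call pattern that recurs throughout the examples; dfsclient and src are assumed placeholders for an existing DFSClient and a file path string, not taken from any particular project.

// Minimal usage sketch (illustrative only; dfsclient and src are assumed to exist).
LocatedBlocks blocks =
    dfsclient.getNamenode().getBlockLocations(src, 0, Long.MAX_VALUE);
int numBlocks = blocks.locatedBlockCount();  // number of LocatedBlock entries returned
for (LocatedBlock blk : blocks.getLocatedBlocks()) {
  // each entry carries the block and the datanodes that hold a replica
  System.out.println(blk.getBlock() + " stored on "
      + blk.getLocations().length + " datanode(s)");
}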
Example 1
Source File: TestAvatarDataNodeRBW.java    From RDFS with Apache License 2.0
private void verifyResults(int blocksBefore, String fileName)
  throws IOException {
  // Verify we have RBWs after restart.
  AvatarNode avatarAfter = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatarAfter.namesystem
      .getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  long blocksAfter = lbks.locatedBlockCount();

  System.out.println("blocksBefore : " + blocksBefore + " blocksAfter : "
      + blocksAfter);

  assertEquals(blocksBefore, blocksAfter);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
}
 
Example 2
Source File: TestRaidShellFsck_CorruptCounter.java    From RDFS with Apache License 2.0
/**
 * removes a file block in the specified stripe
 */
private void removeFileBlock(Path filePath, int stripe, int blockInStripe)
  throws IOException {
  LocatedBlocks fileBlocks = dfs.getClient().namenode.
    getBlockLocations(filePath.toString(), 0, FILE_BLOCKS * BLOCK_SIZE);
  if (fileBlocks.locatedBlockCount() != FILE_BLOCKS) {
    throw new IOException("expected " + FILE_BLOCKS + 
                          " file blocks but found " + 
                          fileBlocks.locatedBlockCount());
  }
  if (blockInStripe >= STRIPE_BLOCKS) {
    throw new IOException("blockInStripe is " + blockInStripe +
                          " but must be smaller than " + STRIPE_BLOCKS);
  }
  LocatedBlock block = fileBlocks.get(stripe * STRIPE_BLOCKS + blockInStripe);
  removeAndReportBlock(dfs, filePath, block);
  LOG.info("removed file " + filePath.toString() + " block " +
           stripe * STRIPE_BLOCKS + " in stripe " + stripe);
}
 
Example 3
Source File: TestRaidShellFsck.java    From RDFS with Apache License 2.0
/**
 * removes a file block in the specified stripe
 */
private void removeFileBlock(Path filePath, int stripe, int blockInStripe)
  throws IOException {
  LocatedBlocks fileBlocks = dfs.getClient().namenode.
    getBlockLocations(filePath.toString(), 0, FILE_BLOCKS * BLOCK_SIZE);
  if (fileBlocks.locatedBlockCount() != FILE_BLOCKS) {
    throw new IOException("expected " + FILE_BLOCKS + 
                          " file blocks but found " + 
                          fileBlocks.locatedBlockCount());
  }
  if (blockInStripe >= STRIPE_BLOCKS) {
    throw new IOException("blockInStripe is " + blockInStripe +
                          " but must be smaller than " + STRIPE_BLOCKS);
  }
  LocatedBlock block = fileBlocks.get(stripe * STRIPE_BLOCKS + blockInStripe);
  removeAndReportBlock(dfs, filePath, block);
  LOG.info("removed file " + filePath.toString() + " block " +
           stripe * STRIPE_BLOCKS + " in stripe " + stripe);
}
 
Example 4
Source File: TestAbandonBlock.java    From hadoop with Apache License 2.0
@Test
/** Abandon a block while creating a file */
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";

  // Start writing a file but do not close it
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

  // Now abandon the last block
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
    dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int originalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  
  // call abandonBlock again to make sure the operation is idempotent
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // And close the file
  fout.close();

  // Close cluster and check the block has been abandoned after restart
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
      Integer.MAX_VALUE);
  Assert.assertEquals("Blocks " + b + " has not been abandoned.",
      orginalNumBlocks, blocks.locatedBlockCount() + 1);
}
 
Example 5
Source File: TestAvatarDataNodeRBW.java    From RDFS with Apache License 2.0
private boolean blocksReceived(int nBlocks, String fileName) throws IOException {
  AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  int blocks = lbks.locatedBlockCount();
  if (blocks != nBlocks)
    return false;
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    if (locs == null || locs.length == 0) {
      return false;
    }
  }
  return true;
}
 
Example 6
Source File: TestAbandonBlock.java    From big-c with Apache License 2.0
@Test
/** Abandon a block while creating a file */
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";

  // Start writing a file but do not close it
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

  // Now abandon the last block
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
    dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int originalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  
  // call abandonBlock again to make sure the operation is idempotent
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // And close the file
  fout.close();

  // Close cluster and check the block has been abandoned after restart
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
      Integer.MAX_VALUE);
  Assert.assertEquals("Blocks " + b + " has not been abandoned.",
      orginalNumBlocks, blocks.locatedBlockCount() + 1);
}
 
Example 7
Source File: TestAvatarDataNodeRBW.java    From RDFS with Apache License 2.0
private int initializeTest(String testName) throws IOException {
  String fileName = testName;
  createRBWFile(fileName);
  // Verify we have 1 RBW block.
  AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  int blocksBefore = lbks.locatedBlockCount();
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
  return blocksBefore;
}
 
Example 8
Source File: TestRaidShellFsck.java    From RDFS with Apache License 2.0
/**
 * removes a block from the har part file
 */
private void removeHarParityBlock(int block) throws IOException {
  Path harPath = new Path(RAID_PATH, HAR_NAME);
  FileStatus [] listPaths = dfs.listStatus(harPath);
  
  boolean deleted = false;
  
  for (FileStatus f: listPaths) {
    if (f.getPath().getName().startsWith("part-")) {
      final Path partPath = new Path(f.getPath().toUri().getPath());
      final LocatedBlocks partBlocks  = dfs.getClient().namenode.
        getBlockLocations(partPath.toString(),
                          0,
                          f.getLen());
      
      if (partBlocks.locatedBlockCount() <= block) {
        throw new IOException("invalid har block " + block);
      }

      final LocatedBlock partBlock = partBlocks.get(block);
      removeAndReportBlock(dfs, partPath, partBlock);
      LOG.info("removed block " + block + "/" + 
               partBlocks.locatedBlockCount() +
               " of file " + partPath.toString() +
               " block size " + partBlock.getBlockSize());
      deleted = true;
      break;
    }
  }

  if (!deleted) {
    throw new IOException("cannot find part file in " + harPath.toString());
  }
}
 
Example 9
Source File: TestFileCreation.java    From hadoop-gpu with Apache License 2.0
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new Configuration();
  conf.setInt("heartbeat.recheck.interval", 1000);
  conf.setInt("dfs.heartbeat.interval", 1);
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = (DistributedFileSystem)cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    //
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
                       + "Created file filestatus.dat with one replicas.");

    LocatedBlocks locations = client.namenode.getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add another block to the file
    LocatedBlock location = client.namenode.addBlock(file1.toString(), 
        client.clientName);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.namenode.getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");
    
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }

    // verify that the last block was synchronized.
    locations = client.namenode.getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Example 10
Source File: FSDirStatAndListingOp.java    From hadoop with Apache License 2.0
/**
 * Get a partial listing of the indicated directory
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 * that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @return a partial listing starting after startAfter
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null)
      return null;
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() : BlockStoragePolicySuite
        .ID_UNSPECIFIED;

    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
    for (int i=0; i<numOfListing && locationBudget>0; i++) {
      INode cur = contents.get(startChild+i);
      byte curPolicy = isSuperUser && !cur.isSymlink()?
          cur.getLocalStoragePolicyID():
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy,
              parentStoragePolicy), snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
          // Once we  hit lsLimit locations, stop.
          // This helps to prevent excessively large response payloads.
          // Approximate #locations with locatedBlockCount() * repl_factor
          LocatedBlocks blks =
              ((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
          locationBudget -= (blks == null) ? 0 :
             blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary
    if (listingCnt < numOfListing) {
        listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(
        listing, totalNumChildren-startChild-listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
 
Example 11
Source File: DFSInputStream.java    From RDFS with Apache License 2.0
/**
 * Grab the open-file info from namenode
 */
synchronized void openInfo() throws IOException {
  if (src == null && blocks == null) {
    throw new IOException("No fine provided to open");
  }

  LocatedBlocks newInfo = src != null ? 
                          getLocatedBlocks(src, 0, prefetchSize) : blocks;
  if (newInfo == null) {
    throw new IOException("Cannot open filename " + src);
  }

  // I think this check is not correct. A file could have been appended to
  // between two calls to openInfo().
  if (locatedBlocks != null && !locatedBlocks.isUnderConstruction() &&
      !newInfo.isUnderConstruction()) {
    Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
    Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
    while (oldIter.hasNext() && newIter.hasNext()) {
      if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) {
        throw new IOException("Blocklist for " + src + " has changed!");
      }
    }
  }

  // if the file is under construction, then fetch size of last block
  // from datanode.
  if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
    LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
    if (last.getLocations().length > 0) {
      try {
        Block newBlock = getBlockInfo(last);
        // only if the block has data (not null)
        if (newBlock != null) {
          long newBlockSize = newBlock.getNumBytes();
          newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize);
        }
      } catch (IOException e) {
        DFSClient.LOG.debug("DFSClient file " + src + 
                  " is being concurrently append to" +
                  " but datanodes probably does not have block " +
                  last.getBlock(), e);
      }
    }
  }
  this.locatedBlocks = new DFSLocatedBlocks(newInfo);
  this.currentNode = null;
}
 
Example 12
Source File: TestRaidShellFsck.java    From RDFS with Apache License 2.0
/**
 * removes a parity block in the specified stripe
 */
private void removeParityBlock(Path filePath, int stripe) throws IOException {
  // find parity file
  ParityFilePair ppair =
      ParityFilePair.getParityFile(Codec.getCodec("xor"), filePath, conf);
  String parityPathStr = ppair.getPath().toUri().getPath();
  LOG.info("parity path: " + parityPathStr);
  FileSystem parityFS = ppair.getFileSystem();
  if (!(parityFS instanceof DistributedFileSystem)) {
    throw new IOException("parity file is not on distributed file system");
  }
  DistributedFileSystem parityDFS = (DistributedFileSystem) parityFS;

  
  // now corrupt the block corresponding to the stripe selected
  FileStatus parityFileStatus =
    parityDFS.getFileStatus(new Path(parityPathStr));
  long parityBlockSize = parityFileStatus.getBlockSize();
  long parityFileLength = parityFileStatus.getLen();
  long parityFileLengthInBlocks = (parityFileLength / parityBlockSize) + 
    (((parityFileLength % parityBlockSize) == 0) ? 0L : 1L);
  if (parityFileLengthInBlocks <= stripe) {
    throw new IOException("selected stripe " + stripe + 
                          " but parity file only has " + 
                          parityFileLengthInBlocks + " blocks");
  }
  if (parityBlockSize != BLOCK_SIZE) {
    throw new IOException("file block size is " + BLOCK_SIZE + 
                          " but parity file block size is " + 
                          parityBlockSize);
  }
  LocatedBlocks parityFileBlocks = parityDFS.getClient().namenode.
    getBlockLocations(parityPathStr, 0, parityFileLength);
  if (parityFileBlocks.locatedBlockCount() != parityFileLengthInBlocks) {
    throw new IOException("expected " + parityFileLengthInBlocks + 
                          " parity file blocks but got " + 
                          parityFileBlocks.locatedBlockCount() + 
                          " blocks");
  }
  LocatedBlock parityFileBlock = parityFileBlocks.get(stripe);
  removeAndReportBlock(parityDFS, new Path(parityPathStr), parityFileBlock);
  LOG.info("removed parity file block/stripe " + stripe +
           " for " + filePath.toString());

}
 
Example 13
Source File: TestFileCreation.java    From RDFS with Apache License 2.0
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new Configuration();
  conf.setInt("heartbeat.recheck.interval", 1000);
  conf.setInt("dfs.heartbeat.interval", 1);
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = (DistributedFileSystem)cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    //
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
                       + "Created file filestatus.dat with one replicas.");

    LocatedBlocks locations = client.namenode.getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add another block to the file
    LocatedBlock location = client.namenode.addBlock(file1.toString(), 
        client.clientName);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.namenode.getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");
    
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }

    // verify that the last block was synchronized.
    locations = client.namenode.getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Example 14
Source File: TestFileCreation.java    From big-c with Apache License 2.0
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    //
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
                       + "Created file filestatus.dat with one replicas.");

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");
    
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }

    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Example 15
Source File: NamenodeWebHdfsMethods.java    From big-c with Apache License 2.0
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
Example 16
Source File: FSDirStatAndListingOp.java    From big-c with Apache License 2.0
/**
 * Get a partial listing of the indicated directory
 *
 * We will stop when any of the following conditions is met:
 * 1) this.lsLimit files have been added
 * 2) needLocation is true AND enough files have been added such
 * that at least this.lsLimit block locations are in the response
 *
 * @param fsd FSDirectory
 * @param iip the INodesInPath instance containing all the INodes along the
 *            path
 * @param src the directory name
 * @param startAfter the name to start listing after
 * @param needLocation if block locations are returned
 * @return a partial listing starting after startAfter
 */
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
    throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  final boolean isRawPath = FSDirectory.isReservedRawName(src);

  fsd.readLock();
  try {
    if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
      return getSnapshotsListing(fsd, srcs, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null)
      return null;
    byte parentStoragePolicy = isSuperUser ?
        targetNode.getStoragePolicyID() : BlockStoragePolicySuite
        .ID_UNSPECIFIED;

    if (!targetNode.isDirectory()) {
      return new DirectoryListing(
          new HdfsFileStatus[]{createFileStatus(fsd, src,
              HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
              parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
    }

    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild,
        fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
    for (int i=0; i<numOfListing && locationBudget>0; i++) {
      INode cur = contents.get(startChild+i);
      byte curPolicy = isSuperUser && !cur.isSymlink()?
          cur.getLocalStoragePolicyID():
          BlockStoragePolicySuite.ID_UNSPECIFIED;
      listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
          needLocation, getStoragePolicyID(curPolicy,
              parentStoragePolicy), snapshot, isRawPath, iip);
      listingCnt++;
      if (needLocation) {
          // Once we  hit lsLimit locations, stop.
          // This helps to prevent excessively large response payloads.
          // Approximate #locations with locatedBlockCount() * repl_factor
          LocatedBlocks blks =
              ((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
          locationBudget -= (blks == null) ? 0 :
             blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary
    if (listingCnt < numOfListing) {
        listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(
        listing, totalNumChildren-startChild-listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
 
Example 17
Source File: TestFileCreation.java    From hadoop with Apache License 2.0
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;

    // create a new file.
    //
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
                       + "Created file filestatus.dat with one replicas.");

    LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "The file has " + locations.locatedBlockCount() + " blocks.");

    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null);
    System.out.println("testFileCreationError2: "
        + "Added block " + location.getBlock());

    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: "
        + "The file now has " + count + " blocks.");
    
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);

    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }

    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                  0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: "
        + "locations = " + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
 
Example 18
Source File: NamenodeWebHdfsMethods.java    From hadoop with Apache License 2.0
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}