Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlock#isCorrupt()

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlock#isCorrupt(). Each example notes the project and license of the source file it was taken from.
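All of the examples share one test: a block is treated as bad when isCorrupt() returns true, or when it has a positive size but no replica locations left. Here is a minimal sketch of that shared pattern, assuming the caller has already fetched the LocatedBlocks for a file (the CorruptBlockFilter class and filterCorrupt method are illustrative names, not part of the quoted projects):

import java.util.LinkedList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class CorruptBlockFilter {
  /**
   * Collects blocks that the NameNode flags as corrupt, plus blocks that
   * hold data (positive size) but have no remaining replica locations.
   */
  static List<LocatedBlock> filterCorrupt(LocatedBlocks locatedBlocks) {
    List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
    for (LocatedBlock b : locatedBlocks.getLocatedBlocks()) {
      if (b.isCorrupt()
          || (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
        corrupt.add(b);
      }
    }
    return corrupt;
  }
}

Examples 1 and 5 below show two ways to obtain the LocatedBlocks argument from a DistributedFileSystem.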
Example 1
Source File: FileFixer.java    From RDFS with Apache License 2.0
/**
 * Returns the corrupt blocks in a file.
 **/
List<LocatedBlock> corruptBlocksInFile(
  DistributedFileSystem fs, String uriPath, FileStatus stat)
throws IOException {
  List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
  LocatedBlocks locatedBlocks = fs.getClient().namenode.getBlockLocations(
    uriPath, 0, stat.getLen());
  for (LocatedBlock b: locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt() || 
       (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      LOG.info("Adding bad block for file " + uriPath);
      corrupt.add(b);
    }
  }
  return corrupt;
}
 
Example 2
Source File: Decoder.java    From RDFS with Apache License 2.0
/**
 * A block is corrupt if the NameNode flags it, or if it has data but no
 * remaining replica locations.
 */
boolean isBlockCorrupt(LocatedBlock block) {
	return block.isCorrupt()
			|| (block.getLocations().length == 0 && block.getBlockSize() > 0);
}
 
Example 3
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
/**
 * A block is corrupt if the NameNode flags it, or if it has data but no
 * remaining replica locations.
 */
boolean isBlockCorrupt(LocatedBlock block) {
	return block.isCorrupt()
			|| (block.getLocations().length == 0 && block.getBlockSize() > 0);
}
 
Example 4
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
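/**
 * Returns the corrupt blocks in a file, together with the metadata
 * (namespace id, data transfer version, method fingerprint) needed to
 * reconstruct them.
 */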
List<LocatedBlockWithMetaInfo> lostBlocksInFile(
		DistributedFileSystem fs, String uriPath, FileStatus stat)
		throws IOException {

	List<LocatedBlockWithMetaInfo> corrupt = 
			new LinkedList<LocatedBlockWithMetaInfo>();
	VersionedLocatedBlocks locatedBlocks;
	int namespaceId = 0;
	int methodFingerprint = 0;
	if (DFSClient
			.isMetaInfoSuppoted(fs.getClient().namenodeProtocolProxy)) {
		LocatedBlocksWithMetaInfo lbksm = fs.getClient().namenode
				.openAndFetchMetaInfo(uriPath, 0, stat.getLen());
		namespaceId = lbksm.getNamespaceID();
		locatedBlocks = lbksm;
		methodFingerprint = lbksm.getMethodFingerPrint();
		fs.getClient().getNewNameNodeIfNeeded(methodFingerprint);
	} else {
		locatedBlocks = fs.getClient().namenode.open(uriPath, 0,
				stat.getLen());
	}
	final int dataTransferVersion = locatedBlocks
			.getDataProtocolVersion();
	for (LocatedBlock b : locatedBlocks.getLocatedBlocks()) {
		if (b.isCorrupt()
				|| (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
			corrupt.add(new LocatedBlockWithMetaInfo(b.getBlock(), b
					.getLocations(), b.getStartOffset(),
					dataTransferVersion, namespaceId, methodFingerprint));
		}
	}
	return corrupt;
}
 
Example 5
Source File: RaidDFSUtil.java    From RDFS with Apache License 2.0
/**
 * Returns the corrupt blocks in a file.
 */
public static List<LocatedBlock> corruptBlocksInFile(
  DistributedFileSystem dfs, String path, long offset, long length)
throws IOException {
  List<LocatedBlock> corrupt = new LinkedList<LocatedBlock>();
  LocatedBlocks locatedBlocks =
    getBlockLocations(dfs, path, offset, length);
  for (LocatedBlock b: locatedBlocks.getLocatedBlocks()) {
    if (b.isCorrupt() ||
       (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
      corrupt.add(b);
    }
  }
  return corrupt;
}
 
Example 6
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
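/**
 * Returns the blocks of a file whose remaining replicas are all on
 * decommissioning datanodes. Corrupt blocks are skipped here; they are
 * the responsibility of the CorruptBlockReconstructor.
 */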
List<LocatedBlockWithMetaInfo> lostBlocksInFile(
		DistributedFileSystem fs, String uriPath, FileStatus stat)
		throws IOException {

	List<LocatedBlockWithMetaInfo> decommissioning = new LinkedList<LocatedBlockWithMetaInfo>();
	VersionedLocatedBlocks locatedBlocks;
	int namespaceId = 0;
	int methodFingerprint = 0;
	if (DFSClient
			.isMetaInfoSuppoted(fs.getClient().namenodeProtocolProxy)) {
		LocatedBlocksWithMetaInfo lbksm = fs.getClient().namenode
				.openAndFetchMetaInfo(uriPath, 0, stat.getLen());
		namespaceId = lbksm.getNamespaceID();
		locatedBlocks = lbksm;
		methodFingerprint = lbksm.getMethodFingerPrint();
		fs.getClient().getNewNameNodeIfNeeded(methodFingerprint);
	} else {
		locatedBlocks = fs.getClient().namenode.open(uriPath, 0,
				stat.getLen());
	}
	final int dataTransferVersion = locatedBlocks
			.getDataProtocolVersion();

	for (LocatedBlock b : locatedBlocks.getLocatedBlocks()) {
		if (b.isCorrupt()
				|| (b.getLocations().length == 0 && b.getBlockSize() > 0)) {
			// If corrupt, this block is the responsibility of the
			// CorruptBlockReconstructor
			continue;
		}

		// Copy this block iff all good copies are being decommissioned
		boolean allDecommissioning = true;
		for (DatanodeInfo i : b.getLocations()) {
			allDecommissioning &= i.isDecommissionInProgress();
		}
		if (allDecommissioning) {
			decommissioning
					.add(new LocatedBlockWithMetaInfo(b.getBlock(), b
							.getLocations(), b.getStartOffset(),
							dataTransferVersion, namespaceId,
							methodFingerprint));
		}
	}
	return decommissioning;
}
 
Example 7
Source File: DFSInputStream.java    From RDFS with Apache License 2.0 4 votes vote down vote up
private DNAddrPair chooseDataNode(LocatedBlock block)
  throws IOException {
  while (true) {
    DatanodeInfo[] nodes = block.getLocations();
    String blockInfo = block.getBlock() + " file=" + src;
    if (block.isCorrupt())
      throw new BlockMissingException(src, "Block: " +
          blockInfo + " is corrupt ", block.getStartOffset());
    /*
    if (nodes.length == 1) {
      long lastContact = System.currentTimeMillis() - nodes[0].getLastUpdate();
      if (lastContact > 9000)
        throw new BlockMissingException(src, "Could not obtain block: " +
            blockInfo, block.getStartOffset());
    }
    */
    DatanodeInfo chosenNode = null;
    try {
      chosenNode = dfsClient.bestNode(nodes, deadNodes);
      InetSocketAddress targetAddr =
                        NetUtils.createSocketAddr(chosenNode.getName());
      return new DNAddrPair(chosenNode, targetAddr);
    } catch (IOException ie) {
      int failureTimes = DFSClient.dfsInputStreamfailures.get();
      if (failureTimes >= dfsClient.maxBlockAcquireFailures
          || failureTimes >= block.getLocations().length) {
        throw new BlockMissingException(src, "Could not obtain block: " + 
            blockInfo, block.getStartOffset());
      }

      if (nodes == null || nodes.length == 0) {
        DFSClient.LOG.info("No node available for block: " + blockInfo);
      }
      DFSClient.LOG.info("Could not obtain block " + block.getBlock() +
               " from node:  " +
               (chosenNode == null ? "" : chosenNode.getHostName()) + ie +
               ". Will get new block locations from namenode and retry...");       
      try {
        // Introducing a random factor to the wait time before another retry.
        // The wait time is dependent on # of failures and a random factor.
        // On the first BlockMissingException, the wait time is a random
        // number between 0 and 3000 ms. If the first retry still fails, we
        // wait a 3000 ms grace period before the 2nd retry, and the waiting
        // window expands to 6000 ms to ease the request rate on the server.
        // Similarly, the 3rd retry waits a 6000 ms grace period and the
        // window expands to 9000 ms.
        // waitTime = grace period for the last round of attempt + 
        // expanding time window for each failure
        double waitTime = timeWindow * failureTimes + 
          timeWindow * (failureTimes + 1) * DFSClient.r.nextDouble(); 
        DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failureTimes + 1) + 
            " IOException, will wait for " + waitTime + " msec.", ie);
        Thread.sleep((long)waitTime);
      } catch (InterruptedException iex) {
        // Interrupted while backing off; fall through and retry immediately.
      }
      deadNodes.clear(); //2nd option is to remove only nodes[blockId]
      openInfo();
      block = getBlockAt(block.getStartOffset(), false, true);
      DFSClient.dfsInputStreamfailures.set(failureTimes+1);
      continue;
    }
  }
}
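The backoff comment in Example 7 translates into a simple closed form: waitTime = timeWindow * failureTimes + timeWindow * (failureTimes + 1) * random. Below is a standalone sketch that prints the resulting wait ranges for the first few failures, assuming the 3000 ms base window the comment describes (BackoffDemo is an illustrative name, not part of RDFS):

import java.util.Random;

public class BackoffDemo {
  public static void main(String[] args) {
    final double timeWindow = 3000; // base window, per the comment in chooseDataNode
    final Random r = new Random();
    for (int failureTimes = 0; failureTimes < 3; failureTimes++) {
      double grace = timeWindow * failureTimes;           // fixed grace period
      double window = timeWindow * (failureTimes + 1);    // expanding random window
      double waitTime = grace + window * r.nextDouble();  // same formula as DFSInputStream
      System.out.println("failure " + failureTimes + ": wait in [" + grace +
          ", " + (grace + window) + ") ms, sampled " + waitTime);
    }
  }
}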