Java Code Examples for org.apache.hadoop.hdfs.DistributedFileSystem#getFileStatus()

The following examples show how to use org.apache.hadoop.hdfs.DistributedFileSystem#getFileStatus(). Each example is drawn from an open-source project; the source file and license are noted above each snippet.
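Before the project examples, here is a minimal, self-contained sketch of the basic call pattern. It is not taken from any of the examples below; the NameNode URI (hdfs://localhost:8020) and the file path (/tmp/example.txt) are placeholders you would replace for your own cluster.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class GetFileStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; point this at your own cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("URI is not backed by HDFS");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    // Placeholder path; getFileStatus throws FileNotFoundException if it does not exist.
    Path path = new Path("/tmp/example.txt");
    FileStatus status = dfs.getFileStatus(path);

    long len = status.getLen();
    long blockSize = status.getBlockSize();
    // Round up to whole blocks, the same arithmetic several examples below use.
    long numBlocks = (len + blockSize - 1) / blockSize;

    System.out.println(path + ": length=" + len + " bytes, blockSize=" + blockSize
        + ", blocks=" + numBlocks + ", replication=" + status.getReplication());
  }
}

The project examples below exercise the same call in more involved test and RAID-reconstruction scenarios.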
Example 1
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0
private static void checkNumCachedReplicas(final DistributedFileSystem dfs,
    final List<Path> paths, final int expectedBlocks,
    final int expectedReplicas)
    throws Exception {
  int numCachedBlocks = 0;
  int numCachedReplicas = 0;
  for (Path p: paths) {
    final FileStatus f = dfs.getFileStatus(p);
    final long len = f.getLen();
    final long blockSize = f.getBlockSize();
    // round it up to full blocks
    final long numBlocks = (len + blockSize - 1) / blockSize;
    BlockLocation[] locs = dfs.getFileBlockLocations(p, 0, len);
    assertEquals("Unexpected number of block locations for path " + p,
        numBlocks, locs.length);
    for (BlockLocation l: locs) {
      if (l.getCachedHosts().length > 0) {
        numCachedBlocks++;
      }
      numCachedReplicas += l.getCachedHosts().length;
    }
  }
  LOG.info("Found " + numCachedBlocks + " of " + expectedBlocks + " blocks");
  LOG.info("Found " + numCachedReplicas + " of " + expectedReplicas
      + " replicas");
  assertEquals("Unexpected number of cached blocks", expectedBlocks,
      numCachedBlocks);
  assertEquals("Unexpected number of cached replicas", expectedReplicas,
      numCachedReplicas);
}
 
Example 2
Source File: RaidShell.java    From RDFS with Apache License 2.0
void collectFileCorruptBlocksInStripe(final DistributedFileSystem dfs, 
    final RaidInfo raidInfo, final Path filePath, 
    final HashMap<Integer, Integer> corruptBlocksPerStripe)
        throws IOException {
  // read conf
  final int stripeBlocks = raidInfo.codec.stripeLength;

  // figure out which blocks are missing/corrupted
  final FileStatus fileStatus = dfs.getFileStatus(filePath);
  final long blockSize = fileStatus.getBlockSize();
  final long fileLength = fileStatus.getLen();
  final long fileLengthInBlocks = RaidNode.numBlocks(fileStatus); 
  final long fileStripes = RaidNode.numStripes(fileLengthInBlocks,
      stripeBlocks);
  final BlockLocation[] fileBlocks = 
    dfs.getFileBlockLocations(fileStatus, 0, fileLength);
  
  // figure out which stripes these corrupted blocks belong to
  for (BlockLocation fileBlock: fileBlocks) {
    int blockNo = (int) (fileBlock.getOffset() / blockSize);
    final int stripe = blockNo / stripeBlocks;
    if (this.isBlockCorrupt(fileBlock)) {
      this.incCorruptBlocksPerStripe(corruptBlocksPerStripe, stripe);
      if (LOG.isDebugEnabled()) {
        LOG.debug("file " + filePath.toString() + " corrupt in block " + 
                 blockNo + "/" + fileLengthInBlocks + ", stripe " + stripe +
                 "/" + fileStripes);
      }
    } else {
      if (LOG.isDebugEnabled()) {
        LOG.debug("file " + filePath.toString() + " OK in block " + blockNo +
                 "/" + fileLengthInBlocks + ", stripe " + stripe + "/" +
                 fileStripes);
      }
    }
  }
  checkParityBlocks(filePath, corruptBlocksPerStripe, blockSize, 0, fileStripes,
                    fileStripes, raidInfo);
}
 
Example 3
Source File: TestDirectoryRaidShellFsck.java    From RDFS with Apache License 2.0
/**
 * creates a MiniDFS instance with a raided file in it
 */
public void setUpCluster(int rsParityLength) throws IOException, ClassNotFoundException {
  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  Utils.loadTestCodecs(conf, STRIPE_BLOCKS, STRIPE_BLOCKS, 1, rsParityLength,
      "/destraid", "/destraidrs", false, true);
  conf.setBoolean("dfs.permissions", false);
  cluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  cluster.waitActive();
  dfs = (DistributedFileSystem) cluster.getFileSystem();
  String namenode = dfs.getUri().toString();
  FileSystem.setDefaultUri(conf, namenode);
  Codec dirRS = Codec.getCodec("rs");
  long[] crcs = new long[fileSizes.length];
  int[] seeds = new int[fileSizes.length];
  files = TestRaidDfs.createTestFiles(srcDir, fileSizes,
    blockSizes, crcs, seeds, (FileSystem)dfs, (short)1);
  assertTrue(RaidNode.doRaid(conf, dfs.getFileStatus(srcDir),
    new Path(dirRS.parityDirectory), dirRS,
    new RaidNode.Statistics(),
    RaidUtils.NULL_PROGRESSABLE,
    false, 1, 1));
  srcStats = new FileStatus[files.length];
  for (int i = 0 ; i < files.length; i++) {
    srcStats[i] = dfs.getFileStatus(files[i]);
  }
  parityStat = dfs.getFileStatus(parityFile);
  clientConf = new Configuration(conf);
  clientConf.set("fs.hdfs.impl",
                 "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
  clientConf.set("fs.raid.underlyingfs.impl",
                 "org.apache.hadoop.hdfs.DistributedFileSystem");
  // prepare shell and arguments
  shell = new RaidShell(clientConf);
  args = new String[2];
  args[0] = "-fsck";
  args[1] = "/";
}
 
Example 4
Source File: TestHASafeMode.java    From hadoop with Apache License 2.0
/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
  cluster.getConfiguration(0).set(
      DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
  String testData = "testData";
  // to make sure we write the full block before creating dummy block at NN.
  cluster.getConfiguration(0).setInt("io.bytes.per.checksum",
      testData.length());
  cluster.restartNameNode(0);
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    cluster.transitionToStandby(1);
    DistributedFileSystem dfs = cluster.getFileSystem(0);
    String pathString = "/tmp1.txt";
    Path filePath = new Path(pathString);
    FSDataOutputStream create = dfs.create(filePath,
        FsPermission.getDefault(), true, 1024, (short) 3, testData.length(),
        null);
    create.write(testData.getBytes());
    create.hflush();
    long fileId = ((DFSOutputStream)create.
        getWrappedStream()).getFileId();
    FileStatus fileStatus = dfs.getFileStatus(filePath);
    DFSClient client = DFSClientAdapter.getClient(dfs);
    // add one dummy block at NN, but not write to DataNode
    ExtendedBlock previousBlock =
        DFSClientAdapter.getPreviousBlock(client, fileId);
    DFSClientAdapter.getNamenode(client).addBlock(
        pathString,
        client.getClientName(),
        new ExtendedBlock(previousBlock),
        new DatanodeInfo[0],
        DFSClientAdapter.getFileId((DFSOutputStream) create
            .getWrappedStream()), null);
    cluster.restartNameNode(0, true);
    cluster.restartDataNode(0);
    cluster.transitionToActive(0);
    // let the block reports be processed.
    Thread.sleep(2000);
    FSDataInputStream is = dfs.open(filePath);
    is.close();
    dfs.recoverLease(filePath);// initiate recovery
    assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
int planRepairTasks(PathInfo pathInfo) throws IOException, InterruptedException {
	Path srcPath = pathInfo.srcPath;
	Path parityPath = pathInfo.parityPath;
	Codec codec = pathInfo.codec;
	
	DistributedFileSystem srcFs = getDFS(srcPath);
	DistributedFileSystem parityFs = getDFS(parityPath);
	String srcUriPath = srcPath.toUri().getPath();
	String parityUriPath = parityPath.toUri().getPath();
	FileStatus srcStat = srcFs.getFileStatus(srcPath);
	FileStatus parityStat = parityFs.getFileStatus(parityPath);

	// Second, get stripes that contain erased block
	List<Stripe> erasedStripeList = getErasedStripes(srcFs, srcUriPath,
			srcStat, parityFs, parityUriPath, parityStat, codec);

	if (erasedStripeList == null) {
		LOG.error("NTar: Error occurred when getting erased stripes for source file:"
				+ srcPath + " and its parity file:" + parityPath);
		return 0;
	}

	if (erasedStripeList.size() == 0) {
		LOG.info("NTar: There is no stripe about source file: " + srcPath
				+ " that contain blocks needing to be reconstructed. ignoring ...");
		return 0;
	}
	
	ErasureCode ec = null;
	if (codec.id.equals("crs"))
		ec = new CauchyRSCode();
	else if (codec.id.equals("lrc"))
		ec = new LocallyRepairableCode();
	ec.init(codec);
	int number = 0;
	for (Stripe stripe : erasedStripeList) {
		int[] erasedLocations = stripe.getErasures();
		for(int i = 0; i < 1; i++) {
			RepairTask task = planRepairTask(pathInfo, stripe, erasedLocations[i], ec);
			if(task != null) {
				if(isFirstTime) {
					startTime = System.currentTimeMillis();
					isFirstTime = false;
				}
				number = number + 1;
				pendingTasks.put(task);
			}
		}
	}
	return number;
}
 
Example 6
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
/**
 * Reads through a source file reconstructing lost blocks on the way.
 * 
 * @param srcPath
 *            Path identifying the lost file.
 * @throws IOException
 * @return true if file was reconstructed, false if no reconstruction was
 *         necessary or possible.
 */
boolean processFile(Path srcPath, ParityFilePair parityPair,
		Decoder decoder, Context context) throws IOException,
		InterruptedException {
	//LOG.info("Processing file " + srcPath);
	Progressable progress = context;
	if (progress == null) {
		progress = RaidUtils.NULL_PROGRESSABLE;
	}

	DistributedFileSystem srcFs = getDFS(srcPath);
	FileStatus srcStat = srcFs.getFileStatus(srcPath);
	long blockSize = srcStat.getBlockSize();
	long srcFileSize = srcStat.getLen();
	String uriPath = srcPath.toUri().getPath();

	List<LocatedBlockWithMetaInfo> lostBlocks = lostBlocksInFile(srcFs,
			uriPath, srcStat);
	if (lostBlocks.size() == 0) {
		LOG.warn("Couldn't find any lost blocks in file " + srcPath
				+ ", ignoring...");
		return false;
	}
	
	for (LocatedBlockWithMetaInfo lb : lostBlocks) {
		Block lostBlock = lb.getBlock();
		long lostBlockOffset = lb.getStartOffset();

		final long blockContentsSize = Math.min(blockSize, srcFileSize
				- lostBlockOffset);
		File localBlockFile = File.createTempFile(lostBlock.getBlockName(),
				".tmp");
		localBlockFile.deleteOnExit();

		try {
			decoder.recoverBlockToFile(srcFs, srcPath,
					parityPair.getFileSystem(), parityPair.getPath(),
					blockSize, lostBlockOffset, localBlockFile,
					blockContentsSize, context);

			// Now that we have recovered the file block locally, send it.
			String datanode = chooseDatanode(lb.getLocations());
			computeMetadataAndSendReconstructedBlock(datanode,
					localBlockFile, lostBlock, blockContentsSize,
					lb.getDataProtocolVersion(), lb.getNamespaceID(),
					progress);

		} finally {
			localBlockFile.delete();
		}
		progress.progress();
	}

	return true;
}
 
Example 7
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
/**
 * Reads through a parity file, reconstructing lost blocks on the way. This
 * function uses the corresponding source file to regenerate parity file
 * blocks.
 * 
 * @return true if file was reconstructed, false if no reconstruction was
 *         necessary or possible.
 */
boolean processParityFile(Path parityPath, Decoder decoder, Context context)
		throws IOException, InterruptedException {

	Progressable progress = context;
	if (progress == null) {
		progress = RaidUtils.NULL_PROGRESSABLE;
	}

	Path srcPath = sourcePathFromParityPath(parityPath);
	if (srcPath == null) {
		LOG.warn("Could not get regular file corresponding to parity file "
				+ parityPath + ", ignoring...");
		return false;
	}

	DistributedFileSystem parityFs = getDFS(parityPath);
	DistributedFileSystem srcFs = getDFS(srcPath);
	FileStatus parityStat = parityFs.getFileStatus(parityPath);
	long blockSize = parityStat.getBlockSize();
	FileStatus srcStat = srcFs.getFileStatus(srcPath);
	
	// Check timestamp.
	if (srcStat.getModificationTime() != parityStat.getModificationTime()) {
		LOG.warn("Mismatching timestamp for " + srcPath + " and "
				+ parityPath + ", ignoring...");
		return false;
	}

	String uriPath = parityPath.toUri().getPath();
	List<LocatedBlockWithMetaInfo> lostBlocks = lostBlocksInFile(parityFs,
			uriPath, parityStat);
	if (lostBlocks.size() == 0) {
		LOG.warn("Couldn't find any lost blocks in parity file "
				+ parityPath + ", ignoring...");
		return false;
	}
	for (LocatedBlockWithMetaInfo lb : lostBlocks) {
		Block lostBlock = lb.getBlock();
		long lostBlockOffset = lb.getStartOffset();

		File localBlockFile = File.createTempFile(lostBlock.getBlockName(),
				".tmp");
		localBlockFile.deleteOnExit();

		try {
			decoder.recoverParityBlockToFile(srcFs, srcPath, parityFs,
					parityPath, blockSize, lostBlockOffset, localBlockFile,
					context);

			// Now that we have recovered the parity file block locally,
			// send it.
			String datanode = chooseDatanode(lb.getLocations());
			computeMetadataAndSendReconstructedBlock(datanode,
					localBlockFile, lostBlock, blockSize,
					lb.getDataProtocolVersion(), lb.getNamespaceID(),
					progress);
		} finally {
			localBlockFile.delete();
		}
		progress.progress();
	}

	return true;
}
 
Example 8
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
/**
 * Reads through a parity HAR part file, reconstructing lost blocks on the
 * way. A HAR block can contain many file blocks, as long as the HAR part
 * file block size is a multiple of the file block size.
 * 
 * @return true if file was reconstructed, false if no reconstruction was
 *         necessary or possible.
 */
boolean processParityHarPartFile(Path partFile, Progressable progress)
		throws IOException {
	LOG.info("Processing parity HAR file " + partFile);
	// Get some basic information.
	DistributedFileSystem dfs = getDFS(partFile);
	FileStatus partFileStat = dfs.getFileStatus(partFile);
	long partFileBlockSize = partFileStat.getBlockSize();
	LOG.info(partFile + " has block size " + partFileBlockSize);

	// Find the path to the index file.
	// Parity file HARs are only one level deep, so the index file is at
	// the same level as the part file.
	// Parses through the HAR index file.
	HarIndex harIndex = HarIndex.getHarIndex(dfs, partFile);
	String uriPath = partFile.toUri().getPath();
	int numBlocksReconstructed = 0;
	List<LocatedBlockWithMetaInfo> lostBlocks = lostBlocksInFile(dfs,
			uriPath, partFileStat);
	if (lostBlocks.size() == 0) {
		LOG.warn("Couldn't find any lost blocks in HAR file " + partFile
				+ ", ignoring...");
		return false;
	}
	for (LocatedBlockWithMetaInfo lb : lostBlocks) {
		Block lostBlock = lb.getBlock();
		long lostBlockOffset = lb.getStartOffset();

		File localBlockFile = File.createTempFile(lostBlock.getBlockName(),
				".tmp");
		localBlockFile.deleteOnExit();

		try {
			processParityHarPartBlock(dfs, partFile, lostBlock,
					lostBlockOffset, partFileStat, harIndex,
					localBlockFile, progress);

			// Now that we have recovered the part file block locally, send
			// it.
			String datanode = chooseDatanode(lb.getLocations());
			computeMetadataAndSendReconstructedBlock(datanode,
					localBlockFile, lostBlock, localBlockFile.length(),
					lb.getDataProtocolVersion(), lb.getNamespaceID(),
					progress);

			numBlocksReconstructed++;
		} finally {
			localBlockFile.delete();
		}
		progress.progress();
	}

	return true;
}
 
Example 9
Source File: TestRaidShellFsck.java    From RDFS with Apache License 2.0
/**
 * removes a parity block in the specified stripe
 */
private void removeParityBlock(Path filePath, int stripe) throws IOException {
  // find parity file
  ParityFilePair ppair =
      ParityFilePair.getParityFile(Codec.getCodec("xor"), filePath, conf);
  String parityPathStr = ppair.getPath().toUri().getPath();
  LOG.info("parity path: " + parityPathStr);
  FileSystem parityFS = ppair.getFileSystem();
  if (!(parityFS instanceof DistributedFileSystem)) {
    throw new IOException("parity file is not on distributed file system");
  }
  DistributedFileSystem parityDFS = (DistributedFileSystem) parityFS;

  
  // now corrupt the block corresponding to the stripe selected
  FileStatus parityFileStatus =
    parityDFS.getFileStatus(new Path(parityPathStr));
  long parityBlockSize = parityFileStatus.getBlockSize();
  long parityFileLength = parityFileStatus.getLen();
  long parityFileLengthInBlocks = (parityFileLength / parityBlockSize) + 
    (((parityFileLength % parityBlockSize) == 0) ? 0L : 1L);
  if (parityFileLengthInBlocks <= stripe) {
    throw new IOException("selected stripe " + stripe + 
                          " but parity file only has " + 
                          parityFileLengthInBlocks + " blocks");
  }
  if (parityBlockSize != BLOCK_SIZE) {
    throw new IOException("file block size is " + BLOCK_SIZE + 
                          " but parity file block size is " + 
                          parityBlockSize);
  }
  LocatedBlocks parityFileBlocks = parityDFS.getClient().namenode.
    getBlockLocations(parityPathStr, 0, parityFileLength);
  if (parityFileBlocks.locatedBlockCount() != parityFileLengthInBlocks) {
    throw new IOException("expected " + parityFileLengthInBlocks + 
                          " parity file blocks but got " + 
                          parityFileBlocks.locatedBlockCount() + 
                          " blocks");
  }
  LocatedBlock parityFileBlock = parityFileBlocks.get(stripe);
  removeAndReportBlock(parityDFS, new Path(parityPathStr), parityFileBlock);
  LOG.info("removed parity file block/stripe " + stripe +
           " for " + filePath.toString());

}
 
Example 10
Source File: TestRaidShellFsck_CorruptCounter.java    From RDFS with Apache License 2.0
/**
 * removes a parity block in the specified stripe
 */
private void removeParityBlock(Path filePath, int stripe, int blockInStripe) throws IOException {
  // find parity file
  ParityFilePair ppair =
      ParityFilePair.getParityFile(Codec.getCodec(CODE_USED), filePath, conf);
 // System.err.println("Got the parityFilePair");
  String parityPathStr = ppair.getPath().toUri().getPath();
 // System.err.println("Path to parity"+parityPathStr);
  LOG.info("parity path: " + parityPathStr);
  FileSystem parityFS = ppair.getFileSystem();
  if (!(parityFS instanceof DistributedFileSystem)) {
    throw new IOException("parity file is not on distributed file system");
  }
  DistributedFileSystem parityDFS = (DistributedFileSystem) parityFS;

  
  // now corrupt the block corresponding to the stripe selected
  FileStatus parityFileStatus =
    parityDFS.getFileStatus(new Path(parityPathStr));
  long parityBlockSize = parityFileStatus.getBlockSize();
  long parityFileLength = parityFileStatus.getLen();
  long parityFileLengthInBlocks = (parityFileLength / parityBlockSize) + 
    (((parityFileLength % parityBlockSize) == 0) ? 0L : 1L);
  if (parityFileLengthInBlocks <= stripe) {
    throw new IOException("selected stripe " + stripe + 
                          " but parity file only has " + 
                          parityFileLengthInBlocks + " blocks");
  }
  if (parityBlockSize != BLOCK_SIZE) {
    throw new IOException("file block size is " + BLOCK_SIZE + 
                          " but parity file block size is " + 
                          parityBlockSize);
  }
  LocatedBlocks parityFileBlocks = parityDFS.getClient().namenode.
    getBlockLocations(parityPathStr, 0, parityFileLength);
  if (blockInStripe >= PARITY_BLOCKS) {
    throw new IOException("blockInStripe is " + blockInStripe +
                          " but must be smaller than " + PARITY_BLOCKS);
  }
  LocatedBlock parityFileBlock = parityFileBlocks.get(stripe * PARITY_BLOCKS + blockInStripe);
  removeAndReportBlock(parityDFS, new Path(parityPathStr), parityFileBlock);
  LOG.info("removed parity file block/stripe " + stripe + " for " + filePath.toString());

}