org.apache.hadoop.hdfs.protocol.ExtendedBlock Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.ExtendedBlock. An ExtendedBlock identifies an HDFS block by its block pool ID together with the underlying block's ID, length, and generation stamp. The project and source file each example was taken from are noted above its code.
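
Before the examples, here is a minimal, self-contained sketch of constructing an ExtendedBlock and reading its fields. Every constructor and accessor used here also appears in the examples below; the pool ID "bp-1" and the numeric values are made up for illustration.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ExtendedBlockSketch {
  public static void main(String[] args) {
    // Pool id plus block id, length, and generation stamp -- the same
    // 4-argument constructor used in Examples #6, #10, and #22.
    ExtendedBlock eb = new ExtendedBlock("bp-1", 42L, 1024L, 1001L);

    // Or wrap an existing Block in a pool, as in Examples #5 and #25.
    Block local = new Block(42L, 1024L, 1001L);
    ExtendedBlock eb2 = new ExtendedBlock("bp-1", local);

    // Accessors used throughout the examples below.
    System.out.println(eb.getBlockPoolId());      // bp-1
    System.out.println(eb.getBlockId());          // 42
    System.out.println(eb.getNumBytes());         // 1024
    System.out.println(eb.getGenerationStamp());  // 1001
    System.out.println(eb2.getLocalBlock());      // the wrapped Block
  }
}
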
Example #1
Source File: Sender.java    From big-c with Apache License 2.0
@Override
public void readBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final long blockOffset,
    final long length,
    final boolean sendChecksum,
    final CachingStrategy cachingStrategy) throws IOException {

  OpReadBlockProto proto = OpReadBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
    .setOffset(blockOffset)
    .setLen(length)
    .setSendChecksums(sendChecksum)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .build();

  send(out, Op.READ_BLOCK, proto);
}
 
Example #2
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
      b.getLocalBlock());
  if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
    // remove from volumeMap
    volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
    
    // delete the on-disk temp file
    if (delBlockFromDisk(replicaInfo.getBlockFile(), 
        replicaInfo.getMetaFile(), b.getLocalBlock())) {
      LOG.warn("Block " + b + " unfinalized and removed. " );
    }
    if (replicaInfo.getVolume().isTransientStorage()) {
      ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
    }
  }
}
 
Example #3
Source File: BlockReaderLocalLegacy.java    From big-c with Apache License 2.0
LocalDatanodeInfo() {
  final int cacheSize = 10000;
  final float hashTableLoadFactor = 0.75f;
  int hashTableCapacity = (int) Math.ceil(cacheSize / hashTableLoadFactor) + 1;
  cache = Collections
      .synchronizedMap(new LinkedHashMap<ExtendedBlock, BlockLocalPathInfo>(
          hashTableCapacity, hashTableLoadFactor, true) {
        private static final long serialVersionUID = 1;

        @Override
        protected boolean removeEldestEntry(
            Map.Entry<ExtendedBlock, BlockLocalPathInfo> eldest) {
          return size() > cacheSize;
        }
      });
}
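
The constructor above builds a small LRU cache of ExtendedBlock-to-BlockLocalPathInfo mappings out of a plain LinkedHashMap: passing accessOrder=true keeps entries in least-recently-used order, and overriding removeEldestEntry caps the cache size. The self-contained sketch below (with String keys and values instead of the HDFS types, purely for illustration) shows the same idiom evicting the least recently used entry.

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class LruCacheSketch {
  public static void main(String[] args) {
    final int cacheSize = 2;
    // accessOrder=true keeps iteration in least-recently-used order;
    // removeEldestEntry drops the eldest entry once the cap is exceeded.
    Map<String, String> cache = Collections.synchronizedMap(
        new LinkedHashMap<String, String>(4, 0.75f, true) {
          @Override
          protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
            return size() > cacheSize;
          }
        });

    cache.put("blk_1", "/data/blk_1");
    cache.put("blk_2", "/data/blk_2");
    cache.get("blk_1");                  // touch blk_1, so blk_2 is now eldest
    cache.put("blk_3", "/data/blk_3");   // exceeds the cap and evicts blk_2
    System.out.println(cache.keySet());  // [blk_1, blk_3]
  }
}
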
 
Example #4
Source File: VolumeScanner.java    From big-c with Apache License 2.0
public synchronized void markSuspectBlock(ExtendedBlock block) {
  if (stopping) {
    LOG.info("{}: Not scheduling suspect block {} for " +
        "rescanning, because this volume scanner is stopping.", this, block);
    return;
  }
  Boolean recent = recentSuspectBlocks.getIfPresent(block);
  if (recent != null) {
    LOG.info("{}: Not scheduling suspect block {} for " +
        "rescanning, because we rescanned it recently.", this, block);
    return;
  }
  if (suspectBlocks.contains(block)) {
    LOG.info("{}: suspect block {} is already queued for " +
        "rescanning.", this, block);
    return;
  }
  suspectBlocks.add(block);
  recentSuspectBlocks.put(block, true);
  LOG.info("{}: Scheduling suspect block {} for rescanning.", this, block);
  notify(); // wake scanner thread.
}
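
The getIfPresent()/put() calls on recentSuspectBlocks match the API of a Guava Cache. Assuming that is what backs it, a minimal sketch of building and probing such a cache might look like the following; the maximum size and expiry period here are guesses for illustration, not the values VolumeScanner actually uses.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class SuspectBlockCacheSketch {
  public static void main(String[] args) {
    // A bounded, time-expiring cache; getIfPresent()/put() are the calls
    // made on recentSuspectBlocks above. Size and expiry are illustrative.
    Cache<ExtendedBlock, Boolean> recentSuspectBlocks = CacheBuilder.newBuilder()
        .maximumSize(64)
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .build();

    ExtendedBlock block = new ExtendedBlock("bp-1", 42L, 1024L, 1001L);
    if (recentSuspectBlocks.getIfPresent(block) == null) {
      recentSuspectBlocks.put(block, Boolean.TRUE);
    }
  }
}
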
 
Example #5
Source File: TestSimulatedFSDataset.java    From hadoop with Apache License 2.0
@Test
public void testInvalidate() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(bpid, deleteBlocks);
  checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
  checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted,  fsdataset.getRemaining());
  
  // Now make sure the rest of the blocks are valid
  for (int i=3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b)));
  }
}
 
Example #6
Source File: TestBalancerWithMultipleNameNodes.java    From big-c with Apache License 2.0
private static ExtendedBlock[][] generateBlocks(Suite s, long size
    ) throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
 
Example #7
Source File: Sender.java    From hadoop with Apache License 2.0
@Override
public void readBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final long blockOffset,
    final long length,
    final boolean sendChecksum,
    final CachingStrategy cachingStrategy) throws IOException {

  OpReadBlockProto proto = OpReadBlockProto.newBuilder()
    .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
    .setOffset(blockOffset)
    .setLen(length)
    .setSendChecksums(sendChecksum)
    .setCachingStrategy(getCachingStrategy(cachingStrategy))
    .build();

  send(out, Op.READ_BLOCK, proto);
}
 
Example #8
Source File: DFSInputStream.java    From big-c with Apache License 2.0
private void fetchBlockByteRange(LocatedBlock block, long start, long end,
    byte[] buf, int offset,
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
    throws IOException {
  block = getBlockAt(block.getStartOffset());
  while (true) {
    DNAddrPair addressPair = chooseDataNode(block, null);
    try {
      actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
          corruptedBlockMap);
      return;
    } catch (IOException e) {
      // Ignore. Already processed inside the function.
      // Loop through to try the next node.
    }
  }
}
 
Example #9
Source File: Sender.java    From big-c with Apache License 2.0
@Override
public void requestShortCircuitFds(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
      throws IOException {
  OpRequestShortCircuitAccessProto.Builder builder =
      OpRequestShortCircuitAccessProto.newBuilder()
        .setHeader(DataTransferProtoUtil.buildBaseHeader(
          blk, blockToken)).setMaxVersion(maxVersion);
  if (slotId != null) {
    builder.setSlotId(PBHelper.convert(slotId));
  }
  builder.setSupportsReceiptVerification(supportsReceiptVerification);
  OpRequestShortCircuitAccessProto proto = builder.build();
  send(out, Op.REQUEST_SHORT_CIRCUIT_FDS, proto);
}
 
Example #10
Source File: TestBalancerWithMultipleNameNodes.java    From hadoop with Apache License 2.0
private static ExtendedBlock[][] generateBlocks(Suite s, long size
    ) throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
 
Example #11
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
@Override  // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
    ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);

  ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);

  FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
  ReplicaBeingWritten replica;
  try {
    // change the replica's state/gs etc.
    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
      replica = append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo,
                       newGS, b.getNumBytes());
    } else { //RBW
      bumpReplicaGS(replicaInfo, newGS);
      replica = (ReplicaBeingWritten) replicaInfo;
    }
  } catch (IOException e) {
    IOUtils.cleanup(null, ref);
    throw e;
  }
  return new ReplicaHandler(replica, ref);
}
 
Example #12
Source File: TestWriteBlockGetsBlockLengthHint.java    From big-c with Apache License 2.0
/**
 * Override createRbw to verify that the block length that is passed
 * is correct. This requires both DFSOutputStream and BlockReceiver to
 * correctly propagate the hint to FsDatasetSpi.
 */
@Override
public synchronized ReplicaHandler createRbw(
    StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
    throws IOException {
  assertThat(b.getLocalBlock().getNumBytes(), is(EXPECTED_BLOCK_LENGTH));
  return super.createRbw(storageType, b, allowLazyPersist);
}
 
Example #13
Source File: DataNode.java    From hadoop with Apache License 2.0
private void checkBlockToken(ExtendedBlock block, Token<BlockTokenIdentifier> token,
    AccessMode accessMode) throws IOException {
  if (isBlockTokenEnabled) {
    BlockTokenIdentifier id = new BlockTokenIdentifier();
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    id.readFields(in);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got: " + id.toString());
    }
    blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode);
  }
}
 
Example #14
Source File: MergeSortRowIdMatcher.java    From incubator-retired-blur with Apache License 2.0
private static String getFirstBlockId(FileSystem fileSystem, Path realFile) throws IOException {
  FileStatus fileStatus = fileSystem.getFileStatus(realFile);
  BlockLocation[] locations = fileSystem.getFileBlockLocations(fileStatus, 0, 1);
  HdfsBlockLocation location = (HdfsBlockLocation) locations[0];
  LocatedBlock locatedBlock = location.getLocatedBlock();
  ExtendedBlock block = locatedBlock.getBlock();
  return toNiceString(block.getBlockId());
}
 
Example #15
Source File: DataNode.java    From hadoop with Apache License 2.0
/** Notify the corresponding namenode that the block has been deleted. */
public void notifyNamenodeDeletedBlock(ExtendedBlock block, String storageUuid) {
  BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
  if (bpos != null) {
    bpos.notifyNamenodeDeletedBlock(block, storageUuid);
  } else {
    LOG.error("Cannot find BPOfferService for reporting block deleted for bpid="
        + block.getBlockPoolId());
  }
}
 
Example #16
Source File: SimulatedFSDataset.java    From big-c with Apache License 2.0
@Override // FsDatasetSpi
public synchronized long getLength(ExtendedBlock b) throws IOException {
  final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
  BInfo binfo = map.get(b.getLocalBlock());
  if (binfo == null) {
    throw new IOException("Finalizing a non existing block " + b);
  }
  return binfo.getNumBytes();
}
 
Example #17
Source File: SimulatedFSDataset.java    From big-c with Apache License 2.0
/**
 * Check if a block is valid.
 *
 * @param b           The block to check.
 * @param minLength   The minimum length that the block must have.  May be 0.
 * @param state       If this is null, it is ignored.  If it is non-null, we
 *                        will check that the replica has this state.
 *
 * @throws ReplicaNotFoundException          If the replica is not found
 *
 * @throws UnexpectedReplicaStateException   If the replica is not in the 
 *                                             expected state.
 */
@Override // {@link FsDatasetSpi}
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
    throws ReplicaNotFoundException, UnexpectedReplicaStateException {
  final BInfo binfo = getBInfo(b);
  
  if (binfo == null) {
    throw new ReplicaNotFoundException(b);
  }
  if ((state == ReplicaState.FINALIZED && !binfo.isFinalized()) ||
      (state != ReplicaState.FINALIZED && binfo.isFinalized())) {
    throw new UnexpectedReplicaStateException(b,state);
  }
}
 
Example #18
Source File: TestClientReportBadBlock.java    From big-c with Apache License 2.0
/**
 * Corrupt a block on a data node by replacing the block file content with
 * the byte sequence 0, 1, ..., BLOCK_SIZE-1.
 * 
 * @param block
 *          the ExtendedBlock to be corrupted
 * @param dn
 *          the data node where the block needs to be corrupted
 * @throws FileNotFoundException
 * @throws IOException
 */
private static void corruptBlock(final ExtendedBlock block, final DataNode dn)
    throws FileNotFoundException, IOException {
  final File f = DataNodeTestUtils.getBlockFile(
      dn, block.getBlockPoolId(), block.getLocalBlock());
  final RandomAccessFile raFile = new RandomAccessFile(f, "rw");
  final byte[] bytes = new byte[(int) BLOCK_SIZE];
  for (int i = 0; i < BLOCK_SIZE; i++) {
    bytes[i] = (byte) (i);
  }
  raFile.write(bytes);
  raFile.close();
}
 
Example #19
Source File: DataNode.java    From hadoop with Apache License 2.0
protected void notifyNamenodeReceivingBlock(
    ExtendedBlock block, String storageUuid) {
  BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
  if(bpos != null) {
    bpos.notifyNamenodeReceivingBlock(block, storageUuid);
  } else {
    LOG.error("Cannot find BPOfferService for reporting block receiving for bpid="
        + block.getBlockPoolId());
  }
}
 
Example #20
Source File: DataNode.java    From big-c with Apache License 2.0
/**
 * Transfer a replica to the datanode targets.
 * @param b the block to transfer.
 *          The corresponding replica must be an RBW or a Finalized replica.
 *          Its GS and numBytes will be set to
 *          the stored GS and the visible length.
 * @param targets targets to transfer the block to
 * @param client client name
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
    final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  //get replica information
  synchronized(data) {
    Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
        b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(storedGS
          + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  //set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
  }
}
 
Example #21
Source File: SimulatedFSDataset.java    From hadoop with Apache License 2.0
@Override // FsDatasetSpi
public synchronized boolean isValidBlock(ExtendedBlock b) {
  try {
    checkBlock(b, 0, ReplicaState.FINALIZED);
  } catch (IOException e) {
    return false;
  }
  return true;
}
 
Example #22
Source File: TestDFSUtil.java    From hadoop with Apache License 2.0
/**
 * Test conversion of LocatedBlock to BlockLocation
 */
@Test
public void testLocatedBlocks2Locations() {
  DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] ds = new DatanodeInfo[1];
  ds[0] = d;

  // ok
  ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
  LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);

  // corrupt
  ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
  LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);

  List<LocatedBlock> ls = Arrays.asList(l1, l2);
  LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);

  BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);

  assertTrue("expected 2 blocks but got " + bs.length,
             bs.length == 2);

  int corruptCount = 0;
  for (BlockLocation b: bs) {
    if (b.isCorrupt()) {
      corruptCount++;
    }
  }

  assertTrue("expected 1 corrupt files but got " + corruptCount,
      corruptCount == 1);

  // test an empty location
  bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
  assertEquals(0, bs.length);
}
 
Example #23
Source File: BlockSender.java    From hadoop with Apache License 2.0
private static Replica getReplica(ExtendedBlock block, DataNode datanode)
    throws ReplicaNotFoundException {
  Replica replica = datanode.data.getReplica(block.getBlockPoolId(),
      block.getBlockId());
  if (replica == null) {
    throw new ReplicaNotFoundException(block);
  }
  return replica;
}
 
Example #24
Source File: TestCachingStrategy.java    From hadoop with Apache License 2.0
@Test(timeout=120000)
public void testFadviseAfterWriteThenRead() throws Exception {
  // start a cluster
  LOG.info("testFadviseAfterWriteThenRead");
  tracker.clear();
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  String TEST_PATH = "/test";
  int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
        .build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // create new file
    createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, true);
    // verify that we dropped everything from the cache during file creation.
    ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
        TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
    String fadvisedFileName = cluster.getBlockFile(0, block).getName();
    Stats stats = tracker.getStats(fadvisedFileName);
    stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
    stats.clear();
    
    // read file
    readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, true);
    // verify that we dropped everything from the cache.
    Assert.assertNotNull(stats);
    stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #25
Source File: TestSnapshotBlocksMap.java    From big-c with Apache License 2.0
/**
 * Make sure we delete the 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example #26
Source File: VolumeScanner.java    From hadoop with Apache License 2.0
public void handle(ExtendedBlock block, IOException e) {
  FsVolumeSpi volume = scanner.volume;
  if (e == null) {
    LOG.trace("Successfully scanned {} on {}", block, volume.getBasePath());
    return;
  }
  // If the block does not exist anymore, then it's not an error.
  if (!volume.getDataset().contains(block)) {
    LOG.debug("Volume {}: block {} is no longer in the dataset.",
        volume.getBasePath(), block);
    return;
  }
  // If the block exists, the exception may be due to a race with a write:
  // the BlockSender got an old block path in rbw, because BlockReceiver
  // moved the block from rbw to finalized but BlockSender tried to open
  // the file before BlockReceiver updated the VolumeMap. The state of the
  // block may have changed again by now, so ignore this error here. If a
  // block really was deleted by mistake, the DirectoryScanner should catch it.
  if (e instanceof FileNotFoundException ) {
    LOG.info("Volume {}: verification failed for {} because of " +
            "FileNotFoundException.  This may be due to a race with write.",
        volume.getBasePath(), block);
    return;
  }
  LOG.warn("Reporting bad {} on {}", block, volume.getBasePath());
  try {
    scanner.datanode.reportBadBlocks(block);
  } catch (IOException ie) {
    // This is bad, but not bad enough to shut down the scanner.
    LOG.warn("Cannot report bad " + block.getBlockId(), e);
  }
}
 
Example #27
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
/**
 * Get all files related to a block from all the datanodes
 * @param block block for which corresponding files are needed
 */
public File[] getAllBlockFiles(ExtendedBlock block) {
  if (dataNodes.size() == 0) return new File[0];
  ArrayList<File> list = new ArrayList<File>();
  for (int i=0; i < dataNodes.size(); i++) {
    File blockFile = getBlockFile(i, block);
    if (blockFile != null) {
      list.add(blockFile);
    }
  }
  return list.toArray(new File[list.size()]);
}
 
Example #28
Source File: TestProcessCorruptBlocks.java    From hadoop with Apache License 2.0
private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path fileName,
    int dnIndex, ExtendedBlock block) throws IOException {
  // corrupt the block on datanode dnIndex;
  // the indexes change once the nodes are restarted,
  // but the data directory will not change.
  assertTrue(cluster.corruptReplica(dnIndex, block));

  DataNodeProperties dnProps = cluster.stopDataNode(0);

  // Each datanode has multiple data dirs, check each
  for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    File storageDir = cluster.getStorageDir(dnIndex, dirIndex);
    File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
    File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
    if (scanLogFile.exists()) {
      // wait for one minute for deletion to succeed;
      for (int i = 0; !scanLogFile.delete(); i++) {
        assertTrue("Could not delete log file in one minute", i < 60);
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ignored) {
        }
      }
    }
  }

  // restart the datanode so the corrupt replica will be detected
  cluster.restartDataNode(dnProps);
}
 
Example #29
Source File: FsVolumeImpl.java    From hadoop with Apache License 2.0
/**
 * Get the next block.<p/>
 *
 * Each volume has a hierarchical structure.<p/>
 *
 * <code>
 * BPID B0
 *   finalized/
 *     subdir0
 *       subdir0
 *         blk_000
 *         blk_001
 *       ...
 *     subdir1
 *       subdir0
 *         ...
 *   rbw/
 * </code>
 *
 * When we run out of entries at one level of the structure, we search
 * progressively higher levels.  For example, when we run out of blk_
 * entries in a subdirectory, we search for the next subdirectory.
 * And so on.
 */
@Override
public ExtendedBlock nextBlock() throws IOException {
  if (state.atEnd) {
    return null;
  }
  try {
    while (true) {
      List<String> entries = getSubdirEntries();
      if (entries != null) {
        state.curEntry = nextSorted(entries, state.curEntry);
        if (state.curEntry == null) {
          LOG.trace("nextBlock({}, {}): advancing from {} to next " +
              "subdirectory.", storageID, bpid, state.curFinalizedSubDir);
        } else {
          ExtendedBlock block =
              new ExtendedBlock(bpid, Block.filename2id(state.curEntry));
          LOG.trace("nextBlock({}, {}): advancing to {}",
              storageID, bpid, block);
          return block;
        }
      }
      state.curFinalizedSubDir = getNextFinalizedSubDir();
      if (state.curFinalizedSubDir == null) {
        state.curFinalizedDir = getNextFinalizedDir();
        if (state.curFinalizedDir == null) {
          state.atEnd = true;
          return null;
        }
      }
    }
  } catch (IOException e) {
    state.atEnd = true;
    LOG.error("nextBlock({}, {}): I/O error", storageID, bpid, e);
    throw e;
  }
}
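
Since nextBlock() returns null once the volume has been exhausted (and remembers that via state.atEnd), callers can simply loop until they see null. Below is a hedged sketch of such a caller, using a hypothetical BlockSource interface as a stand-in for the real iterator type.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class BlockIterationSketch {
  /** Hypothetical stand-in for whatever iterator type declares nextBlock(). */
  interface BlockSource {
    ExtendedBlock nextBlock() throws IOException;
  }

  /** Collect blocks until nextBlock() signals the end by returning null. */
  static List<ExtendedBlock> drain(BlockSource source) throws IOException {
    List<ExtendedBlock> blocks = new ArrayList<>();
    ExtendedBlock block;
    while ((block = source.nextBlock()) != null) {
      blocks.add(block);
    }
    return blocks;
  }
}
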
 
Example #30
Source File: FsDatasetAsyncDiskService.java    From big-c with Apache License 2.0
ReplicaFileDeleteTask(FsVolumeReference volumeRef, File blockFile,
    File metaFile, ExtendedBlock block, String trashDirectory) {
  this.volumeRef = volumeRef;
  this.volume = (FsVolumeImpl) volumeRef.getVolume();
  this.blockFile = blockFile;
  this.metaFile = metaFile;
  this.block = block;
  this.trashDirectory = trashDirectory;
}