org.apache.hadoop.hdfs.protocol.LocatedBlock Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlock. Each example is drawn from an open-source project; its source file, originating project, and license are noted above the code.
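
A LocatedBlock pairs one block of a file (an ExtendedBlock) with the datanodes that hold its replicas, together with the block's starting offset in the file and an access token for reading it. As a quick orientation, here is a minimal sketch of the getBlockLocations pattern that most of the examples below build on. It assumes you already hold a ClientProtocol handle to the NameNode; the class name, method name, and output format are illustrative only, not part of the Hadoop API.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlockTour {
  /** Illustrative sketch: print each block of a file with its offset, size, and replicas. */
  static void printBlocks(ClientProtocol namenode, String src) throws IOException {
    // Ask the NameNode for all blocks covering the file.
    LocatedBlocks located = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
    for (LocatedBlock lb : located.getLocatedBlocks()) {
      System.out.println("block=" + lb.getBlock()        // the underlying ExtendedBlock
          + " startOffset=" + lb.getStartOffset()        // offset of this block in the file
          + " size=" + lb.getBlockSize());               // number of bytes in this block
      for (DatanodeInfo dn : lb.getLocations()) {        // datanodes holding a replica
        System.out.println("  replica on " + dn);
      }
    }
  }
}
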
Example #1
Source File: DFSInputStream.java    From big-c with Apache License 2.0
private void fetchBlockByteRange(LocatedBlock block, long start, long end,
    byte[] buf, int offset,
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
    throws IOException {
  block = getBlockAt(block.getStartOffset());
  while (true) {
    DNAddrPair addressPair = chooseDataNode(block, null);
    try {
      actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
          corruptedBlockMap);
      return;
    } catch (IOException e) {
      // Ignore. Already processed inside the function.
      // Loop through to try the next node.
    }
  }
}
 
Example #2
Source File: TestDFSClientRetries.java    From hadoop with Apache License 2.0
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    },
    goodLocatedBlock.getStartOffset(),
    false);

  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null);
}
 
Example #3
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Example #4
Source File: NameNodeRpcServer.java    From big-c with Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
        + " fileId=" + fileId + " for " + clientName);
  }
  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<Node>(excludedNodes.length);
    for (Node node : excludedNodes) {
      excludedNodesSet.add(node);
    }
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
      clientName, previous, excludedNodesSet, favoredNodesList);
  if (locatedBlock != null) {
    metrics.incrAddBlockOps();
  }
  return locatedBlock;
}
 
Example #5
Source File: TestBalancerWithMultipleNameNodes.java    From big-c with Apache License 2.0
private static ExtendedBlock[][] generateBlocks(Suite s, long size
    ) throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
 
Example #6
Source File: TestDFSLocatedBlocks.java    From RDFS with Apache License 2.0
@Override
public Boolean call() throws Exception {
  for (int i = 0; i < NUM_INSERTS; ++i) {
    List<LocatedBlock> newBlocks = randomBlockSubrange(rand, allBlocks);
    locatedBlocks.insertRange(newBlocks);
    for (LocatedBlock blk : newBlocks) {
      LocatedBlock blockFromArr = locatedBlocks.getBlockContainingOffset(
          blk.getStartOffset());
      assertEquals(blockFromArr.getBlockSize(), blk.getBlockSize());
    }

    List<LocatedBlock> locBlocksCopy =
        locatedBlocks.getLocatedBlocksCopy();
    for (int j = 1; j < locBlocksCopy.size(); ++j) {
      assertTrue(locBlocksCopy.get(j - 1).getStartOffset() <
          locBlocksCopy.get(j).getStartOffset());
    }
  }
  return true;
}
 
Example #7
Source File: ClientNamenodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #8
Source File: DFSInputStream.java    From hadoop with Apache License 2.0
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
    final LocatedBlock block, final long start, final long end,
    final ByteBuffer bb,
    final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
    final int hedgedReadId) {
  final Span parentSpan = Trace.currentSpan();
  return new Callable<ByteBuffer>() {
    @Override
    public ByteBuffer call() throws Exception {
      byte[] buf = bb.array();
      int offset = bb.position();
      TraceScope scope =
          Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
      try {
        actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
            corruptedBlockMap);
        return bb;
      } finally {
        scope.close();
      }
    }
  };
}
 
Example #9
Source File: DFSClient.java    From hadoop with Apache License 2.0
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example #10
Source File: TestRaidShellFsck.java    From RDFS with Apache License 2.0
/**
 * Removes a file block in the specified stripe.
 */
private void removeFileBlock(Path filePath, int stripe, int blockInStripe)
  throws IOException {
  LocatedBlocks fileBlocks = dfs.getClient().namenode.
    getBlockLocations(filePath.toString(), 0, FILE_BLOCKS * BLOCK_SIZE);
  if (fileBlocks.locatedBlockCount() != FILE_BLOCKS) {
    throw new IOException("expected " + FILE_BLOCKS + 
                          " file blocks but found " + 
                          fileBlocks.locatedBlockCount());
  }
  if (blockInStripe >= STRIPE_BLOCKS) {
    throw new IOException("blockInStripe is " + blockInStripe +
                          " but must be smaller than " + STRIPE_BLOCKS);
  }
  LocatedBlock block = fileBlocks.get(stripe * STRIPE_BLOCKS + blockInStripe);
  removeAndReportBlock(dfs, filePath, block);
  LOG.info("removed file " + filePath.toString() + " block " +
           stripe * STRIPE_BLOCKS + " in stripe " + stripe);
}
 
Example #11
Source File: DistributedAvatarFileSystem.java    From RDFS with Apache License 2.0
@Override
public LocatedBlock addBlock(final String src, final String clientName,
    final DatanodeInfo[] excludedNodes) throws IOException {
  return (new MutableFSCaller<LocatedBlock>() {
    @Override
    LocatedBlock call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0,
              info.getLen());
          // If at least one block exists.
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlock(src, clientName, excludedNodes);
    }

  }).callFS();
}
 
Example #12
Source File: TestStorageMover.java    From hadoop with Apache License 2.0
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Example #13
Source File: LazyPersistTestCase.java    From hadoop with Apache License 2.0
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {

  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
      DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
      DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
        lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
  }
  return true;
}
 
Example #14
Source File: ClientDatanodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  
  // Since we're creating a new UserGroupInformation here, we know that no
  // future RPC proxies will be able to re-use the same connection. And
  // usages of this proxy tend to be one-off calls.
  //
  // This is a temporary fix: callers should really achieve this by using
  // RPC.stopProxy() on the resulting object, but this is currently not
  // working in trunk. See the discussion on HDFS-1965.
  Configuration confWithNoIpcIdle = new Configuration(conf);
  confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
  ticket.addToken(locatedBlock.getBlockToken());
  return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
 
Example #15
Source File: BlockStorageLocationUtil.java    From big-c with Apache License 2.0
/**
 * Helper method to combine a list of {@link LocatedBlock} with associated
 * {@link VolumeId} information to form a list of
 * {@link BlockStorageLocation}.
 */
static BlockStorageLocation[] convertToVolumeBlockLocations(
    List<LocatedBlock> blocks, 
    Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
  // Construct the final return value of VolumeBlockLocation[]
  BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
  List<BlockStorageLocation> volumeBlockLocs = 
      new ArrayList<BlockStorageLocation>(locations.length);
  for (int i = 0; i < locations.length; i++) {
    LocatedBlock locBlock = blocks.get(i);
    List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
    BlockStorageLocation bsLoc = new BlockStorageLocation(locations[i], 
        volumeIds.toArray(new VolumeId[0]));
    volumeBlockLocs.add(bsLoc);
  }
  return volumeBlockLocs.toArray(new BlockStorageLocation[] {});
}
 
Example #16
Source File: TestBalancer.java    From hadoop-gpu with Apache License 2.0
private Block[] generateBlocks(long size, short numNodes) throws IOException {
  cluster = new MiniDFSCluster( CONF, numNodes, true, null);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    short replicationFactor = (short)(numNodes-1);
    long fileLen = size/replicationFactor;
    createFile(fileLen, replicationFactor);

    List<LocatedBlock> locatedBlocks = client
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

    int numOfBlocks = locatedBlocks.size();
    Block[] blocks = new Block[numOfBlocks];
    for(int i=0; i<numOfBlocks; i++) {
      Block b = locatedBlocks.get(i).getBlock();
      blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
    }

    return blocks;
  } finally {
    cluster.shutdown();
  }
}
 
Example #17
Source File: ClientNamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #18
Source File: TestClientReportBadBlock.java    From hadoop with Apache License 2.0
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // Corrupt some or all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);
  }
}
 
Example #19
Source File: BlockPlacementPolicyRaid.java    From RDFS with Apache License 2.0
/**
 * Count how many companion blocks are on each datanode or each rack
 * @param companionBlocks a collection of all the companion blocks
 * @param doRackCount count the companion blocks on the racks of datanodes
 * @return a map from node name to the number of companion blocks
 */
static Map<String, Integer> countCompanionBlocks(
    Collection<LocatedBlock> companionBlocks, boolean doRackCount) {
  Map<String, Integer> result = new HashMap<String, Integer>();
  for (LocatedBlock block : companionBlocks) {
    for (DatanodeInfo d : block.getLocations()) {
      String name = doRackCount ? d.getParent().getName() : d.getName();
      if (result.containsKey(name)) {
        int count = result.get(name) + 1;
        result.put(name, count);
      } else {
        result.put(name, 1);
      }
    }
  }
  return result;
}
 
Example #20
Source File: TestAvatarDataNodeRBW.java    From RDFS with Apache License 2.0
private void verifyResults(int blocksBefore, String fileName)
  throws IOException {
  // Verify we have RBWs after restart.
  AvatarNode avatarAfter = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatarAfter.namesystem
      .getBlockLocations(fileName, 0, Long.MAX_VALUE);
  long blocksAfter = lbks.locatedBlockCount();

  System.out.println("blocksBefore : " + blocksBefore + " blocksAfter : "
      + blocksAfter);

  assertEquals(blocksBefore, blocksAfter);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
}
 
Example #21
Source File: TestBlockCopier.java    From RDFS with Apache License 2.0
@Override
public void configureJob(Job job,
    Class<? extends BlockReconstructor> rClass) {
  super.configureJob(job, rClass);

  LocatedBlock[] lb = TestBlockCopier.decommissioningBlocks;
  String[] hashes = new String[lb.length];
  for (int i = 0; i < lb.length; i++) {
    hashes[i] = Integer.toString(lb[i].getBlock().hashCode());
  }

  ((JobConf) job.getConfiguration()).setClass(
      ReconstructionMapper.RECONSTRUCTOR_CLASS_TAG,
      ReconstructorFakeData.class, BlockReconstructor.class);
  ((JobConf) job.getConfiguration()).setStrings(
      "hdfs.testblockcopier.blockhashes", hashes);
}
 
Example #22
Source File: DFSInputStream.java    From RDFS with Apache License 2.0
private void checkLocatedBlocks(LocatedBlocks locatedBlocks)
    throws IOException {
  if (null == locatedBlocks) {
    return;
  }
  if(!locatedBlocks.isUnderConstruction()) {
    return;
  }
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  if (lbs == null) {
    return;
  }
  for (int i = 0; i < lbs.size() - 1; i++) {
    if (lbs.get(i).getBlockSize() <= 1) {
      throw new IOException(
          "File is under construction and namenode hasn't received the second last block yet.");
    }
  }
}
 
Example #23
Source File: TestReplication.java    From big-c with Apache License 2.0
private void waitForBlockReplication(String filename, 
                                     ClientProtocol namenode,
                                     int expected, long maxWaitSec) 
                                     throws IOException {
  long start = Time.monotonicNow();
  
  //wait for all the blocks to be replicated;
  LOG.info("Checking for block replication for " + filename);
  while (true) {
    boolean replOk = true;
    LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, 
                                                      Long.MAX_VALUE);
    
    for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator();
         iter.hasNext();) {
      LocatedBlock block = iter.next();
      int actual = block.getLocations().length;
      if ( actual < expected ) {
        LOG.info("Not enough replicas for " + block.getBlock()
            + " yet. Expecting " + expected + ", got " + actual + ".");
        replOk = false;
        break;
      }
    }
    
    if (replOk) {
      return;
    }
    
    if (maxWaitSec > 0 && 
        (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
      throw new IOException("Timedout while waiting for all blocks to " +
                            " be replicated for " + filename);
    }
    
    try {
      Thread.sleep(500);
    } catch (InterruptedException ignored) {}
  }
}
 
Example #24
Source File: TestBlockReplacement.java    From hadoop-gpu with Apache License 2.0
private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, 
    long fileLen, short replFactor, DFSClient client) throws IOException {
  boolean notDone;
  do {
    try {
      Thread.sleep(100);
    } catch(InterruptedException e) {
    }
    List<LocatedBlock> blocks = client.namenode
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
    assertEquals(1, blocks.size());
    DatanodeInfo[] nodes = blocks.get(0).getLocations();
    notDone = (nodes.length != replFactor);
    if (notDone) {
      LOG.info("Expected replication factor is " + replFactor +
          " but the real replication factor is " + nodes.length );
    } else {
      List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
      for (DatanodeInfo node : includeNodes) {
        if (!nodeLocations.contains(node)) {
          notDone = true;
          LOG.info("Block is not located at " + node.getName());
          break;
        }
      }
    }
  } while(notDone);
}
 
Example #25
Source File: BlockManager.java    From hadoop with Apache License 2.0
private LocatedBlock createLocatedBlock(final BlockInfoContiguous blk,
    final long pos, final BlockTokenSecretManager.AccessMode mode)
    throws IOException {
  final LocatedBlock lb = createLocatedBlock(blk, pos);
  if (mode != null) {
    setBlockToken(lb, mode);
  }
  return lb;
}
 
Example #26
Source File: TestReplication.java    From hadoop with Apache License 2.0
private void waitForBlockReplication(String filename, 
                                     ClientProtocol namenode,
                                     int expected, long maxWaitSec) 
                                     throws IOException {
  long start = Time.monotonicNow();
  
  //wait for all the blocks to be replicated;
  LOG.info("Checking for block replication for " + filename);
  while (true) {
    boolean replOk = true;
    LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, 
                                                      Long.MAX_VALUE);
    
    for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator();
         iter.hasNext();) {
      LocatedBlock block = iter.next();
      int actual = block.getLocations().length;
      if ( actual < expected ) {
        LOG.info("Not enough replicas for " + block.getBlock()
            + " yet. Expecting " + expected + ", got " + actual + ".");
        replOk = false;
        break;
      }
    }
    
    if (replOk) {
      return;
    }
    
    if (maxWaitSec > 0 && 
        (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
      throw new IOException("Timedout while waiting for all blocks to " +
                            " be replicated for " + filename);
    }
    
    try {
      Thread.sleep(500);
    } catch (InterruptedException ignored) {}
  }
}
 
Example #27
Source File: BlockMover.java    From RDFS with Apache License 2.0
public void move(LocatedBlock block, DatanodeInfo node,
    Set<DatanodeInfo> excludedNodes, int priority,
    int dataTransferProtocolVersion, int namespaceId) {
  BlockMoveAction action = new BlockMoveAction(
      block, node, excludedNodes, priority,
      dataTransferProtocolVersion, namespaceId);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Bad block placement: " + action);
  }
  int movingQueueSize = movingQueue.size();
  // For high-priority moves, the queue limit is 2 * maxQueueSize.
  if (movingQueueSize < maxQueueSize ||
      (movingQueueSize < 2 * maxQueueSize &&
       action.priority >= alwaysSubmitPriorityLevel)) {
    LOG.info("move : " + action + " started");
    executor.execute(action);
    metrics.blockMoveScheduled.inc();
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block move queue is full. Skip the action." +
        " size:" + movingQueueSize +
        " maxSize:" + maxQueueSize);
    }
    metrics.blockMoveSkipped.inc();
  }
}
 
Example #28
Source File: TestBlockUnderConstruction.java    From hadoop with Apache License 2.0
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for(int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }
  // close file
  out.close();
}
 
Example #29
Source File: NNThroughputBenchmark.java    From RDFS with Apache License 2.0
private void addBlocks(String fileName, String clientName)
    throws IOException {
  for (int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNode.addBlock(fileName, clientName);
    for (DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
      datanodes[dnIdx].addBlock(loc.getBlock());
      Block[] bi = new Block[] { loc.getBlock() };
      nameNode.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, bi);
    }
  }
}
 
Example #30
Source File: TestPBHelper.java    From big-c with Apache License 2.0
@Test
public void testConvertLocatedBlockNoStorageMedia() {
  LocatedBlock lb = createLocatedBlockNoStorageMedia();
  LocatedBlockProto lbProto = PBHelper.convert(lb);
  LocatedBlock lb2 = PBHelper.convert(lbProto);
  compare(lb, lb2);
}