Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlock

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlock. They are extracted from open source projects; the source project, file, and license are noted above each example.
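
Before the individual examples, here is a minimal, self-contained sketch of the access pattern most of them build on: asking the NameNode for a file's LocatedBlocks and walking each block's offset, size, and replica locations. This sketch is not taken from any of the projects below; it assumes the Hadoop 2.x client API, a running cluster reachable through the default Configuration, and a made-up path.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class ListBlocks {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(conf);
    String src = "/user/example/data.txt"; // hypothetical path
    // Fetch the located blocks for the whole file, as several examples below do.
    List<LocatedBlock> blocks =
        client.getLocatedBlocks(src, 0, Long.MAX_VALUE).getLocatedBlocks();
    for (LocatedBlock lb : blocks) {
      System.out.println("block " + lb.getBlock()
          + " offset=" + lb.getStartOffset()
          + " size=" + lb.getBlockSize());
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("  replica on " + dn);
      }
    }
    client.close();
  }
}
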
Example 1
Source Project: RDFS   Source File: DFSInputStream.java    License: Apache License 2.0
private void checkLocatedBlocks(LocatedBlocks locatedBlocks)
    throws IOException {
  if (null == locatedBlocks) {
    return;
  }
  if (!locatedBlocks.isUnderConstruction()) {
    return;
  }
  List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
  if (lbs == null) {
    return;
  }
  for (int i = 0; i < lbs.size() - 1; i++) {
    if (lbs.get(i).getBlockSize() <= 1) {
      throw new IOException(
          "File is under construction and namenode hasn't received the second last block yet.");
    }
  }
}
 
Example 2
Source Project: RDFS   Source File: TestDFSLocatedBlocks.java    License: Apache License 2.0
@Override
public Boolean call() throws Exception {
  for (int i = 0; i < NUM_INSERTS; ++i) {
    List<LocatedBlock> newBlocks = randomBlockSubrange(rand, allBlocks);
    locatedBlocks.insertRange(newBlocks);
    for (LocatedBlock blk : newBlocks) {
      LocatedBlock blockFromArr = locatedBlocks.getBlockContainingOffset(
          blk.getStartOffset());
      assertEquals(blockFromArr.getBlockSize(), blk.getBlockSize());
    }

    List<LocatedBlock> locBlocksCopy =
        locatedBlocks.getLocatedBlocksCopy();
    for (int j = 1; j < locBlocksCopy.size(); ++j) {
      assertTrue(locBlocksCopy.get(j - 1).getStartOffset() <
          locBlocksCopy.get(j).getStartOffset());
    }
  }
  return true;
}
 
Example 3
Source Project: hadoop   Source File: DFSOutputStream.java    License: Apache License 2.0
/** Construct a new output stream for append. */
private DFSOutputStream(DFSClient dfsClient, String src,
    EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  this(dfsClient, src, progress, stat, checksum);
  initialFileSize = stat.getLen(); // length of file when opened
  this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

  boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);

  // The last partial block of the file has to be filled.
  if (!toNewBlock && lastBlock != null) {
    // indicate that we are appending to an existing block
    bytesCurBlock = lastBlock.getBlockSize();
    streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
  } else {
    computePacketChunkSize(dfsClient.getConf().writePacketSize,
        bytesPerChecksum);
    streamer = new DataStreamer(stat,
        lastBlock != null ? lastBlock.getBlock() : null);
  }
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
}
 
Example 4
Source Project: hadoop   Source File: TestDFSClientRetries.java    License: Apache License 2.0
private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
  LocatedBlock goodLocatedBlock = goodBlockList.get(0);
  LocatedBlock badLocatedBlock = new LocatedBlock(
    goodLocatedBlock.getBlock(),
    new DatanodeInfo[] {
      DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
    },
    goodLocatedBlock.getStartOffset(),
    false);

  List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
  badBlocks.add(badLocatedBlock);
  return new LocatedBlocks(goodBlockList.getFileLength(), false,
                           badBlocks, null, true,
                           null);
}
 
Example 5
Source Project: big-c   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
        + " fileId=" + fileId + " for " + clientName);
  }
  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<Node>(excludedNodes.length);
    for (Node node : excludedNodes) {
      excludedNodesSet.add(node);
    }
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
      clientName, previous, excludedNodesSet, favoredNodesList);
  if (locatedBlock != null)
    metrics.incrAddBlockOps();
  return locatedBlock;
}
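
A side note on the excluded-node handling above: since the DatanodeInfo entries are used as Node instances, the defensive copy into a Set could equally be built in one step with the standard collections API. A sketch of the equivalent construction (same behavior, JDK-only calls):

  // Equivalent to the null check plus copy loop above; uses java.util.Arrays,
  // java.util.HashSet, java.util.Set and org.apache.hadoop.net.Node.
  Set<Node> excludedNodesSet = (excludedNodes == null) ? null
      : new HashSet<Node>(Arrays.asList(excludedNodes));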
 
Example 6
Source Project: big-c   Source File: TestBalancerWithMultipleNameNodes.java    License: Apache License 2.0
private static ExtendedBlock[][] generateBlocks(Suite s, long size
    ) throws IOException, InterruptedException, TimeoutException {
  final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
  for(int n = 0; n < s.clients.length; n++) {
    final long fileLen = size/s.replication;
    createFile(s, n, fileLen);

    final List<LocatedBlock> locatedBlocks = s.clients[n].getBlockLocations(
        FILE_NAME, 0, fileLen).getLocatedBlocks();

    final int numOfBlocks = locatedBlocks.size();
    blocks[n] = new ExtendedBlock[numOfBlocks];
    for(int i = 0; i < numOfBlocks; i++) {
      final ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[n][i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(),
          b.getNumBytes(), b.getGenerationStamp());
    }
  }
  return blocks;
}
 
Example 7
Source Project: big-c   Source File: ClientNamenodeProtocolTranslatorPB.java    License: Apache License 2.0
@Override
public LastBlockWithStatus append(String src, String clientName,
    EnumSetWritable<CreateFlag> flag) throws AccessControlException,
    DSQuotaExceededException, FileNotFoundException, SafeModeException,
    UnresolvedLinkException, IOException {
  AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
      .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
      .build();
  try {
    AppendResponseProto res = rpcProxy.append(null, req);
    LocatedBlock lastBlock = res.hasBlock() ? PBHelper
        .convert(res.getBlock()) : null;
    HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
        : null;
    return new LastBlockWithStatus(lastBlock, stat);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 8
Source Project: hadoop   Source File: DFSInputStream.java    License: Apache License 2.0
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
    final LocatedBlock block, final long start, final long end,
    final ByteBuffer bb,
    final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
    final int hedgedReadId) {
  final Span parentSpan = Trace.currentSpan();
  return new Callable<ByteBuffer>() {
    @Override
    public ByteBuffer call() throws Exception {
      byte[] buf = bb.array();
      int offset = bb.position();
      TraceScope scope =
          Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
      try {
        actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
            corruptedBlockMap);
        return bb;
      } finally {
        scope.close();
      }
    }
  };
}
 
Example 9
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example 10
Source Project: RDFS   Source File: TestRaidShellFsck.java    License: Apache License 2.0
/**
 * removes a file block in the specified stripe
 */
private void removeFileBlock(Path filePath, int stripe, int blockInStripe)
  throws IOException {
  LocatedBlocks fileBlocks = dfs.getClient().namenode.getBlockLocations(
      filePath.toString(), 0, FILE_BLOCKS * BLOCK_SIZE);
  if (fileBlocks.locatedBlockCount() != FILE_BLOCKS) {
    throw new IOException("expected " + FILE_BLOCKS + 
                          " file blocks but found " + 
                          fileBlocks.locatedBlockCount());
  }
  if (blockInStripe >= STRIPE_BLOCKS) {
    throw new IOException("blockInStripe is " + blockInStripe +
                          " but must be smaller than " + STRIPE_BLOCKS);
  }
  LocatedBlock block = fileBlocks.get(stripe * STRIPE_BLOCKS + blockInStripe);
  removeAndReportBlock(dfs, filePath, block);
  LOG.info("removed file " + filePath.toString() + " block " +
           stripe * STRIPE_BLOCKS + " in stripe " + stripe);
}
 
Example 11
Source Project: RDFS   Source File: DistributedAvatarFileSystem.java    License: Apache License 2.0
@Override
public LocatedBlock addBlock(final String src, final String clientName,
    final DatanodeInfo[] excludedNodes) throws IOException {
  return (new MutableFSCaller<LocatedBlock>() {
    @Override
    LocatedBlock call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0, info
              .getLen());
          // If at least one block exists.
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlock(src, clientName, excludedNodes);
    }

  }).callFS();
}
 
Example 12
Source Project: RDFS   Source File: TestAvatarDataNodeRBW.java    License: Apache License 2.0
private void verifyResults(int blocksBefore, String fileName)
  throws IOException {
  // Verify we have RBWs after restart.
  AvatarNode avatarAfter = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatarAfter.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  long blocksAfter = lbks.locatedBlockCount();

  System.out.println("blocksBefore : " + blocksBefore + " blocksAfter : "
      + blocksAfter);

  assertEquals(blocksBefore, blocksAfter);
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
}
 
Example 13
Source Project: hadoop   Source File: TestStorageMover.java    License: Apache License 2.0
private void verifyFile(final Path parent, final HdfsFileStatus status,
    final Byte expectedPolicyId) throws Exception {
  HdfsLocatedFileStatus fileStatus = (HdfsLocatedFileStatus) status;
  byte policyId = fileStatus.getStoragePolicy();
  BlockStoragePolicy policy = policies.getPolicy(policyId);
  if (expectedPolicyId != null) {
    Assert.assertEquals((byte)expectedPolicyId, policy.getId());
  }
  final List<StorageType> types = policy.chooseStorageTypes(
      status.getReplication());
  for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
    final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
        lb.getStorageTypes());
    Assert.assertTrue(fileStatus.getFullName(parent.toString())
        + " with policy " + policy + " has non-empty overlap: " + diff
        + ", the corresponding block is " + lb.getBlock().getLocalBlock(),
        diff.removeOverlap(true));
  }
}
 
Example 14
Source Project: hadoop   Source File: LazyPersistTestCase.java    License: Apache License 2.0
protected final boolean verifyBlockDeletedFromDir(File dir,
    LocatedBlocks locatedBlocks) {

  for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
    File targetDir =
      DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());

    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
    if (blockFile.exists()) {
      LOG.warn("blockFile: " + blockFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
    File metaFile = new File(targetDir,
      DatanodeUtil.getMetaName(lb.getBlock().getBlockName(),
        lb.getBlock().getGenerationStamp()));
    if (metaFile.exists()) {
      LOG.warn("metaFile: " + metaFile.getAbsolutePath() +
        " exists after deletion.");
      return false;
    }
  }
  return true;
}
 
Example 15
Source Project: hadoop   Source File: ClientDatanodeProtocolTranslatorPB.java    License: Apache License 2.0
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  
  // Since we're creating a new UserGroupInformation here, we know that no
  // future RPC proxies will be able to re-use the same connection. And
  // usages of this proxy tend to be one-off calls.
  //
  // This is a temporary fix: callers should really achieve this by using
  // RPC.stopProxy() on the resulting object, but this is currently not
  // working in trunk. See the discussion on HDFS-1965.
  Configuration confWithNoIpcIdle = new Configuration(conf);
  confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
      .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);

  UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
  ticket.addToken(locatedBlock.getBlockToken());
  return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
 
Example 16
Source Project: big-c   Source File: BlockStorageLocationUtil.java    License: Apache License 2.0
/**
 * Helper method to combine a list of {@link LocatedBlock} with associated
 * {@link VolumeId} information to form a list of {@link BlockStorageLocation}.
 */
static BlockStorageLocation[] convertToVolumeBlockLocations(
    List<LocatedBlock> blocks, 
    Map<LocatedBlock, List<VolumeId>> blockVolumeIds) throws IOException {
  // Construct the final return value of VolumeBlockLocation[]
  BlockLocation[] locations = DFSUtil.locatedBlocks2Locations(blocks);
  List<BlockStorageLocation> volumeBlockLocs = 
      new ArrayList<BlockStorageLocation>(locations.length);
  for (int i = 0; i < locations.length; i++) {
    LocatedBlock locBlock = blocks.get(i);
    List<VolumeId> volumeIds = blockVolumeIds.get(locBlock);
    BlockStorageLocation bsLoc = new BlockStorageLocation(locations[i], 
        volumeIds.toArray(new VolumeId[0]));
    volumeBlockLocs.add(bsLoc);
  }
  return volumeBlockLocs.toArray(new BlockStorageLocation[] {});
}
 
Example 17
Source Project: hadoop-gpu   Source File: TestBalancer.java    License: Apache License 2.0
private Block[] generateBlocks(long size, short numNodes) throws IOException {
  cluster = new MiniDFSCluster(CONF, numNodes, true, null);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(CONF);

    short replicationFactor = (short)(numNodes-1);
    long fileLen = size/replicationFactor;
    createFile(fileLen, replicationFactor);

    List<LocatedBlock> locatedBlocks =
        client.getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

    int numOfBlocks = locatedBlocks.size();
    Block[] blocks = new Block[numOfBlocks];
    for(int i=0; i<numOfBlocks; i++) {
      Block b = locatedBlocks.get(i).getBlock();
      blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
    }

    return blocks;
  } finally {
    cluster.shutdown();
  }
}
 
Example 18
Source Project: hadoop   Source File: TestClientReportBadBlock.java    License: Apache License 2.0
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some/all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Example 19
Source Project: RDFS   Source File: BlockPlacementPolicyRaid.java    License: Apache License 2.0
/**
 * Count how many companion blocks are on each datanode or each rack
 * @param companionBlocks a collection of all the companion blocks
 * @param doRackCount count the companion blocks on the racks of datanodes
 * @param result the map from node name to the number of companion blocks
 */
static Map<String, Integer> countCompanionBlocks(
    Collection<LocatedBlock> companionBlocks, boolean doRackCount) {
  Map<String, Integer> result = new HashMap<String, Integer>();
  for (LocatedBlock block : companionBlocks) {
    for (DatanodeInfo d : block.getLocations()) {
      String name = doRackCount ? d.getParent().getName() : d.getName();
      if (result.containsKey(name)) {
        int count = result.get(name) + 1;
        result.put(name, count);
      } else {
        result.put(name, 1);
      }
    }
  }
  return result;
}
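
A note on the counting idiom above: the containsKey/get/put sequence predates java.util.Map#merge; on JDK 8 and later the same per-name tally can be written more compactly. A sketch of the equivalent loop (same accessors, same result):

  // Drop-in replacement for the if/else branch above (JDK 8+ only).
  for (LocatedBlock block : companionBlocks) {
    for (DatanodeInfo d : block.getLocations()) {
      String name = doRackCount ? d.getParent().getName() : d.getName();
      result.merge(name, 1, Integer::sum); // insert 1 or increment the count
    }
  }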
 
Example 20
Source Project: RDFS   Source File: TestBlockCopier.java    License: Apache License 2.0
@Override
public void configureJob(Job job,
    Class<? extends BlockReconstructor> rClass) {

  super.configureJob(job, rClass);

  LocatedBlock[] lb = TestBlockCopier.decommissioningBlocks;

  String[] hashes = new String[lb.length];
  for (int i = 0; i < lb.length; i++) {
    hashes[i] = Integer.toString(lb[i].getBlock().hashCode());
  }

  ((JobConf) job.getConfiguration()).setClass(
      ReconstructionMapper.RECONSTRUCTOR_CLASS_TAG,
      ReconstructorFakeData.class, BlockReconstructor.class);
  ((JobConf) job.getConfiguration()).setStrings(
      "hdfs.testblockcopier.blockhashes", hashes);
}
 
Example 21
Source Project: big-c   Source File: DFSInputStream.java    License: Apache License 2.0
private void fetchBlockByteRange(LocatedBlock block, long start, long end,
    byte[] buf, int offset,
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
    throws IOException {
  block = getBlockAt(block.getStartOffset());
  while (true) {
    DNAddrPair addressPair = chooseDataNode(block, null);
    try {
      actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
          corruptedBlockMap);
      return;
    } catch (IOException e) {
      // Ignore. Already processed inside the function.
      // Loop through to try the next node.
    }
  }
}
 
Example 22
Source Project: RDFS   Source File: TestAvatarDataNodeRBW.java    License: Apache License 2.0
private int initializeTest(String testName) throws IOException {
  String fileName = testName;
  createRBWFile(fileName);
  // Verify we have 1 RBW block.
  AvatarNode avatar = cluster.getPrimaryAvatar(0).avatar;
  LocatedBlocks lbks = avatar.namesystem.getBlockLocations(fileName, 0,
      Long.MAX_VALUE);
  int blocksBefore = lbks.locatedBlockCount();
  for (LocatedBlock lbk : lbks.getLocatedBlocks()) {
    DatanodeInfo[] locs = lbk.getLocations();
    assertNotNull(locs);
    assertTrue(locs.length != 0);
  }
  return blocksBefore;
}
 
Example 23
Source Project: big-c   Source File: BlockManager.java    License: Apache License 2.0
/** Generate a block token for the located block. */
public void setBlockToken(final LocatedBlock b,
    final BlockTokenSecretManager.AccessMode mode) throws IOException {
  if (isBlockTokenEnabled()) {
    // Use cached UGI if serving RPC calls.
    b.setBlockToken(blockTokenSecretManager.generateToken(
        NameNode.getRemoteUser().getShortUserName(),
        b.getBlock(), EnumSet.of(mode)));
  }    
}
 
Example 24
Source Project: big-c   Source File: PBHelper.java    License: Apache License 2.0
public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb) {
  if (lb == null) return null;
  final int len = lb.size();
  List<LocatedBlockProto> result = new ArrayList<LocatedBlockProto>(len);
  for (int i = 0; i < len; ++i) {
    result.add(PBHelper.convert(lb.get(i)));
  }
  return result;
}
 
Example 25
Source Project: hadoop-gpu   Source File: TestDecommission.java    License: Apache License 2.0
/**
 * For blocks that reside on the nodes that are down, verify that their
 * replication factor is 1 more than the specified one.
 */
private void checkFile(FileSystem fileSys, Path name, int repl,
                       String downnode) throws IOException {
  //
  // sleep an additional 10 seconds for the blockreports from the datanodes
  // to arrive. 
  //
  // need a raw stream
  assertTrue("Not HDFS:"+fileSys.getUri(), fileSys instanceof DistributedFileSystem);
      
  DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream) 
    ((DistributedFileSystem)fileSys).open(name);
  Collection<LocatedBlock> dinfo = dis.getAllBlocks();

  for (LocatedBlock blk : dinfo) { // for each block
    int hasdown = 0;
    DatanodeInfo[] nodes = blk.getLocations();
    for (int j = 0; j < nodes.length; j++) {     // for each replica
      if (nodes[j].getName().equals(downnode)) {
        hasdown++;
        System.out.println("Block " + blk.getBlock() + " replica " +
                           nodes[j].getName() + " is decommissioned.");
      }
    }
    System.out.println("Block " + blk.getBlock() + " has " + hasdown +
                       " decommissioned replica.");
    assertEquals("Number of replicas for block" + blk.getBlock(),
                 Math.min(numDatanodes, repl+hasdown), nodes.length);  
  }
}
 
Example 26
Source Project: RDFS   Source File: NNThroughputBenchmark.java    License: Apache License 2.0
private void addBlocks(String fileName, String clientName)
    throws IOException {
  for (int jdx = 0; jdx < blocksPerFile; jdx++) {
    LocatedBlock loc = nameNode.addBlock(fileName, clientName);
    for (DatanodeInfo dnInfo : loc.getLocations()) {
      int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
      datanodes[dnIdx].addBlock(loc.getBlock());
      Block[] bi = new Block[] { loc.getBlock() };
      nameNode.blockReceivedAndDeleted(datanodes[dnIdx].dnRegistration, bi);
    }
  }
}
 
Example 27
Source Project: RDFS   Source File: TestBlockReplacement.java    License: Apache License 2.0
private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, 
    long fileLen, short replFactor, DFSClient client) throws IOException {
  boolean notDone;
  do {
    try {
      Thread.sleep(100);
    } catch(InterruptedException e) {
    }
    List<LocatedBlock> blocks = client.namenode
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
    assertEquals(1, blocks.size());
    DatanodeInfo[] nodes = blocks.get(0).getLocations();
    notDone = (nodes.length != replFactor);
    if (notDone) {
      LOG.info("Expected replication factor is " + replFactor +
          " but the real replication factor is " + nodes.length );
    } else {
      List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
      for (DatanodeInfo node : includeNodes) {
        if (!nodeLocations.contains(node)) {
          notDone = true;
          LOG.info("Block is not located at " + node.getName());
          break;
        }
      }
    }
  } while(notDone);
}
 
Example 28
Source Project: hadoop   Source File: DFSInputStream.java    License: Apache License 2.0
/**
 * Get blocks in the specified range.
 * Includes only the complete blocks.
 * Fetch them from the namenode if not cached.
 */
private List<LocatedBlock> getFinalizedBlockRange(
    long offset, long length) throws IOException {
  synchronized(infoLock) {
    assert (locatedBlocks != null) : "locatedBlocks is null";
    List<LocatedBlock> blockRange = new ArrayList<LocatedBlock>();
    // search cached blocks first
    int blockIdx = locatedBlocks.findBlock(offset);
    if (blockIdx < 0) { // block is not cached
      blockIdx = LocatedBlocks.getInsertIndex(blockIdx);
    }
    long remaining = length;
    long curOff = offset;
    while (remaining > 0) {
      LocatedBlock blk = null;
      if (blockIdx < locatedBlocks.locatedBlockCount()) {
        blk = locatedBlocks.get(blockIdx);
      }
      if (blk == null || curOff < blk.getStartOffset()) {
        LocatedBlocks newBlocks;
        newBlocks = dfsClient.getLocatedBlocks(src, curOff, remaining);
        locatedBlocks.insertRange(blockIdx, newBlocks.getLocatedBlocks());
        continue;
      }
      assert curOff >= blk.getStartOffset() : "Block not found";
      blockRange.add(blk);
      long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff;
      remaining -= bytesRead;
      curOff += bytesRead;
      blockIdx++;
    }
    return blockRange;
  }
}
 
Example 29
Source Project: big-c   Source File: TestPBHelper.java    License: Apache License 2.0
@Test
public void testConvertLocatedBlockNoStorageMedia() {
  LocatedBlock lb = createLocatedBlockNoStorageMedia();
  LocatedBlockProto lbProto = PBHelper.convert(lb);
  LocatedBlock lb2 = PBHelper.convert(lbProto);
  compare(lb, lb2);
}