Java Code Examples for org.apache.hadoop.hdfs.protocol.DatanodeInfo

The following examples show how to use org.apache.hadoop.hdfs.protocol.DatanodeInfo. They are extracted from open source projects; each example lists its source project, source file, and license.
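As a starting point, here is a minimal, self-contained sketch of how a client typically obtains DatanodeInfo objects in the first place. It assumes fs.defaultFS points at a reachable HDFS cluster; the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class ListLiveDatanodes {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      // getDataNodeStats() returns one DatanodeInfo per live datanode
      DatanodeInfo[] datanodes = ((DistributedFileSystem) fs).getDataNodeStats();
      for (DatanodeInfo dn : datanodes) {
        System.out.println(dn.getHostName() + " remaining=" + dn.getRemaining());
      }
    }
  }
}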
Example 1
Source Project: RDFS   Source File: DistributedAvatarFileSystem.java    License: Apache License 2.0
@Override
public LocatedBlockWithMetaInfo addBlockAndFetchMetaInfo(
    final String src, final String clientName,
    final DatanodeInfo[] excludedNodes) throws IOException {
  return (new MutableFSCaller<LocatedBlockWithMetaInfo>() {
    @Override
    LocatedBlockWithMetaInfo call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0, info
              .getLen());
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlockAndFetchMetaInfo(src, clientName,
          excludedNodes);
    }
  }).callFS();
}
 
Example 2
Source Project: RDFS   Source File: FastCopySetupUtil.java    License: Apache License 2.0
private void verifyHardLinks(DatanodeInfo srcInfo, DatanodeInfo dstInfo,
    int srcNamespaceId, Block srcBlock, int dstNamespaceId, Block dstBlock,
    boolean hardlink) throws IOException {
  // Verify hard links.
  DataNode dnSrc = dnMap.get(srcInfo.getPort());
  File blockFileSrc = dnSrc.data.getBlockFile(srcNamespaceId, srcBlock);
  LOG.warn("Link count for : " + blockFileSrc + " is : "
      + HardLink.getLinkCount(blockFileSrc));
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileSrc) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileSrc));
  }

  DataNode dnDst = dnMap.get(dstInfo.getPort());
  File blockFileDst = dnDst.data.getBlockFile(dstNamespaceId, dstBlock);
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileDst) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileDst));
  }
}
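The example relies on Hadoop's HardLink utility. As a point of comparison, the same link count can be read with plain NIO on POSIX platforms; a small standalone sketch (the "unix:nlink" attribute is POSIX-only, and the class name is illustrative):

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class LinkCount {
  public static void main(String[] args) throws Exception {
    Path p = Paths.get(args[0]);
    // "unix:nlink" is the hard-link count maintained by the filesystem
    Number nlink = (Number) Files.getAttribute(p, "unix:nlink");
    System.out.println(p + " has " + nlink + " hard link(s)");
  }
}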
 
Example 3
Source Project: big-c   Source File: BlockReceiver.java    License: Apache License 2.0
PacketResponder(final DataOutputStream upstreamOut,
    final DataInputStream downstreamIn, final DatanodeInfo[] downstreams) {
  this.downstreamIn = downstreamIn;
  this.upstreamOut = upstreamOut;

  this.type = downstreams == null? PacketResponderType.NON_PIPELINE
      : downstreams.length == 0? PacketResponderType.LAST_IN_PIPELINE
          : PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE;

  final StringBuilder b = new StringBuilder(getClass().getSimpleName())
      .append(": ").append(block).append(", type=").append(type);
  if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
    b.append(", downstreams=").append(downstreams.length)
        .append(":").append(Arrays.asList(downstreams));
  }
  this.myString = b.toString();
}
 
Example 4
Source Project: RDFS   Source File: Balancer.java    License: Apache License 2.0
private BalancerDatanode(
    DatanodeInfo node, double avgRemaining, double threshold) {
  datanode = node;
  remaining = Balancer.getRemaining(node);
  long sizeToMove;

  if (remaining + threshold <= avgRemaining
      || remaining - threshold >= avgRemaining) {
    sizeToMove = (long)(threshold*datanode.getCapacity()/100);
  } else {
    sizeToMove =
      (long)(Math.abs(avgRemaining-remaining)*datanode.getCapacity()/100);
  }
  if (remaining > avgRemaining) {
    sizeToMove = Math.min(datanode.getRemaining(), sizeToMove);
  }
  this.maxSizeToMove = Math.min(MAX_SIZE_TO_MOVE, sizeToMove);
}
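To make the sizing rule concrete, here is a worked example with hypothetical numbers: 100 GB capacity, 20% remaining, 50% cluster average, 10% threshold. The branch logic mirrors the constructor above, but this is a standalone sketch and the final clamp against MAX_SIZE_TO_MOVE is omitted.

public class BalancerMath {
  public static void main(String[] args) {
    long capacity = 100L << 30;   // 100 GB
    double remaining = 20.0;      // % of capacity still free on this node
    double avgRemaining = 50.0;   // cluster-wide average, in %
    double threshold = 10.0;      // balancer threshold, in %
    long sizeToMove;
    if (remaining + threshold <= avgRemaining
        || remaining - threshold >= avgRemaining) {
      // 20 + 10 <= 50: the node is outside the +/- threshold band,
      // so move one threshold's worth of capacity (10 GB), not the full gap
      sizeToMove = (long) (threshold * capacity / 100);
    } else {
      // inside the band: move only the gap between node and average
      sizeToMove = (long) (Math.abs(avgRemaining - remaining) * capacity / 100);
    }
    System.out.println("sizeToMove bytes = " + sizeToMove);
  }
}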
 
Example 5
Source Project: hadoop-gpu   Source File: FileDataServlet.java    License: Apache License 2.0
/** Create a redirection URI */
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request)
    throws IOException, URISyntaxException {
  String scheme = request.getScheme();
  final DatanodeID host = pickSrcDatanode(i, nnproxy);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = ((DatanodeInfo)host).getHostName();
  } else {
    hostname = host.getHost();
  }
  return new URI(scheme, null, hostname,
      "https".equals(scheme)
        ? (Integer)getServletContext().getAttribute("datanode.https.port")
        : host.getInfoPort(),
      "/streamFile", "filename=" + i.getPath() + "&ugi=" + ugi, null);
}
 
Example 6
Source Project: RDFS   Source File: PlacementMonitor.java    License: Apache License 2.0
public void initialize(Path path, FileSystem fs) throws IOException {
  if (pathAndOffsetToLocatedBlock.containsKey(path)) {
    return;
  }
  VersionedLocatedBlocks pathLbs = getLocatedBlocks(path, fs);
  pathAndOffsetToLocatedBlock.put(
      path, createOffsetToLocatedBlockMap(pathLbs));

  for (LocatedBlocks lbs : Arrays.asList(pathLbs)) {
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      for (DatanodeInfo dn : lb.getLocations()) {
        nameToDatanodeInfo.put(dn.getName(), dn);
      }
    }
  }
}
 
Example 7
Source Project: big-c   Source File: DFSInputStream.java    License: Apache License 2.0
private void fetchBlockByteRange(LocatedBlock block, long start, long end,
    byte[] buf, int offset,
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
    throws IOException {
  block = getBlockAt(block.getStartOffset());
  while (true) {
    DNAddrPair addressPair = chooseDataNode(block, null);
    try {
      actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
          corruptedBlockMap);
      return;
    } catch (IOException e) {
      // Ignore. Already processed inside the function.
      // Loop through to try the next node.
    }
  }
}
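The loop looks unbounded, but chooseDataNode throws once every replica has been tried, and that is what terminates it. A standalone sketch of the generic shape of this pattern (all names are hypothetical, not from DFSInputStream):

import java.io.IOException;
import java.util.Iterator;

public class RetryUntilExhausted {
  /** Try candidates in order until one succeeds; the candidate source,
   *  not the loop, decides when to give up. */
  static <C, T> T firstSuccess(Iterator<C> candidates, Reader<C, T> reader)
      throws IOException {
    while (true) {
      if (!candidates.hasNext()) {
        throw new IOException("all candidates failed");
      }
      C c = candidates.next();
      try {
        return reader.read(c);
      } catch (IOException e) {
        // failure already handled by the reader; try the next candidate
      }
    }
  }

  interface Reader<C, T> {
    T read(C candidate) throws IOException;
  }
}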
 
Example 8
Source Project: hbase   Source File: FanOutOneBlockAsyncDFSOutputHelper.java    License: Apache License 2.0
private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
    throws IOException {
  Promise<Void> saslPromise = channel.eventLoop().newPromise();
  trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
  saslPromise.addListener(new FutureListener<Void>() {

    @Override
    public void operationComplete(Future<Void> future) throws Exception {
      if (future.isSuccess()) {
        // setup response processing pipeline first, then send request.
        processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
        requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
      } else {
        promise.tryFailure(future.cause());
      }
    }
  });
}
 
Example 9
Source Project: RDFS   Source File: BlockReconstructor.java    License: Apache License 2.0
/**
 * Choose a datanode (hostname:portnumber). The datanode is chosen at random
 * from the live datanodes.
 * 
 * @param locationsToAvoid
 *            locations to avoid.
 * @return A string in the format name:port.
 * @throws IOException
 */
private String chooseDatanode(DatanodeInfo[] locationsToAvoid)
		throws IOException {
	DistributedFileSystem dfs = getDFS(new Path("/"));
	DatanodeInfo[] live = dfs.getClient().datanodeReport(
			DatanodeReportType.LIVE);

	Random rand = new Random();
	String chosen = null;
	int maxAttempts = 1000;
	for (int i = 0; i < maxAttempts && chosen == null; i++) {
		int idx = rand.nextInt(live.length);
		chosen = live[idx].name;
		for (DatanodeInfo avoid : locationsToAvoid) {
			if (chosen.equals(avoid.name)) {
				//LOG.info("Avoiding " + avoid.name);
				chosen = null;
				break;
			}
		}
	}
	if (chosen == null) {
		throw new IOException("Could not choose datanode");
	}
	return chosen;
}
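A caveat on the rejection loop above: with many excluded nodes it can, in principle, exhaust its 1000 attempts even though a valid choice exists. Filtering the candidates first makes selection succeed in one pass whenever any candidate remains; a library-free sketch with hypothetical names:

import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.Set;

public class PickExcluding {
  /** Return a random element of live that is not in avoid,
   *  or null if every candidate is excluded. */
  static String pick(String[] live, Set<String> avoid, Random rand) {
    List<String> candidates = new ArrayList<>();
    for (String name : live) {
      if (!avoid.contains(name)) {
        candidates.add(name);
      }
    }
    return candidates.isEmpty()
        ? null : candidates.get(rand.nextInt(candidates.size()));
  }
}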
 
Example 10
Source Project: big-c   Source File: Receiver.java    License: Apache License 2.0
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto =
    OpTransferBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Example 11
Source Project: RDFS   Source File: DistributedAvatarFileSystem.java    License: Apache License 2.0
public LocatedBlock addBlock(final String src, final String clientName,
    final DatanodeInfo[] excludedNodes, final DatanodeInfo[] favoredNodes)
    throws IOException {
  return (new MutableFSCaller<LocatedBlock>() {
    @Override
    LocatedBlock call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0, info
            .getLen());
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlock(src, clientName, excludedNodes,
        favoredNodes);
    }
  }).callFS();
}
 
Example 12
Source Project: RDFS   Source File: TestAvatarAPI.java    License: Apache License 2.0
private void checkPrimary() throws Exception {
  FileStatus fs = dafs.getFileStatus(path, false);
  FileStatus[] dir = dafs.listStatus(dirPath, false);
  RemoteIterator<Path> cfb =
    dafs.listCorruptFileBlocks(dirPath, false);
  assertTrue("DAFS file status has the wrong length",
             fs != null && fs.getLen() == FILE_LEN);
  assertTrue("DAFS directory listing has the wrong length",
             dir != null && dir.length == 1);
  assertTrue("DAFS expected 0 corrupt file blocks",
             countPaths(cfb) == 0);

  ContentSummary cs = dafs.getContentSummary(path, false);
  DatanodeInfo[] di = dafs.getDataNodeStats(false);
  assertTrue("DAFS datanode info should contain 3 data nodes",
             di.length == 3);
}
 
Example 13
Source Project: big-c   Source File: ReportBadBlockAction.java    License: Apache License 2.0
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, 
  DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = { new LocatedBlock(block,
      dnArr, uuids, types) };

  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block:  " + block , re);
  } catch (IOException e) {
    throw new BPServiceActorActionException("Failed to report bad block "
        + block + " to namenode: ");
  }
}
 
Example 14
Source Project: big-c   Source File: TestClientReportBadBlock.java    License: Apache License 2.0
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some /all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Example 15
Source Project: RDFS   Source File: TestDecommission.java    License: Apache License 2.0
public void testClusterStats(int numNameNodes, boolean federation) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf, federation);
  
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    
    NameNode namenode = cluster.getNameNode(i);
    FSNamesystem fsn = namenode.namesystem;
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);
    
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    fsn.refreshNodes(conf);
    DatanodeInfo ret = fsn.getDatanode(downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
 
Example 16
Source Project: RDFS   Source File: BlockMover.java    License: Apache License 2.0
public void move(LocatedBlock block, DatanodeInfo node,
    Set<DatanodeInfo> excludedNodes, int priority,
    int dataTransferProtocolVersion, int namespaceId) {
  BlockMoveAction action = new BlockMoveAction(
      block, node, excludedNodes, priority,
      dataTransferProtocolVersion, namespaceId);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Bad block placement: " + action);
  }
  int movingQueueSize = movingQueue.size();
  //For high-pri moves, the queue limit is 2*maxQueueSize
  if (movingQueueSize < maxQueueSize ||
      movingQueueSize < 2 * maxQueueSize &&
      action.priority >= alwaysSubmitPriorityLevel) {
  	LOG.info("move : " + action + " started");
    executor.execute(action);
    //Thread mover = new Thread(action);
    //mover.run();
    metrics.blockMoveScheduled.inc();
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block move queue is full. Skip the action." +
        " size:" + movingQueueSize +
        " maxSize:" + maxQueueSize);
    }
    metrics.blockMoveSkipped.inc();
  }
}
 
Example 17
Source Project: big-c   Source File: DfsClientShmManager.java    License: Apache License 2.0
@VisibleForTesting
public void visit(Visitor visitor) throws IOException {
  lock.lock();
  try {
    HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info = 
        new HashMap<DatanodeInfo, PerDatanodeVisitorInfo>();
    for (Entry<DatanodeInfo, EndpointShmManager> entry :
          datanodes.entrySet()) {
      info.put(entry.getKey(), entry.getValue().getVisitorInfo());
    }
    visitor.visit(info);
  } finally {
    lock.unlock();
  }
}
 
Example 18
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
  try {
    return namenode.getDatanodeReport(type);
  } finally {
    scope.close();
  }
}
 
Example 19
Source Project: big-c   Source File: InvalidateBlocks.java    License: Apache License 2.0
/**
 * @return true if the given storage has the given block listed for
 * invalidation. Blocks are compared including their generation stamps:
 * if a block is pending invalidation but with a different generation stamp,
 * returns false.
 */
synchronized boolean contains(final DatanodeInfo dn, final Block block) {
  final LightWeightHashSet<Block> s = node2blocks.get(dn);
  if (s == null) {
    return false; // no invalidate blocks for this storage ID
  }
  Block blockInSet = s.getElement(block);
  return blockInSet != null &&
      block.getGenerationStamp() == blockInSet.getGenerationStamp();
}
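The explicit generation-stamp comparison matters because, in the Hadoop 2.x source, Block.equals() compares only the block ID, so the set lookup alone cannot tell a stale replica from a current one. A tiny standalone sketch of that behavior (assuming the three-argument Block constructor: id, length, generation stamp):

import org.apache.hadoop.hdfs.protocol.Block;

public class GenStampCheck {
  public static void main(String[] args) {
    Block current = new Block(42L, 1024L, 1001L);
    Block stale   = new Block(42L, 1024L, 1000L);
    // true: equality is ID-based, so the stale replica "matches"
    System.out.println(current.equals(stale));
    // false: only the generation stamps distinguish them
    System.out.println(current.getGenerationStamp() == stale.getGenerationStamp());
  }
}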
 
Example 20
Source Project: hadoop   Source File: TestDecommissioningStatus.java    License: Apache License 2.0
private String decommissionNode(FSNamesystem namesystem, DFSClient client,
    FileSystem localFileSys, int nodeIndex) throws IOException {
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

  String nodename = info[nodeIndex].getXferAddr();
  decommissionNode(namesystem, localFileSys, nodename);
  return nodename;
}
 
Example 21
Source Project: hadoop   Source File: TestDFSUtil.java    License: Apache License 2.0
/**
 * Test conversion of LocatedBlock to BlockLocation
 */
@Test
public void testLocatedBlocks2Locations() {
  DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
  DatanodeInfo[] ds = new DatanodeInfo[1];
  ds[0] = d;

  // ok
  ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
  LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);

  // corrupt
  ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
  LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);

  List<LocatedBlock> ls = Arrays.asList(l1, l2);
  LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);

  BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);

  assertTrue("expected 2 blocks but got " + bs.length,
             bs.length == 2);

  int corruptCount = 0;
  for (BlockLocation b: bs) {
    if (b.isCorrupt()) {
      corruptCount++;
    }
  }

  assertTrue("expected 1 corrupt files but got " + corruptCount,
      corruptCount == 1);

  // test an empty location
  bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
  assertEquals(0, bs.length);
}
 
Example 22
Source Project: hadoop-gpu   Source File: DataNode.java    License: Apache License 2.0
private void transferBlocks( Block blocks[], 
                             DatanodeInfo xferTargets[][] 
                             ) {
  for (int i = 0; i < blocks.length; i++) {
    try {
      transferBlock(blocks[i], xferTargets[i]);
    } catch (IOException ie) {
      LOG.warn("Failed to transfer block " + blocks[i], ie);
    }
  }
}
 
Example 23
Source Project: RDFS   Source File: DistributedAvatarFileSystem.java    License: Apache License 2.0
@Override
public LocatedBlockWithMetaInfo addBlockAndFetchMetaInfo(final String src,
    final String clientName, final DatanodeInfo[] excludedNodes,
    final DatanodeInfo[] favoredNodes, final long startPos,
    final Block lastBlock)
    throws IOException {
  return (new MutableFSCaller<LocatedBlockWithMetaInfo>() {
    @Override
    LocatedBlockWithMetaInfo call(int retries) throws IOException {
      if (retries > 0 && lastBlock == null) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0, info
              .getLen());
          if (blocks.locatedBlockCount() > 0) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlockAndFetchMetaInfo(src, clientName,
          excludedNodes, favoredNodes, startPos, lastBlock);
    }

  }).callFS();
}
 
Example 24
Source Project: big-c   Source File: TestDecommissioningStatus.java    License: Apache License 2.0
private String decommissionNode(FSNamesystem namesystem, DFSClient client,
    FileSystem localFileSys, int nodeIndex) throws IOException {
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

  String nodename = info[nodeIndex].getXferAddr();
  decommissionNode(namesystem, localFileSys, nodename);
  return nodename;
}
 
Example 25
Source Project: hadoop   Source File: TestFavoredNodesEndToEnd.java    License: Apache License 2.0
@Test(timeout=180000)
public void testWhenSomeNodesAreNotGood() throws Exception {
  // 4 favored nodes
  final InetSocketAddress addrs[] = new InetSocketAddress[4];
  final String[] hosts = new String[addrs.length];
  for (int i = 0; i < addrs.length; i++) {
    addrs[i] = datanodes.get(i).getXferAddress();
    hosts[i] = addrs[i].getAddress().getHostAddress() + ":" + addrs[i].getPort();
  }

  //make some datanode not "good" so that even if the client prefers it,
  //the namenode would not give it as a replica to write to
  DatanodeInfo d = cluster.getNameNode().getNamesystem().getBlockManager()
         .getDatanodeManager().getDatanodeByXferAddr(
             addrs[0].getAddress().getHostAddress(), addrs[0].getPort());
  //set the decommission status to true so that 
  //BlockPlacementPolicyDefault.isGoodTarget returns false for this dn
  d.setDecommissioned();
  Path p = new Path("/filename-foo-bar-baz");
  final short replication = (short)3;
  FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
      4096, replication, 4096L, null, addrs);
  out.write(SOME_BYTES);
  out.close();
  //reset the state
  d.stopDecommission();

  BlockLocation[] locations = getBlockLocations(p);
  Assert.assertEquals(replication, locations[0].getNames().length);
  //also make sure that the datanode[0] is not in the list of hosts
  for (int i = 0; i < replication; i++) {
    final String loc = locations[0].getNames()[i];
    int j = 0;
    for(; j < hosts.length && !loc.equals(hosts[j]); j++);
    Assert.assertTrue("j=" + j, j > 0);
    Assert.assertTrue("loc=" + loc + " not in host list "
        + Arrays.asList(hosts) + ", j=" + j, j < hosts.length);
  }
}
 
Example 26
Source Project: RDFS   Source File: TestGetBlocks.java    License: Apache License 2.0
private void getBlocksWithException(NamenodeProtocol namenode,
                                    DatanodeInfo datanode,
                                    long size) throws IOException {
  boolean getException = false;
  try {
      namenode.getBlocks(new DatanodeInfo(), 2);
  } catch(RemoteException e) {
    getException = true;
    assertTrue(e.getMessage().contains("IllegalArgumentException"));
  }
  assertTrue(getException);
}
 
Example 27
Source Project: RDFS   Source File: DFSOutputStream.java    License: Apache License 2.0
DatanodeInfo[] getPipeline() {
  synchronized (dataQueue) {
    if (nodes == null) {
      return null;
    }
    DatanodeInfo[] value = new DatanodeInfo[nodes.length];
    for (int i = 0; i < nodes.length; i++) {
      value[i] = nodes[i];
    }
    return value;
  }
}
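The element-by-element copy is a defensive snapshot: callers get an array they can inspect without racing against pipeline changes. The same method could be written with clone(), which copies the array while sharing the element references; a sketch against the same fields:

DatanodeInfo[] getPipeline() {
  synchronized (dataQueue) {
    // clone() copies the array itself; DatanodeInfo entries are shared
    return nodes == null ? null : nodes.clone();
  }
}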
 
Example 28
Source Project: RDFS   Source File: TestDecommission.java    License: Apache License 2.0
/**
 * For blocks that reside on the nodes that are down, verify that their
 * replication factor is 1 more than the specified one.
 */
private void checkFile(FileSystem fileSys, Path name, int repl,
                       String downnode, int numDatanodes) throws IOException {
  // Need a raw HDFS stream so the block locations can be enumerated directly.
  assertTrue("Not HDFS:"+fileSys.getUri(), fileSys instanceof DistributedFileSystem);
      
  DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream) 
    ((DistributedFileSystem)fileSys).open(name);
  Collection<LocatedBlock> dinfo = dis.getAllBlocks();

  for (LocatedBlock blk : dinfo) { // for each block
    int hasdown = 0;
    int firstDecomNodeIndex = -1;
    DatanodeInfo[] nodes = blk.getLocations();
    for (int j = 0; j < nodes.length; j++) {     // for each replica
      if (nodes[j].getName().equals(downnode)) {
        hasdown++;
        LOG.info("Block " + blk.getBlock() + " replica " + nodes[j].getName()
            + " is decommissioned.");
      }
      if (nodes[j].isDecommissioned()) {
        if (firstDecomNodeIndex == -1) {
          firstDecomNodeIndex = j;
        }
        continue;
      }
      assertEquals("Decom node is not at the end", firstDecomNodeIndex, -1);
    }
    LOG.info("Block " + blk.getBlock() + " has " + hasdown
        + " decommissioned replica.");
    assertEquals("Number of replicas for block " + blk.getBlock(),
                 Math.min(numDatanodes, repl+hasdown), nodes.length);  
  }
}
 
Example 29
Source Project: big-c   Source File: DataNode.java    License: Apache License 2.0
private static void logRecoverBlock(String who, RecoveringBlock rb) {
  ExtendedBlock block = rb.getBlock();
  DatanodeInfo[] targets = rb.getLocations();
  
  LOG.info(who + " calls recoverBlock(" + block
      + ", targets=[" + Joiner.on(", ").join(targets) + "]"
      + ", newGenerationStamp=" + rb.getNewGenerationStamp() + ")");
}
 
Example 30
Source Project: big-c   Source File: PBHelper.java    License: Apache License 2.0
/**
 * Convert the entries of {@code dnInfos}, starting at {@code startIdx},
 * into a list of {@code DatanodeInfoProto}s. Returns null if
 * {@code dnInfos} is null.
 */
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
    DatanodeInfo[] dnInfos, int startIdx) {
  if (dnInfos == null)
    return null;
  ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
      .newArrayListWithCapacity(dnInfos.length);
  for (int i = startIdx; i < dnInfos.length; i++) {
    protos.add(convert(dnInfos[i]));
  }
  return protos;
}
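A typical call site for the startIdx parameter is a datanode forwarding a pipelined write: it serializes only the downstream targets, skipping itself as element 0. A hedged sketch of that usage (variable names are illustrative; the pattern follows the data-transfer sender path):

// targets holds the full pipeline; element 0 is the current datanode
List<? extends HdfsProtos.DatanodeInfoProto> downstreamTargets =
    PBHelper.convert(targets, 1);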