org.apache.hadoop.hdfs.protocol.DatanodeInfo Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.DatanodeInfo. Each example is taken from an open-source project; the source file, originating project, and license are noted above each snippet.
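
Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of one common way to obtain DatanodeInfo instances: asking a DistributedFileSystem for its datanode report via getDataNodeStats(), as several of the examples below also do. The class name and the fs.defaultFS address are placeholders; adapt them to your own cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class ListDatanodes {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder address; point this at your own NameNode.
    conf.set("fs.defaultFS", "hdfs://localhost:9000");
    try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
      // A DatanodeInfo extends DatanodeID with usage and state details:
      // hostname, rack (network location), capacity, remaining space, etc.
      for (DatanodeInfo dn : dfs.getDataNodeStats()) {
        System.out.println(dn.getHostName() + " @ " + dn.getNetworkLocation()
            + " - remaining " + dn.getRemaining() + " of " + dn.getCapacity() + " bytes");
      }
    }
  }
}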
Example #1
Source File: FileDataServlet.java    From hadoop-gpu with Apache License 2.0
/** Create a redirection URI */
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request)
    throws IOException, URISyntaxException {
  String scheme = request.getScheme();
  final DatanodeID host = pickSrcDatanode(i, nnproxy);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = ((DatanodeInfo)host).getHostName();
  } else {
    hostname = host.getHost();
  }
  return new URI(scheme, null, hostname,
      "https".equals(scheme)
        ? (Integer)getServletContext().getAttribute("datanode.https.port")
        : host.getInfoPort(),
      "/streamFile", "filename=" + i.getPath() + "&ugi=" + ugi, null);
}
 
Example #2
Source File: Receiver.java    From big-c with Apache License 2.0
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto =
    OpTransferBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    transferBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Example #3
Source File: FastCopySetupUtil.java    From RDFS with Apache License 2.0
private void verifyHardLinks(DatanodeInfo srcInfo, DatanodeInfo dstInfo,
    int srcNamespaceId, Block srcBlock, int dstNamespaceId, Block dstBlock,
    boolean hardlink) throws IOException {
  // Verify hard links.
  DataNode dnSrc = dnMap.get(srcInfo.getPort());
  File blockFileSrc = dnSrc.data.getBlockFile(srcNamespaceId, srcBlock);
  LOG.warn("Link count for : " + blockFileSrc + " is : "
      + HardLink.getLinkCount(blockFileSrc));
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileSrc) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileSrc));
  }

  DataNode dnDst = dnMap.get(dstInfo.getPort());
  File blockFileDst = dnDst.data.getBlockFile(dstNamespaceId, dstBlock);
  if (hardlink) {
    assertTrue(HardLink.getLinkCount(blockFileDst) > 1);
  } else {
    assertEquals(1, HardLink.getLinkCount(blockFileDst));
  }
}
 
Example #4
Source File: BlockReconstructor.java    From RDFS with Apache License 2.0
/**
 * Choose a datanode (hostname:portnumber). The datanode is chosen at random
 * from the live datanodes.
 * 
 * @param locationsToAvoid
 *            locations to avoid.
 * @return A string in the format name:port.
 * @throws IOException
 */
private String chooseDatanode(DatanodeInfo[] locationsToAvoid)
		throws IOException {
	DistributedFileSystem dfs = getDFS(new Path("/"));
	DatanodeInfo[] live = dfs.getClient().datanodeReport(
			DatanodeReportType.LIVE);

	Random rand = new Random();
	String chosen = null;
	int maxAttempts = 1000;
	for (int i = 0; i < maxAttempts && chosen == null; i++) {
		int idx = rand.nextInt(live.length);
		chosen = live[idx].name;
		for (DatanodeInfo avoid : locationsToAvoid) {
			if (chosen.equals(avoid.name)) {
				//LOG.info("Avoiding " + avoid.name);
				chosen = null;
				break;
			}
		}
	}
	if (chosen == null) {
		throw new IOException("Could not choose datanode");
	}
	return chosen;
}
 
Example #5
Source File: DistributedAvatarFileSystem.java    From RDFS with Apache License 2.0
public LocatedBlock addBlock(final String src, final String clientName,
    final DatanodeInfo[] excludedNodes, final DatanodeInfo[] favoredNodes)
    throws IOException {
  return (new MutableFSCaller<LocatedBlock>() {
    @Override
    LocatedBlock call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0, info
            .getLen());
          if (blocks.locatedBlockCount() > 0 ) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlock(src, clientName, excludedNodes,
        favoredNodes);
    }
  }).callFS();
}
 
Example #6
Source File: BlockReceiver.java    From big-c with Apache License 2.0
PacketResponder(final DataOutputStream upstreamOut,
    final DataInputStream downstreamIn, final DatanodeInfo[] downstreams) {
  this.downstreamIn = downstreamIn;
  this.upstreamOut = upstreamOut;

  this.type = downstreams == null? PacketResponderType.NON_PIPELINE
      : downstreams.length == 0? PacketResponderType.LAST_IN_PIPELINE
          : PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE;

  final StringBuilder b = new StringBuilder(getClass().getSimpleName())
      .append(": ").append(block).append(", type=").append(type);
  if (type != PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
    b.append(", downstreams=").append(downstreams.length)
        .append(":").append(Arrays.asList(downstreams));
  }
  this.myString = b.toString();
}
 
Example #7
Source File: Balancer.java    From RDFS with Apache License 2.0
private BalancerDatanode(
    DatanodeInfo node, double avgRemaining, double threshold) {
  datanode = node;
  remaining = Balancer.getRemaining(node);
  long sizeToMove; 
  
  if (remaining + threshold <= avgRemaining 
      || remaining - threshold  >= avgRemaining) {
    sizeToMove = (long)(threshold*datanode.getCapacity()/100);
  } else {
    sizeToMove =
      (long)(Math.abs(avgRemaining-remaining)*datanode.getCapacity()/100);
  }
  if (remaining > avgRemaining) {
    sizeToMove = Math.min(datanode.getRemaining(), sizeToMove);
  }
  this.maxSizeToMove = Math.min(MAX_SIZE_TO_MOVE, sizeToMove);
}
 
Example #8
Source File: FanOutOneBlockAsyncDFSOutputHelper.java    From hbase with Apache License 2.0
private static void initialize(Configuration conf, Channel channel, DatanodeInfo dnInfo,
    StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder, int timeoutMs,
    DFSClient client, Token<BlockTokenIdentifier> accessToken, Promise<Channel> promise)
    throws IOException {
  Promise<Void> saslPromise = channel.eventLoop().newPromise();
  trySaslNegotiate(conf, channel, dnInfo, timeoutMs, client, accessToken, saslPromise);
  saslPromise.addListener(new FutureListener<Void>() {

    @Override
    public void operationComplete(Future<Void> future) throws Exception {
      if (future.isSuccess()) {
        // setup response processing pipeline first, then send request.
        processWriteBlockResponse(channel, dnInfo, promise, timeoutMs);
        requestWriteBlock(channel, storageType, writeBlockProtoBuilder);
      } else {
        promise.tryFailure(future.cause());
      }
    }
  });
}
 
Example #9
Source File: TestAvatarAPI.java    From RDFS with Apache License 2.0
private void checkPrimary() throws Exception {
  FileStatus fs = dafs.getFileStatus(path, false);
  FileStatus[] dir = dafs.listStatus(dirPath, false);
  RemoteIterator<Path> cfb =
    dafs.listCorruptFileBlocks(dirPath, false);
  assertTrue("DAFS file status has the wrong length",
             fs != null && fs.getLen() == FILE_LEN);
  assertTrue("DAFS directory listing has the wrong length",
             dir != null && dir.length == 1);
  assertTrue("DAFS expected 0 corrupt file blocks",
             countPaths(cfb) == 0);

  ContentSummary cs = dafs.getContentSummary(path, false);
  DatanodeInfo[] di = dafs.getDataNodeStats(false);
  assertTrue("DAFS datanode info should contain 3 data nodes",
             di.length == 3);
}
 
Example #10
Source File: ReportBadBlockAction.java    From big-c with Apache License 2.0
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, 
  DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
  if (bpRegistration == null) {
    return;
  }
  DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
  String[] uuids = { storageUuid };
  StorageType[] types = { storageType };
  LocatedBlock[] locatedBlock = { new LocatedBlock(block,
      dnArr, uuids, types) };

  try {
    bpNamenode.reportBadBlocks(locatedBlock);
  } catch (RemoteException re) {
    DataNode.LOG.info("reportBadBlock encountered RemoteException for "
        + "block:  " + block , re);
  } catch (IOException e) {
    throw new BPServiceActorActionException("Failed to report bad block "
        + block + " to namenode: ");
  }
}
 
Example #11
Source File: DFSInputStream.java    From big-c with Apache License 2.0
private void fetchBlockByteRange(LocatedBlock block, long start, long end,
    byte[] buf, int offset,
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
    throws IOException {
  block = getBlockAt(block.getStartOffset());
  while (true) {
    DNAddrPair addressPair = chooseDataNode(block, null);
    try {
      actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
          corruptedBlockMap);
      return;
    } catch (IOException e) {
      // Ignore. Already processed inside the function.
      // Loop through to try the next node.
    }
  }
}
 
Example #12
Source File: TestClientReportBadBlock.java    From big-c with Apache License 2.0
/**
 * Create a file with one block and corrupt some/all of the block replicas.
 */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockCount) throws IOException, AccessControlException,
    FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the file blocks by asking name node
  final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
      .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
  Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
  // The file only has one block
  LocatedBlock lblock = locatedblocks.get(0);
  DatanodeInfo[] datanodeinfos = lblock.getLocations();
  ExtendedBlock block = lblock.getBlock();
  // corrupt some /all of the block replicas
  for (int i = 0; i < corruptBlockCount; i++) {
    DatanodeInfo dninfo = datanodeinfos[i];
    final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
    corruptBlock(block, dn);
    LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
        + dninfo);

  }
}
 
Example #13
Source File: TestDecommission.java    From RDFS with Apache License 2.0
public void testClusterStats(int numNameNodes, boolean federation) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf, federation);
  
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    
    NameNode namenode = cluster.getNameNode(i);
    FSNamesystem fsn = namenode.namesystem;
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);
    
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    fsn.refreshNodes(conf);
    DatanodeInfo ret = fsn.getDatanode(downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
 
Example #14
Source File: PlacementMonitor.java    From RDFS with Apache License 2.0
public void initialize(Path path, FileSystem fs) throws IOException {
  if (pathAndOffsetToLocatedBlock.containsKey(path)) {
    return;
  }
  VersionedLocatedBlocks pathLbs = getLocatedBlocks(path, fs);
  pathAndOffsetToLocatedBlock.put(
      path, createOffsetToLocatedBlockMap(pathLbs));

  for (LocatedBlocks lbs : Arrays.asList(pathLbs)) {
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      for (DatanodeInfo dn : lb.getLocations()) {
        nameToDatanodeInfo.put(dn.getName(), dn);
      }
    }
  }
}
 
Example #15
Source File: DistributedAvatarFileSystem.java    From RDFS with Apache License 2.0
@Override
public LocatedBlockWithMetaInfo addBlockAndFetchMetaInfo(
    final String src, final String clientName,
    final DatanodeInfo[] excludedNodes) throws IOException {
  return (new MutableFSCaller<LocatedBlockWithMetaInfo>() {
    @Override
    LocatedBlockWithMetaInfo call(int retries) throws IOException {
      if (retries > 0) {
        FileStatus info = namenode.getFileInfo(src);
        if (info != null) {
          LocatedBlocks blocks = namenode.getBlockLocations(src, 0, info
              .getLen());
          if (blocks.locatedBlockCount() > 0 ) {
            LocatedBlock last = blocks.get(blocks.locatedBlockCount() - 1);
            if (last.getBlockSize() == 0) {
              // This one has not been written to
              namenode.abandonBlock(last.getBlock(), src, clientName);
            }
          }
        }
      }
      return namenode.addBlockAndFetchMetaInfo(src, clientName,
          excludedNodes);
    }
  }).callFS();
}
 
Example #16
Source File: TestDataTransferProtocol.java    From big-c with Apache License 2.0
void writeBlock(ExtendedBlock block, BlockConstructionStage stage,
    long newGS, DataChecksum checksum) throws IOException {
  sender.writeBlock(block, StorageType.DEFAULT,
      BlockTokenSecretManager.DUMMY_TOKEN, "cl",
      new DatanodeInfo[1], new StorageType[1], null, stage,
      0, block.getNumBytes(), block.getNumBytes(), newGS,
      checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
}
 
Example #17
Source File: DFSAdmin.java    From hadoop with Apache License 2.0
/**
 * Display each rack and the nodes assigned to that rack, as determined
 * by the NameNode, in a hierarchical manner.  The nodes and racks are
 * sorted alphabetically.
 * 
 * @throws IOException If an error while getting datanode report
 */
public int printTopology() throws IOException {
    DistributedFileSystem dfs = getDFS();
    final DatanodeInfo[] report = dfs.getDataNodeStats();

    // Build a map of rack -> nodes from the datanode report
    HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
    for(DatanodeInfo dni : report) {
      String location = dni.getNetworkLocation();
      String name = dni.getName();
      
      if(!tree.containsKey(location)) {
        tree.put(location, new TreeSet<String>());
      }
      
      tree.get(location).add(name);
    }
    
    // Sort the racks (and nodes) alphabetically, display in order
    ArrayList<String> racks = new ArrayList<String>(tree.keySet());
    Collections.sort(racks);
    
    for(String r : racks) {
      System.out.println("Rack: " + r);
      TreeSet<String> nodes = tree.get(r);

      for(String n : nodes) {
        System.out.print("   " + n);
        String hostname = NetUtils.getHostNameOfIP(n);
        if(hostname != null)
          System.out.print(" (" + hostname + ")");
        System.out.println();
      }

      System.out.println();
    }
  return 0;
}
 
Example #18
Source File: DFSClient.java    From RDFS with Apache License 2.0
protected int numNodeLeft(DatanodeInfo nodes[],
    AbstractMap<DatanodeInfo, DatanodeInfo> deadNodes) {
  int nodesLeft = 0;
  if (nodes != null) {
    for (int i = 0; i < nodes.length; i++) {
      if (!deadNodes.containsKey(nodes[i])) {
        nodesLeft++;
      }
    }
  }
  return nodesLeft;
}
 
Example #19
Source File: Receiver.java    From big-c with Apache License 2.0
/** Receive OP_WRITE_BLOCK */
private void opWriteBlock(DataInputStream in) throws IOException {
  final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelper.convertStorageType(proto.getStorageType()),
        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(),
        targets,
        PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
        PBHelper.convert(proto.getSource()),
        fromProto(proto.getStage()),
        proto.getPipelineSize(),
        proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
        proto.getLatestGenerationStamp(),
        fromProto(proto.getRequestedChecksum()),
        (proto.hasCachingStrategy() ?
            getCachingStrategy(proto.getCachingStrategy()) :
          CachingStrategy.newDefaultStrategy()),
        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
        (proto.hasPinning() ? proto.getPinning(): false),
        (PBHelper.convertBooleanList(proto.getTargetPinningsList())));
  } finally {
    if (traceScope != null) traceScope.close();
  }
}
 
Example #20
Source File: FileDataServlet.java    From hadoop with Apache License 2.0
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status, 
    UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
    throws IOException {
  String scheme = request.getScheme();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
  final Configuration conf = NameNodeHttpServer.getConfFromContext(
      getServletContext());
  final DatanodeID host = pickSrcDatanode(blks, status, conf);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = host.getHostName();
  } else {
    hostname = host.getIpAddr();
  }

  int port = "https".equals(scheme) ? host.getInfoSecurePort() : host
      .getInfoPort();

  String dtParam = "";
  if (dt != null) {
    dtParam = JspHelper.getDelegationTokenUrlParam(dt);
  }

  // Add namenode address to the url params
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
      getServletContext());
  String addr = nn.getNameNodeAddressHostPortString();
  String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
  
  return new URL(scheme, hostname, port,
      "/streamFile" + encodedPath + '?' +
      "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
      dtParam + addrParam);
}
 
Example #21
Source File: DfsServlet.java    From hadoop-gpu with Apache License 2.0
/** Create a URI for redirecting request */
protected URI createRedirectUri(String servletpath, UserGroupInformation ugi,
    DatanodeID host, HttpServletRequest request) throws URISyntaxException {
  final String hostname = host instanceof DatanodeInfo?
      ((DatanodeInfo)host).getHostName(): host.getHost();
  final String scheme = request.getScheme();
  final int port = "https".equals(scheme)?
      (Integer)getServletContext().getAttribute("datanode.https.port")
      : host.getInfoPort();
  final String filename = request.getPathInfo();
  return new URI(scheme, null, hostname, port, servletpath,
      "filename=" + filename + "&ugi=" + ugi, null);
}
 
Example #22
Source File: TestDecommission.java    From RDFS with Apache License 2.0
public void testHostsFile(int numNameNodes, boolean federation) throws IOException,
    InterruptedException {
  conf.set("dfs.hosts", hostsFile.toUri().getPath());
  int numDatanodes = 1;
  cluster = new MiniDFSCluster(0, conf, numDatanodes, true, true,
      true, null, null, null, null, true, true, numNameNodes, federation);
  cluster.waitActive();
  
  // Now empty hosts file and ensure the datanode is disallowed
  // from talking to namenode, resulting in its shutdown.
  ArrayList<String> list = new ArrayList<String>();
  list.add("invalidhost");
  writeConfigFile(hostsFile, list);
  
  for (int j = 0; j < numNameNodes; j++) {
    cluster.getNameNode(j).namesystem.refreshNodes(conf);
    
    DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    for (int i = 0 ; i < 20 && info.length != 0; i++) {
      LOG.info("Waiting for datanode to be marked dead");
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = client.datanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be 0", 0, info.length);
  }
}
 
Example #23
Source File: BlockMover.java    From RDFS with Apache License 2.0
public void move(LocatedBlock block, DatanodeInfo node,
    Set<DatanodeInfo> excludedNodes, int priority,
    int dataTransferProtocolVersion, int namespaceId) {
  BlockMoveAction action = new BlockMoveAction(
      block, node, excludedNodes, priority,
      dataTransferProtocolVersion, namespaceId);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Bad block placement: " + action);
  }
  int movingQueueSize = movingQueue.size();
  //For high-pri moves, the queue limit is 2*maxQueueSize
  if (movingQueueSize < maxQueueSize ||
      movingQueueSize < 2 * maxQueueSize &&
      action.priority >= alwaysSubmitPriorityLevel) {
  	LOG.info("move : " + action + " started");
    executor.execute(action);
    //Thread mover = new Thread(action);
    //mover.run();
    metrics.blockMoveScheduled.inc();
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block move queue is full. Skip the action." +
        " size:" + movingQueueSize +
        " maxSize:" + maxQueueSize);
    }
    metrics.blockMoveSkipped.inc();
  }
}
 
Example #24
Source File: BPServiceActor.java    From hadoop with Apache License 2.0
/**
 * Report a bad block from another DN in this cluster.
 */
void reportRemoteBadBlock(DatanodeInfo dnInfo, ExtendedBlock block)
    throws IOException {
  LocatedBlock lb = new LocatedBlock(block, 
                                  new DatanodeInfo[] {dnInfo});
  bpNamenode.reportBadBlocks(new LocatedBlock[] {lb});
}
 
Example #25
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static LocatedBlock convert(LocatedBlockProto proto) {
  if (proto == null) return null;
  List<DatanodeInfoProto> locs = proto.getLocsList();
  DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
  for (int i = 0; i < locs.size(); i++) {
    targets[i] = PBHelper.convert(locs.get(i));
  }

  final StorageType[] storageTypes = convertStorageTypes(
      proto.getStorageTypesList(), locs.size());

  final int storageIDsCount = proto.getStorageIDsCount();
  final String[] storageIDs;
  if (storageIDsCount == 0) {
    storageIDs = null;
  } else {
    Preconditions.checkState(storageIDsCount == locs.size());
    storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
  }

  // Set values from the isCached list, re-using references from loc
  List<DatanodeInfo> cachedLocs = new ArrayList<DatanodeInfo>(locs.size());
  List<Boolean> isCachedList = proto.getIsCachedList();
  for (int i=0; i<isCachedList.size(); i++) {
    if (isCachedList.get(i)) {
      cachedLocs.add(targets[i]);
    }
  }

  LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets,
      storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
      cachedLocs.toArray(new DatanodeInfo[0]));
  lb.setBlockToken(PBHelper.convert(proto.getBlockToken()));

  return lb;
}
 
Example #26
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Display each rack and the nodes assigned to that rack, as determined
 * by the NameNode, in a hierarchical manner.  The nodes and racks are
 * sorted alphabetically.
 * 
 * @throws IOException If an error while getting datanode report
 */
public int printTopology() throws IOException {
    DistributedFileSystem dfs = getDFS();
    final DatanodeInfo[] report = dfs.getDataNodeStats();

    // Build a map of rack -> nodes from the datanode report
    HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
    for(DatanodeInfo dni : report) {
      String location = dni.getNetworkLocation();
      String name = dni.getName();
      
      if(!tree.containsKey(location)) {
        tree.put(location, new TreeSet<String>());
      }
      
      tree.get(location).add(name);
    }
    
    // Sort the racks (and nodes) alphabetically, display in order
    ArrayList<String> racks = new ArrayList<String>(tree.keySet());
    Collections.sort(racks);
    
    for(String r : racks) {
      System.out.println("Rack: " + r);
      TreeSet<String> nodes = tree.get(r);

      for(String n : nodes) {
        System.out.print("   " + n);
        String hostname = NetUtils.getHostNameOfIP(n);
        if(hostname != null)
          System.out.print(" (" + hostname + ")");
        System.out.println();
      }

      System.out.println();
    }
  return 0;
}
 
Example #27
Source File: TestBlockReplacement.java    From hadoop-gpu with Apache License 2.0
private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, 
    long fileLen, short replFactor, DFSClient client) throws IOException {
  Boolean notDone;
  do {
    try {
      Thread.sleep(100);
    } catch(InterruptedException e) {
    }
    List<LocatedBlock> blocks = client.namenode.
    getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
    assertEquals(1, blocks.size());
    DatanodeInfo[] nodes = blocks.get(0).getLocations();
    notDone = (nodes.length != replFactor);
    if (notDone) {
      LOG.info("Expected replication factor is " + replFactor +
          " but the real replication factor is " + nodes.length );
    } else {
      List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
      for (DatanodeInfo node : includeNodes) {
        if (!nodeLocations.contains(node) ) {
          notDone=true; 
          LOG.info("Block is not located at " + node.getName() );
          break;
        }
      }
    }
  } while(notDone);
}
 
Example #28
Source File: TestDecommission.java    From hadoop-gpu with Apache License 2.0
private void printDatanodeReport(DatanodeInfo[] info) {
  System.out.println("-------------------------------------------------");
  for (int i = 0; i < info.length; i++) {
    System.out.println(info[i].getDatanodeReport());
    System.out.println();
  }
}
 
Example #29
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
  DatanodeInfo[] info = new DatanodeInfo[list.size()];
  for (int i = 0; i < info.length; i++) {
    info[i] = convert(list.get(i));
  }
  return info;
}
 
Example #30
Source File: FastCopy.java    From RDFS with Apache License 2.0
/**
 * Increments the number of errors for a datanode in the datanodeErrors
 * map by 1.
 *
 * @param node
 *          the datanode which needs to be updated
 */
private void updateDatanodeErrors(DatanodeInfo node) {
  synchronized (datanodeErrors) {
    Integer errors = datanodeErrors.get(node);
    if (errors == null) {
      errors = new Integer(0);
    }
    int e = errors;
    datanodeErrors.put(node, new Integer(++e));
  }
}