org.apache.hadoop.hdfs.protocol.DatanodeID Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.DatanodeID. The originating project and source file are noted above each example.
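As a quick orientation before the project examples, the sketch below constructs a DatanodeID and reads back its addresses. It is a minimal, hypothetical snippet: the class name DatanodeIDExample is made up here, the seven-argument constructor and sample values mirror Example #16, and the getXferAddr(), getIpcAddr(boolean) and getInfoPort() accessors are the ones used in the examples that follow.

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIDExample {
  public static void main(String[] args) {
    // ip, hostname, datanode UUID, xfer port, info port, secure info port, ipc port
    // (same constructor shape and sample values as Example #16 below)
    DatanodeID dnId = new DatanodeID("192.168.0.1", "fakehostname",
        "fake_datanode_id", 100, 101, 102, 103);

    System.out.println("xfer address: " + dnId.getXferAddr());     // data-transfer host:port
    System.out.println("ipc address:  " + dnId.getIpcAddr(false)); // IPC address, resolved by IP
    System.out.println("info port:    " + dnId.getInfoPort());     // HTTP info port
  }
}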
Example #1
Source File: NNThroughputBenchmark.java    From hadoop with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example #2
Source File: FileChecksumServlets.java    From big-c with Apache License 2.0
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final ServletContext context = getServletContext();
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
  final UserGroupInformation ugi = getUGI(request, conf);
  final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(
      context);
  final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
  try {
    response.sendRedirect(
        createRedirectURL(ugi, datanode, request, namenode).toString());
  } catch (IOException e) {
    response.sendError(400, e.getMessage());
  }
}
 
Example #3
Source File: FileDataServlet.java    From hadoop-gpu with Apache License 2.0
/** Select a datanode to service this request.
 * Currently this considers only the first block of the file and picks a
 * datanode holding it; for empty files a random datanode is returned.
 */
private static DatanodeID pickSrcDatanode(FileStatus i,
    ClientProtocol nnproxy) throws IOException {
  // a race condition can happen by initializing a static member this way.
  // A proper fix should make JspHelper a singleton. Since it doesn't affect 
  // correctness, we leave it as is for now.
  if (jspHelper == null)
    jspHelper = new JspHelper();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      i.getPath().toUri().getPath(), 0, 1);
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    return jspHelper.randomNode();
  }
  return jspHelper.bestNode(blks.get(0));
}
 
Example #4
Source File: DatanodeProtocolClientSideTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public void commitBlockSynchronization(ExtendedBlock block,
    long newgenerationstamp, long newlength, boolean closeFile,
    boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages
    ) throws IOException {
  CommitBlockSynchronizationRequestProto.Builder builder = 
      CommitBlockSynchronizationRequestProto.newBuilder()
      .setBlock(PBHelper.convert(block)).setNewGenStamp(newgenerationstamp)
      .setNewLength(newlength).setCloseFile(closeFile)
      .setDeleteBlock(deleteblock);
  for (int i = 0; i < newtargets.length; i++) {
    // "addNewTaragets" (sic) is the generated setter name; the misspelling
    // comes from the field name in the protobuf definition.
    builder.addNewTaragets(PBHelper.convert(newtargets[i]));
    builder.addNewTargetStorages(newtargetstorages[i]);
  }
  CommitBlockSynchronizationRequestProto req = builder.build();
  try {
    rpcProxy.commitBlockSynchronization(NULL_CONTROLLER, req);
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
 
Example #5
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
protected RemoteBlockReader2(String file, String bpid, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
}
 
Example #6
Source File: DataNode.java    From hadoop with Apache License 2.0
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
    DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
  try {
    return loginUgi
        .doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
          @Override
          public InterDatanodeProtocol run() throws IOException {
            return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
                conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
          }
        });
  } catch (InterruptedException ie) {
    throw new IOException(ie.getMessage());
  }
}
 
Example #7
Source File: PeerCache.java    From hadoop with Apache License 2.0
private synchronized Peer getInternal(DatanodeID dnId, boolean isDomain) {
  List<Value> sockStreamList = multimap.get(new Key(dnId, isDomain));
  if (sockStreamList == null) {
    return null;
  }

  Iterator<Value> iter = sockStreamList.iterator();
  while (iter.hasNext()) {
    Value candidate = iter.next();
    iter.remove();
    long ageMs = Time.monotonicNow() - candidate.getTime();
    Peer peer = candidate.getPeer();
    if (ageMs >= expiryPeriod) {
      try {
        peer.close();
      } catch (IOException e) {
        LOG.warn("got IOException closing stale peer " + peer +
              ", which is " + ageMs + " ms old");
      }
    } else if (!peer.isClosed()) {
      return peer;
    }
  }
  return null;
}
 
Example #8
Source File: NameNodeRpcServer.java    From big-c with Apache License 2.0
@Override // ClientProtocol
public void updatePipeline(String clientName, ExtendedBlock oldBlock,
    ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
    throws IOException {
  checkNNStartup();
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }

  boolean success = false;
  try {
    namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes,
        newStorageIDs, cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
}
 
Example #9
Source File: DatanodeProtocolClientSideTranslatorPB.java    From big-c with Apache License 2.0
@Override
public void commitBlockSynchronization(ExtendedBlock block,
    long newgenerationstamp, long newlength, boolean closeFile,
    boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages
    ) throws IOException {
  CommitBlockSynchronizationRequestProto.Builder builder = 
      CommitBlockSynchronizationRequestProto.newBuilder()
      .setBlock(PBHelper.convert(block)).setNewGenStamp(newgenerationstamp)
      .setNewLength(newlength).setCloseFile(closeFile)
      .setDeleteBlock(deleteblock);
  for (int i = 0; i < newtargets.length; i++) {
    // "addNewTaragets" (sic) is the generated setter name; the misspelling
    // comes from the field name in the protobuf definition.
    builder.addNewTaragets(PBHelper.convert(newtargets[i]));
    builder.addNewTargetStorages(newtargetstorages[i]);
  }
  CommitBlockSynchronizationRequestProto req = builder.build();
  try {
    rpcProxy.commitBlockSynchronization(NULL_CONTROLLER, req);
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
 
Example #10
Source File: TestDeadDatanode.java    From RDFS with Apache License 2.0
/**
 * Wait for the datanode to reach the alive or dead state, waiting at most
 * waitTime milliseconds.
 */
private void waitForDatanodeState(DatanodeID nodeID, boolean alive, int waitTime)
    throws TimeoutException, InterruptedException, IOException {
  long stopTime = System.currentTimeMillis() + waitTime;
  FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
  String state = alive ? "alive" : "dead";
  while (System.currentTimeMillis() < stopTime) {
    if (namesystem.getDatanode(nodeID).isAlive == alive) {
      LOG.info("datanode " + nodeID + " is " + state);
      return;
    }
    LOG.info("Waiting for datanode " + nodeID + " to become " + state);
    Thread.sleep(1000);
  }
  throw new TimeoutException("Timedout waiting for datanode reach state "
      + state);
}
 
Example #11
Source File: DFSClient.java    From hadoop with Apache License 2.0
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example #12
Source File: FileDataServlet.java    From hadoop-gpu with Apache License 2.0
/** Create a redirection URI */
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request)
    throws IOException, URISyntaxException {
  String scheme = request.getScheme();
  final DatanodeID host = pickSrcDatanode(i, nnproxy);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = ((DatanodeInfo)host).getHostName();
  } else {
    hostname = host.getHost();
  }
  return new URI(scheme, null, hostname,
      "https".equals(scheme)
        ? (Integer)getServletContext().getAttribute("datanode.https.port")
        : host.getInfoPort(),
      "/streamFile", "filename=" + i.getPath() + "&ugi=" + ugi, null);
}
 
Example #13
Source File: TestFileAppend4.java    From RDFS with Apache License 2.0
/**
 * Test for an intermittent failure of commitBlockSynchronization.
 * This could happen if the DN crashed between calling updateBlocks
 * and commitBlockSynchronization.
 */
public void testDatanodeFailsToCommit() throws Throwable {
  LOG.info("START");
  cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs1 = cluster.getFileSystem();
  FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(fs1.getConf());
  try {
    createFile(fs1, "/datanodeFailsCommit.test", 1, BBW_SIZE);
    stm.sync();
    loseLeases(fs1);

    // Make the NN fail to commitBlockSynchronization one time
    NameNode nn = cluster.getNameNode();
    nn.namesystem = spy(nn.namesystem);
    doAnswer(new ThrowNTimesAnswer(IOException.class, 1)).
      when(nn.namesystem).
      commitBlockSynchronization((Block)anyObject(), anyInt(), anyInt(),
                                 anyBoolean(), anyBoolean(),
                                 (DatanodeID[])anyObject());

    recoverFile(fs2);
    // close() should write recovered bbw to HDFS block
    assertFileSize(fs2, BBW_SIZE);
    checkFile(fs2, BBW_SIZE);
  } finally {
    fs2.close();
    fs1.close();
    cluster.shutdown();
  }
  LOG.info("STOP");
}
 
Example #14
Source File: NamenodeProtocolTranslatorPB.java    From hadoop with Apache License 2.0
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
    throws IOException {
  GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
      .setDatanode(PBHelper.convert((DatanodeID)datanode)).setSize(size)
      .build();
  try {
    return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req)
        .getBlocks());
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example #15
Source File: TestBlockRecovery.java    From hadoop with Apache License 2.0
/**
 * BlockRecoveryFI_11. a replica's recovery id does not match new GS.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNotMatchedReplicaID() throws IOException {
  if(LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(
      StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
    try {
      dn.syncBlock(rBlock, initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      // assert on the exception message rather than discarding the check
      GenericTestUtils.assertExceptionContains("Cannot recover ", e);
    }
    DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
        anyBoolean(), any(DatanodeID[].class), any(String[].class));
  } finally {
    streams.close();
  }
}
 
Example #16
Source File: TestPeerCache.java    From hadoop with Apache License 2.0
@Test
public void testAddAndRetrieve() throws Exception {
  PeerCache cache = new PeerCache(3, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1",
        "fakehostname", "fake_datanode_id",
        100, 101, 102, 103);
  FakePeer peer = new FakePeer(dnId, false);
  cache.put(dnId, peer);
  assertTrue(!peer.isClosed());
  assertEquals(1, cache.size());
  assertEquals(peer, cache.get(dnId, false));
  assertEquals(0, cache.size());
  cache.close();
}
 
Example #17
Source File: ClientDatanodeProtocolTranslatorPB.java    From big-c with Apache License 2.0
/**
 * Constructor.
 * @param datanodeid Datanode to connect to.
 * @param conf Configuration.
 * @param socketTimeout Socket timeout to use.
 * @param connectToDnViaHostname whether to connect to the datanode via its hostname
 * @throws IOException
 */
public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
    Configuration conf, int socketTimeout, boolean connectToDnViaHostname)
    throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  rpcProxy = createClientDatanodeProtocolProxy(addr,
      UserGroupInformation.getCurrentUser(), conf,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
 
Example #18
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  if (did == null) return null;
  final int len = did.length;
  DatanodeID[] result = new DatanodeID[len];
  for (int i = 0; i < len; ++i) {
    result[i] = convert(did[i]);
  }
  return result;
}
 
Example #19
Source File: DataNodeTestUtils.java    From hadoop with Apache License 2.0
public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
    DataNode dn, DatanodeID datanodeid, final Configuration conf,
    boolean connectToDnViaHostname) throws IOException {
  if (connectToDnViaHostname != dn.getDnConf().connectToDnViaHostname) {
    throw new AssertionError("Unexpected DN hostname configuration");
  }
  return DataNode.createInterDataNodeProtocolProxy(datanodeid, conf,
      dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
}
 
Example #20
Source File: DatanodeRegistration.java    From big-c with Apache License 2.0
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
    ExportedBlockKeys keys, String softwareVersion) {
  super(dn);
  this.storageInfo = info;
  this.exportedKeys = keys;
  this.softwareVersion = softwareVersion;
}
 
Example #21
Source File: DataTransferTestUtil.java    From hadoop with Apache License 2.0
@Override
public void run(DatanodeID id) {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, id)
      && countdown.isSatisfied()) {
    final String s = toString(id);
    FiTestUtil.LOG.info(s);
    throw new OutOfMemoryError(s);
  }
}
 
Example #22
Source File: NameNodeAdapter.java    From hadoop with Apache License 2.0
/**
 * Return the datanode descriptor for the given datanode.
 */
public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
    DatanodeID id) throws IOException {
  ns.readLock();
  try {
    return ns.getBlockManager().getDatanodeManager().getDatanode(id);
  } finally {
    ns.readUnlock();
  }
}
 
Example #23
Source File: TestPBHelper.java    From big-c with Apache License 2.0
@Test
public void testConvertDatanodeRegistration() {
  DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
  BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
  ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
      getBlockKey(1), keys);
  DatanodeRegistration reg = new DatanodeRegistration(dnId,
      new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
  DatanodeRegistrationProto proto = PBHelper.convert(reg);
  DatanodeRegistration reg2 = PBHelper.convert(proto);
  compare(reg.getStorageInfo(), reg2.getStorageInfo());
  compare(reg.getExportedKeys(), reg2.getExportedKeys());
  compare(reg, reg2);
  assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
 
Example #24
Source File: DataTransferTestUtil.java    From hadoop with Apache License 2.0
@Override
public void run(DatanodeID id) {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, id)) {
    FiTestUtil.LOG.info(toString(id));
    if (maxDuration <= 0) {
      for(; FiTestUtil.sleep(1000); ); //sleep forever until interrupt
    } else {
      FiTestUtil.sleep(minDuration, maxDuration);
    }
  }
}
 
Example #25
Source File: DatanodeDescriptor.java    From big-c with Apache License 2.0
/**
 * @param nodeReg DatanodeID to update registration for.
 */
@Override
public void updateRegInfo(DatanodeID nodeReg) {
  super.updateRegInfo(nodeReg);
  
  // must re-process IBR after re-registration
  for(DatanodeStorageInfo storage : getStorageInfos()) {
    storage.setBlockReportCount(0);
  }
  heartbeatedSinceRegistration = false;
}
 
Example #26
Source File: DataNode.java    From hadoop with Apache License 2.0
BlockRecord(DatanodeID id,
            InterDatanodeProtocol datanode,
            ReplicaRecoveryInfo rInfo) {
  this.id = id;
  this.datanode = datanode;
  this.rInfo = rInfo;
}
 
Example #27
Source File: FiHFlushTestUtil.java    From hadoop with Apache License 2.0
/** {@inheritDoc} */
public void run(DatanodeID id) throws IOException {
  final Pipeline p = getPipelineTest().getPipelineForDatanode(id);
  if (p == null) {
    return;
  }
  if (p.contains(index, id)) {
    final String s = super.toString(id);
    FiTestUtil.LOG.info(s);
    throw new DiskErrorException(s);
  }
}
 
Example #28
Source File: PBHelper.java    From big-c with Apache License 2.0
public static DatanodeID[] convert(DatanodeIDProto[] did) {
  if (did == null) return null;
  final int len = did.length;
  DatanodeID[] result = new DatanodeID[len];
  for (int i = 0; i < len; ++i) {
    result[i] = convert(did[i]);
  }
  return result;
}
 
Example #29
Source File: BlockManager.java    From hadoop with Apache License 2.0
/**
 * Return a list of blocks and their locations on <code>datanode</code>
 * whose total size is <code>size</code>.
 *
 * @param datanode the datanode on which the blocks are located
 * @param size total size of the blocks
 */
public BlocksWithLocations getBlocks(DatanodeID datanode, long size
    ) throws IOException {
  namesystem.checkOperation(OperationCategory.READ);
  namesystem.readLock();
  try {
    namesystem.checkOperation(OperationCategory.READ);
    return getBlocksWithLocations(datanode, size);  
  } finally {
    namesystem.readUnlock();
  }
}
 
Example #30
Source File: DataNode.java    From big-c with Apache License 2.0
BlockRecord(DatanodeID id,
            InterDatanodeProtocol datanode,
            ReplicaRecoveryInfo rInfo) {
  this.id = id;
  this.datanode = datanode;
  this.rInfo = rInfo;
}