Java Code Examples for org.apache.hadoop.hdfs.protocol.DatanodeID

The following examples show how to use org.apache.hadoop.hdfs.protocol.DatanodeID. They are extracted from open source projects, and each one is preceded by a note of the project and source file it comes from.
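
As a quick orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) that constructs a DatanodeID by hand and reads back its addresses. It assumes the seven-argument constructor shown in Example 18; the class name, addresses, and ports are placeholders, not values from a real cluster.

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIDSketch {
  public static void main(String[] args) {
    // Argument order: ip, hostname, datanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort
    DatanodeID id = new DatanodeID("192.168.0.1", "fakehostname", "fake_datanode_uuid",
        50010, 50075, 50475, 50020);
    System.out.println(id.getXferAddr());     // data-transfer address, e.g. "192.168.0.1:50010"
    System.out.println(id.getIpcAddr(false)); // IPC address; pass true to resolve via the hostname
  }
}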
Example 1
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
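
Note the success-flag pattern here: if anything throws before success is set, the finally block closes both the half-built peer and the raw socket, while a fully established peer is returned for the caller to manage.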
 
Example 2
Source Project: RDFS   Source File: TestDeadDatanode.java    License: Apache License 2.0
/**
 * Wait for the datanode to reach the alive or dead state, within waitTime
 * milliseconds.
 */
private void waitForDatanodeState(DatanodeID nodeID, boolean alive, int waitTime)
    throws TimeoutException, InterruptedException, IOException {
  long stopTime = System.currentTimeMillis() + waitTime;
  FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
  String state = alive ? "alive" : "dead";
  while (System.currentTimeMillis() < stopTime) {
    if (namesystem.getDatanode(nodeID).isAlive == alive) {
      LOG.info("datanode " + nodeID + " is " + state);
      return;
    }
    LOG.info("Waiting for datanode " + nodeID + " to become " + state);
    Thread.sleep(1000);
  }
  throw new TimeoutException("Timed out waiting for datanode to reach state "
      + state);
}
 
Example 3
Source Project: hadoop   Source File: PeerCache.java    License: Apache License 2.0
private synchronized Peer getInternal(DatanodeID dnId, boolean isDomain) {
  List<Value> sockStreamList = multimap.get(new Key(dnId, isDomain));
  if (sockStreamList == null) {
    return null;
  }

  Iterator<Value> iter = sockStreamList.iterator();
  while (iter.hasNext()) {
    Value candidate = iter.next();
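    // Remove the candidate up front: it will either be returned to the caller,
    // closed as stale, or dropped if it was already closed.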
    iter.remove();
    long ageMs = Time.monotonicNow() - candidate.getTime();
    Peer peer = candidate.getPeer();
    if (ageMs >= expiryPeriod) {
      try {
        peer.close();
      } catch (IOException e) {
        LOG.warn("got IOException closing stale peer " + peer +
              ", which is " + ageMs + " ms old");
      }
    } else if (!peer.isClosed()) {
      return peer;
    }
  }
  return null;
}
 
Example 4
Source Project: big-c   Source File: RemoteBlockReader2.java    License: Apache License 2.0
protected RemoteBlockReader2(String file, String bpid, long blockId,
    DataChecksum checksum, boolean verifyChecksum,
    long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache) {
  this.isLocal = DFSClient.isLocalAddress(NetUtils.
      createSocketAddr(datanodeID.getXferAddr()));
  // Path is used only for printing block and file information in debug
  this.peer = peer;
  this.datanodeID = datanodeID;
  this.in = peer.getInputStreamChannel();
  this.checksum = checksum;
  this.verifyChecksum = verifyChecksum;
  this.startOffset = Math.max( startOffset, 0 );
  this.filename = file;
  this.peerCache = peerCache;
  this.blockId = blockId;

  // The total number of bytes that we need to transfer from the DN is
  // the amount that the user wants (bytesToRead), plus the padding at
  // the beginning in order to chunk-align. Note that the DN may elect
  // to send more than this amount if the read starts/ends mid-chunk.
  this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
  bytesPerChecksum = this.checksum.getBytesPerChecksum();
  checksumSize = this.checksum.getChecksumSize();
}
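
To make the chunk-alignment arithmetic concrete (numbers invented for illustration): with 512-byte chunks, a read of bytesToRead = 100 starting at startOffset = 700 is served from firstChunkOffset = 512, so bytesNeededToFinish = 100 + (700 - 512) = 288 bytes must arrive from the datanode before the requested range is satisfied.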
 
Example 5
Source Project: big-c   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override // ClientProtocol
public void updatePipeline(String clientName, ExtendedBlock oldBlock,
    ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
    throws IOException {
  checkNNStartup();
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return; // Return previous response
  }

  boolean success = false;
  try {
    namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes,
        newStorageIDs, cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
}
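
The RetryCache.waitForCompletion / RetryCache.setState pair is what makes updatePipeline safe to retry: a retried call whose first attempt already succeeded returns the cached result immediately instead of re-applying the pipeline change.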
 
Example 6
Source Project: big-c   Source File: FileChecksumServlets.java    License: Apache License 2.0
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final ServletContext context = getServletContext();
  final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
  final UserGroupInformation ugi = getUGI(request, conf);
  final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(
      context);
  final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
  try {
    response.sendRedirect(
        createRedirectURL(ugi, datanode, request, namenode).toString());
  } catch (IOException e) {
    response.sendError(400, e.getMessage());
  }
}
 
Example 7
Source Project: hadoop   Source File: DataNode.java    License: Apache License 2.0
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
    DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
  final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
  }
  final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
  try {
    return loginUgi
        .doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
          @Override
          public InterDatanodeProtocol run() throws IOException {
            return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
                conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
          }
        });
  } catch (InterruptedException ie) {
    throw new IOException(ie.getMessage());
  }
}
 
Example 8
Source Project: hadoop-gpu   Source File: FileDataServlet.java    License: Apache License 2.0
/** Create a redirection URI */
protected URI createUri(FileStatus i, UnixUserGroupInformation ugi,
    ClientProtocol nnproxy, HttpServletRequest request)
    throws IOException, URISyntaxException {
  String scheme = request.getScheme();
  final DatanodeID host = pickSrcDatanode(i, nnproxy);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = ((DatanodeInfo)host).getHostName();
  } else {
    hostname = host.getHost();
  }
  return new URI(scheme, null, hostname,
      "https".equals(scheme)
        ? (Integer)getServletContext().getAttribute("datanode.https.port")
        : host.getInfoPort(),
      "/streamFile", "filename=" + i.getPath() + "&ugi=" + ugi, null);
}
 
Example 9
@Override
public void commitBlockSynchronization(ExtendedBlock block,
    long newgenerationstamp, long newlength, boolean closeFile,
    boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages
    ) throws IOException {
  CommitBlockSynchronizationRequestProto.Builder builder = 
      CommitBlockSynchronizationRequestProto.newBuilder()
      .setBlock(PBHelper.convert(block)).setNewGenStamp(newgenerationstamp)
      .setNewLength(newlength).setCloseFile(closeFile)
      .setDeleteBlock(deleteblock);
  for (int i = 0; i < newtargets.length; i++) {
    // Note: "addNewTaragets" (sic) mirrors the misspelled newTaragets field
    // in the upstream DatanodeProtocol.proto definition.
    builder.addNewTaragets(PBHelper.convert(newtargets[i]));
    builder.addNewTargetStorages(newtargetstorages[i]);
  }
  CommitBlockSynchronizationRequestProto req = builder.build();
  try {
    rpcProxy.commitBlockSynchronization(NULL_CONTROLLER, req);
  } catch (ServiceException se) {
    throw ProtobufHelper.getRemoteException(se);
  }
}
 
Example 10
Source Project: hadoop-gpu   Source File: FileDataServlet.java    License: Apache License 2.0
/** Select a datanode to service this request.
 * Currently this looks only at the first block of the file, picking one of
 * that block's datanodes, or a random datanode if the file has no blocks.
 */
private static DatanodeID pickSrcDatanode(FileStatus i,
    ClientProtocol nnproxy) throws IOException {
  // a race condition can happen by initializing a static member this way.
  // A proper fix should make JspHelper a singleton. Since it doesn't affect 
  // correctness, we leave it as is for now.
  if (jspHelper == null)
    jspHelper = new JspHelper();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      i.getPath().toUri().getPath(), 0, 1);
  if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
    // pick a random datanode
    return jspHelper.randomNode();
  }
  return jspHelper.bestNode(blks.get(0));
}
 
Example 11
Source Project: hadoop   Source File: NNThroughputBenchmark.java    License: Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  //first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
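
Here register() fabricates a DatanodeID from the local host's default IP and hostname, a freshly generated UUID, and the stock default ports, so the benchmark can register simulated datanodes and send empty initial block reports without starting real datanode processes.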
 
Example 12
Source Project: big-c   Source File: TestDFSClientRetries.java    License: Apache License 2.0
/** Test that a timeout occurs when the DN does not respond to an RPC.
 * Start up a server and ask it to sleep for n seconds. Make an
 * RPC to the server with rpcTimeout set to less than n, and ensure
 * that a SocketTimeoutException is raised.
 */
@Test
public void testClientDNProtocolTimeout() throws IOException {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

  ClientDatanodeProtocol proxy = null;

  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(
        fakeDnId, conf, 500, false, fakeBlock);

    proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
    fail ("Did not get expected exception: SocketTimeoutException");
  } catch (SocketTimeoutException e) {
    LOG.info("Got the expected Exception: SocketTimeoutException");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Example 13
Source Project: big-c   Source File: DataNode.java    License: Apache License 2.0
BlockRecord(DatanodeID id,
            InterDatanodeProtocol datanode,
            ReplicaRecoveryInfo rInfo) {
  this.id = id;
  this.datanode = datanode;
  this.rInfo = rInfo;
}
 
Example 14
Source Project: big-c   Source File: DataTransferTestUtil.java    License: Apache License 2.0
@Override
public void run(DatanodeID id) {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, id)
      && countdown.isSatisfied()) {
    final String s = toString(id) + ", duration = ["
        + minDuration + "," + maxDuration + ")";
    FiTestUtil.LOG.info(s);
    if (maxDuration <= 1) {
      for(; FiTestUtil.sleep(1000); ); //sleep forever until interrupt
    } else {
      FiTestUtil.sleep(minDuration, maxDuration);
    }
  }
}
 
Example 15
Source Project: big-c   Source File: TestFiDataTransferProtocol.java    License: Apache License 2.0
private static void runCallWritePacketToDisk(String methodName,
    int errorIndex, Action<DatanodeID, IOException> a) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
  t.fiCallWritePacketToDisk.set(a);
  t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, errorIndex));
  write1byte(methodName);
  Assert.assertTrue(t.isSuccess());
}
 
Example 16
Source Project: RDFS   Source File: DatanodeDescriptor.java    License: Apache License 2.0
/** DatanodeDescriptor constructor
 * 
 * @param nodeID id of the data node
 * @param networkLocation location of the data node in network
 * @param hostName host name of the data node
 * @param capacity capacity of the data node, including space used by non-dfs
 * @param dfsUsed the used space by dfs datanode
 * @param remaining remaining capacity of the data node
 * @param namespaceUsed space used by the data node's namespace
 * @param xceiverCount # of data transfers at the data node
 */
public DatanodeDescriptor(DatanodeID nodeID,
                          String networkLocation,
                          String hostName,
                          long capacity,
                          long dfsUsed,
                          long remaining,
                          long namespaceUsed, 
                          int xceiverCount) {
  super(nodeID, networkLocation, hostName);
  updateHeartbeat(capacity, dfsUsed, remaining, namespaceUsed, xceiverCount);
}
 
Example 17
Source Project: big-c   Source File: TestFiDataTransferProtocol.java    License: Apache License 2.0
private static void runStatusReadTest(String methodName, int errorIndex,
    Action<DatanodeID, IOException> a) throws IOException {
  FiTestUtil.LOG.info("Running " + methodName + " ...");
  final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
      .initTest();
  t.fiStatusRead.set(a);
  t.fiPipelineInitErrorNonAppend.set(new VerificationAction(methodName,
      errorIndex));
  write1byte(methodName);
  Assert.assertTrue(t.isSuccess());
}
 
Example 18
Source Project: big-c   Source File: TestPeerCache.java    License: Apache License 2.0
@Test
public void testAddAndRetrieve() throws Exception {
  PeerCache cache = new PeerCache(3, 100000);
  DatanodeID dnId = new DatanodeID("192.168.0.1",
        "fakehostname", "fake_datanode_id",
        100, 101, 102, 103);
  FakePeer peer = new FakePeer(dnId, false);
  cache.put(dnId, peer);
  assertTrue(!peer.isClosed());
  assertEquals(1, cache.size());
  assertEquals(peer, cache.get(dnId, false));
  assertEquals(0, cache.size());
  cache.close();
}
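
The four numeric arguments (100, 101, 102, 103) fill the xfer, info, secure-info, and ipc port slots of the seven-argument DatanodeID constructor (see the sketch after the introduction); since the peer is a test fake, the values serve only to key the cache and need not be reachable ports.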
 
Example 19
Source Project: hadoop   Source File: DataTransferTestUtil.java    License: Apache License 2.0
/** Set the marker if the DatanodeID is matched. */
@Override
public void run(DatanodeID datanodeid) throws IOException {
  final DataTransferTest test = getDataTransferTest();
  if (test.isNotSuccessAndLastPipelineContains(index, datanodeid)) {
    marker.mark();
  }
}
 
Example 20
Source Project: hadoop   Source File: SaslDataTransferClient.java    License: Apache License 2.0
/**
 * Sends client SASL negotiation for general-purpose handshake.
 *
 * @param addr connection address
 * @param underlyingOut connection output stream
 * @param underlyingIn connection input stream
 * @param accessToken connection block access token
 * @param datanodeId ID of destination DataNode
 * @return new pair of streams, wrapped after SASL negotiation
 * @throws IOException for any error
 */
private IOStreamPair getSaslStreams(InetAddress addr,
    OutputStream underlyingOut, InputStream underlyingIn,
    Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
    throws IOException {
  Map<String, String> saslProps = saslPropsResolver.getClientProperties(addr);

  String userName = buildUserName(accessToken);
  char[] password = buildClientPassword(accessToken);
  CallbackHandler callbackHandler = new SaslClientCallbackHandler(userName,
    password);
  return doSaslHandshake(underlyingOut, underlyingIn, userName, saslProps,
    callbackHandler);
}
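
Note that the SASL identity is derived entirely from the block access token: buildUserName and buildClientPassword turn the token into the handshake credentials, so the datanode can verify the client against the token it already trusts rather than through a separate Kerberos exchange.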
 
Example 21
Source Project: big-c   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override // DatanodeProtocol
public void commitBlockSynchronization(ExtendedBlock block,
    long newgenerationstamp, long newlength,
    boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,
    String[] newtargetstorages)
    throws IOException {
  checkNNStartup();
  namesystem.commitBlockSynchronization(block, newgenerationstamp,
      newlength, closeFile, deleteblock, newtargets, newtargetstorages);
}
 
Example 22
Source Project: hadoop   Source File: PeerCache.java    License: Apache License 2.0
private synchronized void putInternal(DatanodeID dnId, Peer peer) {
  startExpiryDaemon();

  if (capacity == multimap.size()) {
    evictOldest();
  }
  multimap.put(new Key(dnId, peer.getDomainSocket() != null),
      new Value(peer, Time.monotonicNow()));
}
 
Example 23
Source Project: hadoop   Source File: FileChecksumServlets.java    License: Apache License 2.0
/** Create a redirection URL */
private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host,
    HttpServletRequest request, NameNode nn) 
    throws IOException {
  final String hostname = host instanceof DatanodeInfo 
      ? host.getHostName() : host.getIpAddr();
  final String scheme = request.getScheme();
  int port = host.getInfoPort();
  if ("https".equals(scheme)) {
    final Integer portObject = (Integer) getServletContext().getAttribute(
        DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY);
    if (portObject != null) {
      port = portObject;
    }
  }
  final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum");

  String dtParam = "";
  if (UserGroupInformation.isSecurityEnabled()) {
    String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
    dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
  }
  String addr = nn.getNameNodeAddressHostPortString();
  String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);

  return new URL(scheme, hostname, port, 
      "/getFileChecksum" + encodedPath + '?' +
      "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) + 
      dtParam + addrParam);
}
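
When security is enabled, the user's delegation token is appended to the redirect URL (the dtParam above), so the datanode serving /getFileChecksum can authenticate the request that follows the redirect.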
 
Example 24
Source Project: big-c   Source File: TestDecommission.java    License: Apache License 2.0
public void testHostsFile(int numNameNodes) throws IOException,
    InterruptedException {
  int numDatanodes = 1;
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(numNameNodes))
      .numDataNodes(numDatanodes).setupHostsFile(true).build();
  cluster.waitActive();
  
  // Now empty the hosts file and ensure the datanode is disallowed
  // from talking to the namenode, resulting in its shutdown.
  ArrayList<String> list = new ArrayList<String>();
  final String bogusIp = "127.0.30.1";
  list.add(bogusIp);
  writeConfigFile(hostsFile, list);
  
  for (int j = 0; j < numNameNodes; j++) {
    refreshNodes(cluster.getNamesystem(j), conf);
    
    DFSClient client = getDfsClient(cluster.getNameNode(j), conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    for (int i = 0 ; i < 5 && info.length != 0; i++) {
      LOG.info("Waiting for datanode to be marked dead");
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = client.datanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be 0", 0, info.length);
    
    // Test that non-live and bogus hostnames are considered "dead".
    // The dead report should have an entry for (1) the DN that is
    // now considered dead because it is no longer allowed to connect
    // and (2) the bogus entry in the hosts file (these entries are
    // always added last)
    info = client.datanodeReport(DatanodeReportType.DEAD);
    assertEquals("There should be 2 dead nodes", 2, info.length);
    DatanodeID id = cluster.getDataNodes().get(0).getDatanodeId();
    assertEquals(id.getHostName(), info[0].getHostName());
    assertEquals(bogusIp, info[1].getHostName());
  }
}
 
Example 25
Source Project: big-c   Source File: TestCommitBlockSynchronization.java    License: Apache License 2.0
@Test
public void testCommitBlockSynchronization() throws IOException {
  INodeFile file = mockFileUnderConstruction();
  Block block = new Block(blockId, length, genStamp);
  FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
  DatanodeID[] newTargets = new DatanodeID[0];

  ExtendedBlock lastBlock = new ExtendedBlock();
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false,
      false, newTargets, null);

  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, false, newTargets, null);

  // Simulate 'completing' the block.
  BlockInfoContiguous completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
  completedBlockInfo.setBlockCollection(file);
  completedBlockInfo.setGenerationStamp(genStamp);
  doReturn(completedBlockInfo).when(namesystemSpy)
      .getStoredBlock(any(Block.class));
  doReturn(completedBlockInfo).when(file).getLastBlock();

  // Repeat the call to make sure it does not throw
  namesystemSpy.commitBlockSynchronization(
      lastBlock, genStamp, length, false, false, newTargets, null);
}
 
Example 26
Source Project: hadoop-gpu   Source File: DataNode.java    License: Apache License 2.0
private static void logRecoverBlock(String who,
    Block block, DatanodeID[] targets) {
  StringBuilder msg = new StringBuilder(targets[0].getName());
  for (int i = 1; i < targets.length; i++) {
    msg.append(", " + targets[i].getName());
  }
  LOG.info(who + " calls recoverBlock(block=" + block
      + ", targets=[" + msg + "])");
}
 
Example 27
Source Project: hadoop   Source File: TestDFSClientRetries.java    License: Apache License 2.0
/** Test that a timeout occurs when the DN does not respond to an RPC.
 * Start up a server and ask it to sleep for n seconds. Make an
 * RPC to the server with rpcTimeout set to less than n, and ensure
 * that a SocketTimeoutException is raised.
 */
@Test
public void testClientDNProtocolTimeout() throws IOException {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

  ClientDatanodeProtocol proxy = null;

  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(
        fakeDnId, conf, 500, false, fakeBlock);

    proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
    fail ("Did not get expected exception: SocketTimeoutException");
  } catch (SocketTimeoutException e) {
    LOG.info("Got the expected Exception: SocketTimeoutException");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Example 28
Source Project: big-c   Source File: FiHFlushTestUtil.java    License: Apache License 2.0
/** {@inheritDoc} */
public void run(DatanodeID id) throws IOException {
  final Pipeline p = getPipelineTest().getPipelineForDatanode(id);
  if (p == null) {
    return;
  }
  if (p.contains(index, id)) {
    final String s = super.toString(id);
    FiTestUtil.LOG.info(s);
    throw new DiskErrorException(s);
  }
}
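
This is a fault-injection action: when the pipeline under test contains the given datanode at position index, it throws a DiskErrorException to simulate a disk failure at that point in the pipeline.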
 
Example 29
Source Project: hadoop   Source File: DatanodeRegistration.java    License: Apache License 2.0
@VisibleForTesting
public DatanodeRegistration(String uuid, DatanodeRegistration dnr) {
  this(new DatanodeID(uuid, dnr),
       dnr.getStorageInfo(),
       dnr.getExportedKeys(),
       dnr.getSoftwareVersion());
}