Java Code Examples for org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol#copyBlock()

The following examples show how to use org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol#copyBlock(). Each example notes its original source file and project.
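Before the full examples, here is a minimal sketch of the call pattern they share: open a ClientDatanodeProtocol proxy to the datanode that holds the source replica, invoke copyBlock(), and release the proxy. The class and method names below are hypothetical placeholders; the proxy factory and the three-argument copyBlock overload are the ones the examples themselves use.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.ipc.RPC;

public class CopyBlockSketch {
  // srcDatanode, srcBlock, dstBlock and target are placeholders supplied by
  // the caller; this is a sketch of the shared pattern, not a drop-in utility.
  static void copyOneReplica(DatanodeInfo srcDatanode, Block srcBlock,
      Block dstBlock, DatanodeInfo target, Configuration conf)
      throws IOException {
    // Open an RPC proxy to the datanode holding the source replica
    // (5 minute socket timeout, matching Example 3 below).
    ClientDatanodeProtocol proxy = DFSClient.createClientDatanodeProtocolProxy(
        srcDatanode, conf, 5 * 60 * 1000);
    try {
      // Ask that datanode to copy srcBlock into dstBlock on the target node.
      proxy.copyBlock(srcBlock, dstBlock, target);
    } finally {
      // Always release the RPC proxy.
      RPC.stopProxy(proxy);
    }
  }
}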
Example 1
Source File: FileFixer.java    From RDFS with Apache License 2.0
@Override
public void run() {

  String msg = "";
  try {
    // find a random datanode from the destination cluster
    DatanodeInfo[] targets = destFs.getClient().datanodeReport(DatanodeReportType.LIVE);
    DatanodeInfo target = targets[rand.nextInt(targets.length)];

    // find a source datanode from among the datanodes that host this block
    DatanodeInfo srcdn  = goodBlock.getLocations()[rand.nextInt(goodBlock.getLocations().length)];
  
    // This RPC is asynchronous: it returns immediately, before the physical
    // block copy on the datanode completes.
    msg = "File " + badfile + ": Copying block " + 
          goodBlock.getBlock().getBlockName() + " from " + srcdn.getName() +
          " to block " + badBlock.getBlock().getBlockName() + 
          " on " + target.getName();
    LOG.info(msg);
    ClientDatanodeProtocol datanode = createClientDatanodeProtocolProxy(srcdn, conf);
    datanode.copyBlock(goodBlock.getBlock(), badBlock.getBlock(), target);
    RPC.stopProxy(datanode);
    HighTideNode.getMetrics().fixSuccessfullyStarted.inc();
  } catch (Throwable e) {
    HighTideNode.getMetrics().fixFailedDatanodeError.inc();
    LOG.error(StringUtils.stringifyException(e) + msg + ". Failed to contact datanode.");
  }
}
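Example 1 calls a local createClientDatanodeProtocolProxy(srcdn, conf) helper whose body is not shown. A plausible reconstruction, assuming it simply delegates to the public DFSClient factory that Example 3 uses (the actual FileFixer helper may differ):

// Hypothetical reconstruction of FileFixer's helper; assumed to wrap the
// DFSClient factory with a fixed socket timeout.
private static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
    DatanodeInfo datanode, Configuration conf) throws IOException {
  return DFSClient.createClientDatanodeProtocolProxy(datanode, conf,
      5 * 60 * 1000); // 5 minute timeout, as in Example 3
}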
 
Example 2
Source File: FastCopy.java    From RDFS with Apache License 2.0
/**
 * Copies over a single replica of a block to a destination datanode.
 */
private void copyBlockReplica() {
  boolean error = false;
  try {
    // Use a timeout of 8 minutes for this RPC; this is sufficient since the
    // PendingReplicationMonitor timeout itself is 5 minutes.
    ClientDatanodeProtocol cdp = getDatanodeConnection(srcDn, conf,
        rpcTimeout);
    LOG.debug("Fast Copy : Copying block " + src.getBlockName() + " to "
        + dst.getBlockName() + " on " + dstDn.getHostName());
    // This is a blocking call that does not return until the block is
    // successfully copied on the Datanode.
    if (supportFederation) {
      cdp.copyBlock(srcNamespaceId, src, dstNamespaceId, dst, dstDn, false);
    } else {
      cdp.copyBlock(src, dst, dstDn, false);
    }
  } catch (Exception e) {
    String errMsg = "Fast Copy : Failed for Copying block "
      + src.getBlockName() + " to " + dst.getBlockName() + " on "
      + dstDn.getHostName();
    LOG.warn(errMsg, e);
    error = true;
    handleException(e);
  }
  updateBlockStatus(dst, error);
}
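Note the two overloads exercised here: on a federation-enabled cluster, copyBlock() additionally takes the source and destination namespace IDs, and both variants end with a boolean flag. Judging from the surrounding comments (Example 1 describes its flag-less call as asynchronous, while this example passes false and describes the call as blocking), the trailing boolean appears to select asynchronous operation; treat that reading as an inference from the comments rather than documented behavior.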
 
Example 3
Source File: TestCopyBlockAPI.java    From RDFS with Apache License 2.0
@Test
public void testCopyBlockAPI() throws Exception {
  // Generate source file and get its locations.
  String filename = "/testCopyBlockAPI";
  DFSTestUtil.createFile(fs, new Path(filename), 1023 * 10, (short) 3,
      (long) 0);
  FileStatus srcFileStatus = fs.getFileStatus(new Path(filename));
  LocatedBlocksWithMetaInfo lbkSrcMetaInfo = cluster.getNameNode()
      .openAndFetchMetaInfo(filename, 0, Long.MAX_VALUE);
  int srcNamespaceId = lbkSrcMetaInfo.getNamespaceID();
  LocatedBlock lbkSrc = lbkSrcMetaInfo.getLocatedBlocks().get(0);
  DatanodeInfo[] srcLocs = lbkSrc.getLocations();

  // Create destination file and add a single block.
  String newFile = "/testCopyBlockAPI_new";
  String clientName = newFile;
  fs.create(new Path(filename + "new"));
  cluster.getNameNode().create(newFile, srcFileStatus.getPermission(),
      clientName, true, true, srcFileStatus.getReplication(),
      srcFileStatus.getBlockSize());
  LocatedBlockWithMetaInfo lbkDstMetaInfo =
    cluster.getNameNode().addBlockAndFetchMetaInfo(newFile, clientName, null, srcLocs);
  int dstNamespaceId = lbkDstMetaInfo.getNamespaceID();
  LocatedBlock lbkDst = lbkDstMetaInfo;

  // Verify locations of src and destination block.
  DatanodeInfo[] dstLocs = lbkDst.getLocations();
  Arrays.sort(srcLocs);
  Arrays.sort(dstLocs);
  assertEquals(srcLocs.length, dstLocs.length);
  for (int i = 0; i < srcLocs.length; i++) {
    assertEquals(srcLocs[i], dstLocs[i]);
  }

  // Create a datanode RPC connection to the third source location.
  ClientDatanodeProtocol cdp2 = DFSClient.createClientDatanodeProtocolProxy(
      srcLocs[2], conf, 5 * 60 * 1000);

  Block srcBlock = new Block(lbkSrc.getBlock());
  Block dstBlock = new Block(lbkDst.getBlock());
  System.out.println("Copying src : " + srcBlock + " dst : " + dstBlock);

  // Find datanode object.
  DataNode datanode = null;
  for (DataNode dn : cluster.getDataNodes()) {
    DatanodeRegistration registration = dn.getDNRegistrationForNS(srcNamespaceId);
    if (registration.equals(srcLocs[0])) {
      datanode = dn;
      break;
    }
  }
  
  assertNotNull(datanode);

  // Submit a block transfer to location 2.
  ExecutorService pool = Executors.newSingleThreadExecutor();
  pool.submit(datanode.new DataTransfer(new DatanodeInfo[] { srcLocs[2] }, srcNamespaceId,
        srcBlock, dstNamespaceId, dstBlock, datanode));

  try {
    Thread.sleep(5000);
    // Submit another transfer to same location, should receive
    // BlockAlreadyExistsException.
    cdp2.copyBlock(srcNamespaceId, srcBlock, dstNamespaceId, dstBlock, srcLocs[2], false);
  } catch (RemoteException re) {
    // pass.
    return;
  } finally {
    // Shutdown RPC connections.
    RPC.stopProxy(cdp2);
  }
  fail("Second RPC did not throw Exception");
}