Java Code Examples for org.apache.hadoop.hdfs.DFSClient#createClientDatanodeProtocolProxy()

The following examples show how to use org.apache.hadoop.hdfs.DFSClient#createClientDatanodeProtocolProxy(). The examples are taken from open-source projects; the source file and license are noted above each example.
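
createClientDatanodeProtocolProxy() builds a ClientDatanodeProtocol proxy for issuing RPCs directly to a single datanode; as the examples below show, it takes the target DatanodeInfo, a Configuration, and an RPC timeout in milliseconds. As a minimal sketch of the usual create/use/stop pattern (the variable names and timeout value here are illustrative, not taken from either example):

ClientDatanodeProtocol proxy =
    DFSClient.createClientDatanodeProtocolProxy(datanodeInfo, conf, 60 * 1000);
try {
  // ... issue datanode-side RPCs through the proxy here ...
} finally {
  // Release the underlying RPC connection, as Example 2 does in its finally block.
  RPC.stopProxy(proxy);
}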
Example 1
Source File: FastCopy.java    From RDFS with Apache License 2.0
/**
 * Creates an RPC connection to a datanode if one is not already cached,
 * and caches the connection when a new RPC connection is created.
 *
 * @param dn
 *          the datanode to which we need to connect
 * @param conf
 *          the configuration for this RPC
 * @param timeout
 *          the RPC timeout for this connection
 * @return the RPC protocol object we can use to make RPC calls
 * @throws IOException
 */
private ClientDatanodeProtocol getDatanodeConnection(DatanodeInfo dn,
    Configuration conf, int timeout) throws IOException {
  // Unsynchronized first read to improve performance: this method is called
  // for every block, so the common cache-hit path avoids locking the map.
  ClientDatanodeProtocol cdp = datanodeMap.get(dn.getName());
  if (cdp != null) {
    return cdp;
  }
  synchronized (datanodeMap) {
    cdp = datanodeMap.get(dn.getName());
    if (cdp == null) {
      LOG.debug("Creating new RPC connection to : " + dn.getName());
      cdp = DFSClient.createClientDatanodeProtocolProxy(
          dn, conf, timeout);
      datanodeMap.put(dn.getName(), cdp);
    }
  }
  return cdp;
}
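
Example 1 caches one proxy per datanode in datanodeMap, so repeated blocks served by the same datanode reuse a single RPC connection instead of paying the proxy-creation cost per block. The cached proxies still have to be released eventually; a hedged sketch of a cleanup helper (not part of the original FastCopy source) could look like this:

/**
 * Hypothetical companion to the caching above: closes every cached
 * datanode proxy. FastCopy's actual shutdown path may differ.
 */
private void closeDatanodeConnections() {
  synchronized (datanodeMap) {
    for (ClientDatanodeProtocol cdp : datanodeMap.values()) {
      RPC.stopProxy(cdp); // tear down the underlying RPC connection
    }
    datanodeMap.clear();
  }
}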
 
Example 2
Source File: TestCopyBlockAPI.java    From RDFS with Apache License 2.0
@Test
public void testCopyBlockAPI() throws Exception {
  // Generate source file and get its locations.
  String filename = "/testCopyBlockAPI";
  DFSTestUtil.createFile(fs, new Path(filename), 1023 * 10, (short) 3,
      (long) 0);
  FileStatus srcFileStatus = fs.getFileStatus(new Path(filename));
  LocatedBlocksWithMetaInfo lbkSrcMetaInfo = cluster.getNameNode()
      .openAndFetchMetaInfo(filename, 0, Long.MAX_VALUE);
  int srcNamespaceId = lbkSrcMetaInfo.getNamespaceID();
  LocatedBlock lbkSrc = lbkSrcMetaInfo.getLocatedBlocks().get(0);
  DatanodeInfo[] srcLocs = lbkSrc.getLocations();

  // Create destination file and add a single block.
  String newFile = "/testCopyBlockAPI_new";
  String clientName = newFile;
  fs.create(new Path(filename + "new"));
  cluster.getNameNode().create(newFile, srcFileStatus.getPermission(),
      clientName, true, true, srcFileStatus.getReplication(),
      srcFileStatus.getBlockSize());
  LocatedBlockWithMetaInfo lbkDstMetaInfo =
    cluster.getNameNode().addBlockAndFetchMetaInfo(newFile, clientName, null, srcLocs);
  int dstNamespaceId = lbkDstMetaInfo.getNamespaceID();
  LocatedBlock lbkDst = lbkDstMetaInfo;

  // Verify locations of src and destination block.
  DatanodeInfo[] dstLocs = lbkDst.getLocations();
  Arrays.sort(srcLocs);
  Arrays.sort(dstLocs);
  assertEquals(srcLocs.length, dstLocs.length);
  for (int i = 0; i < srcLocs.length; i++) {
    assertEquals(srcLocs[i], dstLocs[i]);
  }

  // Create datanode rpc connections.
  ClientDatanodeProtocol cdp2 = DFSClient.createClientDatanodeProtocolProxy(
      srcLocs[2], conf, 5 * 60 * 1000);

  Block srcBlock = new Block(lbkSrc.getBlock());
  Block dstBlock = new Block(lbkDst.getBlock());
  System.out.println("Copying src : " + srcBlock + " dst : " + dstBlock);

  // Find datanode object.
  DataNode datanode = null;
  for (DataNode dn : cluster.getDataNodes()) {
    DatanodeRegistration registration = dn.getDNRegistrationForNS(srcNamespaceId);
    if (registration.equals(srcLocs[0])) {
      datanode = dn;
      break;
    }
  }
  
  assertNotNull(datanode);

  // Submit a block transfer to location 2.
  ExecutorService pool = Executors.newSingleThreadExecutor();
  pool.submit(datanode.new DataTransfer(new DatanodeInfo[] { srcLocs[2] }, srcNamespaceId,
        srcBlock, dstNamespaceId, dstBlock, datanode));

  try {
    Thread.sleep(5000);
    // Submit another transfer to same location, should receive
    // BlockAlreadyExistsException.
    cdp2.copyBlock(srcNamespaceId, srcBlock, dstNamespaceId, dstBlock, srcLocs[2], false);
  } catch (RemoteException re) {
    // pass.
    return;
  } finally {
    // Shutdown RPC connections.
    RPC.stopProxy(cdp2);
  }
  fail("Second RPC did not throw Exception");
}
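
Example 2 drives copyBlock() directly through the datanode proxy and expects the second, duplicate transfer to fail with a RemoteException (wrapping BlockAlreadyExistsException). Stripped of the test scaffolding, the core RPC sequence looks roughly like the following sketch, which assumes the namespace IDs, blocks, and target DatanodeInfo have already been resolved as in the test:

ClientDatanodeProtocol proxy = DFSClient.createClientDatanodeProtocolProxy(
    targetDatanode, conf, 5 * 60 * 1000); // same 5-minute RPC timeout as the test
try {
  // Ask the datanode to copy srcBlock (in srcNamespaceId) into dstBlock
  // (in dstNamespaceId) on targetDatanode; the final boolean flag matches
  // the value passed in the test above.
  proxy.copyBlock(srcNamespaceId, srcBlock, dstNamespaceId, dstBlock,
      targetDatanode, false);
} finally {
  RPC.stopProxy(proxy);
}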