org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol, the RPC interface a client uses to talk directly to a datanode. The project and source file each example was taken from are noted above its code.
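
Most of the examples follow the same lifecycle: obtain a proxy from one of the DFSUtil.createClientDatanodeProtocolProxy overloads, make one or more RPCs, and release the underlying connection with RPC.stopProxy. As a quick orientation, here is a minimal sketch of that pattern; the method name printDatanodeInfo and the address argument are placeholders, not taken from any of the projects below.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

static void printDatanodeInfo(String dnAddr) throws IOException {
  Configuration conf = new Configuration();
  // dnAddr is the datanode's IPC address, e.g. "dn-host:50020"
  InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
  ClientDatanodeProtocol proxy = DFSUtil.createClientDatanodeProtocolProxy(
      addr, UserGroupInformation.getCurrentUser(), conf,
      NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  try {
    // Ask the datanode for its local report (software version, uptime, ...).
    DatanodeLocalInfo info = proxy.getDatanodeInfo();
    System.out.println(info.getDatanodeLocalReport());
  } finally {
    // Always stop the proxy so the RPC connection is released.
    RPC.stopProxy(proxy);
  }
}
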
Example #1
Source File: BlockStorageLocationUtil.java    From big-c with Apache License 2.0
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  TraceScope scope =
      Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    scope.close();
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
 
Example #2
Source File: BlockReaderLocalLegacy.java    From hadoop with Apache License 2.0
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
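      // If the wait is interrupted, the exception is only logged and a null proxy may be returned.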
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
 
Example #3
Source File: FastCopy.java    From RDFS with Apache License 2.0
/**
 * Returns an RPC connection to the given datanode, creating and caching
 * the connection if it is not already cached.
 *
 * @param dn
 *          the datanode to connect to
 * @param conf
 *          the configuration for this RPC
 * @param timeout
 *          the RPC timeout for this connection
 * @return the RPC protocol object we can use to make RPC calls
 * @throws IOException
 */
private ClientDatanodeProtocol getDatanodeConnection(DatanodeInfo dn,
    Configuration conf, int timeout) throws IOException {
  // Read from the map without synchronization first to improve performance;
  // we go through this method for each block.
  ClientDatanodeProtocol cdp = datanodeMap.get(dn.getName());
  if (cdp != null) {
    return cdp;
  }
  synchronized (datanodeMap) {
    cdp = datanodeMap.get(dn.getName());
    if (cdp == null) {
      LOG.debug("Creating new RPC connection to : " + dn.getName());
      cdp = DFSClient.createClientDatanodeProtocolProxy(
          dn, conf, timeout);
      datanodeMap.put(dn.getName(), cdp);
    }
  }
  return cdp;
}
 
Example #4
Source File: DFSAdmin.java    From big-c with Apache License 2.0
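/** Creates a {@link ClientDatanodeProtocol} proxy to the datanode given as a "host:ipc_port" string. */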
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For a datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Example #5
Source File: FileFixer.java    From RDFS with Apache License 2.0
/**
 * Set up a session with the specified datanode.
 */
static ClientDatanodeProtocol createClientDatanodeProtocolProxy (
    DatanodeInfo datanodeid, Configuration conf) throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
    datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
  }
  try {
    return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, conf);
  } catch (RPC.VersionMismatch e) {
    long clientVersion = e.getClientVersion();
    long datanodeVersion = e.getServerVersion();
    if (clientVersion > datanodeVersion &&
        !ProtocolCompatible.isCompatibleClientDatanodeProtocol(
            clientVersion, datanodeVersion)) {
      throw new RPC.VersionIncompatible(
          ClientDatanodeProtocol.class.getName(), clientVersion, datanodeVersion);
    }
    return (ClientDatanodeProtocol)e.getProxy();
  }
}
 
Example #6
Source File: DFSClient.java    From RDFS with Apache License 2.0
static ProtocolProxy<ClientDatanodeProtocol> createClientDNProtocolProxy (
    DatanodeID datanodeid, Configuration conf, int socketTimeout)
    throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
    datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
  }
  UserGroupInformation ugi;
  try {
    ugi = UserGroupInformation.login(conf);
  } catch (LoginException le) {
    throw new RuntimeException("Couldn't login!");
  }

  return RPC.getProtocolProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
 
Example #7
Source File: DFSAdmin.java    From hadoop with Apache License 2.0
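/** Handler for "hdfs dfsadmin -shutdownDatanode <datanode_host:ipc_port> [upgrade]". */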
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length-1 == i+1) {
    if ("upgrade".equalsIgnoreCase(argv[i+1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
 
Example #8
Source File: TestDFSClientRetries.java    From big-c with Apache License 2.0
/** Test that a timeout occurs when the DN does not respond to an RPC.
 * Start up a server and ask it to sleep for n seconds. Make an
 * RPC to the server with rpcTimeout set to less than n, and ensure
 * that a SocketTimeoutException is thrown.
 */
@Test
public void testClientDNProtocolTimeout() throws IOException {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

  ClientDatanodeProtocol proxy = null;

  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(
        fakeDnId, conf, 500, false, fakeBlock);

    proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
    fail ("Did not get expected exception: SocketTimeoutException");
  } catch (SocketTimeoutException e) {
    LOG.info("Got the expected Exception: SocketTimeoutException");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Example #9
Source File: DFSAdmin.java    From big-c with Apache License 2.0
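/** Handler for "hdfs dfsadmin -triggerBlockReport [-incremental] <datanode_host:ipc_port>". */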
public int triggerBlockReport(String[] argv) throws IOException {
  List<String> args = new LinkedList<String>();
  for (int j = 1; j < argv.length; j++) {
    args.add(argv[j]);
  }
  boolean incremental = StringUtils.popOption("-incremental", args);
  String hostPort = StringUtils.popFirstNonOption(args);
  if (hostPort == null) {
    System.err.println("You must specify a host:port pair.");
    return 1;
  }
  if (!args.isEmpty()) {
    System.err.print("Can't understand arguments: " +
      Joiner.on(" ").join(args) + "\n");
    return 1;
  }
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(hostPort);
  try {
    dnProxy.triggerBlockReport(
        new BlockReportOptions.Factory().
            setIncremental(incremental).
            build());
  } catch (IOException e) {
    System.err.println("triggerBlockReport error: " + e);
    return 1;
  }
  System.out.println("Triggering " +
      (incremental ? "an incremental " : "a full ") +
      "block report on " + hostPort + ".");
  return 0;
}
 
Example #10
Source File: DFSAdmin.java    From big-c with Apache License 2.0
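/** Handler for the "start" step of "hdfs dfsadmin -reconfig datanode <host:ipc_port> start". */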
int startReconfiguration(String nodeType, String address) throws IOException {
  if ("datanode".equals(nodeType)) {
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(address);
    dnProxy.startReconfiguration();
    System.out.println("Started reconfiguration task on DataNode " + address);
    return 0;
  } else {
    System.err.println("Node type " + nodeType +
        " does not support reconfiguration.");
    return 1;
  }
}
 
Example #11
Source File: DFSAdmin.java    From big-c with Apache License 2.0
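/** Handler for "hdfs dfsadmin -deleteBlockPool <datanode_host:ipc_port> <blockPoolId> [force]". */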
private int deleteBlockPool(String[] argv, int i) throws IOException {
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
  boolean force = false;
  if (argv.length-1 == i+2) {
    if ("force".equals(argv[i+2])) {
      force = true;
    } else {
      printUsage("-deleteBlockPool");
      return -1;
    }
  }
  dnProxy.deleteBlockPool(argv[i+1], force);
  return 0;
}
 
Example #12
Source File: DFSAdmin.java    From big-c with Apache License 2.0
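/** Handler for "hdfs dfsadmin -refreshNamenodes <datanode_host:ipc_port>". */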
private int refreshNamenodes(String[] argv, int i) throws IOException {
  String datanode = argv[i];
  ClientDatanodeProtocol refreshProtocol = getDataNodeProxy(datanode);
  refreshProtocol.refreshNamenodes();
  
  return 0;
}
 
Example #13
Source File: DFSAdmin.java    From big-c with Apache License 2.0
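/** Handler for "hdfs dfsadmin -getDatanodeInfo <datanode_host:ipc_port>". */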
private int getDatanodeInfo(String[] argv, int i) throws IOException {
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
  try {
    DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
    System.out.println(dnInfo.getDatanodeLocalReport());
  } catch (IOException ioe) {
    System.err.println("Datanode unreachable.");
    return -1;
  }
  return 0;
}
 
Example #14
Source File: TestBlockToken.java    From big-c with Apache License 2.0
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);

  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  final UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(block3.toString());
  ticket.addToken(token);

  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
        NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example #15
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example #16
Source File: DataNode.java    From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public long getProtocolVersion(String protocol, long clientVersion
    ) throws IOException {
  if (protocol.equals(InterDatanodeProtocol.class.getName())) {
    return InterDatanodeProtocol.versionID; 
  } else if (protocol.equals(ClientDatanodeProtocol.class.getName())) {
    return ClientDatanodeProtocol.versionID; 
  }
  throw new IOException("Unknown protocol to " + getClass().getSimpleName()
      + ": " + protocol);
}
 
Example #17
Source File: FileFixer.java    From RDFS with Apache License 2.0
@Override
public void run() {

  String msg = "";
  try {
    // find a random datanode from the destination cluster
    DatanodeInfo[] targets = destFs.getClient().datanodeReport(DatanodeReportType.LIVE);
    DatanodeInfo target = targets[rand.nextInt(targets.length)];

    // find a source datanode from among the datanodes that host this block
    DatanodeInfo srcdn  = goodBlock.getLocations()[rand.nextInt(goodBlock.getLocations().length)];
  
    // The RPC is asynchronous: it returns immediately, even before the
    // physical block copy on the datanode starts.
    msg = "File " + badfile + ": Copying block " + 
          goodBlock.getBlock().getBlockName() + " from " + srcdn.getName() +
          " to block " + badBlock.getBlock().getBlockName() + 
          " on " + target.getName();
    LOG.info(msg);
    ClientDatanodeProtocol datanode = createClientDatanodeProtocolProxy(srcdn, conf);
    datanode.copyBlock(goodBlock.getBlock(), badBlock.getBlock(), target);
    RPC.stopProxy(datanode);
    HighTideNode.getMetrics().fixSuccessfullyStarted.inc();
  } catch (Throwable e) {
    HighTideNode.getMetrics().fixFailedDatanodeError.inc();
    LOG.error(StringUtils.stringifyException(e) + msg + ". Failed to contact datanode.");
  }
}
 
Example #18
Source File: DataNode.java    From RDFS with Apache License 2.0
/** {@inheritDoc} */
public long getProtocolVersion(String protocol, long clientVersion
    ) throws IOException {
  if (protocol.equals(InterDatanodeProtocol.class.getName())) {
    return InterDatanodeProtocol.versionID;
  } else if (protocol.equals(ClientDatanodeProtocol.class.getName())) {
    checkVersion(protocol, clientVersion, ClientDatanodeProtocol.versionID);
    return ClientDatanodeProtocol.versionID;
  }
  throw new IOException("Unknown protocol to " + getClass().getSimpleName()
      + ": " + protocol);
}
 
Example #19
Source File: FastCopy.java    From RDFS with Apache License 2.0
/**
 * Copies over a single replica of a block to a destination datanode.
 */
private void copyBlockReplica() {
  boolean error = false;
  try {
    // Timeout of 8 minutes for this RPC; this is sufficient since the
    // PendingReplicationMonitor timeout itself is 5 minutes.
    ClientDatanodeProtocol cdp = getDatanodeConnection(srcDn, conf,
        rpcTimeout);
    LOG.debug("Fast Copy : Copying block " + src.getBlockName() + " to "
        + dst.getBlockName() + " on " + dstDn.getHostName());
    // This is a blocking call that does not return until the block is
    // successfully copied on the Datanode.
    if (supportFederation) {
      cdp.copyBlock(srcNamespaceId, src, 
          dstNamespaceId, dst, dstDn,
          false);
    } else {
      cdp.copyBlock(src, dst, dstDn,
          false);
    }
  } catch (Exception e) {
    String errMsg = "Fast Copy : Failed for Copying block "
      + src.getBlockName() + " to " + dst.getBlockName() + " on "
      + dstDn.getHostName();
    LOG.warn(errMsg, e);
    error = true;
    handleException(e);
  }
  updateBlockStatus(dst, error);
}
 
Example #20
Source File: FastCopy.java    From RDFS with Apache License 2.0
/**
 * Tears down all RPC connections; you MUST call this once you are done.
 * @throws IOException
 */
public void shutdown() throws IOException {
  // Clean up RPC connections.
  Iterator <ClientDatanodeProtocol> connections =
    datanodeMap.values().iterator();
  while(connections.hasNext()) {
    ClientDatanodeProtocol cnxn = connections.next();
    RPC.stopProxy(cnxn);
  }
  datanodeMap.clear();
  executor.shutdownNow();
}
 
Example #21
Source File: DFSUtil.java    From hadoop with Apache License 2.0
/** Create a {@link ClientDatanodeProtocol} proxy */
public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
  return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
      connectToDnViaHostname, locatedBlock);
}
 
Example #22
Source File: DFSUtil.java    From hadoop with Apache License 2.0
/** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
    DatanodeID datanodeid, Configuration conf, int socketTimeout,
    boolean connectToDnViaHostname) throws IOException {
  return new ClientDatanodeProtocolTranslatorPB(
      datanodeid, conf, socketTimeout, connectToDnViaHostname);
}
 
Example #23
Source File: BlockReaderLocalLegacy.java    From hadoop with Apache License 2.0
private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname,
    StorageType storageType) throws IOException {
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
      conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    // We cannot cache the path information for a replica on transient storage.
    // If the replica gets evicted, then it moves to a different path.  Then,
    // our next attempt to read from the cached path would fail to find the
    // file.  Additionally, the failure would cause us to disable legacy
    // short-circuit read for all subsequent use in the ClientContext.  Unlike
    // the newer short-circuit read implementation, we have no communication
    // channel for the DataNode to notify the client that the path has been
    // invalidated.  Therefore, our only option is to skip caching.
    if (pathinfo != null && !storageType.isTransient()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
 