org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB Java Examples

The following examples show how to use org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB. You can vote up the examples you find useful or vote down those you don't, and follow the links above each example to the original project or source file. You can also check out the related API usage in the sidebar.
Example #1
Source File: NameNodeProxies.java — from the hadoop project (Apache License 2.0)
/**
 * Builds a {@link ClientProtocol} client for the NameNode at {@code address},
 * speaking the protobuf ClientNamenodeProtocolPB wire protocol.
 *
 * @param address NameNode RPC address to connect to
 * @param conf configuration consulted for the RPC engine, retry policy and timeout
 * @param ugi user the proxy acts as
 * @param withRetries if true, wrap the translator in a retrying dynamic proxy
 * @param fallbackToSimpleAuth flag handed to the RPC layer (set when it falls
 *        back to simple auth — passed through to getProtocolProxy)
 * @return a ClientProtocol implementation backed by the remote NameNode
 * @throws IOException if the underlying RPC proxy cannot be created
 */
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  // Route all ClientNamenodeProtocolPB calls through the protobuf RPC engine.
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  final RetryPolicy retryPolicy = RetryUtils.getDefaultRetryPolicy(
      conf,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
      DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
      SafeModeException.class);

  final long protocolVersion = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  final ClientNamenodeProtocolPB rpcProxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, protocolVersion, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), retryPolicy,
      fallbackToSimpleAuth).getProxy();

  if (!withRetries) {
    // Plain translator: protobuf stub wrapped to look like ClientProtocol.
    return new ClientNamenodeProtocolTranslatorPB(rpcProxy);
  }

  // Retrying variant: the per-method map is empty, so every method falls
  // back to retryPolicy.
  ClientProtocol translator = new ClientNamenodeProtocolTranslatorPB(rpcProxy);
  Map<String, RetryPolicy> perMethodPolicies = new HashMap<String, RetryPolicy>();
  return (ClientProtocol) RetryProxy.create(
      ClientProtocol.class,
      new DefaultFailoverProxyProvider<ClientProtocol>(
          ClientProtocol.class, translator),
      perMethodPolicies,
      retryPolicy);
}
 
Example #2
Source File: TestIsMethodSupported.java — from the hadoop project (Apache License 2.0)
@Test
public void testClientNamenodeProtocol() throws IOException {
  // Build a non-HA ClientProtocol proxy against the test NameNode and check
  // that the server advertises the protobuf "mkdirs" method.
  ClientProtocol cp =
      NameNodeProxies.createNonHAProxy(
          conf, nnAddress, ClientProtocol.class,
          UserGroupInformation.getCurrentUser(), true).getProxy();
  // Fix: the original discarded the boolean result, so this test passed even
  // when the method was NOT supported. Fail explicitly on a false result
  // (AssertionError is java.lang, so no new import is needed).
  boolean supported = RpcClientUtil.isMethodSupported(cp,
      ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
      RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
  if (!supported) {
    throw new AssertionError(
        "mkdirs should be supported by ClientNamenodeProtocolPB");
  }
}
 
Example #3
Source File: ProxyServer.java — from the nnproxy project (Apache License 2.0)
/**
 * Starts the proxy's RPC server: wires ClientProtocol calls through this
 * object's invocation handler and exposes them over ClientNamenodeProtocolPB
 * on all interfaces.
 *
 * @throws IOException if the RPC server cannot be built or started
 */
public void start() throws IOException {
    // Number of RPC handler threads serving proxied client calls.
    final int handlerCount = conf.getInt(ProxyConfig.PROXY_HANDLER_COUNT,
            ProxyConfig.PROXY_HANDLER_COUNT_DEFAULT);

    // Both the client-facing and namenode-facing protocols speak protobuf.
    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);
    RPC.setProtocolEngine(conf, NamenodeProtocolPB.class, ProtobufRpcEngine.class);

    // Every ClientProtocol method call is dispatched to invocationHandler.
    this.protocol = (ClientProtocol) Proxy.newProxyInstance(
            getClass().getClassLoader(),
            new Class[]{ClientProtocol.class},
            this.invocationHandler);

    // Translate incoming protobuf requests into ClientProtocol calls.
    ClientNamenodeProtocolPB translator =
            new ClientNamenodeProtocolServerSideTranslatorPB(this.protocol);
    BlockingService clientService = ClientNamenodeProtocolProtos.ClientNamenodeProtocol
            .newReflectiveBlockingService(translator);

    final int rpcPort = conf.getInt(ProxyConfig.RPC_PORT, ProxyConfig.RPC_PORT_DEFAULT);

    this.rpcServer = new RPC.Builder(conf)
            .setProtocol(org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
            .setInstance(clientService)
            .setBindAddress("0.0.0.0")
            .setPort(rpcPort)
            .setNumHandlers(handlerCount)
            .setVerbose(false)
            .build();
    this.rpcServer.start();

    // Record the port the server actually bound to.
    InetSocketAddress bound = rpcServer.getListenerAddress();
    rpcAddress = new InetSocketAddress("0.0.0.0", bound.getPort());
}
 
Example #4
Source File: NameNodeProxies.java — from the big-c project (Apache License 2.0)
/**
 * Builds a {@link ClientProtocol} client for the NameNode at {@code address},
 * backed by the protobuf ClientNamenodeProtocolPB wire protocol.
 *
 * @param address NameNode RPC address to connect to
 * @param conf configuration consulted for the RPC engine, retry policy and timeout
 * @param ugi user the proxy acts as
 * @param withRetries if true, wrap the translator in a retrying dynamic proxy
 * @param fallbackToSimpleAuth flag handed through to getProtocolProxy (set by
 *        the RPC layer — presumably when it falls back to simple auth; confirm
 *        against RPC.getProtocolProxy)
 * @return a ClientProtocol implementation backed by the remote NameNode
 * @throws IOException if the underlying RPC proxy cannot be created
 */
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  // Route all ClientNamenodeProtocolPB calls through the protobuf RPC engine.
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  // Default retry policy read from the DFS client retry-policy config keys;
  // SafeModeException is passed as the remote exception class per the
  // RetryUtils.getDefaultRetryPolicy contract.
  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);
  
  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    // No per-method overrides: every method uses defaultPolicy.
    Map<String, RetryPolicy> methodNameToPolicyMap 
               = new HashMap<String, RetryPolicy>();
  
    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    // No retries requested: return the bare translator.
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
 
Example #5
Source File: TestIsMethodSupported.java — from the big-c project (Apache License 2.0)
@Test
public void testClientNamenodeProtocol() throws IOException {
  // Build a non-HA ClientProtocol proxy against the test NameNode and check
  // that the server advertises the protobuf "mkdirs" method.
  ClientProtocol cp =
      NameNodeProxies.createNonHAProxy(
          conf, nnAddress, ClientProtocol.class,
          UserGroupInformation.getCurrentUser(), true).getProxy();
  // Fix: the original discarded the boolean result, so this test passed even
  // when the method was NOT supported. Fail explicitly on a false result
  // (AssertionError is java.lang, so no new import is needed).
  boolean supported = RpcClientUtil.isMethodSupported(cp,
      ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
      RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
  if (!supported) {
    throw new AssertionError(
        "mkdirs should be supported by ClientNamenodeProtocolPB");
  }
}
 
Example #6
Source File: HdfsPlugin.java — from the crate project (Apache License 2.0)
private static Void eagerInit() {
    /*
     * Hadoop RPC wire serialization uses ProtocolBuffers, and the generated
     * proto classes carry annotations describing security options (e.g.
     * Kerberos) and how to attach authentication data to messages.
     * SecurityUtil builds a service loader in a static initializer to read
     * that information, and the loader resolves its services from the
     * current thread's context class loader — which must therefore see the
     * Hadoop jars. Plugins do not run with their own class loader installed
     * as the context loader, so we install it just long enough for the
     * static initialization to happen, then restore the previous loader
     * because this thread is not ours to keep.
     */
    final Thread current = Thread.currentThread();
    final ClassLoader previousLoader = current.getContextClassLoader();
    try {
        current.setContextClassLoader(HdfsRepository.class.getClassLoader());
        // A null KerberosInfo means the SecurityInfo services were not found,
        // i.e. the intended class loader was not actually in effect.
        if (SecurityUtil.getKerberosInfo(ClientNamenodeProtocolPB.class, null) == null) {
            throw new RuntimeException("Could not initialize SecurityUtil: " +
                "Unable to find services for [org.apache.hadoop.security.SecurityInfo]");
        }
    } finally {
        current.setContextClassLoader(previousLoader);
    }
    return null;
}