Java Code Examples for org.apache.hadoop.net.NetUtils#getConnectAddress()

The following examples show how to use org.apache.hadoop.net.NetUtils#getConnectAddress(). They are taken from open-source projects; the source file and originating project are noted above each example.
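
All of the examples share the same underlying pattern: a server is bound to a wildcard and/or ephemeral address, and NetUtils.getConnectAddress() is then used to obtain a concrete address that clients can connect to. Below is a minimal sketch of that pattern, assembled from the snippets that follow; TestProtocol and TestImpl are hypothetical placeholders standing in for a real protocol interface and its implementation.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;

public void connectAddressSketch() throws Exception {
  Configuration conf = new Configuration();

  // Bind to the wildcard address on an ephemeral port (port 0).
  Server server = new RPC.Builder(conf)
      .setProtocol(TestProtocol.class)   // hypothetical protocol interface
      .setInstance(new TestImpl())       // hypothetical implementation
      .setBindAddress("0.0.0.0")
      .setPort(0)
      .setNumHandlers(2)
      .build();
  server.start();

  // getConnectAddress() returns the port the server actually bound to,
  // with the wildcard replaced by an address a client can reach.
  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  TestProtocol proxy = RPC.getProxy(
      TestProtocol.class, TestProtocol.versionID, addr, conf);
  try {
    proxy.ping();
  } finally {
    RPC.stopProxy(proxy);
    server.stop();
  }
}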
Example 1
Source File: TestIPC.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testIpcConnectTimeout() throws IOException {
  // Create the server but intentionally do not start it,
  // so the client's connection attempt will time out.
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // start client
  Client.setConnectTimeout(conf, 100);
  Client client = new Client(LongWritable.class, conf);
  // set the rpc timeout to twice the MIN_SLEEP_TIME
  try {
    client.call(new LongWritable(RANDOM.nextLong()),
            addr, null, null, MIN_SLEEP_TIME*2, conf);
    fail("Expected an exception to have been thrown");
  } catch (SocketTimeoutException e) {
    LOG.info("Get a SocketTimeoutException ", e);
  }
}
 
Example 2
Source File: TestRPCCompatibility.java    From hadoop with Apache License 2.0
@Test // Compatible new client & old server
public void testVersion2ClientVersion1Server() throws Exception {
  // create a server with two handlers
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);


  Version2Client client = new Version2Client();
  client.ping();
  assertEquals("hello", client.echo("hello"));
  
  // echo(int) is not supported by server, so returning 3
  // This verifies that echo(int) and echo(String)'s hash codes are different
  assertEquals(3, client.echo(3));
}
 
Example 3
Source File: TestRPCCompatibility.java    From hadoop with Apache License 2.0
@Test
public void testIsMethodSupported() throws IOException {
  server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
      .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.start();
  addr = NetUtils.getConnectAddress(server);

  TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
      TestProtocol2.versionID, addr, conf);
  boolean supported = RpcClientUtil.isMethodSupported(proxy,
      TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE,
      RPC.getProtocolVersion(TestProtocol2.class), "echo");
  Assert.assertTrue(supported);
  supported = RpcClientUtil.isMethodSupported(proxy,
      TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
      RPC.getProtocolVersion(TestProtocol2.class), "echo");
  Assert.assertFalse(supported);
}
 
Example 4
Source File: WebAppUtils.java    From hadoop with Apache License 2.0
private static String getResolvedAddress(InetSocketAddress address) {
  address = NetUtils.getConnectAddress(address);
  StringBuilder sb = new StringBuilder();
  InetAddress resolved = address.getAddress();
  if (resolved == null || resolved.isAnyLocalAddress() ||
      resolved.isLoopbackAddress()) {
    String lh = address.getHostName();
    try {
      lh = InetAddress.getLocalHost().getCanonicalHostName();
    } catch (UnknownHostException e) {
      //Ignore and fallback.
    }
    sb.append(lh);
  } else {
    sb.append(address.getHostName());
  }
  sb.append(":").append(address.getPort());
  return sb.toString();
}
 
Example 5
Source File: TestRMAuditLogger.java    From big-c with Apache License 2.0
/**
 * Test {@link RMAuditLogger} with IP set.
 */
@Test  
public void testRMAuditLoggerWithIP() throws Exception {
  Configuration conf = new Configuration();
  // start the IPC server
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
      .setPort(0).setNumHandlers(5).setVerbose(true).build();
  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // Make a client connection and test the audit log
  TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
                         TestProtocol.versionID, addr, conf);
  // Start the testcase
  proxy.ping();

  server.stop();
}
 
Example 6
Source File: TestRPCCompatibility.java    From big-c with Apache License 2.0
@Test  // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
  // create a server with two handlers
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  proxy = RPC.getProtocolProxy(
      TestProtocol0.class, TestProtocol0.versionID, addr, conf);

  TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
  proxy0.ping();
}
 
Example 7
Source File: TestDFSClientRetries.java    From hadoop with Apache License 2.0
/** Test that timeout occurs when DN does not respond to RPC.
 * Start up a server and ask it to sleep for n seconds. Make an
 * RPC to the server and set rpcTimeout to less than n and ensure
 * that socketTimeoutException is obtained
 */
@Test
public void testClientDNProtocolTimeout() throws IOException {
  final Server server = new TestServer(1, true);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  
  ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
  LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

  ClientDatanodeProtocol proxy = null;

  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(
        fakeDnId, conf, 500, false, fakeBlock);

    proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
    fail ("Did not get expected exception: SocketTimeoutException");
  } catch (SocketTimeoutException e) {
    LOG.info("Got the expected Exception: SocketTimeoutException");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Example 8
Source File: TestSaslRPC.java    From hadoop with Apache License 2.0
private void doDigestRpc(Server server, TestTokenSecretManager sm
                         ) throws Exception {
  server.start();

  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
      sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  TestSaslProtocol proxy = null;
  try {
    proxy = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, conf);
    AuthMethod authMethod = proxy.getAuthMethod();
    assertEquals(TOKEN, authMethod);
    //QOP must be auth
    assertEquals(expectedQop.saslQop,
                 RPC.getConnectionIdForProxy(proxy).getSaslQop());            
    proxy.ping();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 9
Source File: TestRPC.java    From hadoop-gpu with Apache License 2.0
private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
  SecurityUtil.setPolicy(new ConfiguredPolicy(conf, new TestPolicyProvider()));
  
  Server server = RPC.getServer(new TestImpl(), ADDRESS, 0, 5, true, conf);

  TestProtocol proxy = null;

  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  
  try {
    proxy = (TestProtocol)RPC.getProxy(
        TestProtocol.class, TestProtocol.versionID, addr, conf);
    proxy.ping();

    if (expectFailure) {
      fail("Expect RPC.getProxy to fail with AuthorizationException!");
    }
  } catch (RemoteException e) {
    if (expectFailure) {
      assertTrue(e.unwrapRemoteException() instanceof AuthorizationException);
    } else {
      throw e;
    }
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 10
Source File: TestIPC.java    From hadoop with Apache License 2.0
/**
 * Test if the rpc server gets the default retry count (0) from client.
 */
@Test(timeout=60000)
public void testInitialCallRetryCount() throws IOException {
  // The retry count is never set on this client, so the server should see the default (0)
  final Client client = new Client(LongWritable.class, conf);

  // Attach a listener that checks the retry count the server sees for each call.
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      // we have not set the retry count for the client, thus on the server
      // side we should see retry count as 0
      Assert.assertEquals(0, Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
 
Example 11
Source File: MiniRPCBenchmark.java    From big-c with Apache License 2.0
/** Get RPC server address */
InetSocketAddress getAddress() {
  if(rpcServer == null) return null;
  return NetUtils.getConnectAddress(rpcServer);
}
 
Example 12
Source File: TestIPC.java    From hadoop with Apache License 2.0
/**
 * Test if
 * (1) the rpc server uses the call id/retry provided by the rpc client, and
 * (2) the rpc client receives the same call id/retry from the rpc server.
 */
@Test(timeout=60000)
public void testCallIdAndRetry() throws IOException {
  final CallInfo info = new CallInfo();

  // Override client to store the call info and check response
  final Client client = new Client(LongWritable.class, conf) {
    @Override
    Call createCall(RpcKind rpcKind, Writable rpcRequest) {
      final Call call = super.createCall(rpcKind, rpcRequest);
      info.id = call.id;
      info.retry = call.retry;
      return call;
    }
    
    @Override
    void checkResponse(RpcResponseHeaderProto header) throws IOException {
      super.checkResponse(header);
      Assert.assertEquals(info.id, header.getCallId());
      Assert.assertEquals(info.retry, header.getRetryCount());
    }
  };

  // Attach a listener that tracks every call received by the server.
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      Assert.assertEquals(info.id, Server.getCallId());
      Assert.assertEquals(info.retry, Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
 
Example 13
Source File: TestIPC.java    From hadoop with Apache License 2.0
/**
 * Tests that the client generates a unique, sequential call ID for each RPC
 * call, even if multiple threads are using the same client.
 * @throws InterruptedException
 */
@Test(timeout=60000)
public void testUniqueSequentialCallIds()
    throws IOException, InterruptedException {
  int serverThreads = 10, callerCount = 100, perCallerCallCount = 100;
  TestServer server = new TestServer(serverThreads, false);

  // Attach a listener that tracks every call ID received by the server.  This
  // list must be synchronized, because multiple server threads will add to it.
  final List<Integer> callIds = Collections.synchronizedList(
    new ArrayList<Integer>());
  server.callListener = new Runnable() {
    @Override
    public void run() {
      callIds.add(Server.getCallId());
    }
  };

  Client client = new Client(LongWritable.class, conf);

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    SerialCaller[] callers = new SerialCaller[callerCount];
    for (int i = 0; i < callerCount; ++i) {
      callers[i] = new SerialCaller(client, addr, perCallerCallCount);
      callers[i].start();
    }
    for (int i = 0; i < callerCount; ++i) {
      callers[i].join();
      assertFalse(callers[i].failed);
    }
  } finally {
    client.stop();
    server.stop();
  }

  int expectedCallCount = callerCount * perCallerCallCount;
  assertEquals(expectedCallCount, callIds.size());

  // It is not guaranteed that the server executes requests in sequential order
  // of client call ID, so we must sort the call IDs before checking that it
  // contains every expected value.
  Collections.sort(callIds);
  final int startID = callIds.get(0).intValue();
  for (int i = 0; i < expectedCallCount; ++i) {
    assertEquals(startID + i, callIds.get(i).intValue());
  }
}
 
Example 14
Source File: TestIPC.java    From big-c with Apache License 2.0
private void checkBlocking(int readers, int readerQ, int callQ) throws Exception {
  int handlers = 1; // makes it easier
  
  final Configuration conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY, readerQ);

  // send in enough clients to block up the handlers, callq, and readers
  int initialClients = readers + callQ + handlers;
  // max connections we should ever end up accepting at once
  int maxAccept = initialClients + readers*readerQ + 1; // 1 = listener
  // stress it with 2X the max
  int clients = maxAccept*2;
  
  final AtomicInteger failures = new AtomicInteger(0);
  final CountDownLatch callFinishedLatch = new CountDownLatch(clients);

  // start server
  final TestServerQueue server =
      new TestServerQueue(clients, readers, callQ, handlers, conf);
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  server.start();

  Client.setConnectTimeout(conf, 10000);
  
  // instantiate the threads, will start in batches
  Thread[] threads = new Thread[clients];
  for (int i=0; i<clients; i++) {
    threads[i] = new Thread(new Runnable() {
      @Override
      public void run() {
        Client client = new Client(LongWritable.class, conf);
        try {
          client.call(new LongWritable(Thread.currentThread().getId()),
              addr, null, null, 60000, conf);
        } catch (Throwable e) {
          LOG.error(e);
          failures.incrementAndGet();
          return;
        } finally {
          callFinishedLatch.countDown();            
          client.stop();
        }
      }
    });
  }
  
  // start enough clients to block up the handler, callq, and each reader;
  // let the calls sequentially slot in to avoid some readers blocking
  // and others not blocking in the race to fill the callq
  for (int i=0; i < initialClients; i++) {
    threads[i].start();
    if (i==0) {
      // let first reader block in a call
      server.firstCallLatch.await();
    } else if (i <= callQ) {
      // let subsequent readers jam the callq, will happen immediately 
      while (server.getCallQueueLen() != i) {
        Thread.sleep(1);
      }
    } // additional threads block the readers trying to add to the callq
  }

  // wait till everything is slotted, should happen immediately
  Thread.sleep(10);
  if (server.getNumOpenConnections() < initialClients) {
    LOG.info("(initial clients) need:"+initialClients+" connections have:"+server.getNumOpenConnections());
    Thread.sleep(100);
  }
  LOG.info("ipc layer should be blocked");
  assertEquals(callQ, server.getCallQueueLen());
  assertEquals(initialClients, server.getNumOpenConnections());
  
  // now flood the server with the rest of the connections, the reader's
  // connection queues should fill and then the listener should block
  for (int i=initialClients; i<clients; i++) {
    threads[i].start();
  }
  Thread.sleep(10);
  if (server.getNumOpenConnections() < maxAccept) {
    LOG.info("(max clients) need:"+maxAccept+" connections have:"+server.getNumOpenConnections());
    Thread.sleep(100);
  }
  // check a few times to make sure we didn't go over
  for (int i=0; i<4; i++) {
    assertEquals(maxAccept, server.getNumOpenConnections());
    Thread.sleep(100);
  }
  
  // sanity check that no calls have finished
  assertEquals(clients, callFinishedLatch.getCount());
  LOG.info("releasing the calls");
  server.callBlockLatch.countDown();
  callFinishedLatch.await();
  for (Thread t : threads) {
    t.join();
  }
  assertEquals(0, failures.get());
  server.stop();
}
 
Example 15
Source File: TestDoAsEffectiveUser.java    From hadoop with Apache License 2.0
@Test
public void testTokenBySuperUser() throws Exception {
  TestTokenSecretManager sm = new TestTokenSecretManager();
  final Configuration newConf = new Configuration(masterConf);
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
  UserGroupInformation.setConfiguration(newConf);
  final Server server = new RPC.Builder(newConf)
      .setProtocol(TestProtocol.class).setInstance(new TestImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .setSecretManager(sm).build();

  server.start();

  final UserGroupInformation current = UserGroupInformation
      .createUserForTesting(REAL_USER_NAME, GROUP_NAMES);
  
  refreshConf(newConf);
  
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()), new Text("SomeSuperUser"));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
      sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);
  String retVal = current.doAs(new PrivilegedExceptionAction<String>() {
    @Override
    public String run() throws Exception {
      try {
        proxy = RPC.getProxy(TestProtocol.class,
            TestProtocol.versionID, addr, newConf);
        String ret = proxy.aMethod();
        return ret;
      } catch (Exception e) {
        e.printStackTrace();
        throw e;
      } finally {
        server.stop();
        if (proxy != null) {
          RPC.stopProxy(proxy);
        }
      }
    }
  });
  String expected = REAL_USER_NAME + " (auth:TOKEN) via SomeSuperUser (auth:SIMPLE)";
  Assert.assertEquals(retVal + "!=" + expected, expected, retVal);
}
 
Example 16
Source File: ContainerManagerImpl.java    From hadoop with Apache License 2.0
@Override
protected void serviceStart() throws Exception {

  // Enqueue user dirs in deletion context

  Configuration conf = getConfig();
  final InetSocketAddress initialAddress = conf.getSocketAddr(
      YarnConfiguration.NM_BIND_HOST,
      YarnConfiguration.NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_PORT);
  boolean usingEphemeralPort = (initialAddress.getPort() == 0);
  if (context.getNMStateStore().canRecover() && usingEphemeralPort) {
    throw new IllegalArgumentException("Cannot support recovery with an "
        + "ephemeral server port. Check the setting of "
        + YarnConfiguration.NM_ADDRESS);
  }
  // If recovering then delay opening the RPC service until the recovery
  // of resources and containers have completed, otherwise requests from
  // clients during recovery can interfere with the recovery process.
  final boolean delayedRpcServerStart =
      context.getNMStateStore().canRecover();

  Configuration serverConf = new Configuration(conf);

  // always enforce it to be token-based.
  serverConf.set(
    CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
    SaslRpcServer.AuthMethod.TOKEN.toString());
  
  YarnRPC rpc = YarnRPC.create(conf);

  server =
      rpc.getServer(ContainerManagementProtocol.class, this, initialAddress, 
          serverConf, this.context.getNMTokenSecretManager(),
          conf.getInt(YarnConfiguration.NM_CONTAINER_MGR_THREAD_COUNT, 
              YarnConfiguration.DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT));
  
  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
      false)) {
    refreshServiceAcls(conf, new NMPolicyProvider());
  }
  
  LOG.info("Blocking new container-requests as container manager rpc" +
  		" server is still starting.");
  this.setBlockNewContainerRequests(true);

  String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);
  String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
  String hostOverride = null;
  if (bindHost != null && !bindHost.isEmpty()
      && nmAddress != null && !nmAddress.isEmpty()) {
    //a bind-host case with an address, to support overriding the first
    //hostname found when querying for our hostname with the specified
    //address, combine the specified address with the actual port listened
    //on by the server
    hostOverride = nmAddress.split(":")[0];
  }

  // setup node ID
  InetSocketAddress connectAddress;
  if (delayedRpcServerStart) {
    connectAddress = NetUtils.getConnectAddress(initialAddress);
  } else {
    server.start();
    connectAddress = NetUtils.getConnectAddress(server);
  }
  NodeId nodeId = buildNodeId(connectAddress, hostOverride);
  ((NodeManager.NMContext)context).setNodeId(nodeId);
  this.context.getNMTokenSecretManager().setNodeId(nodeId);
  this.context.getContainerTokenSecretManager().setNodeId(nodeId);

  // start remaining services
  super.serviceStart();

  if (delayedRpcServerStart) {
    waitForRecoveredContainers();
    server.start();

    // check that the node ID is as previously advertised
    connectAddress = NetUtils.getConnectAddress(server);
    NodeId serverNode = buildNodeId(connectAddress, hostOverride);
    if (!serverNode.equals(nodeId)) {
      throw new IOException("Node mismatch after server started, expected '"
          + nodeId + "' but found '" + serverNode + "'");
    }
  }

  LOG.info("ContainerManager started at " + connectAddress);
  LOG.info("ContainerManager bound to " + initialAddress);
}
 
Example 17
Source File: TestDoAsEffectiveUser.java    From big-c with Apache License 2.0
@Test
public void testRealUserIPNotSpecified() throws IOException {
  final Configuration conf = new Configuration();
  conf.setStrings(DefaultImpersonationProvider.getTestProvider().
      getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();

  refreshConf(conf);

  try {
    server.start();

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    UserGroupInformation realUserUgi = UserGroupInformation
        .createRemoteUser(REAL_USER_NAME);

    UserGroupInformation proxyUserUgi = UserGroupInformation
        .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
    String retVal = proxyUserUgi
        .doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            proxy = RPC.getProxy(TestProtocol.class,
                TestProtocol.versionID, addr, conf);
            String ret = proxy.aMethod();
            return ret;
          }
        });

    Assert.fail("The RPC must have failed " + retVal);
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 18
Source File: MiniRPCBenchmark.java    From hadoop with Apache License 2.0
/** Get RPC server address */
InetSocketAddress getAddress() {
  if(rpcServer == null) return null;
  return NetUtils.getConnectAddress(rpcServer);
}
 
Example 19
Source File: StreamingAppMasterService.java    From Bats with Apache License 2.0
@Override
protected void serviceStart() throws Exception
{
  super.serviceStart();
  if (UserGroupInformation.isSecurityEnabled()) {
    delegationTokenManager.startThreads();
  }

  // write the connect address for containers to DFS
  InetSocketAddress connectAddress = NetUtils.getConnectAddress(this.heartbeatListener.getAddress());
  URI connectUri = RecoverableRpcProxy.toConnectURI(connectAddress);
  FSRecoveryHandler recoveryHandler = new FSRecoveryHandler(dag.assertAppPath(), getConfig());
  recoveryHandler.writeConnectUri(connectUri.toString());

  // start web service
  try {
    org.mortbay.log.Log.setLog(null);
  } catch (Throwable throwable) {
    // SPOI-2687. As part of Pivotal Certification, we need to catch ClassNotFoundException as Pivotal was using
    // Jetty 7 where as other distros are using Jetty 6.
    // LOG.error("can't set the log to null: ", throwable);
  }

  try {
    Configuration config = SecurityUtils.configureWebAppSecurity(getConfig(), dag.getValue(Context.DAGContext.SSL_CONFIG));
    WebApp webApp = WebApps.$for("stram", StramAppContext.class, appContext, "ws").with(config).start(new StramWebApp(this.dnmgr));
    LOG.info("Started web service at port: " + webApp.port());
    // best effort to produce FQDN for the client to connect with
    // (when SSL is enabled, it may be required to match the certificate)
    connectAddress = NetUtils.getConnectAddress(webApp.getListenerAddress());
    String hostname = connectAddress.getAddress().getCanonicalHostName();
    if (hostname.equals(connectAddress.getAddress().getHostAddress())) {
      // lookup didn't yield a name
      hostname = connectAddress.getHostName();
    }
    appMasterTrackingUrl = hostname + ":" + webApp.port();
    if (ConfigUtils.isSSLEnabled(config)) {
      appMasterTrackingUrl = "https://" + appMasterTrackingUrl;
    }
    LOG.info("Setting tracking URL to: " + appMasterTrackingUrl);
  } catch (Exception e) {
    LOG.error("Webapps failed to start. Ignoring for now:", e);
  }
}
 
Example 20
Source File: Configuration.java    From flink with Apache License 2.0
/**
 * Set the socket address a client can use to connect for the
 * <code>name</code> property as a <code>host:port</code>.  The wildcard
 * address is replaced with the local host's address.
 * @param name property name.
 * @param addr InetSocketAddress of a listener to store in the given property
 * @return InetSocketAddress for clients to connect
 */
public InetSocketAddress updateConnectAddr(String name,
                                           InetSocketAddress addr) {
  final InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr);
  setSocketAddr(name, connectAddr);
  return connectAddr;
}
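 
A hedged usage sketch for updateConnectAddr(): after an IPC server has started on a wildcard or ephemeral bind address, the helper stores a connectable host:port back into the configuration and returns it, as the Javadoc above describes. The property key "my.service.address" below is illustrative only, not a real Hadoop or YARN configuration key.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;

// Assumes "server" has already been started on a bind address such as 0.0.0.0:0.
InetSocketAddress advertiseAddress(Configuration conf, Server server) {
  InetSocketAddress boundAddr = server.getListenerAddress(); // may still be 0.0.0.0:<port>
  // Stores a concrete host:port under the (illustrative) key and returns it;
  // the wildcard address is replaced with the local host's address.
  return conf.updateConnectAddr("my.service.address", boundAddr);
}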