Java Code Examples for org.apache.hadoop.net.NetUtils

The following are top-voted examples showing how to use org.apache.hadoop.net.NetUtils. These examples are extracted from open-source projects. You can vote up the examples you find useful; your votes help us surface higher-quality examples.
Example 1
Project: hadoop-oss   File: NuCypherExtUtilClient.java   Source Code and License 8 votes vote down vote up
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 *
 * @param dn datanode to connect to
 * @param timeout connect and socket-read timeout, in milliseconds
 * @param conf configuration used to size the buffered output stream
 * @param saslClient performs the SASL handshake on the new connection
 * @param socketFactory factory used to create the raw socket
 * @param connectToDnViaHostname whether to dial the DN by hostname or by IP
 * @param dekFactory supplies data-encryption keys for the SASL handshake
 * @param blockToken block access token presented during the handshake
 * @return buffered (and possibly encryption-wrapped) stream pair
 * @throws IOException if the connection or SASL negotiation fails
 */
public static IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
                                       Configuration conf,
                                       SaslDataTransferClient saslClient,
                                       SocketFactory socketFactory,
                                       boolean connectToDnViaHostname,
                                       DataEncryptionKeyFactory dekFactory,
                                       Token<BlockTokenIdentifier> blockToken)
    throws IOException {

  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    LOG.debug("Connecting to datanode {}", dnAddr);
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    // The SASL handshake may substitute wrapped (e.g. encrypted) streams.
    IOStreamPair pair = saslClient.newSocketSend(sock, unbufOut,
        unbufIn, dekFactory, blockToken, dn);

    IOStreamPair result = new IOStreamPair(
        new DataInputStream(pair.in),
        new DataOutputStream(new BufferedOutputStream(pair.out,
            NuCypherExtUtilClient.getSmallBufferSize(conf)))
    );

    success = true;
    return result;
  } finally {
    // Close the socket only on failure; on success the caller owns it.
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example 2
Project: hadoop-oss   File: TestSecurityUtil.java   Source Code and License 7 votes vote down vote up
/**
 * Exercises every NetUtils address-construction path for the given host and
 * checks that each resolves to the expected host/ip/port triple.
 */
private void verifyServiceAddr(String host, String ip) {
  final int port = 123;

  // host/port tuple form
  verifyAddress(NetUtils.createSocketAddrForHost(host, port), host, ip, port);

  // authority string with no default port
  verifyAddress(NetUtils.createSocketAddr(host + ":" + port), host, ip, port);

  // authority string with an explicit port; the supplied default is ignored
  verifyAddress(NetUtils.createSocketAddr(host + ":" + port, port + 1),
      host, ip, port);

  // host-only authority; the default port is applied
  verifyAddress(NetUtils.createSocketAddr(host, port), host, ip, port);
}
 
Example 3
Project: hadoop   File: DFSAdmin.java   Source Code and License 6 votes vote down vote up
/**
 * Builds an RPC proxy to the given datanode.
 *
 * @param datanode host:port address string of the target datanode
 * @return a ClientDatanodeProtocol proxy bound to that datanode
 * @throws IOException if the proxy cannot be created
 */
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  final InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  final Configuration conf = getConf();

  // For a datanode proxy the server principal should be the DN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create and return the client proxy.
  return DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(),
      conf, NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
}
 
Example 4
Project: hadoop-oss   File: Configuration.java   Source Code and License 6 votes vote down vote up
/**
 * Get the socket address for <code>hostProperty</code> as a
 * <code>InetSocketAddress</code>. If <code>hostProperty</code> is
 * <code>null</code> or empty, <code>addressProperty</code> is used alone.
 * This is useful for cases where we want to differentiate between the host
 * bind address and the address clients should use to establish connection.
 *
 * @param hostProperty bind host property name.
 * @param addressProperty address property name.
 * @param defaultAddressValue the default value
 * @param defaultPort the default port
 * @return InetSocketAddress
 */
public InetSocketAddress getSocketAddr(
    String hostProperty,
    String addressProperty,
    String defaultAddressValue,
    int defaultPort) {

  final InetSocketAddress addr = getSocketAddr(
      addressProperty, defaultAddressValue, defaultPort);

  final String bindHost = get(hostProperty);
  if (bindHost != null && !bindHost.isEmpty()) {
    // Combine the configured bind host with the port resolved above.
    return NetUtils.createSocketAddr(bindHost, addr.getPort(), hostProperty);
  }
  return addr;
}
 
Example 5
Project: hadoop-oss   File: Configuration.java   Source Code and License 6 votes vote down vote up
/**
 * Set the socket address a client can use to connect for the
 * <code>name</code> property as a <code>host:port</code>. The wildcard
 * address is replaced with the local host's address. If both the host and
 * address properties are configured, the host component of the address is
 * combined with the port component of <code>addr</code>. This allows
 * optional control over which host name is used in multi-home bind-host
 * cases where a host can have multiple names.
 *
 * @param hostProperty the bind-host configuration name
 * @param addressProperty the service address configuration name
 * @param defaultAddressValue the service default address configuration value
 * @param addr InetSocketAddress of the service listener
 * @return InetSocketAddress for clients to connect
 */
public InetSocketAddress updateConnectAddr(
    String hostProperty,
    String addressProperty,
    String defaultAddressValue,
    InetSocketAddress addr) {

  final String bindHost = get(hostProperty);
  final String hostPort = getTrimmed(addressProperty, defaultAddressValue);

  final boolean haveBindHost = bindHost != null && !bindHost.isEmpty();
  final boolean haveHostPort = hostPort != null && !hostPort.isEmpty();
  if (!haveBindHost || !haveHostPort) {
    // Not a multi-home bind-host setup; fall back to the original logic.
    return updateConnectAddr(addressProperty, addr);
  }

  // Create the connect address from the client-facing hostname and the
  // server's actual listener port.
  final String connectHost = hostPort.split(":")[0];
  return updateConnectAddr(addressProperty,
      NetUtils.createSocketAddrForHost(connectHost, addr.getPort()));
}
 
Example 6
Project: hadoop   File: RMDelegationTokenIdentifier.java   Source Code and License 6 votes vote down vote up
/**
 * Returns a client proxy to the RM that issued the given token, or
 * {@code null} when the token belongs to the local RM (so the caller can
 * handle it directly without an RPC round trip).
 */
private static ApplicationClientProtocol getRmClient(Token<?> token,
    Configuration conf) throws IOException {
  // The token service may name several RMs (comma separated, e.g. HA).
  for (String service : token.getService().toString().split(",")) {
    InetSocketAddress tokenAddr = NetUtils.createSocketAddr(service);
    if (localSecretManager == null) {
      continue;
    }
    final boolean isOurs;
    if (localServiceAddress.getAddress().isAnyLocalAddress()) {
      // Bound to the wildcard: match any local address on our port.
      isOurs = NetUtils.isLocalAddress(tokenAddr.getAddress())
          && tokenAddr.getPort() == localServiceAddress.getPort();
    } else {
      isOurs = tokenAddr.equals(localServiceAddress);
    }
    if (isOurs) {
      return null; // it's our own token
    }
  }
  return ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
}
 
Example 7
Project: hadoop   File: TestRPCCompatibility.java   Source Code and License 6 votes vote down vote up
@Test  // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
  // Start a version-1 server that also registers the version-0 protocol,
  // then verify an old (version-0) client can still ping it.
  // create a server with two handlers
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  // Port 0 means the OS picked one; ask the server where it actually bound.
  addr = NetUtils.getConnectAddress(server);

  proxy = RPC.getProtocolProxy(
      TestProtocol0.class, TestProtocol0.versionID, addr, conf);

  TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
  proxy0.ping();
}
 
Example 8
Project: hadoop   File: TestHttpCookieFlag.java   Source Code and License 6 votes vote down vote up
@Test
public void testHttpsCookie() throws IOException, GeneralSecurityException {
  // Verifies that over HTTPS the server marks its "token" cookie both
  // HttpOnly and Secure.
  URL base = new URL("https://" + NetUtils.getHostPortString(server
          .getConnectorAddress(1)));
  HttpsURLConnection conn = (HttpsURLConnection) new URL(base,
          "/echo").openConnection();
  conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());

  String header = conn.getHeaderField("Set-Cookie");
  // Fail with a clear message instead of an NPE inside HttpCookie.parse.
  Assert.assertNotNull("no Set-Cookie header in response", header);
  List<HttpCookie> cookies = HttpCookie.parse(header);
  Assert.assertFalse(cookies.isEmpty());
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue(cookies.get(0).getSecure());
  // assertEquals reports expected vs. actual on failure.
  Assert.assertEquals("token", cookies.get(0).getValue());
}
 
Example 9
Project: hadoop   File: DFSClient.java   Source Code and License 6 votes vote down vote up
/**
 * Connect to the given datanode's data transfer port, and return
 * the resulting IOStreamPair. This includes encryption wrapping, etc.
 *
 * @param dn datanode to connect to
 * @param timeout connect and socket-read timeout, in milliseconds
 * @param lb located block whose access token is used in the SASL handshake
 * @return stream pair for the established (possibly wrapped) connection
 * @throws IOException if the connection or SASL negotiation fails
 */
private IOStreamPair connectToDN(DatanodeInfo dn, int timeout,
    LocatedBlock lb) throws IOException {
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    // Resolve the transfer address, optionally via hostname (multi-homed DNs).
    String dnAddr = dn.getXferAddr(getConf().connectToDnViaHostname);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to datanode " + dnAddr);
    }
    NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
    sock.setSoTimeout(timeout);

    OutputStream unbufOut = NetUtils.getOutputStream(sock);
    InputStream unbufIn = NetUtils.getInputStream(sock);
    // The SASL handshake may substitute wrapped (e.g. encrypted) streams.
    IOStreamPair ret = saslClient.newSocketSend(sock, unbufOut, unbufIn, this,
      lb.getBlockToken(), dn);
    success = true;
    return ret;
  } finally {
    // Close the socket only on failure; on success the caller owns it.
    if (!success) {
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example 10
Project: hadoop-oss   File: TestAuthenticationSessionCookie.java   Source Code and License 6 votes vote down vote up
@Test
public void testPersistentCookie() throws IOException {
  // Verifies that with persistent (non-session) cookies the server sets an
  // expiring "token" cookie on responses.
  try {
    startServer(false);
  } catch (Exception e) {
    // Propagate instead of swallowing: printStackTrace-and-continue let the
    // test proceed and fail later with a confusing NullPointerException.
    throw new AssertionError("failed to start test server", e);
  }

  URL base = new URL("http://" + NetUtils.getHostPortString(server
          .getConnectorAddress(0)));
  HttpURLConnection conn = (HttpURLConnection) new URL(base,
          "/echo").openConnection();

  String header = conn.getHeaderField("Set-Cookie");
  // Fail with a clear message instead of an NPE inside HttpCookie.parse.
  Assert.assertNotNull("no Set-Cookie header in response", header);
  List<HttpCookie> cookies = HttpCookie.parse(header);
  Assert.assertFalse(cookies.isEmpty());
  Log.info(header);
  Assert.assertTrue(header.contains("; Expires="));
  // assertEquals reports expected vs. actual on failure.
  Assert.assertEquals("token", cookies.get(0).getValue());
}
 
Example 11
Project: hadoop-oss   File: TestHttpCookieFlag.java   Source Code and License 6 votes vote down vote up
@Test
public void testHttpsCookie() throws IOException, GeneralSecurityException {
  // Verifies that over HTTPS the server marks its "token" cookie both
  // HttpOnly and Secure.
  URL base = new URL("https://" + NetUtils.getHostPortString(server
          .getConnectorAddress(1)));
  HttpsURLConnection conn = (HttpsURLConnection) new URL(base,
          "/echo").openConnection();
  conn.setSSLSocketFactory(clientSslFactory.createSSLSocketFactory());

  String header = conn.getHeaderField("Set-Cookie");
  // Fail with a clear message instead of an NPE inside HttpCookie.parse.
  Assert.assertNotNull("no Set-Cookie header in response", header);
  List<HttpCookie> cookies = HttpCookie.parse(header);
  Assert.assertFalse(cookies.isEmpty());
  Assert.assertTrue(header.contains("; HttpOnly"));
  Assert.assertTrue(cookies.get(0).getSecure());
  // assertEquals reports expected vs. actual on failure.
  Assert.assertEquals("token", cookies.get(0).getValue());
}
 
Example 12
Project: hadoop   File: DFSClient.java   Source Code and License 6 votes vote down vote up
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  // Open a TCP connection to a datanode and wrap it (including the SASL
  // handshake) into a Peer. On any failure both the half-built peer and the
  // raw socket are cleaned up before the exception propagates.
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
      getRandomLocalInterfaceAddr(),
      dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      // Close the peer first (it wraps the socket); closing the raw socket
      // afterwards is harmless if peer creation never happened.
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
 
Example 13
Project: hadoop   File: DFSTestUtil.java   Source Code and License 6 votes vote down vote up
/** For {@link TestTransferRbw}: send a transfer-RBW request for block
 *  {@code b} from the first datanode to the second and return the
 *  datanode's response. Exactly two datanodes must be supplied. */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  // NOTE(review): the socket and streams are not closed here — presumably
  // test teardown handles that; verify against the caller.
  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Example 14
Project: hadoop   File: TestInterDatanodeProtocol.java   Source Code and License 6 votes vote down vote up
/** Test to verify that InterDatanode RPC times out as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
  final Server server = new TestServer(1, true);
  server.start();

  // Point a fake datanode identity at the unresponsive test server.
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
  DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
  InterDatanodeProtocol proxy = null;

  try {
    // 500 ms socket timeout: the call below should time out rather than hang.
    proxy = DataNode.createInterDataNodeProtocolProxy(
        dInfo, conf, 500, false);
    proxy.initReplicaRecovery(new RecoveringBlock(
        new ExtendedBlock("bpid", 1), null, 100));
    fail ("Expected SocketTimeoutException exception, but did not get.");
  } finally {
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
    server.stop();
  }
}
 
Example 15
Project: hadoop-oss   File: TestIPC.java   Source Code and License 6 votes vote down vote up
@Test(timeout=60000)
public void testIpcConnectTimeout() throws IOException {
  // Create (but deliberately do not start) a server, so the client's
  // connect attempt can only end in a timeout.
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  //Intentionally do not start server to get a connection timeout

  // start client with a short (100 ms) connect timeout
  Client.setConnectTimeout(conf, 100);
  Client client = new Client(LongWritable.class, conf);
  // call with an rpc timeout of twice MIN_SLEEP_TIME
  try {
    call(client, new LongWritable(RANDOM.nextLong()), addr,
        MIN_SLEEP_TIME * 2, conf);
    fail("Expected an exception to have been thrown");
  } catch (SocketTimeoutException e) {
    LOG.info("Get a SocketTimeoutException ", e);
  }
  client.stop();
}
 
Example 16
Project: hadoop   File: DFSUtil.java   Source Code and License 6 votes vote down vote up
/**
 * Substitute a default host in the case that an address has been configured
 * with a wildcard. This is used, for example, when determining the HTTP
 * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
 * substitute the hostname from the filesystem URI rather than trying to
 * connect to 0.0.0.0.
 * @param configuredAddress the address found in the configuration
 * @param defaultHost the host to substitute with, if configuredAddress
 * is a local/wildcard address.
 * @return the substituted address
 * @throws IOException if it is a wildcard address and security is enabled
 */
@VisibleForTesting
static String substituteForWildcardAddress(String configuredAddress,
  String defaultHost) throws IOException {
  final InetSocketAddress sockAddr =
      NetUtils.createSocketAddr(configuredAddress);
  final InetSocketAddress defaultSockAddr =
      NetUtils.createSocketAddr(defaultHost + ":0");
  final InetAddress addr = sockAddr.getAddress();

  // Non-wildcard (or unresolvable) addresses are returned untouched.
  if (addr == null || !addr.isAnyLocalAddress()) {
    return configuredAddress;
  }

  // A wildcard substitute is useless with Kerberos: reject it.
  if (UserGroupInformation.isSecurityEnabled() &&
      defaultSockAddr.getAddress().isAnyLocalAddress()) {
    throw new IOException("Cannot use a wildcard address with security. " +
        "Must explicitly set bind address for Kerberos");
  }
  return defaultHost + ":" + sockAddr.getPort();
}
 
Example 17
Project: hadoop-oss   File: TestMultipleProtocolServer.java   Source Code and License 6 votes vote down vote up
@Before
public void setUp() throws Exception {
  // Start one Writable-RPC server exposing several protocols plus a
  // protobuf-based protocol, then record its connect address for the tests.
  // create a server with two handlers
  server = new RPC.Builder(conf).setProtocol(Foo0.class)
      .setInstance(new Foo0Impl()).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(2).setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
  // NOTE(review): Mixin is registered against a BarImpl instance —
  // presumably BarImpl also implements Mixin; confirm in the class body.
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
  
  
  // Add Protobuf server
  // Create server side implementation
  PBServerImpl pbServerImpl = new PBServerImpl();
  BlockingService service = TestProtobufRpcProto
      .newReflectiveBlockingService(pbServerImpl);
  server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
      service);
  server.start();
  // Port 0 let the OS choose; ask the server where it actually bound.
  addr = NetUtils.getConnectAddress(server);
}
 
Example 18
Project: hadoop   File: TestWebHdfsUrl.java   Source Code and License 6 votes vote down vote up
/**
 * Builds a WebHdfsFileSystem for the given user. When security is enabled,
 * a freshly minted delegation token (kind WebHdfsFileSystem.TOKEN_KIND) is
 * attached to the UGI so the filesystem can authenticate.
 *
 * @param ugi user the filesystem (and any token) is created for
 * @param conf configuration passed to FileSystem.get
 * @return a WebHdfsFileSystem for the test's {@code uri}
 * @throws IOException if the filesystem cannot be created
 */
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
        ugi.getUserName()), null, null);
    // Mocked namesystem: only the secret manager's token machinery is used.
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
        86400000, 86400000, 86400000, 86400000, namesystem);  // all 24 h (ms)
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
    // The token's service field must match the filesystem URI's authority.
    SecurityUtil.setTokenService(
        token, NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
 
Example 19
Project: hadoop   File: TestConfiguration.java   Source Code and License 5 votes vote down vote up
/**
 * Exercises Configuration.getSocketAddr: default fallback, default-port
 * completion, explicit host:port, whitespace trimming, and the error raised
 * for an invalid port.
 */
public void testSocketAddress() throws IOException {
  final Configuration conf = new Configuration();
  final String defaultAddr = "host:1";
  final int defaultPort = 2;

  // unset property -> the default address is used verbatim
  assertEquals(defaultAddr, NetUtils.getHostPortString(
      conf.getSocketAddr("myAddress", defaultAddr, defaultPort)));

  // host-only value -> the default port is appended
  conf.set("myAddress", "host2");
  assertEquals("host2:" + defaultPort, NetUtils.getHostPortString(
      conf.getSocketAddr("myAddress", defaultAddr, defaultPort)));

  // explicit host:port wins over the default port
  conf.set("myAddress", "host2:3");
  assertEquals("host2:3", NetUtils.getHostPortString(
      conf.getSocketAddr("myAddress", defaultAddr, defaultPort)));

  // surrounding whitespace is trimmed before parsing
  conf.set("myAddress", " \n \t    host4:5     \t \n   ");
  assertEquals("host4:5", NetUtils.getHostPortString(
      conf.getSocketAddr("myAddress", defaultAddr, defaultPort)));

  // invalid port -> IllegalArgumentException naming the offending property
  boolean threwException = false;
  conf.set("myAddress", "bad:-port");
  try {
    conf.getSocketAddr("myAddress", defaultAddr, defaultPort);
  } catch (IllegalArgumentException iae) {
    threwException = true;
    assertEquals("Does not contain a valid host:port authority: " +
                 "bad:-port (configuration property 'myAddress')",
                 iae.getMessage());
  } finally {
    assertTrue(threwException);
  }
}
 
Example 20
Project: ditb   File: TestSSLHttpServer.java   Source Code and License 5 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
  // Build an SSL-enabled HttpServer backed by freshly generated test
  // keystores, register an echo servlet, and record the base HTTPS URL.
  conf = new Configuration();
  conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);

  // Start from a clean keystore directory.
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  // Generate keystores/truststores and the ssl-server/ssl-client configs.
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  Configuration sslConf = new Configuration(false);
  sslConf.addResource("ssl-server.xml");
  sslConf.addResource("ssl-client.xml");

  clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
  clientSslFactory.init();

  server = new HttpServer.Builder()
      .setName("test")
      .addEndpoint(new URI("https://localhost"))
      .setConf(conf)
      .keyPassword(HBaseConfiguration.getPassword(sslConf, "ssl.server.keystore.keypassword",
          null))
      .keyStore(sslConf.get("ssl.server.keystore.location"),
          HBaseConfiguration.getPassword(sslConf, "ssl.server.keystore.password", null),
          sslConf.get("ssl.server.keystore.type", "jks"))
      .trustStore(sslConf.get("ssl.server.truststore.location"),
          HBaseConfiguration.getPassword(sslConf, "ssl.server.truststore.password", null),
          sslConf.get("ssl.server.truststore.type", "jks")).build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.start();
  // The server chose its own port; derive the base URL from the connector.
  baseUrl = new URL("https://"
      + NetUtils.getHostPortString(server.getConnectorAddress(0)));
  LOG.info("HTTP server started: " + baseUrl);
}
 
Example 21
Project: hadoop-oss   File: HAServiceTarget.java   Source Code and License 5 votes vote down vote up
/**
 * @return a proxy to the ZKFC which is associated with this HA service.
 */
public ZKFCProtocol getZKFCProxy(Configuration conf, int timeoutMs)
    throws IOException {
  Configuration confCopy = new Configuration(conf);
  // Lower the timeout so we quickly fail to connect
  confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
  SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
  return new ZKFCProtocolClientSideTranslatorPB(
      getZKFCAddress(),
      confCopy, factory, timeoutMs);
}
 
Example 22
Project: hadoop   File: Servers.java   Source Code and License 5 votes vote down vote up
/**
 * Parses a space and/or comma separated sequence of server specifications
 * of the form <i>hostname</i> or <i>hostname:port</i>.  If
 * the specs string is null, defaults to localhost:defaultPort.
 *
 * @param specs   server specs (see description)
 * @param defaultPort the default port if not specified
 * @return a list of InetSocketAddress objects.
 */
public static List<InetSocketAddress> parse(String specs, int defaultPort) {
  List<InetSocketAddress> result = Lists.newArrayList();
  if (specs == null) {
    result.add(new InetSocketAddress("localhost", defaultPort));
  }
  else {
    String[] specStrings = specs.split("[ ,]+");
    for (String specString : specStrings) {
      result.add(NetUtils.createSocketAddr(specString, defaultPort));
    }
  }
  return result;
}
 
Example 23
Project: hadoop   File: NameNode.java   Source Code and License 5 votes vote down vote up
/**
 * Given a configuration get the address of the lifeline RPC server.
 * If the lifeline RPC is not configured returns null.
 *
 * @param conf configuration
 * @return address or null
 */
InetSocketAddress getLifelineRpcServerAddress(Configuration conf) {
  String addr = getTrimmedOrNull(conf, DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY);
  if (addr == null) {
    return null;
  }
  return NetUtils.createSocketAddr(addr);
}
 
Example 24
Project: hadoop-oss   File: Servers.java   Source Code and License 5 votes vote down vote up
/**
 * Parses a space and/or comma separated sequence of server specifications
 * of the form <i>hostname</i> or <i>hostname:port</i>.  If
 * the specs string is null, defaults to localhost:defaultPort.
 *
 * @param specs   server specs (see description)
 * @param defaultPort the default port if not specified
 * @return a list of InetSocketAddress objects.
 */
public static List<InetSocketAddress> parse(String specs, int defaultPort) {
  List<InetSocketAddress> result = Lists.newArrayList();
  if (specs == null) {
    result.add(new InetSocketAddress("localhost", defaultPort));
  }
  else {
    String[] specStrings = specs.split("[ ,]+");
    for (String specString : specStrings) {
      result.add(NetUtils.createSocketAddr(specString, defaultPort));
    }
  }
  return result;
}
 
Example 25
Project: hadoop-oss   File: GetGroupsBase.java   Source Code and License 5 votes vote down vote up
/**
 * Get a client of the {@link GetUserMappingsProtocol}.
 * @return A {@link GetUserMappingsProtocol} client proxy.
 * @throws IOException
 */
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
  GetUserMappingsProtocol userGroupMappingProtocol =
    RPC.getProxy(GetUserMappingsProtocol.class, 
        GetUserMappingsProtocol.versionID,
        getProtocolAddress(getConf()), UserGroupInformation.getCurrentUser(),
        getConf(), NetUtils.getSocketFactory(getConf(),
            GetUserMappingsProtocol.class));
  return userGroupMappingProtocol;
}
 
Example 26
Project: hadoop-oss   File: StringUtils.java   Source Code and License 5 votes vote down vote up
static void startupShutdownMessage(Class<?> clazz, String[] args,
                                   final LogAdapter LOG) { 
  final String hostname = NetUtils.getHostname();
  final String classname = clazz.getSimpleName();
  LOG.info(
      toStartupShutdownString("STARTUP_MSG: ", new String[] {
          "Starting " + classname,
          "  user = " + System.getProperty("user.name"),
          "  host = " + hostname,
          "  args = " + Arrays.asList(args),
          "  version = " + VersionInfo.getVersion(),
          "  classpath = " + System.getProperty("java.class.path"),
          "  build = " + VersionInfo.getUrl() + " -r "
                       + VersionInfo.getRevision()  
                       + "; compiled by '" + VersionInfo.getUser()
                       + "' on " + VersionInfo.getDate(),
          "  java = " + System.getProperty("java.version") }
      )
    );

  if (SystemUtils.IS_OS_UNIX) {
    try {
      SignalLogger.INSTANCE.register(LOG);
    } catch (Throwable t) {
      LOG.warn("failed to register any UNIX signal loggers: ", t);
    }
  }
  ShutdownHookManager.get().addShutdownHook(
    new Runnable() {
      @Override
      public void run() {
        LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{
          "Shutting down " + classname + " at " + hostname}));
      }
    }, SHUTDOWN_HOOK_PRIORITY);

}
 
Example 27
Project: hadoop   File: TestClientRMTokens.java   Source Code and License 5 votes vote down vote up
@Test
public void testShortCircuitRenewCancelWildcardAddress()
    throws IOException, InterruptedException {
  InetSocketAddress rmAddr = new InetSocketAddress(123);
  InetSocketAddress serviceAddr = NetUtils.createSocketAddr(
      InetAddress.getLocalHost().getHostName(), rmAddr.getPort(), null);
  checkShortCircuitRenewCancel(
      rmAddr,
      serviceAddr,
      true);
}
 
Example 28
Project: hadoop   File: HAUtil.java   Source Code and License 5 votes vote down vote up
/**
 * @param conf Configuration. Please use verifyAndSetRMHAId to check.
 * @return RM Id on success
 */
public static String getRMHAId(Configuration conf) {
  int found = 0;
  String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
  if(currentRMId == null) {
    for(String rmId : getRMHAIds(conf)) {
      String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
      String addr = conf.get(key);
      if (addr == null) {
        continue;
      }
      InetSocketAddress s;
      try {
        s = NetUtils.createSocketAddr(addr);
      } catch (Exception e) {
        LOG.warn("Exception in creating socket address " + addr, e);
        continue;
      }
      if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
        currentRMId = rmId.trim();
        found++;
      }
    }
  }
  if (found > 1) { // Only one address must match the local address
    String msg = "The HA Configuration has multiple addresses that match "
        + "local node's address.";
    throw new HadoopIllegalArgumentException(msg);
  }
  return currentRMId;
}
 
Example 29
Project: hadoop   File: TestIPCServerResponder.java   Source Code and License 5 votes vote down vote up
public void testServerResponder(final int handlerCount, 
                                final boolean handlerSleep, 
                                final int clientCount,
                                final int callerCount,
                                final int callCount) throws IOException,
                                InterruptedException {
  Server server = new TestServer(handlerCount, handlerSleep);
  server.start();

  InetSocketAddress address = NetUtils.getConnectAddress(server);
  Client[] clients = new Client[clientCount];
  for (int i = 0; i < clientCount; i++) {
    clients[i] = new Client(BytesWritable.class, conf);
  }

  Caller[] callers = new Caller[callerCount];
  for (int i = 0; i < callerCount; i++) {
    callers[i] = new Caller(clients[i % clientCount], address, callCount);
    callers[i].start();
  }
  for (int i = 0; i < callerCount; i++) {
    callers[i].join();
    assertFalse(callers[i].failed);
  }
  for (int i = 0; i < clientCount; i++) {
    clients[i].stop();
  }
  server.stop();
}
 
Example 30
Project: hadoop-oss   File: TestSSLHttpServer.java   Source Code and License 5 votes vote down vote up
@BeforeClass
public static void setup() throws Exception {
  conf = new Configuration();
  conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false, true,
      excludeCiphers);

  Configuration sslConf = KeyStoreTestUtil.getSslConfig();

  clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
  clientSslFactory.init();

  server = new HttpServer2.Builder()
      .setName("test")
      .addEndpoint(new URI("https://localhost"))
      .setConf(conf)
      .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
      .keyStore(sslConf.get("ssl.server.keystore.location"),
          sslConf.get("ssl.server.keystore.password"),
          sslConf.get("ssl.server.keystore.type", "jks"))
      .trustStore(sslConf.get("ssl.server.truststore.location"),
          sslConf.get("ssl.server.truststore.password"),
          sslConf.get("ssl.server.truststore.type", "jks"))
      .excludeCiphers(
          sslConf.get("ssl.server.exclude.cipher.list")).build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
  server.start();
  baseUrl = new URL("https://"
      + NetUtils.getHostPortString(server.getConnectorAddress(0)));
  LOG.info("HTTP server started: " + baseUrl);
}
 
Example 31
Project: hadoop   File: Util.java   Source Code and License 5 votes vote down vote up
/**
 * Parses a space and/or comma separated sequence of server specifications
 * of the form <i>hostname</i> or <i>hostname:port</i>.  If
 * the specs string is null, defaults to localhost:defaultPort.
 *
 * @return a list of InetSocketAddress objects.
 */
public static List<InetSocketAddress> parse(String specs, int defaultPort) {
  List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>(1);
  if (specs != null) {
    // Split on any run of spaces and/or commas; a spec without a port
    // gets defaultPort.
    for (String spec : specs.split("[ ,]+")) {
      addresses.add(NetUtils.createSocketAddr(spec, defaultPort));
    }
  } else {
    // No specs supplied: fall back to localhost on the default port.
    addresses.add(new InetSocketAddress("localhost", defaultPort));
  }
  return addresses;
}
 
Example 32
Project: hadoop-oss   File: TestHttpServer.java   Source Code and License 5 votes vote down vote up
/**
 * Verify the administrator access for /logs, /stacks, /conf, /logLevel and
 * /metrics servlets: users covered by the server ACL get HTTP 200, any
 * other user gets HTTP 403.
 *
 * @throws Exception
 */
@Test
public void testAuthorizationOfDefaultServlets() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      true);
  conf.setBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
      true);
  conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
      DummyFilterInitializer.class.getName());

  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      MyGroupsProvider.class.getName());
  Groups.getUserToGroupsMappingService(conf);
  MyGroupsProvider.clearMapping();
  // Each test user belongs to exactly one group of the matching letter.
  MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
  MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
  MyGroupsProvider.mapping.put("userC", Arrays.asList("groupC"));
  MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD"));
  MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));

  // ACL grants userA/userB by name and groupC/groupD by group membership.
  HttpServer2 myServer = new HttpServer2.Builder().setName("test")
      .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
      .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
  myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
  myServer.start();

  String baseUrl = "http://"
      + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
  String[] servlets = { "conf", "logs", "stacks", "logLevel", "metrics" };
  String[] authorizedUsers = { "userA", "userB", "userC", "userD" };
  for (String servlet : servlets) {
    for (String user : authorizedUsers) {
      assertEquals(HttpURLConnection.HTTP_OK,
          getHttpStatusCode(baseUrl + servlet, user));
    }
    // userE is neither a named user nor in an authorized group.
    assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
        getHttpStatusCode(baseUrl + servlet, "userE"));
  }
  myServer.stop();
}
 
Example 33
Project: hadoop-oss   File: TestConfiguration.java   Source Code and License 5 votes vote down vote up
/**
 * Round-trips a resolved socket address through
 * Configuration.setSocketAddr and checks its host:port rendering is
 * unchanged.
 */
public void testSetSocketAddress() {
  Configuration conf = new Configuration();
  // Pin "host" to loopback so the test never touches real DNS.
  NetUtils.addStaticResolution("host", "127.0.0.1");
  final String expectedHostPort = "host:1";

  InetSocketAddress socketAddr = NetUtils.createSocketAddr(expectedHostPort);
  conf.setSocketAddr("myAddress", socketAddr);
  assertEquals(expectedHostPort, NetUtils.getHostPortString(socketAddr));
}
 
Example 34
Project: hadoop-oss   File: TestIPC.java   Source Code and License 5 votes vote down vote up
/**
 * Dynamic-proxy handler: turns any method invocation into one IPC call
 * carrying a random long, then unwraps the server's reply.
 */
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  final InetSocketAddress serverAddr = NetUtils.getConnectAddress(server);
  final LongWritable reply = call(client, RANDOM.nextLong(), serverAddr, conf);
  return returnValue(reply);
}
 
Example 35
Project: hadoop   File: WebAppUtils.java   Source Code and License 5 votes vote down vote up
/**
 * Returns the proxy host:port entries the AM filter should trust.
 * An explicitly configured PROXY_ADDRESS wins outright; otherwise the
 * RM web-app address(es) are used, covering every RM when HA is enabled.
 */
public static List<String> getProxyHostsAndPortsForAmFilter(
    Configuration conf) {
  List<String> proxyHosts = new ArrayList<String>();
  String configuredProxy = conf.get(YarnConfiguration.PROXY_ADDRESS);
  if (configuredProxy != null && !configuredProxy.isEmpty()) {
    proxyHosts.add(configuredProxy);
    return proxyHosts;
  }
  // PROXY_ADDRESS unset: derive candidates from the RM web-app config.
  if (HAUtil.isHAEnabled(conf)) {
    for (String rmWebAddr :
        RMHAUtils.getRMHAWebappAddresses(new YarnConfiguration(conf))) {
      try {
        proxyHosts.add(
            getResolvedAddress(NetUtils.createSocketAddr(rmWebAddr)));
      } catch (IllegalArgumentException ignored) {
        // Unresolvable address: skip it rather than fail the whole list.
      }
    }
  }
  // HA disabled, or none of the HA addresses resolved: use the single
  // RM web-app URL as the fallback.
  if (proxyHosts.isEmpty()) {
    proxyHosts.add(getResolvedRMWebAppURLWithoutScheme(conf));
  }
  return proxyHosts;
}
 
Example 36
Project: hadoop-oss   File: TestIPC.java   Source Code and License 5 votes vote down vote up
/**
 * Generic test case for exceptions thrown at some point in the IPC
 * process.
 *
 * Runs one call with fault injection enabled (expecting the injected
 * failure to surface on the client), then a second call with injection
 * disabled (expecting success), proving the failed call did not corrupt
 * client or server state.
 * 
 * @param clientParamClass - client writes this writable for parameter
 * @param serverParamClass - server reads this writable for parameter
 * @param serverResponseClass - server writes this writable for response
 * @param clientResponseClass - client reads this writable for response
 */
private void doErrorTest(
    Class<? extends LongWritable> clientParamClass,
    Class<? extends LongWritable> serverParamClass,
    Class<? extends LongWritable> serverResponseClass,
    Class<? extends LongWritable> clientResponseClass) 
    throws IOException, InstantiationException, IllegalAccessException {
  
  // start server
  Server server = new TestServer(1, false,
      serverParamClass, serverResponseClass);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  server.start();

  // start client; fault injection must be switched on BEFORE the first
  // call so the injected fault fires during serialization/deserialization
  WRITABLE_FAULTS_ENABLED = true;
  Client client = new Client(clientResponseClass, conf);
  try {
    LongWritable param = clientParamClass.newInstance();

    try {
      call(client, param, addr, 0, conf);
      fail("Expected an exception to have been thrown");
    } catch (Throwable t) {
      assertExceptionContains(t, "Injected fault");
    }
    
    // Doing a second call with faults disabled should return fine --
    // ie the internal state of the client or server should not be broken
    // by the failed call
    WRITABLE_FAULTS_ENABLED = false;
    call(client, param, addr, 0, conf);
    
  } finally {
    client.stop();
    server.stop();
  }
}
 
Example 37
Project: hadoop   File: TestNameNodeHttpServer.java   Source Code and License 5 votes vote down vote up
/**
 * Returns true iff a request to {@code scheme://host:port} for the given
 * address connects and yields content. A null address, or any failure
 * while connecting or reading, yields false.
 */
private static boolean canAccess(String scheme, InetSocketAddress addr) {
  if (addr == null) {
    return false;
  }
  try {
    URL target = new URL(scheme + "://" + NetUtils.getHostPortString(addr));
    URLConnection connection = connectionFactory.openConnection(target);
    connection.connect();
    connection.getContent();
    return true;
  } catch (Exception e) {
    // Treat every failure mode (refused, timeout, bad response) the same.
    return false;
  }
}
 
Example 38
Project: hadoop-oss   File: TestIPC.java   Source Code and License 5 votes vote down vote up
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000)
public void testCallRetryCount() throws IOException {
  final int retryCount = 255;
  // Override client to store the call id
  final Client client = new Client(LongWritable.class, conf);
  // Use the named constant rather than duplicating the literal 255, so
  // the expectation asserted below cannot drift out of sync.
  Client.setCallIdAndRetryCount(Client.nextCallId(), retryCount);

  // Attach a listener that tracks every call ID received by the server.
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      // The client explicitly set its retry count above, so the server
      // must observe that same value on every call.
      Assert.assertEquals(retryCount, Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
 
Example 39
Project: hadoop   File: BackupNode.java   Source Code and License 5 votes vote down vote up
@Override // NameNode
protected void initialize(Configuration conf) throws IOException {
  // Trash is disabled in BackupNameNode,
  // but should be turned back on if it ever becomes active.
  conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, 
               CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
  NamespaceInfo nsInfo = handshake(conf);
  super.initialize(conf);
  namesystem.setBlockPoolId(nsInfo.getBlockPoolID());

  // A backup node always runs in safe mode; enter it if the base
  // initialization did not already. (Idiomatic negation replaces the
  // original "false == ..." comparison.)
  if (!namesystem.isInSafeMode()) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  // Backup node should never do lease recovery,
  // therefore lease hard limit should never expire.
  namesystem.leaseManager.setLeasePeriod(
      HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);

  // register with the active name-node 
  registerWith(nsInfo);
  // Checkpoint daemon should start after the rpc server started
  runCheckpointDaemon(conf);
  InetSocketAddress addr = getHttpAddress();
  if (addr != null) {
    // Reuse the already-fetched address instead of calling
    // getHttpAddress() a second time.
    conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr));
  }
}
 
Example 40
Project: hadoop   File: ContainerManagementProtocolPBClientImpl.java   Source Code and License 5 votes vote down vote up
/**
 * Builds a protobuf RPC proxy for ContainerManagementProtocolPB bound to
 * the given NodeManager address, running as the current user.
 * NOTE(review): the NM_COMMAND_TIMEOUT value is passed as the final int
 * argument of RPC.getProxy -- confirm it is the rpc timeout.
 */
public ContainerManagementProtocolPBClientImpl(long clientVersion,
    InetSocketAddress addr, Configuration conf) throws IOException {
  RPC.setProtocolEngine(conf, ContainerManagementProtocolPB.class,
    ProtobufRpcEngine.class);
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

  int commandTimeout =
      conf.getInt(NM_COMMAND_TIMEOUT, DEFAULT_COMMAND_TIMEOUT);
  proxy = (ContainerManagementProtocolPB) RPC.getProxy(
      ContainerManagementProtocolPB.class, clientVersion, addr, currentUser,
      conf, NetUtils.getDefaultSocketFactory(conf), commandTimeout);
}