org.apache.hadoop.net.NetUtils Java Examples

The following examples show how to use org.apache.hadoop.net.NetUtils. Each example is taken from an open-source project; the heading above each snippet names its source file, originating project, and license.
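Most of the snippets below lean on a small set of NetUtils helpers: createSocketAddr and createSocketAddrForHost for turning configuration strings into socket addresses, getConnectAddress for discovering where a server actually listens, and the socket-factory and stream wrappers for I/O. As a warm-up, here is a minimal sketch of the address-parsing helpers; the hostname is a made-up placeholder, and unresolvable names simply yield unresolved addresses rather than errors.

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class NetUtilsAddressSketch {
  public static void main(String[] args) {
    // Parse a "host:port" authority string.
    InetSocketAddress a = NetUtils.createSocketAddr("nn.example.com:8020");
    // Host-only target: the supplied default port is used.
    InetSocketAddress b = NetUtils.createSocketAddr("nn.example.com", 8020);
    // Build the same address from an already-split host and port.
    InetSocketAddress c = NetUtils.createSocketAddrForHost("nn.example.com", 8020);
    System.out.println(a + " " + b + " " + c);
  }
}
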
Example #1
Source File: ApplicationMessageService.java    From XLearning with Apache License 2.0
@Override
public void start() {
  LOG.info("Starting application message server");
  RPC.Builder builder = new RPC.Builder(getConfig());
  builder.setProtocol(ApplicationMessageProtocol.class);
  builder.setInstance(this);
  builder.setBindAddress("0.0.0.0");
  builder.setPort(0);
  Server server;
  try {
    server = builder.build();
  } catch (Exception e) {
    LOG.error("Error starting message server!", e);
    e.printStackTrace();
    return;
  }
  server.start();

  serverAddress = NetUtils.getConnectAddress(server);
  LOG.info("Started application message server at " + serverAddress);
}
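The setPort(0) / getConnectAddress pairing above is the usual way to bind an RPC server to an ephemeral port and then discover which port the OS actually assigned. A stripped-down sketch of just that pattern follows; DemoProtocol and DemoImpl are hypothetical stand-ins (real Hadoop protocols typically extend VersionedProtocol).

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;

public class EphemeralPortSketch {
  // Hypothetical protocol, just enough for the builder to accept.
  public interface DemoProtocol {
    long versionID = 1L;
  }

  public static class DemoImpl implements DemoProtocol { }

  public static void main(String[] args) throws IOException {
    Server server = new RPC.Builder(new Configuration())
        .setProtocol(DemoProtocol.class)
        .setInstance(new DemoImpl())
        .setBindAddress("0.0.0.0")
        .setPort(0)                // let the OS pick a free port
        .build();
    server.start();
    // Resolve the wildcard bind address into one clients can actually dial.
    InetSocketAddress actual = NetUtils.getConnectAddress(server);
    System.out.println("listening at " + actual);
    server.stop();
  }
}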
 
Example #2
Source File: DFSClient.java    From big-c with Apache License 2.0
public static boolean isLocalAddress(InetSocketAddress targetAddr) {
  InetAddress addr = targetAddr.getAddress();
  Boolean cached = localAddrMap.get(addr.getHostAddress());
  if (cached != null) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("Address " + targetAddr +
                (cached ? " is local" : " is not local"));
    }
    return cached;
  }
  
  boolean local = NetUtils.isLocalAddress(addr);

  if (LOG.isTraceEnabled()) {
    LOG.trace("Address " + targetAddr +
              (local ? " is local" : " is not local"));
  }
  localAddrMap.put(addr.getHostAddress(), local);
  return local;
}
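The per-address cache exists because NetUtils.isLocalAddress consults the machine's interface list on every call, which is comparatively expensive on a hot read path. Used directly, the check is a one-liner; example.org below stands in for any remote host.

import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.net.NetUtils;

public class LocalAddressCheck {
  public static void main(String[] args) throws UnknownHostException {
    // Loopback is always local.
    System.out.println(NetUtils.isLocalAddress(InetAddress.getLoopbackAddress()));
    // A remote host's address normally is not.
    System.out.println(NetUtils.isLocalAddress(InetAddress.getByName("example.org")));
  }
}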
 
Example #3
Source File: DFSTestUtil.java    From big-c with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
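NetUtils.getOutputStream(socket, timeout) and NetUtils.getInputStream(socket) wrap the raw socket streams so that individual writes and reads time out, rather than blocking indefinitely. A minimal sketch of that wrapping, assuming something is listening on localhost:8020:

import java.io.InputStream;
import java.io.OutputStream;
import java.net.Socket;
import org.apache.hadoop.net.NetUtils;

public class TimedStreamsSketch {
  public static void main(String[] args) throws Exception {
    try (Socket s = new Socket()) {
      // NetUtils.connect enforces a connect timeout (milliseconds).
      NetUtils.connect(s, NetUtils.createSocketAddr("localhost:8020"), 10_000);
      OutputStream out = NetUtils.getOutputStream(s, 30_000); // 30s per write
      InputStream in = NetUtils.getInputStream(s); // read timeout from SO_TIMEOUT
      out.write(0);
      System.out.println(in.read());
    }
  }
}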
 
Example #4
Source File: TestBlockReplacement.java    From RDFS with Apache License 2.0
private boolean replaceBlock( Block block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination, int namespaceId) throws IOException {
  Socket sock = new Socket();
  sock.connect(NetUtils.createSocketAddr(
      destination.getName()), HdfsConstants.READ_TIMEOUT);
  sock.setKeepAlive(true);
  // sendRequest
  DataOutputStream out = new DataOutputStream(sock.getOutputStream());
  out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK);
  out.writeInt(namespaceId);
  out.writeLong(block.getBlockId());
  out.writeLong(block.getGenerationStamp());
  Text.writeString(out, source.getStorageID());
  sourceProxy.write(out);
  out.flush();
  // receiveResponse
  DataInputStream reply = new DataInputStream(sock.getInputStream());

  short status = reply.readShort();
  return status == DataTransferProtocol.OP_STATUS_SUCCESS;
}
 
Example #5
Source File: TestSecurityUtil.java    From big-c with Apache License 2.0
private void verifyServiceAddr(String host, String ip) {
  InetSocketAddress addr;
  int port = 123;

  // test host, port tuple
  //LOG.info("test tuple ("+host+","+port+")");
  addr = NetUtils.createSocketAddrForHost(host, port);
  verifyAddress(addr, host, ip, port);

  // test authority with no default port
  //LOG.info("test authority '"+host+":"+port+"'");
  addr = NetUtils.createSocketAddr(host+":"+port);
  verifyAddress(addr, host, ip, port);

  // test authority with a default port, make sure default isn't used
  //LOG.info("test authority '"+host+":"+port+"' with ignored default port");
  addr = NetUtils.createSocketAddr(host+":"+port, port+1);
  verifyAddress(addr, host, ip, port);

  // test host-only authority, using port as default port
  //LOG.info("test host:"+host+" port:"+port);
  addr = NetUtils.createSocketAddr(host, port);
  verifyAddress(addr, host, ip, port);
}
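The variants this test walks through reduce to one rule: a port given explicitly in the authority always wins, and the default port applies only to host-only targets. A compact illustration (host1 is a placeholder name):

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class DefaultPortSketch {
  public static void main(String[] args) {
    // An explicit port in the authority beats the supplied default...
    InetSocketAddress a = NetUtils.createSocketAddr("host1:123", 456);
    // ...while a host-only target falls back to it.
    InetSocketAddress b = NetUtils.createSocketAddr("host1", 456);
    System.out.println(a.getPort() + " " + b.getPort()); // prints: 123 456
  }
}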
 
Example #6
Source File: TestRMAuditLogger.java    From big-c with Apache License 2.0
/**
 * Test {@link RMAuditLogger} with IP set.
 */
@Test  
public void testRMAuditLoggerWithIP() throws Exception {
  Configuration conf = new Configuration();
  // start the IPC server
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
      .setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
      .setPort(0).setNumHandlers(5).setVerbose(true).build();
  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // Make a client connection and test the audit log
  TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
                         TestProtocol.versionID, addr, conf);
  // Start the testcase
  proxy.ping();

  server.stop();
}
 
Example #7
Source File: Configuration.java    From flink with Apache License 2.0
/**
 * Set the socket address a client can use to connect for the
 * <code>name</code> property as a <code>host:port</code>.  The wildcard
 * address is replaced with the local host's address. If both the host and
 * address properties are configured, the host component of the address
 * property is combined with the port component of <code>addr</code> to
 * generate the address. This allows optional control over which host name
 * is used in multi-homed bind-host cases where a host can have multiple names.
 * @param hostProperty the bind-host configuration name
 * @param addressProperty the service address configuration name
 * @param defaultAddressValue the service default address configuration value
 * @param addr InetSocketAddress of the service listener
 * @return InetSocketAddress for clients to connect
 */
public InetSocketAddress updateConnectAddr(
		String hostProperty,
		String addressProperty,
		String defaultAddressValue,
		InetSocketAddress addr) {

	final String host = get(hostProperty);
	final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue);

	if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) {
		//not our case, fall back to original logic
		return updateConnectAddr(addressProperty, addr);
	}

	final String connectHost = connectHostPort.split(":")[0];
	// Create connect address using client address hostname and server port.
	return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
			connectHost, addr.getPort()));
}
 
Example #8
Source File: DFSUtil.java    From hadoop with Apache License 2.0
private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
    Configuration conf, String nsId, String defaultValue,
    String... keys) {
  Collection<String> nnIds = getNameNodeIds(conf, nsId);
  Map<String, InetSocketAddress> ret = Maps.newHashMap();
  for (String nnId : emptyAsSingletonNull(nnIds)) {
    String suffix = concatSuffixes(nsId, nnId);
    String address = getConfValue(defaultValue, suffix, conf, keys);
    if (address != null) {
      InetSocketAddress isa = NetUtils.createSocketAddr(address);
      if (isa.isUnresolved()) {
        LOG.warn("Namenode for " + nsId +
                 " remains unresolved for ID " + nnId +
                 ".  Check your hdfs-site.xml file to " +
                 "ensure namenodes are configured properly.");
      }
      ret.put(nnId, isa);
    }
  }
  return ret;
}
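Note that createSocketAddr does not throw when DNS resolution fails; it returns an unresolved InetSocketAddress, which is why the code above must check isUnresolved() and warn. A small sketch of the same defensive check; the .invalid TLD is reserved and never resolves.

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class UnresolvedCheckSketch {
  public static void main(String[] args) {
    InetSocketAddress isa = NetUtils.createSocketAddr("no-such-host.invalid:8020");
    if (isa.isUnresolved()) {
      // Resolution failed; the caller decides whether that is fatal.
      System.out.println("unresolved: " + isa);
    }
  }
}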
 
Example #9
Source File: WebAppUtils.java    From big-c with Apache License 2.0
private static String getResolvedAddress(InetSocketAddress address) {
  address = NetUtils.getConnectAddress(address);
  StringBuilder sb = new StringBuilder();
  InetAddress resolved = address.getAddress();
  if (resolved == null || resolved.isAnyLocalAddress() ||
      resolved.isLoopbackAddress()) {
    String lh = address.getHostName();
    try {
      lh = InetAddress.getLocalHost().getCanonicalHostName();
    } catch (UnknownHostException e) {
      //Ignore and fallback.
    }
    sb.append(lh);
  } else {
    sb.append(address.getHostName());
  }
  sb.append(":").append(address.getPort());
  return sb.toString();
}
 
Example #10
Source File: MRAdmin.java    From RDFS with Apache License 2.0
/**
 * Command to ask the jobtracker to reread the hosts and excluded hosts 
 * file.
 * Usage: java MRAdmin -refreshNodes
 * @exception IOException 
 */
private int refreshNodes() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // Create the client
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));
  
  // Ask the JobTracker to reread the hosts and exclude files
  adminOperationsProtocol.refreshNodes();
  
  return 0;
}
 
Example #11
Source File: HddsServerUtil.java    From hadoop-ozone with Apache License 2.0
/**
 * Retrieve the socket address that should be used by clients to connect
 * to the SCM for
 * {@link org.apache.hadoop.hdds.protocol.SCMSecurityProtocol}. If
 * {@link ScmConfigKeys#OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY} is not defined
 * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If neither
 * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used.
 *
 * @param conf configuration source
 * @return Target {@code InetSocketAddress} for the SCM security service endpoint.
 * @throws IllegalArgumentException if configuration is not defined or invalid
 */
public static InetSocketAddress getScmAddressForSecurityProtocol(
    ConfigurationSource conf) {
  Optional<String> host = getHostNameFromConfigKeys(conf,
      ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
      ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);

  if (!host.isPresent()) {
    // Fallback to Ozone SCM name
    host = Optional.of(getSingleSCMAddress(conf).getHostName());
  }

  final int port = getPortNumberFromConfigKeys(conf,
      ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY)
      .orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT);

  return NetUtils.createSocketAddr(host.get() + ":" + port);
}
 
Example #12
Source File: Configuration.java    From hadoop with Apache License 2.0
/**
 * Get the socket address for <code>hostProperty</code> as an
 * <code>InetSocketAddress</code>. If <code>hostProperty</code> is
 * <code>null</code>, <code>addressProperty</code> will be used. This
 * is useful for cases where we want to differentiate between host
 * bind address and address clients should use to establish connection.
 *
 * @param hostProperty bind host property name.
 * @param addressProperty address property name.
 * @param defaultAddressValue the default value
 * @param defaultPort the default port
 * @return InetSocketAddress
 */
public InetSocketAddress getSocketAddr(
    String hostProperty,
    String addressProperty,
    String defaultAddressValue,
    int defaultPort) {

  InetSocketAddress bindAddr = getSocketAddr(
    addressProperty, defaultAddressValue, defaultPort);

  final String host = get(hostProperty);

  if (host == null || host.isEmpty()) {
    return bindAddr;
  }

  return NetUtils.createSocketAddr(
      host, bindAddr.getPort(), hostProperty);
}
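This overload implements the common split between the address a server binds (often the wildcard, so it listens on every interface) and the address clients are told to dial. A sketch with hypothetical configuration keys:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;

public class BindHostSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical keys: advertise one host, but bind on all interfaces.
    conf.set("demo.rpc.address", "nn1.example.com:8020");
    conf.set("demo.rpc.bind-host", "0.0.0.0");

    InetSocketAddress bindAddr = conf.getSocketAddr(
        "demo.rpc.bind-host", "demo.rpc.address", "0.0.0.0:8020", 8020);
    // Host comes from the bind-host key, port from the address key.
    System.out.println(bindAddr);
  }
}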
 
Example #13
Source File: TestNodeStatusUpdater.java    From hadoop with Apache License 2.0
@Override
public RegisterNodeManagerResponse registerNodeManager(
    RegisterNodeManagerRequest request) throws YarnException,
    IOException {
  NodeId nodeId = request.getNodeId();
  Resource resource = request.getResource();
  LOG.info("Registering " + nodeId.toString());
  // NOTE: this really should be checking against the config value
  InetSocketAddress expected = NetUtils.getConnectAddress(
      conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1));
  Assert.assertEquals(NetUtils.getHostPortString(expected), nodeId.toString());
  Assert.assertEquals(5 * 1024, resource.getMemory());
  registeredNodes.add(nodeId);

  RegisterNodeManagerResponse response = recordFactory
      .newRecordInstance(RegisterNodeManagerResponse.class);
  response.setContainerTokenMasterKey(createMasterKey());
  response.setNMTokenMasterKey(createMasterKey());
  return response;
}
 
Example #14
Source File: ClientCache.java    From hadoop with Apache License 2.0
protected MRClientProtocol instantiateHistoryProxy()
    throws IOException {
  final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
  if (StringUtils.isEmpty(serviceAddr)) {
    return null;
  }
  LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
  final YarnRPC rpc = YarnRPC.create(conf);
  LOG.debug("Connected to HistoryServer at: " + serviceAddr);
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
    @Override
    public MRClientProtocol run() {
      return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
          NetUtils.createSocketAddr(serviceAddr), conf);
    }
  });
}
 
Example #15
Source File: TestRPCCompatibility.java    From big-c with Apache License 2.0
@Test  // old client vs new server
public void testVersion0ClientVersion1Server() throws Exception {
  // create a server with two handlers
  TestImpl1 impl = new TestImpl1();
  server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
      .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
      .setVerbose(false).build();
  server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
  server.start();
  addr = NetUtils.getConnectAddress(server);

  proxy = RPC.getProtocolProxy(
      TestProtocol0.class, TestProtocol0.versionID, addr, conf);

  TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
  proxy0.ping();
}
 
Example #16
Source File: HdfsProxy.java    From RDFS with Apache License 2.0
private void initialize(Configuration conf) throws IOException {
  sslAddr = getSslAddr(conf);
  String nn = conf.get("hdfsproxy.dfs.namenode.address");
  if (nn == null)
    throw new IOException("HDFS NameNode address is not specified");
  InetSocketAddress nnAddr = NetUtils.createSocketAddr(nn);
  LOG.info("HDFS NameNode is at: " + nnAddr.getHostName() + ":" + nnAddr.getPort());

  Configuration sslConf = new Configuration(false);
  sslConf.addResource(conf.get("hdfsproxy.https.server.keystore.resource",
      "ssl-server.xml"));
  // unit testing
  sslConf.set("proxy.http.test.listener.addr",
              conf.get("proxy.http.test.listener.addr"));

  this.server = new ProxyHttpServer(sslAddr, sslConf);
  this.server.setAttribute("proxy.https.port", server.getPort());
  this.server.setAttribute("name.node.address", nnAddr);
  this.server.setAttribute("name.conf", new Configuration());
  this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
  this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);
  this.server.addServlet("data", "/data/*", ProxyFileDataServlet.class);
  this.server.addServlet("streamFile", "/streamFile/*", ProxyStreamFile.class);
}
 
Example #17
Source File: TestAuditLogger.java    From big-c with Apache License 2.0
/**
 * Test {@link AuditLogger} with IP set.
 */
public void testAuditLoggerWithIP() throws Exception {
  Configuration conf = new Configuration();
  // start the IPC server
  Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
          .setInstance(new MyTestRPCServer()).setBindAddress("0.0.0.0")
          .setPort(0).build();
  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // Make a client connection and test the audit log
  TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
                         TestProtocol.versionID, addr, conf);
  // Start the testcase
  proxy.ping();

  server.stop();
}
 
Example #18
Source File: MRAdmin.java    From RDFS with Apache License 2.0
private int refreshQueueAcls() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // Create the client
  AdminOperationsProtocol adminOperationsProtocol = 
    (AdminOperationsProtocol) 
    RPC.getProxy(AdminOperationsProtocol.class, 
                 AdminOperationsProtocol.versionID, 
                 JobTracker.getAddress(conf), getUGI(conf), conf,
                 NetUtils.getSocketFactory(conf, 
                                           AdminOperationsProtocol.class));
  
  // Refresh the queue properties
  adminOperationsProtocol.refreshQueueAcls();
  
  return 0;
}
 
Example #19
Source File: TestWebHdfsUrl.java    From big-c with Apache License 2.0
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
        ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
        86400000, 86400000, 86400000, 86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
    SecurityUtil.setTokenService(
        token, NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
 
Example #20
Source File: QuorumJournalManager.java    From hadoop with Apache License 2.0
private static List<InetSocketAddress> getLoggerAddresses(URI uri)
    throws IOException {
  String authority = uri.getAuthority();
  Preconditions.checkArgument(authority != null && !authority.isEmpty(),
      "URI has no authority: " + uri);
  
  String[] parts = StringUtils.split(authority, ';');
  for (int i = 0; i < parts.length; i++) {
    parts[i] = parts[i].trim();
  }

  if (parts.length % 2 == 0) {
    LOG.warn("Quorum journal URI '" + uri + "' has an even number " +
        "of Journal Nodes specified. This is not recommended!");
  }
  
  List<InetSocketAddress> addrs = Lists.newArrayList();
  for (String addr : parts) {
    addrs.add(NetUtils.createSocketAddr(
        addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT));
  }
  return addrs;
}
 
Example #21
Source File: DFSTestUtil.java    From hadoop with Apache License 2.0
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Example #22
Source File: TestRMHA.java    From hadoop with Apache License 2.0
private void checkActiveRMWebServices() throws JSONException {
  // Validate web-service
  Client webServiceClient = Client.create(new DefaultClientConfig());
  InetSocketAddress rmWebappAddr =
      NetUtils.getConnectAddress(rm.getWebapp().getListenerAddress());
  String webappURL =
      "http://" + rmWebappAddr.getHostName() + ":" + rmWebappAddr.getPort();
  WebResource webResource = webServiceClient.resource(webappURL);
  String path = app.getApplicationId().toString();

  ClientResponse response =
      webResource.path("ws").path("v1").path("cluster").path("apps")
        .path(path).accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);
  assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
  JSONObject json = response.getEntity(JSONObject.class);

  assertEquals("incorrect number of elements", 1, json.length());
  JSONObject appJson = json.getJSONObject("app");
  assertEquals("ACCEPTED", appJson.getString("state"));
  // Other stuff is verified in the regular web-services related tests
}
 
Example #23
Source File: HddsUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Retrieve the socket address that should be used by clients to connect
 * to the SCM.
 *
 * @return Target {@code InetSocketAddress} for the SCM client endpoint.
 */
public static InetSocketAddress getScmAddressForClients(
    ConfigurationSource conf) {
  Optional<String> host = getHostNameFromConfigKeys(conf,
      ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);

  if (!host.isPresent()) {
    // Fallback to Ozone SCM name
    host = Optional.of(getSingleSCMAddress(conf).getHostName());
  }

  final int port = getPortNumberFromConfigKeys(conf,
      ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY)
      .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT);

  return NetUtils.createSocketAddr(host.get() + ":" + port);
}
 
Example #24
Source File: TestIPC.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testIpcConnectTimeout() throws IOException {
  // Create the server, but intentionally never start it,
  // so that the client's connect attempt times out.
  Server server = new TestServer(1, true);
  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // start client
  Client.setConnectTimeout(conf, 100);
  Client client = new Client(LongWritable.class, conf);
  // set the rpc timeout to twice the MIN_SLEEP_TIME
  try {
    client.call(new LongWritable(RANDOM.nextLong()),
            addr, null, null, MIN_SLEEP_TIME*2, conf);
    fail("Expected an exception to have been thrown");
  } catch (SocketTimeoutException e) {
    LOG.info("Get a SocketTimeoutException ", e);
  }
}
 
Example #25
Source File: MiniJournalCluster.java    From hadoop with Apache License 2.0
public void restartJournalNode(int i) throws InterruptedException, IOException {
  JNInfo info = nodes[i];
  JournalNode jn = info.node;
  Configuration conf = new Configuration(jn.getConf());
  if (jn.isStarted()) {
    jn.stopAndJoin(0);
  }
  
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
      NetUtils.getHostPortString(info.ipcAddr));

  final String uri = info.httpServerURI;
  if (uri.startsWith("http://")) {
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
        uri.substring(("http://".length())));
  } else if (info.httpServerURI.startsWith("https://")) {
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
        uri.substring(("https://".length())));
  }

  JournalNode newJN = new JournalNode();
  newJN.setConf(conf);
  newJN.start();
  info.node = newJN;
}
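getHostPortString is the inverse of createSocketAddr: it renders an InetSocketAddress back into the host:port form that configuration values expect, which is exactly what the restart logic above relies on. A round-trip sketch (the hostname is a placeholder):

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class HostPortRoundTrip {
  public static void main(String[] args) {
    InetSocketAddress addr = NetUtils.createSocketAddr("jn1.example.com:8485");
    String hostPort = NetUtils.getHostPortString(addr); // "jn1.example.com:8485"
    // The rendered string parses back into an equivalent address.
    System.out.println(NetUtils.createSocketAddr(hostPort));
  }
}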
 
Example #26
Source File: ContainerTestUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Creates an Endpoint for testing purposes.
 *
 * @param conf - Configuration
 * @param address - InetSocketAddress to connect to
 * @param rpcTimeout - RPC timeout in milliseconds
 * @return EndPoint
 * @throws Exception
 */
public static EndpointStateMachine createEndpoint(Configuration conf,
    InetSocketAddress address, int rpcTimeout) throws Exception {
  RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
      ProtobufRpcEngine.class);
  long version =
      RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);

  StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
      StorageContainerDatanodeProtocolPB.class, version,
      address, UserGroupInformation.getCurrentUser(), conf,
      NetUtils.getDefaultSocketFactory(conf), rpcTimeout,
      RetryPolicies.TRY_ONCE_THEN_FAIL).getProxy();

  StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
      new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy);
  return new EndpointStateMachine(address, rpcClient,
      new LegacyHadoopConfigurationSource(conf));
}
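getDefaultSocketFactory returns the cluster-wide factory configured by hadoop.rpc.socket.factory.class.default, while getSocketFactory (used by the MRAdmin examples elsewhere on this page) first looks for a per-protocol override derived from the protocol class name and only then falls back to the default. A short comparison sketch; Runnable is an arbitrary stand-in for a protocol class.

import javax.net.SocketFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class SocketFactorySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Cluster-wide default factory.
    SocketFactory def = NetUtils.getDefaultSocketFactory(conf);
    // Per-protocol lookup; falls back to the default when no
    // protocol-specific factory is configured.
    SocketFactory perProto = NetUtils.getSocketFactory(conf, Runnable.class);
    System.out.println(def.getClass() + " / " + perProto.getClass());
  }
}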
 
Example #27
Source File: ClusterManagerServer.java    From RDFS with Apache License 2.0
public ClusterManagerServer(CoronaConf conf, ClusterManager cm)
    throws IOException {
  this.conf = conf;
  String target = conf.getClusterManagerAddress();
  InetSocketAddress addr = NetUtils.createSocketAddr(target);
  ServerSocket serverSocket = new ServerSocket(addr.getPort());
  this.port = serverSocket.getLocalPort();
  server = TFactoryBasedThreadPoolServer.createNewServer(
    new ClusterManagerService.Processor(cm), serverSocket,
    conf.getCMSoTimeout());
}
 
Example #28
Source File: DistributedFileSystem.java    From hadoop with Apache License 2.0
@Override
protected URI canonicalizeUri(URI uri) {
  if (HAUtil.isLogicalUri(getConf(), uri)) {
    // Don't try to DNS-resolve logical URIs, since the 'authority'
    // portion isn't a proper hostname
    return uri;
  } else {
    return NetUtils.getCanonicalUri(uri, getDefaultPort());
  }
}
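getCanonicalUri normalizes a URI's authority, filling in the default port and canonicalizing the hostname via DNS where possible, so that equivalent filesystem URIs compare equal; the HA guard above exists because a logical nameservice ID is not a resolvable hostname. A minimal sketch:

import java.net.URI;
import org.apache.hadoop.net.NetUtils;

public class CanonicalUriSketch {
  public static void main(String[] args) {
    URI uri = URI.create("hdfs://nn.example.com/user/alice");
    // The missing port is filled in with the supplied default.
    System.out.println(NetUtils.getCanonicalUri(uri, 8020));
  }
}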
 
Example #29
Source File: TestIPC.java    From hadoop with Apache License 2.0
/**
 * Test if the rpc server gets the retry count from client.
 */
@Test(timeout=60000)
public void testCallRetryCount() throws IOException {
  final int retryCount = 255;
  // Set the call id and retry count the client will send
  final Client client = new Client(LongWritable.class, conf);
  Client.setCallIdAndRetryCount(Client.nextCallId(), retryCount);

  // Attach a listener that checks the retry count the server observes.
  final TestServer server = new TestServer(1, false);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      // the client has set the retry count, so the server
      // should observe that same value
      Assert.assertEquals(retryCount, Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final SerialCaller caller = new SerialCaller(client, addr, 10);
    caller.run();
    assertFalse(caller.failed);
  } finally {
    client.stop();
    server.stop();
  }
}
 
Example #30
Source File: TestRMContainerAllocator.java    From hadoop with Apache License 2.0
private static ClientService createMockClientService() {
  ClientService service = mock(ClientService.class);
  when(service.getBindAddress()).thenReturn(
      NetUtils.createSocketAddr("localhost:4567"));
  when(service.getHttpPort()).thenReturn(890);
  return service;
}