Java Code Examples for org.apache.hadoop.net.DNS#getDefaultHost()

The following examples show how to use org.apache.hadoop.net.DNS#getDefaultHost(). Each example notes its source file and the open-source project it comes from.
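Before the project-specific examples, here is a minimal, self-contained sketch of the call pattern itself. It is not taken from any of the projects below: it assumes a reasonably recent Hadoop release on the classpath (the three-argument overload with host-file fallback appears only in newer versions), and the interface name "eth0" is a placeholder for whatever interface exists on your machine.

import java.net.UnknownHostException;

import org.apache.hadoop.net.DNS;

public class DefaultHostDemo {
  public static void main(String[] args) {
    try {
      // Passing "default" for both arguments resolves the local host name
      // via InetAddress.getLocalHost() rather than a specific interface.
      String host = DNS.getDefaultHost("default", "default");
      System.out.println("default host = " + host);

      // "eth0" is a placeholder interface name; the third argument asks
      // Hadoop to fall back to hosts-file resolution if reverse DNS fails.
      String viaEth0 = DNS.getDefaultHost("eth0", "default", true);
      System.out.println("host via eth0 = " + viaEth0);
    } catch (UnknownHostException e) {
      System.err.println("could not determine host name: " + e);
    }
  }
}

As the examples below show, callers typically read the interface and nameserver names from configuration keys such as dfs.datanode.dns.interface and dfs.datanode.dns.nameserver, falling back to "default" for both.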
Example 1
Source File: Standby.java    From RDFS with Apache License 2.0
Standby(AvatarNode avatarNode, Configuration startupConf, Configuration conf) 
  throws IOException {
  this.running = true;
  this.avatarNode = avatarNode;
  this.confg = conf;
  this.startupConf = startupConf;
  this.fsImage = avatarNode.getFSImage();
  this.fsnamesys = avatarNode.getNamesystem();
  this.sleepBetweenErrors = startupConf.getInt("hdfs.avatarnode.sleep", 5000);
  initSecondary(startupConf); // start webserver for secondary namenode

  this.machineName =
    DNS.getDefaultHost(conf.get("dfs.namenode.dns.interface","default"),
                       conf.get("dfs.namenode.dns.nameserver","default"));
  LOG.info("machineName=" + machineName);
  
  this.editsFile = this.avatarNode.getRemoteEditsFile(conf);
  this.editsFileNew = this.avatarNode.getRemoteEditsFileNew(conf);
  
  InetSocketAddress addr = NameNode.getAddress(conf);
  this.tmpImageFileForValidation = new File("/tmp", 
      "hadoop_image." + addr.getHostName() + ":" + addr.getPort());
  checkpointStatus("No checkpoint initiated");
}
 
Example 2
Source File: NNThroughputBenchmark.java    From hadoop with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example 3
Source File: GangliaContext31.java    From hadoop with Apache License 2.0
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);

  LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");

  // Take the hostname from the DNS class.

  Configuration conf = new Configuration();

  if (conf.get("slave.host.name") != null) {
    hostName = conf.get("slave.host.name");
  } else {
    try {
      hostName = DNS.getDefaultHost(
        conf.get("dfs.datanode.dns.interface","default"),
        conf.get("dfs.datanode.dns.nameserver","default"));
    } catch (UnknownHostException uhe) {
      LOG.error(uhe);
  	hostName = "UNKNOWN.example.com";
    }
  }
}
 
Example 4
Source File: GangliaContext31.java    From big-c with Apache License 2.0
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);

  LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");

  // Take the hostname from the DNS class.

  Configuration conf = new Configuration();

  if (conf.get("slave.host.name") != null) {
    hostName = conf.get("slave.host.name");
  } else {
    try {
      hostName = DNS.getDefaultHost(
        conf.get("dfs.datanode.dns.interface","default"),
        conf.get("dfs.datanode.dns.nameserver","default"));
    } catch (UnknownHostException uhe) {
      LOG.error(uhe);
  	hostName = "UNKNOWN.example.com";
    }
  }
}
 
Example 5
Source File: NNThroughputBenchmark.java    From big-c with Apache License 2.0
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration, 
      nameNode.getNamesystem().getBlockPoolId(), reports,
          new BlockReportContext(1, 0, System.nanoTime()));
}
 
Example 6
Source File: NNThroughputBenchmark.java    From hadoop-gpu with Apache License 2.0
/**
 * Get the data-node name in the form
 * <host name> : <port>
 * where port is a 6-digit integer.
 * This is necessary in order to provide lexicographic ordering.
 * Host names are all the same; the ordering goes by port number.
 */
private static String getNodeName(int port) throws IOException {
  String machineName = DNS.getDefaultHost("default", "default");
  String sPort = String.valueOf(100000 + port);
  if(sPort.length() > 6)
    throw new IOException("Too many data-nodes.");
  return machineName + ":" + sPort;
}
 
Example 7
Source File: HddsUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Returns the hostname for this datanode. If the hostname is not
 * explicitly configured in the given config, then it is determined
 * via the DNS class.
 *
 * @param conf Configuration
 *
 * @return the hostname (NB: may not be a FQDN)
 * @throws UnknownHostException if the dfs.datanode.dns.interface
 *    option is used and the hostname can not be determined
 */
public static String getHostName(ConfigurationSource conf)
    throws UnknownHostException {
  String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
  if (name == null) {
    String dnsInterface = conf.get(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_INTERFACE_KEY);
    String nameServer = conf.get(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
    boolean fallbackToHosts = false;

    if (dnsInterface == null) {
      // Try the legacy configuration keys.
      dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
      nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
    } else {
      // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
      // resolution if DNS fails. We will not use hosts file resolution
      // by default to avoid breaking existing clusters.
      fallbackToHosts = true;
    }

    name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
  }
  return name;
}
 
Example 8
Source File: NNThroughputBenchmark.java    From RDFS with Apache License 2.0
/**
 * Get the data-node name in the form <host name> : <port> where port
 * is a 6-digit integer. This is necessary in order to provide
 * lexicographic ordering. Host names are all the same; the ordering
 * goes by port number.
 */
private static String getNodeName(int port) throws IOException {
  String machineName = DNS.getDefaultHost("default", "default");
  String sPort = String.valueOf(100000 + port);
  if (sPort.length() > 6)
    throw new IOException("Too many data-nodes.");
  return machineName + ":" + sPort;
}
 
Example 9
Source File: NNThroughputBenchmark.java    From RDFS with Apache License 2.0
/**
 * Get the data-node name in the form
 * <host name> : <port>
 * where port is a 6-digit integer.
 * This is necessary in order to provide lexicographic ordering.
 * Host names are all the same; the ordering goes by port number.
 */
private static String getNodeName(int port) throws IOException {
  String machineName = DNS.getDefaultHost("default", "default");
  String sPort = String.valueOf(100000 + port);
  if(sPort.length() > 6)
    throw new IOException("Too many data-nodes.");
  return machineName + ":" + sPort;
}
 
Example 10
Source File: ConnectionUtils.java    From hbase with Apache License 2.0
private static String getMyAddress() {
  try {
    return DNS.getDefaultHost("default", "default");
  } catch (UnknownHostException uhe) {
    LOG.error("cannot determine my address", uhe);
    return null;
  }
}
 
Example 11
Source File: DataNode.java    From big-c with Apache License 2.0
/**
 * Returns the hostname for this datanode. If the hostname is not
 * explicitly configured in the given config, then it is determined
 * via the DNS class.
 *
 * @param config configuration
 * @return the hostname (NB: may not be a FQDN)
 * @throws UnknownHostException if the dfs.datanode.dns.interface
 *    option is used and the hostname can not be determined
 */
private static String getHostName(Configuration config)
    throws UnknownHostException {
  String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
  if (name == null) {
    name = DNS.getDefaultHost(
        config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
                   DFS_DATANODE_DNS_INTERFACE_DEFAULT),
        config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
                   DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
  }
  return name;
}
 
Example 12
Source File: RpcProgramNfs3.java    From big-c with Apache License 2.0
public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  DefaultMetricsSystem.initialize("Nfs3");
  String displayName = DNS.getDefaultHost("default", "default")
      + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
          NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
  metrics = Nfs3Metrics.create(config, displayName);
  return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
}
 
Example 13
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * Returns the hostname for this datanode. If the hostname is not
 * explicitly configured in the given config, then it is determined
 * via the DNS class.
 *
 * @param config configuration
 * @return the hostname (NB: may not be a FQDN)
 * @throws UnknownHostException if the dfs.datanode.dns.interface
 *    option is used and the hostname can not be determined
 */
private static String getHostName(Configuration config)
    throws UnknownHostException {
  String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
  if (name == null) {
    name = DNS.getDefaultHost(
        config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
                   DFS_DATANODE_DNS_INTERFACE_DEFAULT),
        config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
                   DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
  }
  return name;
}
 
Example 14
Source File: RpcProgramNfs3.java    From hadoop with Apache License 2.0
public static RpcProgramNfs3 createRpcProgramNfs3(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  DefaultMetricsSystem.initialize("Nfs3");
  String displayName = DNS.getDefaultHost("default", "default")
      + config.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
          NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT);
  metrics = Nfs3Metrics.create(config, displayName);
  return new RpcProgramNfs3(config, registrationSocket, allowInsecurePorts);
}
 
Example 15
Source File: SecureLogin.java    From pxf with Apache License 2.0
/**
 * Retrieve the name of the current host. The code is copied from the
 * org.apache.hadoop.security.SecurityUtil class.
 * @param conf configuration
 * @return name of the host
 * @throws IOException if the host name cannot be determined
 */
private String getLocalHostName(@Nullable Configuration conf) throws IOException {
    if (conf != null) {
        String dnsInterface = conf.get("hadoop.security.dns.interface");
        String nameServer = conf.get("hadoop.security.dns.nameserver");
        if (dnsInterface != null) {
            return DNS.getDefaultHost(dnsInterface, nameServer, true);
        }
        if (nameServer != null) {
            throw new IllegalArgumentException("hadoop.security.dns.nameserver requires hadoop.security.dns.interface. Check your configuration.");
        }
    }
    return InetAddress.getLocalHost().getCanonicalHostName();
}
 
Example 16
Source File: AbstractGangliaSink.java    From big-c with Apache License 2.0
public void init(SubsetConfiguration conf) {
  LOG.debug("Initializing the GangliaSink for Ganglia metrics.");

  this.conf = conf;

  // Take the hostname from the DNS class.
  if (conf.getString("slave.host.name") != null) {
    hostName = conf.getString("slave.host.name");
  } else {
    try {
      hostName = DNS.getDefaultHost(
          conf.getString("dfs.datanode.dns.interface", "default"),
          conf.getString("dfs.datanode.dns.nameserver", "default"));
    } catch (UnknownHostException uhe) {
      LOG.error(uhe);
      hostName = "UNKNOWN.example.com";
    }
  }

  // load the ganglia servers from properties
  metricsServers = Servers.parse(conf.getString(SERVERS_PROPERTY),
      DEFAULT_PORT);
  multicastEnabled = conf.getBoolean(MULTICAST_ENABLED_PROPERTY,
          DEFAULT_MULTICAST_ENABLED);
  multicastTtl = conf.getInt(MULTICAST_TTL_PROPERTY, DEFAULT_MULTICAST_TTL);

  // extract the Ganglia conf per metrics
  gangliaConfMap = new HashMap<String, GangliaConf>();
  loadGangliaConf(GangliaConfType.units);
  loadGangliaConf(GangliaConfType.tmax);
  loadGangliaConf(GangliaConfType.dmax);
  loadGangliaConf(GangliaConfType.slope);

  try {
    if (multicastEnabled) {
      LOG.info("Enabling multicast for Ganglia with TTL " + multicastTtl);
      datagramSocket = new MulticastSocket();
      ((MulticastSocket) datagramSocket).setTimeToLive(multicastTtl);
    } else {
      datagramSocket = new DatagramSocket();
    }
  } catch (IOException e) {
    LOG.error(e);
  }

  // see if sparseMetrics is supported. Default is false
  supportSparseMetrics = conf.getBoolean(SUPPORT_SPARSE_METRICS_PROPERTY,
      SUPPORT_SPARSE_METRICS_DEFAULT);
}
 
Example 17
Source File: AbstractGangliaSink.java    From hadoop with Apache License 2.0
public void init(SubsetConfiguration conf) {
  LOG.debug("Initializing the GangliaSink for Ganglia metrics.");

  this.conf = conf;

  // Take the hostname from the DNS class.
  if (conf.getString("slave.host.name") != null) {
    hostName = conf.getString("slave.host.name");
  } else {
    try {
      hostName = DNS.getDefaultHost(
          conf.getString("dfs.datanode.dns.interface", "default"),
          conf.getString("dfs.datanode.dns.nameserver", "default"));
    } catch (UnknownHostException uhe) {
      LOG.error(uhe);
      hostName = "UNKNOWN.example.com";
    }
  }

  // load the ganglia servers from properties
  metricsServers = Servers.parse(conf.getString(SERVERS_PROPERTY),
      DEFAULT_PORT);
  multicastEnabled = conf.getBoolean(MULTICAST_ENABLED_PROPERTY,
          DEFAULT_MULTICAST_ENABLED);
  multicastTtl = conf.getInt(MULTICAST_TTL_PROPERTY, DEFAULT_MULTICAST_TTL);

  // extract the Ganglia conf per metrics
  gangliaConfMap = new HashMap<String, GangliaConf>();
  loadGangliaConf(GangliaConfType.units);
  loadGangliaConf(GangliaConfType.tmax);
  loadGangliaConf(GangliaConfType.dmax);
  loadGangliaConf(GangliaConfType.slope);

  try {
    if (multicastEnabled) {
      LOG.info("Enabling multicast for Ganglia with TTL " + multicastTtl);
      datagramSocket = new MulticastSocket();
      ((MulticastSocket) datagramSocket).setTimeToLive(multicastTtl);
    } else {
      datagramSocket = new DatagramSocket();
    }
  } catch (IOException e) {
    LOG.error(e);
  }

  // see if sparseMetrics is supported. Default is false
  supportSparseMetrics = conf.getBoolean(SUPPORT_SPARSE_METRICS_PROPERTY,
      SUPPORT_SPARSE_METRICS_DEFAULT);
}
 
Example 18
Source File: DataNode.java    From RDFS with Apache License 2.0
private void initConfig(Configuration conf) throws IOException {
  if (conf.get("slave.host.name") != null) {
    machineName = conf.get("slave.host.name");   
  }
  if (machineName == null) {
    machineName = DNS.getDefaultHost(
        conf.get("dfs.datanode.dns.interface", "default"),
        conf.get("dfs.datanode.dns.nameserver", "default"));
  }
  // Allow configuration to delay block reports to find bugs
  artificialBlockReceivedDelay = conf.getInt(
    "dfs.datanode.artificialBlockReceivedDelay", 0);
  if (conf.getBoolean(
      ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils
        .newInstance(conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
            HDFSPolicyProvider.class, PolicyProvider.class), conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
      HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
      HdfsConstants.WRITE_TIMEOUT);
  this.socketWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);
  
  /* Based on results on different platforms, we might need to set the
   * default to false on some of them. */
  this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed",
                                           true);

  // TODO: remove the global setting and change data protocol to support
  // per session setting for this value.
  this.ignoreChecksumWhenRead = conf.getBoolean("dfs.datanode.read.ignore.checksum",
      false);

  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
  
  this.deletedReportInterval =
    conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
  // Calculate the full block report interval
  int fullReportMagnifier = conf.getInt("dfs.fullblockreport.magnifier", 2);
  this.blockReportInterval = fullReportMagnifier * deletedReportInterval;
  this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
  long heartbeatRecheckInterval = conf.getInt(
      "heartbeat.recheck.interval", 5 * 60 * 1000); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
      10 * heartBeatInterval;
  
  this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay",
      BLOCKREPORT_INITIAL_DELAY) * 1000L;
  if (this.initialBlockReportDelay >= blockReportInterval) {
    this.initialBlockReportDelay = 0;
    LOG.info("dfs.blockreport.initialDelay is greater than "
        + "dfs.blockreport.intervalMsec."
        + " Setting initial delay to 0 msec:");
  }

  // do we need to sync block file contents to disk when blockfile is closed?
  this.syncOnClose = conf.getBoolean("dfs.datanode.synconclose", false);
  
  this.minDiskCheckIntervalMsec = conf.getLong(
      "dfs.datnode.checkdisk.mininterval",
      FSConstants.MIN_INTERVAL_CHECK_DIR_MSEC);
}
 
Example 19
Source File: TestHDFSServerPorts.java    From big-c with Apache License 2.0
/**
 * Attempt to determine the fully qualified domain name of this host
 * for comparison during testing.
 *
 * This is necessary because for the BackupNode test to work correctly,
 * the namenode must have its HTTP server started with the fully
 * qualified address, as this is the one the backupnode will attempt to
 * start on as well.
 *
 * @return the fully qualified hostname, or 127.0.0.1 if it cannot be determined
 */
public static String getFullHostName() {
  try {
    return DNS.getDefaultHost("default");
  } catch (UnknownHostException e) {
    LOG.warn("Unable to determine hostname.  May interfere with obtaining " +
        "valid test results.");
    return "127.0.0.1";
  }
}
 
Example 20
Source File: TestHDFSServerPorts.java    From hadoop with Apache License 2.0
/**
 * Attempt to determine the fully qualified domain name of this host
 * for comparison during testing.
 *
 * This is necessary because for the BackupNode test to work correctly,
 * the namenode must have its HTTP server started with the fully
 * qualified address, as this is the one the backupnode will attempt to
 * start on as well.
 *
 * @return the fully qualified hostname, or 127.0.0.1 if it cannot be determined
 */
public static String getFullHostName() {
  try {
    return DNS.getDefaultHost("default");
  } catch (UnknownHostException e) {
    LOG.warn("Unable to determine hostname.  May interfere with obtaining " +
        "valid test results.");
    return "127.0.0.1";
  }
}