org.apache.hadoop.net.ScriptBasedMapping Java Examples

The following examples show how to use org.apache.hadoop.net.ScriptBasedMapping. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: RackResolver.java    From hadoop with Apache License 2.0 6 votes vote down vote up
public static synchronized void init(Configuration conf) {
  // Guard clause: only the very first call performs initialization;
  // every later call is a no-op.
  if (initCalled) {
    return;
  }
  initCalled = true;

  // Resolve the configured DNSToSwitchMapping implementation, falling back
  // to ScriptBasedMapping when no class is configured.
  Class<? extends DNSToSwitchMapping> mappingClass = conf.getClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      ScriptBasedMapping.class,
      DNSToSwitchMapping.class);
  try {
    DNSToSwitchMapping rawMapping =
        ReflectionUtils.newInstance(mappingClass, conf);
    // Wrap non-caching implementations in CachedDNSToSwitchMapping to save
    // on repetitive lookups; skip the wrap if the instance already caches,
    // to avoid double caching.
    if (rawMapping instanceof CachedDNSToSwitchMapping) {
      dnsToSwitchMapping = rawMapping;
    } else {
      dnsToSwitchMapping = new CachedDNSToSwitchMapping(rawMapping);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example #2
Source File: RackResolver.java    From big-c with Apache License 2.0 6 votes vote down vote up
public static synchronized void init(Configuration conf) {
  // Idempotent: the first caller flips the flag, all others return early.
  if (initCalled) {
    return;
  }
  initCalled = true;

  final Class<? extends DNSToSwitchMapping> implClass =
      conf.getClass(
          CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class,
          DNSToSwitchMapping.class);
  try {
    final DNSToSwitchMapping mapping =
        ReflectionUtils.newInstance(implClass, conf);
    // Ensure lookups are cached: reuse the instance if it already caches,
    // otherwise decorate it with CachedDNSToSwitchMapping (avoids both
    // repetitive resolution and double caching).
    dnsToSwitchMapping = (mapping instanceof CachedDNSToSwitchMapping)
        ? mapping
        : new CachedDNSToSwitchMapping(mapping);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example #3
Source File: TestRackResolverScriptBasedMapping.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Test
public void testScriptName() {
  Configuration conf = new Configuration();
  conf
      .setClass(
          CommonConfigurationKeysPublic.
              NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class, DNSToSwitchMapping.class);
  conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
      "testScript");
  RackResolver.init(conf);
  // Fix: JUnit's assertEquals takes (expected, actual). The original call
  // had the arguments swapped, so a failure would report the literal string
  // as the "actual" value and the resolved mapping as the "expected" one,
  // producing a misleading diagnostic.
  Assert.assertEquals("script-based mapping with script testScript",
      RackResolver.getDnsToSwitchMapping().toString());
}
 
Example #4
Source File: TestRackResolverScriptBasedMapping.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Test
public void testScriptName() {
  Configuration conf = new Configuration();
  conf
      .setClass(
          CommonConfigurationKeysPublic.
              NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
          ScriptBasedMapping.class, DNSToSwitchMapping.class);
  conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY,
      "testScript");
  RackResolver.init(conf);
  // Fix: JUnit's assertEquals takes (expected, actual). The original call
  // passed them in reverse order, which makes failure messages report the
  // values under the wrong labels.
  Assert.assertEquals("script-based mapping with script testScript",
      RackResolver.getDnsToSwitchMapping().toString());
}
 
Example #5
Source File: TopologyCache.java    From RDFS with Apache License 2.0 5 votes vote down vote up
public TopologyCache(Configuration conf) {
  super(conf);

  dnsToSwitchMapping = ReflectionUtils.newInstance
    (conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
                   DNSToSwitchMapping.class), conf);
  LOG.info("DnsToSwitchMapping class = " + dnsToSwitchMapping.getClass().getName());
}
 
Example #6
Source File: SwiftNativeFileSystemStore.java    From sahara-extra with Apache License 2.0 5 votes vote down vote up
/**
 * Initialize the filesystem store - this creates the REST client binding.
 *
 * @param fsURI         URI of the filesystem, which is used to map to the
 *                      filesystem-specific options in the configuration file
 * @param configuration configuration
 * @throws IOException on any failure.
 */
public void initialize(URI fsURI, Configuration configuration) throws IOException {
  this.uri = fsURI;
  // Instantiate the configured DNS-to-switch mapping (defaults to
  // ScriptBasedMapping) before binding the REST client.
  Class<? extends DNSToSwitchMapping> mappingImpl = configuration.getClass(
      "topology.node.switch.mapping.impl",
      ScriptBasedMapping.class,
      DNSToSwitchMapping.class);
  dnsToSwitchMapping = ReflectionUtils.newInstance(mappingImpl, configuration);

  this.swiftRestClient = SwiftRestClient.getInstance(fsURI, configuration);
}
 
Example #7
Source File: RackManager.java    From hbase with Apache License 2.0 4 votes vote down vote up
public RackManager(Configuration conf) {
  // Determine the rack-resolver class from "hbase.util.ip.to.rack.determiner"
  // (ScriptBasedMapping by default) and instantiate it through its
  // (Configuration) constructor.
  String mappingClassName = conf.getClass("hbase.util.ip.to.rack.determiner",
      ScriptBasedMapping.class, DNSToSwitchMapping.class).getName();
  switchMapping = ReflectionUtils.instantiateWithCustomCtor(mappingClassName,
      new Class<?>[] { Configuration.class }, new Object[] { conf });
}
 
Example #8
Source File: DFSClient.java    From RDFS with Apache License 2.0 4 votes vote down vote up
/**
 * Create a new DFSClient connected to the given nameNodeAddr or rpcNamenode.
 * Exactly one of nameNodeAddr or rpcNamenode must be null.
 *
 * @param nameNodeAddr address of the NameNode to connect to, or null when
 *                     rpcNamenode is supplied directly (testing path)
 * @param rpcNamenode  pre-built ClientProtocol proxy, or null to create one
 *                     from nameNodeAddr
 * @param conf         configuration driving all timeouts, sizes and mappings
 * @param stats        per-filesystem statistics collector
 * @throws IOException on login failure or RPC setup failure
 */
DFSClient(InetSocketAddress nameNodeAddr, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
  throws IOException {
  this.conf = conf;
  this.stats = stats;
  // Read/write socket timeouts plus the per-operation extension values.
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
                                   HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.timeoutValue = this.socketTimeout;
  this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
                                          HdfsConstants.WRITE_TIMEOUT);
  this.datanodeWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);    
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  // dfs.write.packet.size is an internal config variable
  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
  this.minReadSpeedBps = conf.getLong("dfs.min.read.speed.bps", -1);
  this.maxBlockAcquireFailures = getMaxBlockAcquireFailures(conf);
  this.localHost = InetAddress.getLocalHost();
  
  // fetch network location of localhost
  // A pseudo DatanodeInfo representing this client host, used for
  // locality decisions; its network location is filled in below.
  this.pseuDatanodeInfoForLocalhost = new DatanodeInfo(new DatanodeID(
      this.localHost.getHostAddress()));
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
        DNSToSwitchMapping.class), conf);
  ArrayList<String> tempList = new ArrayList<String>();
  tempList.add(this.localHost.getHostName());
  List<String> retList = dnsToSwitchMapping.resolve(tempList);
  // resolve() may return null or an empty list when the mapping fails;
  // in that case the pseudo datanode keeps its default network location.
  if (retList != null && retList.size() > 0) {
    localhostNetworkLocation = retList.get(0);
    this.pseuDatanodeInfoForLocalhost.setNetworkLocation(localhostNetworkLocation);
  }

  // The hdfsTimeout is currently the same as the ipc timeout
  this.hdfsTimeout = Client.getTimeout(conf);

  this.closeFileTimeout = conf.getLong("dfs.client.closefile.timeout", this.hdfsTimeout);

  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    // Wrap login failures as IOException, preserving the cause.
    throw (IOException)(new IOException().initCause(e));
  }

  // Build a unique client name: task-scoped inside MapReduce tasks
  // (mapred.task.id is set), random otherwise.
  String taskId = conf.get("mapred.task.id");
  if (taskId != null) {
    this.clientName = "DFSClient_" + taskId + "_" + r.nextInt()
                    + "_" + Thread.currentThread().getId();
  } else {
    this.clientName = "DFSClient_" + r.nextInt();
  }
  defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
  defaultReplication = (short) conf.getInt("dfs.replication", 3);

  // Exactly one of nameNodeAddr / rpcNamenode must be non-null (see javadoc).
  if (nameNodeAddr != null && rpcNamenode == null) {
    this.nameNodeAddr = nameNodeAddr;
    getNameNode();
  } else if (nameNodeAddr == null && rpcNamenode != null) {
    //This case is used for testing.
    if (rpcNamenode instanceof NameNode) {
      this.namenodeProtocolProxy = createRPCNamenode(((NameNode)rpcNamenode).getNameNodeAddress(), conf, ugi);
    }
    this.namenode = this.rpcNamenode = rpcNamenode;
  } else {
    throw new IllegalArgumentException(
        "Expecting exactly one of nameNodeAddr and rpcNamenode being null: "
        + "nameNodeAddr=" + nameNodeAddr + ", rpcNamenode=" + rpcNamenode);
  }
  // read directly from the block file if configured.
  this.shortCircuitLocalReads = conf.getBoolean("dfs.read.shortcircuit", false);
  if (this.shortCircuitLocalReads) {
    LOG.debug("Configured to shortcircuit reads to " + localHost);
  }
  this.leasechecker = new LeaseChecker(this.clientName, this.conf);
  // by default, if the ipTosValue is less than 0(for example -1), 
  // we will not set it in the socket.
  this.ipTosValue = conf.getInt("dfs.client.tos.value", 
  							  NetUtils.NOT_SET_IP_TOS);
  // Out-of-range TOS values are ignored rather than rejected.
  if (this.ipTosValue > NetUtils.IP_TOS_MAX_VALUE) {
  	LOG.warn("dfs.client.tos.value " + ipTosValue + 
  			 " exceeds the max allowed value " + NetUtils.IP_TOS_MAX_VALUE + 
  			 ", will not take affect");
  	this.ipTosValue = NetUtils.NOT_SET_IP_TOS;
  }
}
 
Example #9
Source File: FSNamesystem.java    From hadoop-gpu with Apache License 2.0 4 votes vote down vote up
/**
 * Initialize FSNamesystem.
 *
 * Loads the FSImage, enters safe mode, starts the heartbeat, lease,
 * replication and decommission monitor daemons, and sets up the
 * DNS-to-switch mapping used for rack awareness.
 *
 * @param nn   the owning NameNode (source of the RPC address)
 * @param conf configuration for directories, timeouts and intervals
 * @throws IOException if loading the namespace image/edits fails
 */
private void initialize(NameNode nn, Configuration conf) throws IOException {
  this.systemStart = now();
  setConfigurationParameters(conf);

  this.nameNodeAddress = nn.getNameNodeAddress();
  this.registerMBean(conf); // register the MBean for the FSNamesystemStatus
  this.dir = new FSDirectory(this, conf);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  this.dir.loadFSImage(getNamespaceDirs(conf),
                       getNamespaceEditsDirs(conf), startOpt);
  long timeTakenToLoadFSImage = now() - systemStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNode.getNameNodeMetrics().fsImageLoadTime.set(
                            (int) timeTakenToLoadFSImage);
  // Safe mode must be set up after the image is loaded, since
  // setBlockTotal() needs the loaded block count.
  this.safeMode = new SafeModeInfo(conf);
  setBlockTotal();
  // Timeout < 0 (the -1 default, scaled to millis) means pending
  // replications never time out.
  pendingReplications = new PendingReplicationBlocks(
                          conf.getInt("dfs.replication.pending.timeout.sec", 
                                      -1) * 1000L);
  // Background daemons: heartbeat monitoring, lease expiry, replication.
  this.hbthread = new Daemon(new HeartbeatMonitor());
  this.lmthread = new Daemon(leaseManager.new Monitor());
  this.replthread = new Daemon(new ReplicationMonitor());
  hbthread.start();
  lmthread.start();
  replthread.start();

  // Include/exclude host lists; empty defaults mean no restriction.
  this.hostsReader = new HostsFileReader(conf.get("dfs.hosts",""),
                                         conf.get("dfs.hosts.exclude",""));
  this.dnthread = new Daemon(new DecommissionManager(this).new Monitor(
      conf.getInt("dfs.namenode.decommission.interval", 30),
      conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5)));
  dnthread.start();

  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
          DNSToSwitchMapping.class), conf);
  
  /* If the dns to switch mapping supports cache, resolve network 
   * locations of those hosts in the include list, 
   * and store the mapping in the cache; so future calls to resolve
   * will be fast.
   */
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
  }
}