Java Code Examples for org.apache.hadoop.net.NetUtils#getServerAddress()
The following examples show how to use org.apache.hadoop.net.NetUtils#getServerAddress().
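In the older Hadoop lines these projects build on, getServerAddress() is a deprecated helper that resolves a pair of legacy bind-address/port keys against a newer combined "host:port" key and returns the resulting address string, which is then usually handed to NetUtils.createSocketAddr(). The minimal sketch below is not taken from any of the projects listed here; it simply reuses the legacy dfs.info.bindAddress / dfs.info.port / dfs.http.address keys that appear in the examples and assumes a Hadoop release old enough to still ship this deprecated method.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class GetServerAddressSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Only the new combined key is set here; with the deprecated keys unset,
    // getServerAddress() falls back to this value.
    conf.set("dfs.http.address", "0.0.0.0:50070");

    String address = NetUtils.getServerAddress(conf,
        "dfs.info.bindAddress",   // deprecated host-only key
        "dfs.info.port",          // deprecated port-only key
        "dfs.http.address");      // current "host:port" key

    // The resolved string is typically converted to a socket address
    // before binding an RPC or HTTP server to it.
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    System.out.println(socAddr.getHostName() + ":" + socAddr.getPort());
  }
}

Every example below follows this same resolve-then-bind pattern; only the configuration keys and the server being started differ.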
Example 1
Source File: DataNode.java From RDFS with Apache License 2.0
private void initDataXceiver(Configuration conf) throws IOException {
  String address = NetUtils.getServerAddress(conf,
                       "dfs.datanode.bindAddress",
                       "dfs.datanode.port",
                       "dfs.datanode.address");
  InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
  // find free port
  ServerSocket ss = (socketWriteTimeout > 0) ?
      ServerSocketChannel.open().socket() : new ServerSocket();
  Server.bind(ss, socAddr,
      conf.getInt("dfs.datanode.xceiver.listen.queue.size", 128));
  ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
  // adjust machine name with the actual port
  int tmpPort = ss.getLocalPort();
  selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
                                   tmpPort);
  LOG.info("Opened info server at " + tmpPort);

  this.threadGroup = new ThreadGroup("dataXceiverServer");
  this.dataXceiverServer = new Daemon(threadGroup,
      new DataXceiverServer(ss, conf, this));
  this.threadGroup.setDaemon(true); // auto destroy when empty
}
Example 2
Source File: TaskTracker.java From RDFS with Apache License 2.0
protected void initHttpServer(JobConf conf,
    boolean useNettyMapOutputs) throws IOException {

  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "tasktracker.http.bindAddress",
                              "tasktracker.http.port",
                              "mapred.task.tracker.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String httpBindAddress = infoSocAddr.getHostName();
  int httpPort = infoSocAddr.getPort();
  server = new HttpServer("task", httpBindAddress, httpPort,
                          httpPort == 0, conf);
  workerThreads = conf.getInt("tasktracker.http.threads", 40);
  server.setThreads(1, workerThreads);
  // let the jsp pages get to the task tracker, config, and other relevant
  // objects
  FileSystem local = FileSystem.getLocal(conf);
  this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
  server.setAttribute("task.tracker", this);
  server.setAttribute("local.file.system", local);
  server.setAttribute("conf", conf);
  server.setAttribute("log", LOG);
  server.setAttribute("localDirAllocator", localDirAllocator);
  server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
  server.setAttribute(ReconfigurationServlet.
                      CONF_SERVLET_RECONFIGURABLE_PREFIX + "/ttconfchange",
                      TaskTracker.this);
  server.setAttribute("nettyMapOutputHttpPort", nettyMapOutputHttpPort);
  server.addInternalServlet("reconfiguration", "/ttconfchange",
                            ReconfigurationServlet.class);
  server.addInternalServlet("mapOutput", "/mapOutput",
                            MapOutputServlet.class);
  server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class);
  server.start();
  this.httpPort = server.getPort();
  checkJettyPort();
}
Example 3
Source File: SnapshotNode.java From RDFS with Apache License 2.0
/**
 * Returns the jetty image server that the Namenode is listening on.
 * @throws IOException
 */
private String getImageServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!"hdfs".equals(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }
  return NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
                                   "dfs.info.port", "dfs.http.address");
}
Example 4
Source File: Standby.java From RDFS with Apache License 2.0
/**
 * Initialize the webserver so that the primary namenode can fetch
 * transaction logs from standby via http.
 */
void initSecondary(Configuration conf) throws IOException {

  nameNodeAddr = avatarNode.getRemoteNamenodeAddress(conf);
  this.primaryNamenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);
  fsName = avatarNode.getRemoteNamenodeHttpName(conf);

  // Initialize other scheduling parameters from the configuration
  checkpointEnabled = conf.getBoolean("fs.checkpoint.enabled", false);
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
                              tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", fsImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage",
                                GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
  LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
Example 5
Source File: SecondaryNameNode.java From RDFS with Apache License 2.0
/**
 * Returns the Jetty server that the Namenode is listening on.
 */
private String getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!"hdfs".equals(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }
  return NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
                                   "dfs.info.port", "dfs.http.address");
}
Example 6
Source File: DataNode.java From RDFS with Apache License 2.0
private void startInfoServer(Configuration conf) throws IOException {
  String infoAddr = NetUtils.getServerAddress(conf,
                        "dfs.datanode.info.bindAddress",
                        "dfs.datanode.info.port",
                        "dfs.datanode.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoHost = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort,
                                   tmpInfoPort == 0, conf);
  if (conf.getBoolean("dfs.https.enable", false)) {
    boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth",
                                             false);
    InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
        "dfs.datanode.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(false);
    sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
        "ssl-server.xml"));
    this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    // assume same ssl port for all datanodes
    InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
        "dfs.datanode.https.address", infoHost + ":" + 50475));
    this.infoServer.setAttribute("datanode.https.port",
                                 datanodeSslPort.getPort());
  }
  this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
  this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
      FileChecksumServlets.GetServlet.class);
  this.infoServer.setAttribute("datanode", this);
  this.infoServer.addServlet(null, "/blockScannerReport",
                             DataBlockScannerSet.Servlet.class);
  this.infoServer.setAttribute(
      ReconfigurationServlet.CONF_SERVLET_RECONFIGURABLE_PREFIX +
      CONF_SERVLET_PATH, DataNode.this);
  this.infoServer.addServlet("dnConf", CONF_SERVLET_PATH,
                             ReconfigurationServlet.class);
  this.infoServer.start();
}
Example 7
Source File: SecondaryNameNode.java From hadoop-gpu with Apache License 2.0
/**
 * Returns the Jetty server that the Namenode is listening on.
 */
private String getInfoServer() throws IOException {
  URI fsName = FileSystem.getDefaultUri(conf);
  if (!"hdfs".equals(fsName.getScheme())) {
    throw new IOException("This is not a DFS");
  }
  return NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
                                   "dfs.info.port", "dfs.http.address");
}
Example 8
Source File: NameNode.java From RDFS with Apache License 2.0
private void startHttpServer(Configuration conf) throws IOException {
  String infoAddr = NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
      "dfs.info.port", "dfs.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoHost = infoSocAddr.getHostName();
  int infoPort = infoSocAddr.getPort();
  this.httpServer = new HttpServer("hdfs", infoHost, infoPort,
                                   infoPort == 0, conf);
  if (conf.getBoolean("dfs.https.enable", false)) {
    boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth",
                                             false);
    InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
        "dfs.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(false);
    sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
        "ssl-server.xml"));
    this.httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    // assume same ssl port for all datanodes
    InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
        "dfs.datanode.https.address", infoHost + ":" + 50475));
    this.httpServer.setAttribute("datanode.https.port",
                                 datanodeSslPort.getPort());
  }
  this.httpServer.setAttribute("name.node", this);
  this.httpServer.setAttribute("name.node.address", getNameNodeAddress());
  this.httpServer.setAttribute("name.system.image", getFSImage());
  this.httpServer.setAttribute("name.conf", conf);
  this.httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class);
  this.httpServer.addInternalServlet("getimage", "/getimage",
      GetImageServlet.class);
  this.httpServer.addInternalServlet("listPaths", "/listPaths/*",
      ListPathsServlet.class);
  this.httpServer.addInternalServlet("data", "/data/*",
      FileDataServlet.class);
  this.httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class);
  this.httpServer.addInternalServlet("namenodeMXBean", "/namenodeMXBean",
      NameNodeMXBeanServlet.class);
  httpServer.setAttribute(ReconfigurationServlet.
      CONF_SERVLET_RECONFIGURABLE_PREFIX + CONF_SERVLET_PATH, NameNode.this);
  httpServer.addInternalServlet("nnconfchange", CONF_SERVLET_PATH,
      ReconfigurationServlet.class);
  this.httpServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = this.httpServer.getPort();
  this.httpAddress = new InetSocketAddress(infoHost, infoPort);
  conf.set("dfs.http.address", infoHost + ":" + infoPort);
  LOG.info("Web-server up at: " + infoHost + ":" + infoPort);
}
Example 9
Source File: SecondaryNameNode.java From RDFS with Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(Configuration conf) throws IOException {
  // initiate Java VM metrics
  JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getAddress(conf);

  this.conf = conf;
  this.namenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointImage = new CheckpointStorage(conf);
  checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
                              tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", checkpointImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage",
                                GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
  LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
Example 10
Source File: DFSck.java From RDFS with Apache License 2.0
protected String getInfoServer() throws Exception {
  return NetUtils.getServerAddress(getConf(), "dfs.info.bindAddress",
                                   "dfs.info.port", "dfs.http.address");
}
Example 11
Source File: TaskTracker.java From hadoop-gpu with Apache License 2.0
/**
 * Start with the local machine name, and the default JobTracker
 */
public TaskTracker(JobConf conf) throws IOException {
  originalConf = conf;
  //maxCurrentMapTasks = conf.getInt(
  //    "mapred.tasktracker.map.tasks.maximum", 2);
  maxCurrentCPUMapTasks = conf.getInt(
      "mapred.tasktracker.map.cpu.tasks.maximum", 2);
  maxCurrentGPUMapTasks = conf.getInt(
      "mapred.tasktracker.map.gpu.tasks.maximum", 0);
  maxCurrentMapTasks = maxCurrentCPUMapTasks + maxCurrentGPUMapTasks;
  maxCurrentReduceTasks = conf.getInt(
      "mapred.tasktracker.reduce.tasks.maximum", 2);
  this.jobTrackAddr = JobTracker.getAddress(conf);
  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "tasktracker.http.bindAddress",
                              "tasktracker.http.port",
                              "mapred.task.tracker.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String httpBindAddress = infoSocAddr.getHostName();
  int httpPort = infoSocAddr.getPort();
  this.server = new HttpServer("task", httpBindAddress, httpPort,
                               httpPort == 0, conf);
  workerThreads = conf.getInt("tasktracker.http.threads", 40);
  this.shuffleServerMetrics = new ShuffleServerMetrics(conf);
  server.setThreads(1, workerThreads);
  // let the jsp pages get to the task tracker, config, and other relevant
  // objects
  FileSystem local = FileSystem.getLocal(conf);
  this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
  server.setAttribute("task.tracker", this);
  server.setAttribute("local.file.system", local);
  server.setAttribute("conf", conf);
  server.setAttribute("log", LOG);
  server.setAttribute("localDirAllocator", localDirAllocator);
  server.setAttribute("shuffleServerMetrics", shuffleServerMetrics);
  server.addInternalServlet("mapOutput", "/mapOutput",
                            MapOutputServlet.class);
  server.addInternalServlet("taskLog", "/tasklog", TaskLogServlet.class);
  server.start();
  this.httpPort = server.getPort();
  checkJettyPort(httpPort);
  initialize();
}
Example 12
Source File: NameNode.java From hadoop-gpu with Apache License 2.0
private void startHttpServer(Configuration conf) throws IOException {
  String infoAddr = NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
      "dfs.info.port", "dfs.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  String infoHost = infoSocAddr.getHostName();
  int infoPort = infoSocAddr.getPort();
  this.httpServer = new HttpServer("hdfs", infoHost, infoPort,
                                   infoPort == 0, conf);
  if (conf.getBoolean("dfs.https.enable", false)) {
    boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth",
                                             false);
    InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
        "dfs.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(false);
    sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
        "ssl-server.xml"));
    this.httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    // assume same ssl port for all datanodes
    InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
        "dfs.datanode.https.address", infoHost + ":" + 50475));
    this.httpServer.setAttribute("datanode.https.port",
                                 datanodeSslPort.getPort());
  }
  this.httpServer.setAttribute("name.node", this);
  this.httpServer.setAttribute("name.node.address", getNameNodeAddress());
  this.httpServer.setAttribute("name.system.image", getFSImage());
  this.httpServer.setAttribute("name.conf", conf);
  this.httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class);
  this.httpServer.addInternalServlet("getimage", "/getimage",
      GetImageServlet.class);
  this.httpServer.addInternalServlet("listPaths", "/listPaths/*",
      ListPathsServlet.class);
  this.httpServer.addInternalServlet("data", "/data/*",
      FileDataServlet.class);
  this.httpServer.addInternalServlet("checksum", "/fileChecksum/*",
      FileChecksumServlets.RedirectServlet.class);
  this.httpServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = this.httpServer.getPort();
  this.httpAddress = new InetSocketAddress(infoHost, infoPort);
  conf.set("dfs.http.address", infoHost + ":" + infoPort);
  LOG.info("Web-server up at: " + infoHost + ":" + infoPort);
}
Example 13
Source File: SecondaryNameNode.java From hadoop-gpu with Apache License 2.0
/**
 * Initialize SecondaryNameNode.
 */
private void initialize(Configuration conf) throws IOException {
  // initiate Java VM metrics
  JvmMetrics.init("SecondaryNameNode", conf.get("session.id"));

  // Create connection to the namenode.
  shouldRun = true;
  nameNodeAddr = NameNode.getAddress(conf);

  this.conf = conf;
  this.namenode =
      (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
          NamenodeProtocol.versionID, nameNodeAddr, conf);

  // initialize checkpoint directories
  fsName = getInfoServer();
  checkpointDirs = FSImage.getCheckpointDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
      "/tmp/hadoop/dfs/namesecondary");
  checkpointImage = new CheckpointStorage();
  checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);

  // Initialize other scheduling parameters from the configuration
  checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
  checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

  // initialize the webserver for uploading files.
  String infoAddr =
    NetUtils.getServerAddress(conf,
                              "dfs.secondary.info.bindAddress",
                              "dfs.secondary.info.port",
                              "dfs.secondary.http.address");
  InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
  infoBindAddress = infoSocAddr.getHostName();
  int tmpInfoPort = infoSocAddr.getPort();
  infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
                              tmpInfoPort == 0, conf);
  infoServer.setAttribute("name.system.image", checkpointImage);
  this.infoServer.setAttribute("name.conf", conf);
  infoServer.addInternalServlet("getimage", "/getimage",
                                GetImageServlet.class);
  infoServer.start();

  // The web-server port can be ephemeral... ensure we have the correct info
  infoPort = infoServer.getPort();
  conf.set("dfs.secondary.http.address", infoBindAddress + ":" + infoPort);
  LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
  LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +
           "(" + checkpointPeriod/60 + " min)");
  LOG.warn("Log Size Trigger :" + checkpointSize + " bytes " +
           "(" + checkpointSize/1024 + " KB)");
}
Example 14
Source File: DFSck.java From hadoop-gpu with Apache License 2.0
private String getInfoServer() throws IOException {
  return NetUtils.getServerAddress(getConf(), "dfs.info.bindAddress",
                                   "dfs.info.port", "dfs.http.address");
}