org.apache.hadoop.util.ServicePlugin Java Examples
The following examples show how to use
org.apache.hadoop.util.ServicePlugin.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You can also check out the related API usage in the sidebar.
Example #1
Source File: HddsDatanodeService.java From hadoop-ozone with Apache License 2.0 | 6 votes |
/** * Starts all the service plugins which are configured using * OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY. */ private void startPlugins() { try { plugins = conf.getInstances(HDDS_DATANODE_PLUGINS_KEY, ServicePlugin.class); } catch (RuntimeException e) { String pluginsValue = conf.get(HDDS_DATANODE_PLUGINS_KEY); LOG.error("Unable to load HDDS DataNode plugins. " + "Specified list of plugins: {}", pluginsValue, e); throw e; } for (ServicePlugin plugin : plugins) { try { plugin.start(this); LOG.info("Started plug-in {}", plugin); } catch (Throwable t) { LOG.warn("ServicePlugin {} could not be started", plugin, t); } } }
Example #2
Source File: HddsDatanodeService.java From hadoop-ozone with Apache License 2.0 | 6 votes |
@Override public void stop() { if (!isStopped.get()) { isStopped.set(true); if (plugins != null) { for (ServicePlugin plugin : plugins) { try { plugin.stop(); LOG.info("Stopped plug-in {}", plugin); } catch (Throwable t) { LOG.warn("ServicePlugin {} could not be stopped", plugin, t); } } } if (datanodeStateMachine != null) { datanodeStateMachine.stopDaemon(); } if (httpServer != null) { try { httpServer.stop(); } catch (Exception e) { LOG.error("Stopping HttpServer is failed.", e); } } } }
Example #3
Source File: NameNode.java From hadoop with Apache License 2.0 | 6 votes |
/** Start the services common to active and standby states */ private void startCommonServices(Configuration conf) throws IOException { namesystem.startCommonServices(conf, haContext); registerNNSMXBean(); if (NamenodeRole.NAMENODE != role) { startHttpServer(conf); httpServer.setNameNodeAddress(getNameNodeAddress()); httpServer.setFSImage(getFSImage()); } rpcServer.start(); plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY, ServicePlugin.class); for (ServicePlugin p: plugins) { try { p.start(this); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be started", t); } } LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress()); if (rpcServer.getServiceRpcAddress() != null) { LOG.info(getRole() + " service RPC up at: " + rpcServer.getServiceRpcAddress()); } }
Example #4
Source File: NameNode.java From big-c with Apache License 2.0 | 6 votes |
/** Start the services common to active and standby states */ private void startCommonServices(Configuration conf) throws IOException { namesystem.startCommonServices(conf, haContext); registerNNSMXBean(); if (NamenodeRole.NAMENODE != role) { startHttpServer(conf); httpServer.setNameNodeAddress(getNameNodeAddress()); httpServer.setFSImage(getFSImage()); } rpcServer.start(); plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY, ServicePlugin.class); for (ServicePlugin p: plugins) { try { p.start(this); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be started", t); } } LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress()); if (rpcServer.getServiceRpcAddress() != null) { LOG.info(getRole() + " service RPC up at: " + rpcServer.getServiceRpcAddress()); } }
Example #5
Source File: HddsDatanodeService.java From hadoop-ozone with Apache License 2.0 | 5 votes |
@Override public void close() { if (plugins != null) { for (ServicePlugin plugin : plugins) { try { plugin.close(); } catch (Throwable t) { LOG.warn("ServicePlugin {} could not be closed", plugin, t); } } } }
Example #6
Source File: TestHddsDatanodeService.java From hadoop-ozone with Apache License 2.0 | 5 votes |
@Before public void setUp() { testDir = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, MockService.class, ServicePlugin.class); String volumeDir = testDir + "/disk1"; conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir); }
Example #7
Source File: TestHddsSecureDatanodeInit.java From hadoop-ozone with Apache License 2.0 | 5 votes |
@BeforeClass public static void setUp() throws Exception { testDir = GenericTestUtils.getRandomizedTestDir(); conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost"); String volumeDir = testDir + "/disk1"; conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir); conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY, TestHddsDatanodeService.MockService.class, ServicePlugin.class); securityConfig = new SecurityConfig(conf); service = HddsDatanodeService.createHddsDatanodeService(args); dnLogs = GenericTestUtils.LogCapturer.captureLogs(getLogger()); callQuietly(() -> { service.start(conf); return null; }); callQuietly(() -> { service.initializeCertificateClient(conf); return null; }); certCodec = new CertificateCodec(securityConfig, DN_COMPONENT); keyCodec = new KeyCodec(securityConfig, DN_COMPONENT); dnLogs.clearOutput(); privateKey = service.getCertificateClient().getPrivateKey(); publicKey = service.getCertificateClient().getPublicKey(); X509Certificate x509Certificate = null; x509Certificate = KeyStoreTestUtil.generateCertificate( "CN=Test", new KeyPair(publicKey, privateKey), 10, securityConfig.getSignatureAlgo()); certHolder = new X509CertificateHolder(x509Certificate.getEncoded()); }
Example #8
Source File: NameNode.java From hadoop with Apache License 2.0 | 5 votes |
private void stopCommonServices() { if(rpcServer != null) rpcServer.stop(); if(namesystem != null) namesystem.close(); if (pauseMonitor != null) pauseMonitor.stop(); if (plugins != null) { for (ServicePlugin p : plugins) { try { p.stop(); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be stopped", t); } } } stopHttpServer(); }
Example #9
Source File: DataNode.java From hadoop with Apache License 2.0 | 5 votes |
private void startPlugins(Configuration conf) { plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class); for (ServicePlugin p: plugins) { try { p.start(this); LOG.info("Started plug-in " + p); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be started", t); } } }
Example #10
Source File: NameNode.java From big-c with Apache License 2.0 | 5 votes |
private void stopCommonServices() { if(rpcServer != null) rpcServer.stop(); if(namesystem != null) namesystem.close(); if (pauseMonitor != null) pauseMonitor.stop(); if (plugins != null) { for (ServicePlugin p : plugins) { try { p.stop(); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be stopped", t); } } } stopHttpServer(); }
Example #11
Source File: DataNode.java From big-c with Apache License 2.0 | 5 votes |
private void startPlugins(Configuration conf) { plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class); for (ServicePlugin p: plugins) { try { p.start(this); LOG.info("Started plug-in " + p); } catch (Throwable t) { LOG.warn("ServicePlugin " + p + " could not be started", t); } } }