org.apache.hadoop.util.ShutdownHookManager Java Examples

The following examples show how to use org.apache.hadoop.util.ShutdownHookManager. Each example lists the source file and project it was taken from, along with that project's license.
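Before the examples, here is a minimal sketch of the ShutdownHookManager calls they exercise: the singleton accessor get(), addShutdownHook(Runnable, int priority), removeShutdownHook(Runnable), and isShutdownInProgress(). The class name and priority value below are illustrative placeholders, not taken from any of the projects listed here; the sketch only assumes a Hadoop version that exposes these methods with the signatures used throughout the examples.

import org.apache.hadoop.util.ShutdownHookManager;

public class ShutdownHookSketch {

  // Illustrative value; real services define their own SHUTDOWN_HOOK_PRIORITY constants.
  private static final int SHUTDOWN_HOOK_PRIORITY = 10;

  public static void main(String[] args) {
    // Register a hook; hooks registered with a higher priority are run earlier
    // during JVM shutdown.
    Runnable hook = () -> System.out.println("Releasing resources before shutdown");
    ShutdownHookManager.get().addShutdownHook(hook, SHUTDOWN_HOOK_PRIORITY);

    // A hook can be removed again, e.g. when a component restarts in-process
    // (see the NodeManager examples below).
    ShutdownHookManager.get().removeShutdownHook(hook);

    // Long-running code can check whether shutdown has already begun before
    // deciding to call System.exit or ExitUtil.terminate
    // (see the Task and NodeManager examples below).
    if (!ShutdownHookManager.get().isShutdownInProgress()) {
      // ... normal work ...
    }
  }
}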
Example #1
Source File: SpanReceiverHost.java    From big-c with Apache License 2.0
public static SpanReceiverHost get(Configuration conf, String confPrefix) {
  synchronized (SpanReceiverHost.class) {
    SpanReceiverHost host = hosts.get(confPrefix);
    if (host != null) {
      return host;
    }
    final SpanReceiverHost newHost = new SpanReceiverHost(confPrefix);
    newHost.loadSpanReceivers(conf);
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
        public void run() {
          newHost.closeReceivers();
        }
      }, 0);
    hosts.put(confPrefix, newHost);
    return newHost;
  }
}
 
Example #2
Source File: TajoAsyncDispatcher.java    From incubator-tajo with Apache License 2.0
@SuppressWarnings("unchecked")
protected void dispatch(Event event) {
  //all events go thru this loop
  if (LOG.isDebugEnabled()) {
    LOG.debug("Dispatching the event " + event.getClass().getName() + "."
        + event.toString());
  }
  Class<? extends Enum> type = event.getType().getDeclaringClass();

  try {
    EventHandler handler = eventDispatchers.get(type);
    if (handler != null) {
      handler.handle(event);
    } else {
      throw new Exception("No handler registered for " + type);
    }
  } catch (Throwable t) {
    // TODO Maybe log the state of the queue
    LOG.fatal("Error in dispatcher thread: " + event.getType(), t);
    if (exitOnDispatchException
        && !ShutdownHookManager.get().isShutdownInProgress()) {
      LOG.info("Exiting, bye..");
      System.exit(-1);
    }
  }
}
 
Example #3
Source File: SharedCacheManager.java    From hadoop with Apache License 2.0
public static void main(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(SharedCacheManager.class, args, LOG);
  try {
    Configuration conf = new YarnConfiguration();
    SharedCacheManager sharedCacheManager = new SharedCacheManager();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(sharedCacheManager),
        SHUTDOWN_HOOK_PRIORITY);
    sharedCacheManager.init(conf);
    sharedCacheManager.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting SharedCacheManager", t);
    System.exit(-1);
  }
}
 
Example #4
Source File: NodeManager.java    From hadoop with Apache License 2.0
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
  try {

    // Remove the old hook if we are rebooting.
    if (hasToReboot && null != nodeManagerShutdownHook) {
      ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
    }

    nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
    ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
                                              SHUTDOWN_HOOK_PRIORITY);
    // System exit should be called only when NodeManager is instantiated from
    // main() function
    this.shouldExitOnShutdownEvent = true;
    this.init(conf);
    this.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting NodeManager", t);
    System.exit(-1);
  }
}
 
Example #5
Source File: ResourceManager.java    From hadoop with Apache License 2.0
public static void main(String argv[]) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
  try {
    Configuration conf = new YarnConfiguration();
    GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
    argv = hParser.getRemainingArgs();
    // If -format-state-store, then delete RMStateStore; else startup normally
    if (argv.length == 1 && argv[0].equals("-format-state-store")) {
      deleteRMStateStore(conf);
    } else {
      ResourceManager resourceManager = new ResourceManager();
      ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(resourceManager),
        SHUTDOWN_HOOK_PRIORITY);
      resourceManager.init(conf);
      resourceManager.start();
    }
  } catch (Throwable t) {
    LOG.fatal("Error starting ResourceManager", t);
    System.exit(-1);
  }
}
 
Example #6
Source File: ApplicationHistoryServer.java    From hadoop with Apache License 2.0
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread
    .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
    LOG);
  ApplicationHistoryServer appHistoryServer = null;
  try {
    appHistoryServer = new ApplicationHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
      new CompositeServiceShutdownHook(appHistoryServer),
      SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    new GenericOptionsParser(conf, args);
    appHistoryServer.init(conf);
    appHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return appHistoryServer;
}
 
Example #7
Source File: NodeManager.java    From hadoop with Apache License 2.0
protected void shutDown() {
  new Thread() {
    @Override
    public void run() {
      try {
        NodeManager.this.stop();
      } catch (Throwable t) {
        LOG.error("Error while shutting down NodeManager", t);
      } finally {
        if (shouldExitOnShutdownEvent
            && !ShutdownHookManager.get().isShutdownInProgress()) {
          ExitUtil.terminate(-1);
        }
      }
    }
  }.start();
}
 
Example #8
Source File: ResourceManager.java    From big-c with Apache License 2.0
public static void main(String argv[]) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
  try {
    Configuration conf = new YarnConfiguration();
    GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
    argv = hParser.getRemainingArgs();
    // If -format-state-store, then delete RMStateStore; else startup normally
    if (argv.length == 1 && argv[0].equals("-format-state-store")) {
      deleteRMStateStore(conf);
    } else {
      ResourceManager resourceManager = new ResourceManager();
      ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(resourceManager),
        SHUTDOWN_HOOK_PRIORITY);
      resourceManager.init(conf);
      resourceManager.start();
    }
  } catch (Throwable t) {
    LOG.fatal("Error starting ResourceManager", t);
    System.exit(-1);
  }
}
 
Example #9
Source File: JobHistoryServer.java    From big-c with Apache License 2.0
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.
      setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer jobHistoryServer = null;
  try {
    jobHistoryServer = new JobHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(jobHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    jobHistoryServer.init(conf);
    jobHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return jobHistoryServer;
}
 
Example #10
Source File: JobHistoryServer.java    From XLearning with Apache License 2.0
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.
      setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer jobHistoryServer = null;
  try {
    jobHistoryServer = new JobHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(jobHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    jobHistoryServer.init(conf);
    jobHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return jobHistoryServer;
}
 
Example #11
Source File: AMSApplicationServer.java    From ambari-metrics with Apache License 2.0
static AMSApplicationServer launchAMSApplicationServer(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(AMSApplicationServer.class, args, LOG);
  AMSApplicationServer amsApplicationServer = null;
  try {
    amsApplicationServer = new AMSApplicationServer();
    ShutdownHookManager.get().addShutdownHook(
      new CompositeServiceShutdownHook(amsApplicationServer),
      SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    amsApplicationServer.init(conf);
    amsApplicationServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting AMSApplicationServer", t);
    ExitUtil.terminate(-1, "Error starting AMSApplicationServer");
  }
  return amsApplicationServer;
}
 
Example #12
Source File: StringUtils.java    From hadoop-ozone with Apache License 2.0
public static void startupShutdownMessage(VersionInfo versionInfo,
    Class<?> clazz, String[] args, Logger log) {
  final String hostname = NetUtils.getHostname();
  final String className = clazz.getSimpleName();
  if (log.isInfoEnabled()) {
    log.info(createStartupShutdownMessage(versionInfo, className, hostname,
        args));
  }

  if (SystemUtils.IS_OS_UNIX) {
    try {
      SignalLogger.INSTANCE.register(log);
    } catch (Throwable t) {
      log.warn("failed to register any UNIX signal loggers: ", t);
    }
  }
  ShutdownHookManager.get().addShutdownHook(
      () -> log.info(toStartupShutdownString("SHUTDOWN_MSG: ",
          "Shutting down " + className + " at " + hostname)),
      SHUTDOWN_HOOK_PRIORITY);

}
 
Example #13
Source File: Task.java    From big-c with Apache License 2.0
/**
 * Report a fatal error to the parent (task) tracker.
 */
protected void reportFatalError(TaskAttemptID id, Throwable throwable, 
                                String logMsg) {
  LOG.fatal(logMsg);
  
  if (ShutdownHookManager.get().isShutdownInProgress()) {
    return;
  }
  
  Throwable tCause = throwable.getCause();
  String cause = tCause == null 
                 ? StringUtils.stringifyException(throwable)
                 : StringUtils.stringifyException(tCause);
  try {
    umbilical.fatalError(id, cause);
  } catch (IOException ioe) {
    LOG.fatal("Failed to contact the tasktracker", ioe);
    System.exit(-1);
  }
}
 
Example #14
Source File: Task.java    From hadoop with Apache License 2.0
/**
 * Report a fatal error to the parent (task) tracker.
 */
protected void reportFatalError(TaskAttemptID id, Throwable throwable, 
                                String logMsg) {
  LOG.fatal(logMsg);
  
  if (ShutdownHookManager.get().isShutdownInProgress()) {
    return;
  }
  
  Throwable tCause = throwable.getCause();
  String cause = tCause == null 
                 ? StringUtils.stringifyException(throwable)
                 : StringUtils.stringifyException(tCause);
  try {
    umbilical.fatalError(id, cause);
  } catch (IOException ioe) {
    LOG.fatal("Failed to contact the tasktracker", ioe);
    System.exit(-1);
  }
}
 
Example #15
Source File: DFSClientCache.java    From big-c with Apache License 2.0
DFSClientCache(NfsConfiguration config, int clientCache) {
  this.config = config;
  this.clientCache = CacheBuilder.newBuilder()
      .maximumSize(clientCache)
      .removalListener(clientRemovalListener())
      .build(clientLoader());

  this.inputstreamCache = CacheBuilder.newBuilder()
      .maximumSize(DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE)
      .expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
      .removalListener(inputStreamRemovalListener())
      .build(inputStreamLoader());
  
  ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(),
      SHUTDOWN_HOOK_PRIORITY);
}
 
Example #16
Source File: FileContext.java    From big-c with Apache License 2.0
/**
 * Mark a path to be deleted on JVM shutdown.
 * 
 * @param f the existing path to delete.
 *
 * @return  true if deleteOnExit is successful, otherwise false.
 *
 * @throws AccessControlException If access is denied
 * @throws UnsupportedFileSystemException If file system for <code>f</code> is
 *           not supported
 * @throws IOException If an I/O error occurred
 * 
 * Exceptions applicable to file systems accessed over RPC:
 * @throws RpcClientException If an exception occurred in the RPC client
 * @throws RpcServerException If an exception occurred in the RPC server
 * @throws UnexpectedServerException If server implementation throws 
 *           undeclared exception to RPC server
 */
public boolean deleteOnExit(Path f) throws AccessControlException,
    IOException {
  if (!this.util().exists(f)) {
    return false;
  }
  synchronized (DELETE_ON_EXIT) {
    if (DELETE_ON_EXIT.isEmpty()) {
      ShutdownHookManager.get().addShutdownHook(FINALIZER, SHUTDOWN_HOOK_PRIORITY);
    }
    
    Set<Path> set = DELETE_ON_EXIT.get(this);
    if (set == null) {
      set = new TreeSet<Path>();
      DELETE_ON_EXIT.put(this, set);
    }
    set.add(f);
  }
  return true;
}
 
Example #17
Source File: SpanReceiverHost.java    From hadoop with Apache License 2.0
public static SpanReceiverHost get(Configuration conf, String confPrefix) {
  synchronized (SpanReceiverHost.class) {
    SpanReceiverHost host = hosts.get(confPrefix);
    if (host != null) {
      return host;
    }
    final SpanReceiverHost newHost = new SpanReceiverHost(confPrefix);
    newHost.loadSpanReceivers(conf);
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
        public void run() {
          newHost.closeReceivers();
        }
      }, 0);
    hosts.put(confPrefix, newHost);
    return newHost;
  }
}
 
Example #18
Source File: FileContext.java    From hadoop with Apache License 2.0
/**
 * Mark a path to be deleted on JVM shutdown.
 * 
 * @param f the existing path to delete.
 *
 * @return  true if deleteOnExit is successful, otherwise false.
 *
 * @throws AccessControlException If access is denied
 * @throws UnsupportedFileSystemException If file system for <code>f</code> is
 *           not supported
 * @throws IOException If an I/O error occurred
 * 
 * Exceptions applicable to file systems accessed over RPC:
 * @throws RpcClientException If an exception occurred in the RPC client
 * @throws RpcServerException If an exception occurred in the RPC server
 * @throws UnexpectedServerException If server implementation throws 
 *           undeclared exception to RPC server
 */
public boolean deleteOnExit(Path f) throws AccessControlException,
    IOException {
  if (!this.util().exists(f)) {
    return false;
  }
  synchronized (DELETE_ON_EXIT) {
    if (DELETE_ON_EXIT.isEmpty()) {
      ShutdownHookManager.get().addShutdownHook(FINALIZER, SHUTDOWN_HOOK_PRIORITY);
    }
    
    Set<Path> set = DELETE_ON_EXIT.get(this);
    if (set == null) {
      set = new TreeSet<Path>();
      DELETE_ON_EXIT.put(this, set);
    }
    set.add(f);
  }
  return true;
}
 
Example #19
Source File: DistCp.java    From big-c with Apache License 2.0
/**
 * Main function of the DistCp program. Parses the input arguments (via OptionsParser),
 * and invokes the DistCp::run() method, via the ToolRunner.
 * @param argv Command-line arguments sent to DistCp.
 */
public static void main(String argv[]) {
  int exitCode;
  try {
    DistCp distCp = new DistCp();
    Cleanup CLEANUP = new Cleanup(distCp);

    ShutdownHookManager.get().addShutdownHook(CLEANUP,
      SHUTDOWN_HOOK_PRIORITY);
    exitCode = ToolRunner.run(getDefaultConf(), distCp, argv);
  }
  catch (Exception e) {
    LOG.error("Couldn't complete DistCp operation: ", e);
    exitCode = DistCpConstants.UNKNOWN_ERROR;
  }
  System.exit(exitCode);
}
 
Example #20
Source File: SharedCacheManager.java    From big-c with Apache License 2.0
public static void main(String[] args) {
  Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(SharedCacheManager.class, args, LOG);
  try {
    Configuration conf = new YarnConfiguration();
    SharedCacheManager sharedCacheManager = new SharedCacheManager();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(sharedCacheManager),
        SHUTDOWN_HOOK_PRIORITY);
    sharedCacheManager.init(conf);
    sharedCacheManager.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting SharedCacheManager", t);
    System.exit(-1);
  }
}
 
Example #21
Source File: NodeManager.java    From big-c with Apache License 2.0
protected void shutDown() {
  new Thread() {
    @Override
    public void run() {
      try {
        NodeManager.this.stop();
      } catch (Throwable t) {
        LOG.error("Error while shutting down NodeManager", t);
      } finally {
        if (shouldExitOnShutdownEvent
            && !ShutdownHookManager.get().isShutdownInProgress()) {
          ExitUtil.terminate(-1);
        }
      }
    }
  }.start();
}
 
Example #22
Source File: DistCp.java    From hadoop with Apache License 2.0
/**
 * Main function of the DistCp program. Parses the input arguments (via OptionsParser),
 * and invokes the DistCp::run() method, via the ToolRunner.
 * @param argv Command-line arguments sent to DistCp.
 */
public static void main(String argv[]) {
  int exitCode;
  try {
    DistCp distCp = new DistCp();
    Cleanup CLEANUP = new Cleanup(distCp);

    ShutdownHookManager.get().addShutdownHook(CLEANUP,
      SHUTDOWN_HOOK_PRIORITY);
    exitCode = ToolRunner.run(getDefaultConf(), distCp, argv);
  }
  catch (Exception e) {
    LOG.error("Couldn't complete DistCp operation: ", e);
    exitCode = DistCpConstants.UNKNOWN_ERROR;
  }
  System.exit(exitCode);
}
 
Example #23
Source File: NodeManager.java    From big-c with Apache License 2.0
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
  try {

    // Remove the old hook if we are rebooting.
    if (hasToReboot && null != nodeManagerShutdownHook) {
      ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
    }

    nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
    ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
                                              SHUTDOWN_HOOK_PRIORITY);
    // System exit should be called only when NodeManager is instantiated from
    // main() function
    this.shouldExitOnShutdownEvent = true;
    this.init(conf);
    this.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting NodeManager", t);
    System.exit(-1);
  }
}
 
Example #24
Source File: DFSClientCache.java    From hadoop with Apache License 2.0
DFSClientCache(NfsConfiguration config, int clientCache) {
  this.config = config;
  this.clientCache = CacheBuilder.newBuilder()
      .maximumSize(clientCache)
      .removalListener(clientRemovalListener())
      .build(clientLoader());

  this.inputstreamCache = CacheBuilder.newBuilder()
      .maximumSize(DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE)
      .expireAfterAccess(DEFAULT_DFS_INPUTSTREAM_CACHE_TTL, TimeUnit.SECONDS)
      .removalListener(inputStreamRemovalListener())
      .build(inputStreamLoader());
  
  ShutdownHookManager.get().addShutdownHook(new CacheFinalizer(),
      SHUTDOWN_HOOK_PRIORITY);
}
 
Example #25
Source File: JobHistoryServer.java    From hadoop with Apache License 2.0
static JobHistoryServer launchJobHistoryServer(String[] args) {
  Thread.
      setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
  JobHistoryServer jobHistoryServer = null;
  try {
    jobHistoryServer = new JobHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(jobHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration(new JobConf());
    new GenericOptionsParser(conf, args);
    jobHistoryServer.init(conf);
    jobHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting JobHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting JobHistoryServer");
  }
  return jobHistoryServer;
}
 
Example #26
Source File: ApplicationHistoryServer.java    From big-c with Apache License 2.0
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread
    .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args,
    LOG);
  ApplicationHistoryServer appHistoryServer = null;
  try {
    appHistoryServer = new ApplicationHistoryServer();
    ShutdownHookManager.get().addShutdownHook(
      new CompositeServiceShutdownHook(appHistoryServer),
      SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    new GenericOptionsParser(conf, args);
    appHistoryServer.init(conf);
    appHistoryServer.start();
  } catch (Throwable t) {
    LOG.fatal("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return appHistoryServer;
}
 
Example #27
Source File: FileSystem.java    From big-c with Apache License 2.0
private FileSystem getInternal(URI uri, Configuration conf, Key key) throws IOException{
  FileSystem fs;
  synchronized (this) {
    fs = map.get(key);
  }
  if (fs != null) {
    return fs;
  }

  fs = createFileSystem(uri, conf);
  synchronized (this) { // refetch the lock again
    FileSystem oldfs = map.get(key);
    if (oldfs != null) { // a file system is created while lock is releasing
      fs.close(); // close the new file system
      return oldfs;  // return the old file system
    }
    
    // now insert the new file system into the map
    if (map.isEmpty()
            && !ShutdownHookManager.get().isShutdownInProgress()) {
      ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
    }
    fs.key = key;
    map.put(key, fs);
    if (conf.getBoolean("fs.automatic.close", true)) {
      toAutoClose.add(key);
    }
    return fs;
  }
}
 
Example #28
Source File: HdfsErrorHandlingJunitTest.java    From gemfirexd-oss with Apache License 2.0
public void test005BloomReadErrorByFSClose() throws Exception {
  Configuration hconf = getMiniClusterConf();

  int numDataNodes = 1;
  initMiniCluster(hconf, numDataNodes);
  hdfsStore.getFileSystem().delete(testDataDir, true);
  
  HdfsSortedOplogOrganizer organizer = new HdfsSortedOplogOrganizer(regionManager, 0);

  // flush and create hoplog
  ArrayList<TestEvent> items = new ArrayList<TestEvent>();
  items.add(new TestEvent(("1"), ("1-1")));
  items.add(new TestEvent(("4"), ("1-4")));
  organizer.flush(items.iterator(), items.size());

  List<TrackedReference<Hoplog>> list = organizer.getSortedOplogs();
  assertEquals(1, list.size());
  Hoplog hoplog = list.get(0).get();
  HoplogReader reader = hoplog.getReader();

  ShutdownHookManager mgr = ShutdownHookManager.get();
  Field field = ShutdownHookManager.class.getDeclaredField("shutdownInProgress");
  field.setAccessible(true);
  field.set(mgr, new AtomicBoolean(true));
  
  hdfsStore.getFileSystem().close();
  hdfsStore = null;

  byte[] keyBytes = BlobHelper.serializeToBlob("1");
  try {
    organizer.read(keyBytes);
    fail();
  } catch (CacheClosedException e) {
    // expected
  }
  field.set(mgr, new AtomicBoolean(false));
}
 
Example #29
Source File: MountdBase.java    From big-c with Apache License 2.0
public void start(boolean register) {
  startUDPServer();
  startTCPServer();
  if (register) {
    ShutdownHookManager.get().addShutdownHook(new Unregister(),
        SHUTDOWN_HOOK_PRIORITY);
    try {
      rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
    } catch (Throwable e) {
      LOG.fatal("Failed to register the MOUNT service.", e);
      terminate(1, e);
    }
  }
}
 
Example #30
Source File: Nfs3Base.java    From big-c with Apache License 2.0
public void start(boolean register) {
  startTCPServer(); // Start TCP server

  if (register) {
    ShutdownHookManager.get().addShutdownHook(new NfsShutdownHook(),
        SHUTDOWN_HOOK_PRIORITY);
    try {
      rpcProgram.register(PortmapMapping.TRANSPORT_TCP, nfsBoundPort);
    } catch (Throwable e) {
      LOG.fatal("Failed to register the NFSv3 service.", e);
      terminate(1, e);
    }
  }
}