org.apache.hadoop.util.Timer Java Examples
The following examples show how to use org.apache.hadoop.util.Timer. Each example is drawn from an open-source project; the source file, originating project, and license are noted above each code listing.
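Before the examples, here is a minimal sketch of the pattern most of them share: a class accepts a Timer through its constructor and reads the clock with monotonicNow(), so tests can substitute a controllable Timer instead of the real clock. Only Timer and its monotonicNow() method come from Hadoop; the class ThrottledTask and its members are hypothetical names used purely for illustration.

import org.apache.hadoop.util.Timer;

// Illustrative sketch only; ThrottledTask is a hypothetical class, not Hadoop code.
public class ThrottledTask {
  private final Timer timer;
  private final long minMsBetweenRuns;
  private long lastRunMs;

  public ThrottledTask(Timer timer, long minMsBetweenRuns) {
    this.timer = timer;
    this.minMsBetweenRuns = minMsBetweenRuns;
    // Start out "due": pretend the last run happened one full gap ago.
    this.lastRunMs = timer.monotonicNow() - minMsBetweenRuns;
  }

  /** Runs the task only if at least minMsBetweenRuns has elapsed since the last run. */
  public boolean runIfDue(Runnable task) {
    long now = timer.monotonicNow();
    if (now - lastRunMs < minMsBetweenRuns) {
      return false; // throttled: too soon since the previous run
    }
    lastRunMs = now;
    task.run();
    return true;
  }
}

In a unit test, one could pass a stubbed Timer whose monotonicNow() returns a scripted sequence of values, making the throttling branch deterministic; the ThrottledAsyncChecker and HddsVolumeChecker examples below rely on the same constructor-injection idea.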
Example #1
Source File: ThrottledAsyncChecker.java From hadoop-ozone with Apache License 2.0
public ThrottledAsyncChecker(final Timer timer,
    final long minMsBetweenChecks,
    final long diskCheckTimeout,
    final ExecutorService executorService) {
  this.timer = timer;
  this.minMsBetweenChecks = minMsBetweenChecks;
  this.diskCheckTimeout = diskCheckTimeout;
  this.executorService = MoreExecutors.listeningDecorator(executorService);
  this.checksInProgress = new HashMap<>();
  this.completedChecks = new WeakHashMap<>();

  if (this.diskCheckTimeout > 0) {
    ScheduledThreadPoolExecutor scheduledThreadPoolExecutor =
        new ScheduledThreadPoolExecutor(1);
    this.scheduledExecutorService = MoreExecutors
        .getExitingScheduledExecutorService(scheduledThreadPoolExecutor);
  } else {
    this.scheduledExecutorService = null;
  }
}
Example #2
Source File: TestVolumeSetDiskChecks.java From hadoop-ozone with Apache License 2.0
/**
 * Verify that bad volumes are filtered at startup.
 * @throws IOException
 */
@Test
public void testBadDirectoryDetection() throws IOException {
  final int numVolumes = 5;
  final int numBadVolumes = 2;

  conf = getConfWithDataNodeDirs(numVolumes);
  final MutableVolumeSet volumeSet = new MutableVolumeSet(
      UUID.randomUUID().toString(), conf) {
    @Override
    HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
        throws DiskErrorException {
      return new DummyChecker(configuration, new Timer(), numBadVolumes);
    }
  };

  assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes));
  assertThat(volumeSet.getVolumesList().size(),
      is(numVolumes - numBadVolumes));
  volumeSet.shutdown();
}
Example #3
Source File: TestVolumeSetDiskChecks.java From hadoop-ozone with Apache License 2.0
/**
 * Verify that all volumes are added to fail list if all volumes are bad.
 */
@Test
public void testAllVolumesAreBad() throws IOException {
  final int numVolumes = 5;

  conf = getConfWithDataNodeDirs(numVolumes);
  final MutableVolumeSet volumeSet = new MutableVolumeSet(
      UUID.randomUUID().toString(), conf) {
    @Override
    HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
        throws DiskErrorException {
      return new DummyChecker(configuration, new Timer(), numVolumes);
    }
  };

  assertEquals(volumeSet.getFailedVolumesList().size(), numVolumes);
  assertEquals(volumeSet.getVolumesList().size(), 0);
  volumeSet.shutdown();
}
Example #4
Source File: Groups.java From hadoop with Apache License 2.0
public Groups(Configuration conf, final Timer timer) {
  impl = ReflectionUtils.newInstance(
      conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
          ShellBasedUnixGroupsMapping.class,
          GroupMappingServiceProvider.class),
      conf);

  cacheTimeout = conf.getLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
  negativeCacheTimeout = conf.getLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
  warningDeltaMs = conf.getLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
  staticUserToGroupsMap = parseStaticMapping(conf);

  this.timer = timer;
  this.cache = CacheBuilder.newBuilder()
      .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
      .ticker(new TimerToTickerAdapter(timer))
      .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
      .build(new GroupCacheLoader());

  if (negativeCacheTimeout > 0) {
    Cache<String, Boolean> tempMap = CacheBuilder.newBuilder()
        .expireAfterWrite(negativeCacheTimeout, TimeUnit.MILLISECONDS)
        .ticker(new TimerToTickerAdapter(timer))
        .build();
    negativeCache = Collections.newSetFromMap(tempMap.asMap());
  }

  if (LOG.isDebugEnabled())
    LOG.debug("Group mapping impl=" + impl.getClass().getName() +
        "; cacheTimeout=" + cacheTimeout +
        "; warningDeltaMs=" + warningDeltaMs);
}
Example #5
Source File: Groups.java From big-c with Apache License 2.0
public Groups(Configuration conf, final Timer timer) {
  impl = ReflectionUtils.newInstance(
      conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
          ShellBasedUnixGroupsMapping.class,
          GroupMappingServiceProvider.class),
      conf);

  cacheTimeout = conf.getLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
  negativeCacheTimeout = conf.getLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
  warningDeltaMs = conf.getLong(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
  parseStaticMapping(conf);

  this.timer = timer;
  this.cache = CacheBuilder.newBuilder()
      .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
      .ticker(new TimerToTickerAdapter(timer))
      .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
      .build(new GroupCacheLoader());

  if (negativeCacheTimeout > 0) {
    Cache<String, Boolean> tempMap = CacheBuilder.newBuilder()
        .expireAfterWrite(negativeCacheTimeout, TimeUnit.MILLISECONDS)
        .ticker(new TimerToTickerAdapter(timer))
        .build();
    negativeCache = Collections.newSetFromMap(tempMap.asMap());
  }

  if (LOG.isDebugEnabled())
    LOG.debug("Group mapping impl=" + impl.getClass().getName() +
        "; cacheTimeout=" + cacheTimeout +
        "; warningDeltaMs=" + warningDeltaMs);
}
Example #6
Source File: MutableVolumeSet.java From hadoop-ozone with Apache License 2.0
@VisibleForTesting
HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
    throws DiskChecker.DiskErrorException {
  return new HddsVolumeChecker(configuration, new Timer());
}
Example #7
Source File: HddsVolumeChecker.java From hadoop-ozone with Apache License 2.0
/**
 * @param conf Configuration object.
 * @param timer {@link Timer} object used for throttling checks.
 */
public HddsVolumeChecker(ConfigurationSource conf, Timer timer)
    throws DiskErrorException {
  maxAllowedTimeForCheckMs = conf.getTimeDuration(
      DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
      DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
      TimeUnit.MILLISECONDS);

  if (maxAllowedTimeForCheckMs <= 0) {
    throw new DiskErrorException("Invalid value configured for "
        + DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY
        + " - " + maxAllowedTimeForCheckMs + " (should be > 0)");
  }

  this.timer = timer;

  /**
   * Maximum number of volume failures that can be tolerated without
   * declaring a fatal error.
   */
  int maxVolumeFailuresTolerated = conf.getInt(
      DFSConfigKeysLegacy.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      DFSConfigKeysLegacy.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  minDiskCheckGapMs = conf.getTimeDuration(
      DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
      DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_MIN_GAP_DEFAULT,
      TimeUnit.MILLISECONDS);

  if (minDiskCheckGapMs < 0) {
    throw new DiskErrorException("Invalid value configured for "
        + DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY
        + " - " + minDiskCheckGapMs + " (should be >= 0)");
  }

  long diskCheckTimeout = conf.getTimeDuration(
      DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
      DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
      TimeUnit.MILLISECONDS);

  if (diskCheckTimeout < 0) {
    throw new DiskErrorException("Invalid value configured for "
        + DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY
        + " - " + diskCheckTimeout + " (should be >= 0)");
  }

  lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs;

  if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
    throw new DiskErrorException("Invalid value configured for "
        + DFSConfigKeysLegacy.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY
        + " - " + maxVolumeFailuresTolerated + " "
        + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
  }

  delegateChecker = new ThrottledAsyncChecker<>(
      timer, minDiskCheckGapMs, diskCheckTimeout,
      Executors.newCachedThreadPool(
          new ThreadFactoryBuilder()
              .setNameFormat("DataNode DiskChecker thread %d")
              .setDaemon(true)
              .build()));

  checkVolumeResultHandlerExecutorService = Executors.newCachedThreadPool(
      new ThreadFactoryBuilder()
          .setNameFormat("VolumeCheck ResultHandler thread %d")
          .setDaemon(true)
          .build());
}
Example #8
Source File: TestVolumeSetDiskChecks.java From hadoop-ozone with Apache License 2.0
DummyChecker(ConfigurationSource conf, Timer timer, int numBadVolumes)
    throws DiskErrorException {
  super(conf, timer);
  this.numBadVolumes = numBadVolumes;
}
Example #9
Source File: Groups.java From hadoop with Apache License 2.0
public Groups(Configuration conf) {
  this(conf, new Timer());
}
Example #10
Source File: Groups.java From hadoop with Apache License 2.0
public TimerToTickerAdapter(Timer timer) {
  this.timer = timer;
}
Example #11
Source File: Groups.java From big-c with Apache License 2.0
public Groups(Configuration conf) {
  this(conf, new Timer());
}
Example #12
Source File: Groups.java From big-c with Apache License 2.0
public TimerToTickerAdapter(Timer timer) {
  this.timer = timer;
}
Example #13
Source File: BlockPoolSlice.java From lucene-solr with Apache License 2.0
/**
 * Create a block pool slice.
 * @param bpid Block pool Id
 * @param volume {@link FsVolumeImpl} to which this BlockPool belongs to
 * @param bpDir directory corresponding to the BlockPool
 * @param conf configuration
 * @param timer include methods for getting time
 * @throws IOException Error making directories
 */
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
    Configuration conf, Timer timer) throws IOException {
  this.bpid = bpid;
  this.volume = volume;
  this.fileIoProvider = volume.getFileIoProvider();
  this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
  this.finalizedDir = new File(
      currentDir, DataStorage.STORAGE_DIR_FINALIZED);
  this.lazypersistDir = new File(currentDir,
      DataStorage.STORAGE_DIR_LAZY_PERSIST);
  if (!this.finalizedDir.exists()) {
    if (!this.finalizedDir.mkdirs()) {
      throw new IOException("Failed to mkdirs " + this.finalizedDir);
    }
  }

  this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);

  this.deleteDuplicateReplicas = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
      DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);

  this.cachedDfsUsedCheckTime = conf.getLong(
      DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS,
      DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_DEFAULT_MS);

  this.maxDataLength = conf.getInt(
      CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
      CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);

  this.timer = timer;

  // Files that were being written when the datanode was last shutdown
  // are now moved back to the data directory. It is possible that
  // in the future, we might want to do some sort of datanode-local
  // recovery for these blocks. For example, crc validation.
  //
  this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
  if (tmpDir.exists()) {
    fileIoProvider.fullyDelete(volume, tmpDir);
  }

  this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);

  // create the rbw and tmp directories if they don't exist.
  fileIoProvider.mkdirs(volume, rbwDir);
  fileIoProvider.mkdirs(volume, tmpDir);

  if (addReplicaThreadPool == null) {
    // initialize add replica fork join pool
    initializeAddReplicaPool(conf);
  }

  // Make the dfs usage to be saved during shutdown.
  shutdownHook = new Runnable() {
    @Override
    public void run() {
      addReplicaThreadPool.shutdownNow();
    }
  };
  ShutdownHookManager.get().addShutdownHook(shutdownHook,
      SHUTDOWN_HOOK_PRIORITY);
}