org.apache.hadoop.util.Time Java Examples
The following examples show how to use
org.apache.hadoop.util.Time.
Each example is taken from a real open-source project; the source file, originating project, and license are noted above the code.
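Before the examples, here is a minimal sketch of the three Time methods they rely on: Time.now() returns wall-clock milliseconds (used for timestamps that are persisted or displayed), Time.monotonicNow() returns milliseconds from a monotonic source (used for measuring elapsed time, since it never jumps backward), and Time.formatTime(long) renders a millisecond timestamp as a readable string. The TimeDemo class is invented for illustration.

import org.apache.hadoop.util.Time;

public class TimeDemo {
  public static void main(String[] args) throws InterruptedException {
    // Wall-clock time: suitable for timestamps stored in metadata.
    long created = Time.now();

    // Monotonic time: suitable for measuring durations.
    long start = Time.monotonicNow();
    Thread.sleep(100);
    long elapsedMs = Time.monotonicNow() - start;

    System.out.println("created at " + Time.formatTime(created));
    System.out.println("elapsed: " + elapsedMs + " ms");
  }
}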
Example #1
Source File: BlockTokenSecretManager.java From big-c with Apache License 2.0
/** Initialize block keys */
private synchronized void generateKeys() {
  if (!isMaster)
    return;
  /*
   * Need to set estimated expiry dates for currentKey and nextKey so that if
   * NN crashes, DN can still expire those keys. NN will stop using the newly
   * generated currentKey after the first keyUpdateInterval, however it may
   * still be used by DN and Balancer to generate new tokens before they get a
   * chance to sync their keys with NN. Since we require keyUpdateInterval to
   * be long enough so that all live DNs and the Balancer will sync their keys
   * with NN at least once during the period, the estimated expiry date for
   * currentKey is set to now() + 2 * keyUpdateInterval + tokenLifetime.
   * Similarly, the estimated expiry date for nextKey is one keyUpdateInterval
   * more.
   */
  setSerialNo(serialNo + 1);
  currentKey = new BlockKey(serialNo,
      Time.now() + 2 * keyUpdateInterval + tokenLifetime, generateSecret());
  setSerialNo(serialNo + 1);
  nextKey = new BlockKey(serialNo,
      Time.now() + 3 * keyUpdateInterval + tokenLifetime, generateSecret());
  allKeys.put(currentKey.getKeyId(), currentKey);
  allKeys.put(nextKey.getKeyId(), nextKey);
}
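To make the comment's arithmetic concrete: assuming a ten-hour key update interval and a ten-hour token lifetime (illustrative values only; the real ones come from configuration such as dfs.block.access.key.update.interval and dfs.block.access.token.lifetime), currentKey gets an estimated expiry of now + 2 * 10h + 10h = now + 30 hours, and nextKey now + 40 hours.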
Example #2
Source File: TestZKFailoverControllerStress.java From hadoop with Apache License 2.0
/**
 * Randomly expire the ZK sessions of the two ZKFCs. This differs
 * from the above test in that it is not a controlled failover -
 * we just do random expirations and expect neither one to ever
 * generate fatal exceptions.
 */
@Test(timeout=(STRESS_RUNTIME_SECS + EXTRA_TIMEOUT_SECS) * 1000)
public void testRandomExpirations() throws Exception {
  cluster.start();
  long st = Time.now();
  long runFor = STRESS_RUNTIME_SECS * 1000;

  Random r = new Random();
  while (Time.now() - st < runFor) {
    cluster.getTestContext().checkException();
    int targetIdx = r.nextInt(2);
    ActiveStandbyElector target = cluster.getElector(targetIdx);
    long sessId = target.getZKSessionIdForTests();
    if (sessId != -1) {
      LOG.info(String.format("Expiring session %x for svc %d",
          sessId, targetIdx));
      getServer(serverFactory).closeSession(sessId);
    }
    Thread.sleep(r.nextInt(300));
  }
}
Example #3
Source File: TestMultithreadedTestUtil.java From hadoop with Apache License 2.0
@Test
public void testThreadFails() throws Exception {
  TestContext ctx = new TestContext();
  ctx.addThread(new TestingThread(ctx) {
    @Override
    public void doWork() throws Exception {
      fail(FAIL_MSG);
    }
  });
  ctx.startThreads();
  long st = Time.now();
  try {
    ctx.waitFor(30000);
    fail("waitFor did not throw");
  } catch (RuntimeException rte) {
    // expected
    assertEquals(FAIL_MSG, rte.getCause().getMessage());
  }
  long et = Time.now();

  // Test shouldn't have waited the full 30 seconds, since
  // the thread throws faster than that
  assertTrue("Test took " + (et - st) + "ms", et - st < 5000);
}
Example #4
Source File: PBImageTextWriter.java From hadoop with Apache License 2.0
private void output(Configuration conf, FileSummary summary,
    FileInputStream fin, ArrayList<FileSummary.Section> sections)
    throws IOException {
  InputStream is;
  long startTime = Time.monotonicNow();
  out.println(getHeader());
  for (FileSummary.Section section : sections) {
    if (SectionName.fromString(section.getName()) == SectionName.INODE) {
      fin.getChannel().position(section.getOffset());
      is = FSImageUtil.wrapInputStreamForCompression(conf,
          summary.getCodec(), new BufferedInputStream(new LimitInputStream(
              fin, section.getLength())));
      outputINodes(is);
    }
  }
  long timeTaken = Time.monotonicNow() - startTime;
  LOG.debug("Time to output inodes: {}ms", timeTaken);
}
Example #5
Source File: HATestUtil.java From hadoop with Apache License 2.0
public static void waitForCheckpoint(MiniDFSCluster cluster, int nnIdx,
    List<Integer> txids) throws InterruptedException {
  long start = Time.now();
  while (true) {
    try {
      FSImageTestUtil.assertNNHasCheckpoints(cluster, nnIdx, txids);
      return;
    } catch (AssertionError err) {
      if (Time.now() - start > 10000) {
        throw err;
      } else {
        Thread.sleep(300);
      }
    }
  }
}
Example #6
Source File: BlockTokenSecretManager.java From hadoop with Apache License 2.0
/**
 * Update block keys, only to be used in master mode.
 */
synchronized boolean updateKeys() throws IOException {
  if (!isMaster)
    return false;
  LOG.info("Updating block keys");
  removeExpiredKeys();

  // set final expiry date of retiring currentKey
  allKeys.put(currentKey.getKeyId(), new BlockKey(currentKey.getKeyId(),
      Time.now() + keyUpdateInterval + tokenLifetime, currentKey.getKey()));

  // update the estimated expiry date of new currentKey
  currentKey = new BlockKey(nextKey.getKeyId(),
      Time.now() + 2 * keyUpdateInterval + tokenLifetime, nextKey.getKey());
  allKeys.put(currentKey.getKeyId(), currentKey);

  // generate a new nextKey
  setSerialNo(serialNo + 1);
  nextKey = new BlockKey(serialNo,
      Time.now() + 3 * keyUpdateInterval + tokenLifetime, generateSecret());
  allKeys.put(nextKey.getKeyId(), nextKey);
  return true;
}
Example #7
Source File: ShortCircuitCache.java From big-c with Apache License 2.0
/**
 * Trim the eviction lists.
 */
private void trimEvictionMaps() {
  long now = Time.monotonicNow();
  demoteOldEvictableMmaped(now);

  while (true) {
    long evictableSize = evictable.size();
    long evictableMmappedSize = evictableMmapped.size();
    if (evictableSize + evictableMmappedSize <= maxTotalSize) {
      return;
    }
    ShortCircuitReplica replica;
    if (evictableSize == 0) {
      replica = evictableMmapped.firstEntry().getValue();
    } else {
      replica = evictable.firstEntry().getValue();
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace(this + ": trimEvictionMaps is purging " + replica +
          StringUtils.getStackTrace(Thread.currentThread()));
    }
    purge(replica);
  }
}
Example #8
Source File: GenericTestUtils.java From hadoop with Apache License 2.0
public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
    int waitForMillis) throws TimeoutException, InterruptedException {
  long st = Time.now();
  do {
    boolean result = check.get();
    if (result) {
      return;
    }
    Thread.sleep(checkEveryMillis);
  } while (Time.now() - st < waitForMillis);

  throw new TimeoutException("Timed out waiting for condition. " +
      "Thread diagnostics:\n" +
      TimedOutTestsListener.buildThreadDiagnosticString());
}
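A hedged usage sketch for this helper follows. The AtomicBoolean flag and the startBackgroundTask helper are invented for illustration; the lambda works because the Supplier parameter is a single-method interface.

import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.test.GenericTestUtils;

// Hypothetical test fragment: wait for a background task to flip a flag.
AtomicBoolean done = new AtomicBoolean(false);
startBackgroundTask(done);  // assumed helper that eventually calls done.set(true)

// Re-check every 100 ms; throw TimeoutException (with thread diagnostics)
// if the flag is still false after 10 seconds.
GenericTestUtils.waitFor(() -> done.get(), 100, 10000);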
Example #9
Source File: TestLog4Json.java From big-c with Apache License 2.0
@Test
public void testException() throws Throwable {
  Exception e =
      new NoRouteToHostException("that box caught fire 3 years ago");
  ThrowableInformation ti = new ThrowableInformation(e);
  Log4Json l4j = new Log4Json();
  long timeStamp = Time.now();
  String outcome = l4j.toJson(new StringWriter(),
      "testException",
      timeStamp,
      "INFO",
      "quoted\"",
      "new line\n and {}",
      ti)
      .toString();
  println("testException", outcome);
}
Example #10
Source File: TestINodeFile.java From big-c with Apache License 2.0
@Test
public void testFileUnderConstruction() {
  replication = 3;
  final INodeFile file = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null,
      perm, 0L, 0L, null, replication, 1024L, (byte)0);
  assertFalse(file.isUnderConstruction());

  final String clientName = "client";
  final String clientMachine = "machine";
  file.toUnderConstruction(clientName, clientMachine);
  assertTrue(file.isUnderConstruction());
  FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
  assertEquals(clientName, uc.getClientName());
  assertEquals(clientMachine, uc.getClientMachine());

  file.toCompleteFile(Time.now());
  assertFalse(file.isUnderConstruction());
}
Example #11
Source File: OzoneDelegationTokenSecretManager.java From hadoop-ozone with Apache License 2.0
/**
 * Checks if TokenInfo for the given identifier exists in database and if the
 * token is expired.
 */
private TokenInfo validateToken(OzoneTokenIdentifier identifier)
    throws InvalidToken {
  TokenInfo info = currentTokens.get(identifier);
  if (info == null) {
    throw new InvalidToken("token " + formatTokenId(identifier)
        + " can't be found in cache");
  }
  long now = Time.now();
  if (info.getRenewDate() < now) {
    throw new InvalidToken("token " + formatTokenId(identifier) + " is "
        + "expired, current time: " + Time.formatTime(now)
        + " expected renewal time: " + Time.formatTime(info.getRenewDate()));
  }
  if (!verifySignature(identifier, info.getPassword())) {
    throw new InvalidToken("Tampered/Invalid token.");
  }
  return info;
}
Example #12
Source File: TestOzoneManagerDoubleBufferWithDummyResponse.java From hadoop-ozone with Apache License 2.0
/**
 * Create DummyBucketCreate response.
 */
private OMDummyCreateBucketResponse createDummyBucketResponse(
    String volumeName) {
  OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
      .setVolumeName(volumeName)
      .setBucketName(UUID.randomUUID().toString())
      .setCreationTime(Time.now())
      .build();
  return new OMDummyCreateBucketResponse(omBucketInfo,
      OMResponse.newBuilder()
          .setCmdType(OzoneManagerProtocolProtos.Type.CreateBucket)
          .setStatus(OzoneManagerProtocolProtos.Status.OK)
          .setCreateBucketResponse(CreateBucketResponse.newBuilder().build())
          .build());
}
Example #13
Source File: PeerCache.java From big-c with Apache License 2.0
private synchronized Peer getInternal(DatanodeID dnId, boolean isDomain) {
  List<Value> sockStreamList = multimap.get(new Key(dnId, isDomain));
  if (sockStreamList == null) {
    return null;
  }

  Iterator<Value> iter = sockStreamList.iterator();
  while (iter.hasNext()) {
    Value candidate = iter.next();
    iter.remove();
    long ageMs = Time.monotonicNow() - candidate.getTime();
    Peer peer = candidate.getPeer();
    if (ageMs >= expiryPeriod) {
      try {
        peer.close();
      } catch (IOException e) {
        LOG.warn("got IOException closing stale peer " + peer +
            ", which is " + ageMs + " ms old");
      }
    } else if (!peer.isClosed()) {
      return peer;
    }
  }
  return null;
}
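Note the recurring idiom here and in several other examples: the age is computed from Time.monotonicNow(), not Time.now(). The monotonic clock never moves backward, so elapsed-time math stays correct even if the wall clock is adjusted (for example by NTP) while a peer sits in the cache.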
Example #14
Source File: TestJsonUtil.java From big-c with Apache License 2.0
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status = " + status);
  System.out.println("fstatus = " + fstatus);

  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json = " + json.replace(",", ",\n  "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2 = " + s2);
  System.out.println("fs2 = " + fs2);

  Assert.assertEquals(fstatus, fs2);
}
Example #15
Source File: CacheManager.java From big-c with Apache License 2.0
public final void processCacheReport(final DatanodeID datanodeID,
    final List<Long> blockIds) throws IOException {
  namesystem.writeLock();
  final long startTime = Time.monotonicNow();
  final long endTime;
  try {
    final DatanodeDescriptor datanode =
        blockManager.getDatanodeManager().getDatanode(datanodeID);
    if (datanode == null || !datanode.isAlive) {
      throw new IOException(
          "processCacheReport from dead or unregistered datanode: " +
          datanode);
    }
    processCacheReportImpl(datanode, blockIds);
  } finally {
    endTime = Time.monotonicNow();
    namesystem.writeUnlock();
  }

  // Log the block report processing stats from Namenode perspective
  final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
  if (metrics != null) {
    metrics.addCacheBlockReport((int) (endTime - startTime));
  }
  LOG.debug("Processed cache report from {}, blocks: {}, " +
      "processing time: {} msecs", datanodeID, blockIds.size(),
      (endTime - startTime));
}
Example #16
Source File: TestAppendDifferentChecksum.java From hadoop with Apache License 2.0
/**
 * Test which randomly alternates between appending with
 * CRC32 and with CRC32C, crossing several block boundaries.
 * Then, checks that all of the data can be read back correctly.
 */
@Test(timeout=RANDOM_TEST_RUNTIME*2)
public void testAlgoSwitchRandomized() throws IOException {
  FileSystem fsWithCrc32 = createFsWithChecksum("CRC32", 512);
  FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);

  Path p = new Path("/testAlgoSwitchRandomized");
  long seed = Time.now();
  System.out.println("seed: " + seed);
  Random r = new Random(seed);

  // Create empty to start
  IOUtils.closeStream(fsWithCrc32.create(p));

  long st = Time.now();
  int len = 0;
  while (Time.now() - st < RANDOM_TEST_RUNTIME) {
    int thisLen = r.nextInt(500);
    FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
    FSDataOutputStream stm = fs.append(p);
    try {
      AppendTestUtil.write(stm, len, thisLen);
    } finally {
      stm.close();
    }
    len += thisLen;
  }

  AppendTestUtil.check(fsWithCrc32, p, len);
  AppendTestUtil.check(fsWithCrc32C, p, len);
}
Example #17
Source File: DFSInotifyEventInputStream.java From hadoop with Apache License 2.0
/**
 * Returns the next event batch in the stream, waiting up to the specified
 * amount of time for a new batch. Returns null if one is not available at the
 * end of the specified amount of time. The time before the method returns may
 * exceed the specified amount of time by up to the time required for an RPC
 * to the NameNode.
 *
 * @param time number of units of the given TimeUnit to wait
 * @param tu the desired TimeUnit
 * @throws IOException see {@link DFSInotifyEventInputStream#poll()}
 * @throws MissingEventsException
 *           see {@link DFSInotifyEventInputStream#poll()}
 * @throws InterruptedException if the calling thread is interrupted
 */
public EventBatch poll(long time, TimeUnit tu) throws IOException,
    InterruptedException, MissingEventsException {
  TraceScope scope = Trace.startSpan("inotifyPollWithTimeout", traceSampler);
  EventBatch next = null;
  try {
    long initialTime = Time.monotonicNow();
    long totalWait = TimeUnit.MILLISECONDS.convert(time, tu);
    long nextWait = INITIAL_WAIT_MS;
    while ((next = poll()) == null) {
      long timeLeft = totalWait - (Time.monotonicNow() - initialTime);
      if (timeLeft <= 0) {
        LOG.debug("timed poll(): timed out");
        break;
      } else if (timeLeft < nextWait * 2) {
        nextWait = timeLeft;
      } else {
        nextWait *= 2;
      }
      LOG.debug("timed poll(): poll() returned null, sleeping for {} ms",
          nextWait);
      Thread.sleep(nextWait);
    }
  } finally {
    scope.close();
  }
  return next;
}
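The loop above doubles the sleep between polls and clamps the final sleep to the time remaining, so it never oversleeps the deadline by more than one interval. Stripped of the inotify and tracing specifics, the pattern can be sketched generically; everything below (class name, method, parameters) is invented for illustration.

import java.util.function.Supplier;

final class BackoffPoller {
  /**
   * Polls source with exponentially growing sleeps, starting from
   * initialWaitMs, until it yields a non-null value or totalWaitMs
   * elapses. Returns null on timeout.
   */
  static <T> T pollWithBackoff(Supplier<T> source, long initialWaitMs,
      long totalWaitMs) throws InterruptedException {
    long start = System.nanoTime();
    long nextWait = initialWaitMs;
    T result;
    while ((result = source.get()) == null) {
      long elapsedMs = (System.nanoTime() - start) / 1_000_000;
      long timeLeft = totalWaitMs - elapsedMs;
      if (timeLeft <= 0) {
        return null;                 // deadline reached
      } else if (timeLeft < nextWait * 2) {
        nextWait = timeLeft;         // last sleep: use exactly what's left
      } else {
        nextWait *= 2;               // otherwise double the backoff
      }
      Thread.sleep(nextWait);
    }
    return result;
  }
}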
Example #18
Source File: FSImage.java From big-c with Apache License 2.0
/**
 * Save the contents of the FS image to the file.
 */
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
    NameNodeFile dstType) throws IOException {
  long txid = context.getTxId();
  File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
  File dstFile = NNStorage.getStorageFile(sd, dstType, txid);

  FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
  FSImageCompression compression = FSImageCompression.createCompression(conf);
  saver.save(newFile, compression);

  MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
  storage.setMostRecentCheckpointInfo(txid, Time.now());
}
Example #19
Source File: S3MultipartUploadAbortRequest.java From hadoop-ozone with Apache License 2.0
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
  KeyArgs keyArgs =
      getOmRequest().getAbortMultiPartUploadRequest().getKeyArgs();

  return getOmRequest().toBuilder().setAbortMultiPartUploadRequest(
      getOmRequest().getAbortMultiPartUploadRequest().toBuilder()
          .setKeyArgs(keyArgs.toBuilder().setModificationTime(Time.now())))
      .setUserInfo(getUserInfo()).build();
}
Example #20
Source File: LoadGenerator.java From big-c with Apache License 2.0
/**
 * The list operation randomly picks a directory in the test space and
 * lists the directory content.
 */
private void list() throws IOException {
  String dirName = dirs.get(r.nextInt(dirs.size()));
  long startTime = Time.now();
  fc.listStatus(new Path(dirName));
  executionTime[LIST] += (Time.now() - startTime);
  totalNumOfOps[LIST]++;
}
Example #21
Source File: TestFailoverWithBlockTokensEnabled.java From big-c with Apache License 2.0
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));

  DFSClient dfsClient = DFSClientAdapter.getDFSClient(
      (DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(
      new Answer<LocatedBlocks>() {
        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
          LocatedBlocks locatedBlocks = (LocatedBlocks) arg0.callRealMethod();
          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            Token<BlockTokenIdentifier> token = lb.getBlockToken();
            BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
            // This will make the token invalid, since the password
            // won't match anymore
            id.setExpiryDate(Time.now() + 10);
            Token<BlockTokenIdentifier> newToken =
                new Token<BlockTokenIdentifier>(id.getBytes(),
                    token.getPassword(), token.getKind(),
                    token.getService());
            lb.setBlockToken(newToken);
          }
          return locatedBlocks;
        }
      }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
      Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyDfsClient);

  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
Example #22
Source File: OmKeyInfo.java From hadoop-ozone with Apache License 2.0
/**
 * Append a set of blocks to the latest version. Note that these blocks are
 * part of the latest version, not a new version.
 *
 * @param newLocationList the list of new blocks to be added.
 * @param updateTime if true, will update modification time.
 * @throws IOException
 */
public synchronized void appendNewBlocks(
    List<OmKeyLocationInfo> newLocationList, boolean updateTime)
    throws IOException {
  if (keyLocationVersions.size() == 0) {
    throw new IOException("Appending new block, but no version exist");
  }
  OmKeyLocationInfoGroup currentLatestVersion =
      keyLocationVersions.get(keyLocationVersions.size() - 1);
  currentLatestVersion.appendNewBlocks(newLocationList);
  if (updateTime) {
    setModificationTime(Time.now());
  }
}
Example #23
Source File: FsDatasetCache.java From hadoop with Apache License 2.0
UncachingTask(ExtendedBlockId key, long revocationDelayMs) {
  this.key = key;
  if (revocationDelayMs == 0) {
    this.revocationTimeMs = 0;
  } else {
    this.revocationTimeMs = revocationDelayMs + Time.monotonicNow();
  }
}
Example #24
Source File: TestOMRequestUtils.java From hadoop-ozone with Apache License 2.0
/**
 * Create OMRequest for set volume property request with quota set.
 * @param volumeName
 * @param quota
 * @return OMRequest
 */
public static OMRequest createSetVolumePropertyRequest(String volumeName,
    long quota) {
  SetVolumePropertyRequest setVolumePropertyRequest =
      SetVolumePropertyRequest.newBuilder().setVolumeName(volumeName)
          .setQuotaInBytes(quota).setModificationTime(Time.now()).build();

  return OMRequest.newBuilder().setClientId(UUID.randomUUID().toString())
      .setCmdType(OzoneManagerProtocolProtos.Type.SetVolumeProperty)
      .setSetVolumePropertyRequest(setVolumePropertyRequest).build();
}
Example #25
Source File: OMKeyDeleteRequest.java From hadoop-ozone with Apache License 2.0
@Override
public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
  DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
  Preconditions.checkNotNull(deleteKeyRequest);

  OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs();
  OzoneManagerProtocolProtos.KeyArgs.Builder newKeyArgs =
      keyArgs.toBuilder().setModificationTime(Time.now());

  return getOmRequest().toBuilder()
      .setDeleteKeyRequest(deleteKeyRequest.toBuilder()
          .setKeyArgs(newKeyArgs)).setUserInfo(getUserInfo()).build();
}
Example #26
Source File: TrashPolicyDefault.java From big-c with Apache License 2.0
@Override
public void deleteCheckpoint() throws IOException {
  FileStatus[] dirs = null;
  try {
    dirs = fs.listStatus(trash);  // scan trash sub-directories
  } catch (FileNotFoundException fnfe) {
    return;
  }

  long now = Time.now();
  for (int i = 0; i < dirs.length; i++) {
    Path path = dirs[i].getPath();
    String dir = path.toUri().getPath();
    String name = path.getName();
    if (name.equals(CURRENT.getName()))  // skip current
      continue;

    long time;
    try {
      time = getTimeFromCheckpoint(name);
    } catch (ParseException e) {
      LOG.warn("Unexpected item in trash: " + dir + ". Ignoring.");
      continue;
    }

    if ((now - deletionInterval) > time) {
      if (fs.delete(path, true)) {
        LOG.info("Deleted trash checkpoint: " + dir);
      } else {
        LOG.warn("Couldn't delete checkpoint: " + dir + " Ignoring.");
      }
    }
  }
}
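For a concrete reading of the expiry check: if deletionInterval were 24 hours (an illustrative value; in practice it is derived from the fs.trash.interval setting), any checkpoint whose directory name parses to a timestamp more than 24 hours before now satisfies (now - deletionInterval) > time and is deleted.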
Example #27
Source File: TestShortCircuitCache.java From hadoop with Apache License 2.0
@Override
public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
  try {
    ExtendedBlockId key = new ExtendedBlockId(blockId, "test_bp1");
    return new ShortCircuitReplicaInfo(
        new ShortCircuitReplica(key,
            pair.getFileInputStreams()[0], pair.getFileInputStreams()[1],
            cache, Time.monotonicNow(), null));
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Example #28
Source File: ShellBasedIdMapping.java From hadoop with Apache License 2.0
synchronized private void loadFullGroupMap() throws IOException {
  BiMap<Integer, String> gMap = HashBiMap.create();

  if (OS.startsWith("Mac")) {
    updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+",
        staticMapping.gidMapping);
  } else {
    updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
        staticMapping.gidMapping);
  }
  gidNameMap = gMap;
  lastUpdateTime = Time.monotonicNow();
}
Example #29
Source File: MockRemoteDirectoryManager.java From submarine with Apache License 2.0
private File initializeJobParentDir() throws IOException {
  File dir = new File(STAGING_AREA, String.valueOf(Time.monotonicNow()));
  if (!dir.mkdirs()) {
    throw new IOException(
        String.format(FAILED_TO_CREATE_DIRS_FORMAT_STRING,
            dir.getAbsolutePath()));
  }
  return dir;
}
Example #30
Source File: TestDelegationTokenRenewer.java From hadoop with Apache License 2.0
@Test
public void testStopRenewalWhenFsGone() throws IOException,
    InterruptedException {
  Configuration conf = mock(Configuration.class);
  Token<?> token = mock(Token.class);
  doReturn(new Text("myservice")).when(token).getService();
  doAnswer(new Answer<Long>() {
    public Long answer(InvocationOnMock invocation) {
      return Time.now() + RENEW_CYCLE;
    }
  }).when(token).renew(any(Configuration.class));

  RenewableFileSystem fs = mock(RenewableFileSystem.class);
  doReturn(conf).when(fs).getConf();
  doReturn(token).when(fs).getRenewToken();

  renewer.addRenewAction(fs);
  assertEquals(1, renewer.getRenewQueueLength());

  Thread.sleep(RENEW_CYCLE);
  verify(token, atLeast(1)).renew(eq(conf));
  verify(token, atMost(2)).renew(eq(conf));

  // drop weak ref
  fs = null;
  System.gc();
  System.gc();
  System.gc();

  // next renew should detect the fs as gone
  Thread.sleep(RENEW_CYCLE);
  verify(token, atLeast(1)).renew(eq(conf));
  verify(token, atMost(2)).renew(eq(conf));
  assertEquals(0, renewer.getRenewQueueLength());
}