Java Code Examples for java.util.NavigableMap#isEmpty()
The following examples show how to use java.util.NavigableMap#isEmpty(). Each example is drawn from an open-source project; the source file and license are noted above the code.
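As a warm-up, here is a minimal, self-contained sketch — the class name and map contents are invented for illustration — of what isEmpty() reports on a NavigableMap and on its headMap/tailMap views, the pattern most of the examples below build on:

import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapIsEmptyDemo {
    public static void main(String[] args) {
        NavigableMap<Integer, String> map = new TreeMap<>();
        System.out.println(map.isEmpty()); // true: no entries yet

        map.put(10, "ten");
        map.put(20, "twenty");
        System.out.println(map.isEmpty()); // false

        // headMap/tailMap return NavigableMap views, so isEmpty() can
        // answer range questions without iterating.
        System.out.println(map.headMap(5, true).isEmpty());   // true: no key <= 5
        System.out.println(map.tailMap(15, false).isEmpty()); // false: 20 > 15
    }
}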
Example 1
Source File: Frequency.java From hipparchus with Apache License 2.0
/**
 * Returns the cumulative frequency of values less than or equal to v.
 *
 * @param v the value to look up.
 * @return the number of values less than or equal to v
 */
public long getCumFreq(T v) {
    if (getSumFreq() == 0) {
        return 0;
    }
    NavigableMap<T, Long> headMap = freqTable.headMap(v, true);
    if (headMap.isEmpty()) {
        // v is less than first value
        return 0;
    } else if (headMap.size() == freqTable.size()) {
        // v is greater than or equal to last value
        return getSumFreq();
    }
    return headMap.values()
            .stream()
            .mapToLong(Long::longValue)
            .sum();
}
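Note the idiom: freqTable.headMap(v, true) is a view of every entry with key less than or equal to v, so an empty view means v precedes the smallest recorded value, while a view as large as the whole table means v is at or beyond the largest. Both boundary cases are settled before the stream does any summing.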
Example 2
Source File: BaseVersionManager.java From Hive2Hive with MIT License
protected NavigableMap<Number160, Set<Number160>> buildDigest(Map<PeerAddress, DigestResult> rawDigest) {
    NavigableMap<Number160, Set<Number160>> digestMap = new TreeMap<Number160, Set<Number160>>();
    if (rawDigest == null) {
        return digestMap;
    }
    for (PeerAddress peerAddress : rawDigest.keySet()) {
        NavigableMap<Number640, Collection<Number160>> tmp = rawDigest.get(peerAddress).keyDigest();
        if (tmp == null || tmp.isEmpty()) {
            // ignore this peer
        } else {
            for (Number640 key : tmp.keySet()) {
                for (Number160 bKey : tmp.get(key)) {
                    if (!digestMap.containsKey(key.versionKey())) {
                        digestMap.put(key.versionKey(), new HashSet<Number160>());
                    }
                    digestMap.get(key.versionKey()).add(bKey);
                }
            }
        }
    }
    return digestMap;
}
Example 3
Source File: AbstractServer.java From journalkeeper with Apache License 2.0
private void compactJournalToSnapshot(long index) {
    logger.info("Compact journal to index: {}...", index);
    try {
        Snapshot snapshot = snapshots.get(index);
        if (null != snapshot) {
            JournalSnapshot journalSnapshot = snapshot.getJournalSnapshot();
            logger.info("Compact journal entries, journal snapshot: {}, journal: {}...", journalSnapshot, journal);
            journal.compact(snapshot.getJournalSnapshot());
            logger.info("Compact journal finished, journal: {}.", journal);
            NavigableMap<Long, Snapshot> headMap = snapshots.headMap(index, false);
            while (!headMap.isEmpty()) {
                snapshot = headMap.remove(headMap.firstKey());
                logger.info("Discard snapshot: {}.", snapshot.getPath());
                snapshot.close();
                snapshot.clear();
            }
        } else {
            logger.warn("Compact journal failed! Cause no snapshot at index: {}.", index);
        }
    } catch (Throwable e) {
        logger.warn("Compact journal exception!", e);
    }
}
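The discard loop above relies on headMap returning a live view: removing headMap.firstKey() through the view also removes the entry from the backing snapshots map, so the loop ends once every snapshot below index is gone. A minimal sketch of that drain pattern, with invented map contents:

import java.util.NavigableMap;
import java.util.TreeMap;

public class DrainHeadMapDemo {
    public static void main(String[] args) {
        NavigableMap<Long, String> snapshots = new TreeMap<>();
        snapshots.put(1L, "snap-1");
        snapshots.put(2L, "snap-2");
        snapshots.put(5L, "snap-5");

        // Live view of all entries with key strictly below 5.
        NavigableMap<Long, String> head = snapshots.headMap(5L, false);
        while (!head.isEmpty()) {
            // Removing via the view also removes from the backing map.
            System.out.println("Discarded " + head.remove(head.firstKey()));
        }
        System.out.println(snapshots); // {5=snap-5}
    }
}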
Example 4
Source File: BeaconBlocksByRangeMessageHandler.java From teku with Apache License 2.0
private SafeFuture<?> sendMatchingBlocks(
        final BeaconBlocksByRangeRequestMessage message,
        final ResponseCallback<SignedBeaconBlock> callback) {
    final UnsignedLong count = min(maxRequestSize, message.getCount());
    final UnsignedLong endSlot =
            message.getStartSlot().plus(message.getStep().times(count)).minus(ONE);

    final UnsignedLong headBlockSlot =
            combinedChainDataClient.getBestBlock().map(SignedBeaconBlock::getSlot).orElse(ZERO);
    final NavigableMap<UnsignedLong, Bytes32> hotRoots;
    if (combinedChainDataClient.isFinalized(endSlot)) {
        // All blocks are finalized so skip scanning the protoarray
        hotRoots = new TreeMap<>();
    } else {
        hotRoots = combinedChainDataClient.getAncestorRoots(
                message.getStartSlot(), message.getStep(), count);
    }
    // Don't send anything past the last slot found in protoarray to ensure blocks are consistent
    // If we didn't find any blocks in protoarray, every block in the range must be finalized
    // so we don't need to worry about inconsistent blocks
    final UnsignedLong headSlot = hotRoots.isEmpty() ? headBlockSlot : hotRoots.lastKey();
    return sendNextBlock(
            new RequestState(
                    message.getStartSlot(), message.getStep(), count, headSlot, hotRoots, callback));
}
Example 5
Source File: KieRepositoryImpl.java From kogito-runtimes with Apache License 2.0
synchronized KieModule remove(ReleaseId releaseId) {
    KieModule removedKieModule = null;
    String ga = releaseId.getGroupId() + ":" + releaseId.getArtifactId();
    ComparableVersion comparableVersion = new ComparableVersion(releaseId.getVersion());

    NavigableMap<ComparableVersion, KieModule> artifactMap = kieModules.get(ga);

    if (artifactMap != null) {
        removedKieModule = artifactMap.remove(comparableVersion);
        if (artifactMap.isEmpty()) {
            kieModules.remove(ga);
        }
        oldKieModules.remove(releaseId);
    }
    return removedKieModule;
}
Example 6
Source File: AlfrescoImapFolder.java From alfresco-repository with GNU Lesser General Public License v3.0
/**
 * Returns the UIDNEXT value of the folder.
 *
 * @return UIDNEXT value.
 */
@Override
public long getUidNext() {
    NavigableMap<Long, FileInfo> search = getFolderStatus().search;
    return search.isEmpty() ? 1 : search.lastKey() + 1;
}
Example 7
Source File: CyclicIteration.java From hadoop with Apache License 2.0
/** Construct an {@link Iterable} object,
 * so that an {@link Iterator} can be created
 * for iterating the given {@link NavigableMap}.
 * The iteration begins from the starting key exclusively.
 */
public CyclicIteration(NavigableMap<K, V> navigablemap, K startingkey) {
  if (navigablemap == null || navigablemap.isEmpty()) {
    this.navigablemap = null;
    this.tailmap = null;
  } else {
    this.navigablemap = navigablemap;
    this.tailmap = navigablemap.tailMap(startingkey, false);
  }
}
Example 8
Source File: AclTableMigrationTool.java From kylin-on-parquet-v2 with Apache License 2.0
private Map<String, LegacyAceInfo> getAllAceInfo(Result result) throws IOException {
    Map<String, LegacyAceInfo> allAceInfoMap = new HashMap<>();
    NavigableMap<byte[], byte[]> familyMap = result.getFamilyMap(Bytes.toBytes(AclConstant.ACL_ACES_FAMILY));
    if (familyMap != null && !familyMap.isEmpty()) {
        for (Map.Entry<byte[], byte[]> entry : familyMap.entrySet()) {
            String sid = new String(entry.getKey(), StandardCharsets.UTF_8);
            LegacyAceInfo aceInfo = aceSerializer.deserialize(entry.getValue());
            if (null != aceInfo) {
                allAceInfoMap.put(sid, aceInfo);
            }
        }
    }
    return allAceInfoMap;
}
Example 9
Source File: CyclicIteration.java From big-c with Apache License 2.0
/** Construct an {@link Iterable} object,
 * so that an {@link Iterator} can be created
 * for iterating the given {@link NavigableMap}.
 * The iteration begins from the starting key exclusively.
 */
public CyclicIteration(NavigableMap<K, V> navigablemap, K startingkey) {
  if (navigablemap == null || navigablemap.isEmpty()) {
    this.navigablemap = null;
    this.tailmap = null;
  } else {
    this.navigablemap = navigablemap;
    this.tailmap = navigablemap.tailMap(startingkey, false);
  }
}
Example 10
Source File: CyclicIteration.java From hadoop-gpu with Apache License 2.0
/** Construct an {@link Iterable} object,
 * so that an {@link Iterator} can be created
 * for iterating the given {@link NavigableMap}.
 * The iteration begins from the starting key exclusively.
 */
public CyclicIteration(NavigableMap<K, V> navigablemap, K startingkey) {
  if (navigablemap == null || navigablemap.isEmpty()) {
    this.navigablemap = null;
    this.tailmap = null;
  } else {
    this.navigablemap = navigablemap;
    this.tailmap = navigablemap.tailMap(startingkey, false);
  }
}
Example 11
Source File: BlobStoreStats.java From ambry with Apache License 2.0
/**
 * Walk through the entire index and collect valid size information per log segment
 * (size of delete records included).
 * @param referenceTimeInMs the reference time in ms until which deletes and expiration are relevant
 * @return a {@link NavigableMap} of log segment name to valid data size
 */
private NavigableMap<String, Long> collectValidDataSizeByLogSegment(long referenceTimeInMs) throws StoreException {
    logger.trace("On demand index scanning to collect compaction data stats for store {} wrt ref time {}", storeId,
        referenceTimeInMs);
    long startTimeMs = time.milliseconds();
    Map<StoreKey, Long> deletedKeys = new HashMap<>();
    NavigableMap<String, Long> validSizePerLogSegment = new TreeMap<>(LogSegmentNameHelper.COMPARATOR);
    int indexSegmentCount = 0;
    for (IndexSegment indexSegment : index.getIndexSegments().descendingMap().values()) {
        if (!enabled.get()) {
            throw new StoreException(String.format("BlobStoreStats is not enabled or closing for store %s", storeId),
                StoreErrorCodes.Store_Shutting_Down);
        }
        long indexSegmentStartProcessTimeMs = time.milliseconds();
        String logSegmentName = indexSegment.getLogSegmentName();
        forEachValidIndexEntry(indexSegment, getIndexEntries(indexSegment), referenceTimeInMs, deletedKeys,
            entry -> updateMapHelper(validSizePerLogSegment, logSegmentName, entry.getValue().getSize()));
        metrics.statsOnDemandScanTimePerIndexSegmentMs.update(time.milliseconds() - indexSegmentStartProcessTimeMs,
            TimeUnit.MILLISECONDS);
        indexSegmentCount++;
        if (indexSegmentCount == 1 || indexSegmentCount % 10 == 0) {
            logger.info("Compaction Stats: Index segment {} processing complete (on-demand scanning) for store {}",
                indexSegment.getFile().getName(), storeId);
        }
    }
    metrics.statsOnDemandScanTotalTimeMs.update(time.milliseconds() - startTimeMs, TimeUnit.MILLISECONDS);
    if (validSizePerLogSegment.isEmpty()) {
        validSizePerLogSegment.put(index.getStartOffset().getName(), 0L);
    }
    return validSizePerLogSegment;
}
Example 12
Source File: AclTableMigrationTool.java From kylin with Apache License 2.0
private Map<String, LegacyAceInfo> getAllAceInfo(Result result) throws IOException {
    Map<String, LegacyAceInfo> allAceInfoMap = new HashMap<>();
    NavigableMap<byte[], byte[]> familyMap = result.getFamilyMap(Bytes.toBytes(AclConstant.ACL_ACES_FAMILY));
    if (familyMap != null && !familyMap.isEmpty()) {
        for (Map.Entry<byte[], byte[]> entry : familyMap.entrySet()) {
            String sid = new String(entry.getKey(), StandardCharsets.UTF_8);
            LegacyAceInfo aceInfo = aceSerializer.deserialize(entry.getValue());
            if (null != aceInfo) {
                allAceInfoMap.put(sid, aceInfo);
            }
        }
    }
    return allAceInfoMap;
}
Example 13
Source File: LogFileService.java From c5-replicator with Apache License 2.0
/**
 * Delete links to all logs for a given quorum except the current log, effectively
 * removing the past logs from the record but keeping the data.
 *
 * @throws IOException
 */
public void archiveAllButCurrent(String quorumId) throws IOException {
    NavigableMap<Long, Path> linkPathMap = getLinkPathMap(quorumId);
    if (linkPathMap.isEmpty()) {
        return;
    }
    long lastLinkId = linkPathMap.lastEntry().getKey();
    for (Map.Entry<Long, Path> linkEntry : linkPathMap.entrySet()) {
        if (linkEntry.getKey() != lastLinkId) {
            Files.delete(linkEntry.getValue());
        }
    }
}
Example 14
Source File: ReentrantTransactionDispatcher.java From xodus with Apache License 2.0
private static boolean notifyNextWaiter(@NotNull final NavigableMap<Long, Condition> queue) {
    if (!queue.isEmpty()) {
        queue.firstEntry().getValue().signal();
        return true;
    }
    return false;
}
Example 15
Source File: CyclicIteration.java From RDFS with Apache License 2.0
/** Construct an {@link Iterable} object,
 * so that an {@link Iterator} can be created
 * for iterating the given {@link NavigableMap}.
 * The iteration begins from the starting key exclusively.
 */
public CyclicIteration(NavigableMap<K, V> navigablemap, K startingkey) {
  if (navigablemap == null || navigablemap.isEmpty()) {
    this.navigablemap = null;
    this.tailmap = null;
  } else {
    this.navigablemap = navigablemap;
    this.tailmap = navigablemap.tailMap(startingkey, false);
  }
}
Example 16
Source File: KieRepositoryImpl.java From kogito-runtimes with Apache License 2.0
synchronized KieModule load(InternalKieScanner kieScanner, ReleaseId releaseId, VersionRange versionRange) {
    String ga = releaseId.getGroupId() + ":" + releaseId.getArtifactId();
    NavigableMap<ComparableVersion, KieModule> artifactMap = kieModules.get(ga);
    if (artifactMap == null || artifactMap.isEmpty()) {
        return null;
    }
    KieModule kieModule = artifactMap.get(new ComparableVersion(releaseId.getVersion()));

    if (versionRange.fixed) {
        if (kieModule != null && releaseId.isSnapshot()) {
            String oldSnapshotVersion = ((ReleaseIdImpl) kieModule.getReleaseId()).getSnapshotVersion();
            if (oldSnapshotVersion != null) {
                String currentSnapshotVersion = kieScanner.getArtifactVersion(releaseId);
                if (currentSnapshotVersion != null &&
                        new ComparableVersion(currentSnapshotVersion).compareTo(new ComparableVersion(oldSnapshotVersion)) > 0) {
                    // if the snapshot currently available on the maven repo is newer than the cached one
                    // return null to enforce the building of this newer version
                    return null;
                }
            }
        }
        return kieModule;
    }

    Map.Entry<ComparableVersion, KieModule> entry =
            versionRange.upperBound == null ?
                    artifactMap.lastEntry() :
                    versionRange.upperInclusive ?
                            artifactMap.floorEntry(new ComparableVersion(versionRange.upperBound)) :
                            artifactMap.lowerEntry(new ComparableVersion(versionRange.upperBound));

    if (entry == null) {
        return null;
    }

    if (versionRange.lowerBound == null) {
        return entry.getValue();
    }

    int comparison = entry.getKey().compareTo(new ComparableVersion(versionRange.lowerBound));
    return comparison > 0 || (comparison == 0 && versionRange.lowerInclusive) ? entry.getValue() : null;
}
Example 17
Source File: RLESparseResourceAllocation.java From hadoop with Apache License 2.0
/**
 * Add a resource for the specified interval
 *
 * @param reservationInterval the interval for which the resource is to be added
 * @param capacity the resource to be added
 * @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    if (ticks != null && !ticks.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0, 0);
      Entry<Long, Resource> lowEntry = ticks.floorEntry(startKey);
      if (lowEntry == null) {
        // This is the earliest starting interval
        cumulativeCapacity.put(startKey, totCap);
      } else {
        updatedCapacity = Resources.add(lowEntry.getValue(), totCap);
        // Add a new tick only if the updated value is different
        // from the previous tick
        if ((startKey == lowEntry.getKey())
            && (isSameAsPrevious(lowEntry.getKey(), updatedCapacity))) {
          cumulativeCapacity.remove(lowEntry.getKey());
        } else {
          cumulativeCapacity.put(startKey, updatedCapacity);
        }
      }
      // Increase all the capacities of overlapping intervals
      Set<Entry<Long, Resource>> overlapSet =
          ticks.tailMap(startKey, false).entrySet();
      for (Entry<Long, Resource> entry : overlapSet) {
        updatedCapacity = Resources.add(entry.getValue(), totCap);
        entry.setValue(updatedCapacity);
      }
    } else {
      // This is the first interval to be added
      cumulativeCapacity.put(startKey, totCap);
    }
    Resource nextTick = cumulativeCapacity.get(endKey);
    if (nextTick != null) {
      // If there is overlap, remove the duplicate entry
      if (isSameAsPrevious(endKey, nextTick)) {
        cumulativeCapacity.remove(endKey);
      }
    } else {
      // Decrease capacity as this is end of the interval
      cumulativeCapacity.put(endKey, Resources.subtract(
          cumulativeCapacity.floorEntry(endKey).getValue(), totCap));
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
Example 18
Source File: RLESparseResourceAllocation.java From big-c with Apache License 2.0
/**
 * Add a resource for the specified interval
 *
 * @param reservationInterval the interval for which the resource is to be added
 * @param capacity the resource to be added
 * @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    if (ticks != null && !ticks.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0);
      Entry<Long, Resource> lowEntry = ticks.floorEntry(startKey);
      if (lowEntry == null) {
        // This is the earliest starting interval
        cumulativeCapacity.put(startKey, totCap);
      } else {
        updatedCapacity = Resources.add(lowEntry.getValue(), totCap);
        // Add a new tick only if the updated value is different
        // from the previous tick
        if ((startKey == lowEntry.getKey())
            && (isSameAsPrevious(lowEntry.getKey(), updatedCapacity))) {
          cumulativeCapacity.remove(lowEntry.getKey());
        } else {
          cumulativeCapacity.put(startKey, updatedCapacity);
        }
      }
      // Increase all the capacities of overlapping intervals
      Set<Entry<Long, Resource>> overlapSet =
          ticks.tailMap(startKey, false).entrySet();
      for (Entry<Long, Resource> entry : overlapSet) {
        updatedCapacity = Resources.add(entry.getValue(), totCap);
        entry.setValue(updatedCapacity);
      }
    } else {
      // This is the first interval to be added
      cumulativeCapacity.put(startKey, totCap);
    }
    Resource nextTick = cumulativeCapacity.get(endKey);
    if (nextTick != null) {
      // If there is overlap, remove the duplicate entry
      if (isSameAsPrevious(endKey, nextTick)) {
        cumulativeCapacity.remove(endKey);
      }
    } else {
      // Decrease capacity as this is end of the interval
      cumulativeCapacity.put(endKey, Resources.subtract(
          cumulativeCapacity.floorEntry(endKey).getValue(), totCap));
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
Example 19
Source File: AbstractServer.java From journalkeeper with Apache License 2.0
void installSnapshot(long offset, long lastIncludedIndex, int lastIncludedTerm, byte[] data, boolean isDone) throws IOException, TimeoutException {
    synchronized (partialSnapshot) {
        logger.info("Install snapshot, offset: {}, lastIncludedIndex: {}, lastIncludedTerm: {}, data length: {}, isDone: {}... " +
                        "journal minIndex: {}, maxIndex: {}, commitIndex: {}...",
                ThreadSafeFormat.formatWithComma(offset),
                ThreadSafeFormat.formatWithComma(lastIncludedIndex),
                lastIncludedTerm,
                data.length,
                isDone,
                ThreadSafeFormat.formatWithComma(journal.minIndex()),
                ThreadSafeFormat.formatWithComma(journal.maxIndex()),
                ThreadSafeFormat.formatWithComma(journal.commitIndex())
        );
        Snapshot snapshot;
        long lastApplied = lastIncludedIndex + 1;
        Path snapshotPath = snapshotsPath().resolve(String.valueOf(lastApplied));
        partialSnapshot.installTrunk(offset, data, snapshotPath);

        if (isDone) {
            logger.info("All snapshot files received, discard any existing snapshot with a same or smaller index...");
            // discard any existing snapshot with a same or smaller index
            NavigableMap<Long, Snapshot> headMap = snapshots.headMap(lastApplied, true);
            while (!headMap.isEmpty()) {
                snapshot = headMap.remove(headMap.firstKey());
                logger.info("Discard snapshot: {}.", snapshot.getPath());
                snapshot.close();
                snapshot.clear();
            }
            logger.info("add the installed snapshot to snapshots: {}...", snapshotPath);
            partialSnapshot.finish();
            // add the installed snapshot to snapshots.
            snapshot = new Snapshot(stateFactory, metadataPersistence);
            snapshot.recover(snapshotPath, properties);
            snapshots.put(lastApplied, snapshot);
            logger.info("New installed snapshot: {}.", snapshot.getJournalSnapshot());

            // If existing log entry has same index and term as snapshot’s
            // last included entry, retain log entries following it.
            // Discard the entire log
            logger.info("Compact journal entries, journal: {}...", journal);
            threads.stopThread(threadName(ThreadNames.FLUSH_JOURNAL_THREAD));
            try {
                if (journal.minIndex() >= lastIncludedIndex &&
                        lastIncludedIndex < journal.maxIndex() &&
                        journal.getTerm(lastIncludedIndex) == lastIncludedTerm) {
                    journal.compact(snapshot.getJournalSnapshot());
                } else {
                    journal.clear(snapshot.getJournalSnapshot());
                }
            } finally {
                threads.startThread(threadName(ThreadNames.FLUSH_JOURNAL_THREAD));
            }
            logger.info("Compact journal finished, journal: {}.", journal);

            // Reset state machine using snapshot contents (and load
            // snapshot’s cluster configuration)
            logger.info("Use the new installed snapshot as server's state...");
            stopAndWaitScheduledFeature(flushStateFuture, 1000L);
            threads.stopThread(threadName(ThreadNames.STATE_MACHINE_THREAD));
            try {
                state.close();
                state.clear();
                snapshot.dump(statePath());
                state.recover(statePath(), properties);
            } finally {
                threads.startThread(threadName(ThreadNames.STATE_MACHINE_THREAD));
                flushStateFuture = scheduledExecutor.scheduleAtFixedRate(this::flushState,
                        ThreadLocalRandom.current().nextLong(10L, 50L),
                        config.getFlushIntervalMs(), TimeUnit.MILLISECONDS);
            }
            logger.info("Install snapshot successfully!");
        }
    }
}
Example 20
Source File: BitflyerAdviser.java From cryptotrader with GNU Affero General Public License v3.0
@VisibleForTesting
NavigableMap<BigDecimal, BigDecimal> getSfdTable() {

    String value = StringUtils.trimToEmpty(getStringProperty(KEY_SFD_TBL, null));

    NavigableMap<BigDecimal, BigDecimal> table = sfdCache.getIfPresent(value);

    if (table == null) {
        try {
            NavigableMap<BigDecimal, BigDecimal> m = new TreeMap<>();
            for (String entry : StringUtils.split(value, '|')) {
                String[] kv = StringUtils.split(entry, ':');
                BigDecimal k = new BigDecimal(kv[0]);
                BigDecimal v = new BigDecimal(kv[1]);
                m.put(k, v);
            }
            table = m.isEmpty() ? SFD : Collections.unmodifiableNavigableMap(m);
        } catch (Exception e) {
            table = SFD;
        }
        sfdCache.put(value, table);
    }
    return table;
}