Java Code Examples for com.google.common.collect.MinMaxPriorityQueue

The following are top-voted examples showing how to use com.google.common.collect.MinMaxPriorityQueue. The examples are extracted from open source projects.
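
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of the builder API that all of them rely on: orderedBy(...) supplies the comparator, maximumSize(...) caps the queue and evicts the greatest element when the cap would be exceeded, and create() builds the queue.

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.Comparator;

public class MinMaxPriorityQueueDemo {
  public static void main(String[] args) {
    // Keep at most 3 strings, ordered by length (shortest first).
    MinMaxPriorityQueue<String> queue = MinMaxPriorityQueue
        .orderedBy(Comparator.comparingInt(String::length))
        .maximumSize(3)
        .create();

    queue.add("pear");
    queue.add("fig");
    queue.add("banana");
    queue.add("apple"); // exceeds the cap, so the longest element ("banana") is evicted

    System.out.println(queue.peekFirst()); // shortest element: "fig"
    System.out.println(queue.peekLast());  // longest remaining element: "apple"
  }
}
 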
Example 1
Project: ditb   File: CachedEntryQueue.java   (6 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0) {
    initialSize++;
  }
  queue = MinMaxPriorityQueue.orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {

    public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
        Entry<BlockCacheKey, BucketEntry> entry2) {
      return BucketEntry.COMPARATOR.compare(entry1.getValue(), entry2.getValue());
    }

  }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
 
Example 2
Project: vespa   File: CachedPostingListCounter.java   (6 votes)
public CachedPostingListCounter rebuildCache() {
    MinMaxPriorityQueue<Entry> mostExpensive = MinMaxPriorityQueue
            .maximumSize(32).expectedSize(32).create();
    synchronized (this) {
        for (ObjectLongPair<int[]> p : frequency.keyValuesView()) {
            mostExpensive.add(new Entry(p.getOne(), p.getTwo()));
        }
    }
    ObjectIntHashMap<int[]> postingListMapping = new ObjectIntHashMap<>();
    int[] bitVector = new int[nDocuments];
    int length = mostExpensive.size();
    for (int i = 0; i < length; i++) {
        Entry e = mostExpensive.removeFirst();
        int[] docIds = e.docIds;
        postingListMapping.put(docIds, i);
        for (int docId : docIds) {
            bitVector[docId] |= (1 << i);
        }
    }
    return new CachedPostingListCounter(postingListMapping, bitVector);
}
 
Example 3
Project: maker   File: KNNQuery.java   (6 votes)
public Queue<QueryMatch> queryKNN(double lat, double lon, int n)
  throws IOException {
  DistanceComparator comp = new DistanceComparator(lon, lat);
  Queue<QueryMatch> ret
    = MinMaxPriorityQueue.orderedBy(comp)
    .maximumSize(n)
    .create();

  GeoHash target = GeoHash.withCharacterPrecision(lat, lon, precision);
  ret.addAll(takeN(comp, target.toBase32(), n));
  for (GeoHash h : target.getAdjacent()) {
    ret.addAll(takeN(comp, h.toBase32(), n));
  }

  return ret;
}
 
Example 4
Project: pbase   File: CachedEntryQueue.java   (6 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0)
    initialSize++;
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }

      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
 
Example 5
Project: HIndex   File: CachedEntryQueue.java   (6 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0)
    initialSize++;
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }

      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
 
Example 6
Project: ache   File: MaximizeWebsitesLinkSelector.java   (6 votes)
@Override
public List<LinkRelevance> getSelectedLinks() {
    List<LinkRelevance> links = new ArrayList<>();
    while (links.size() < numberOfLinks && !topkLinksPerDomain.isEmpty()) {
        // adds the URL with max score of each domain
        MinMaxPriorityQueue<LinkRelevance> topk = newPriorityQueue(numberOfLinks);
        Iterator<Entry<String, MinMaxPriorityQueue<LinkRelevance>>> it = topkLinksPerDomain.entrySet().iterator();
        while (it.hasNext()) {
            MinMaxPriorityQueue<LinkRelevance> domain = it.next().getValue();
            topk.add(domain.poll());
            if (domain.isEmpty()) {
                it.remove();
            }
        }
        for(LinkRelevance link : topk) {
            links.add(link);
        }
    }
    this.topkLinksPerDomain = null; // clean-up reference
    return links;
}
 
Example 7
Project: bigbase   File: BucketCacheOverhead.java   (6 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0)
    initialSize++;
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }

      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
 
Example 8
Project: PyroDB   File: CachedEntryQueue.java   (6 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0)
    initialSize++;
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }

      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
 
Example 9
Project: miru   File: AmzaClusterRegistry.java   (6 votes)
@Override
public List<MiruPartition> getPartitionsForTenant(MiruTenantId tenantId) throws Exception {
    NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest = tenantLatestTopologies(tenantId);

    List<MiruPartition> partitions = new ArrayList<>();
    for (MiruPartitionId partitionId : partitionIdToLatest.keySet()) {
        MinMaxPriorityQueue<HostAndTimestamp> got = partitionIdToLatest.get(partitionId);
        for (HostAndTimestamp hat : got) {
            EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
            byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null, toTopologyKey(tenantId, partitionId));
            MiruPartitionCoordInfo info;
            if (rawInfo == null) {
                info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
            } else {
                MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
                info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
            }
            partitions.add(new MiruPartition(new MiruPartitionCoord(tenantId, partitionId, hat.host), info));
        }
    }
    return partitions;
}
 
Example 10
Project: miru   File: AmzaClusterRegistry.java   (6 votes)
private NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> tenantPartitionsLatestTopologies(MiruTenantId tenantId,
    Collection<MiruPartitionId> partitionIds) throws Exception {

    final NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest = new TreeMap<>();

    for (HostHeartbeat hostHeartbeat : getAllHosts()) {
        EmbeddedClient registryClient = registryClient(hostHeartbeat.host);
        for (MiruPartitionId partitionId : partitionIds) {
            byte[] got = registryClient.getValue(Consistency.quorum, null, toTopologyKey(tenantId, partitionId));
            if (got != null) {
                MinMaxPriorityQueue<HostAndTimestamp> latest = partitionIdToLatest.get(partitionId);
                if (latest == null) {
                    // TODO defaultNumberOfReplicas should come from config?
                    latest = MinMaxPriorityQueue.maximumSize(defaultNumberOfReplicas)
                        .expectedSize(defaultNumberOfReplicas)
                        .<HostAndTimestamp>create();
                    partitionIdToLatest.put(partitionId, latest);
                }
                latest.add(new HostAndTimestamp(hostHeartbeat.host, FilerIO.bytesLong(got)));
            }
        }
    }

    return partitionIdToLatest;
}
 
Example 11
Project: miru   File: AmzaClusterRegistry.java   (6 votes)
@Override
public List<MiruPartition> getPartitionsForTenantHost(MiruTenantId tenantId, MiruHost host) throws Exception {
    NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest = tenantLatestTopologies(tenantId);
    List<MiruPartition> partitions = new ArrayList<>();
    for (MiruPartitionId partitionId : partitionIdToLatest.keySet()) {
        MinMaxPriorityQueue<HostAndTimestamp> got = partitionIdToLatest.get(partitionId);
        for (HostAndTimestamp hat : got) {
            if (hat.host.equals(host)) {
                EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
                byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null, toTopologyKey(tenantId, partitionId));
                MiruPartitionCoordInfo info;
                if (rawInfo == null) {
                    info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
                } else {
                    MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
                    info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
                }
                partitions.add(new MiruPartition(new MiruPartitionCoord(tenantId, partitionId, hat.host), info));
            }
        }
    }
    return partitions;
}
 
Example 12
Project: miru   File: AmzaClusterRegistry.java   (6 votes)
@Override
public MiruReplicaSet getReplicaSet(MiruTenantId tenantId, MiruPartitionId partitionId) throws Exception {
    MinMaxPriorityQueue<HostAndTimestamp> latest = tenantLatestTopology(tenantId, partitionId);
    List<MiruPartition> partitions = Lists.newArrayList();
    Set<MiruHost> replicaHosts = Sets.newHashSet();

    for (HostAndTimestamp hat : latest) {
        EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
        byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null, toTopologyKey(tenantId, partitionId));
        MiruPartitionCoordInfo info;
        if (rawInfo == null) {
            info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
        } else {
            MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
            info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
        }
        partitions.add(new MiruPartition(new MiruPartitionCoord(tenantId, partitionId, hat.host), info));
        replicaHosts.add(hat.host);
    }

    int missing = defaultNumberOfReplicas - replicaHosts.size(); // TODO expose to config?
    return new MiruReplicaSet(extractPartitionsByState(partitions), replicaHosts, missing, defaultNumberOfReplicas);
}
 
Example 13
Project: miru   File: CollaborativeFiltering.java   (6 votes)
private <BM extends IBM, IBM> RecoAnswer composeAnswer(MiruRequestContext<BM, IBM, ?> requestContext,
    MiruRequest<RecoQuery> request,
    MiruFieldDefinition fieldDefinition,
    MinMaxPriorityQueue<MiruTermCount> heap,
    StackBuffer stackBuffer) throws Exception {

    MiruSchema schema = requestContext.getSchema();
    MiruTermComposer termComposer = requestContext.getTermComposer();
    List<Recommendation> results = new ArrayList<>();
    for (MiruTermCount result : heap) {
        MiruValue term = new MiruValue(termComposer.decompose(schema, fieldDefinition, stackBuffer, result.termId));
        results.add(new Recommendation(term, result.count));
    }
    log.debug("score: results.size={}", results.size());
    boolean resultsExhausted = request.query.timeRange.smallestTimestamp > requestContext.getTimeIndex().getLargestTimestamp();
    return new RecoAnswer(results, 1, resultsExhausted);
}
 
Example 14
Project: c5   File: CachedEntryQueue.java   (6 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0)
    initialSize++;
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }

      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
 
Example 15
Project: pinot   File: PinotLLCRealtimeSegmentManager.java   (6 votes)
public void completeCommittingSegments(String realtimeTableName, List<String> segmentIds) {
  Comparator<LLCSegmentName> comparator = new Comparator<LLCSegmentName>() {
    @Override
    public int compare(LLCSegmentName o1, LLCSegmentName o2) {
      return o2.compareTo(o1);
    }
  };

  Map<Integer, MinMaxPriorityQueue<LLCSegmentName>> partitionToLatestSegments = new HashMap<>();

  for (String segmentId : segmentIds) {
    LLCSegmentName segmentName = new LLCSegmentName(segmentId);
    final int partitionId = segmentName.getPartitionId();
    MinMaxPriorityQueue latestSegments = partitionToLatestSegments.get(partitionId);
    if (latestSegments == null) {
      latestSegments = MinMaxPriorityQueue.orderedBy(comparator).maximumSize(2).create();
      partitionToLatestSegments.put(partitionId, latestSegments);
    }
    latestSegments.offer(segmentName);
  }

  completeCommittingSegmentsInternal(realtimeTableName, partitionToLatestSegments);
}
 
Example 16
Project: pinot   File: DimensionValueMetricPairTest.java   (6 votes)
@Test
public void comparatorTest() throws Exception {

  MinMaxPriorityQueue<DimensionValueMetricPair> testQueue = MinMaxPriorityQueue.maximumSize(2).create();

  DimensionValueMetricPair d1 = new DimensionValueMetricPair("d1", 1);
  DimensionValueMetricPair d2 = new DimensionValueMetricPair("d2", 2);
  DimensionValueMetricPair d3 = new DimensionValueMetricPair(30, 3);
  DimensionValueMetricPair d4 = new DimensionValueMetricPair("d4", 4);

  testQueue.add(d1);
  testQueue.add(d2);
  testQueue.add(d3);
  testQueue.add(d4);

  for (DimensionValueMetricPair pair : testQueue) {
    Assert.assertEquals(pair.getMetricValue().intValue() > 2, true,
        "Incorrect comparator for DimensionValueMetricPair, queue must retain highest metric values");
  }

}
 
Example 17
Project: DominoHBase   File: CachedEntryQueue.java   (6 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
  int initialSize = (int) (maxSize / blockSize);
  if (initialSize == 0)
    initialSize++;
  queue = MinMaxPriorityQueue
      .orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1,
            Entry<BlockCacheKey, BucketEntry> entry2) {
          return entry1.getValue().compareTo(entry2.getValue());
        }

      }).expectedSize(initialSize).create();
  cacheSize = 0;
  this.maxSize = maxSize;
}
 
Example 18
Project: ditb   File: SimpleLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
private void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 19
Project: ditb   File: LruCachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public LruCachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 20
Project: neuralccg   File: ReservoirSampler.java   (5 votes)
public ReservoirSampler(final int k, final Random random, final ToDoubleFunction<Integer> computeWeight) {
    this.minQueue = MinMaxPriorityQueue
            .<Pair<Double, T>>orderedBy((x, y) -> Double.compare(x.first(), y.first()))
            .maximumSize(k).create();
    this.computeWeight = computeWeight;
    this.random = random;
    this.count = new AtomicInteger(0);
}
 
Example 21
Project: LCIndex-HBase-0.94.16   File: DefaultLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 22
Project: LCIndex-HBase-0.94.16   File: CachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 23
Project: binnavi   File: MonoReilSolver.java   (5 votes)
public MonoReilSolver(final IInstructionGraph instructionGraph,
    final AnalysisDirection analysisDirection, final ILattice<LatticeElementType> lattice) {
  m_graph = Preconditions.checkNotNull(instructionGraph,
      "Error: instruction graph argument can not be null");
  m_direction = Preconditions.checkNotNull(analysisDirection,
      "Error: analysis direction argument can not be null");
  m_lattice = Preconditions.checkNotNull(lattice, "Error: lattice argument can not be null");

  m_workList = MinMaxPriorityQueue.expectedSize(m_graph.size()).create();
}
 
Example 24
Project: Gobblin   File: KafkaWorkUnitPacker.java   (5 votes)
/**
 * Pack a list of {@link WorkUnit}s into a smaller number of {@link MultiWorkUnit}s,
 * using the worst-fit-decreasing algorithm.
 *
 * Each {@link WorkUnit} is assigned to the {@link MultiWorkUnit} with the smallest load.
 */
protected List<WorkUnit> worstFitDecreasingBinPacking(List<WorkUnit> groups, int numOfMultiWorkUnits) {

  // Sort workunit groups by data size desc
  Collections.sort(groups, LOAD_DESC_COMPARATOR);

  MinMaxPriorityQueue<MultiWorkUnit> pQueue =
      MinMaxPriorityQueue.orderedBy(LOAD_ASC_COMPARATOR).expectedSize(numOfMultiWorkUnits).create();
  for (int i = 0; i < numOfMultiWorkUnits; i++) {
    MultiWorkUnit multiWorkUnit = new MultiWorkUnit();
    setWorkUnitEstSize(multiWorkUnit, 0);
    pQueue.add(multiWorkUnit);
  }

  for (WorkUnit group : groups) {
    MultiWorkUnit lightestMultiWorkUnit = pQueue.poll();
    addWorkUnitToMultiWorkUnit(group, lightestMultiWorkUnit);
    pQueue.add(lightestMultiWorkUnit);
  }

  logMultiWorkUnitInfo(pQueue);

  double minLoad = getWorkUnitEstLoad(pQueue.peekFirst());
  double maxLoad = getWorkUnitEstLoad(pQueue.peekLast());
  LOG.info(String.format("Min load of multiWorkUnit = %f; Max load of multiWorkUnit = %f; Diff = %f%%", minLoad,
      maxLoad, (maxLoad - minLoad) / maxLoad * 100.0));

  this.state.setProp(MIN_MULTIWORKUNIT_LOAD, minLoad);
  this.state.setProp(MAX_MULTIWORKUNIT_LOAD, maxLoad);

  List<WorkUnit> multiWorkUnits = Lists.newArrayList();
  multiWorkUnits.addAll(pQueue);
  return multiWorkUnits;
}
 
Example 25
Project: maker   File: KNNQuery.java   (5 votes)
Queue<QueryMatch> takeN(Comparator<QueryMatch> comp,
                             String prefix,
                             int n) throws IOException {
  Queue<QueryMatch> candidates
    = MinMaxPriorityQueue.orderedBy(comp)
    .maximumSize(n)
    .create();

  Scan scan = new Scan(prefix.getBytes());
  scan.setFilter(new PrefixFilter(prefix.getBytes()));
  scan.addFamily(FAMILY);
  scan.setMaxVersions(1);
  scan.setCaching(50);

  HTableInterface table = pool.getTable(TABLE);

  int cnt = 0;
  ResultScanner scanner = table.getScanner(scan);
  for (Result r : scanner) {
    String hash = new String(r.getRow());
    String id = new String(r.getValue(FAMILY, ID));
    String lon = new String(r.getValue(FAMILY, X_COL));
    String lat = new String(r.getValue(FAMILY, Y_COL));
    candidates.add(new QueryMatch(id, hash,
                                  Double.parseDouble(lon),
                                  Double.parseDouble(lat)));
    cnt++;
  }

  table.close();

  System.out.println(
    String.format("Scan over '%s' returned %s candidates.",
                  prefix, cnt));
  return candidates;
}
 
Example 26
Project: Megh   File: AbstractBucketManager.java   (5 votes)
public AbstractBucketManager()
{
  eventQueue = new LinkedBlockingQueue<Long>();
  evictionCandidates = Sets.newHashSet();
  dirtyBuckets = Maps.newConcurrentMap();
  bucketHeap = MinMaxPriorityQueue.orderedBy(new Comparator<AbstractBucket<T>>()
  {
    @Override
    public int compare(AbstractBucket<T> bucket1, AbstractBucket<T> bucket2)
    {
      if (bucket1.lastUpdateTime() < bucket2.lastUpdateTime()) {
        return -1;
      }
      if (bucket1.lastUpdateTime() > bucket2.lastUpdateTime()) {
        return 1;
      }
      return 0;
    }

  }).create();
  lock = new Lock();
  committedWindow = -1;

  noOfBuckets = DEF_NUM_BUCKETS;
  noOfBucketsInMemory = DEF_NUM_BUCKETS_MEM;
  maxNoOfBucketsInMemory = DEF_NUM_BUCKETS_MEM + 100;
  millisPreventingBucketEviction = DEF_MILLIS_PREVENTING_EVICTION;
  writeEventKeysOnly = true;
  bucketsToDelete = Sets.newHashSet();
}
 
Example 27
Project: pbase   File: SimpleLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
private void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 28
Project: pbase   File: LruCachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public LruCachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 29
Project: HIndex   File: SimpleLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
private void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 30
Project: HIndex   File: CachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 31
Project: datacollector   File: GoogleCloudStorageSource.java   (5 votes)
@Override
protected List<ConfigIssue> init() {
  // Validate configuration values and open any required resources.
  List<ConfigIssue> issues = gcsOriginConfig.init(getContext(), super.init());
  minMaxPriorityQueue = MinMaxPriorityQueue.orderedBy((Blob o1, Blob o2) -> {
    int result = o1.getUpdateTime().compareTo(o2.getUpdateTime());
    if(result != 0) {
      return result;
    }
    //same modified time. Use name to sort
    return o1.getName().compareTo(o2.getName());
  }).maximumSize(gcsOriginConfig.maxResultQueueSize).create();
  antPathMatcher = new AntPathMatcher();

  gcsOriginConfig.credentials.getCredentialsProvider(getContext(), issues)
      .ifPresent(p -> credentialsProvider = p);

  try {
    storage = StorageOptions.newBuilder()
        .setCredentials(credentialsProvider.getCredentials())
        .build()
        .getService();
  } catch (IOException e) {
    LOG.error("Error when initializing storage. Reason : {}", e);
    issues.add(getContext().createConfigIssue(
        Groups.CREDENTIALS.name(),
        "gcsOriginConfig.credentials.credentialsProvider",
        Errors.GCS_01,
        e
    ));
  }

  rateLimitElEval = FileRefUtil.createElEvalForRateLimit(getContext());
  rateLimitElVars = getContext().createELVars();
  errorBlobHandler = new GcsObjectPostProcessingHandler(storage, gcsOriginConfig.gcsOriginErrorConfig);
  return issues;
}
 
Example 32
Project: ache   File: TopkLinkSelector.java   (5 votes)
@Override
public void startSelection(int numberOfLinks) {
    this.topkLinks = MinMaxPriorityQueue
            .orderedBy(LinkRelevance.DESC_ORDER_COMPARATOR)
            .maximumSize(numberOfLinks) // keep only top-k items
            .create();
}
 
Example 33
Project: ache   File: RandomLinkSelector.java   (5 votes)
@Override
public void startSelection(int numberOfLinks) {
    links = MinMaxPriorityQueue
        .orderedBy(new Comparator<RandomLink>() {
            @Override
            public int compare(RandomLink o1, RandomLink o2) {
                return Double.compare(o1.relevance, o2.relevance);
            }
        })
        .maximumSize(numberOfLinks) // keep only top-k items
        .create();
}
 
Example 34
Project: ache   File: MaximizeWebsitesLinkSelector.java   (5 votes)
@Override
public void evaluateLink(LinkRelevance link) {
    if (link.getRelevance() > 0) {
        String domainName = link.getTopLevelDomainName();
        MinMaxPriorityQueue<LinkRelevance> domainQueue = topkLinksPerDomain.get(domainName);
        if (domainQueue == null) {
            domainQueue = newPriorityQueue(MAX_LINKS_PER_DOMAIN);
            topkLinksPerDomain.put(domainName, domainQueue);
        }
        domainQueue.add(link);
    }
}
 
Example 35
Project: termsuite-core   File: BilingualAlignmentService.java   (5 votes)
public List<TranslationCandidate> alignDistributional(TermService sourceTerm, int nbCandidates,
		int minCandidateFrequency) {
	Queue<TranslationCandidate> alignedCandidateQueue = MinMaxPriorityQueue.maximumSize(nbCandidates).create();
	ContextVector sourceVector = sourceTerm.getContext();
	if(sourceVector == null)
		return new ArrayList<>();
	ContextVector translatedSourceVector = translateVector(
			sourceVector,
			dico,
			TRANSLATION_STRATEGY_MOST_SPECIFIC,
			targetTermino);
	ExplainedValue v;
	int nbVectorsNotComputed = 0;
	int nbVectorsComputed = 0;
	for(TermService targetTerm:targetTermino.terms().filter(TermService::isSingleWord).collect(Collectors.toList())) {
		if(targetTerm.getFrequency() < minCandidateFrequency)
			continue;
		if(targetTerm.getContext() != null) {
			nbVectorsComputed++;
			v = distance.getExplainedValue(translatedSourceVector, targetTerm.getContext());
			TranslationCandidate candidate = new TranslationCandidate(
					AlignmentMethod.DISTRIBUTIONAL,
					targetTerm, 
					v.getValue(), 
					sourceTerm,
					v.getExplanation());
			alignedCandidateQueue.add(candidate);
		}
	};
	if(nbVectorsNotComputed > 0) {
		LOGGER.warn(MSG_SEVERAL_VECTORS_NOT_COMPUTED, nbVectorsComputed, nbVectorsNotComputed);	
	}
	
	// sort alignedCandidates
	List<TranslationCandidate> alignedCandidates = Lists.newArrayListWithCapacity(alignedCandidateQueue.size());
	alignedCandidates.addAll(alignedCandidateQueue);
	normalizeCandidateScores(alignedCandidates);
	return Lists.newArrayList(alignedCandidateQueue);
}
 
Example 36
Project: IRIndex   File: DefaultLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 37
Project: IRIndex   File: CachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 38
Project: RStore   File: DefaultLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 39
Project: incubator-gobblin   File: KafkaWorkUnitPacker.java   (5 votes)
/**
 * Pack a list of {@link WorkUnit}s into a smaller number of {@link MultiWorkUnit}s,
 * using the worst-fit-decreasing algorithm.
 *
 * Each {@link WorkUnit} is assigned to the {@link MultiWorkUnit} with the smallest load.
 */
protected List<WorkUnit> worstFitDecreasingBinPacking(List<WorkUnit> groups, int numOfMultiWorkUnits) {

  // Sort workunit groups by data size desc
  Collections.sort(groups, LOAD_DESC_COMPARATOR);

  MinMaxPriorityQueue<MultiWorkUnit> pQueue =
      MinMaxPriorityQueue.orderedBy(LOAD_ASC_COMPARATOR).expectedSize(numOfMultiWorkUnits).create();
  for (int i = 0; i < numOfMultiWorkUnits; i++) {
    MultiWorkUnit multiWorkUnit = MultiWorkUnit.createEmpty();
    setWorkUnitEstSize(multiWorkUnit, 0);
    pQueue.add(multiWorkUnit);
  }

  for (WorkUnit group : groups) {
    MultiWorkUnit lightestMultiWorkUnit = pQueue.poll();
    addWorkUnitToMultiWorkUnit(group, lightestMultiWorkUnit);
    pQueue.add(lightestMultiWorkUnit);
  }

  logMultiWorkUnitInfo(pQueue);

  double minLoad = getWorkUnitEstLoad(pQueue.peekFirst());
  double maxLoad = getWorkUnitEstLoad(pQueue.peekLast());
  LOG.info(String.format("Min load of multiWorkUnit = %f; Max load of multiWorkUnit = %f; Diff = %f%%", minLoad,
      maxLoad, (maxLoad - minLoad) / maxLoad * 100.0));

  this.state.setProp(MIN_MULTIWORKUNIT_LOAD, minLoad);
  this.state.setProp(MAX_MULTIWORKUNIT_LOAD, maxLoad);

  List<WorkUnit> multiWorkUnits = Lists.newArrayList();
  multiWorkUnits.addAll(pQueue);
  return multiWorkUnits;
}
 
Example 40
Project: PyroDB   File: SimpleLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
private void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 41
Project: PyroDB   File: CachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 42
Project: miru   File: AmzaClusterRegistry.java   (5 votes)
private MinMaxPriorityQueue<HostAndTimestamp> tenantLatestTopology(MiruTenantId tenantId, MiruPartitionId partitionId) throws Exception {
    // TODO defaultNumberOfReplicas should come from config?
    MinMaxPriorityQueue<HostAndTimestamp> latest = MinMaxPriorityQueue.maximumSize(defaultNumberOfReplicas)
        .expectedSize(defaultNumberOfReplicas)
        .create();
    for (HostHeartbeat hostHeartbeat : getAllHosts()) {
        EmbeddedClient registryClient = registryClient(hostHeartbeat.host);
        byte[] got = registryClient.getValue(Consistency.quorum, null, toTopologyKey(tenantId, partitionId));
        if (got != null) {
            latest.add(new HostAndTimestamp(hostHeartbeat.host, FilerIO.bytesLong(got)));
        }
    }
    return latest;
}
 
Example 43
Project: miru   File: AmzaClusterRegistry.java   (5 votes)
private NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> tenantLatestTopologies(MiruTenantId tenantId) throws Exception {
    final byte[] from = topologyKeyPrefix(tenantId);
    final byte[] to = WALKey.prefixUpperExclusive(from);
    final NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest = new TreeMap<>();
    for (HostHeartbeat hostHeartbeat : getAllHosts()) {
        EmbeddedClient registryClient = registryClient(hostHeartbeat.host);
        registryClient.scan(
            Collections.singletonList(new ScanRange(null, from, null, to)),
            (prefix, key, value, timestamp, version) -> {
                RawTenantAndPartition tenantPartitionKey = fromTopologyKey(key);
                MiruPartitionId partitionId = MiruPartitionId.of(tenantPartitionKey.partitionId);
                MinMaxPriorityQueue<HostAndTimestamp> latest = partitionIdToLatest.get(partitionId);
                if (latest == null) {
                    // TODO defaultNumberOfReplicas should come from config?
                    latest = MinMaxPriorityQueue.maximumSize(defaultNumberOfReplicas)
                        .expectedSize(defaultNumberOfReplicas)
                        .create();
                    partitionIdToLatest.put(partitionId, latest);
                }
                latest.add(new HostAndTimestamp(hostHeartbeat.host, FilerIO.bytesLong(value)));
                return true;
            },
            true
        );
    }
    return partitionIdToLatest;
}
 
Example 44
Project: miru   File: AmzaClusterRegistry.java   (5 votes)
@Override
public List<MiruTopologyStatus> getTopologyStatusForTenant(MiruTenantId tenantId) throws Exception {
    Map<MiruPartitionCoord, long[]> lastTimestamps = getLastTimestamps(tenantId);

    NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest = tenantLatestTopologies(tenantId);
    List<MiruTopologyStatus> status = new ArrayList<>();
    for (MiruPartitionId partitionId : partitionIdToLatest.keySet()) {
        MinMaxPriorityQueue<HostAndTimestamp> got = partitionIdToLatest.get(partitionId);
        for (HostAndTimestamp hat : got) {
            EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
            long lastIngressTimestampMillis = getIngressUpdate(tenantId, partitionId, IngressType.ingressTimestamp, 0);
            long lastQueryTimestampMillis = 0;
            byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null, toTopologyKey(tenantId, partitionId));
            MiruPartitionCoordInfo info;
            if (rawInfo == null) {
                info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
            } else {
                MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
                info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
                lastQueryTimestampMillis = columnValue.lastQueryTimestamp;
            }
            MiruPartitionCoord coord = new MiruPartitionCoord(tenantId, partitionId, hat.host);
            MiruPartition miruPartition = new MiruPartition(coord, info);

            long[] lastTimestampAndEpoch = lastTimestamps.getOrDefault(coord, NO_LAST_TIMESTAMP);
            IngressStatusTimestamps ingressStatusTimestamps = getIngressStatusTimestamps(coord.tenantId, coord.partitionId);
            status.add(new MiruTopologyStatus(miruPartition, lastIngressTimestampMillis, lastQueryTimestampMillis,
                ingressStatusTimestamps.destroyAfterTimestamp,
                ingressStatusTimestamps.cleanupAfterTimestamp,
                lastTimestampAndEpoch[0],
                lastTimestampAndEpoch[1]));
        }
    }
    return status;
}
 
Example 45
Project: miru   File: AmzaClusterRegistry.java   (5 votes)
@Override
public List<MiruTopologyStatus> getTopologyStatusForTenantHost(MiruTenantId tenantId, MiruHost host) throws Exception {
    Map<MiruPartitionCoord, long[]> lastTimestamps = getLastTimestamps(tenantId);

    NavigableMap<MiruPartitionId, MinMaxPriorityQueue<HostAndTimestamp>> partitionIdToLatest = tenantLatestTopologies(tenantId);
    List<MiruTopologyStatus> status = new ArrayList<>();
    for (MiruPartitionId partitionId : partitionIdToLatest.keySet()) {
        MinMaxPriorityQueue<HostAndTimestamp> got = partitionIdToLatest.get(partitionId);
        for (HostAndTimestamp hat : got) {
            if (hat.host.equals(host)) {
                EmbeddedClient topologyInfoClient = topologyInfoClient(hat.host);
                byte[] rawInfo = topologyInfoClient.getValue(Consistency.none, null, toTopologyKey(tenantId, partitionId));
                MiruPartitionCoordInfo info;
                long lastIngressTimestampMillis = getIngressUpdate(tenantId, partitionId, IngressType.ingressTimestamp, 0);
                long lastQueryTimestampMillis = 0;
                if (rawInfo == null) {
                    info = new MiruPartitionCoordInfo(MiruPartitionState.offline, MiruBackingStorage.memory);
                } else {
                    MiruTopologyColumnValue columnValue = topologyColumnValueMarshaller.fromBytes(rawInfo);
                    info = new MiruPartitionCoordInfo(columnValue.state, columnValue.storage);
                    lastQueryTimestampMillis = columnValue.lastQueryTimestamp;
                }
                MiruPartitionCoord coord = new MiruPartitionCoord(tenantId, partitionId, hat.host);
                MiruPartition miruPartition = new MiruPartition(coord, info);

                long[] lastTimestampAndEpoch = lastTimestamps.getOrDefault(coord, NO_LAST_TIMESTAMP);
                IngressStatusTimestamps ingressStatusTimestamps = getIngressStatusTimestamps(coord.tenantId, coord.partitionId);
                status.add(new MiruTopologyStatus(miruPartition, lastIngressTimestampMillis, lastQueryTimestampMillis,
                    ingressStatusTimestamps.destroyAfterTimestamp,
                    ingressStatusTimestamps.cleanupAfterTimestamp,
                    lastTimestampAndEpoch[0],
                    lastTimestampAndEpoch[1]));
            }
        }
    }
    return status;
}
 
Example 46
Project: c5   File: SimpleLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
private void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 47
Project: c5   File: CachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 48
Project: citolytics   File: EvaluationTest.java   (5 votes)
@Test
    public void MinMaxQueueTest() throws Exception {
        int maxListLength = 4;
        MinMaxPriorityQueue<WikiSimComparableResult<Double>> queue = MinMaxPriorityQueue
                .orderedBy(new Comparator<WikiSimComparableResult<Double>>() {
                    @Override
                    public int compare(WikiSimComparableResult<Double> o1, WikiSimComparableResult<Double> o2) {
                        return -1 * o1.compareTo(o2);
                    }
                })
                .maximumSize(maxListLength).create();

        WikiSimComparableResult<Double> testItemLow = new WikiSimComparableResult<>("B", 2.0, 0);
        WikiSimComparableResult<Double> testItemHigh = new WikiSimComparableResult<>("A", 5.0, 0);

        queue.add(testItemLow);
        queue.add(testItemLow);
        queue.add(testItemLow);
        queue.add(testItemLow);
        queue.add(testItemHigh);
        queue.add(new WikiSimComparableResult<>("A", 4.0, 0));
        queue.add(new WikiSimComparableResult<>("A", 3.0,0));
        queue.add(new WikiSimComparableResult<>("A", 2.0,0));
        queue.add(testItemLow);

//        System.out.println("Simple Queue" + queue);

        if (queue.contains(testItemLow)) {
            throw new Exception("testItemLow should NOT exist in queue: " + testItemLow);
        }

        if (!queue.contains(testItemHigh)) {
            throw new Exception("testItemHigh should exist in queue: " + testItemHigh);
        }

        if (testItemHigh.compareTo(testItemLow) != 1) {
            throw new Exception(testItemHigh + " compareTo " + testItemLow + " = " + testItemHigh.compareTo(testItemLow) + " // should be = 1");
        }

    }
 
Example 49
Project: simhashdb   File: GuavaMinMaxTopK.java   (5 votes)
@Override
public long[] getTopKUnSorted() {
	final MinMaxPriorityQueue<Long> queue = MinMaxPriorityQueue
			.maximumSize(k).create();
	long d = 0;
	int i = 0;
	while ((d = iterator.getNextLong()) != -1) {
		queue.add(setDistance(d, i));
		i++;
	}
	return Longs.toArray(queue);
}
 
Example 50
Project: simhashdb   File: GuavaMinMaxTopK.java   (5 votes)
@Override
public long[] joinTopKs(final long[]... topks) {
	final MinMaxPriorityQueue<Long> queue = MinMaxPriorityQueue
			.maximumSize(k).create();
	for (final long[] ls : topks) {
		for (final long dist : ls) {
			queue.add(dist);
		}
	}
	return Longs.toArray(queue);
}
 
Example 51
Project: simhashdb   File: InMemorySimHashDB.java   (5 votes)
private Long[] mainkNearestNeighbors2(final long[] query, final int k) {
	final MinMaxPriorityQueue<Long> minmaxqueue = MinMaxPriorityQueue
			.maximumSize(k).create();
	int i = 0; // index of the current hash, packed into the low 32 bits alongside the distance
	for (final long[] hash : hashes) {
		long d = distance(query, hash);
		// System.out.println("Distance with " + i + " " + d);
		d = d << 32;
		d += i;
		minmaxqueue.add(d);
		i++;
	}
	final Long[] result = minmaxqueue.toArray(new Long[minmaxqueue.size()]);
	return result;

}
 
Example 52
Project: archived-net-virt-platform   File: FlowCacheMgrResource.java   (5 votes)
private BetterFlowCacheRestData(BfcDb bfcCore) {
    super();
    this.counters = new BetterFlowCacheRestCounters(bfcCore);
    this.description = "Flow Cache for application=" +
                          this.counters.applName +
                          " application instance=all" +
                          " query type=counters";
    this.status = "OK";
    this.flows = MinMaxPriorityQueue
            .maximumSize(MAX_ENTRIES_TO_RETURN)
            .create();
}
 
Example 53
Project: pinot   File: PinotLLCRealtimeSegmentManager.java   (5 votes)
private void completeCommittingSegmentsInternal(String realtimeTableName,
    Map<Integer, MinMaxPriorityQueue<LLCSegmentName>> partitionToLatestSegments) {
  IdealState idealState = getTableIdealState(realtimeTableName);
  Set<String> segmentNamesIS = idealState.getPartitionSet();

  final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
  for (Map.Entry<Integer, MinMaxPriorityQueue<LLCSegmentName>> entry : partitionToLatestSegments.entrySet()) {
    final LLCSegmentName segmentName = entry.getValue().pollFirst();
    final String segmentId = segmentName.getSegmentName();
    final int partitionId = entry.getKey();
    if (!segmentNamesIS.contains(segmentId)) {
      LOGGER.info("{}:Repairing segment for partition {}. Segment {} not found in idealstate", realtimeTableName,
          partitionId, segmentId);

      List<String> newInstances = partitionAssignment.getListField(Integer.toString(partitionId));
      LOGGER.info("{}: Assigning segment {} to {}", realtimeTableName, segmentId, newInstances);
      // TODO Re-write num-partitions in metadata if needed.
      // If there was a prev segment in the same partition, then we need to fix it to be ONLINE.
      LLCSegmentName prevSegmentName = entry.getValue().pollLast();
      String prevSegmentNameStr = null;
      if (prevSegmentName != null) {
        prevSegmentNameStr = prevSegmentName.getSegmentName();
      }
      updateIdealState(realtimeTableName, newInstances, prevSegmentNameStr, segmentId);
    }
  }
}
 
Example 54
Project: pinot   File: TopKPhaseJob.java   (5 votes)
@Override
protected void cleanup(Context context) throws IOException, InterruptedException {

  for (String dimension : dimensionNames) {

    LOGGER.info("{} records passed metric threshold for dimension {}", thresholdPassCount.get(dimension), dimension);

    // Get top k
    TopKDimensionToMetricsSpec topkSpec = topKDimensionToMetricsSpecMap.get(dimension);
    if (topkSpec != null && topkSpec.getDimensionName() != null && topkSpec.getTopk() != null) {

      // Get top k for each metric specified
      Map<String, Integer> topkMetricsMap = topkSpec.getTopk();
      for (Entry<String, Integer> topKEntry : topkMetricsMap.entrySet()) {

        String metric = topKEntry.getKey();
        int k = topKEntry.getValue();
        MinMaxPriorityQueue<DimensionValueMetricPair> topKQueue = MinMaxPriorityQueue.maximumSize(k).create();

        Map<Object, Number[]> dimensionToMetricsMap = dimensionNameToValuesMap.get(dimension);
        for (Entry<Object, Number[]> entry : dimensionToMetricsMap.entrySet()) {
          topKQueue.add(new DimensionValueMetricPair(entry.getKey(), entry.getValue()[metricToIndexMapping.get(metric)]));
        }
        LOGGER.info("Picking Top {} values for {} based on Metric {} : {}", k, dimension, metric, topKQueue);
        for (DimensionValueMetricPair pair : topKQueue) {
          topkDimensionValues.addValue(dimension, String.valueOf(pair.getDimensionValue()));
        }
      }
    }
  }

  if (topkDimensionValues.getTopKDimensions().size() > 0) {
    String topkValuesPath = configuration.get(TOPK_PHASE_OUTPUT_PATH.toString());
    LOGGER.info("Writing top k values to {}",topkValuesPath);
    FSDataOutputStream topKDimensionValuesOutputStream = fileSystem.create(
        new Path(topkValuesPath + File.separator + ThirdEyeConstants.TOPK_VALUES_FILE));
    OBJECT_MAPPER.writeValue((DataOutput) topKDimensionValuesOutputStream, topkDimensionValues);
    topKDimensionValuesOutputStream.close();
  }
}
 
Example 55
Project: HBase-Research   File: DefaultLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 56
Project: HBase-Research   File: CachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 
Example 57
Project: hbase-0.94.8-qod   File: DefaultLoadBalancer.java   (5 votes)
/**
 * Add a region from the head or tail to the List of regions to return.
 */
void addRegionPlan(final MinMaxPriorityQueue<RegionPlan> regionsToMove,
    final boolean fetchFromTail, final ServerName sn, List<RegionPlan> regionsToReturn) {
  RegionPlan rp = null;
  if (!fetchFromTail) rp = regionsToMove.remove();
  else rp = regionsToMove.removeLast();
  rp.setDestination(sn);
  regionsToReturn.add(rp);
}
 
Example 58
Project: hbase-0.94.8-qod   File: CachedBlockQueue.java   (5 votes)
/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedBlockQueue(long maxSize, long blockSize) {
  int initialSize = (int)(maxSize / blockSize);
  if(initialSize == 0) initialSize++;
  queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
  heapSize = 0;
  this.maxSize = maxSize;
}
 