com.carrotsearch.hppc.ObjectLongHashMap Java Examples

The following examples show how to use com.carrotsearch.hppc.ObjectLongHashMap, HPPC's hash map from Object keys to primitive long values (no boxing of the values). The examples are taken from open-source projects; the source file and license are noted above each one.
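Before the project examples, here is a minimal, self-contained sketch of the core API. The demo class and sample keys are invented for illustration; the methods themselves (put, addTo, getOrDefault, cursor iteration) are HPPC's own.

import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;

// Hypothetical demo class; not taken from any of the projects below.
public class ObjectLongHashMapDemo {
    public static void main(String[] args) {
        // Map from Object keys to primitive long values: no boxing of the values.
        ObjectLongHashMap<String> counts = new ObjectLongHashMap<>();
        counts.put("apples", 3L);
        counts.put("oranges", 5L);
        counts.addTo("apples", 2L);                      // in-place increment: apples -> 5
        long apples = counts.getOrDefault("apples", 0L); // 5
        long pears  = counts.getOrDefault("pears", 0L);  // default for a missing key: 0

        // Iteration yields reusable ObjectLongCursor objects, not Map.Entry instances.
        for (ObjectLongCursor<String> cursor : counts) {
            System.out.println(cursor.key + " = " + cursor.value);
        }
        System.out.println("apples = " + apples + ", pears = " + pears);
    }
}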
Example #1
Source File: TableStatsService.java    From Elasticsearch with Apache License 2.0
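Builds a map of per-table row counts from a SQL response, pre-sizing the ObjectLongHashMap to the number of result rows to avoid rehashing.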
private static ObjectLongMap<TableIdent> statsFromResponse(SQLResponse sqlResponse) {
    ObjectLongMap<TableIdent> newStats = new ObjectLongHashMap<>((int) sqlResponse.rowCount());
    for (Object[] row : sqlResponse.rows()) {
        newStats.put(new TableIdent((String) row[1], (String) row[2]), (long) row[0]);
    }
    return newStats;
}
 
Example #2
Source File: CompletionStats.java    From Elasticsearch with Apache License 2.0
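Deserializes completion stats from a stream: an optional per-field size map is rebuilt entry by entry, again pre-sized with the serialized entry count.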
@Override
public void readFrom(StreamInput in) throws IOException {
    sizeInBytes = in.readVLong();
    if (in.readBoolean()) {
        int size = in.readVInt();
        fields = new ObjectLongHashMap<>(size);
        for (int i = 0; i < size; i++) {
            fields.put(in.readString(), in.readVLong());
        }
    }
}
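The serialization side is not shown on this page, but the wire layout it must produce follows directly from the loop above. A sketch of what the matching writeTo could look like, assuming Elasticsearch's StreamOutput conventions and iterating the HPPC map via cursors:

@Override
public void writeTo(StreamOutput out) throws IOException {
    // Sketch mirroring the readFrom above; not copied from the Elasticsearch source.
    out.writeVLong(sizeInBytes);
    if (fields == null) {
        out.writeBoolean(false);          // no per-field map was collected
    } else {
        out.writeBoolean(true);
        out.writeVInt(fields.size());
        // HPPC maps iterate as cursors rather than Map.Entry objects.
        for (ObjectLongCursor<String> cursor : fields) {
            out.writeString(cursor.key);
            out.writeVLong(cursor.value);
        }
    }
}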
 
Example #3
Source File: ShardFieldData.java    From Elasticsearch with Apache License 2.0
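Collects per-field memory totals into an ObjectLongHashMap, but only for fields matching the given patterns; the map stays null when no fields are requested.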
public FieldDataStats stats(String... fields) {
    ObjectLongHashMap<String> fieldTotals = null;
    if (fields != null && fields.length > 0) {
        fieldTotals = new ObjectLongHashMap<>();
        for (Map.Entry<String, CounterMetric> entry : perFieldTotals.entrySet()) {
            if (Regex.simpleMatch(fields, entry.getKey())) {
                fieldTotals.put(entry.getKey(), entry.getValue().count());
            }
        }
    }
    return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals);
}
 
Example #4
Source File: FieldDataStats.java    From Elasticsearch with Apache License 2.0
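Deserializes field-data stats; the optional per-field map is rebuilt the same way as in Example #2, so the writeTo sketch above applies analogously.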
@Override
public void readFrom(StreamInput in) throws IOException {
    memorySize = in.readVLong();
    evictions = in.readVLong();
    if (in.readBoolean()) {
        int size = in.readVInt();
        fields = new ObjectLongHashMap<>(size);
        for (int i = 0; i < size; i++) {
            fields.put(in.readString(), in.readVLong());
        }
    }
}
 
Example #5
Source File: CompletionStats.java    From Elasticsearch with Apache License 2.0
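The constructor pairing with the readFrom in Example #2: callers hand in a nullable ObjectLongHashMap of per-field sizes.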
public CompletionStats(long size, @Nullable ObjectLongHashMap<String> fields) {
    this.sizeInBytes = size;
    this.fields = fields;
}
 
Example #6
Source File: CompletionStats.java    From Elasticsearch with Apache License 2.0
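The matching getter; the returned map may be null when per-field stats were not collected.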
public ObjectLongHashMap<String> getFields() {
    return fields;
}
 
Example #7
Source File: FieldDataStats.java    From Elasticsearch with Apache License 2.0
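FieldDataStats' constructor likewise stores a nullable per-field map alongside the scalar totals.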
public FieldDataStats(long memorySize, long evictions, @Nullable ObjectLongHashMap<String> fields) {
    this.memorySize = memorySize;
    this.evictions = evictions;
    this.fields = fields;
}
 
Example #8
Source File: FieldDataStats.java    From Elasticsearch with Apache License 2.0
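The corresponding getter, annotated @Nullable for the same reason.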
@Nullable
public ObjectLongHashMap<String> getFields() {
    return fields;
}
 
Example #9
Source File: ReplicaShardAllocator.java    From Elasticsearch with Apache License 2.0
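Ranks candidate nodes for replica allocation: the ObjectLongHashMap records, per node, how many bytes of the primary's files already exist there, with Long.MAX_VALUE marking a sync-id match.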
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
                                        TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
                                        AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
    ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
    for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
        DiscoveryNode discoNode = nodeStoreEntry.getKey();
        TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
        if (storeFilesMetaData == null) {
            // already allocated on that node...
            continue;
        }

        RoutingNode node = allocation.routingNodes().node(discoNode.id());
        if (node == null) {
            continue;
        }

        // check if we can allocate on that node...
        // we only check for NO, since if this node is THROTTLING and it has enough "same data"
        // then we will try and assign it next time
        Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
        if (decision.type() == Decision.Type.NO) {
            continue;
        }

        // if it is already allocated, we can't assign to it... (and it might be primary as well)
        if (storeFilesMetaData.allocated()) {
            continue;
        }

        // we don't have any files at all, it is an empty index
        if (storeFilesMetaData.iterator().hasNext() == false) {
            continue;
        }

        String primarySyncId = primaryStore.syncId();
        String replicaSyncId = storeFilesMetaData.syncId();
        // see if we have a sync id we can make use of
        if (replicaSyncId != null && replicaSyncId.equals(primarySyncId)) {
            logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.name(), replicaSyncId);
            nodesToSize.put(discoNode, Long.MAX_VALUE);
        } else {
            long sizeMatched = 0;
            for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
                String metaDataFileName = storeFileMetaData.name();
                if (primaryStore.fileExists(metaDataFileName) && primaryStore.file(metaDataFileName).isSame(storeFileMetaData)) {
                    sizeMatched += storeFileMetaData.length();
                }
            }
            logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data",
                    shard, discoNode.name(), new ByteSizeValue(sizeMatched), sizeMatched);
            nodesToSize.put(discoNode, sizeMatched);
        }
    }

    return new MatchingNodes(nodesToSize);
}
 
Example #10
Source File: ReplicaShardAllocator.java    From crate with Apache License 2.0
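The same method in the crate project's fork: the node-to-matching-bytes map is unchanged, but an explain mode additionally records per-node allocation decisions.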
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
                                        TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
                                        AsyncShardFetch.FetchResult<NodeStoreFilesMetaData> data,
                                        boolean explain) {
    ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
    Map<String, NodeAllocationResult> nodeDecisions = explain ? new HashMap<>() : null;
    for (Map.Entry<DiscoveryNode, NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
        DiscoveryNode discoNode = nodeStoreEntry.getKey();
        TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
        // we don't have any files at all, it is an empty index
        if (storeFilesMetaData.isEmpty()) {
            continue;
        }

        RoutingNode node = allocation.routingNodes().node(discoNode.getId());
        if (node == null) {
            continue;
        }

        // check if we can allocate on that node...
        // we only check for NO, since if this node is THROTTLING and it has enough "same data"
        // then we will try and assign it next time
        Decision decision = allocation.deciders().canAllocate(shard, node, allocation);

        long matchingBytes = -1;
        if (explain) {
            matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
            ShardStoreInfo shardStoreInfo = new ShardStoreInfo(matchingBytes);
            nodeDecisions.put(node.nodeId(), new NodeAllocationResult(discoNode, shardStoreInfo, decision));
        }

        if (decision.type() == Decision.Type.NO) {
            continue;
        }

        if (matchingBytes < 0) {
            matchingBytes = computeMatchingBytes(primaryStore, storeFilesMetaData);
        }
        nodesToSize.put(discoNode, matchingBytes);
        if (logger.isTraceEnabled()) {
            if (matchingBytes == Long.MAX_VALUE) {
                logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.getName(), storeFilesMetaData.syncId());
            } else {
                logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data",
                    shard, discoNode.getName(), new ByteSizeValue(matchingBytes), matchingBytes);
            }
        }
    }

    return new MatchingNodes(nodesToSize, nodeDecisions);
}