com.carrotsearch.hppc.IntObjectHashMap Java Examples
The following examples show how to use com.carrotsearch.hppc.IntObjectHashMap.
Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
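Before the project examples, here is a minimal self-contained sketch of the core IntObjectHashMap API (puts, gets, and cursor-based iteration) that the snippets below all build on. The class name and map contents are illustrative only:

import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.cursors.IntObjectCursor;

public class IntObjectHashMapBasics {
    public static void main(String[] args) {
        // Keys are primitive ints, so there is no boxing on the key side.
        IntObjectHashMap<String> map = new IntObjectHashMap<>();
        map.put(1, "one");
        map.put(2, "two");
        map.put(2, "TWO");              // put() overwrites an existing value

        System.out.println(map.get(1)); // one
        System.out.println(map.get(3)); // null for a missing key
        System.out.println(map.size()); // 2

        // Iteration yields IntObjectCursor objects exposing cursor.key and
        // cursor.value, the same pattern several examples below use to walk
        // failedRows and ordBytes maps.
        for (IntObjectCursor<String> cursor : map) {
            System.out.println(cursor.key + " -> " + cursor.value);
        }
    }
}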
Example #1
Source File: NodeFetchRequestTest.java From crate with Apache License 2.0
@Test
public void testStreaming() throws Exception {
    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
    IntHashSet docIds = new IntHashSet(3);
    toFetch.put(1, docIds);

    NodeFetchRequest orig = new NodeFetchRequest(UUID.randomUUID(), 1, true, toFetch);

    BytesStreamOutput out = new BytesStreamOutput();
    orig.writeTo(out);

    StreamInput in = out.bytes().streamInput();
    NodeFetchRequest streamed = new NodeFetchRequest(in);

    assertThat(orig.jobId(), is(streamed.jobId()));
    assertThat(orig.fetchPhaseId(), is(streamed.fetchPhaseId()));
    assertThat(orig.isCloseContext(), is(streamed.isCloseContext()));
    assertThat(orig.toFetch().toString(), is(streamed.toFetch().toString()));
}
Example #2
Source File: BulkWriteResult.java From spliceengine with GNU Affero General Public License v3.0
@Override
public BulkWriteResult read(Kryo kryo, Input input, Class<BulkWriteResult> type) {
    WriteResult globalStatus = kryo.readObject(input, WriteResult.class);
    int notRunSize = input.readInt();
    IntHashSet notRunRows = new IntHashSet(notRunSize);
    for (int i = 0; i < notRunSize; i++) {
        notRunRows.add(input.readInt());
    }
    int failedSize = input.readInt();
    IntObjectHashMap<WriteResult> failedRows = new IntObjectHashMap<>(failedSize, 0.9f);
    for (int i = 0; i < failedSize; i++) {
        int k = input.readInt();
        WriteResult result = kryo.readObject(input, WriteResult.class);
        failedRows.put(k, result);
    }
    return new BulkWriteResult(globalStatus, notRunRows, failedRows);
}
Example #3
Source File: NodeFetchRequest.java From Elasticsearch with Apache License 2.0
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    jobId = new UUID(in.readLong(), in.readLong());
    fetchPhaseId = in.readVInt();
    int numReaders = in.readVInt();
    if (numReaders > 0) {
        toFetch = new IntObjectHashMap<>(numReaders);
        for (int i = 0; i < numReaders; i++) {
            int readerId = in.readVInt();
            int numDocs = in.readVInt();
            IntArrayList docs = new IntArrayList(numDocs);
            toFetch.put(readerId, docs);
            for (int j = 0; j < numDocs; j++) {
                docs.add(in.readInt());
            }
        }
    }
}
Example #4
Source File: FetchMapper.java From crate with Apache License 2.0
@Override
public CompletableFuture<? extends Iterator<Row>> apply(ReaderBuckets readerBuckets, boolean isLastCall) {
    List<CompletableFuture<IntObjectMap<? extends Bucket>>> futures = new ArrayList<>();
    Iterator<Map.Entry<String, IntSet>> it = readerIdsByNode.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<String, IntSet> entry = it.next();
        IntObjectHashMap<IntContainer> toFetch = readerBuckets.generateToFetch(entry.getValue());
        if (toFetch.isEmpty() && !isLastCall) {
            continue;
        }
        final String nodeId = entry.getKey();
        try {
            futures.add(fetchOperation.fetch(nodeId, toFetch, isLastCall));
        } catch (Throwable t) {
            futures.add(CompletableFuture.failedFuture(t));
        }
        if (isLastCall) {
            it.remove();
        }
    }
    return CompletableFutures.allAsList(futures).thenApply(readerBuckets::getOutputRows);
}
Example #5
Source File: BaseWriteConfiguration.java From spliceengine with GNU Affero General Public License v3.0
@Override
public WriteResponse processGlobalResult(BulkWriteResult bulkWriteResult) throws Throwable {
    WriteResult writeResult = bulkWriteResult.getGlobalResult();
    if (writeResult.isSuccess())
        return WriteResponse.SUCCESS;
    else if (writeResult.isPartial()) {
        IntObjectHashMap<WriteResult> failedRows = bulkWriteResult.getFailedRows();
        if (failedRows != null && failedRows.size() > 0) {
            return WriteResponse.PARTIAL;
        }
        IntHashSet notRun = bulkWriteResult.getNotRunRows();
        if (notRun != null && notRun.size() > 0)
            return WriteResponse.PARTIAL;
        /*
         * We got a partial result, but it did not specify which rows need
         * attention. That's odd, but since no concrete problem was reported,
         * we may as well ignore it.
         */
        return WriteResponse.IGNORE;
    } else if (!writeResult.canRetry())
        throw exceptionFactory.processErrorResult(writeResult);
    else
        return WriteResponse.RETRY;
}
Example #6
Source File: FetchProjection.java From crate with Apache License 2.0
@SuppressWarnings({"rawtypes"}) public Map<String, ? extends IntObjectMap<Streamer[]>> generateStreamersGroupedByReaderAndNode() { HashMap<String, IntObjectHashMap<Streamer[]>> streamersByReaderByNode = new HashMap<>(); for (Map.Entry<String, IntSet> entry : nodeReaders.entrySet()) { IntObjectHashMap<Streamer[]> streamersByReaderId = new IntObjectHashMap<>(); String nodeId = entry.getKey(); streamersByReaderByNode.put(nodeId, streamersByReaderId); for (IntCursor readerIdCursor : entry.getValue()) { int readerId = readerIdCursor.value; String index = readerIndices.floorEntry(readerId).getValue(); RelationName relationName = indicesToIdents.get(index); FetchSource fetchSource = fetchSources.get(relationName); if (fetchSource == null) { continue; } streamersByReaderId.put(readerIdCursor.value, Symbols.streamerArray(fetchSource.references())); } } return streamersByReaderByNode; }
Example #7
Source File: NodeFetchRequest.java From crate with Apache License 2.0
public NodeFetchRequest(StreamInput in) throws IOException {
    super(in);
    jobId = new UUID(in.readLong(), in.readLong());
    fetchPhaseId = in.readVInt();
    closeContext = in.readBoolean();
    int numReaders = in.readVInt();
    if (numReaders > 0) {
        IntObjectHashMap<IntArrayList> toFetch = new IntObjectHashMap<>(numReaders);
        for (int i = 0; i < numReaders; i++) {
            int readerId = in.readVInt();
            int numDocs = in.readVInt();
            IntArrayList docs = new IntArrayList(numDocs);
            toFetch.put(readerId, docs);
            for (int j = 0; j < numDocs; j++) {
                docs.add(in.readInt());
            }
        }
        this.toFetch = toFetch;
    } else {
        this.toFetch = null;
    }
}
Example #8
Source File: PipelineUtils.java From spliceengine with GNU Affero General Public License v3.0
public static Collection<KVPair> doPartialRetry(BulkWrite bulkWrite, BulkWriteResult response, long id) throws Exception {
    IntHashSet notRunRows = response.getNotRunRows();
    IntObjectHashMap<WriteResult> failedRows = response.getFailedRows();
    Collection<KVPair> toRetry = new ArrayList<>(failedRows.size() + notRunRows.size());
    List<String> errorMsgs = Lists.newArrayListWithCapacity(failedRows.size());
    int i = 0;
    Collection<KVPair> allWrites = bulkWrite.getMutations();
    for (KVPair kvPair : allWrites) {
        if (notRunRows.contains(i))
            toRetry.add(kvPair);
        else {
            WriteResult writeResult = failedRows.get(i);
            if (writeResult != null) {
                errorMsgs.add(writeResult.getErrorMessage());
                if (writeResult.canRetry())
                    toRetry.add(kvPair);
            }
        }
        i++;
    }
    if (LOG.isTraceEnabled()) {
        int[] errorCounts = new int[11];
        for (IntObjectCursor<WriteResult> failedCursor : failedRows) {
            errorCounts[failedCursor.value.getCode().ordinal()]++;
        }
        SpliceLogUtils.trace(LOG, "[%d] %d failures with types: %s", id, failedRows.size(), Arrays.toString(errorCounts));
    }
    return toRetry;
}
Example #9
Source File: RequestIdMap.java From Bats with Apache License 2.0
void channelClosed(Throwable ex) {
    isOpen.set(false);
    if (ex != null) {
        final RpcException e = RpcException.mapException(ex);
        IntObjectHashMap<RpcOutcome<?>> clonedMap;
        synchronized (map) {
            clonedMap = map.clone();
            map.clear();
        }
        clonedMap.forEach(new SetExceptionProcedure(e));
    }
}
Example #10
Source File: BulkWriteAction.java From spliceengine with GNU Affero General Public License v3.0
/**
 * Return an error message describing the types and number of failures in the BatchWrite.
 *
 * @param failedRows the rows which failed, and their respective error messages
 * @return error message describing the failed rows
 */
private String getFailedRowsMessage(IntObjectHashMap<WriteResult> failedRows) {
    if (failedRows != null && !failedRows.isEmpty()) {
        // Aggregate the error counts by code.
        HashMap<Code, Integer> errorCodeToCountMap = new HashMap<>();
        for (IntObjectCursor<WriteResult> failedRowCursor : failedRows) {
            WriteResult wr = failedRowCursor.value;
            Code errorCode = (wr == null ? null : wr.getCode());
            Integer errorCount = errorCodeToCountMap.get(errorCode);
            errorCodeToCountMap.put(errorCode, (errorCode == null || errorCount == null ? 1 : errorCount + 1));
        }
        // Make a string out of the error map.
        StringBuilder buf = new StringBuilder();
        buf.append("{ ");
        boolean first = true;
        for (Map.Entry<Code, Integer> entry : errorCodeToCountMap.entrySet()) {
            if (!first) {
                buf.append(", ");
            } else {
                first = false;
            }
            buf.append(String.format("%s=%s", entry.getKey(), entry.getValue()));
        }
        buf.append(" }");
        return buf.toString();
    } else {
        return "NONE";
    }
}
Example #11
Source File: BulkWriteAction.java From spliceengine with GNU Affero General Public License v3.0
private Exception parseIntoException(BulkWriteResult response) {
    IntObjectHashMap<WriteResult> failedRows = response.getFailedRows();
    Exception first = null;
    for (IntObjectCursor<WriteResult> cursor : failedRows) {
        @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
        Throwable e = pipelineExceptionFactory.processErrorResult(cursor.value);
        if (e instanceof WriteConflict) { //TODO -sf- find a way to add in StandardExceptions here
            return (Exception) e;
        } else if (first == null)
            first = (Exception) e;
    }
    return first;
}
Example #12
Source File: DefaultWriteConfiguration.java From spliceengine with GNU Affero General Public License v3.0
@Override
public WriteResponse partialFailure(BulkWriteResult result, BulkWrite request) throws ExecutionException {
    IntObjectHashMap<WriteResult> failedRows = result.getFailedRows();
    for (IntObjectCursor<WriteResult> cursor : failedRows) {
        if (!cursor.value.canRetry())
            return WriteResponse.THROW_ERROR;
    }
    return WriteResponse.RETRY;
}
Example #13
Source File: NodeFetchOperation.java From crate with Apache License 2.0
public CompletableFuture<? extends IntObjectMap<StreamBucket>> fetch(UUID jobId,
                                                                     int phaseId,
                                                                     @Nullable IntObjectMap<? extends IntContainer> docIdsToFetch,
                                                                     boolean closeTaskOnFinish) {
    if (docIdsToFetch == null) {
        if (closeTaskOnFinish) {
            tryCloseTask(jobId, phaseId);
        }
        jobsLogs.operationStarted(phaseId, jobId, "fetch", () -> -1);
        jobsLogs.operationFinished(phaseId, jobId, null);
        return CompletableFuture.completedFuture(new IntObjectHashMap<>(0));
    }
    RootTask context = tasksService.getTask(jobId);
    FetchTask fetchTask = context.getTask(phaseId);
    jobsLogs.operationStarted(phaseId, jobId, "fetch", () -> -1);
    BiConsumer<? super IntObjectMap<StreamBucket>, ? super Throwable> whenComplete = (res, err) -> {
        if (closeTaskOnFinish) {
            if (err == null) {
                fetchTask.close();
            } else {
                fetchTask.kill(err);
            }
        }
        if (err == null) {
            jobsLogs.operationFinished(phaseId, jobId, null);
        } else {
            jobsLogs.operationFinished(phaseId, jobId, SQLExceptions.messageOf(err));
        }
    };
    try {
        return doFetch(fetchTask, docIdsToFetch).whenComplete(whenComplete);
    } catch (Throwable t) {
        whenComplete.accept(null, t);
        return CompletableFuture.failedFuture(t);
    }
}
Example #14
Source File: ReaderBuckets.java From crate with Apache License 2.0
public IntObjectHashMap<IntContainer> generateToFetch(IntSet readerIds) {
    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>(readerIds.size());
    for (IntCursor readerIdCursor : readerIds) {
        ReaderBucket readerBucket = readerBuckets.get(readerIdCursor.value);
        if (readerBucket != null && readerBucket.docs.size() > 0) {
            toFetch.put(readerIdCursor.value, readerBucket.docs.keys());
        }
    }
    return toFetch;
}
Example #15
Source File: FetchRows.java From crate with Apache License 2.0
public FetchRows(IntArrayList fetchIdPositions,
                 List<Input<?>> outputExpressions,
                 UnsafeArrayRow inputRow,
                 IntObjectHashMap<UnsafeArrayRow> fetchedRows,
                 ArrayList<Object[]> nullRows) {
    this.fetchedRows = fetchedRows;
    this.nullRows = nullRows;
    this.fetchIdPositions = fetchIdPositions.toArray();
    this.output = new InputRow(outputExpressions);
    this.inputRow = inputRow;
}
Example #16
Source File: FetchMapper.java From crate with Apache License 2.0
@Override
public void close() {
    for (String nodeId : readerIdsByNode.keySet()) {
        fetchOperation.fetch(nodeId, new IntObjectHashMap<>(0), true)
            .exceptionally(e -> {
                LOGGER.error("An error happened while sending close fetchRequest to node=" + nodeId, e);
                return null;
            });
    }
}
Example #17
Source File: JobSetup.java From crate with Apache License 2.0
private static IntObjectHashMap<NodeOperation> groupNodeOperationsByPhase(Collection<? extends NodeOperation> nodeOperations) {
    IntObjectHashMap<NodeOperation> map = new IntObjectHashMap<>(nodeOperations.size());
    for (NodeOperation nodeOperation : nodeOperations) {
        map.put(nodeOperation.executionPhase().phaseId(), nodeOperation);
    }
    return map;
}
Example #18
Source File: TransportFetchOperationTest.java From crate with Apache License 2.0
@Test
public void test_no_ram_accounting_on_empty_fetch_ids_and_close() {
    RamAccounting ramAccounting = TransportFetchOperation.ramAccountingForIncomingResponse(
        RamAccounting.NO_ACCOUNTING,
        new IntObjectHashMap<>(),
        true);
    assertThat(ramAccounting, is(RamAccounting.NO_ACCOUNTING));
}
Example #19
Source File: TransportFetchOperationTest.java From crate with Apache License 2.0
@Test
public void test_ram_accounting_on_non_empty_fetch_ids_and_close() {
    var toFetch = new IntObjectHashMap<IntContainer>();
    toFetch.put(1, new IntArrayList());
    RamAccounting ramAccounting = TransportFetchOperation.ramAccountingForIncomingResponse(
        RamAccounting.NO_ACCOUNTING,
        toFetch,
        true);
    assertThat(ramAccounting, instanceOf(BlockBasedRamAccounting.class));
}
Example #20
Source File: NodeFetchResponseTest.java From crate with Apache License 2.0
@Before
public void setUpStreamBucketsAndStreamer() throws Exception {
    streamers = new IntObjectHashMap<>(1);
    streamers.put(1, new Streamer[]{DataTypes.BOOLEAN.streamer()});

    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
    IntHashSet docIds = new IntHashSet(3);
    toFetch.put(1, docIds);

    StreamBucket.Builder builder = new StreamBucket.Builder(streamers.get(1), RamAccounting.NO_ACCOUNTING);
    builder.add(new RowN(new Object[]{true}));

    fetched = new IntObjectHashMap<>(1);
    fetched.put(1, builder.build());
}
Example #21
Source File: HppcIntObjectMapTest.java From hashmapTest with The Unlicense
@Override
public int test() {
    final IntObjectHashMap<Integer> m_map = new IntObjectHashMap<>(m_keys.length, 0.5f);
    for (int i = 0; i < m_keys.length; ++i)
        m_map.put(m_keys[i], null);
    for (int i = 0; i < m_keys.length; ++i)
        m_map.put(m_keys[i], null);
    return m_map.size();
}
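A note on the two-argument constructor used in this benchmark: IntObjectHashMap(expectedElements, loadFactor) presizes the internal buffers for the expected entry count, so the fill loops above never pay for an incremental rehash. A minimal sketch of the same idiom, with purely illustrative sizes:

// Presize for one million entries at load factor 0.5; buffers are allocated
// once up front, so the loop below never triggers a rehash.
IntObjectHashMap<Integer> presized = new IntObjectHashMap<>(1_000_000, 0.5);
for (int key = 0; key < 1_000_000; key++) {
    presized.put(key, null); // HPPC permits null values, as the benchmark relies on
}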
Example #22
Source File: RequestIdMap.java From dremio-oss with Apache License 2.0
void channelClosed(Throwable ex) {
    isOpen.set(false);
    if (ex != null) {
        final IntObjectHashMap<RpcOutcome<?>> clonedMap;
        synchronized (map) {
            clonedMap = map.clone();
            map.clear();
        }
        final RpcException e = RpcException.mapException(ex);
        clonedMap.forEach(new SetExceptionProcedure(e));
    }
}
Example #23
Source File: ExpandComponent.java From lucene-solr with Apache License 2.0
@SuppressWarnings({"unchecked"}) private void addGroupSliceToOutputMap(FieldType fieldType, IntObjectHashMap<BytesRef> ordBytes, @SuppressWarnings({"rawtypes"})NamedList outMap, CharsRefBuilder charsRef, long groupValue, DocSlice slice) { if(fieldType instanceof StrField) { final BytesRef bytesRef = ordBytes.get((int)groupValue); fieldType.indexedToReadable(bytesRef, charsRef); String group = charsRef.toString(); outMap.add(group, slice); } else { outMap.add(numericToString(fieldType, groupValue), slice); } }
Example #24
Source File: ExpandComponent.java From lucene-solr with Apache License 2.0
private Query getGroupQuery(String fname, int size, IntObjectHashMap<BytesRef> ordBytes) {
    BytesRef[] bytesRefs = new BytesRef[size];
    int index = -1;
    Iterator<IntObjectCursor<BytesRef>> it = ordBytes.iterator();
    while (it.hasNext()) {
        IntObjectCursor<BytesRef> cursor = it.next();
        bytesRefs[++index] = cursor.value;
    }
    return new TermInSetQuery(fname, bytesRefs);
}
Example #25
Source File: PermissiveInsertWriteConfiguration.java From spliceengine with GNU Affero General Public License v3.0
@Override
public WriteResponse partialFailure(BulkWriteResult result, BulkWrite request) throws ExecutionException {
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "partialFailure result=%s", result);
    if (operationContext.isFailed())
        return WriteResponse.IGNORE;

    // Filter out and report bad records.
    IntObjectHashMap<WriteResult> failedRows = result.getFailedRows();
    @SuppressWarnings("MismatchedReadAndWriteOfArray") Object[] fRows = failedRows.values;
    boolean ignore = result.getNotRunRows().size() <= 0 && result.getFailedRows().size() <= 0;
    List<KVPair> kvPairList = request.mutationsList();
    for (IntObjectCursor<WriteResult> resultCursor : failedRows) {
        WriteResult value = resultCursor.value;
        int rowNum = resultCursor.key;
        if (!value.canRetry()) {
            if (operationContext.isFailed())
                ignore = true;
            try {
                operationContext.recordBadRecord(
                    errorRow(pairDecoder.get().decode(kvPairList.get(rowNum).shallowClone()).toString(), value), null);
            } catch (Exception e) {
                ignore = true;
            }
            if (operationContext.isFailed())
                ignore = true;
        }
    }
    if (ignore)
        return WriteResponse.IGNORE;
    else
        return WriteResponse.RETRY;
}
Example #26
Source File: NumericDateAnalyzer.java From Elasticsearch with Apache License 2.0
public static synchronized NamedAnalyzer buildNamedAnalyzer(FormatDateTimeFormatter formatter, int precisionStep) {
    IntObjectHashMap<NamedAnalyzer> precisionMap = globalAnalyzers.get(formatter.format());
    if (precisionMap == null) {
        precisionMap = new IntObjectHashMap<>();
        globalAnalyzers.put(formatter.format(), precisionMap);
    }
    NamedAnalyzer namedAnalyzer = precisionMap.get(precisionStep);
    if (namedAnalyzer == null) {
        String name = "_date/" + ((precisionStep == Integer.MAX_VALUE) ? "max" : precisionStep);
        namedAnalyzer = new NamedAnalyzer(name, AnalyzerScope.GLOBAL,
            new NumericDateAnalyzer(precisionStep, formatter.parser()));
        precisionMap.put(precisionStep, namedAnalyzer);
    }
    return namedAnalyzer;
}
Example #27
Source File: PercolatorService.java From Elasticsearch with Apache License 2.0
@Inject
public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver,
                         IndicesService indicesService, PageCacheRecycler pageCacheRecycler,
                         BigArrays bigArrays, HighlightPhase highlightPhase, ClusterService clusterService,
                         AggregationPhase aggregationPhase, ScriptService scriptService,
                         MappingUpdatedAction mappingUpdatedAction) {
    super(settings);
    this.indexNameExpressionResolver = indexNameExpressionResolver;
    this.parseFieldMatcher = new ParseFieldMatcher(settings);
    this.indicesService = indicesService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.clusterService = clusterService;
    this.highlightPhase = highlightPhase;
    this.aggregationPhase = aggregationPhase;
    this.scriptService = scriptService;
    this.mappingUpdatedAction = mappingUpdatedAction;
    this.sortParseElement = new SortParseElement();
    final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread",
                                                       new ByteSizeValue(1, ByteSizeUnit.MB)).bytes();
    cache = new CloseableThreadLocal<MemoryIndex>() {
        @Override
        protected MemoryIndex initialValue() {
            // TODO: should we expose payloads as an option? should offsets be turned on always?
            return new ExtendedMemoryIndex(true, false, maxReuseBytes);
        }
    };
    single = new SingleDocumentPercolatorIndex(cache);
    multi = new MultiDocumentPercolatorIndex(cache);

    percolatorTypes = new IntObjectHashMap<>(6);
    percolatorTypes.put(countPercolator.id(), countPercolator);
    percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
    percolatorTypes.put(matchPercolator.id(), matchPercolator);
    percolatorTypes.put(queryPercolator.id(), queryPercolator);
    percolatorTypes.put(scoringPercolator.id(), scoringPercolator);
    percolatorTypes.put(topMatchingPercolator.id(), topMatchingPercolator);
}
Example #28
Source File: HppcIntObjectMapTest.java From hashmapTest with The Unlicense
@Override
public int test() {
    final IntObjectHashMap<Integer> m_map = new IntObjectHashMap<>(m_keys.length / 2 + 1, 0.5f);
    final Integer value = 1;
    int add = 0, remove = 0;
    while (add < m_keys.length) {
        m_map.put(m_keys[add], value);
        ++add;
        m_map.put(m_keys[add], value);
        ++add;
        m_map.remove(m_keys[remove++]);
    }
    return m_map.size();
}
Example #29
Source File: NodeFetchRequest.java From Elasticsearch with Apache License 2.0
public NodeFetchRequest(UUID jobId, int fetchPhaseId, IntObjectHashMap<IntContainer> toFetch) {
    this.jobId = jobId;
    this.fetchPhaseId = fetchPhaseId;
    if (!toFetch.isEmpty()) {
        this.toFetch = toFetch;
    }
}
Example #30
Source File: QueryManager.java From Bats with Apache License 2.0
@Override
public boolean apply(final int majorFragmentId, final IntObjectHashMap<FragmentData> minorMap) {
    final MajorFragmentProfile.Builder builder = MajorFragmentProfile.newBuilder()
        .setMajorFragmentId(majorFragmentId);
    minorMap.forEach(new InnerIter(builder));
    profileBuilder.addFragmentProfile(builder);
    return true;
}