Java Code Examples for com.carrotsearch.hppc.IntObjectHashMap

The following examples show how to use com.carrotsearch.hppc.IntObjectHashMap. They are extracted from open source projects; where available, each example lists its source project, source file, and license.
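As a quick orientation before the project examples, the sketch below shows the core IntObjectHashMap operations: construction with a capacity hint, put/get on primitive int keys, cursor-based iteration, and removal. It is a minimal, hypothetical snippet rather than code from any of the projects below; the class name, keys, and values are illustrative only.

import com.carrotsearch.hppc.IntObjectHashMap;
import com.carrotsearch.hppc.cursors.IntObjectCursor;

public class IntObjectHashMapBasics {
    public static void main(String[] args) {
        // Capacity hint (expected elements); the map resizes automatically if exceeded.
        IntObjectHashMap<String> namesById = new IntObjectHashMap<>(16);

        // put() associates a primitive int key with an object value, so keys are never boxed.
        namesById.put(1, "alpha");
        namesById.put(2, "beta");

        // get() returns null for an absent key; getOrDefault() supplies a fallback value.
        String first = namesById.get(1);                 // "alpha"
        String missing = namesById.getOrDefault(7, "?"); // "?"

        // Iteration yields IntObjectCursor objects exposing the key and value fields.
        for (IntObjectCursor<String> cursor : namesById) {
            System.out.println(cursor.key + " -> " + cursor.value);
        }

        // remove() returns the previous value (or null) and decreases size().
        namesById.remove(2);
        System.out.println(first + ", " + missing + ", size=" + namesById.size());
    }
}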
Example 1
Source Project: Elasticsearch   Source File: NodeFetchRequest.java    License: Apache License 2.0
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    jobId = new UUID(in.readLong(), in.readLong());
    fetchPhaseId = in.readVInt();
    int numReaders = in.readVInt();
    if (numReaders > 0) {
        toFetch = new IntObjectHashMap<>(numReaders);
        for (int i = 0; i < numReaders; i++) {
            int readerId = in.readVInt();
            int numDocs = in.readVInt();
            IntArrayList docs = new IntArrayList(numDocs);
            toFetch.put(readerId, docs);
            for (int j = 0; j < numDocs; j++) {
                docs.add(in.readInt());
            }
        }
    }
}
 
Example 2
@Override
public BulkWriteResult read(Kryo kryo, Input input, Class<BulkWriteResult> type) {
    WriteResult globalStatus = kryo.readObject(input,WriteResult.class);
    int notRunSize = input.readInt();
    IntHashSet notRunRows = new IntHashSet(notRunSize);
    for(int i=0;i<notRunSize;i++){
        notRunRows.add(input.readInt());
    }
    int failedSize = input.readInt();
    IntObjectHashMap<WriteResult> failedRows = new IntObjectHashMap<>(failedSize,0.9f);
    for(int i=0;i<failedSize;i++){
        int k = input.readInt();
        WriteResult result = kryo.readObject(input,WriteResult.class);
        failedRows.put(k,result);
    }
    return new BulkWriteResult(globalStatus,notRunRows,failedRows);
}
 
Example 3
@Override
public WriteResponse processGlobalResult(BulkWriteResult bulkWriteResult) throws Throwable {
    WriteResult writeResult = bulkWriteResult.getGlobalResult();
    if (writeResult.isSuccess())
        return WriteResponse.SUCCESS;
    else if (writeResult.isPartial()) {
        IntObjectHashMap<WriteResult> failedRows = bulkWriteResult.getFailedRows();
        if (failedRows != null && failedRows.size() > 0) {
            return WriteResponse.PARTIAL;
        }
        IntHashSet notRun = bulkWriteResult.getNotRunRows();
        if(notRun!=null && notRun.size()>0)
            return WriteResponse.PARTIAL;
        /*
         * We got a partial result, but didn't specify which rows needed behavior.
         * That's weird, but since we weren't told there would be a problem, we may
         * as well ignore
         */
        return WriteResponse.IGNORE;
    } else if (!writeResult.canRetry())
        throw exceptionFactory.processErrorResult(writeResult);
    else
        return WriteResponse.RETRY;
}
 
Example 4
Source Project: crate   Source File: FetchProjection.java    License: Apache License 2.0
@SuppressWarnings({"rawtypes"})
public Map<String, ? extends IntObjectMap<Streamer[]>> generateStreamersGroupedByReaderAndNode() {
    HashMap<String, IntObjectHashMap<Streamer[]>> streamersByReaderByNode = new HashMap<>();
    for (Map.Entry<String, IntSet> entry : nodeReaders.entrySet()) {
        IntObjectHashMap<Streamer[]> streamersByReaderId = new IntObjectHashMap<>();
        String nodeId = entry.getKey();
        streamersByReaderByNode.put(nodeId, streamersByReaderId);
        for (IntCursor readerIdCursor : entry.getValue()) {
            int readerId = readerIdCursor.value;
            String index = readerIndices.floorEntry(readerId).getValue();
            RelationName relationName = indicesToIdents.get(index);
            FetchSource fetchSource = fetchSources.get(relationName);
            if (fetchSource == null) {
                continue;
            }
            streamersByReaderId.put(readerIdCursor.value, Symbols.streamerArray(fetchSource.references()));
        }
    }
    return streamersByReaderByNode;
}
 
Example 5
Source Project: crate   Source File: NodeFetchRequest.java    License: Apache License 2.0
public NodeFetchRequest(StreamInput in) throws IOException {
    super(in);
    jobId = new UUID(in.readLong(), in.readLong());
    fetchPhaseId = in.readVInt();
    closeContext = in.readBoolean();
    int numReaders = in.readVInt();
    if (numReaders > 0) {
        IntObjectHashMap<IntArrayList> toFetch = new IntObjectHashMap<>(numReaders);
        for (int i = 0; i < numReaders; i++) {
            int readerId = in.readVInt();
            int numDocs = in.readVInt();
            IntArrayList docs = new IntArrayList(numDocs);
            toFetch.put(readerId, docs);
            for (int j = 0; j < numDocs; j++) {
                docs.add(in.readInt());
            }
        }
        this.toFetch = toFetch;
    } else {
        this.toFetch = null;
    }
}
 
Example 6
Source Project: crate   Source File: FetchMapper.java    License: Apache License 2.0
@Override
public CompletableFuture<? extends Iterator<Row>> apply(ReaderBuckets readerBuckets, boolean isLastCall) {
    List<CompletableFuture<IntObjectMap<? extends Bucket>>> futures = new ArrayList<>();
    Iterator<Map.Entry<String, IntSet>> it = readerIdsByNode.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<String, IntSet> entry = it.next();
        IntObjectHashMap<IntContainer> toFetch = readerBuckets.generateToFetch(entry.getValue());
        if (toFetch.isEmpty() && !isLastCall) {
            continue;
        }
        final String nodeId = entry.getKey();
        try {
            futures.add(fetchOperation.fetch(nodeId, toFetch, isLastCall));
        } catch (Throwable t) {
            futures.add(CompletableFuture.failedFuture(t));
        }
        if (isLastCall) {
            it.remove();
        }
    }
    return CompletableFutures.allAsList(futures).thenApply(readerBuckets::getOutputRows);
}
 
Example 7
Source Project: crate   Source File: NodeFetchRequestTest.java    License: Apache License 2.0
@Test
public void testStreaming() throws Exception {

    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
    IntHashSet docIds = new IntHashSet(3);
    toFetch.put(1, docIds);

    NodeFetchRequest orig = new NodeFetchRequest(UUID.randomUUID(), 1, true, toFetch);

    BytesStreamOutput out = new BytesStreamOutput();
    orig.writeTo(out);

    StreamInput in = out.bytes().streamInput();

    NodeFetchRequest streamed = new NodeFetchRequest(in);

    assertThat(orig.jobId(), is(streamed.jobId()));
    assertThat(orig.fetchPhaseId(), is(streamed.fetchPhaseId()));
    assertThat(orig.isCloseContext(), is(streamed.isCloseContext()));
    assertThat(orig.toFetch().toString(), is(streamed.toFetch().toString()));
}
 
Example 8
Source Project: Bats   Source File: RequestIdMap.java    License: Apache License 2.0
void channelClosed(Throwable ex) {
  isOpen.set(false);
  if (ex != null) {
    final RpcException e = RpcException.mapException(ex);
    IntObjectHashMap<RpcOutcome<?>> clonedMap;
    synchronized (map) {
      clonedMap = map.clone();
      map.clear();
    }
    clonedMap.forEach(new SetExceptionProcedure(e));
  }
}
 
Example 9
Source Project: Bats   Source File: QueryManager.java    License: Apache License 2.0
private void addFragment(final FragmentData fragmentData) {
  final FragmentHandle fragmentHandle = fragmentData.getHandle();
  final int majorFragmentId = fragmentHandle.getMajorFragmentId();
  final int minorFragmentId = fragmentHandle.getMinorFragmentId();

  IntObjectHashMap<FragmentData> minorMap = fragmentDataMap.get(majorFragmentId);
  if (minorMap == null) {
    minorMap = new IntObjectHashMap<>();
    fragmentDataMap.put(majorFragmentId, minorMap);
  }
  minorMap.put(minorFragmentId, fragmentData);
  fragmentDataSet.add(fragmentData);
}
 
Example 10
Source Project: Bats   Source File: QueryManager.java    License: Apache License 2.0
@Override
public boolean apply(final int majorFragmentId, final IntObjectHashMap<FragmentData> minorMap) {
  final MajorFragmentProfile.Builder builder = MajorFragmentProfile.newBuilder().setMajorFragmentId(majorFragmentId);
  minorMap.forEach(new InnerIter(builder));
  profileBuilder.addFragmentProfile(builder);
  return true;
}
 
Example 11
Source Project: Elasticsearch   Source File: NodeFetchRequest.java    License: Apache License 2.0
public NodeFetchRequest(UUID jobId, int fetchPhaseId, IntObjectHashMap<IntContainer> toFetch) {
    this.jobId = jobId;
    this.fetchPhaseId = fetchPhaseId;
    if (!toFetch.isEmpty()) {
        this.toFetch = toFetch;
    }
}
 
Example 12
Source Project: Elasticsearch   Source File: PercolatorService.java    License: Apache License 2.0
@Inject
public PercolatorService(Settings settings, IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService,
                         PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
                         HighlightPhase highlightPhase, ClusterService clusterService,
                         AggregationPhase aggregationPhase, ScriptService scriptService,
                         MappingUpdatedAction mappingUpdatedAction) {
    super(settings);
    this.indexNameExpressionResolver = indexNameExpressionResolver;
    this.parseFieldMatcher = new ParseFieldMatcher(settings);
    this.indicesService = indicesService;
    this.pageCacheRecycler = pageCacheRecycler;
    this.bigArrays = bigArrays;
    this.clusterService = clusterService;
    this.highlightPhase = highlightPhase;
    this.aggregationPhase = aggregationPhase;
    this.scriptService = scriptService;
    this.mappingUpdatedAction = mappingUpdatedAction;
    this.sortParseElement = new SortParseElement();

    final long maxReuseBytes = settings.getAsBytesSize("indices.memory.memory_index.size_per_thread", new ByteSizeValue(1, ByteSizeUnit.MB)).bytes();
    cache = new CloseableThreadLocal<MemoryIndex>() {
        @Override
        protected MemoryIndex initialValue() {
            // TODO: should we expose payloads as an option? should offsets be turned on always?
            return new ExtendedMemoryIndex(true, false, maxReuseBytes);
        }
    };
    single = new SingleDocumentPercolatorIndex(cache);
    multi = new MultiDocumentPercolatorIndex(cache);

    percolatorTypes = new IntObjectHashMap<>(6);
    percolatorTypes.put(countPercolator.id(), countPercolator);
    percolatorTypes.put(queryCountPercolator.id(), queryCountPercolator);
    percolatorTypes.put(matchPercolator.id(), matchPercolator);
    percolatorTypes.put(queryPercolator.id(), queryPercolator);
    percolatorTypes.put(scoringPercolator.id(), scoringPercolator);
    percolatorTypes.put(topMatchingPercolator.id(), topMatchingPercolator);
}
 
Example 13
Source Project: Elasticsearch   Source File: NumericDateAnalyzer.java    License: Apache License 2.0
public static synchronized NamedAnalyzer buildNamedAnalyzer(FormatDateTimeFormatter formatter, int precisionStep) {
    IntObjectHashMap<NamedAnalyzer> precisionMap = globalAnalyzers.get(formatter.format());
    if (precisionMap == null) {
        precisionMap = new IntObjectHashMap<>();
        globalAnalyzers.put(formatter.format(), precisionMap);
    }
    NamedAnalyzer namedAnalyzer = precisionMap.get(precisionStep);
    if (namedAnalyzer == null) {
        String name = "_date/" + ((precisionStep == Integer.MAX_VALUE) ? "max" : precisionStep);
        namedAnalyzer = new NamedAnalyzer(name, AnalyzerScope.GLOBAL, new NumericDateAnalyzer(precisionStep, formatter.parser()));
        precisionMap.put(precisionStep, namedAnalyzer);
    }
    return namedAnalyzer;
}
 
Example 14
Source Project: dremio-oss   Source File: RequestIdMap.java    License: Apache License 2.0
void channelClosed(Throwable ex) {
  isOpen.set(false);
  if (ex != null) {
    final IntObjectHashMap<RpcOutcome<?>> clonedMap;
    synchronized (map) {
      clonedMap = map.clone();
      map.clear();
    }
    final RpcException e = RpcException.mapException(ex);
    clonedMap.forEach(new SetExceptionProcedure(e));
  }
}
 
Example 15
Source Project: lucene-solr   Source File: ExpandComponent.java    License: Apache License 2.0
@SuppressWarnings({"unchecked"})
private void addGroupSliceToOutputMap(FieldType fieldType, IntObjectHashMap<BytesRef> ordBytes,
                                      @SuppressWarnings({"rawtypes"})NamedList outMap, CharsRefBuilder charsRef, long groupValue, DocSlice slice) {
  if(fieldType instanceof StrField) {
    final BytesRef bytesRef = ordBytes.get((int)groupValue);
    fieldType.indexedToReadable(bytesRef, charsRef);
    String group = charsRef.toString();
    outMap.add(group, slice);
  } else {
    outMap.add(numericToString(fieldType, groupValue), slice);
  }
}
 
Example 16
Source Project: lucene-solr   Source File: ExpandComponent.java    License: Apache License 2.0
private Query getGroupQuery(String fname,
                            int size,
                            IntObjectHashMap<BytesRef> ordBytes) {
  BytesRef[] bytesRefs = new BytesRef[size];
  int index = -1;
  Iterator<IntObjectCursor<BytesRef>> it = ordBytes.iterator();
  while (it.hasNext()) {
    IntObjectCursor<BytesRef> cursor = it.next();
    bytesRefs[++index] = cursor.value;
  }
  return new TermInSetQuery(fname, bytesRefs);
}
 
Example 17
Source Project: hashmapTest   Source File: HppcIntObjectMapTest.java    License: The Unlicense
@Override
public int test() {
    final IntObjectHashMap<Integer> m_map = new IntObjectHashMap<>( m_keys.length, 0.5f );
    for ( int i = 0; i < m_keys.length; ++i )
        m_map.put( m_keys[ i ], null );
    for ( int i = 0; i < m_keys.length; ++i )
        m_map.put( m_keys[ i ], null );
    return m_map.size();
}
 
Example 18
Source Project: hashmapTest   Source File: HppcIntObjectMapTest.java    License: The Unlicense
@Override
public int test() {
    final IntObjectHashMap<Integer> m_map = new IntObjectHashMap<>( m_keys.length / 2 + 1, 0.5f );
    final Integer value = 1;
    int add = 0, remove = 0;
    while ( add < m_keys.length )
    {
        m_map.put( m_keys[ add ], value );
        ++add;
        m_map.put( m_keys[ add ], value );
        ++add;
        m_map.remove( m_keys[ remove++ ] );
    }
    return m_map.size();
}
 
Example 19
Source Project: more-lambdas-java   Source File: MoreCollectors.java    License: Artistic License 2.0
public static <T, K, U> Collector<T, IntObjectHashMap<U>, IntObjectHashMap<U>> toIntMap(
        ToIntFunction<? super T> keyMapper, Function<? super T, ? extends U> valueMapper) {
    BiConsumer<IntObjectHashMap<U>, T> accumulator = (map, element) -> map
            .put(keyMapper.applyAsInt(element), valueMapper.apply(element));
    return new CollectorImpl<>(IntObjectHashMap::new, accumulator, (m1, m2) -> {
        m1.putAll(m2);
        return m1;
    }, CH_ID);
}
 
Example 20
@Override
public WriteResponse partialFailure(BulkWriteResult result, BulkWrite request) throws ExecutionException {
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "partialFailure result=%s", result);
    if(operationContext.isFailed()) return WriteResponse.IGNORE;
    //filter out and report bad records
    IntObjectHashMap<WriteResult> failedRows = result.getFailedRows();
    @SuppressWarnings("MismatchedReadAndWriteOfArray") Object[] fRows = failedRows.values;
    boolean ignore = result.getNotRunRows().size()<=0 && result.getFailedRows().size()<=0;
    List<KVPair> kvPairList = request.mutationsList();
    for(IntObjectCursor<WriteResult> resultCursor:failedRows) {
        WriteResult value = resultCursor.value;
        int rowNum = resultCursor.key;
        if (!value.canRetry()) {
            if (operationContext.isFailed())
                ignore = true;
            try {
                operationContext.recordBadRecord(errorRow(pairDecoder.get().decode(kvPairList.get(rowNum).shallowClone()).toString(), value), null);
            } catch (Exception e) {
                ignore = true;
            }

            if (operationContext.isFailed())
                ignore = true;
        }
    }
    if(ignore)
        return WriteResponse.IGNORE;
    else
        return WriteResponse.RETRY;
}
 
Example 21
public static Collection<KVPair> doPartialRetry(BulkWrite bulkWrite,BulkWriteResult response,long id) throws Exception{
    IntHashSet notRunRows=response.getNotRunRows();
    IntObjectHashMap<WriteResult> failedRows=response.getFailedRows();
    Collection<KVPair> toRetry=new ArrayList<>(failedRows.size()+notRunRows.size());
    List<String> errorMsgs= Lists.newArrayListWithCapacity(failedRows.size());
    int i=0;
    Collection<KVPair> allWrites=bulkWrite.getMutations();
    for(KVPair kvPair : allWrites){
        if(notRunRows.contains(i))
            toRetry.add(kvPair);
        else{
            WriteResult writeResult=failedRows.get(i);
            if(writeResult!=null){
                errorMsgs.add(writeResult.getErrorMessage());
                if(writeResult.canRetry())
                    toRetry.add(kvPair);
            }
        }
        i++;
    }
    if(LOG.isTraceEnabled()){
        int[] errorCounts=new int[11];
        for(IntObjectCursor<WriteResult> failedCursor : failedRows){
            errorCounts[failedCursor.value.getCode().ordinal()]++;
        }
        SpliceLogUtils.trace(LOG,"[%d] %d failures with types: %s",id,failedRows.size(),Arrays.toString(errorCounts));
    }

    return toRetry;
}
 
Example 22
/**
 * Return an error message describing the types and number of failures in the BatchWrite.
 *
 * @param failedRows the rows which failed, and their respective error messages
 * @return error message describing the failed rows
 */
private String getFailedRowsMessage(IntObjectHashMap<WriteResult> failedRows){

    if(failedRows!=null && !failedRows.isEmpty()){

        // Aggregate the error counts by code.
        HashMap<Code, Integer> errorCodeToCountMap=new HashMap<>();
        for(IntObjectCursor<WriteResult> failedRowCursor : failedRows){
            WriteResult wr=failedRowCursor.value;
            Code errorCode=(wr==null?null:wr.getCode());
            Integer errorCount=errorCodeToCountMap.get(errorCode);
            errorCodeToCountMap.put(errorCode,(errorCode==null || errorCount==null?1:errorCount+1));
        }

        // Make a string out of the error map.
        StringBuilder buf=new StringBuilder();
        buf.append("{ ");
        boolean first=true;
        for(Map.Entry<Code, Integer> entry : errorCodeToCountMap.entrySet()){
            if(!first){
                buf.append(", ");
            }else{
                first=false;
            }
            buf.append(String.format("%s=%s",entry.getKey(),entry.getValue()));
        }
        buf.append(" }");
        return buf.toString();
    }else{
        return "NONE";
    }
}
 
Example 23
private Exception parseIntoException(BulkWriteResult response){
    IntObjectHashMap<WriteResult> failedRows=response.getFailedRows();
    Exception first = null;
    for(IntObjectCursor<WriteResult> cursor : failedRows){
        @SuppressWarnings("ThrowableResultOfMethodCallIgnored") Throwable e=pipelineExceptionFactory.processErrorResult(cursor.value);
        if(e instanceof WriteConflict){ //TODO -sf- find a way to add in StandardExceptions here
            return (Exception)e;
        }else if(first==null)
            first = (Exception)e;
    }
    return first;
}
 
Example 24
@Override
public WriteResponse partialFailure(BulkWriteResult result, BulkWrite request) throws ExecutionException {
    IntObjectHashMap<WriteResult> failedRows = result.getFailedRows();
    for (IntObjectCursor<WriteResult> cursor : failedRows) {
        if (!cursor.value.canRetry())
            return WriteResponse.THROW_ERROR;
    }
    return WriteResponse.RETRY;
}
 
Example 25
Source Project: crate   Source File: NodeFetchOperation.java    License: Apache License 2.0
public CompletableFuture<? extends IntObjectMap<StreamBucket>> fetch(UUID jobId,
                                                                     int phaseId,
                                                                     @Nullable IntObjectMap<? extends IntContainer> docIdsToFetch,
                                                                     boolean closeTaskOnFinish) {
    if (docIdsToFetch == null) {
        if (closeTaskOnFinish) {
            tryCloseTask(jobId, phaseId);
        }
        jobsLogs.operationStarted(phaseId, jobId, "fetch", () -> -1);
        jobsLogs.operationFinished(phaseId, jobId, null);
        return CompletableFuture.completedFuture(new IntObjectHashMap<>(0));
    }

    RootTask context = tasksService.getTask(jobId);
    FetchTask fetchTask = context.getTask(phaseId);
    jobsLogs.operationStarted(phaseId, jobId, "fetch", () -> -1);
    BiConsumer<? super IntObjectMap<StreamBucket>, ? super Throwable> whenComplete = (res, err) -> {
        if (closeTaskOnFinish) {
            if (err == null) {
                fetchTask.close();
            } else {
                fetchTask.kill(err);
            }
        }
        if (err == null) {
            jobsLogs.operationFinished(phaseId, jobId, null);
        } else {
            jobsLogs.operationFinished(phaseId, jobId, SQLExceptions.messageOf(err));
        }
    };
    try {
        return doFetch(fetchTask, docIdsToFetch).whenComplete(whenComplete);
    } catch (Throwable t) {
        whenComplete.accept(null, t);
        return CompletableFuture.failedFuture(t);
    }
}
 
Example 26
Source Project: crate   Source File: ReaderBuckets.java    License: Apache License 2.0
public IntObjectHashMap<IntContainer> generateToFetch(IntSet readerIds) {
    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>(readerIds.size());
    for (IntCursor readerIdCursor : readerIds) {
        ReaderBucket readerBucket = readerBuckets.get(readerIdCursor.value);
        if (readerBucket != null && readerBucket.docs.size() > 0) {
            toFetch.put(readerIdCursor.value, readerBucket.docs.keys());
        }
    }
    return toFetch;
}
 
Example 27
Source Project: crate   Source File: FetchRows.java    License: Apache License 2.0
public FetchRows(IntArrayList fetchIdPositions,
                 List<Input<?>> outputExpressions,
                 UnsafeArrayRow inputRow,
                 IntObjectHashMap<UnsafeArrayRow> fetchedRows,
                 ArrayList<Object[]> nullRows) {
    this.fetchedRows = fetchedRows;
    this.nullRows = nullRows;
    this.fetchIdPositions = fetchIdPositions.toArray();
    this.output = new InputRow(outputExpressions);
    this.inputRow = inputRow;
}
 
Example 28
Source Project: crate   Source File: FetchMapper.java    License: Apache License 2.0
@Override
public void close() {
    for (String nodeId : readerIdsByNode.keySet()) {
        fetchOperation.fetch(nodeId, new IntObjectHashMap<>(0), true)
            .exceptionally(e -> {
                LOGGER.error("An error happened while sending close fetchRequest to node=" + nodeId, e);
                return null;
            });
    }
}
 
Example 29
Source Project: crate   Source File: JobSetup.java    License: Apache License 2.0
private static IntObjectHashMap<NodeOperation> groupNodeOperationsByPhase(Collection<? extends NodeOperation> nodeOperations) {
    IntObjectHashMap<NodeOperation> map = new IntObjectHashMap<>(nodeOperations.size());
    for (NodeOperation nodeOperation : nodeOperations) {
        map.put(nodeOperation.executionPhase().phaseId(), nodeOperation);
    }
    return map;
}
 
Example 30
Source Project: crate   Source File: TransportFetchOperationTest.java    License: Apache License 2.0
@Test
public void test_no_ram_accounting_on_empty_fetch_ids_and_close() {
    RamAccounting ramAccounting = TransportFetchOperation.ramAccountingForIncomingResponse(
        RamAccounting.NO_ACCOUNTING,
        new IntObjectHashMap<>(),
        true);
    assertThat(ramAccounting, is(RamAccounting.NO_ACCOUNTING));
}