org.openjdk.jmh.annotations.OperationsPerInvocation Java Examples

The following examples show how to use org.openjdk.jmh.annotations.OperationsPerInvocation. The examples are taken from open source projects; the source file, project, and license are noted above each example.
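Before the project examples, a minimal, self-contained sketch of what the annotation does may help: @OperationsPerInvocation(N) tells JMH that a single invocation of the benchmark method performs N operations, so JMH normalizes the reported score by N (per-operation time or throughput rather than per-invocation). This is the usual pattern when a benchmark method loops over a batch of work, as most of the examples below do. The class, constant, and method names in this sketch are illustrative only and do not come from any of the projects listed here.

import java.util.concurrent.TimeUnit;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.infra.Blackhole;

@State(Scope.Benchmark)
public class OperationsPerInvocationSketch {

    // The annotation value must be a compile-time constant.
    private static final int BATCH_SIZE = 1_000;

    @Benchmark
    @BenchmarkMode(Mode.AverageTime)
    @OutputTimeUnit(TimeUnit.NANOSECONDS)
    @OperationsPerInvocation(BATCH_SIZE) // report time per element, not per 1_000-element batch
    public void sumBatch(Blackhole bh) {
        long sum = 0;
        for (int i = 0; i < BATCH_SIZE; i++) {
            sum += i;
        }
        bh.consume(sum); // keep the loop from being optimized away
    }
}

Without the annotation, JMH would report the cost of the whole 1,000-element loop as a single operation; with it, the reported average time is per element, which is how the examples below express per-record, per-position, or per-call costs.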
Example #1
Source File: BenchmarkGroupByHash.java    From presto with Apache License 2.0
@Benchmark
@OperationsPerInvocation(POSITIONS)
public Object groupByHashPreCompute(BenchmarkData data)
{
    GroupByHash groupByHash = new MultiChannelGroupByHash(data.getTypes(), data.getChannels(), data.getHashChannel(), EXPECTED_SIZE, false, getJoinCompiler(), NOOP);
    data.getPages().forEach(p -> groupByHash.getGroupIds(p).process());

    ImmutableList.Builder<Page> pages = ImmutableList.builder();
    PageBuilder pageBuilder = new PageBuilder(groupByHash.getTypes());
    for (int groupId = 0; groupId < groupByHash.getGroupCount(); groupId++) {
        pageBuilder.declarePosition();
        groupByHash.appendValuesTo(groupId, pageBuilder, 0);
        if (pageBuilder.isFull()) {
            pages.add(pageBuilder.build());
            pageBuilder.reset();
        }
    }
    pages.add(pageBuilder.build());
    return pageBuilder.build();
}
 
Example #2
Source File: SerializationFrameworkAllBenchmarks.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
public void serializerKryoThrift(FlinkEnvironmentContext context) throws Exception {
	StreamExecutionEnvironment env = context.env;
	env.setParallelism(4);
	ExecutionConfig executionConfig = env.getConfig();
	executionConfig.enableForceKryo();
	executionConfig.addDefaultKryoSerializer(org.apache.flink.benchmark.thrift.MyPojo.class, TBaseSerializer.class);
	executionConfig.addDefaultKryoSerializer(org.apache.flink.benchmark.thrift.MyOperation.class, TBaseSerializer.class);

	env.addSource(new ThriftPojoSource(RECORDS_PER_INVOCATION, 10))
			.rebalance()
			.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example #3
Source File: HttpTraceContextExtractBenchmark.java    From opentelemetry-java with Apache License 2.0
/** Benchmark for measuring HttpTraceContext extract. */
@Benchmark
@BenchmarkMode({Mode.AverageTime})
@Fork(1)
@Measurement(iterations = 15, time = 1)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@Warmup(iterations = 5, time = 1)
@OperationsPerInvocation(COUNT)
@Nullable
public Context measureExtract() {
  Context result = null;
  for (int i = 0; i < COUNT; i++) {
    result = httpTraceContext.extract(Context.ROOT, carriers.get(i), getter);
  }
  return result;
}
 
Example #4
Source File: SerializationFrameworkAllBenchmarks.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
public void serializerKryoProtobuf(FlinkEnvironmentContext context) throws Exception {
	StreamExecutionEnvironment env = context.env;
	env.setParallelism(4);
	ExecutionConfig executionConfig = env.getConfig();
	executionConfig.enableForceKryo();
	executionConfig.registerTypeWithKryoSerializer(org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyPojo.class, ProtobufSerializer.class);
	executionConfig.registerTypeWithKryoSerializer(org.apache.flink.benchmark.protobuf.MyPojoOuterClass.MyOperation.class, ProtobufSerializer.class);

	env.addSource(new ProtobufPojoSource(RECORDS_PER_INVOCATION, 10))
			.rebalance()
			.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example #5
Source File: BenchmarkGroupByHash.java    From presto with Apache License 2.0
@Benchmark
@OperationsPerInvocation(POSITIONS)
public Object addPagePreCompute(BenchmarkData data)
{
    GroupByHash groupByHash = new MultiChannelGroupByHash(data.getTypes(), data.getChannels(), data.getHashChannel(), EXPECTED_SIZE, false, getJoinCompiler(), NOOP);
    data.getPages().forEach(p -> groupByHash.addPage(p).process());

    ImmutableList.Builder<Page> pages = ImmutableList.builder();
    PageBuilder pageBuilder = new PageBuilder(groupByHash.getTypes());
    for (int groupId = 0; groupId < groupByHash.getGroupCount(); groupId++) {
        pageBuilder.declarePosition();
        groupByHash.appendValuesTo(groupId, pageBuilder, 0);
        if (pageBuilder.isFull()) {
            pages.add(pageBuilder.build());
            pageBuilder.reset();
        }
    }
    pages.add(pageBuilder.build());
    return pageBuilder.build();
}
 
Example #6
Source File: TwoInputBenchmark.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = TwoInputBenchmark.ONE_IDLE_RECORDS_PER_INVOCATION)
public void twoInputOneIdleMapSink(FlinkEnvironmentContext context) throws Exception {

	StreamExecutionEnvironment env = context.env;
	env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
	env.setParallelism(1);

	QueuingLongSource.reset();
	DataStreamSource<Long> source1 = env.addSource(new QueuingLongSource(1, ONE_IDLE_RECORDS_PER_INVOCATION - 1));
	DataStreamSource<Long> source2 = env.addSource(new QueuingLongSource(2, 1));

	source1
			.connect(source2)
			.transform("custom operator", TypeInformation.of(Long.class), new MultiplyByTwoCoStreamMap())
			.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example #7
Source File: TwoInputBenchmark.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = TwoInputBenchmark.RECORDS_PER_INVOCATION)
public void twoInputMapSink(FlinkEnvironmentContext context) throws Exception {

	StreamExecutionEnvironment env = context.env;

	env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
	env.setParallelism(1);

	// Setting buffer timeout to 1 is an attempt to improve twoInputMapSink benchmark stability.
	// Without 1ms buffer timeout, some JVM forks are much slower than others, making results
	// unstable and unreliable.
	env.setBufferTimeout(1);

	long numRecordsPerInput = RECORDS_PER_INVOCATION / 2;
	DataStreamSource<Long> source1 = env.addSource(new LongSource(numRecordsPerInput));
	DataStreamSource<Long> source2 = env.addSource(new LongSource(numRecordsPerInput));

	source1
		.connect(source2)
		.transform("custom operator", TypeInformation.of(Long.class), new MultiplyByTwoCoStreamMap())
		.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example #8
Source File: BenchmarkGroupByHash.java    From presto with Apache License 2.0
@Benchmark
@OperationsPerInvocation(POSITIONS)
public Object bigintGroupByHash(SingleChannelBenchmarkData data)
{
    GroupByHash groupByHash = new BigintGroupByHash(0, data.getHashEnabled(), EXPECTED_SIZE, NOOP);
    data.getPages().forEach(p -> groupByHash.addPage(p).process());

    ImmutableList.Builder<Page> pages = ImmutableList.builder();
    PageBuilder pageBuilder = new PageBuilder(groupByHash.getTypes());
    for (int groupId = 0; groupId < groupByHash.getGroupCount(); groupId++) {
        pageBuilder.declarePosition();
        groupByHash.appendValuesTo(groupId, pageBuilder, 0);
        if (pageBuilder.isFull()) {
            pages.add(pageBuilder.build());
            pageBuilder.reset();
        }
    }
    pages.add(pageBuilder.build());
    return pageBuilder.build();
}
 
Example #9
Source File: SerializationFrameworkMiniBenchmarks.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
public void serializerKryo(FlinkEnvironmentContext context) throws Exception {
	StreamExecutionEnvironment env = context.env;
	env.setParallelism(4);
	ExecutionConfig executionConfig = env.getConfig();
	executionConfig.enableForceKryo();
	executionConfig.registerKryoType(MyPojo.class);
	executionConfig.registerKryoType(MyOperation.class);

	env.addSource(new PojoSource(RECORDS_PER_INVOCATION, 10))
			.rebalance()
			.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example #10
Source File: MapStateBenchmark.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(mapKeyCount)
public void mapValues(KeyValue keyValue, Blackhole bh) throws Exception {
    keyedStateBackend.setCurrentKey(keyValue.setUpKey);
    for (Double value : mapState.values()) {
        bh.consume(value);
    }
}
 
Example #11
Source File: FriendlyIdBenchmark.java    From friendly-id with Apache License 2.0
@Benchmark
@OperationsPerInvocation(SIZE)
public void deserializeId(Blackhole blackhole) {
	for (int i = 0; i < SIZE; i++) {
		blackhole.consume(FriendlyId.toUuid(ids[i]));
	}
}
 
Example #12
Source File: UuidConverterBenchmark.java    From friendly-id with Apache License 2.0
@Benchmark
@OperationsPerInvocation(SIZE)
public void convertToBigInteger(Blackhole blackhole) {
	for (int i = 0; i < SIZE; i++) {
		blackhole.consume(UuidConverter.toBigInteger(uuids[i]));
	}
}
 
Example #13
Source File: UuidConverterBenchmark.java    From friendly-id with Apache License 2.0
@Benchmark
@OperationsPerInvocation(SIZE)
public void convertFromBigInteger(Blackhole blackhole) {
	for (int i = 0; i < SIZE; i++) {
		blackhole.consume(UuidConverter.toUuid(ids[i]));
	}
}
 
Example #14
Source File: FriendlyIdBenchmark.java    From friendly-id with Apache License 2.0
@Benchmark
@OperationsPerInvocation(SIZE)
public void serializeUuid(Blackhole blackhole) {
	for (int i = 0; i < SIZE; i++) {
		blackhole.consume(FriendlyId.toFriendlyId(uuids[i]));
	}
}
 
Example #15
Source File: SerializationFrameworkMiniBenchmarks.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
public void serializerAvro(FlinkEnvironmentContext context) throws Exception {
	StreamExecutionEnvironment env = context.env;
	env.setParallelism(4);

	env.addSource(new AvroPojoSource(RECORDS_PER_INVOCATION, 10))
			.rebalance()
			.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example #16
Source File: SerializationFrameworkMiniBenchmarks.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = SerializationFrameworkMiniBenchmarks.RECORDS_PER_INVOCATION)
public void serializerRow(FlinkEnvironmentContext context) throws Exception {
	StreamExecutionEnvironment env = context.env;
	env.setParallelism(4);

	env.addSource(new RowSource(RECORDS_PER_INVOCATION, 10))
			.rebalance()
			.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example #17
Source File: MapStateBenchmark.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(mapKeyCount)
public void mapKeys(KeyValue keyValue, Blackhole bh) throws Exception {
    keyedStateBackend.setCurrentKey(keyValue.setUpKey);
    for (Long key : mapState.keys()) {
        bh.consume(key);
    }
}
 
Example #18
Source File: MapStateBenchmark.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(mapKeyCount)
public void mapIterator(KeyValue keyValue, Blackhole bh) throws Exception {
    keyedStateBackend.setCurrentKey(keyValue.setUpKey);
    Iterator<Map.Entry<Long, Double>> iterator = mapState.iterator();
    while (iterator.hasNext()) {
        Map.Entry<Long, Double> entry = iterator.next();
        bh.consume(entry.getKey());
        bh.consume(entry.getValue());
    }
}
 
Example #19
Source File: RngNextIntInRangeBenchmark.java    From commons-rng with Apache License 2.0
@Benchmark
@OperationsPerInvocation(65_536)
public int nextIntNloop65536(IntRange range, Source source) {
    int sum = 0;
    for (int i = 0; i < 65_536; i++) {
        sum += source.getRng().nextInt(range.getN());
    }
    return sum;
}
 
Example #20
Source File: BenchmarkCPUCounters.java    From presto with Apache License 2.0
@Benchmark
@OperationsPerInvocation(ITERATIONS)
public void nanoTime(Blackhole blackhole)
{
    for (int i = 0; i < ITERATIONS; i++) {
        blackhole.consume(System.nanoTime());
    }
}
 
Example #21
Source File: BaseNettyBenchmark.java    From aws-sdk-java-v2 with Apache License 2.0
@Override
@Benchmark
@OperationsPerInvocation(CONCURRENT_CALLS)
public void concurrentApiCall(Blackhole blackhole) {
    CountDownLatch countDownLatch = new CountDownLatch(CONCURRENT_CALLS);
    for (int i = 0; i < CONCURRENT_CALLS; i++) {
        countDownUponCompletion(blackhole, client.allTypes(), countDownLatch);
    }

    awaitCountdownLatchUninterruptibly(countDownLatch, 10, TimeUnit.SECONDS);
}
 
Example #22
Source File: ApacheHttpClientBenchmark.java    From aws-sdk-java-v2 with Apache License 2.0
@Benchmark
@Override
@OperationsPerInvocation(CONCURRENT_CALLS)
public void concurrentApiCall(Blackhole blackhole) {
    CountDownLatch countDownLatch = new CountDownLatch(CONCURRENT_CALLS);
    for (int i = 0; i < CONCURRENT_CALLS; i++) {
        countDownUponCompletion(blackhole,
                                CompletableFuture.runAsync(() -> client.allTypes(), executorService), countDownLatch);
    }

    awaitCountdownLatchUninterruptibly(countDownLatch, 10, TimeUnit.SECONDS);
}
 
Example #23
Source File: ArrayDuplicationBenchmark.java    From openjdk-jdk9 with GNU General Public License v2.0
@Benchmark
@OperationsPerInvocation(TESTSIZE)
public Object[] cloneObjectArray() {
    int j = 0;
    for (int i = 0; i < TESTSIZE; i++) {
        dummy[j++] = arraysClone(testObjectArray[i]);
    }
    return dummy;
}
 
Example #24
Source File: ArrayDuplicationBenchmark.java    From openjdk-jdk9 with GNU General Public License v2.0
@Benchmark
@OperationsPerInvocation(TESTSIZE)
public Object[] arraysCopyOf() {
    int j = 0;
    for (int i = 0; i < TESTSIZE; i++) {
        dummy[j++] = arraysCopyOf(testObjectArray[i]);
    }
    return dummy;
}
 
Example #25
Source File: ArrayDuplicationBenchmark.java    From openjdk-jdk9 with GNU General Public License v2.0
@Benchmark
@OperationsPerInvocation(TESTSIZE)
public Object[] normalArraycopy() {
    int j = 0;
    for (int i = 0; i < TESTSIZE; i++) {
        dummy[j++] = normalArraycopy(testObjectArray[i]);
    }
    return dummy;
}
 
Example #26
Source File: FastAvroSerdesBenchmark.java    From avro-util with BSD 2-Clause "Simplified" License
@Benchmark
@OperationsPerInvocation(NUMBER_OF_OPERATIONS)
public void testFastAvroSerialization(Blackhole bh) throws Exception {
  for (int i = 0; i < NUMBER_OF_OPERATIONS; i++) {
    bh.consume(fastSerializer.serialize(generatedRecord));
  }
}
 
Example #27
Source File: FastAvroSerdesBenchmark.java    From avro-util with BSD 2-Clause "Simplified" License
@Benchmark
@OperationsPerInvocation(NUMBER_OF_OPERATIONS)
public void testAvroSerialization(Blackhole bh) throws Exception {
  for (int i = 0; i < NUMBER_OF_OPERATIONS; i++) {
    // use vanilla avro 1.4 encoder, do not use buffer binary encoder
    bh.consume(serializer.serialize(generatedRecord));
  }
}
 
Example #28
Source File: HttpTraceContextInjectBenchmark.java    From opentelemetry-java with Apache License 2.0
/** Benchmark for measuring inject with default trace state and sampled trace options. */
@Benchmark
@BenchmarkMode({Mode.AverageTime})
@Fork(1)
@Measurement(iterations = 15, time = 1)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@Warmup(iterations = 5, time = 1)
@OperationsPerInvocation(COUNT)
public Map<String, String> measureInject() {
  for (int i = 0; i < COUNT; i++) {
    httpTraceContext.inject(contexts.get(i), carrier, setter);
  }
  return carrier;
}
 
Example #29
Source File: BenchmarkReferenceCountMap.java    From presto with Apache License 2.0
@Benchmark
@OperationsPerInvocation(NUMBER_OF_ENTRIES)
public ReferenceCountMap benchmarkInserts(Data data)
{
    ReferenceCountMap map = new ReferenceCountMap();
    for (int i = 0; i < NUMBER_OF_ENTRIES; i++) {
        map.incrementAndGet(data.slices[i]);
        map.incrementAndGet(data.slices[i].getBase());
    }
    return map;
}
 
Example #30
Source File: BenchmarkNodeScheduler.java    From presto with Apache License 2.0
@Benchmark
@OperationsPerInvocation(SPLITS)
public Object benchmark(BenchmarkData data)
{
    List<RemoteTask> remoteTasks = ImmutableList.copyOf(data.getTaskMap().values());
    Iterator<MockRemoteTaskFactory.MockRemoteTask> finishingTask = Iterators.cycle(data.getTaskMap().values());
    Iterator<Split> splits = data.getSplits().iterator();
    Set<Split> batch = new HashSet<>();
    while (splits.hasNext() || !batch.isEmpty()) {
        Multimap<InternalNode, Split> assignments = data.getNodeSelector().computeAssignments(batch, remoteTasks).getAssignments();
        for (InternalNode node : assignments.keySet()) {
            MockRemoteTaskFactory.MockRemoteTask remoteTask = data.getTaskMap().get(node);
            remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder()
                    .putAll(new PlanNodeId("sourceId"), assignments.get(node))
                    .build());
            remoteTask.startSplits(MAX_SPLITS_PER_NODE);
        }
        if (assignments.size() == batch.size()) {
            batch.clear();
        }
        else {
            batch.removeAll(assignments.values());
        }
        while (batch.size() < SPLIT_BATCH_SIZE && splits.hasNext()) {
            batch.add(splits.next());
        }
        finishingTask.next().finishSplits((int) Math.ceil(MAX_SPLITS_PER_NODE / 50.0));
    }

    return remoteTasks;
}