org.openjdk.jmh.annotations.Threads Java Examples

The following examples show how to use org.openjdk.jmh.annotations.Threads, the JMH annotation that sets how many threads run an annotated @Benchmark method concurrently. Each example is taken from an open-source project; the source file, project, and license are noted above it.
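Before the project examples, here is a minimal, self-contained sketch of how @Threads is typically applied (the class name ThreadsExampleBenchmark and its fields are illustrative, not taken from the projects below). The annotation can be placed on an individual @Benchmark method, as in every example on this page, or on the class to apply to all benchmarks in it:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Threads;

@State(Scope.Benchmark)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public class ThreadsExampleBenchmark {

  // Shared across all benchmark threads because of @State(Scope.Benchmark).
  private final AtomicLong counter = new AtomicLong();

  // Runs on a single thread, like the @Threads(1) Iceberg examples below.
  @Benchmark
  @Threads(1)
  public long singleThreaded() {
    return counter.incrementAndGet();
  }

  // Runs on four threads concurrently, like the @Threads(4) Ozone examples below.
  @Benchmark
  @Threads(4)
  public long fourThreads() {
    return counter.incrementAndGet();
  }
}

Returning the computed value lets JMH consume it, which keeps the JIT from eliminating the call; the project examples achieve the same effect with an explicit Blackhole parameter.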
Example #1
Source File: SparkParquetReadersFlatDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readUsingIcebergReaderUnsafe(Blackhole blackhole) throws IOException {
  try (CloseableIterable<InternalRow> rows = Parquet.read(Files.localInput(dataFile))
      .project(SCHEMA)
      .createReaderFunc(type -> SparkParquetReaders.buildReader(SCHEMA, type))
      .build()) {

    Iterable<InternalRow> unsafeRows = Iterables.transform(
        rows,
        APPLY_PROJECTION.bind(SparkBenchmarkUtil.projection(SCHEMA, SCHEMA))::invoke);

    for (InternalRow row : unsafeRows) {
      blackhole.consume(row);
    }
  }
}
 
Example #2
Source File: SparkParquetReadersFlatDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readUsingSparkReader(Blackhole blackhole) throws IOException {
  StructType sparkSchema = SparkSchemaUtil.convert(SCHEMA);
  try (CloseableIterable<InternalRow> rows = Parquet.read(Files.localInput(dataFile))
      .project(SCHEMA)
      .readSupport(new ParquetReadSupport())
      .set("org.apache.spark.sql.parquet.row.requested_schema", sparkSchema.json())
      .set("spark.sql.parquet.binaryAsString", "false")
      .set("spark.sql.parquet.int96AsTimestamp", "false")
      .callInit()
      .build()) {

    for (InternalRow row : rows) {
      blackhole.consume(row);
    }
  }
}
 
Example #3
Source File: SparkParquetReadersFlatDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readWithProjectionUsingIcebergReaderUnsafe(Blackhole blackhole) throws IOException {
  try (CloseableIterable<InternalRow> rows = Parquet.read(Files.localInput(dataFile))
      .project(PROJECTED_SCHEMA)
      .createReaderFunc(type -> SparkParquetReaders.buildReader(PROJECTED_SCHEMA, type))
      .build()) {

    Iterable<InternalRow> unsafeRows = Iterables.transform(
        rows,
        APPLY_PROJECTION.bind(SparkBenchmarkUtil.projection(PROJECTED_SCHEMA, PROJECTED_SCHEMA))::invoke);

    for (InternalRow row : unsafeRows) {
      blackhole.consume(row);
    }
  }
}
 
Example #4
Source File: SparkParquetReadersFlatDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readWithProjectionUsingSparkReader(Blackhole blackhole) throws IOException {
  StructType sparkSchema = SparkSchemaUtil.convert(PROJECTED_SCHEMA);
  try (CloseableIterable<InternalRow> rows = Parquet.read(Files.localInput(dataFile))
      .project(PROJECTED_SCHEMA)
      .readSupport(new ParquetReadSupport())
      .set("org.apache.spark.sql.parquet.row.requested_schema", sparkSchema.json())
      .set("spark.sql.parquet.binaryAsString", "false")
      .set("spark.sql.parquet.int96AsTimestamp", "false")
      .callInit()
      .build()) {

    for (InternalRow row : rows) {
      blackhole.consume(row);
    }
  }
}
 
Example #5
Source File: SparkParquetReadersNestedDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readWithProjectionUsingSparkReader(Blackhole blackhole) throws IOException {
  StructType sparkSchema = SparkSchemaUtil.convert(PROJECTED_SCHEMA);
  try (CloseableIterable<InternalRow> rows = Parquet.read(Files.localInput(dataFile))
      .project(PROJECTED_SCHEMA)
      .readSupport(new ParquetReadSupport())
      .set("org.apache.spark.sql.parquet.row.requested_schema", sparkSchema.json())
      .set("spark.sql.parquet.binaryAsString", "false")
      .set("spark.sql.parquet.int96AsTimestamp", "false")
      .callInit()
      .build()) {

    for (InternalRow row : rows) {
      blackhole.consume(row);
    }
  }
}
 
Example #6
Source File: SparkParquetWritersFlatDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void writeUsingSparkWriter() throws IOException {
  StructType sparkSchema = SparkSchemaUtil.convert(SCHEMA);
  try (FileAppender<InternalRow> writer = Parquet.write(Files.localOutput(dataFile))
      .writeSupport(new ParquetWriteSupport())
      .set("org.apache.spark.sql.parquet.row.attributes", sparkSchema.json())
      .set("spark.sql.parquet.writeLegacyFormat", "false")
      .set("spark.sql.parquet.binaryAsString", "false")
      .set("spark.sql.parquet.int96AsTimestamp", "false")
      .set("spark.sql.parquet.outputTimestampType", "TIMESTAMP_MICROS")
      .schema(SCHEMA)
      .build()) {

    writer.addAll(rows);
  }
}
 
Example #7
Source File: BenchMarkOzoneManager.java    From hadoop-ozone with Apache License 2.0
@Threads(4)
@Benchmark
public void createAndCommitKeyBenchMark(BenchMarkOzoneManager state,
    Blackhole bh) throws IOException {
  String key = UUID.randomUUID().toString();
  OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(key)
      .setDataSize(50)
      .setFactor(HddsProtos.ReplicationFactor.THREE)
      .setType(HddsProtos.ReplicationType.RATIS)
      .build();
  OpenKeySession openKeySession = state.om.openKey(omKeyArgs);
  state.om.allocateBlock(omKeyArgs, openKeySession.getId(),
      new ExcludeList());
}
 
Example #8
Source File: BenchMarkOzoneManager.java    From hadoop-ozone with Apache License 2.0
@Threads(4)
@Benchmark
public void allocateBlockBenchMark(BenchMarkOzoneManager state,
    Blackhole bh) throws IOException {
  int index = (int) (Math.random() * keyNames.size());
  String key = keyNames.get(index);
  OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(key)
      .setDataSize(50)
      .setFactor(HddsProtos.ReplicationFactor.THREE)
      .setType(HddsProtos.ReplicationType.RATIS)
      .build();
  state.om.allocateBlock(omKeyArgs, clientIDs.get(index), new ExcludeList());
}
 
Example #9
Source File: IcebergSourceNestedAvroDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readFileSource() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.FILES_OPEN_COST_IN_BYTES().key(), Integer.toString(128 * 1024 * 1024));
  withSQLConf(conf, () -> {
    Dataset<Row> df = spark().read().format("avro").load(dataLocation());
    materialize(df);
  });
}
 
Example #10
Source File: IcebergSourceNestedParquetDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readWithProjectionFileSourceNonVectorized() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.PARQUET_VECTORIZED_READER_ENABLED().key(), "false");
  conf.put(SQLConf.FILES_OPEN_COST_IN_BYTES().key(), Integer.toString(128 * 1024 * 1024));
  conf.put(SQLConf.NESTED_SCHEMA_PRUNING_ENABLED().key(), "true");
  withSQLConf(conf, () -> {
    Dataset<Row> df = spark().read().parquet(dataLocation()).selectExpr("nested.col3");
    materialize(df);
  });
}
 
Example #11
Source File: IcebergSourceFlatAvroDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readIceberg() {
  Map<String, String> tableProperties = Maps.newHashMap();
  tableProperties.put(SPLIT_OPEN_FILE_COST, Integer.toString(128 * 1024 * 1024));
  withTableProperties(tableProperties, () -> {
    String tableLocation = table().location();
    Dataset<Row> df = spark().read().format("iceberg").load(tableLocation);
    materialize(df);
  });
}
 
Example #12
Source File: IcebergSourceFlatParquetDataWriteBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void writeFileSource() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.PARQUET_COMPRESSION().key(), "gzip");
  withSQLConf(conf, () -> benchmarkData().write().mode(SaveMode.Append).parquet(dataLocation()));
}
 
Example #13
Source File: IcebergSourceNestedORCDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readWithProjectionFileSourceVectorized() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.ORC_VECTORIZED_READER_ENABLED().key(), "true");
  conf.put(SQLConf.FILES_OPEN_COST_IN_BYTES().key(), Integer.toString(128 * 1024 * 1024));
  withSQLConf(conf, () -> {
    Dataset<Row> df = spark().read().orc(dataLocation()).selectExpr("nested.col3");
    materialize(df);
  });
}
 
Example #14
Source File: IcebergSourceNestedORCDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readFileSourceVectorized() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.ORC_VECTORIZED_READER_ENABLED().key(), "true");
  conf.put(SQLConf.FILES_OPEN_COST_IN_BYTES().key(), Integer.toString(128 * 1024 * 1024));
  withSQLConf(conf, () -> {
    Dataset<Row> df = spark().read().orc(dataLocation());
    materialize(df);
  });
}
 
Example #15
Source File: IcebergSourceNestedORCDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readFileSourceNonVectorized() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.ORC_VECTORIZED_READER_ENABLED().key(), "false");
  conf.put(SQLConf.FILES_OPEN_COST_IN_BYTES().key(), Integer.toString(128 * 1024 * 1024));
  withSQLConf(conf, () -> {
    Dataset<Row> df = spark().read().orc(dataLocation());
    materialize(df);
  });
}
 
Example #16
Source File: VectorizedReadFlatParquetDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readTimestampsSparkVectorized5k() {
  withSQLConf(sparkConfWithVectorizationEnabled(5000), () -> {
    Dataset<Row> df = spark().read().parquet(dataLocation()).select("timestampCol");
    materialize(df);
  });
}
 
Example #17
Source File: LitePoolBenchmark.java    From lite-pool with Apache License 2.0
@Benchmark
@Threads(5)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public void lite_pool_05_thread() {
    TestObject object = pool.acquire();
    if (object != null) pool.release(object);
}
 
Example #18
Source File: IcebergSourceFlatORCDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readFileSourceNonVectorized() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.ORC_VECTORIZED_READER_ENABLED().key(), "false");
  conf.put(SQLConf.FILES_OPEN_COST_IN_BYTES().key(), Integer.toString(128 * 1024 * 1024));
  withSQLConf(conf, () -> {
    Dataset<Row> df = spark().read().orc(dataLocation());
    materialize(df);
  });
}
 
Example #19
Source File: VectorizedReadFlatParquetDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readDecimalsSparkVectorized5k() {
  withSQLConf(sparkConfWithVectorizationEnabled(5000), () -> {
    Dataset<Row> df = spark().read().parquet(dataLocation()).select("decimalCol");
    materialize(df);
  });
}
 
Example #20
Source File: SparkParquetReadersFlatDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readWithProjectionUsingIcebergReader(Blackhole blackhole) throws IOException {
  try (CloseableIterable<InternalRow> rows = Parquet.read(Files.localInput(dataFile))
      .project(PROJECTED_SCHEMA)
      .createReaderFunc(type -> SparkParquetReaders.buildReader(PROJECTED_SCHEMA, type))
      .build()) {

    for (InternalRow row : rows) {
      blackhole.consume(row);
    }
  }
}
 
Example #21
Source File: VectorizedReadFlatParquetDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readStringsIcebergVectorized5k() {
  withTableProperties(tablePropsWithVectorizationEnabled(5000), () -> {
    String tableLocation = table().location();
    Dataset<Row> df = spark().read().format("iceberg")
        .load(tableLocation).select("stringCol");
    materialize(df);
  });
}
 
Example #22
Source File: SparkParquetReadersFlatDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readUsingIcebergReader(Blackhole blackHole) throws IOException {
  try (CloseableIterable<InternalRow> rows = Parquet.read(Files.localInput(dataFile))
      .project(SCHEMA)
      .createReaderFunc(type -> SparkParquetReaders.buildReader(SCHEMA, type))
      .build()) {

    for (InternalRow row : rows) {
      blackHole.consume(row);
    }
  }
}
 
Example #23
Source File: CommonsPool2Benchmark.java    From lite-pool with Apache License 2.0
@Benchmark
@Threads(20)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
public void commons_pool2_20_thread() throws Exception {
    TestObject object = pool.borrowObject();
    if (object != null) pool.returnObject(object);
}
 
Example #24
Source File: ByteBufferBenchmark.java    From Oak with Apache License 2.0
@Warmup(iterations = 1)
@Measurement(iterations = 10)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Fork(value = 1)
@Threads(1)
@Benchmark
public void get(Blackhole blackhole, BenchmarkState state) {
for (int i = 0; i < state.bytes; ++i) {
        blackhole.consume(state.byteBuffer.get(i));
    }
}
 
Example #25
Source File: ByteBufferBenchmark.java    From Oak with Apache License 2.0
@Warmup(iterations = 1)
@Measurement(iterations = 10)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Fork(value = 1)
@Threads(1)
@Benchmark
public void put(Blackhole blackhole, BenchmarkState state) {
for (int i = 0; i < state.bytes; ++i) {
        state.byteBuffer.put(i, (byte) i);
    }
}
 
Example #26
Source File: PutBenchmark.java    From Oak with Apache License 2.0
@Warmup(iterations = 5)
@Measurement(iterations = 10)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Fork(value = 1)
@Threads(8)
@Benchmark
public void putIfAbsent(Blackhole blackhole, BenchmarkState state, ThreadState threadState) {
    for (int i = 0; i < threadState.numRows; ++i) {
        Map.Entry<String, String> pair = threadState.rows.get(i);
        state.oakMap.zc().put(pair.getKey(), pair.getValue());
        blackhole.consume(state.oakMap);
    }
}
 
Example #27
Source File: VectorizedReadFlatParquetDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readTimestampsIcebergVectorized5k() {
  withTableProperties(tablePropsWithVectorizationEnabled(5000), () -> {
    String tableLocation = table().location();
    Dataset<Row> df = spark().read().format("iceberg")
        .load(tableLocation).select("timestampCol");
    materialize(df);
  });
}
 
Example #28
Source File: VectorizedReadFlatParquetDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readIntegersIcebergVectorized5k() {
  withTableProperties(tablePropsWithVectorizationEnabled(5000), () -> {
    String tableLocation = table().location();
    Dataset<Row> df = spark().read().format("iceberg")
        .load(tableLocation).select("intCol");
    materialize(df);
  });
}
 
Example #29
Source File: VectorizedReadFlatParquetDataBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readLongsSparkVectorized5k() {
  withSQLConf(sparkConfWithVectorizationEnabled(5000), () -> {
    Dataset<Row> df = spark().read().parquet(dataLocation()).select("longCol");
    materialize(df);
  });
}
 
Example #30
Source File: IcebergSourceFlatParquetDataReadBenchmark.java    From iceberg with Apache License 2.0
@Benchmark
@Threads(1)
public void readFileSourceNonVectorized() {
  Map<String, String> conf = Maps.newHashMap();
  conf.put(SQLConf.PARQUET_VECTORIZED_READER_ENABLED().key(), "false");
  conf.put(SQLConf.FILES_OPEN_COST_IN_BYTES().key(), Integer.toString(128 * 1024 * 1024));
  withSQLConf(conf, () -> {
    Dataset<Row> df = spark().read().parquet(dataLocation());
    materialize(df);
  });
}