Java Code Examples for io.airlift.units.DataSize#of()

The following examples show how to use io.airlift.units.DataSize#of(). All of them are taken from the Presto project (Apache License 2.0); the source file each snippet comes from is listed above the code.
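
Before the project examples, here is a minimal standalone sketch of the two DataSize factory methods used throughout. The class name DataSizeDemo and the printed values are illustrative, not taken from Presto:

import io.airlift.units.DataSize;

import static io.airlift.units.DataSize.Unit.MEGABYTE;

public class DataSizeDemo
{
    public static void main(String[] args)
    {
        // DataSize.of(size, unit) builds a size from a count of units
        DataSize bufferSize = DataSize.of(8, MEGABYTE);

        // DataSize.ofBytes(bytes) builds a size from an exact byte count
        DataSize exactSize = DataSize.ofBytes(8_388_608);

        // airlift units use binary factors, so 8 MB is 8 * 1024 * 1024 bytes
        System.out.println(bufferSize.toBytes()); // prints 8388608
        System.out.println(exactSize.toBytes());  // prints 8388608
    }
}

In the examples below, of(...) is the readable form used for configuration constants, while ofBytes(...) appears where an exact byte count is already at hand.
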
Example 1
Source File: RcFileTester.java    From presto with Apache License 2.0
private static RcFileReader createRcFileReader(TempFile tempFile, Type type, RcFileEncoding encoding)
        throws IOException
{
    RcFileDataSource rcFileDataSource = new FileRcFileDataSource(tempFile.getFile());
    RcFileReader rcFileReader = new RcFileReader(
            rcFileDataSource,
            encoding,
            ImmutableMap.of(0, type),
            new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())),
            0,
            tempFile.getFile().length(),
            DataSize.of(8, MEGABYTE));

    assertEquals(rcFileReader.getColumnCount(), 1);

    return rcFileReader;
}
 
Example 2
Source File: BenchmarkPartitionedOutputOperator.java    From presto with Apache License 2.0
private PartitionedOutputOperator createPartitionedOutputOperator()
{
    PartitionFunction partitionFunction = new LocalPartitionGenerator(new InterpretedHashGenerator(ImmutableList.of(BIGINT), new int[] {0}), PARTITION_COUNT);
    PagesSerdeFactory serdeFactory = new PagesSerdeFactory(createTestMetadataManager().getBlockEncodingSerde(), false);
    OutputBuffers buffers = createInitialEmptyOutputBuffers(PARTITIONED);
    for (int partition = 0; partition < PARTITION_COUNT; partition++) {
        buffers = buffers.withBuffer(new OutputBuffers.OutputBufferId(partition), partition);
    }
    PartitionedOutputBuffer buffer = createPartitionedBuffer(
            buffers.withNoMoreBufferIds(),
            DataSize.ofBytes(Long.MAX_VALUE)); // don't let output buffer block
    PartitionedOutputFactory operatorFactory = new PartitionedOutputFactory(
            partitionFunction,
            ImmutableList.of(0),
            ImmutableList.of(Optional.empty()),
            false,
            OptionalInt.empty(),
            buffer,
            DataSize.of(1, GIGABYTE));
    return (PartitionedOutputOperator) operatorFactory
            .createOutputOperator(0, new PlanNodeId("plan-node-0"), TYPES, Function.identity(), serdeFactory)
            .createOperator(createDriverContext());
}
 
Example 3
Source File: TestExchangeOperator.java    From presto with Apache License 2.0
@SuppressWarnings("resource")
@BeforeClass
public void setUp()
{
    scheduler = newScheduledThreadPool(4, daemonThreadsNamed("test-%s"));
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed("test-scheduledExecutor-%s"));
    pageBufferClientCallbackExecutor = Executors.newSingleThreadExecutor();
    httpClient = new TestingHttpClient(new TestingExchangeHttpClientHandler(taskBuffers), scheduler);

    exchangeClientSupplier = (systemMemoryUsageListener) -> new ExchangeClient(
            "localhost",
            DataIntegrityVerification.ABORT,
            DataSize.of(32, MEGABYTE),
            DataSize.of(10, MEGABYTE),
            3,
            new Duration(1, TimeUnit.MINUTES),
            true,
            httpClient,
            scheduler,
            systemMemoryUsageListener,
            pageBufferClientCallbackExecutor);
}
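
Note: the 32 MB and 10 MB DataSize arguments correspond to the exchange client's buffer capacity and its maximum per-request response size, respectively.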
 
Example 4
Source File: TestSqlTask.java    From presto with Apache License 2.0
private SqlTask createInitialTask()
{
    TaskId taskId = new TaskId("query", 0, nextTaskId.incrementAndGet());
    URI location = URI.create("fake://task/" + taskId);

    QueryContext queryContext = new QueryContext(new QueryId("query"),
            DataSize.of(1, MEGABYTE),
            DataSize.of(2, MEGABYTE),
            new MemoryPool(new MemoryPoolId("test"), DataSize.of(1, GIGABYTE)),
            new TestingGcMonitor(),
            taskNotificationExecutor,
            driverYieldExecutor,
            DataSize.of(1, MEGABYTE),
            new SpillSpaceTracker(DataSize.of(1, GIGABYTE)));

    queryContext.addTaskContext(new TaskStateMachine(taskId, taskNotificationExecutor), testSessionBuilder().build(), false, false, OptionalInt.empty());

    return createSqlTask(
            taskId,
            location,
            "fake",
            queryContext,
            sqlTaskExecutionFactory,
            taskNotificationExecutor,
            Functions.identity(),
            DataSize.of(32, MEGABYTE),
            new CounterStat());
}
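
Note: the three DataSize literals passed to QueryContext set, in order, the query's user memory limit (1 MB), its total memory limit (2 MB), and its spill limit (1 MB); the 32 MB value handed to createSqlTask sizes the task's output buffer.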
 
Example 5
Source File: TestHiveSplitSource.java    From presto with Apache License 2.0
@Test
public void testOutstandingSplitSize()
{
    DataSize maxOutstandingSplitsSize = DataSize.of(1, MEGABYTE);
    HiveSplitSource hiveSplitSource = HiveSplitSource.allAtOnce(
            SESSION,
            "database",
            "table",
            10,
            10000,
            maxOutstandingSplitsSize,
            Integer.MAX_VALUE,
            new TestingHiveSplitLoader(),
            Executors.newFixedThreadPool(5),
            new CounterStat());
    int testSplitSizeInBytes = new TestSplit(0).getEstimatedSizeInBytes();

    int maxSplitCount = toIntExact(maxOutstandingSplitsSize.toBytes()) / testSplitSizeInBytes;
    for (int i = 0; i < maxSplitCount; i++) {
        hiveSplitSource.addToQueue(new TestSplit(i));
        assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), i + 1);
    }

    assertEquals(getSplits(hiveSplitSource, maxSplitCount).size(), maxSplitCount);

    for (int i = 0; i < maxSplitCount; i++) {
        hiveSplitSource.addToQueue(new TestSplit(i));
        assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), i + 1);
    }
    try {
        hiveSplitSource.addToQueue(new TestSplit(0));
        fail("expect failure");
    }
    catch (PrestoException e) {
        assertContains(e.getMessage(), "Split buffering for database.table exceeded memory limit");
    }
}
 
Example 6
Source File: TestRcFileReaderManual.java    From presto with Apache License 2.0
private static List<Integer> readValues(Slice data, int offset, int length)
        throws IOException
{
    // to simplify the testing:
    //     change negative offsets to 0
    //     truncate length so it is not off the end of the file

    if (offset < 0) {
        // adjust length to new offset
        length += offset;
        offset = 0;
    }
    if (offset + length > data.length()) {
        length = data.length() - offset;
    }

    RcFileReader reader = new RcFileReader(
            new SliceRcFileDataSource(data),
            new BinaryRcFileEncoding(),
            ImmutableMap.of(0, SMALLINT),
            new BogusRcFileCodecFactory(),
            offset,
            length,
            DataSize.of(8, MEGABYTE));

    ImmutableList.Builder<Integer> values = ImmutableList.builder();
    while (reader.advance() >= 0) {
        Block block = reader.readBlock(0);
        for (int position = 0; position < block.getPositionCount(); position++) {
            values.add((int) SMALLINT.getLong(block, position));
        }
    }

    return values.build();
}
 
Example 7
Source File: TestMemoryPools.java    From presto with Apache License 2.0
private void setUp(Supplier<List<Driver>> driversSupplier)
{
    checkState(localQueryRunner == null, "Already set up");

    Session session = testSessionBuilder()
            .setCatalog("tpch")
            .setSchema("tiny")
            .setSystemProperty("task_default_concurrency", "1")
            .build();

    localQueryRunner = LocalQueryRunner.builder(session)
            .withInitialTransaction()
            .build();

    // add tpch
    localQueryRunner.createCatalog("tpch", new TpchConnectorFactory(1), ImmutableMap.of());

    userPool = new MemoryPool(new MemoryPoolId("test"), TEN_MEGABYTES);
    fakeQueryId = new QueryId("fake");
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(DataSize.of(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(new QueryId("query"),
            TEN_MEGABYTES,
            DataSize.of(20, MEGABYTE),
            userPool,
            new TestingGcMonitor(),
            localQueryRunner.getExecutor(),
            localQueryRunner.getScheduler(),
            TEN_MEGABYTES,
            spillSpaceTracker);
    taskContext = createTaskContext(queryContext, localQueryRunner.getExecutor(), localQueryRunner.getDefaultSession());
    drivers = driversSupplier.get();
}
 
Example 8
Source File: RcFileTester.java    From presto with Apache License 2.0
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata)
        throws Exception
{
    OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile));
    AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader()));
    RcFileWriter writer = new RcFileWriter(
            output,
            ImmutableList.of(type),
            format.getVectorEncoding(),
            compression.getCodecName(),
            codecFactory,
            metadata,
            DataSize.of(100, KILOBYTE),   // use a smaller size to create more row groups
            DataSize.of(200, KILOBYTE),
            true);
    BlockBuilder blockBuilder = type.createBlockBuilder(null, 1024);
    while (values.hasNext()) {
        Object value = values.next();
        writeValue(type, blockBuilder, value);
    }

    writer.write(new Page(blockBuilder.build()));
    writer.close();

    writer.validate(new FileRcFileDataSource(outputFile));

    return DataSize.ofBytes(output.size());
}
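
Note how this example uses both factories: DataSize.of(...) configures the writer's row-group sizing up front, while DataSize.ofBytes(output.size()) converts the number of bytes actually written back into a DataSize for the caller.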
 
Example 9
Source File: TestSqlTaskExecution.java    From presto with Apache License 2.0
private PartitionedOutputBuffer newTestingOutputBuffer(ScheduledExecutorService taskNotificationExecutor)
{
    return new PartitionedOutputBuffer(
            TASK_ID.toString(),
            new StateMachine<>("bufferState", taskNotificationExecutor, OPEN, TERMINAL_BUFFER_STATES),
            createInitialEmptyOutputBuffers(PARTITIONED)
                    .withBuffer(OUTPUT_BUFFER_ID, 0)
                    .withNoMoreBufferIds(),
            DataSize.of(1, MEGABYTE),
            () -> new SimpleLocalMemoryContext(newSimpleAggregatedMemoryContext(), "test"),
            taskNotificationExecutor);
}
 
Example 10
Source File: TestSqlTaskExecution.java    From presto with Apache License 2.0
private TaskContext newTestingTaskContext(ScheduledExecutorService taskNotificationExecutor, ScheduledExecutorService driverYieldExecutor, TaskStateMachine taskStateMachine)
{
    QueryContext queryContext = new QueryContext(
            new QueryId("queryid"),
            DataSize.of(1, MEGABYTE),
            DataSize.of(2, MEGABYTE),
            new MemoryPool(new MemoryPoolId("test"), DataSize.of(1, GIGABYTE)),
            new TestingGcMonitor(),
            taskNotificationExecutor,
            driverYieldExecutor,
            DataSize.of(1, MEGABYTE),
            new SpillSpaceTracker(DataSize.of(1, GIGABYTE)));
    return queryContext.addTaskContext(taskStateMachine, TEST_SESSION, false, false, OptionalInt.empty());
}
 
Example 11
Source File: TestHttpPageBufferClient.java    From presto with Apache License 2.0
@Test
public void testLifecycle()
        throws Exception
{
    CyclicBarrier beforeRequest = new CyclicBarrier(2);
    CyclicBarrier afterRequest = new CyclicBarrier(2);
    StaticRequestProcessor processor = new StaticRequestProcessor(beforeRequest, afterRequest);
    processor.setResponse(new TestingResponse(HttpStatus.NO_CONTENT, ImmutableListMultimap.of(), new byte[0]));

    CyclicBarrier requestComplete = new CyclicBarrier(2);
    TestingClientCallback callback = new TestingClientCallback(requestComplete);

    URI location = URI.create("http://localhost:8080");
    HttpPageBufferClient client = new HttpPageBufferClient(
            "localhost",
            new TestingHttpClient(processor, scheduler),
            DataIntegrityVerification.ABORT,
            DataSize.of(10, Unit.MEGABYTE),
            new Duration(1, TimeUnit.MINUTES),
            true,
            location,
            callback,
            scheduler,
            pageBufferClientCallbackExecutor);

    assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");

    client.scheduleRequest();
    beforeRequest.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "running", 0, 1, 0, 0, "PROCESSING_REQUEST");
    assertEquals(client.isRunning(), true);
    afterRequest.await(10, TimeUnit.SECONDS);

    requestComplete.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "queued", 0, 1, 1, 1, "not scheduled");

    client.close();
    beforeRequest.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "closed", 0, 1, 1, 1, "PROCESSING_REQUEST");
    afterRequest.await(10, TimeUnit.SECONDS);
    requestComplete.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "closed", 0, 1, 2, 1, "not scheduled");
}
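
Note: DataSize.of(10, Unit.MEGABYTE) sets the maximum size of a single response the client will request; unlike most examples here, the Unit enum is referenced by its qualified name rather than through a static import.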
 
Example 12
Source File: MockRemoteTaskFactory.java    From presto with Apache License 2.0
public MockRemoteTask(
        TaskId taskId,
        PlanFragment fragment,
        String nodeId,
        Executor executor,
        ScheduledExecutorService scheduledExecutor,
        Multimap<PlanNodeId, Split> initialSplits,
        OptionalInt totalPartitions,
        PartitionedSplitCountTracker partitionedSplitCountTracker)
{
    this.taskStateMachine = new TaskStateMachine(requireNonNull(taskId, "taskId is null"), requireNonNull(executor, "executor is null"));

    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), DataSize.of(1, GIGABYTE));
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(DataSize.of(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(taskId.getQueryId(),
            DataSize.of(1, MEGABYTE),
            DataSize.of(2, MEGABYTE),
            memoryPool,
            new TestingGcMonitor(),
            executor,
            scheduledExecutor,
            DataSize.of(1, MEGABYTE),
            spillSpaceTracker);
    this.taskContext = queryContext.addTaskContext(taskStateMachine, TEST_SESSION, true, true, totalPartitions);

    this.location = URI.create("fake://task/" + taskId);

    this.outputBuffer = new LazyOutputBuffer(
            taskId,
            TASK_INSTANCE_ID,
            executor,
            DataSize.ofBytes(1),
            () -> new SimpleLocalMemoryContext(newSimpleAggregatedMemoryContext(), "test"));

    this.fragment = requireNonNull(fragment, "fragment is null");
    this.nodeId = requireNonNull(nodeId, "nodeId is null");
    splits.putAll(initialSplits);
    this.partitionedSplitCountTracker = requireNonNull(partitionedSplitCountTracker, "partitionedSplitCountTracker is null");
    partitionedSplitCountTracker.setPartitionedSplitCount(getPartitionedSplitCount());
    updateSplitQueueSpace();
}
 
Example 13
Source File: TestHashJoinOperator.java    From presto with Apache License 2.0
private BuildSideSetup setupBuildSide(
        boolean parallelBuild,
        TaskContext taskContext,
        List<Integer> hashChannels,
        RowPagesBuilder buildPages,
        Optional<InternalJoinFilterFunction> filterFunction,
        boolean spillEnabled,
        SingleStreamSpillerFactory singleStreamSpillerFactory)
{
    Optional<JoinFilterFunctionFactory> filterFunctionFactory = filterFunction
            .map(function -> (session, addresses, pages) -> new StandardJoinFilterFunction(function, addresses, pages));

    int partitionCount = parallelBuild ? PARTITION_COUNT : 1;
    LocalExchangeFactory localExchangeFactory = new LocalExchangeFactory(
            FIXED_HASH_DISTRIBUTION,
            partitionCount,
            buildPages.getTypes(),
            hashChannels,
            buildPages.getHashChannel(),
            UNGROUPED_EXECUTION,
            DataSize.of(32, DataSize.Unit.MEGABYTE));
    LocalExchangeSinkFactoryId localExchangeSinkFactoryId = localExchangeFactory.newSinkFactoryId();
    localExchangeFactory.noMoreSinkFactories();

    // collect input data into the partitioned exchange
    DriverContext collectDriverContext = taskContext.addPipelineContext(0, true, true, false).addDriverContext();
    ValuesOperatorFactory valuesOperatorFactory = new ValuesOperatorFactory(0, new PlanNodeId("values"), buildPages.build());
    LocalExchangeSinkOperatorFactory sinkOperatorFactory = new LocalExchangeSinkOperatorFactory(localExchangeFactory, 1, new PlanNodeId("sink"), localExchangeSinkFactoryId, Function.identity());
    Driver sourceDriver = Driver.createDriver(collectDriverContext,
            valuesOperatorFactory.createOperator(collectDriverContext),
            sinkOperatorFactory.createOperator(collectDriverContext));
    valuesOperatorFactory.noMoreOperators();
    sinkOperatorFactory.noMoreOperators();

    while (!sourceDriver.isFinished()) {
        sourceDriver.process();
    }

    // build side operator factories
    LocalExchangeSourceOperatorFactory sourceOperatorFactory = new LocalExchangeSourceOperatorFactory(0, new PlanNodeId("source"), localExchangeFactory);
    JoinBridgeManager<PartitionedLookupSourceFactory> lookupSourceFactoryManager = JoinBridgeManager.lookupAllAtOnce(new PartitionedLookupSourceFactory(
            buildPages.getTypes(),
            rangeList(buildPages.getTypes().size()).stream()
                    .map(buildPages.getTypes()::get)
                    .collect(toImmutableList()),
            hashChannels.stream()
                    .map(buildPages.getTypes()::get)
                    .collect(toImmutableList()),
            partitionCount,
            false));

    HashBuilderOperatorFactory buildOperatorFactory = new HashBuilderOperatorFactory(
            1,
            new PlanNodeId("build"),
            lookupSourceFactoryManager,
            rangeList(buildPages.getTypes().size()),
            hashChannels,
            buildPages.getHashChannel()
                    .map(OptionalInt::of).orElse(OptionalInt.empty()),
            filterFunctionFactory,
            Optional.empty(),
            ImmutableList.of(),
            100,
            new PagesIndex.TestingFactory(false),
            spillEnabled,
            singleStreamSpillerFactory);
    return new BuildSideSetup(lookupSourceFactoryManager, buildOperatorFactory, sourceOperatorFactory, partitionCount);
}
 
Example 14
Source File: HiveTestUtils.java    From presto with Apache License 2.0
public static HiveRecordCursorProvider createGenericHiveRecordCursorProvider(HdfsEnvironment hdfsEnvironment)
{
    return new GenericHiveRecordCursorProvider(hdfsEnvironment, DataSize.of(100, MEGABYTE));
}
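
Note: the 100 MB DataSize here appears to cap the length of a single line the generic cursor will read from text files.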
 
Example 15
Source File: TestHttpPageBufferClient.java    From presto with Apache License 2.0
@Test
public void testExceptionFromResponseHandler()
        throws Exception
{
    TestingTicker ticker = new TestingTicker();
    AtomicReference<Duration> tickerIncrement = new AtomicReference<>(new Duration(0, TimeUnit.SECONDS));

    TestingHttpClient.Processor processor = (input) -> {
        Duration delta = tickerIncrement.get();
        ticker.increment(delta.toMillis(), TimeUnit.MILLISECONDS);
        throw new RuntimeException("Foo");
    };

    CyclicBarrier requestComplete = new CyclicBarrier(2);
    TestingClientCallback callback = new TestingClientCallback(requestComplete);

    URI location = URI.create("http://localhost:8080");
    HttpPageBufferClient client = new HttpPageBufferClient(
            "localhost",
            new TestingHttpClient(processor, scheduler),
            DataIntegrityVerification.ABORT,
            DataSize.of(10, Unit.MEGABYTE),
            new Duration(30, TimeUnit.SECONDS),
            true,
            location,
            callback,
            scheduler,
            ticker,
            pageBufferClientCallbackExecutor);

    assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");

    // request processor will throw exception, verify the request is marked as completed
    // this starts the error stopwatch
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 0);
    assertStatus(client, location, "queued", 0, 1, 1, 1, "not scheduled");

    // advance time forward, but not enough to fail the client
    tickerIncrement.set(new Duration(30, TimeUnit.SECONDS));

    // verify that the client has not failed
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 2);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 0);
    assertStatus(client, location, "queued", 0, 2, 2, 2, "not scheduled");

    // advance time forward beyond the minimum error duration
    tickerIncrement.set(new Duration(31, TimeUnit.SECONDS));

    // verify that the client has failed
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 3);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 1);
    assertInstanceOf(callback.getFailure(), PageTransportTimeoutException.class);
    assertContains(callback.getFailure().getMessage(), WORKER_NODE_ERROR + " (http://localhost:8080/0 - 3 failures, failure duration 31.00s, total failed request time 31.00s)");
    assertStatus(client, location, "queued", 0, 3, 3, 3, "not scheduled");
}
 
Example 16
Source File: TestHttpPageBufferClient.java    From presto with Apache License 2.0
@Test
public void testInvalidResponses()
        throws Exception
{
    CyclicBarrier beforeRequest = new CyclicBarrier(1);
    CyclicBarrier afterRequest = new CyclicBarrier(1);
    StaticRequestProcessor processor = new StaticRequestProcessor(beforeRequest, afterRequest);

    CyclicBarrier requestComplete = new CyclicBarrier(2);
    TestingClientCallback callback = new TestingClientCallback(requestComplete);

    URI location = URI.create("http://localhost:8080");
    HttpPageBufferClient client = new HttpPageBufferClient(
            "localhost",
            new TestingHttpClient(processor, scheduler),
            DataIntegrityVerification.ABORT,
            DataSize.of(10, Unit.MEGABYTE),
            new Duration(1, TimeUnit.MINUTES),
            true,
            location,
            callback,
            scheduler,
            pageBufferClientCallbackExecutor);

    assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");

    // send not found response and verify response was ignored
    processor.setResponse(new TestingResponse(HttpStatus.NOT_FOUND, ImmutableListMultimap.of(CONTENT_TYPE, PRESTO_PAGES), new byte[0]));
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 1);
    assertInstanceOf(callback.getFailure(), PageTransportErrorException.class);
    assertContains(callback.getFailure().getMessage(), "Expected response code to be 200, but was 404");
    assertStatus(client, location, "queued", 0, 1, 1, 1, "not scheduled");

    // send invalid content type response and verify response was ignored
    callback.resetStats();
    processor.setResponse(new TestingResponse(HttpStatus.OK, ImmutableListMultimap.of(CONTENT_TYPE, "INVALID_TYPE"), new byte[0]));
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 1);
    assertInstanceOf(callback.getFailure(), PageTransportErrorException.class);
    assertContains(callback.getFailure().getMessage(), "Expected application/x-presto-pages response from server but got INVALID_TYPE");
    assertStatus(client, location, "queued", 0, 2, 2, 2, "not scheduled");

    // send unexpected content type response and verify response was ignored
    callback.resetStats();
    processor.setResponse(new TestingResponse(HttpStatus.OK, ImmutableListMultimap.of(CONTENT_TYPE, "text/plain"), new byte[0]));
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 1);
    assertInstanceOf(callback.getFailure(), PageTransportErrorException.class);
    assertContains(callback.getFailure().getMessage(), "Expected application/x-presto-pages response from server but got text/plain");
    assertStatus(client, location, "queued", 0, 3, 3, 3, "not scheduled");

    // close client and verify
    client.close();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertStatus(client, location, "closed", 0, 3, 4, 3, "not scheduled");
}
 
Example 17
Source File: AbstractOperatorBenchmark.java    From presto with Apache License 2.0
@Override
protected Map<String, Long> runOnce()
{
    Session session = testSessionBuilder()
            .setSystemProperty("optimizer.optimize-hash-generation", "true")
            .setTransactionId(this.session.getRequiredTransactionId())
            .build();
    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), DataSize.of(1, GIGABYTE));
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(DataSize.of(1, GIGABYTE));

    TaskContext taskContext = new QueryContext(
            new QueryId("test"),
            DataSize.of(256, MEGABYTE),
            DataSize.of(512, MEGABYTE),
            memoryPool,
            new TestingGcMonitor(),
            localQueryRunner.getExecutor(),
            localQueryRunner.getScheduler(),
            DataSize.of(256, MEGABYTE),
            spillSpaceTracker)
            .addTaskContext(new TaskStateMachine(new TaskId("query", 0, 0), localQueryRunner.getExecutor()),
                    session,
                    false,
                    false,
                    OptionalInt.empty());

    CpuTimer cpuTimer = new CpuTimer();
    Map<String, Long> executionStats = execute(taskContext);
    CpuDuration executionTime = cpuTimer.elapsedTime();

    TaskStats taskStats = taskContext.getTaskStats();
    long inputRows = taskStats.getRawInputPositions();
    long inputBytes = taskStats.getRawInputDataSize().toBytes();
    long outputRows = taskStats.getOutputPositions();
    long outputBytes = taskStats.getOutputDataSize().toBytes();

    double inputMegaBytes = ((double) inputBytes) / MEGABYTE.inBytes();

    return ImmutableMap.<String, Long>builder()
            // legacy computed values
            .putAll(executionStats)
            .put("elapsed_millis", executionTime.getWall().toMillis())
            .put("input_rows_per_second", (long) (inputRows / executionTime.getWall().getValue(SECONDS)))
            .put("output_rows_per_second", (long) (outputRows / executionTime.getWall().getValue(SECONDS)))
            .put("input_megabytes", (long) inputMegaBytes)
            .put("input_megabytes_per_second", (long) (inputMegaBytes / executionTime.getWall().getValue(SECONDS)))

            .put("wall_nanos", executionTime.getWall().roundTo(NANOSECONDS))
            .put("cpu_nanos", executionTime.getCpu().roundTo(NANOSECONDS))
            .put("user_nanos", executionTime.getUser().roundTo(NANOSECONDS))
            .put("input_rows", inputRows)
            .put("input_bytes", inputBytes)
            .put("output_rows", outputRows)
            .put("output_bytes", outputBytes)

            .build();
}
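
Note: besides DataSize.of, this benchmark uses the Unit enum directly; MEGABYTE.inBytes() supplies the bytes-per-megabyte factor for converting the raw input byte count into megabytes.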
 
Example 18
Source File: TestHttpPageBufferClient.java    From presto with Apache License 2.0
@Test
public void testHappyPath()
        throws Exception
{
    Page expectedPage = new Page(100);

    DataSize expectedMaxSize = DataSize.of(11, Unit.MEGABYTE);
    MockExchangeRequestProcessor processor = new MockExchangeRequestProcessor(expectedMaxSize);

    CyclicBarrier requestComplete = new CyclicBarrier(2);

    TestingClientCallback callback = new TestingClientCallback(requestComplete);

    URI location = URI.create("http://localhost:8080");
    HttpPageBufferClient client = new HttpPageBufferClient(
            "localhost",
            new TestingHttpClient(processor, scheduler),
            DataIntegrityVerification.ABORT,
            expectedMaxSize,
            new Duration(1, TimeUnit.MINUTES),
            true,
            location,
            callback,
            scheduler,
            pageBufferClientCallbackExecutor);

    assertStatus(client, location, "queued", 0, 0, 0, 0, "not scheduled");

    // fetch a page and verify
    processor.addPage(location, expectedPage);
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);

    assertEquals(callback.getPages().size(), 1);
    assertPageEquals(expectedPage, callback.getPages().get(0));
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertStatus(client, location, "queued", 1, 1, 1, 0, "not scheduled");

    // fetch no data and verify
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);

    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertStatus(client, location, "queued", 1, 2, 2, 0, "not scheduled");

    // fetch two more pages and verify
    processor.addPage(location, expectedPage);
    processor.addPage(location, expectedPage);
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);

    assertEquals(callback.getPages().size(), 2);
    assertPageEquals(expectedPage, callback.getPages().get(0));
    assertPageEquals(expectedPage, callback.getPages().get(1));
    assertEquals(callback.getCompletedRequests(), 1);
    assertEquals(callback.getFinishedBuffers(), 0);
    assertEquals(callback.getFailedBuffers(), 0);
    callback.resetStats();
    assertStatus(client, location, "queued", 3, 3, 3, 0, "not scheduled");

    // finish and verify
    callback.resetStats();
    processor.setComplete(location);
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);

    // get the buffer complete signal
    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 1);

    // schedule the delete call to the buffer
    callback.resetStats();
    client.scheduleRequest();
    requestComplete.await(10, TimeUnit.SECONDS);
    assertEquals(callback.getFinishedBuffers(), 1);

    assertEquals(callback.getPages().size(), 0);
    assertEquals(callback.getCompletedRequests(), 0);
    assertEquals(callback.getFailedBuffers(), 0);

    assertStatus(client, location, "closed", 3, 5, 5, 0, "not scheduled");
}
 
Example 19
Source File: TestCachingOrcDataSource.java    From presto with Apache License 2.0
@Test
public void testWrapWithCacheIfTinyStripes()
{
    DataSize maxMergeDistance = DataSize.of(1, Unit.MEGABYTE);
    DataSize tinyStripeThreshold = DataSize.of(8, Unit.MEGABYTE);

    OrcDataSource actual = wrapWithCacheIfTinyStripes(
            FakeOrcDataSource.INSTANCE,
            ImmutableList.of(),
            maxMergeDistance,
            tinyStripeThreshold);
    assertInstanceOf(actual, CachingOrcDataSource.class);

    actual = wrapWithCacheIfTinyStripes(
            FakeOrcDataSource.INSTANCE,
            ImmutableList.of(new StripeInformation(123, 3, 10, 10, 10)),
            maxMergeDistance,
            tinyStripeThreshold);
    assertInstanceOf(actual, CachingOrcDataSource.class);

    actual = wrapWithCacheIfTinyStripes(
            FakeOrcDataSource.INSTANCE,
            ImmutableList.of(new StripeInformation(123, 3, 10, 10, 10), new StripeInformation(123, 33, 10, 10, 10), new StripeInformation(123, 63, 10, 10, 10)),
            maxMergeDistance,
            tinyStripeThreshold);
    assertInstanceOf(actual, CachingOrcDataSource.class);

    actual = wrapWithCacheIfTinyStripes(
            FakeOrcDataSource.INSTANCE,
            ImmutableList.of(new StripeInformation(123, 3, 10, 10, 10), new StripeInformation(123, 33, 10, 10, 10), new StripeInformation(123, 63, 1048576 * 8 - 20, 10, 10)),
            maxMergeDistance,
            tinyStripeThreshold);
    assertInstanceOf(actual, CachingOrcDataSource.class);

    actual = wrapWithCacheIfTinyStripes(
            FakeOrcDataSource.INSTANCE,
            ImmutableList.of(new StripeInformation(123, 3, 10, 10, 10), new StripeInformation(123, 33, 10, 10, 10), new StripeInformation(123, 63, 1048576 * 8 - 20 + 1, 10, 10)),
            maxMergeDistance,
            tinyStripeThreshold);
    assertNotInstanceOf(actual, CachingOrcDataSource.class);
}
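
Note: the last two calls differ by a single byte in the third stripe, landing just under and just over the 8 MB tinyStripeThreshold, which is why the first still yields a CachingOrcDataSource and the second does not.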
 
Example 20
Source File: MemoryLocalQueryRunner.java    From presto with Apache License 2.0
public List<Page> execute(@Language("SQL") String query)
{
    MemoryPool memoryPool = new MemoryPool(new MemoryPoolId("test"), DataSize.of(2, GIGABYTE));
    SpillSpaceTracker spillSpaceTracker = new SpillSpaceTracker(DataSize.of(1, GIGABYTE));
    QueryContext queryContext = new QueryContext(
            new QueryId("test"),
            DataSize.of(1, GIGABYTE),
            DataSize.of(2, GIGABYTE),
            memoryPool,
            new TestingGcMonitor(),
            localQueryRunner.getExecutor(),
            localQueryRunner.getScheduler(),
            DataSize.of(4, GIGABYTE),
            spillSpaceTracker);

    TaskContext taskContext = queryContext
            .addTaskContext(new TaskStateMachine(new TaskId("query", 0, 0), localQueryRunner.getExecutor()),
                    localQueryRunner.getDefaultSession(),
                    false,
                    false,
                    OptionalInt.empty());

    // collect results in memory via PageConsumerOutputFactory rather than copying them out, to avoid affecting benchmark results
    ImmutableList.Builder<Page> output = ImmutableList.builder();
    List<Driver> drivers = localQueryRunner.createDrivers(
            query,
            new PageConsumerOperator.PageConsumerOutputFactory(types -> output::add),
            taskContext);

    boolean done = false;
    while (!done) {
        boolean processed = false;
        for (Driver driver : drivers) {
            if (!driver.isFinished()) {
                driver.process();
                processed = true;
            }
        }
        done = !processed;
    }

    return output.build();
}