Java Code Examples for io.prestosql.spi.connector.ConnectorSplit

The following examples show how to use io.prestosql.spi.connector.ConnectorSplit. These examples are extracted from open source projects; the originating project and source file are named above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: presto   Source File: TestPrometheusSplit.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testQueryDividedIntoSplitsLastSplitHasRightTime()
        throws URISyntaxException
{
    // Pin "now" so the expected end time of the final split is deterministic.
    LocalDateTime fixedNow = LocalDateTime.of(2019, 10, 2, 7, 26, 56, 0);
    PrometheusConnectorConfig config = getCommonConfig(prometheusHttpServer.resolve("/prometheus-data/prometheus-metrics.json"), fixedNow);
    PrometheusClient client = new PrometheusClient(config, METRIC_CODEC, TYPE_MANAGER);
    PrometheusTable table = client.getTable("default", "up");
    PrometheusSplitManager splitManager = new PrometheusSplitManager(client, config);
    ConnectorSplitSource splitSource = splitManager.getSplits(
            null,
            null,
            (ConnectorTableHandle) new PrometheusTableHandle("default", table.getName()),
            null);
    List<ConnectorSplit> splits = splitSource.getNextBatch(NOT_PARTITIONED, NUMBER_MORE_THAN_EXPECTED_NUMBER_SPLITS).getNow(null).getSplits();

    // The last split's query must end exactly at the pinned "now" instant.
    PrometheusSplit lastSplit = (PrometheusSplit) splits.get(splits.size() - 1);
    String actualQuery = lastSplit.getUri().getQuery();
    String expectedTime = decimalSecondString(fixedNow.atZone(ZoneId.systemDefault()).toInstant().toEpochMilli());
    URI expectedUri = new URI("http://doesnotmatter:9090/api/v1/query?query=up[" +
            getQueryChunkSizeDurationAsPrometheusCompatibleDurationString(config) + "]" +
            "&time=" + expectedTime);
    assertEquals(actualQuery, expectedUri.getQuery());
}
 
Example 2
Source Project: presto   Source File: PinotSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Adds one split per group of segments for the routing-table entry matching
 * {@code tableName + tableNameSuffix} (case-insensitive).
 *
 * @param splits         output list to which the generated splits are appended
 * @param routingTable   table name -> (host -> segments hosted there)
 * @param tableName      base table name
 * @param tableNameSuffix suffix distinguishing e.g. offline/realtime tables
 * @param session        session used to read the segments-per-split setting
 * @param timePredicate  optional time predicate forwarded to each split
 */
protected void generateSegmentSplits(
        List<ConnectorSplit> splits,
        Map<String, Map<String, List<String>>> routingTable,
        String tableName,
        String tableNameSuffix,
        ConnectorSession session,
        Optional<String> timePredicate)
{
    final String finalTableName = tableName + tableNameSuffix;
    int segmentsPerSplitConfigured = PinotSessionProperties.getSegmentsPerSplit(session);
    // Iterate over entries directly rather than keySet() + get(): avoids a
    // second map lookup for the matching table.
    for (Map.Entry<String, Map<String, List<String>>> entry : routingTable.entrySet()) {
        if (!entry.getKey().equalsIgnoreCase(finalTableName)) {
            continue;
        }

        entry.getValue().forEach((host, segments) -> {
            // Cap each split at the configured segment count (but never 0 for a non-empty host).
            int numSegmentsInThisSplit = Math.min(segments.size(), segmentsPerSplitConfigured);
            // segments is already shuffled
            Iterables.partition(segments, numSegmentsInThisSplit).forEach(
                    segmentsForThisSplit -> splits.add(
                            createSegmentSplit(tableNameSuffix, segmentsForThisSplit, host, timePredicate)));
        });
    }
}
 
Example 3
Source Project: presto   Source File: ExampleSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle connectorTableHandle,
        SplitSchedulingStrategy splitSchedulingStrategy)
{
    ExampleTableHandle handle = (ExampleTableHandle) connectorTableHandle;
    ExampleTable exampleTable = exampleClient.getTable(handle.getSchemaName(), handle.getTableName());

    // The table may be dropped while a query referencing it is still running.
    if (exampleTable == null) {
        throw new TableNotFoundException(handle.toSchemaTableName());
    }

    // One split per source URI, in random order so load spreads across workers.
    List<ConnectorSplit> result = new ArrayList<>();
    exampleTable.getSources().forEach(source -> result.add(new ExampleSplit(source)));
    Collections.shuffle(result);

    return new FixedSplitSource(result);
}
 
Example 4
Source Project: presto   Source File: BlackHolePageSourceProvider.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transactionHandle,
        ConnectorSession session,
        ConnectorSplit split,
        ConnectorTableHandle tableHandle,
        List<ColumnHandle> columns,
        TupleDomain<ColumnHandle> dynamicFilter)
{
    BlackHoleTableHandle blackHoleTable = (BlackHoleTableHandle) tableHandle;

    // Collect the requested column types in projection order.
    List<Type> columnTypes = columns.stream()
            .map(column -> ((BlackHoleColumnHandle) column).getColumnType())
            .collect(ImmutableList.toImmutableList());

    // A single zero-filled page is emitted repeatedly by the page source.
    Page zeroPage = generateZeroPage(columnTypes, blackHoleTable.getRowsPerPage(), blackHoleTable.getFieldsLength());
    return new BlackHolePageSource(zeroPage, blackHoleTable.getPagesPerSplit(), executorService, blackHoleTable.getPageProcessingDelay());
}
 
Example 5
Source Project: presto   Source File: TestBeginQuery.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorPageSourceProvider getPageSourceProvider()
{
    // Provider whose page sources carry no data: the test exercises connector
    // lifecycle hooks, so every split yields an immediately-exhausted source.
    return new ConnectorPageSourceProvider()
    {
        @Override
        public ConnectorPageSource createPageSource(
                ConnectorTransactionHandle transaction,
                ConnectorSession session,
                ConnectorSplit split,
                ConnectorTableHandle table,
                List<ColumnHandle> columns,
                TupleDomain<ColumnHandle> dynamicFilter)
        {
            // Empty page list -> page source that is finished from the start.
            return new FixedPageSource(ImmutableList.of());
        }
    };
}
 
Example 6
Source Project: presto   Source File: JmxSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle table, SplitSchedulingStrategy splitSchedulingStrategy)
{
    JmxTableHandle jmxTableHandle = (JmxTableHandle) table;

    // Locate the synthetic node column used for node-based predicate pushdown.
    //TODO is there a better way to get the node column?
    Optional<JmxColumnHandle> nodeColumn = jmxTableHandle.getColumnHandles().stream()
            .filter(column -> column.getColumnName().equals(NODE_COLUMN_NAME))
            .findFirst();
    checkState(nodeColumn.isPresent(), "Failed to find %s column", NODE_COLUMN_NAME);

    TupleDomain<ColumnHandle> nodeFilter = jmxTableHandle.getNodeFilter();

    // One split per node whose identifier satisfies the pushed-down node filter.
    List<ConnectorSplit> splits = nodeManager.getAllNodes().stream()
            .filter(node -> nodeFilter.overlaps(fromFixedValues(ImmutableMap.of(
                    nodeColumn.get(),
                    NullableValue.of(createUnboundedVarcharType(), utf8Slice(node.getNodeIdentifier()))))))
            .map(node -> new JmxSplit(ImmutableList.of(node.getHostAndPort())))
            .collect(toList());

    return new FixedSplitSource(splits);
}
 
Example 7
Source Project: presto   Source File: TestJmxSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testPredicatePushdown()
        throws Exception
{
    // For each node, build a table handle whose node filter pins exactly that
    // node's identifier, and verify the split manager produces a single split
    // addressed to that node only.
    for (Node node : nodes) {
        String nodeIdentifier = node.getNodeIdentifier();
        TupleDomain<ColumnHandle> nodeTupleDomain = TupleDomain.fromFixedValues(ImmutableMap.of(columnHandle, NullableValue.of(createUnboundedVarcharType(), utf8Slice(nodeIdentifier))));
        JmxTableHandle tableHandle = new JmxTableHandle(new SchemaTableName("schema", "tableName"), ImmutableList.of("objectName"), ImmutableList.of(columnHandle), true, nodeTupleDomain);

        ConnectorSplitSource splitSource = splitManager.getSplits(JmxTransactionHandle.INSTANCE, SESSION, tableHandle, UNGROUPED_SCHEDULING);
        List<ConnectorSplit> allSplits = getAllSplits(splitSource);

        // Exactly one split, with exactly one address, matching the filtered node.
        assertEquals(allSplits.size(), 1);
        assertEquals(allSplits.get(0).getAddresses().size(), 1);
        assertEquals(allSplits.get(0).getAddresses().get(0).getHostText(), nodeIdentifier);
    }
}
 
Example 8
Source Project: presto   Source File: BigQueryPageSourceProvider.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        ConnectorTableHandle table,
        List<ColumnHandle> columns,
        TupleDomain<ColumnHandle> dynamicFilter)
{
    log.debug("createPageSource(transaction=%s, session=%s, split=%s, table=%s, columns=%s)", transaction, session, split, table, columns);
    BigQuerySplit bigQuerySplit = (BigQuerySplit) split;

    // An empty-projection split (e.g. a bare count) only carries a row count.
    if (bigQuerySplit.representsEmptyProjection()) {
        return new BigQueryEmptyProjectionPageSource(bigQuerySplit.getEmptyRowsToGenerate());
    }

    // Regular projection: read the requested columns through the storage API.
    List<BigQueryColumnHandle> columnHandles = columns.stream()
            .map(column -> (BigQueryColumnHandle) column)
            .collect(toImmutableList());

    return new BigQueryResultPageSource(bigQueryStorageClientFactory, maxReadRowsRetries, bigQuerySplit, columnHandles);
}
 
Example 9
Source Project: presto   Source File: JdbcRecordSetProvider.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns)
{
    JdbcSplit jdbcSplit = (JdbcSplit) split;
    JdbcTableHandle jdbcTable = (JdbcTableHandle) table;

    // In the current API, the columns (and order) needed by the engine are provided via an argument to this method. Make sure that
    // any columns that were recorded in the table handle match the requested set.
    // If no columns are recorded, it means that applyProjection never got called (e.g., in the case all columns are being used) and all
    // table columns should be returned. TODO: this is something that should be addressed once the getRecordSet API is revamped
    jdbcTable.getColumns()
            .ifPresent(tableColumns -> verify(columns.equals(tableColumns)));

    List<JdbcColumnHandle> columnHandles = columns.stream()
            .map(column -> (JdbcColumnHandle) column)
            .collect(ImmutableList.toImmutableList());

    return new JdbcRecordSet(jdbcClient, session, jdbcSplit, jdbcTable, columnHandles);
}
 
Example 10
Source Project: presto   Source File: TpcdsSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout, SplitSchedulingStrategy splitSchedulingStrategy)
{
    Set<Node> workerNodes = nodeManager.getRequiredWorkerNodes();
    checkState(!workerNodes.isEmpty(), "No TPCDS nodes available");

    int totalParts = workerNodes.size() * splitsPerNode;

    // Hand each worker splitsPerNode consecutive part numbers so generated
    // data is partitioned evenly across the cluster.
    ImmutableList.Builder<ConnectorSplit> splitBuilder = ImmutableList.builder();
    int partNumber = 0;
    for (Node workerNode : workerNodes) {
        for (int splitIndex = 0; splitIndex < splitsPerNode; splitIndex++) {
            splitBuilder.add(new TpcdsSplit(partNumber, totalParts, ImmutableList.of(workerNode.getHostAndPort()), noSexism));
            partNumber++;
        }
    }
    return new FixedSplitSource(splitBuilder.build());
}
 
Example 11
Source Project: presto   Source File: CassandraSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
private List<ConnectorSplit> getSplitsByTokenRange(CassandraTable table, String partitionId, Optional<Long> sessionSplitsPerNode)
{
    String schemaName = table.getTableHandle().getSchemaName();
    String tableName = table.getTableHandle().getTableName();
    String tokenExpression = table.getTokenExpression();

    // One split per token range: each carries a token condition and the hosts
    // that own that range.
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (CassandraTokenSplitManager.TokenSplit tokenSplit : tokenSplitMgr.getSplits(schemaName, tableName, sessionSplitsPerNode)) {
        List<HostAddress> addresses = new HostAddressFactory().hostAddressNamesToHostAddressList(tokenSplit.getHosts());
        String condition = buildTokenCondition(tokenExpression, tokenSplit.getStartToken(), tokenSplit.getEndToken());
        splits.add(new CassandraSplit(partitionId, condition, addresses));
    }

    return splits.build();
}
 
Example 12
Source Project: presto   Source File: AtopPageSourceProvider.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        ConnectorTableHandle table,
        List<ColumnHandle> columns,
        TupleDomain<ColumnHandle> dynamicFilter)
{
    AtopTableHandle tableHandle = (AtopTableHandle) table;
    AtopSplit atopSplit = (AtopSplit) split;

    // Resolve, in projection order, each requested column's Atop metadata and
    // its engine Type (the two lists stay index-aligned).
    ImmutableList.Builder<Type> types = ImmutableList.builder();
    ImmutableList.Builder<AtopColumn> atopColumns = ImmutableList.builder();

    for (ColumnHandle column : columns) {
        AtopColumnHandle atopColumnHandle = (AtopColumnHandle) column;
        AtopColumn atopColumn = tableHandle.getTable().getColumn(atopColumnHandle.getName());
        atopColumns.add(atopColumn);
        types.add(typeManager.getType(atopColumn.getType()));
    }

    // Splits are expected to be aligned to whole days; comparing the date
    // against itself truncated to midnight (in its own zone) enforces that.
    ZonedDateTime date = atopSplit.getDate();
    checkArgument(date.equals(date.withHour(0).withMinute(0).withSecond(0).withNano(0)), "Expected date to be at beginning of day");
    return new AtopPageSource(readerPermits, atopFactory, session, utf8Slice(atopSplit.getHost().getHostText()), tableHandle.getTable(), date, atopColumns.build(), types.build());
}
 
Example 13
Source Project: presto   Source File: NodePartitioningManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a function mapping an engine {@code Split} to its bucket number,
 * using the connector's split-bucket function for the given partitioning.
 */
private ToIntFunction<Split> getSplitToBucket(Session session, PartitioningHandle partitioningHandle)
{
    ConnectorNodePartitioningProvider partitioningProvider = partitioningProviders.get(partitioningHandle.getConnectorId().get());
    checkArgument(partitioningProvider != null, "No partitioning provider for connector %s", partitioningHandle.getConnectorId().get());

    ToIntFunction<ConnectorSplit> splitBucketFunction = partitioningProvider.getSplitBucketFunction(
            partitioningHandle.getTransactionHandle().orElse(null),
            session.toConnectorSession(),
            partitioningHandle.getConnectorHandle());
    checkArgument(splitBucketFunction != null, "No partitioning %s", partitioningHandle);

    return split -> {
        int bucket;
        if (split.getConnectorSplit() instanceof EmptySplit) {
            // Empty splits carry no data, so derive the bucket from the lifespan:
            // task-wide lifespans default to bucket 0.
            bucket = split.getLifespan().isTaskWide() ? 0 : split.getLifespan().getId();
        }
        else {
            bucket = splitBucketFunction.applyAsInt(split.getConnectorSplit());
        }
        // For grouped execution, a split's bucket must agree with its lifespan id.
        if (!split.getLifespan().isTaskWide()) {
            checkArgument(split.getLifespan().getId() == bucket);
        }
        return bucket;
    };
}
 
Example 14
Source Project: presto   Source File: CassandraRecordSetProvider.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns)
{
    CassandraSplit cassandraSplit = (CassandraSplit) split;
    CassandraTableHandle cassandraTable = (CassandraTableHandle) table;

    List<CassandraColumnHandle> cassandraColumns = columns.stream()
            .map(column -> (CassandraColumnHandle) column)
            .collect(toList());

    // Build the base SELECT, drop any trailing ';' so the split's WHERE clause
    // can be appended, then append it.
    String cql = CassandraCqlUtils.selectFrom(cassandraTable, cassandraColumns).getQueryString();
    if (cql.charAt(cql.length() - 1) == ';') {
        cql = cql.substring(0, cql.length() - 1);
    }
    cql = cql + cassandraSplit.getWhereClause();
    log.debug("Creating record set: %s", cql);

    return new CassandraRecordSet(cassandraSession, cql, cassandraColumns);
}
 
Example 15
Source Project: presto   Source File: TestingPageSourceProvider.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Returns a page source producing a single one-row page with one null
 * byte-array block per requested column (test fixture; column handles'
 * contents are ignored).
 */
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        ConnectorTableHandle table,
        List<ColumnHandle> columns,
        TupleDomain<ColumnHandle> dynamicFilter)
{
    requireNonNull(columns, "columns is null");

    // One single-row, all-null block per column; the handle itself is unused.
    ImmutableList<Block> blocks = columns.stream()
            .map(column -> new ByteArrayBlock(1, Optional.of(new boolean[] {true}), new byte[1]))
            .collect(toImmutableList());

    // toArray(new Block[0]) is the idiomatic (and JIT-friendly) form.
    return new FixedPageSource(ImmutableList.of(new Page(blocks.toArray(new Block[0]))));
}
 
Example 16
Source Project: presto   Source File: TestSourcePartitionedScheduler.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Drains up to {@code maxSize} splits from the queue and returns them as an
 * immutable snapshot. Synchronized so the drain and the future reset are atomic
 * with respect to concurrent producers.
 */
private synchronized List<ConnectorSplit> getBatch(int maxSize)
{
    // take up to maxSize elements from the queue
    List<ConnectorSplit> elements = new ArrayList<>(maxSize);
    queue.drainTo(elements, maxSize);

    // if the queue is empty and the current future is finished, create a new one so
    // a new readers can be notified when the queue has elements to read
    if (queue.isEmpty() && !closed) {
        if (notEmptyFuture.isDone()) {
            notEmptyFuture = new CompletableFuture<>();
        }
    }

    return ImmutableList.copyOf(elements);
}
 
Example 17
Source Project: presto   Source File: MongoPageSourceProvider.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        ConnectorTableHandle table,
        List<ColumnHandle> columns,
        TupleDomain<ColumnHandle> dynamicFilter)
{
    MongoTableHandle mongoTable = (MongoTableHandle) table;

    // Narrow the engine-level column handles to Mongo handles, in projection order.
    List<MongoColumnHandle> columnHandles = requireNonNull(columns, "columns is null").stream()
            .map(column -> (MongoColumnHandle) column)
            .collect(ImmutableList.toImmutableList());

    return new MongoPageSource(mongoSession, mongoTable, columnHandles);
}
 
Example 18
Source Project: presto   Source File: TpchSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy)
{
    Set<Node> workerNodes = nodeManager.getRequiredWorkerNodes();

    int totalParts = workerNodes.size() * splitsPerNode;

    // Assign each worker splitsPerNode consecutive part numbers so the
    // generated data is split evenly across the cluster.
    ImmutableList.Builder<ConnectorSplit> splitBuilder = ImmutableList.builder();
    int partNumber = 0;
    for (Node workerNode : workerNodes) {
        for (int splitIndex = 0; splitIndex < splitsPerNode; splitIndex++) {
            splitBuilder.add(new TpchSplit(partNumber, totalParts, ImmutableList.of(workerNode.getHostAndPort())));
            partNumber++;
        }
    }
    return new FixedSplitSource(splitBuilder.build());
}
 
Example 19
Source Project: presto   Source File: RaptorSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Returns a supplier that, when invoked, materializes the next batch of up to
 * {@code maxSize} splits from the shared iterator.
 */
private Supplier<ConnectorSplitBatch> batchSupplier(int maxSize)
{
    return () -> {
        ImmutableList.Builder<ConnectorSplit> list = ImmutableList.builder();
        for (int i = 0; i < maxSize; i++) {
            // Abort promptly if the fetching thread was cancelled.
            if (Thread.currentThread().isInterrupted()) {
                throw new RuntimeException("Split batch fetch was interrupted");
            }
            if (!iterator.hasNext()) {
                break;
            }
            list.add(createSplit(iterator.next()));
        }
        // isFinished() tells the engine whether more batches will follow.
        return new ConnectorSplitBatch(list.build(), isFinished());
    };
}
 
Example 20
Source Project: presto   Source File: RaptorSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
private ConnectorSplit createBucketSplit(int bucketNumber, Set<ShardNodes> shards)
{
    // A bucket split reads every shard of the bucket, and runs on the single
    // node assigned to that bucket.
    String assignedNodeId = bucketToNode.get().get(bucketNumber);
    Node assignedNode = nodesById.get(assignedNodeId);
    if (assignedNode == null) {
        throw new PrestoException(NO_NODES_AVAILABLE, "Node for bucket is offline: " + assignedNodeId);
    }

    Set<UUID> shardUuids = shards.stream()
            .map(ShardNodes::getShardUuid)
            .collect(toSet());

    return new RaptorSplit(shardUuids, bucketNumber, assignedNode.getHostAndPort(), transactionId);
}
 
Example 21
Source Project: presto   Source File: SheetsSplitManager.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Produces a single split carrying the whole sheet's values.
 *
 * @throws TableNotFoundException if the table vanished since planning
 */
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle connectorTableHandle,
        SplitSchedulingStrategy splitSchedulingStrategy)
{
    SheetsTableHandle tableHandle = (SheetsTableHandle) connectorTableHandle;
    Optional<SheetsTable> table = sheetsClient.getTable(tableHandle.getTableName());

    // this can happen if table is removed during a query
    if (table.isEmpty()) {
        throw new TableNotFoundException(tableHandle.toSchemaTableName());
    }

    // Exactly one split is produced, so the former Collections.shuffle call
    // (a no-op on a singleton list) has been removed.
    List<ConnectorSplit> splits = new ArrayList<>();
    splits.add(new SheetsSplit(tableHandle.getSchemaName(), tableHandle.getTableName(), table.get().getValues()));
    return new FixedSplitSource(splits);
}
 
Example 22
Source Project: presto   Source File: ThriftSplitManager.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Returns a future with a list of splits.
 * This method is assumed to be called in a single-threaded way.
 * It can be called by multiple threads, but only if the previous call finished.
 */
@Override
public CompletableFuture<ConnectorSplitBatch> getNextBatch(ConnectorPartitionHandle partitionHandle, int maxSize)
{
    // Guard the single-outstanding-call contract documented above.
    checkState(future.get() == null || future.get().isDone(), "previous batch not completed");
    checkState(hasMoreData.get(), "this method cannot be invoked when there's no more data");
    // The continuation token from the previous batch (null on the first call).
    PrestoThriftId currentToken = nextToken.get();
    ListenableFuture<PrestoThriftSplitBatch> splitsFuture = client.getSplits(
            schemaTableName,
            new PrestoThriftNullableColumnSet(columnNames.orElse(null)),
            constraint,
            maxSize,
            new PrestoThriftNullableToken(currentToken));
    ListenableFuture<ConnectorSplitBatch> resultFuture = Futures.transform(
            splitsFuture,
            batch -> {
                requireNonNull(batch, "batch is null");
                List<ConnectorSplit> splits = batch.getSplits().stream()
                        .map(ThriftSplitSource::toConnectorSplit)
                        .collect(toImmutableList());
                // CAS guards detect a concurrent caller violating the contract;
                // a null next token from the server means the source is drained.
                checkState(nextToken.compareAndSet(currentToken, batch.getNextToken()));
                checkState(hasMoreData.compareAndSet(true, nextToken.get() != null));
                return new ConnectorSplitBatch(splits, isFinished());
            }, directExecutor());
    // Map transport-level thrift failures onto Presto exceptions.
    resultFuture = catchingThriftException(resultFuture);
    future.set(resultFuture);
    return toCompletableFuture(resultFuture);
}
 
Example 23
Source Project: presto   Source File: PrometheusRecordSetProvider.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns)
{
    PrometheusSplit prometheusSplit = (PrometheusSplit) split;

    // Narrow the engine column handles to Prometheus handles, keeping order.
    ImmutableList.Builder<PrometheusColumnHandle> columnHandles = ImmutableList.builder();
    columns.forEach(column -> columnHandles.add((PrometheusColumnHandle) column));

    return new PrometheusRecordSet(prometheusSplit, columnHandles.build());
}
 
Example 24
Source Project: presto   Source File: PrometheusSplitManager.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Produces one split per time chunk covering the configured max query range,
 * each split carrying a fully-formed Prometheus query URI.
 *
 * @throws TableNotFoundException if the table vanished since planning
 */
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle connectorTableHandle,
        SplitSchedulingStrategy splitSchedulingStrategy)
{
    PrometheusTableHandle tableHandle = (PrometheusTableHandle) connectorTableHandle;
    PrometheusTable table = prometheusClient.getTable(tableHandle.getSchemaName(), tableHandle.getTableName());

    // this can happen if table is removed during a query
    if (table == null) {
        throw new TableNotFoundException(tableHandle.toSchemaTableName());
    }
    List<ConnectorSplit> splits = generateTimesForSplits(PrometheusTimeMachine.now(), config.getMaxQueryRangeDuration(), config.getQueryChunkSizeDuration(), tableHandle)
            .stream()
            .map(time -> {
                try {
                    return new PrometheusSplit(buildQuery(config.getPrometheusURI(),
                            time,
                            table.getName(),
                            config.getQueryChunkSizeDuration()));
                }
                catch (URISyntaxException e) {
                    // Preserve the original exception as the cause instead of
                    // flattening it to just its message.
                    throw new PrestoException(PROMETHEUS_UNKNOWN_ERROR, "split URI invalid: " + e.getMessage(), e);
                }
            }).collect(Collectors.toList());
    return new FixedSplitSource(splits);
}
 
Example 25
Source Project: presto   Source File: ExampleRecordSetProvider.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns)
{
    ExampleSplit exampleSplit = (ExampleSplit) split;

    // Narrow the engine column handles to Example handles, keeping order.
    List<ExampleColumnHandle> columnHandles = columns.stream()
            .map(column -> (ExampleColumnHandle) column)
            .collect(ImmutableList.toImmutableList());

    return new ExampleRecordSet(exampleSplit, columnHandles);
}
 
Example 26
Source Project: presto   Source File: RedisRecordSetProvider.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns)
{
    RedisSplit redisSplit = convertSplit(split);

    List<RedisColumnHandle> redisColumns = columns.stream()
            .map(RedisHandleResolver::convertColumnHandle)
            .collect(ImmutableList.toImmutableList());

    // Key decoder handles the non-internal columns decoded from the Redis key.
    RowDecoder keyDecoder = decoderFactory.create(
            redisSplit.getKeyDataFormat(),
            emptyMap(),
            redisColumns.stream()
                    .filter(column -> !column.isInternal())
                    .filter(RedisColumnHandle::isKeyDecoder)
                    .collect(toImmutableSet()));

    // Value decoder handles the remaining non-internal columns, decoded from the value.
    RowDecoder valueDecoder = decoderFactory.create(
            redisSplit.getValueDataFormat(),
            emptyMap(),
            redisColumns.stream()
                    .filter(column -> !column.isInternal())
                    .filter(column -> !column.isKeyDecoder())
                    .collect(toImmutableSet()));

    return new RedisRecordSet(redisSplit, jedisManager, redisColumns, keyDecoder, valueDecoder);
}
 
Example 27
Source Project: presto   Source File: TestCassandraConnector.java    License: Apache License 2.0 5 votes vote down vote up
// Drains the split source completely, accumulating every batch of splits.
private static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> allSplits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        allSplits.addAll(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
    }
    return allSplits.build();
}
 
Example 28
Source Project: presto   Source File: BlackHoleNodePartitioningProvider.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Black hole tables cannot be read in a distributed (bucketed) fashion, so the
 * returned function unconditionally fails if ever invoked.
 */
@Override
public ToIntFunction<ConnectorSplit> getSplitBucketFunction(
        ConnectorTransactionHandle transactionHandle,
        ConnectorSession session,
        ConnectorPartitioningHandle partitioningHandle)
{
    return value -> {
        // Fixed grammar of the user-facing message ("does not supported" -> "does not support").
        throw new PrestoException(NOT_SUPPORTED, "Black hole connector does not support distributed reads");
    };
}
 
Example 29
Source Project: presto   Source File: SheetsRecordSetProvider.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns)
{
    requireNonNull(split, "split is null");
    SheetsSplit sheetsSplit = (SheetsSplit) split;

    // Narrow the engine column handles to Sheets handles, keeping order.
    List<SheetsColumnHandle> columnHandles = columns.stream()
            .map(column -> (SheetsColumnHandle) column)
            .collect(Collectors.toList());

    return new SheetsRecordSet(sheetsSplit, columnHandles);
}
 
Example 30
Source Project: presto   Source File: IcebergPageSourceProvider.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a page source for an Iceberg split: reads the non-partition ("regular")
 * columns from the data file and overlays the partition-key values.
 */
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit connectorSplit,
        ConnectorTableHandle connectorTable,
        List<ColumnHandle> columns,
        TupleDomain<ColumnHandle> dynamicFilter)
{
    IcebergSplit split = (IcebergSplit) connectorSplit;
    IcebergTableHandle table = (IcebergTableHandle) connectorTable;

    List<IcebergColumnHandle> icebergColumns = columns.stream()
            .map(IcebergColumnHandle.class::cast)
            .collect(toImmutableList());

    Map<Integer, String> partitionKeys = split.getPartitionKeys();

    // Regular columns are those not supplied by partition keys; derive them
    // from icebergColumns instead of re-casting the raw handles a second time.
    List<IcebergColumnHandle> regularColumns = icebergColumns.stream()
            .filter(column -> !partitionKeys.containsKey(column.getId()))
            .collect(toImmutableList());

    HdfsContext hdfsContext = new HdfsContext(session, table.getSchemaName(), table.getTableName());
    ConnectorPageSource dataPageSource = createDataPageSource(
            session,
            hdfsContext,
            new Path(split.getPath()),
            split.getStart(),
            split.getLength(),
            split.getFileFormat(),
            regularColumns,
            table.getPredicate());

    // IcebergPageSource fills in partition-key columns around the data source.
    return new IcebergPageSource(icebergColumns, partitionKeys, dataPageSource, session.getTimeZoneKey());
}