Java Code Examples for io.prestosql.spi.connector.ConnectorTableHandle

The following examples show how to use io.prestosql.spi.connector.ConnectorTableHandle. These examples are extracted from open-source projects. You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source Project: presto   Source File: MongoMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public Optional<ConstraintApplicationResult<ConnectorTableHandle>> applyFilter(ConnectorSession session, ConnectorTableHandle table, Constraint constraint)
{
    MongoTableHandle mongoHandle = (MongoTableHandle) table;

    // Tighten the constraint already carried by the handle with the new summary.
    TupleDomain<ColumnHandle> currentDomain = mongoHandle.getConstraint();
    TupleDomain<ColumnHandle> mergedDomain = currentDomain.intersect(constraint.getSummary());

    // Nothing new could be pushed down; report "no change" to the engine.
    if (currentDomain.equals(mergedDomain)) {
        return Optional.empty();
    }

    MongoTableHandle narrowed = new MongoTableHandle(
            mongoHandle.getSchemaTableName(),
            mergedDomain);

    return Optional.of(new ConstraintApplicationResult<>(narrowed, constraint.getSummary()));
}
 
Example 2
Source Project: presto   Source File: TestTpcdsMetadataStatistics.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testTableStatsExistenceSupportedSchema()
{
    // Every base table in every supported schema must expose a known row count
    // and non-null statistics for each of its columns.
    for (String schemaName : new String[] {"sf0.01", "tiny", "sf1", "sf1.000"}) {
        for (Table table : Table.getBaseTables()) {
            SchemaTableName schemaTableName = new SchemaTableName(schemaName, table.getName());
            ConnectorTableHandle tableHandle = metadata.getTableHandle(session, schemaTableName);
            TableStatistics tableStatistics = metadata.getTableStatistics(session, tableHandle, alwaysTrue());
            assertFalse(tableStatistics.getRowCount().isUnknown());
            for (ColumnHandle column : metadata.getColumnHandles(session, tableHandle).values()) {
                assertTrue(tableStatistics.getColumnStatistics().containsKey(column));
                assertNotNull(tableStatistics.getColumnStatistics().get(column));
            }
        }
    }
}
 
Example 3
Source Project: presto   Source File: TestRaptorMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testRenameTable()
{
    // The table must not exist yet; create it under the default name.
    assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS));
    metadata.createTable(SESSION, getOrdersTable(), false);
    ConnectorTableHandle handle = metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS);
    assertInstanceOf(handle, RaptorTableHandle.class);

    RaptorTableHandle raptorHandle = (RaptorTableHandle) handle;
    SchemaTableName newName = new SchemaTableName(raptorHandle.getSchemaName(), "orders_renamed");

    // After the rename, the old name must no longer resolve and the new name
    // must resolve to a handle carrying the renamed table name.
    metadata.renameTable(SESSION, raptorHandle, newName);
    assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS));
    ConnectorTableHandle renamedHandle = metadata.getTableHandle(SESSION, newName);
    assertNotNull(renamedHandle);
    assertEquals(((RaptorTableHandle) renamedHandle).getTableName(), newName.getTableName());
}
 
Example 4
Source Project: presto   Source File: BigQuerySplitManager.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle table,
        SplitSchedulingStrategy splitSchedulingStrategy)
{
    log.debug("getSplits(transaction=%s, session=%s, table=%s, splitSchedulingStrategy=%s)", transaction, session, table, splitSchedulingStrategy);
    BigQueryTableHandle handle = (BigQueryTableHandle) table;

    TableId remoteTableId = handle.getTableId();
    // When parallelism is not configured, default to one unit per available worker.
    int parallelismToUse = parallelism.orElse(nodeManager.getRequiredWorkerNodes().size());
    TupleDomain<ColumnHandle> pushedConstraint = handle.getConstraint();
    Optional<String> filter = BigQueryFilterQueryBuilder.buildFilter(pushedConstraint);

    List<BigQuerySplit> splits;
    if (emptyProjectionIsRequired(handle.getProjectedColumns())) {
        // No columns are actually needed (e.g. count(*)): synthesize empty-projection splits.
        splits = createEmptyProjection(remoteTableId, parallelismToUse, filter);
    }
    else {
        splits = readFromBigQuery(remoteTableId, handle.getProjectedColumns(), parallelismToUse, filter);
    }
    return new FixedSplitSource(splits);
}
 
Example 5
Source Project: presto   Source File: JdbcRecordSetProvider.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List<? extends ColumnHandle> columns)
{
    JdbcSplit jdbcSplit = (JdbcSplit) split;
    JdbcTableHandle jdbcTableHandle = (JdbcTableHandle) table;

    // The engine supplies the needed columns (and their order) as an argument.
    // If the handle recorded a column list (via applyProjection), it must match
    // the requested set exactly; an absent list means "all table columns"
    // because applyProjection was never called.
    // TODO: this is something that should be addressed once the getRecordSet API is revamped
    jdbcTableHandle.getColumns()
            .ifPresent(recordedColumns -> verify(columns.equals(recordedColumns)));

    ImmutableList.Builder<JdbcColumnHandle> jdbcColumns = ImmutableList.builder();
    columns.forEach(column -> jdbcColumns.add((JdbcColumnHandle) column));

    return new JdbcRecordSet(jdbcClient, session, jdbcSplit, jdbcTableHandle, jdbcColumns.build());
}
 
Example 6
Source Project: presto   Source File: PinotMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public Optional<ConstraintApplicationResult<ConnectorTableHandle>> applyFilter(ConnectorSession session, ConnectorTableHandle table, Constraint constraint)
{
    PinotTableHandle pinotHandle = (PinotTableHandle) table;

    // Intersect the incoming summary with the constraint already on the handle.
    TupleDomain<ColumnHandle> currentDomain = pinotHandle.getConstraint();
    TupleDomain<ColumnHandle> mergedDomain = currentDomain.intersect(constraint.getSummary());
    if (currentDomain.equals(mergedDomain)) {
        // No additional pushdown happened.
        return Optional.empty();
    }

    // Rebuild the handle with the tightened domain; all other fields are preserved.
    PinotTableHandle narrowed = new PinotTableHandle(
            pinotHandle.getSchemaName(),
            pinotHandle.getTableName(),
            mergedDomain,
            pinotHandle.getLimit(),
            pinotHandle.getQuery());
    return Optional.of(new ConstraintApplicationResult<>(narrowed, constraint.getSummary()));
}
 
Example 7
Source Project: presto   Source File: RaptorMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void addColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnMetadata column)
{
    RaptorTableHandle raptorTable = (RaptorTableHandle) tableHandle;

    // Always add new columns to the end: derive the new id and ordinal from the
    // current last column.
    List<TableColumn> currentColumns = dao.listTableColumns(raptorTable.getSchemaName(), raptorTable.getTableName());
    TableColumn lastColumn = currentColumns.get(currentColumns.size() - 1);
    long newColumnId = lastColumn.getColumnId() + 1;
    int newOrdinal = lastColumn.getOrdinalPosition() + 1;

    String typeId = column.getType().getTypeId().getId();
    // Insert the column and bump the table version atomically in one metadata transaction.
    daoTransaction(dbi, MetadataDao.class, transactionDao -> {
        transactionDao.insertColumn(raptorTable.getTableId(), newColumnId, column.getName(), newOrdinal, typeId, null, null);
        transactionDao.updateTableVersion(raptorTable.getTableId(), session.getStart().toEpochMilli());
    });

    shardManager.addColumn(raptorTable.getTableId(), new ColumnInfo(newColumnId, column.getType()));
}
 
Example 8
Source Project: presto   Source File: ElasticsearchMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public Optional<LimitApplicationResult<ConnectorTableHandle>> applyLimit(ConnectorSession session, ConnectorTableHandle table, long limit)
{
    ElasticsearchTableHandle esHandle = (ElasticsearchTableHandle) table;

    // limit pushdown currently not supported passthrough query
    if (isPassthroughQuery(esHandle)) {
        return Optional.empty();
    }

    // A previously pushed-down limit that is at least as tight makes this a no-op.
    OptionalLong existingLimit = esHandle.getLimit();
    if (existingLimit.isPresent() && existingLimit.getAsLong() <= limit) {
        return Optional.empty();
    }

    ElasticsearchTableHandle limited = new ElasticsearchTableHandle(
            esHandle.getType(),
            esHandle.getSchema(),
            esHandle.getIndex(),
            esHandle.getConstraint(),
            esHandle.getQuery(),
            OptionalLong.of(limit));

    // "false": the limit is not guaranteed by the connector, so the engine must
    // still enforce it on the returned rows.
    return Optional.of(new LimitApplicationResult<>(limited, false));
}
 
Example 9
Source Project: presto   Source File: HiveMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public void validateScan(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    HiveTableHandle hiveHandle = (HiveTableHandle) tableHandle;

    // Only enforce when the session requires partition filters, the table is not
    // currently being analyzed, and no constraint has been enforced yet.
    if (!HiveSessionProperties.isQueryPartitionFilterRequired(session)
            || hiveHandle.getAnalyzePartitionValues().isPresent()
            || !hiveHandle.getEnforcedConstraint().isAll()) {
        return;
    }

    List<HiveColumnHandle> partitionColumns = hiveHandle.getPartitionColumns();
    if (partitionColumns.isEmpty()) {
        // Unpartitioned table: nothing to require a filter on.
        return;
    }

    Optional<Set<ColumnHandle>> referencedColumns = hiveHandle.getConstraintColumns();
    boolean filtersPartitionColumn = referencedColumns.isPresent()
            && !Collections.disjoint(referencedColumns.get(), partitionColumns);
    if (!filtersPartitionColumn) {
        String partitionColumnNames = partitionColumns.stream()
                .map(HiveColumnHandle::getName)
                .collect(Collectors.joining(","));
        throw new PrestoException(
                StandardErrorCode.QUERY_REJECTED,
                String.format("Filter required on %s.%s for at least one partition column: %s ", hiveHandle.getSchemaName(), hiveHandle.getTableName(), partitionColumnNames));
    }
}
 
Example 10
Source Project: presto   Source File: ElasticsearchMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle)
{
    ElasticsearchTableHandle esTable = (ElasticsearchTableHandle) tableHandle;
    ElasticsearchColumnHandle esColumn = (ElasticsearchColumnHandle) columnHandle;

    // Passthrough ("$query") tables expose exactly one synthetic result column;
    // any other column handle is a caller error.
    if (isPassthroughQuery(esTable)) {
        if (esColumn.getName().equals(PASSTHROUGH_QUERY_RESULT_COLUMN_METADATA.getName())) {
            return PASSTHROUGH_QUERY_RESULT_COLUMN_METADATA;
        }
        throw new IllegalArgumentException(format("Unexpected column for table '%s$query': %s", esTable.getIndex(), esColumn.getName()));
    }

    // Regular tables: metadata is derived directly from the column handle.
    return ColumnMetadata.builder()
            .setName(esColumn.getName())
            .setType(esColumn.getType())
            .build();
}
 
Example 11
Source Project: presto   Source File: BigQueryMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public Optional<ProjectionApplicationResult<ConnectorTableHandle>> applyProjection(
        ConnectorSession session,
        ConnectorTableHandle handle,
        List<ConnectorExpression> projections,
        Map<String, ColumnHandle> assignments)
{
    log.debug("applyProjection(session=%s, handle=%s, projections=%s, assignments=%s)",
            session, handle, projections, assignments);
    BigQueryTableHandle bqHandle = (BigQueryTableHandle) handle;

    // A projection was already recorded on the handle; nothing further to push down.
    if (bqHandle.getProjectedColumns().isPresent()) {
        return Optional.empty();
    }

    // Project every assigned column and echo the assignments back unchanged.
    ImmutableList.Builder<ColumnHandle> projectedColumns = ImmutableList.builder();
    ImmutableList.Builder<Assignment> assignmentList = ImmutableList.builder();
    for (Map.Entry<String, ColumnHandle> entry : assignments.entrySet()) {
        ColumnHandle column = entry.getValue();
        projectedColumns.add(column);
        assignmentList.add(new Assignment(entry.getKey(), column, ((BigQueryColumnHandle) column).getPrestoType()));
    }

    BigQueryTableHandle projected = bqHandle.withProjectedColumns(projectedColumns.build());
    return Optional.of(new ProjectionApplicationResult<>(projected, projections, assignmentList.build()));
}
 
Example 12
Source Project: presto   Source File: AccumuloMetadata.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public Optional<ConstraintApplicationResult<ConnectorTableHandle>> applyFilter(ConnectorSession session, ConnectorTableHandle table, Constraint constraint)
{
    AccumuloTableHandle accumuloHandle = (AccumuloTableHandle) table;

    // Tighten the handle's existing constraint with the incoming summary.
    TupleDomain<ColumnHandle> currentDomain = accumuloHandle.getConstraint();
    TupleDomain<ColumnHandle> mergedDomain = currentDomain.intersect(constraint.getSummary());
    if (currentDomain.equals(mergedDomain)) {
        // No additional pushdown was possible.
        return Optional.empty();
    }

    // Rebuild the handle with the narrower domain, keeping every other field intact.
    AccumuloTableHandle narrowed = new AccumuloTableHandle(
            accumuloHandle.getSchema(),
            accumuloHandle.getTable(),
            accumuloHandle.getRowId(),
            mergedDomain,
            accumuloHandle.isExternal(),
            accumuloHandle.getSerializerClassName(),
            accumuloHandle.getScanAuthorizations());

    return Optional.of(new ConstraintApplicationResult<>(narrowed, constraint.getSummary()));
}
 
Example 13
Source Project: presto   Source File: TestInformationSchemaMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testInformationSchemaPredicatePushdownForEmptyNames()
{
    // Verifies that an equality predicate on the empty string ("") for
    // table_schema / table_name is still pushed down as a QualifiedTablePrefix
    // rather than being dropped.
    TransactionId transactionId = transactionManager.beginTransaction(false);
    ConnectorSession session = createNewSession(transactionId);
    ConnectorMetadata metadata = new InformationSchemaMetadata("test_catalog", this.metadata);
    InformationSchemaColumnHandle tableSchemaColumn = new InformationSchemaColumnHandle("table_schema");
    InformationSchemaColumnHandle tableNameColumn = new InformationSchemaColumnHandle("table_name");
    ConnectorTableHandle tableHandle = metadata.getTableHandle(session, new SchemaTableName("information_schema", "tables"));

    // Empty schema name: applyFilter must produce a handle (AssertionError otherwise).
    InformationSchemaTableHandle filtered = metadata.applyFilter(session, tableHandle, new Constraint(TupleDomain.withColumnDomains(
            ImmutableMap.of(tableSchemaColumn, Domain.singleValue(VARCHAR, Slices.utf8Slice(""))))))
            .map(ConstraintApplicationResult::getHandle)
            .map(InformationSchemaTableHandle.class::cast)
            .orElseThrow(AssertionError::new);

    // "" is a valid schema name and is (currently) accepted by QualifiedTablePrefix
    assertEquals(filtered.getPrefixes(), ImmutableSet.of(new QualifiedTablePrefix("test_catalog", "")));

    // Empty table name
    filtered = metadata.applyFilter(session, tableHandle, new Constraint(TupleDomain.withColumnDomains(
            ImmutableMap.of(tableNameColumn, Domain.singleValue(VARCHAR, Slices.utf8Slice(""))))))
            .map(ConstraintApplicationResult::getHandle)
            .map(InformationSchemaTableHandle.class::cast)
            .orElseThrow(AssertionError::new);

    // "" is a valid table name and is (currently) accepted by QualifiedTablePrefix
    assertEquals(filtered.getPrefixes(), ImmutableSet.of(new QualifiedTablePrefix("test_catalog", "test_schema", "")));
}
 
Example 14
Source Project: presto   Source File: AbstractTestHiveFileSystem.java    License: Apache License 2.0 5 votes vote down vote up
// Reads the entire contents of the given table through the connector SPI,
// split by split, and materializes every non-hidden column into a result.
protected MaterializedResult readTable(SchemaTableName tableName)
        throws IOException
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        ConnectorTableHandle table = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, table).values());

        // beginQuery/cleanupQuery bracket the scan, mirroring engine behavior.
        metadata.beginQuery(session);
        ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, table, UNGROUPED_SCHEDULING);

        // allTypes covers every column (needed to materialize pages); dataTypes
        // excludes hidden columns and shapes the returned result.
        List<Type> allTypes = getTypes(columnHandles);
        List<Type> dataTypes = getTypes(columnHandles.stream()
                .filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden())
                .collect(toImmutableList()));
        MaterializedResult.Builder result = MaterializedResult.resultBuilder(session, dataTypes);

        List<ConnectorSplit> splits = getAllSplits(splitSource);
        for (ConnectorSplit split : splits) {
            // Page source is per split; try-with-resources guarantees it is closed.
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, table, columnHandles, TupleDomain.all())) {
                MaterializedResult pageSourceResult = materializeSourceDataStream(session, pageSource, allTypes);
                for (MaterializedRow row : pageSourceResult.getMaterializedRows()) {
                    // Drop hidden-column fields (by channel index) before recording the row.
                    Object[] dataValues = IntStream.range(0, row.getFieldCount())
                            .filter(channel -> !((HiveColumnHandle) columnHandles.get(channel)).isHidden())
                            .mapToObj(row::getField)
                            .toArray();
                    result.row(dataValues);
                }
            }
        }

        metadata.cleanupQuery(session);
        return result.build();
    }
}
 
Example 15
Source Project: presto   Source File: KuduPageSourceProvider.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        ConnectorTableHandle table,
        List<ColumnHandle> columns,
        TupleDomain<ColumnHandle> dynamicFilter)
{
    // Build on the record-set provider; when the virtual row-id column is
    // requested, wrap the record set in an updatable page source.
    KuduRecordSet kuduRecordSet = (KuduRecordSet) recordSetProvider.getRecordSet(transaction, session, split, table, columns);
    return columns.contains(KuduColumnHandle.ROW_ID_HANDLE)
            ? new KuduUpdatablePageSource(kuduRecordSet)
            : new RecordPageSource(kuduRecordSet);
}
 
Example 16
Source Project: presto   Source File: MetadataManager.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public Optional<TableLayoutResult> getLayout(Session session, TableHandle table, Constraint constraint, Optional<Set<ColumnHandle>> desiredColumns)
{
    // A provably-empty constraint can never produce rows.
    if (constraint.getSummary().isNone()) {
        return Optional.empty();
    }

    CatalogName catalog = table.getCatalogName();
    ConnectorTableHandle connectorHandle = table.getConnectorHandle();

    CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalog);
    ConnectorMetadata connectorMetadata = catalogMetadata.getMetadataFor(catalog);

    // This path only exists for connectors still on the legacy layout API.
    checkState(connectorMetadata.usesLegacyTableLayouts(), "getLayout() was called even though connector doesn't support legacy Table Layout");

    ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalog);
    ConnectorSession connectorSession = session.toConnectorSession(catalog);
    List<ConnectorTableLayoutResult> layoutResults = connectorMetadata.getTableLayouts(connectorSession, connectorHandle, constraint, desiredColumns);
    if (layoutResults.isEmpty()) {
        return Optional.empty();
    }
    if (layoutResults.size() > 1) {
        throw new PrestoException(NOT_SUPPORTED, format("Connector returned multiple layouts for table %s", table));
    }

    ConnectorTableLayoutResult onlyResult = layoutResults.get(0);
    ConnectorTableLayout layout = onlyResult.getTableLayout();
    return Optional.of(new TableLayoutResult(
            new TableHandle(catalog, connectorHandle, transactionHandle, Optional.of(layout.getHandle())),
            new TableProperties(catalog, transactionHandle, new ConnectorTableProperties(layout)),
            onlyResult.getUnenforcedConstraint()));
}
 
Example 17
Source Project: presto   Source File: AbstractTestHiveLocal.java    License: Apache License 2.0 5 votes vote down vote up
@Override
protected ConnectorTableHandle getTableHandle(ConnectorMetadata metadata, SchemaTableName tableName)
{
    // Only temporary tables created by the harness can be resolved here;
    // anything else skips the test rather than touching pre-existing tables.
    if (!tableName.getTableName().startsWith(TEMPORARY_TABLE_PREFIX)) {
        throw new SkipException("tests using existing tables are not supported");
    }
    return super.getTableHandle(metadata, tableName);
}
 
Example 18
Source Project: presto   Source File: HiveMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public Map<String, ColumnHandle> getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    SchemaTableName schemaTableName = ((HiveTableHandle) tableHandle).getSchemaTableName();
    // Resolve through the metastore; a table that has vanished is an error here.
    Table table = metastore.getTable(new HiveIdentity(session), schemaTableName.getSchemaName(), schemaTableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    // Index the handles by column name.
    return hiveColumnHandles(table, typeManager).stream()
            .collect(toImmutableMap(HiveColumnHandle::getName, identity()));
}
 
Example 19
Source Project: presto   Source File: TestRaptorMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testCreateBucketedTable()
{
    // Creates a bucketed orders table and verifies bucket-column metadata,
    // bucket count, and that re-creating the table yields a new distribution.
    assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS));

    ConnectorTableMetadata ordersTable = getOrdersTable(ImmutableMap.of(
            BUCKET_COUNT_PROPERTY, 16,
            BUCKETED_ON_PROPERTY, ImmutableList.of("custkey", "orderkey")));
    metadata.createTable(SESSION, ordersTable, false);

    ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS);
    assertInstanceOf(tableHandle, RaptorTableHandle.class);
    RaptorTableHandle raptorTableHandle = (RaptorTableHandle) tableHandle;
    assertEquals(raptorTableHandle.getTableId(), 1);

    long tableId = raptorTableHandle.getTableId();
    MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);

    // verify bucket columns are recorded with the declared bucket ordering
    assertTableColumnsEqual(metadataDao.listBucketColumns(tableId), ImmutableList.of(
            new TableColumn(DEFAULT_TEST_ORDERS, "custkey", BIGINT, 2, 1, OptionalInt.of(0), OptionalInt.empty(), false),
            new TableColumn(DEFAULT_TEST_ORDERS, "orderkey", BIGINT, 1, 0, OptionalInt.of(1), OptionalInt.empty(), false)));

    assertEquals(raptorTableHandle.getBucketCount(), OptionalInt.of(16));

    assertEquals(getTableDistributionId(tableId), Long.valueOf(1));

    metadata.dropTable(SESSION, tableHandle);

    // create a new table and verify it has a different distribution
    metadata.createTable(SESSION, ordersTable, false);
    tableId = ((RaptorTableHandle) metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS)).getTableId();
    assertEquals(tableId, 2);
    assertEquals(getTableDistributionId(tableId), Long.valueOf(2));
}
 
Example 20
Source Project: presto   Source File: RaptorSplitManager.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle handle, SplitSchedulingStrategy splitSchedulingStrategy)
{
    RaptorTableHandle raptorTable = (RaptorTableHandle) handle;
    long tableId = raptorTable.getTableId();
    boolean bucketed = raptorTable.getBucketCount().isPresent();
    // Merge shards into one split per bucket, except for deletes or when the
    // bucket count is below the per-session threshold.
    boolean merged = bucketed
            && !raptorTable.isDelete()
            && raptorTable.getBucketCount().getAsInt() >= getOneSplitPerBucketThreshold(session);
    OptionalLong transactionId = raptorTable.getTransactionId();
    Optional<List<String>> bucketToNode = raptorTable.getBucketAssignments();
    verify(bucketed == bucketToNode.isPresent(), "mismatched bucketCount and bucketToNode presence");
    return new RaptorSplitSource(tableId, merged, raptorTable.getConstraint(), transactionId, bucketToNode);
}
 
Example 21
Source Project: presto   Source File: ClassLoaderSafeConnectorMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public Optional<AggregationApplicationResult<ConnectorTableHandle>> applyAggregation(
        ConnectorSession session,
        ConnectorTableHandle table,
        List<AggregateFunction> aggregates,
        Map<String, ColumnHandle> assignments,
        List<List<ColumnHandle>> groupingSets)
{
    // Run the delegate with the connector's class loader installed on this
    // thread so the callee resolves its own classes.
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        Optional<AggregationApplicationResult<ConnectorTableHandle>> result =
                delegate.applyAggregation(session, table, aggregates, assignments, groupingSets);
        return result;
    }
}
 
Example 22
Source Project: presto   Source File: IcebergMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void dropColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle column)
{
    IcebergTableHandle icebergHandle = (IcebergTableHandle) tableHandle;
    IcebergColumnHandle columnHandle = (IcebergColumnHandle) column;
    // Apply the schema change directly through the Iceberg table API and commit it.
    org.apache.iceberg.Table icebergTable = getIcebergTable(metastore, hdfsEnvironment, session, icebergHandle.getSchemaTableName());
    icebergTable.updateSchema()
            .deleteColumn(columnHandle.getName())
            .commit();
}
 
Example 23
Source Project: presto   Source File: PhoenixMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    JdbcTableHandle jdbcHandle = (JdbcTableHandle) tableHandle;

    // if we autogenerated a ROWKEY for this table, delete the associated sequence as well
    boolean hasAutogeneratedRowkey = getColumnHandles(session, tableHandle).values().stream()
            .map(JdbcColumnHandle.class::cast)
            .map(JdbcColumnHandle::getColumnName)
            .anyMatch(ROWKEY::equals);
    if (hasAutogeneratedRowkey) {
        phoenixClient.execute(session, format("DROP SEQUENCE %s", getEscapedTableName(Optional.ofNullable(jdbcHandle.getSchemaName()), jdbcHandle.getTableName() + "_sequence")));
    }

    phoenixClient.dropTable(JdbcIdentity.from(session), jdbcHandle);
}
 
Example 24
Source Project: presto   Source File: ClassLoaderSafeConnectorMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public ConnectorInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    // Run the delegate with the connector's class loader installed on this thread.
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        ConnectorInsertTableHandle insertHandle = delegate.beginInsert(session, tableHandle);
        return insertHandle;
    }
}
 
Example 25
Source Project: presto   Source File: MemoryMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public synchronized ConnectorTableHandle getTableHandle(ConnectorSession session, SchemaTableName schemaTableName)
{
    // An unknown table is reported as null, as the SPI expects.
    Long tableId = tableIds.get(schemaTableName);
    return (tableId == null) ? null : new MemoryTableHandle(tableId);
}
 
Example 26
Source Project: presto   Source File: AtopMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public Optional<ConstraintApplicationResult<ConnectorTableHandle>> applyFilter(ConnectorSession session, ConnectorTableHandle table, Constraint constraint)
{
    AtopTableHandle atopHandle = (AtopTableHandle) table;

    Optional<Map<ColumnHandle, Domain>> domains = constraint.getSummary().getDomains();

    // Start from the time constraints already on the handle and tighten them
    // with any start/end time domains present in the new summary.
    Domain startTimeDomain = atopHandle.getStartTimeConstraint();
    Domain endTimeDomain = atopHandle.getEndTimeConstraint();
    if (domains.isPresent()) {
        Map<ColumnHandle, Domain> columnDomains = domains.get();
        if (columnDomains.containsKey(START_TIME_HANDLE)) {
            startTimeDomain = columnDomains.get(START_TIME_HANDLE).intersect(startTimeDomain);
        }
        if (columnDomains.containsKey(END_TIME_HANDLE)) {
            endTimeDomain = columnDomains.get(END_TIME_HANDLE).intersect(endTimeDomain);
        }
    }

    // Unchanged domains mean nothing new could be pushed down.
    if (atopHandle.getEndTimeConstraint().equals(endTimeDomain) && atopHandle.getStartTimeConstraint().equals(startTimeDomain)) {
        return Optional.empty();
    }

    AtopTableHandle narrowed = new AtopTableHandle(
            atopHandle.getSchema(),
            atopHandle.getTable(),
            startTimeDomain,
            endTimeDomain);

    return Optional.of(new ConstraintApplicationResult<>(narrowed, constraint.getSummary()));
}
 
Example 27
Source Project: presto   Source File: InformationSchemaMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public ConnectorTableHandle getTableHandle(ConnectorSession connectorSession, SchemaTableName tableName)
{
    // Unknown information_schema tables resolve to null, as the SPI expects.
    Optional<InformationSchemaTable> table = InformationSchemaTable.of(tableName);
    if (table.isEmpty()) {
        return null;
    }
    return new InformationSchemaTableHandle(catalogName, table.get(), defaultPrefixes(catalogName), Optional.empty(), Optional.empty(), OptionalLong.empty());
}
 
Example 28
Source Project: presto   Source File: TpchMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    TpchTableHandle handle = (TpchTableHandle) tableHandle;

    // The schema name encodes the scale factor; the backing tpch table is
    // looked up by name.
    TpchTable<?> table = TpchTable.getTable(handle.getTableName());
    String schemaName = scaleFactorSchemaName(handle.getScaleFactor());

    return getTableMetadata(schemaName, table, columnNaming);
}
 
Example 29
Source Project: presto   Source File: InformationSchemaMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public ColumnMetadata getColumnMetadata(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle)
{
    InformationSchemaTableHandle handle = (InformationSchemaTableHandle) tableHandle;
    ConnectorTableMetadata tableMetadata = handle.getTable().getTableMetadata();

    String columnName = ((InformationSchemaColumnHandle) columnHandle).getColumnName();

    // findColumnMetadata returns null for unknown columns; treat that as a caller error.
    ColumnMetadata result = findColumnMetadata(tableMetadata, columnName);
    checkArgument(result != null, "Column '%s' on table '%s' does not exist", columnName, tableMetadata.getTable());
    return result;
}
 
Example 30
Source Project: presto   Source File: TestRaptorMetadata.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testTableProperties()
{
    // Creates an orders table with sort ordering and a temporal column, then
    // verifies those properties were persisted in the metadata store.
    assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS));

    ConnectorTableMetadata ordersTable = getOrdersTable(ImmutableMap.of(
            ORDERING_PROPERTY, ImmutableList.of("orderdate", "custkey"),
            TEMPORAL_COLUMN_PROPERTY, "orderdate"));
    metadata.createTable(SESSION, ordersTable, false);

    ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS);
    assertInstanceOf(tableHandle, RaptorTableHandle.class);
    RaptorTableHandle raptorTableHandle = (RaptorTableHandle) tableHandle;
    assertEquals(raptorTableHandle.getTableId(), 1);

    long tableId = raptorTableHandle.getTableId();
    MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);

    // verify sort columns (in the declared ordering)
    List<TableColumn> sortColumns = metadataDao.listSortColumns(tableId);
    assertTableColumnsEqual(sortColumns, ImmutableList.of(
            new TableColumn(DEFAULT_TEST_ORDERS, "orderdate", DATE, 4, 3, OptionalInt.empty(), OptionalInt.of(0), true),
            new TableColumn(DEFAULT_TEST_ORDERS, "custkey", BIGINT, 2, 1, OptionalInt.empty(), OptionalInt.of(1), false)));

    // verify temporal column
    assertEquals(metadataDao.getTemporalColumnId(tableId), Long.valueOf(4));

    // verify no organization
    assertFalse(metadataDao.getTableInformation(tableId).isOrganized());

    metadata.dropTable(SESSION, tableHandle);
}