Java Code Examples for io.prestosql.spi.type.TypeManager

The following examples show how to use io.prestosql.spi.type.TypeManager. These examples are extracted from open source projects; the source project, file, and license are noted above each example.
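Most of the snippets below exercise a handful of TypeManager calls: looking up a Type from a TypeSignature, building parameterized types such as MAP, and binding the engine-provided instance into a connector's Guice injector. As a quick orientation, here is a minimal, hedged sketch of the lookup calls; the variable names are illustrative, and the TypeManager is assumed to come from a ConnectorContext as in the factory examples below.

// Illustrative sketch: obtain the engine-provided TypeManager from the connector context.
TypeManager typeManager = context.getTypeManager();

// Resolve a simple type from its signature (compare Example 29).
Type jsonType = typeManager.getType(new TypeSignature(StandardTypes.JSON));

// Build a parameterized type such as map(integer, bigint) (compare Examples 13 and 18).
Type mapType = typeManager.getParameterizedType(
        StandardTypes.MAP,
        ImmutableList.of(
                TypeSignatureParameter.typeParameter(INTEGER.getTypeSignature()),
                TypeSignatureParameter.typeParameter(BIGINT.getTypeSignature())));
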
Example 1
Source Project: presto   Source File: KafkaConnectorFactory.java    License: Apache License 2.0
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
{
    requireNonNull(catalogName, "catalogName is null");
    requireNonNull(config, "config is null");

    Bootstrap app = new Bootstrap(
            new JsonModule(),
            new KafkaConnectorModule(),
            extension,
            binder -> {
                binder.bind(ClassLoader.class).toInstance(KafkaConnectorFactory.class.getClassLoader());
                binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                binder.bind(NodeManager.class).toInstance(context.getNodeManager());
            });

    Injector injector = app
            .strictConfig()
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    return injector.getInstance(KafkaConnector.class);
}
 
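Once the factory binds it, the TypeManager can be injected into any class that the connector's injector constructs. A minimal sketch of such a consumer; the class name is hypothetical:

// Hypothetical consumer: Guice supplies the TypeManager instance bound in the factory above.
public class KafkaTopicDescriber
{
    private final TypeManager typeManager;

    @Inject
    public KafkaTopicDescriber(TypeManager typeManager)
    {
        this.typeManager = requireNonNull(typeManager, "typeManager is null");
    }
}
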
Example 2
Source Project: presto   Source File: HiveCoercionRecordCursor.java    License: Apache License 2.0
public MapCoercer(TypeManager typeManager, HiveType fromHiveType, HiveType toHiveType, BridgingRecordCursor bridgingRecordCursor)
{
    requireNonNull(typeManager, "typeManager is null");
    requireNonNull(fromHiveType, "fromHiveType is null");
    requireNonNull(toHiveType, "toHiveType is null");
    this.bridgingRecordCursor = requireNonNull(bridgingRecordCursor, "bridgingRecordCursor is null");
    HiveType fromKeyHiveType = HiveType.valueOf(((MapTypeInfo) fromHiveType.getTypeInfo()).getMapKeyTypeInfo().getTypeName());
    HiveType fromValueHiveType = HiveType.valueOf(((MapTypeInfo) fromHiveType.getTypeInfo()).getMapValueTypeInfo().getTypeName());
    HiveType toKeyHiveType = HiveType.valueOf(((MapTypeInfo) toHiveType.getTypeInfo()).getMapKeyTypeInfo().getTypeName());
    HiveType toValueHiveType = HiveType.valueOf(((MapTypeInfo) toHiveType.getTypeInfo()).getMapValueTypeInfo().getTypeName());
    this.fromKeyValueTypes = fromHiveType.getType(typeManager).getTypeParameters();
    this.toType = toHiveType.getType(typeManager);
    this.toKeyValueTypes = toType.getTypeParameters();
    this.coercers = new Coercer[2];
    coercers[0] = fromKeyHiveType.equals(toKeyHiveType) ? null : createCoercer(typeManager, fromKeyHiveType, toKeyHiveType, bridgingRecordCursor);
    coercers[1] = fromValueHiveType.equals(toValueHiveType) ? null : createCoercer(typeManager, fromValueHiveType, toValueHiveType, bridgingRecordCursor);
    this.pageBuilder = coercers[0] == null && coercers[1] == null ? null : new PageBuilder(ImmutableList.of(toType));
}
 
Example 3
Source Project: presto   Source File: JdbcConnectorFactory.java    License: Apache License 2.0
@Override
public Connector create(String catalogName, Map<String, String> requiredConfig, ConnectorContext context)
{
    requireNonNull(requiredConfig, "requiredConfig is null");

    Bootstrap app = new Bootstrap(
            binder -> binder.bind(TypeManager.class).toInstance(context.getTypeManager()),
            binder -> binder.bind(NodeManager.class).toInstance(context.getNodeManager()),
            binder -> binder.bind(VersionEmbedder.class).toInstance(context.getVersionEmbedder()),
            new JdbcModule(catalogName),
            moduleProvider.getModule(catalogName));

    Injector injector = app
            .strictConfig()
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(requiredConfig)
            .initialize();

    return injector.getInstance(JdbcConnector.class);
}
 
Example 4
Source Project: presto   Source File: OrcFileWriterFactory.java    License: Apache License 2.0
public OrcFileWriterFactory(
        HdfsEnvironment hdfsEnvironment,
        TypeManager typeManager,
        NodeVersion nodeVersion,
        DateTimeZone hiveStorageTimeZone,
        boolean writeLegacyVersion,
        FileFormatDataSourceStats readStats,
        OrcWriterOptions orcWriterOptions)
{
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.nodeVersion = requireNonNull(nodeVersion, "nodeVersion is null");
    this.hiveStorageTimeZone = requireNonNull(hiveStorageTimeZone, "hiveStorageTimeZone is null");
    this.writeLegacyVersion = writeLegacyVersion;
    this.readStats = requireNonNull(readStats, "readStats is null");
    this.orcWriterOptions = requireNonNull(orcWriterOptions, "orcWriterOptions is null");
}
 
Example 5
Source Project: presto   Source File: HiveUtil.java    License: Apache License 2.0
public static List<HiveColumnHandle> hiveColumnHandles(Table table, TypeManager typeManager)
{
    ImmutableList.Builder<HiveColumnHandle> columns = ImmutableList.builder();

    // add the data fields first
    columns.addAll(getRegularColumnHandles(table, typeManager));

    // add the partition keys last (like Hive does)
    columns.addAll(getPartitionKeyColumnHandles(table, typeManager));

    // add hidden columns
    columns.add(pathColumnHandle());
    if (table.getStorage().getBucketProperty().isPresent()) {
        // TODO (https://github.com/prestosql/presto/issues/1706): support bucketing v2 for timestamp
        if (!containsTimestampBucketedV2(table.getStorage().getBucketProperty().get(), table)) {
            columns.add(bucketColumnHandle());
        }
    }
    columns.add(fileSizeColumnHandle());
    columns.add(fileModifiedTimeColumnHandle());
    if (!table.getPartitionColumns().isEmpty()) {
        columns.add(partitionColumnHandle());
    }

    return columns.build();
}
 
Example 6
Source Project: presto   Source File: TypeRegistry.java    License: Apache License 2.0
private Type instantiateParametricType(TypeManager typeManager, TypeSignature signature)
{
    List<TypeParameter> parameters = new ArrayList<>();

    for (TypeSignatureParameter parameter : signature.getParameters()) {
        TypeParameter typeParameter = TypeParameter.of(parameter, typeManager);
        parameters.add(typeParameter);
    }

    ParametricType parametricType = parametricTypes.get(signature.getBase().toLowerCase(Locale.ENGLISH));
    if (parametricType == null) {
        throw new TypeNotFoundException(signature);
    }

    Type instantiatedType;
    try {
        instantiatedType = parametricType.createType(typeManager, parameters);
    }
    catch (IllegalArgumentException e) {
        throw new TypeNotFoundException(signature, e);
    }

    // TODO: reimplement this check? Currently "varchar(Integer.MAX_VALUE)" fails with "varchar"
    //checkState(instantiatedType.equalsSignature(signature), "Instantiated parametric type name (%s) does not match expected name (%s)", instantiatedType, signature);
    return instantiatedType;
}
 
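Example 6 shows the registry side of the contract; Examples 7 and 8 below are real implementations of the other side. For orientation, here is a minimal hedged sketch of a custom ParametricType with a hypothetical name, which a registry like the one above would invoke through createType:

// Hypothetical parametric type "my_box(T)": this sketch simply unwraps to the inner
// type, making my_box(T) a transparent alias for T.
public class BoxParametricType
        implements ParametricType
{
    @Override
    public String getName()
    {
        return "my_box";
    }

    @Override
    public Type createType(TypeManager typeManager, List<TypeParameter> parameters)
    {
        checkArgument(parameters.size() == 1, "Expected exactly one parameter, got %s", parameters);
        checkArgument(parameters.get(0).getKind() == ParameterKind.TYPE, "Expected a type parameter, got %s", parameters);
        return parameters.get(0).getType();
    }
}
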
Example 7
Source Project: presto   Source File: MapParametricType.java    License: Apache License 2.0
@Override
public Type createType(TypeManager typeManager, List<TypeParameter> parameters)
{
    checkArgument(parameters.size() == 2, "Expected two parameters, got %s", parameters);
    TypeParameter firstParameter = parameters.get(0);
    TypeParameter secondParameter = parameters.get(1);
    checkArgument(
            firstParameter.getKind() == ParameterKind.TYPE && secondParameter.getKind() == ParameterKind.TYPE,
            "Expected key and value to be types, got %s",
            parameters);

    Type keyType = firstParameter.getType();
    Type valueType = secondParameter.getType();
    MethodHandle keyNativeEquals = typeManager.resolveOperator(OperatorType.EQUAL, ImmutableList.of(keyType, keyType));
    MethodHandle keyBlockNativeEquals = compose(keyNativeEquals, nativeValueGetter(keyType));
    MethodHandle keyBlockEquals = compose(keyNativeEquals, nativeValueGetter(keyType), nativeValueGetter(keyType));
    MethodHandle keyNativeHashCode = typeManager.resolveOperator(OperatorType.HASH_CODE, ImmutableList.of(keyType));
    MethodHandle keyBlockHashCode = compose(keyNativeHashCode, nativeValueGetter(keyType));
    return new MapType(
            keyType,
            valueType,
            keyBlockNativeEquals,
            keyBlockEquals,
            keyNativeHashCode,
            keyBlockHashCode);
}
 
Example 8
Source Project: presto   Source File: CharParametricType.java    License: Apache License 2.0
@Override
public Type createType(TypeManager typeManager, List<TypeParameter> parameters)
{
    if (parameters.isEmpty()) {
        return createCharType(1);
    }
    if (parameters.size() != 1) {
        throw new IllegalArgumentException("Expected at most one parameter for CHAR");
    }

    TypeParameter parameter = parameters.get(0);

    if (!parameter.isLongLiteral()) {
        throw new IllegalArgumentException("CHAR length must be a number");
    }

    return createCharType(parameter.getLongLiteral());
}
 
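Resolving a signature such as char(10) goes through the branch above that reads the long literal. A hedged sketch of the call, assuming TypeSignatureParameter.numericParameter from the same SPI package:

// Hypothetical usage: the TypeManager resolves char(10) by delegating to
// CharParametricType.createType above with a single long-literal parameter.
Type char10 = typeManager.getType(new TypeSignature(
        StandardTypes.CHAR,
        TypeSignatureParameter.numericParameter(10)));
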
Example 9
Source Project: presto   Source File: KuduModule.java    License: Apache License 2.0
@Override
protected void configure()
{
    bind(TypeManager.class).toInstance(typeManager);

    bind(KuduConnector.class).in(Scopes.SINGLETON);
    bind(KuduMetadata.class).in(Scopes.SINGLETON);
    bind(KuduTableProperties.class).in(Scopes.SINGLETON);
    bind(ConnectorSplitManager.class).to(KuduSplitManager.class).in(Scopes.SINGLETON);
    bind(ConnectorPageSourceProvider.class).to(KuduPageSourceProvider.class)
            .in(Scopes.SINGLETON);
    bind(ConnectorPageSinkProvider.class).to(KuduPageSinkProvider.class).in(Scopes.SINGLETON);
    bind(KuduHandleResolver.class).in(Scopes.SINGLETON);
    bind(KuduRecordSetProvider.class).in(Scopes.SINGLETON);
    configBinder(binder()).bindConfig(KuduClientConfig.class);

    bind(RangePartitionProcedures.class).in(Scopes.SINGLETON);
    Multibinder.newSetBinder(binder(), Procedure.class);
}
 
Example 10
Source Project: presto   Source File: MongoConnectorFactory.java    License: Apache License 2.0
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
{
    requireNonNull(config, "config is null");

    Bootstrap app = new Bootstrap(
            new JsonModule(),
            new MongoClientModule(),
            binder -> binder.bind(TypeManager.class).toInstance(context.getTypeManager()));

    Injector injector = app.strictConfig().doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    return injector.getInstance(MongoConnector.class);
}
 
Example 11
Source Project: presto   Source File: TableMetadataSystemTable.java    License: Apache License 2.0
@Inject
public TableMetadataSystemTable(@ForMetadata IDBI dbi, TypeManager typeManager)
{
    this.dao = onDemandDao(dbi, MetadataDao.class);
    requireNonNull(typeManager, "typeManager is null");

    this.tableMetadata = new ConnectorTableMetadata(
            new SchemaTableName("system", "tables"),
            ImmutableList.of(
                    new ColumnMetadata(SCHEMA_NAME, VARCHAR),
                    new ColumnMetadata(TABLE_NAME, VARCHAR),
                    new ColumnMetadata("temporal_column", VARCHAR),
                    new ColumnMetadata("ordering_columns", new ArrayType(VARCHAR)),
                    new ColumnMetadata("distribution_name", VARCHAR),
                    new ColumnMetadata("bucket_count", BIGINT),
                    new ColumnMetadata("bucketing_columns", new ArrayType(VARCHAR)),
                    new ColumnMetadata("organized", BOOLEAN)));
}
 
Example 12
Source Project: presto   Source File: OrcFileWriterFactory.java    License: Apache License 2.0
@Inject
public OrcFileWriterFactory(
        HdfsEnvironment hdfsEnvironment,
        TypeManager typeManager,
        NodeVersion nodeVersion,
        HiveConfig hiveConfig,
        OrcWriterConfig orcWriterConfig,
        FileFormatDataSourceStats readStats,
        OrcWriterConfig config)
{
    this(
            hdfsEnvironment,
            typeManager,
            nodeVersion,
            requireNonNull(hiveConfig, "hiveConfig is null").getDateTimeZone(),
            requireNonNull(orcWriterConfig, "orcWriterConfig is null").isUseLegacyVersion(),
            readStats,
            requireNonNull(config, "config is null").toOrcWriterOptions());
}
 
Example 13
Source Project: presto   Source File: OrcStorageManager.java    License: Apache License 2.0
static Type toOrcFileType(Type raptorType, TypeManager typeManager)
{
    // TIMESTAMP values are stored as BIGINT to avoid the poor timestamp encoding in ORC
    if (raptorType.equals(TimestampType.TIMESTAMP)) {
        return BIGINT;
    }
    if (raptorType instanceof ArrayType) {
        Type elementType = toOrcFileType(((ArrayType) raptorType).getElementType(), typeManager);
        return new ArrayType(elementType);
    }
    if (raptorType instanceof MapType) {
        TypeSignature keyType = toOrcFileType(((MapType) raptorType).getKeyType(), typeManager).getTypeSignature();
        TypeSignature valueType = toOrcFileType(((MapType) raptorType).getValueType(), typeManager).getTypeSignature();
        return typeManager.getParameterizedType(StandardTypes.MAP, ImmutableList.of(TypeSignatureParameter.typeParameter(keyType), TypeSignatureParameter.typeParameter(valueType)));
    }
    if (raptorType instanceof RowType) {
        List<Field> fields = ((RowType) raptorType).getFields().stream()
                .map(field -> new Field(field.getName(), toOrcFileType(field.getType(), typeManager)))
                .collect(toImmutableList());
        return RowType.from(fields);
    }
    return raptorType;
}
 
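Because the conversion recurses into element, key, value, and field types, nested Raptor types are rewritten bottom-up. A quick hedged check of that behavior:

// Hypothetical usage of the helper above: array(timestamp) becomes array(bigint),
// since the element type hits the TIMESTAMP branch first.
Type orcType = toOrcFileType(new ArrayType(TimestampType.TIMESTAMP), typeManager);
// orcType.getTypeSignature() now prints as array(bigint)
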
Example 14
Source Project: presto   Source File: HiveBucketing.java    License: Apache License 2.0
public static Optional<HiveBucketHandle> getHiveBucketHandle(Table table, TypeManager typeManager)
{
    Optional<HiveBucketProperty> hiveBucketProperty = table.getStorage().getBucketProperty();
    if (hiveBucketProperty.isEmpty()) {
        return Optional.empty();
    }

    Map<String, HiveColumnHandle> map = getRegularColumnHandles(table, typeManager).stream()
            .collect(Collectors.toMap(HiveColumnHandle::getName, identity()));

    ImmutableList.Builder<HiveColumnHandle> bucketColumns = ImmutableList.builder();
    for (String bucketColumnName : hiveBucketProperty.get().getBucketedBy()) {
        HiveColumnHandle bucketColumnHandle = map.get(bucketColumnName);
        if (bucketColumnHandle == null) {
            throw new PrestoException(
                    HIVE_INVALID_METADATA,
                    format("Table '%s.%s' is bucketed on non-existent column '%s'", table.getDatabaseName(), table.getTableName(), bucketColumnName));
        }
        bucketColumns.add(bucketColumnHandle);
    }

    BucketingVersion bucketingVersion = hiveBucketProperty.get().getBucketingVersion();
    int bucketCount = hiveBucketProperty.get().getBucketCount();
    return Optional.of(new HiveBucketHandle(bucketColumns.build(), bucketingVersion, bucketCount, bucketCount));
}
 
Example 15
Source Project: presto   Source File: HivePageSourceProvider.java    License: Apache License 2.0
@Inject
public HivePageSourceProvider(
        TypeManager typeManager,
        HiveConfig hiveConfig,
        HdfsEnvironment hdfsEnvironment,
        Set<HivePageSourceFactory> pageSourceFactories,
        Set<HiveRecordCursorProvider> cursorProviders,
        GenericHiveRecordCursorProvider genericCursorProvider)
{
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.hiveStorageTimeZone = requireNonNull(hiveConfig, "hiveConfig is null").getDateTimeZone();
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.pageSourceFactories = ImmutableSet.copyOf(requireNonNull(pageSourceFactories, "pageSourceFactories is null"));
    this.cursorProviders = ImmutableSet.<HiveRecordCursorProvider>builder()
            .addAll(requireNonNull(cursorProviders, "cursorProviders is null"))
            .add(genericCursorProvider) // generic should be last, as a fallback option
            .build();
}
 
Example 16
Source Project: presto   Source File: ElasticsearchConnectorFactory.java    License: Apache License 2.0
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
{
    requireNonNull(catalogName, "catalogName is null");
    requireNonNull(config, "config is null");

    Bootstrap app = new Bootstrap(
            new MBeanModule(),
            new MBeanServerModule(),
            new ConnectorObjectNameGeneratorModule(catalogName, "io.prestosql.elasticsearch", "presto.plugin.elasticsearch"),
            new JsonModule(),
            new ElasticsearchConnectorModule(),
            binder -> {
                binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                binder.bind(NodeManager.class).toInstance(context.getNodeManager());
            });

    Injector injector = app.strictConfig()
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    return injector.getInstance(ElasticsearchConnector.class);
}
 
Example 17
Source Project: presto   Source File: HivePageSource.java    License: Apache License 2.0
public StructCoercer(TypeManager typeManager, HiveType fromHiveType, HiveType toHiveType)
{
    requireNonNull(typeManager, "typeManager is null");
    requireNonNull(fromHiveType, "fromHiveType is null");
    requireNonNull(toHiveType, "toHiveType is null");
    List<HiveType> fromFieldTypes = extractStructFieldTypes(fromHiveType);
    List<HiveType> toFieldTypes = extractStructFieldTypes(toHiveType);
    ImmutableList.Builder<Optional<Function<Block, Block>>> coercers = ImmutableList.builder();
    this.nullBlocks = new Block[toFieldTypes.size()];
    for (int i = 0; i < toFieldTypes.size(); i++) {
        if (i >= fromFieldTypes.size()) {
            nullBlocks[i] = toFieldTypes.get(i).getType(typeManager).createBlockBuilder(null, 1).appendNull().build();
            coercers.add(Optional.empty());
        }
        else if (!fromFieldTypes.get(i).equals(toFieldTypes.get(i))) {
            coercers.add(Optional.of(createCoercer(typeManager, fromFieldTypes.get(i), toFieldTypes.get(i))));
        }
        else {
            coercers.add(Optional.empty());
        }
    }
    this.coercers = coercers.build();
}
 
Example 18
Source Project: presto   Source File: FilesTable.java    License: Apache License 2.0
public FilesTable(SchemaTableName tableName, Table icebergTable, Optional<Long> snapshotId, TypeManager typeManager)
{
    this.icebergTable = requireNonNull(icebergTable, "icebergTable is null");

    tableMetadata = new ConnectorTableMetadata(requireNonNull(tableName, "tableName is null"),
            ImmutableList.<ColumnMetadata>builder()
                    .add(new ColumnMetadata("file_path", VARCHAR))
                    .add(new ColumnMetadata("file_format", VARCHAR))
                    .add(new ColumnMetadata("record_count", BIGINT))
                    .add(new ColumnMetadata("file_size_in_bytes", BIGINT))
                    .add(new ColumnMetadata("column_sizes", typeManager.getType(mapType(INTEGER.getTypeSignature(), BIGINT.getTypeSignature()))))
                    .add(new ColumnMetadata("value_counts", typeManager.getType(mapType(INTEGER.getTypeSignature(), BIGINT.getTypeSignature()))))
                    .add(new ColumnMetadata("null_value_counts", typeManager.getType(mapType(INTEGER.getTypeSignature(), BIGINT.getTypeSignature()))))
                    .add(new ColumnMetadata("lower_bounds", typeManager.getType(mapType(INTEGER.getTypeSignature(), VARCHAR.getTypeSignature()))))
                    .add(new ColumnMetadata("upper_bounds", typeManager.getType(mapType(INTEGER.getTypeSignature(), VARCHAR.getTypeSignature()))))
                    .add(new ColumnMetadata("key_metadata", VARBINARY))
                    .add(new ColumnMetadata("split_offsets", new ArrayType(BIGINT)))
                    .build());
    this.snapshotId = requireNonNull(snapshotId, "snapshotId is null");
}
 
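The repeated map(integer, ...) lookups above are a natural candidate for a small local helper; a sketch, with an illustrative name:

// Hypothetical helper: centralizes the repeated integer-keyed map lookups above.
private static Type integerKeyedMap(TypeManager typeManager, Type valueType)
{
    return typeManager.getType(mapType(INTEGER.getTypeSignature(), valueType.getTypeSignature()));
}

// Usage: new ColumnMetadata("column_sizes", integerKeyedMap(typeManager, BIGINT))
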
Example 19
Source Project: presto   Source File: HiveCoercionRecordCursor.java    License: Apache License 2.0
public HiveCoercionRecordCursor(
        List<ColumnMapping> columnMappings,
        TypeManager typeManager,
        RecordCursor delegate)
{
    requireNonNull(columnMappings, "columnMappings is null");
    requireNonNull(typeManager, "typeManager is null");

    this.delegate = requireNonNull(delegate, "delegate is null");
    this.columnMappings = ImmutableList.copyOf(columnMappings);

    int size = columnMappings.size();

    this.coercers = new Coercer[size];

    BridgingRecordCursor bridgingRecordCursor = new BridgingRecordCursor();

    for (int columnIndex = 0; columnIndex < size; columnIndex++) {
        ColumnMapping columnMapping = columnMappings.get(columnIndex);

        if (columnMapping.getBaseTypeCoercionFrom().isPresent()) {
            coercers[columnIndex] = createCoercer(typeManager, columnMapping.getBaseTypeCoercionFrom().get(), columnMapping.getHiveColumnHandle().getHiveType(), bridgingRecordCursor);
        }
    }
}
 
Example 20
Source Project: presto   Source File: ThriftConnectorFactory.java    License: Apache License 2.0
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
{
    Bootstrap app = new Bootstrap(
            new MBeanModule(),
            new MBeanServerModule(),
            new ConnectorObjectNameGeneratorModule(catalogName, "io.prestosql.plugin.thrift", "presto.plugin.thrift"),
            new DriftNettyClientModule(),
            binder -> {
                binder.bind(TypeManager.class).toInstance(context.getTypeManager());
            },
            locationModule,
            new ThriftModule());

    Injector injector = app
            .strictConfig()
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    return injector.getInstance(ThriftConnector.class);
}
 
Example 21
Source Project: presto   Source File: BigQueryConnectorFactory.java    License: Apache License 2.0
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
{
    requireNonNull(catalogName, "catalogName is null");
    requireNonNull(config, "config is null");

    Bootstrap app = new Bootstrap(
            new JsonModule(),
            new BigQueryConnectorModule(context.getNodeManager()),
            binder -> {
                binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                binder.bind(NodeManager.class).toInstance(context.getNodeManager());
            });

    Injector injector = app.strictConfig()
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    return injector.getInstance(BigQueryConnector.class);
}
 
Example 22
Source Project: presto   Source File: RcFileFileWriterFactory.java    License: Apache License 2.0
public RcFileFileWriterFactory(
        HdfsEnvironment hdfsEnvironment,
        TypeManager typeManager,
        NodeVersion nodeVersion,
        DateTimeZone hiveStorageTimeZone,
        FileFormatDataSourceStats stats)
{
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.nodeVersion = requireNonNull(nodeVersion, "nodeVersion is null");
    this.hiveStorageTimeZone = requireNonNull(hiveStorageTimeZone, "hiveStorageTimeZone is null");
    this.stats = requireNonNull(stats, "stats is null");
}
 
Example 23
Source Project: presto   Source File: PrometheusClient.java    License: Apache License 2.0
@Inject
public PrometheusClient(PrometheusConnectorConfig config, JsonCodec<Map<String, Object>> metricCodec, TypeManager typeManager)
        throws URISyntaxException
{
    requireNonNull(config, "config is null");
    requireNonNull(metricCodec, "metricCodec is null");
    requireNonNull(typeManager, "typeManager is null");

    tableSupplier = Suppliers.memoizeWithExpiration(metricsSupplier(metricCodec, getPrometheusMetricsURI(config)),
            (long) config.getCacheDuration().getValue(), config.getCacheDuration().getUnit());
    this.typeManager = typeManager;
    this.config = config;
}
 
Example 24
Source Project: presto   Source File: ParquetFileWriterFactory.java    License: Apache License 2.0
public ParquetFileWriterFactory(
        HdfsEnvironment hdfsEnvironment,
        TypeManager typeManager,
        NodeVersion nodeVersion,
        DateTimeZone hiveStorageTimeZone)
{
    this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.nodeVersion = requireNonNull(nodeVersion, "nodeVersion is null");
    this.hiveStorageTimeZone = requireNonNull(hiveStorageTimeZone, "hiveStorageTimeZone is null");
}
 
Example 25
Source Project: presto   Source File: ExampleModule.java    License: Apache License 2.0
@Override
public void configure(Binder binder)
{
    binder.bind(TypeManager.class).toInstance(typeManager);

    binder.bind(ExampleConnector.class).in(Scopes.SINGLETON);
    binder.bind(ExampleMetadata.class).in(Scopes.SINGLETON);
    binder.bind(ExampleClient.class).in(Scopes.SINGLETON);
    binder.bind(ExampleSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(ExampleRecordSetProvider.class).in(Scopes.SINGLETON);
    configBinder(binder).bindConfig(ExampleConfig.class);

    jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
    jsonCodecBinder(binder).bindMapJsonCodec(String.class, listJsonCodec(ExampleTable.class));
}
 
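The TypeDeserializer bound above is what turns serialized type names back into Type objects through the TypeManager. A hedged sketch of the general shape, assuming Jackson's FromStringDeserializer; it handles only simple base names such as "varchar", while the class shipped with the project may parse full parameterized signatures:

// Hedged sketch of a Jackson deserializer resolving Type by name via the TypeManager.
public class TypeDeserializer
        extends FromStringDeserializer<Type>
{
    private final TypeManager typeManager;

    @Inject
    public TypeDeserializer(TypeManager typeManager)
    {
        super(Type.class);
        this.typeManager = requireNonNull(typeManager, "typeManager is null");
    }

    @Override
    protected Type _deserialize(String value, DeserializationContext context)
    {
        return typeManager.getType(new TypeSignature(value));
    }
}
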
Example 26
Source Project: presto   Source File: RedisConnectorFactory.java    License: Apache License 2.0
@Override
public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
{
    requireNonNull(catalogName, "catalogName is null");
    requireNonNull(config, "config is null");

    Bootstrap app = new Bootstrap(
            new JsonModule(),
            new RedisConnectorModule(),
            binder -> {
                binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                binder.bind(NodeManager.class).toInstance(context.getNodeManager());

                if (tableDescriptionSupplier.isPresent()) {
                    binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, RedisTableDescription>>>() {}).toInstance(tableDescriptionSupplier.get());
                }
                else {
                    binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, RedisTableDescription>>>() {})
                            .to(RedisTableDescriptionSupplier.class)
                            .in(Scopes.SINGLETON);
                }
            });

    Injector injector = app.strictConfig()
            .doNotInitializeLogging()
            .setRequiredConfigurationProperties(config)
            .initialize();

    return injector.getInstance(RedisConnector.class);
}
 
Example 27
Source Project: presto   Source File: PinotModule.java    License: Apache License 2.0
@Override
public void configure(Binder binder)
{
    configBinder(binder).bindConfig(PinotConfig.class);
    binder.bind(PinotConnector.class).in(Scopes.SINGLETON);
    binder.bind(PinotMetadata.class).in(Scopes.SINGLETON);
    binder.bind(PinotSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(PinotPageSourceProvider.class).in(Scopes.SINGLETON);
    binder.bind(PinotClient.class).in(Scopes.SINGLETON);
    binder.bind(Executor.class).annotatedWith(ForPinot.class)
            .toInstance(newCachedThreadPool(threadsNamed("pinot-metadata-fetcher-" + catalogName)));

    binder.bind(PinotSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(PinotNodePartitioningProvider.class).in(Scopes.SINGLETON);
    httpClientBinder(binder).bindHttpClient("pinot", ForPinot.class)
            .withConfigDefaults(cfg -> {
                cfg.setIdleTimeout(new Duration(300, SECONDS));
                cfg.setConnectTimeout(new Duration(300, SECONDS));
                cfg.setRequestTimeout(new Duration(300, SECONDS));
                cfg.setMaxConnectionsPerServer(250);
                cfg.setMaxContentLength(DataSize.of(32, MEGABYTE));
                cfg.setSelectorCount(10);
                cfg.setTimeoutThreads(8);
                cfg.setTimeoutConcurrency(4);
            });

    jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
    jsonBinder(binder).addDeserializerBinding(DataSchema.class).to(DataSchemaDeserializer.class);
    PinotClient.addJsonBinders(jsonCodecBinder(binder));
    binder.bind(MBeanServer.class).toInstance(new RebindSafeMBeanServer(getPlatformMBeanServer()));
    binder.bind(TypeManager.class).toInstance(typeManager);
    binder.bind(NodeManager.class).toInstance(nodeManager);
    binder.bind(PinotMetrics.class).in(Scopes.SINGLETON);
    newExporter(binder).export(PinotMetrics.class).as(generatedNameOf(PinotMetrics.class, catalogName));
    binder.bind(ConnectorNodePartitioningProvider.class).to(PinotNodePartitioningProvider.class).in(Scopes.SINGLETON);
}
 
Example 28
Source Project: presto   Source File: AccumuloModule.java    License: Apache License 2.0
@Override
public void configure(Binder binder)
{
    // Add appender to Log4J root logger
    JulAppender appender = new JulAppender(); //create appender
    appender.setLayout(new PatternLayout("%d %-5p %c - %m%n"));
    appender.setThreshold(Level.INFO);
    appender.activateOptions();
    org.apache.log4j.Logger.getRootLogger().addAppender(appender);

    binder.bind(TypeManager.class).toInstance(typeManager);

    binder.bind(AccumuloConnector.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloMetadata.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloMetadataFactory.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloClient.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloSplitManager.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloRecordSetProvider.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloPageSinkProvider.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloHandleResolver.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloSessionProperties.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloTableProperties.class).in(Scopes.SINGLETON);
    binder.bind(ZooKeeperMetadataManager.class).in(Scopes.SINGLETON);
    binder.bind(AccumuloTableManager.class).in(Scopes.SINGLETON);
    binder.bind(IndexLookup.class).in(Scopes.SINGLETON);
    binder.bind(ColumnCardinalityCache.class).in(Scopes.SINGLETON);
    binder.bind(Connector.class).toProvider(ConnectorProvider.class);

    configBinder(binder).bindConfig(AccumuloConfig.class);

    jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class);
    jsonCodecBinder(binder).bindMapJsonCodec(String.class, JsonCodec.listJsonCodec(AccumuloTable.class));
}
 
Example 29
Source Project: presto   Source File: PostgreSqlClient.java    License: Apache License 2.0
@Inject
public PostgreSqlClient(
        BaseJdbcConfig config,
        PostgreSqlConfig postgreSqlConfig,
        ConnectionFactory connectionFactory,
        TypeManager typeManager)
{
    super(config, "\"", connectionFactory);
    this.jsonType = typeManager.getType(new TypeSignature(JSON));
    this.uuidType = typeManager.getType(new TypeSignature(StandardTypes.UUID));
    this.varcharMapType = (MapType) typeManager.getType(mapType(VARCHAR.getTypeSignature(), VARCHAR.getTypeSignature()));

    List<String> tableTypes = new ArrayList<>();
    addAll(tableTypes, "TABLE", "VIEW", "MATERIALIZED VIEW", "FOREIGN TABLE");
    if (postgreSqlConfig.isIncludeSystemTables()) {
        addAll(tableTypes, "SYSTEM TABLE", "SYSTEM VIEW");
    }
    this.tableTypes = tableTypes.toArray(new String[0]);

    JdbcTypeHandle bigintTypeHandle = new JdbcTypeHandle(Types.BIGINT, Optional.of("bigint"), 0, 0, Optional.empty(), Optional.empty());
    this.aggregateFunctionRewriter = new AggregateFunctionRewriter(
            this::quoted,
            ImmutableSet.<AggregateFunctionRule>builder()
                    .add(new ImplementCountAll(bigintTypeHandle))
                    .add(new ImplementCount(bigintTypeHandle))
                    .add(new ImplementMinMax())
                    .add(new ImplementSum(PostgreSqlClient::toTypeHandle))
                    .add(new ImplementAvgFloatingPoint())
                    .add(new ImplementAvgDecimal())
                    .add(new ImplementAvgBigint())
                    .build());
}
 
Example 30
Source Project: presto   Source File: HiveSplitManager.java    License: Apache License 2.0
@Inject
public HiveSplitManager(
        HiveConfig hiveConfig,
        Function<HiveTransactionHandle, SemiTransactionalHiveMetastore> metastoreProvider,
        HivePartitionManager partitionManager,
        NamenodeStats namenodeStats,
        HdfsEnvironment hdfsEnvironment,
        DirectoryLister directoryLister,
        ExecutorService executorService,
        VersionEmbedder versionEmbedder,
        CoercionPolicy coercionPolicy,
        TypeManager typeManager)
{
    this(
            metastoreProvider,
            partitionManager,
            namenodeStats,
            hdfsEnvironment,
            directoryLister,
            versionEmbedder.embedVersion(new BoundedExecutor(executorService, hiveConfig.getMaxSplitIteratorThreads())),
            coercionPolicy,
            new CounterStat(),
            hiveConfig.getMaxOutstandingSplits(),
            hiveConfig.getMaxOutstandingSplitsSize(),
            hiveConfig.getMinPartitionBatchSize(),
            hiveConfig.getMaxPartitionBatchSize(),
            hiveConfig.getMaxInitialSplits(),
            hiveConfig.getSplitLoaderConcurrency(),
            hiveConfig.getMaxSplitsPerSecond(),
            hiveConfig.getRecursiveDirWalkerEnabled(),
            typeManager);
}