org.apache.hadoop.hive.metastore.conf.MetastoreConf Java Examples

The following examples show how to use org.apache.hadoop.hive.metastore.conf.MetastoreConf, drawn from several open source projects. Each example notes its originating project, source file, and license.
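Before the examples, a minimal self-contained sketch of the basic pattern (the port 9083 is only an illustrative default, not taken from any example below): MetastoreConf exposes typed getters and setters keyed by its ConfVars enum, layered on top of a plain Hadoop Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public class MetastoreConfSketch {
  public static void main(String[] args) {
    // Start from a Configuration pre-populated with metastore defaults.
    Configuration conf = MetastoreConf.newMetastoreConf();

    // Set and read back a typed variable through the ConfVars enum.
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:9083");
    System.out.println(MetastoreConf.getVar(conf, MetastoreConf.ConfVars.THRIFT_URIS));
  }
}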
Example #1
Source File: HiveTestDataGenerator.java    From dremio-oss with Apache License 2.0
private HiveConf newHiveConf() {
  HiveConf conf = new HiveConf(SessionState.class);

  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
  // The warehouse location must be set, and with the deprecated key:
  // otherwise the metastore defaults to /user/hive/warehouse when creating
  // a new database (the database location is now sent by the client to the server)
  HiveConf.setVar(conf, ConfVars.METASTOREWAREHOUSE, whDir);
  conf.set("mapred.job.tracker", "local");
  HiveConf.setVar(conf, ConfVars.SCRATCHDIR, getTempDir("scratch_dir"));
  HiveConf.setVar(conf, ConfVars.LOCALSCRATCHDIR, getTempDir("local_scratch_dir"));
  HiveConf.setVar(conf, ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  HiveConf.setBoolVar(conf, ConfVars.HIVE_CBO_ENABLED, false);

  return conf;
}
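As a hedged sanity check on the deprecated-key caveat above (assuming the test's whDir field), the value written through the legacy HiveConf key should also be visible through MetastoreConf, which falls back to the hive.* alias when the metastore.* key is unset:

  HiveConf conf = newHiveConf();
  // Both lookups should resolve to whDir.
  String viaMetastore = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.WAREHOUSE);
  String viaHive = HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE);
  assert viaMetastore.equals(viaHive);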
 
Example #2
Source File: TestPutHive3Streaming.java    From nifi with Apache License 2.0
@Override
StreamingConnection makeStreamingConnection(HiveOptions options, RecordReader reader) throws StreamingException {

    // Ensure the 'hive.metastore.uris' property matches the options.getMetaStoreURI() value (if it is set)
    String userDefinedMetastoreURI = options.getMetaStoreURI();
    if (null != userDefinedMetastoreURI) {
        assertEquals(userDefinedMetastoreURI, options.getHiveConf().get(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName()));
    }

    if (generateConnectFailure) {
        throw new StubConnectionError("Unit Test - Connection Error");
    }

    HiveRecordWriter hiveRecordWriter = new HiveRecordWriter(reader, getLogger());
    if (generatePermissionsFailure) {
        throw new StreamingException("Permission denied");
    }
    MockHiveStreamingConnection hiveConnection = new MockHiveStreamingConnection(options, reader, hiveRecordWriter, schema);
    hiveConnection.setGenerateWriteFailure(generateWriteFailure);
    hiveConnection.setGenerateSerializationError(generateSerializationError);
    hiveConnection.setGenerateCommitFailure(generateCommitFailure);
    return hiveConnection;
}
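The getHiveName() call above is what bridges the two key namespaces: each MetastoreConf.ConfVars entry carries both a metastore.* name and a legacy hive.* alias. A small hedged sketch (the printed values are the expected defaults for this enum constant):

  MetastoreConf.ConfVars uris = MetastoreConf.ConfVars.THRIFT_URIS;
  System.out.println(uris.getVarname());   // expected: metastore.thrift.uris
  System.out.println(uris.getHiveName());  // expected: hive.metastore.uris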
 
Example #3
Source File: HiveTables.java    From iceberg with Apache License 2.0
private ThriftHiveMetastore.Client getClient() {
  final URI metastoreUri = URI.create(MetastoreConf.getAsString(conf, THRIFT_URIS));
  final int socketTimeOut = (int) MetastoreConf.getTimeVar(conf, CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
  TTransport transport = new TSocket(metastoreUri.getHost(), metastoreUri.getPort(), socketTimeOut);
  try {
    transport.open();
  } catch (TTransportException e) {
    throw new RuntimeException("failed to open socket for " + metastoreUri + " with timeoutMillis " + socketTimeOut, e);
  }
  return new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
}
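A hedged usage sketch for the raw Thrift client above (Thrift-generated method names use underscores, and the transport should be closed when done; listDatabases is a hypothetical helper):

  private void listDatabases() throws TException {
    ThriftHiveMetastore.Client client = getClient();
    try {
      // get_all_databases is the Thrift-generated listing call.
      System.out.println(client.get_all_databases());
    } finally {
      // Close the underlying transport opened in getClient().
      client.getInputProtocol().getTransport().close();
    }
  }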
 
Example #4
Source File: HiveTestDataGenerator.java    From dremio-oss with Apache License 2.0
private HiveTestDataGenerator(Map<String, String> config) throws Exception {
  String root = getTempDir("");
  this.dbDir = root + "metastore_db";

  // the metastore helper will create the warehouse in a subdirectory
  String whDirBase = root + "warehouse";
  new File(whDirBase).mkdirs();

  Configuration conf = MetastoreConf.newMetastoreConf();

  // Force schema creation. Note that JDO creates the schema on demand
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.SCHEMA_VERIFICATION, false);
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AUTO_CREATE_ALL, true);
  // Disable direct SQL, as it can prevent some schemas from being created
  MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, false);
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.WAREHOUSE, whDirBase);
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, String.format("jdbc:derby:;databaseName=%s;create=true", dbDir));
  HiveConf.setVar(conf, ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  if (config != null) {
    config.forEach(conf::set);
  }

  this.port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  this.whDir = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.WAREHOUSE);

  this.config = new HashedMap<>();
  this.config.put(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
}
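To verify that the embedded metastore above actually came up, a hedged sketch (assumes a Hive 3.x classpath, where HiveMetaStoreClient accepts a plain Configuration; verifyMetastore is a hypothetical helper):

  private void verifyMetastore(Configuration conf, int port) throws Exception {
    // Point the client at the port returned by startMetaStoreWithRetry.
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // A fresh metastore should report at least the 'default' database.
      System.out.println(client.getAllDatabases());
    } finally {
      client.close();
    }
  }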
 
Example #5
Source File: HiveStreamingDataWriter.java    From spark-llap with Apache License 2.0
private void createStreamingConnection() throws StreamingException {
  final StrictDelimitedInputWriter strictDelimitedInputWriter = StrictDelimitedInputWriter.newBuilder()
    .withFieldDelimiter(',').build();
  HiveConf hiveConf = new HiveConf();
  hiveConf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), metastoreUri);
  // An isolated classloader and shade prefix are required for reflective instantiation of the
  // output format class when creating the hive streaming connection (the isolated classloader
  // loads different hive versions; the shade prefix is for HIVE-19494)
  hiveConf.setVar(HiveConf.ConfVars.HIVE_CLASSLOADER_SHADE_PREFIX, "shadehive");
  hiveConf.set(MetastoreConf.ConfVars.CATALOG_DEFAULT.getHiveName(), "hive");

  // HiveStreamingCredentialProvider requires the metastore uri and kerberos principal to obtain a
  // delegation token to talk to a secure metastore. When HiveConf is created above, hive-site.xml
  // is read from the classpath. This option is just an override in case the kerberos principal
  // cannot be obtained from hive-site.xml.
  if (metastoreKrbPrincipal != null) {
    hiveConf.set(MetastoreConf.ConfVars.KERBEROS_PRINCIPAL.getHiveName(), metastoreKrbPrincipal);
  }

  LOG.info("Creating hive streaming connection..");
  streamingConnection = HiveStreamingConnection.newBuilder()
    .withDatabase(db)
    .withTable(table)
    .withStaticPartitionValues(partition)
    .withRecordWriter(strictDelimitedInputWriter)
    .withHiveConf(hiveConf)
    .withAgentInfo(jobId + "(" + partitionId + ")")
    .connect();
  streamingConnection.beginTransaction();
  LOG.info("{} created hive streaming connection.", streamingConnection.getAgentInfo());
}
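After beginTransaction(), the typical lifecycle on the connection is write, commit, and eventually close. A hedged sketch (the comma-delimited payload format matches the StrictDelimitedInputWriter configured above; writeAndFinish is a hypothetical helper):

  private void writeAndFinish(byte[] record) throws StreamingException {
    // Records are raw bytes in the writer's delimited format, e.g. "1,hello".
    streamingConnection.write(record);
    streamingConnection.commitTransaction();
    // close() once this partition/task is finished writing.
    streamingConnection.close();
  }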