org.apache.hadoop.hive.conf.HiveConf Java Examples
The following examples show how to use org.apache.hadoop.hive.conf.HiveConf.
Each example lists its source file, the project it comes from, and that project's license.
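Before the project-specific examples, here is a minimal, self-contained sketch of the basic HiveConf API that those examples rely on: constructing a HiveConf (which extends Hadoop's Configuration and picks up hive-site.xml from the classpath), reading and writing well-known settings through the ConfVars enum, and setting arbitrary keys with the plain Configuration methods. The class name and the metastore URI value are illustrative only.

import org.apache.hadoop.hive.conf.HiveConf;

public class HiveConfBasics {
    public static void main(String[] args) {
        // HiveConf extends Hadoop's Configuration and loads hive-site.xml from the classpath.
        HiveConf conf = new HiveConf();

        // Well-known settings have typed accessors keyed by the ConfVars enum.
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
        String uris = conf.getVar(HiveConf.ConfVars.METASTOREURIS);

        // Arbitrary keys can still be set through the plain Configuration API.
        conf.set("hive.metastore.schema.verification", "false");

        System.out.println("Metastore URIs: " + uris);
    }
}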
Example #1
Source File: TableEnvHiveConnectorITCase.java From flink with Apache License 2.0
@Test
public void testDefaultPartitionName() throws Exception {
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    tableEnv.executeSql("create table db1.src (x int, y int)");
    tableEnv.executeSql("create table db1.part (x int) partitioned by (y int)");
    HiveTestUtils.createTextTableInserter(hiveShell, "db1", "src")
            .addRow(new Object[]{1, 1})
            .addRow(new Object[]{2, null})
            .commit();
    // test generating partitions with default name
    TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "insert into db1.part select * from db1.src");
    HiveConf hiveConf = hiveShell.getHiveConf();
    String defaultPartName = hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
    Table hiveTable = hmsClient.getTable("db1", "part");
    Path defaultPartPath = new Path(hiveTable.getSd().getLocation(), "y=" + defaultPartName);
    FileSystem fs = defaultPartPath.getFileSystem(hiveConf);
    assertTrue(fs.exists(defaultPartPath));
    TableImpl flinkTable = (TableImpl) tableEnv.sqlQuery("select y, x from db1.part order by x");
    List<Row> rows = Lists.newArrayList(flinkTable.execute().collect());
    assertEquals(Arrays.toString(new String[]{"1,1", "null,2"}), rows.toString());
    tableEnv.executeSql("drop database db1 cascade");
}
Example #2
Source File: SentryAuthorizerFactory.java From incubator-sentry with Apache License 2.0
/**
 * Get an instance of SentryAccessController from the configuration.
 * Returns DefaultSentryAccessController by default.
 *
 * @param conf
 * @param authzConf
 * @param authenticator
 * @param ctx
 * @throws HiveAuthzPluginException
 */
public static SentryHiveAccessController getAccessController(HiveConf conf,
        HiveAuthzConf authzConf, HiveAuthenticationProvider authenticator,
        HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
    Class<? extends SentryHiveAccessController> clazz =
            conf.getClass(HIVE_SENTRY_ACCESS_CONTROLLER, DefaultSentryAccessController.class,
                    SentryHiveAccessController.class);

    if (clazz == null) {
        // should not happen as default value is set
        throw new HiveAuthzPluginException("Configuration value " + HIVE_SENTRY_ACCESS_CONTROLLER
                + " is not set to valid SentryAccessController subclass");
    }

    try {
        return new DefaultSentryAccessController(conf, authzConf, authenticator, ctx);
    } catch (Exception e) {
        throw new HiveAuthzPluginException(e);
    }
}
Example #3
Source File: RangerHiveAuthorizerBase.java From ranger with Apache License 2.0
@Override
public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPluginException {
    LOG.debug("RangerHiveAuthorizerBase.applyAuthorizationConfigPolicy()");

    // from SQLStdHiveAccessController.applyAuthorizationConfigPolicy()
    if (mSessionContext != null && mSessionContext.getClientType() == CLIENT_TYPE.HIVESERVER2) {
        // Configure PREEXECHOOKS with DisallowTransformHook to disallow transform queries
        String hooks = hiveConf.getVar(ConfVars.PREEXECHOOKS).trim();

        if (hooks.isEmpty()) {
            hooks = DisallowTransformHook.class.getName();
        } else {
            hooks = hooks + "," + DisallowTransformHook.class.getName();
        }

        hiveConf.setVar(ConfVars.PREEXECHOOKS, hooks);

        SettableConfigUpdater.setHiveConfWhiteList(hiveConf);
    }
}
Example #4
Source File: CommonBeans.java From circus-train with Apache License 2.0
private Supplier<CloseableMetaStoreClient> metaStoreClientSupplier(
        HiveConf hiveConf,
        String name,
        MetastoreTunnel metastoreTunnel,
        MetaStoreClientFactory metaStoreClientFactory) {
    if (metastoreTunnel != null) {
        return new TunnellingMetaStoreClientSupplierBuilder()
                .withName(name)
                .withRoute(metastoreTunnel.getRoute())
                .withKnownHosts(metastoreTunnel.getKnownHosts())
                .withLocalHost(metastoreTunnel.getLocalhost())
                .withPort(metastoreTunnel.getPort())
                .withPrivateKeys(metastoreTunnel.getPrivateKeys())
                .withTimeout(metastoreTunnel.getTimeout())
                .withStrictHostKeyChecking(metastoreTunnel.getStrictHostKeyChecking())
                .build(hiveConf, metaStoreClientFactory);
    } else {
        return new HiveMetaStoreClientSupplier(metaStoreClientFactory, hiveConf, name);
    }
}
Example #5
Source File: HiveMetaStoreClientFactoryTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testCreate() throws TException {
    HiveConf hiveConf = new HiveConf();
    HiveMetaStoreClientFactory factory = new HiveMetaStoreClientFactory(hiveConf);

    // Since there is a hive-site.xml on the classpath, null out the metastore URIs here so the
    // test can proceed. Without a local hive-site.xml, this value would already be an empty string.
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "");
    hiveConf.set(HIVE_METASTORE_TOKEN_SIGNATURE, "");
    IMetaStoreClient msc = factory.create();

    String dbName = "test_db";
    String description = "test database";
    String location = "file:/tmp/" + dbName;
    Database db = new Database(dbName, description, location, null);

    msc.dropDatabase(dbName, true, true);
    msc.createDatabase(db);
    db = msc.getDatabase(dbName);
    Assert.assertEquals(db.getName(), dbName);
    Assert.assertEquals(db.getDescription(), description);
    Assert.assertEquals(db.getLocationUri(), location);
}
Example #6
Source File: FlinkStandaloneHiveServerContext.java From flink with Apache License 2.0
private void configureMetaStore(HiveConf conf) {
    String jdbcDriver = org.apache.derby.jdbc.EmbeddedDriver.class.getName();
    try {
        Class.forName(jdbcDriver);
    } catch (ClassNotFoundException e) {
        throw new RuntimeException(e);
    }

    // Set the Hive Metastore DB driver
    hiveConf.set("datanucleus.schema.autoCreateAll", "true");
    hiveConf.set("hive.metastore.schema.verification", "false");
    hiveConf.set("hive.metastore.uris", toHmsURI());
    // No pooling needed. This will save us a lot of threads
    hiveConf.set("datanucleus.connectionPoolingType", "None");

    conf.setBoolVar(METASTORE_VALIDATE_CONSTRAINTS, true);
    conf.setBoolVar(METASTORE_VALIDATE_COLUMNS, true);
    conf.setBoolVar(METASTORE_VALIDATE_TABLES, true);

    // disable authorization to avoid NPE
    conf.set(HIVE_AUTHORIZATION_MANAGER.varname,
            "org.apache.hive.hcatalog.storagehandler.DummyHCatAuthProvider");
}
Example #7
Source File: HiveConfFactoryTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testHiveConfFactory() throws Exception {
    HiveConf hiveConf = HiveConfFactory.get(Optional.absent(), SharedResourcesBrokerFactory.getImplicitBroker());
    HiveConf hiveConf1 = HiveConfFactory.get(Optional.absent(), SharedResourcesBrokerFactory.getImplicitBroker());
    Assert.assertEquals(hiveConf, hiveConf1);

    // When there's no hcatURI specified, the default hive-site should be loaded.
    Assert.assertTrue(hiveConf.getVar(METASTOREURIS).equals("file:///test"));
    Assert.assertTrue(hiveConf.get(HIVE_METASTORE_TOKEN_SIGNATURE).equals("file:///test"));

    HiveConf hiveConf2 = HiveConfFactory.get(Optional.of("hcat1"), SharedResourcesBrokerFactory.getImplicitBroker());
    HiveConf hiveConf3 = HiveConfFactory.get(Optional.of("hcat1"), SharedResourcesBrokerFactory.getImplicitBroker());
    Assert.assertEquals(hiveConf2, hiveConf3);

    HiveConf hiveConf4 = HiveConfFactory.get(Optional.of("hcat11"), SharedResourcesBrokerFactory.getImplicitBroker());
    Assert.assertNotEquals(hiveConf3, hiveConf4);
    Assert.assertNotEquals(hiveConf4, hiveConf);

    // The URIs should be correctly set.
    Assert.assertEquals(hiveConf3.getVar(METASTOREURIS), "hcat1");
    Assert.assertEquals(hiveConf3.get(HIVE_METASTORE_TOKEN_SIGNATURE), "hcat1");
    Assert.assertEquals(hiveConf4.getVar(METASTOREURIS), "hcat11");
    Assert.assertEquals(hiveConf4.get(HIVE_METASTORE_TOKEN_SIGNATURE), "hcat11");
}
Example #8
Source File: BaseTestHiveImpersonation.java From dremio-oss with Apache License 2.0
protected static void prepHiveConfAndData() throws Exception {
    hiveConf = new HiveConf();

    // Configure metastore persistence db location on local filesystem
    final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true", getTempDir("metastore_db"));
    hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);

    hiveConf.set(ConfVars.SCRATCHDIR.varname, "file:///" + getTempDir("scratch_dir"));
    hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));
    hiveConf.set(ConfVars.METASTORE_SCHEMA_VERIFICATION.varname, "false");
    hiveConf.set(ConfVars.METASTORE_AUTO_CREATE_ALL.varname, "true");
    hiveConf.set(ConfVars.HIVE_CBO_ENABLED.varname, "false");

    // Set MiniDFS conf in HiveConf
    hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));

    whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
    FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));

    studentData = getPhysicalFileFromResource("student.txt");
    voterData = getPhysicalFileFromResource("voter.txt");
}
Example #9
Source File: GlueMetastoreClientDelegateTest.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
@Before
public void setup() throws Exception {
    conf = new HiveConf();
    glueClient = mock(AWSGlue.class);
    wh = mock(Warehouse.class);
    metastoreClientDelegate = new GlueMetastoreClientDelegate(conf, new DefaultAWSGlueMetastore(conf, glueClient), wh);

    // Create a client delegate with CatalogId
    hiveConfCatalogId = new HiveConf();
    hiveConfCatalogId.set(GlueMetastoreClientDelegate.CATALOG_ID_CONF, CATALOG_ID);
    metastoreClientDelegateCatalogId = new GlueMetastoreClientDelegate(hiveConfCatalogId,
            new DefaultAWSGlueMetastore(hiveConfCatalogId, glueClient), wh);

    testDb = getTestDatabase();
    testTbl = getTestTable(testDb.getName());
    setupMockWarehouseForPath(new Path(testTbl.getStorageDescriptor().getLocation().toString()), false, true);
}
Example #10
Source File: HiveMetadataService.java From streamline with Apache License 2.0
/**
 * Creates a secure {@link HiveMetadataService}, which delegates to a {@link HiveMetaStoreClient}
 * instantiated with the {@link HiveConf} provided as the first parameter.
 */
public static HiveMetadataService newInstance(HiveConf hiveConf, SecurityContext securityContext,
        Subject subject, Component hiveMetastore, Collection<ComponentProcess> hiveMetastoreProcesses)
        throws MetaException, IOException, EntityNotFoundException, PrivilegedActionException {

    if (SecurityUtil.isKerberosAuthenticated(securityContext)) {
        UserGroupInformation.setConfiguration(hiveConf);   // Sets Kerberos rules
        UserGroupInformation.getUGIFromSubject(subject);   // Adds User principal to this subject

        return new HiveMetadataService(
                SecurityUtil.execute(() -> new HiveMetaStoreClient(hiveConf), securityContext, subject),
                hiveConf, securityContext, subject, hiveMetastore, hiveMetastoreProcesses);
    } else {
        return new HiveMetadataService(new HiveMetaStoreClient(hiveConf), hiveConf, securityContext,
                subject, hiveMetastore, hiveMetastoreProcesses);
    }
}
Example #11
Source File: HiveSyncTool.java From hudi with Apache License 2.0
public HiveSyncTool(HiveSyncConfig cfg, HiveConf configuration, FileSystem fs) {
    this.hoodieHiveClient = new HoodieHiveClient(cfg, configuration, fs);
    this.cfg = cfg;
    // Set partitionFields to empty, when the NonPartitionedExtractor is used
    if (NonPartitionedExtractor.class.getName().equals(cfg.partitionValueExtractorClass)) {
        LOG.warn("Set partitionFields to empty, since the NonPartitionedExtractor is used");
        cfg.partitionFields = new ArrayList<>();
    }
    switch (hoodieHiveClient.getTableType()) {
        case COPY_ON_WRITE:
            this.snapshotTableName = cfg.tableName;
            this.roTableTableName = Option.empty();
            break;
        case MERGE_ON_READ:
            this.snapshotTableName = cfg.tableName + SUFFIX_SNAPSHOT_TABLE;
            this.roTableTableName = cfg.skipROSuffix ? Option.of(cfg.tableName)
                    : Option.of(cfg.tableName + SUFFIX_READ_OPTIMIZED_TABLE);
            break;
        default:
            LOG.error("Unknown table type " + hoodieHiveClient.getTableType());
            throw new InvalidTableException(hoodieHiveClient.getBasePath());
    }
}
Example #12
Source File: HiveTestDataGenerator.java From dremio-oss with Apache License 2.0
private HiveConf newHiveConf() {
    HiveConf conf = new HiveConf(SessionState.class);

    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///");
    // Metastore needs to be set, and WITH the deprecated key :(
    // Otherwise, will default to /user/hive/warehouse when trying to create a new database
    // (database location is now sent by the client to the server...)
    HiveConf.setVar(conf, ConfVars.METASTOREWAREHOUSE, whDir);
    conf.set("mapred.job.tracker", "local");
    HiveConf.setVar(conf, ConfVars.SCRATCHDIR, getTempDir("scratch_dir"));
    HiveConf.setVar(conf, ConfVars.LOCALSCRATCHDIR, getTempDir("local_scratch_dir"));
    HiveConf.setVar(conf, ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
    HiveConf.setBoolVar(conf, ConfVars.HIVE_CBO_ENABLED, false);

    return conf;
}
Example #13
Source File: OrcFlowFileWriter.java From nifi with Apache License 2.0
StringTreeWriter(int columnId, ObjectInspector inspector, StreamFactory writer, boolean nullable)
        throws IOException {
    super(columnId, inspector, writer, nullable);
    this.isDirectV2 = isNewWriteFormat(writer);
    stringOutput = writer.createStream(id, OrcProto.Stream.Kind.DICTIONARY_DATA);
    lengthOutput = createIntegerWriter(writer.createStream(id, OrcProto.Stream.Kind.LENGTH), false, isDirectV2, writer);
    rowOutput = createIntegerWriter(writer.createStream(id, OrcProto.Stream.Kind.DATA), false, isDirectV2, writer);
    recordPosition(rowIndexPosition);
    rowIndexValueCount.add(0L);
    buildIndex = writer.buildIndex();
    directStreamOutput = writer.createStream(id, OrcProto.Stream.Kind.DATA);
    directLengthOutput = createIntegerWriter(writer.createStream(id, OrcProto.Stream.Kind.LENGTH), false, isDirectV2, writer);
    dictionaryKeySizeThreshold = writer.getConfiguration().getFloat(
            HiveConf.ConfVars.HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD.varname,
            HiveConf.ConfVars.HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD.defaultFloatVal);
    strideDictionaryCheck = writer.getConfiguration().getBoolean(
            HiveConf.ConfVars.HIVE_ORC_ROW_INDEX_STRIDE_DICTIONARY_CHECK.varname,
            HiveConf.ConfVars.HIVE_ORC_ROW_INDEX_STRIDE_DICTIONARY_CHECK.defaultBoolVal);
    doneDictionaryCheck = false;
}
Example #14
Source File: HdfsSnapshotLocationManager.java From circus-train with Apache License 2.0
HdfsSnapshotLocationManager(
        HiveConf sourceHiveConf,
        String eventId,
        Table sourceTable,
        List<Partition> sourcePartitions,
        boolean snapshotsDisabled,
        String tableBasePath,
        FileSystemFactory fileSystemFactory,
        SourceCatalogListener sourceCatalogListener) throws IOException {
    this.sourceHiveConf = sourceHiveConf;
    this.eventId = eventId;
    this.sourceTable = sourceTable;
    this.snapshotsDisabled = snapshotsDisabled;
    this.sourceCatalogListener = sourceCatalogListener;
    this.fileSystemFactory = fileSystemFactory;
    String sourceDataLocation;
    if (StringUtils.isNotBlank(tableBasePath)) {
        sourceDataLocation = tableBasePath;
    } else {
        sourceDataLocation = sourceTable.getSd().getLocation();
    }
    sourceDataPath = new Path(sourceDataLocation);
    copyBasePath = createSnapshot();
    String copyBaseLocation = copyBasePath.toString();
    subPaths = calculateSubPaths(sourcePartitions, sourceDataLocation, copyBaseLocation);
}
Example #15
Source File: TestHiveUtils.java From kite with Apache License 2.0
@Test
public void testRoundTripDescriptorNoCompressionProperty() throws Exception {
    String namespace = "test_ns";
    String name = "test_table";
    DatasetDescriptor original = new DatasetDescriptor.Builder()
            .schemaUri("resource:schema/user.avsc")
            .location("file:/tmp/data/test_table")
            .build();
    boolean external = true;
    Table table = HiveUtils.tableForDescriptor(namespace, name, original, external);
    assertEquals("snappy", table.getParameters().get("kite.compression.type"));
    table.getParameters().remove("kite.compression.type");

    Configuration conf = new HiveConf();
    DatasetDescriptor result = HiveUtils.descriptorForTable(conf, table);
    assertEquals(original, result);
}
Example #16
Source File: HiveJdbcCommon.java From nifi with Apache License 2.0
public static Configuration getConfigurationFromFiles(final String configFiles) {
    final Configuration hiveConfig = new HiveConf();
    if (StringUtils.isNotBlank(configFiles)) {
        for (final String configFile : configFiles.split(",")) {
            hiveConfig.addResource(new Path(configFile.trim()));
        }
    }
    return hiveConfig;
}
Example #17
Source File: AWSGlueMetastoreCacheDecorator.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
public AWSGlueMetastoreCacheDecorator(HiveConf conf, AWSGlueMetastore awsGlueMetastore) {
    super(awsGlueMetastore);

    checkNotNull(conf, "conf can not be null");
    this.conf = conf;

    databaseCacheEnabled = conf.getBoolean(AWS_GLUE_DB_CACHE_ENABLE, false);
    if (databaseCacheEnabled) {
        int dbCacheSize = conf.getInt(AWS_GLUE_DB_CACHE_SIZE, 0);
        int dbCacheTtlMins = conf.getInt(AWS_GLUE_DB_CACHE_TTL_MINS, 0);

        // validate config values for size and ttl
        validateConfigValueIsGreaterThanZero(AWS_GLUE_DB_CACHE_SIZE, dbCacheSize);
        validateConfigValueIsGreaterThanZero(AWS_GLUE_DB_CACHE_TTL_MINS, dbCacheTtlMins);

        // initialize database cache
        databaseCache = CacheBuilder.newBuilder().maximumSize(dbCacheSize)
                .expireAfterWrite(dbCacheTtlMins, TimeUnit.MINUTES).build();
    } else {
        databaseCache = null;
    }

    tableCacheEnabled = conf.getBoolean(AWS_GLUE_TABLE_CACHE_ENABLE, false);
    if (tableCacheEnabled) {
        int tableCacheSize = conf.getInt(AWS_GLUE_TABLE_CACHE_SIZE, 0);
        int tableCacheTtlMins = conf.getInt(AWS_GLUE_TABLE_CACHE_TTL_MINS, 0);

        // validate config values for size and ttl
        validateConfigValueIsGreaterThanZero(AWS_GLUE_TABLE_CACHE_SIZE, tableCacheSize);
        validateConfigValueIsGreaterThanZero(AWS_GLUE_TABLE_CACHE_TTL_MINS, tableCacheTtlMins);

        // initialize table cache
        tableCache = CacheBuilder.newBuilder().maximumSize(tableCacheSize)
                .expireAfterWrite(tableCacheTtlMins, TimeUnit.MINUTES).build();
    } else {
        tableCache = null;
    }

    logger.info("Constructed");
}
Example #18
Source File: HMSHandlerProxy.java From metacat with Apache License 2.0
/**
 * getProxy.
 *
 * @param hiveConf hive configuration
 * @param registry registry
 * @return MetacatHMSHandler
 * @throws Exception Exception
 */
public static IMetacatHMSHandler getProxy(final HiveConf hiveConf, final Registry registry)
        throws Exception {

    final HMSHandlerProxy handler = new HMSHandlerProxy(hiveConf, registry);
    return (IMetacatHMSHandler) Proxy.newProxyInstance(
            HMSHandlerProxy.class.getClassLoader(),
            new Class[]{IMetacatHMSHandler.class}, handler);
}
Example #19
Source File: HiveResolver.java From pxf with Apache License 2.0
/**
 * Initializes the HiveResolver by parsing the request context and
 * obtaining the serde class name, the serde properties string and the
 * partition keys.
 *
 * @param requestContext request context
 */
@Override
public void initialize(RequestContext requestContext) {
    super.initialize(requestContext);
    hiveDefaultPartName = HiveConf.getVar(configuration, HiveConf.ConfVars.DEFAULTPARTITIONNAME);

    try {
        parseUserData(context);
        initPartitionFields();
        initSerde(context);
    } catch (Exception e) {
        throw new RuntimeException("Failed to initialize HiveResolver", e);
    }
}
Example #20
Source File: ComparisonTool.java From circus-train with Apache License 2.0
@Bean
HiveEndpoint replica(
        ReplicaCatalog replicaCatalog,
        HiveConf replicaHiveConf,
        Supplier<CloseableMetaStoreClient> replicaMetaStoreClientSupplier) {
    return new ReplicaHiveEndpoint(replicaCatalog.getName(), replicaHiveConf, replicaMetaStoreClientSupplier);
}
Example #21
Source File: HiveMetastoreService.java From hadoop-etl-udfs with MIT License
public static HiveMetaStoreClient checkHiveMetaStoreClient(String hiveMetastoreUrl, boolean useKerberos,
        String hcatUserOrServicePrincipal) throws MetaException {
    HiveConf hiveConf = new HiveConf(new Configuration(), HiveConf.class);
    hiveConf.set("hive.metastore.local", "false");
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, hiveMetastoreUrl);
    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    if (useKerberos) {
        System.out.println("Add hive.metastore.kerberos.principal: " + hcatUserOrServicePrincipal);
        hiveConf.set("hive.metastore.kerberos.principal", hcatUserOrServicePrincipal);
        hiveConf.set("hive.metastore.sasl.enabled", "true");
    }
    return new HiveMetaStoreClient(hiveConf);
}
Example #22
Source File: Hive012Binding.java From parquet-mr with Apache License 2.0
/**
 * Initialize the mapWork variable in order to get all the partitions and start updating the jobconf.
 *
 * @param job
 */
private void init(final JobConf job) {
    final String plan = HiveConf.getVar(job, HiveConf.ConfVars.PLAN);
    if (mapWork == null && plan != null && plan.length() > 0) {
        mapWork = Utilities.getMapWork(job);
        pathToPartitionInfo.clear();
        for (final Map.Entry<String, PartitionDesc> entry : mapWork.getPathToPartitionInfo().entrySet()) {
            pathToPartitionInfo.put(new Path(entry.getKey()).toUri().getPath().toString(), entry.getValue());
        }
    }
}
Example #23
Source File: HiveAuthzBindingSessionHook.java From incubator-sentry with Apache License 2.0
/**
 * The session hook for sentry authorization that sets the required session level configuration:
 * 1. Setup the sentry hooks - semantic, exec and filter hooks
 * 2. Set additional config properties required for auth:
 *    set HIVE_EXTENDED_ENITITY_CAPTURE = true
 *    set SCRATCHDIRPERMISSION = 700
 * 3. Add sensitive config parameters to the config restrict list so that they can't be overridden by users
 */
@Override
public void run(HiveSessionHookContext sessionHookContext) throws HiveSQLException {
    // Add sentry hooks to the session configuration
    HiveConf sessionConf = sessionHookContext.getSessionConf();

    appendConfVar(sessionConf, ConfVars.SEMANTIC_ANALYZER_HOOK.varname, SEMANTIC_HOOK);
    HiveAuthzConf authzConf = HiveAuthzBindingHook.loadAuthzConf(sessionConf);
    String commandWhitelist =
            authzConf.get(HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST,
                    HiveAuthzConf.HIVE_SENTRY_SECURITY_COMMAND_WHITELIST_DEFAULT);
    sessionConf.setVar(ConfVars.HIVE_SECURITY_COMMAND_WHITELIST, commandWhitelist);
    sessionConf.setVar(ConfVars.SCRATCHDIRPERMISSION, SCRATCH_DIR_PERMISSIONS);
    sessionConf.setBoolVar(ConfVars.HIVE_CAPTURE_TRANSFORM_ENTITY, true);

    // set user name
    sessionConf.set(HiveAuthzConf.HIVE_ACCESS_SUBJECT_NAME, sessionHookContext.getSessionUser());
    sessionConf.set(HiveAuthzConf.HIVE_SENTRY_SUBJECT_NAME, sessionHookContext.getSessionUser());
    sessionConf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER,
            "org.apache.sentry.binding.hive.HiveAuthzBindingSessionHook$SentryHiveAuthorizerFactory");

    // Set MR ACLs to session user
    appendConfVar(sessionConf, JobContext.JOB_ACL_VIEW_JOB, sessionHookContext.getSessionUser());
    appendConfVar(sessionConf, JobContext.JOB_ACL_MODIFY_JOB, sessionHookContext.getSessionUser());

    // setup restrict list
    sessionConf.addToRestrictList(ACCESS_RESTRICT_LIST);
}
Example #24
Source File: HiveShimV200.java From flink with Apache License 2.0
@Override
public IMetaStoreClient getHiveMetastoreClient(HiveConf hiveConf) {
    try {
        Class<?>[] constructorArgTypes = {HiveConf.class};
        Object[] constructorArgs = {hiveConf};
        Method method = RetryingMetaStoreClient.class.getMethod("getProxy", HiveConf.class,
                constructorArgTypes.getClass(), constructorArgs.getClass(), String.class);
        // getProxy is a static method
        return (IMetaStoreClient) method.invoke(null, hiveConf, constructorArgTypes, constructorArgs,
                HiveMetaStoreClient.class.getName());
    } catch (Exception ex) {
        throw new CatalogException("Failed to create Hive Metastore client", ex);
    }
}
Example #25
Source File: GlueMetastoreClientDelegateTest.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
@Test
public void testExecutorService() throws Exception {
    Object defaultExecutorService = new DefaultExecutorServiceFactory().getExecutorService(conf);
    assertEquals("Default executor service should be used",
            metastoreClientDelegate.getExecutorService(), defaultExecutorService);

    HiveConf customConf = new HiveConf();
    customConf.set(GlueMetastoreClientDelegate.CATALOG_ID_CONF, CATALOG_ID);
    customConf.setClass(GlueMetastoreClientDelegate.CUSTOM_EXECUTOR_FACTORY_CONF,
            TestExecutorServiceFactory.class, ExecutorServiceFactory.class);
    GlueMetastoreClientDelegate customDelegate =
            new GlueMetastoreClientDelegate(customConf, mock(AWSGlueMetastore.class), mock(Warehouse.class));
    Object customExecutorService = new TestExecutorServiceFactory().getExecutorService(customConf);
    assertEquals("Custom executor service should be used",
            customDelegate.getExecutorService(), customExecutorService);
}
Example #26
Source File: HiveTestDataGenerator.java From dremio-oss with Apache License 2.0
public void executeDDL(String query, Map<String, String> confOverrides) throws IOException {
    final HiveConf conf = newHiveConf();
    for (Map.Entry<String, String> entry : confOverrides.entrySet()) {
        conf.set(entry.getKey(), entry.getValue());
    }
    runDDL(query, conf);
}
Example #27
Source File: FilterTool.java From circus-train with Apache License 2.0
@Bean
HiveEndpointFactory<ReplicaHiveEndpoint> replicaFactory(
        final ReplicaCatalog replicaCatalog,
        final HiveConf replicaHiveConf,
        final Supplier<CloseableMetaStoreClient> replicaMetaStoreClientSupplier) {
    return new HiveEndpointFactory<ReplicaHiveEndpoint>() {
        @Override
        public ReplicaHiveEndpoint newInstance(TableReplication tableReplication) {
            return new ReplicaHiveEndpoint(replicaCatalog.getName(), replicaHiveConf, replicaMetaStoreClientSupplier);
        }
    };
}
Example #28
Source File: AWSCatalogMetastoreClient.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
private AWSCatalogMetastoreClient(Builder builder) throws MetaException {
    conf = Objects.firstNonNull(builder.conf, new HiveConf());

    if (builder.wh != null) {
        this.wh = builder.wh;
    } else {
        this.wh = new Warehouse(conf);
    }

    if (builder.catalogId != null) {
        this.catalogId = builder.catalogId;
    } else {
        this.catalogId = null;
    }

    GlueClientFactory clientFactory = Objects.firstNonNull(builder.clientFactory, new AWSGlueClientFactory(conf));
    AWSGlueMetastoreFactory metastoreFactory = Objects.firstNonNull(builder.metastoreFactory,
            new AWSGlueMetastoreFactory());

    glueClient = clientFactory.newClient();
    AWSGlueMetastore glueMetastore = metastoreFactory.newMetastore(conf);
    glueMetastoreClientDelegate = new GlueMetastoreClientDelegate(this.conf, glueMetastore, wh);

    /**
     * It seems weird to create databases as part of glueClient construction. This
     * part should probably be moved to the section in hive code right after the
     * metastore glueClient is instantiated. For now, simply copying the
     * functionality in the thrift server
     */
    if (builder.createDefaults && !doesDefaultDBExist()) {
        createDefaultDatabase();
    }
}
Example #29
Source File: Main.java From kite with Apache License 2.0
public static void main(String[] args) throws Exception {
    // reconfigure logging with the kite CLI configuration
    PropertyConfigurator.configure(
            Main.class.getResource("/kite-cli-logging.properties"));
    Logger console = LoggerFactory.getLogger(Main.class);
    // use Log4j for any libraries using commons-logging
    LogFactory.getFactory().setAttribute(
            "org.apache.commons.logging.Log",
            "org.apache.commons.logging.impl.Log4JLogger");
    int rc = ToolRunner.run(new HiveConf(), new Main(console), args);
    System.exit(rc);
}
Example #30
Source File: SentryAuthorizerFactory.java From incubator-sentry with Apache License 2.0
/**
 * just for testing
 */
@VisibleForTesting
protected HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory metastoreClientFactory,
        HiveConf conf, HiveAuthzConf authzConf, HiveAuthenticationProvider authenticator,
        HiveAuthzSessionContext ctx) throws HiveAuthzPluginException {
    SentryHiveAccessController accessController =
            getAccessController(conf, authzConf, authenticator, ctx);
    SentryHiveAuthorizationValidator authzValidator =
            getAuthzValidator(conf, authzConf, authenticator);

    return new SentryHiveAuthorizer(accessController, authzValidator);
}