Java Code Examples for org.apache.hadoop.hive.conf.HiveConf#getVar()

The following examples show how to use org.apache.hadoop.hive.conf.HiveConf#getVar(). They are drawn from open-source projects; the source file and license are noted above each example.
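Both overloads of getVar() appear in the examples below: the instance method hiveConf.getVar(ConfVars) and the static helper HiveConf.getVar(Configuration, ConfVars). As a minimal sketch of the basic call pattern (the class name and the particular ConfVars entries are illustrative only, not taken from any project below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;

public class HiveConfGetVarSketch {
    public static void main(String[] args) {
        // Instance overload: read a string-valued Hive variable from a HiveConf.
        HiveConf hiveConf = new HiveConf();
        String warehouseDir = hiveConf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);

        // Static overload: read a variable by its ConfVars key from any Hadoop Configuration.
        Configuration conf = new Configuration();
        String defaultPartName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME);

        System.out.println(warehouseDir + " / " + defaultPartName);
    }
}

Note that ConfVars entries are typed: getVar() is for string-valued variables, while getIntVar(), getBoolVar(), and getLongVar() (seen in Examples 11 and 12) cover the other types.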
Example 1
Source File: TableEnvHiveConnectorTest.java    From flink with Apache License 2.0
@Test
public void testDefaultPartitionName() throws Exception {
	hiveShell.execute("create database db1");
	hiveShell.execute("create table db1.src (x int, y int)");
	hiveShell.execute("create table db1.part (x int) partitioned by (y int)");
	hiveShell.insertInto("db1", "src").addRow(1, 1).addRow(2, null).commit();

	TableEnvironment tableEnv = getTableEnvWithHiveCatalog();

	// test generating partitions with default name
	tableEnv.sqlUpdate("insert into db1.part select * from db1.src");
	tableEnv.execute("mytest");
	HiveConf hiveConf = hiveShell.getHiveConf();
	String defaultPartName = hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
	Table hiveTable = hmsClient.getTable("db1", "part");
	Path defaultPartPath = new Path(hiveTable.getSd().getLocation(), "y=" + defaultPartName);
	FileSystem fs = defaultPartPath.getFileSystem(hiveConf);
	assertTrue(fs.exists(defaultPartPath));

	// TODO: test reading from flink when https://issues.apache.org/jira/browse/FLINK-13279 is fixed
	assertEquals(Arrays.asList("1\t1", "2\tNULL"), hiveShell.executeQuery("select * from db1.part"));

	hiveShell.execute("drop database db1 cascade");
}
 
Example 2
Source File: TableEnvHiveConnectorITCase.java    From flink with Apache License 2.0
@Test
public void testDefaultPartitionName() throws Exception {
	TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
	tableEnv.executeSql("create database db1");
	tableEnv.executeSql("create table db1.src (x int, y int)");
	tableEnv.executeSql("create table db1.part (x int) partitioned by (y int)");
	HiveTestUtils.createTextTableInserter(hiveShell, "db1", "src").addRow(new Object[]{1, 1}).addRow(new Object[]{2, null}).commit();

	// test generating partitions with default name
	TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "insert into db1.part select * from db1.src");
	HiveConf hiveConf = hiveShell.getHiveConf();
	String defaultPartName = hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME);
	Table hiveTable = hmsClient.getTable("db1", "part");
	Path defaultPartPath = new Path(hiveTable.getSd().getLocation(), "y=" + defaultPartName);
	FileSystem fs = defaultPartPath.getFileSystem(hiveConf);
	assertTrue(fs.exists(defaultPartPath));

	TableImpl flinkTable = (TableImpl) tableEnv.sqlQuery("select y, x from db1.part order by x");
	List<Row> rows = Lists.newArrayList(flinkTable.execute().collect());
	assertEquals(Arrays.toString(new String[]{"1,1", "null,2"}), rows.toString());

	tableEnv.executeSql("drop database db1 cascade");
}
 
Example 3
Source File: HiveAuthzBindingHook.java    From incubator-sentry with Apache License 2.0
@VisibleForTesting
protected static AccessURI parseURI(String uri, boolean isLocal)
    throws SemanticException {
  try {
    HiveConf conf = SessionState.get().getConf();
    String warehouseDir = conf.getVar(ConfVars.METASTOREWAREHOUSE);
    Path warehousePath = new Path(warehouseDir);
    if (warehousePath.isAbsoluteAndSchemeAuthorityNull()) {
      FileSystem fs = FileSystem.get(conf);
      warehouseDir = fs.makeQualified(warehousePath).toUri().toString();
    }
    return new AccessURI(PathUtils.parseURI(warehouseDir, uri, isLocal));
  } catch (Exception e) {
    throw new SemanticException("Error parsing URI " + uri + ": " +
      e.getMessage(), e);
  }
}
 
Example 4
Source File: MetastoreAuthzBinding.java    From incubator-sentry with Apache License 2.0
public MetastoreAuthzBinding(Configuration config) throws Exception {
  super(config);
  String hiveAuthzConf = config.get(HiveAuthzConf.HIVE_SENTRY_CONF_URL);
  if (hiveAuthzConf == null
      || (hiveAuthzConf = hiveAuthzConf.trim()).isEmpty()) {
    throw new IllegalArgumentException("Configuration key "
        + HiveAuthzConf.HIVE_SENTRY_CONF_URL + " value '" + hiveAuthzConf
        + "' is invalid.");
  }
  try {
    authzConf = new HiveAuthzConf(new URL(hiveAuthzConf));
  } catch (MalformedURLException e) {
    throw new IllegalArgumentException("Configuration key "
        + HiveAuthzConf.HIVE_SENTRY_CONF_URL + " specifies a malformed URL '"
        + hiveAuthzConf + "'", e);
  }
  hiveConf = new HiveConf(config, this.getClass());
  this.authServer = new Server(authzConf.get(AuthzConfVars.AUTHZ_SERVER_NAME
      .getVar()));
  serviceUsers = ImmutableSet.copyOf(toTrimedLower(Sets.newHashSet(authzConf
      .getStrings(AuthzConfVars.AUTHZ_METASTORE_SERVICE_USERS.getVar(),
          new String[] { "" }))));
  warehouseDir = hiveConf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);

}
 
Example 5
Source File: HiveResolver.java    From pxf with Apache License 2.0
/**
 * Initializes the HiveResolver by parsing the request context and
 * obtaining the serde class name, the serde properties string and the
 * partition keys.
 *
 * @param requestContext request context
 */
@Override
public void initialize(RequestContext requestContext) {
    super.initialize(requestContext);

    hiveDefaultPartName = HiveConf.getVar(configuration, HiveConf.ConfVars.DEFAULTPARTITIONNAME);

    try {
        parseUserData(context);
        initPartitionFields();
        initSerde(context);
    } catch (Exception e) {
        throw new RuntimeException("Failed to initialize HiveResolver", e);
    }
}
 
Example 6
Source File: ReplicaTableFactory.java    From circus-train with Apache License 2.0
ReplicaTableFactory(
    HiveConf sourceHiveConf,
    TableTransformation tableTransformation,
    PartitionTransformation partitionTransformation,
    ColumnStatisticsTransformation columnStatisticsTransformation) {
  this(sourceHiveConf.getVar(ConfVars.METASTOREURIS), tableTransformation, partitionTransformation,
      columnStatisticsTransformation);
}
 
Example 7
Source File: HiveTableUtil.java    From flink with Apache License 2.0
public static void setStorageFormat(StorageDescriptor sd, String format, HiveConf hiveConf) {
	StorageFormatDescriptor storageFormatDescriptor = storageFormatFactory.get(format);
	checkArgument(storageFormatDescriptor != null, "Unknown storage format " + format);
	sd.setInputFormat(storageFormatDescriptor.getInputFormat());
	sd.setOutputFormat(storageFormatDescriptor.getOutputFormat());
	String serdeLib = storageFormatDescriptor.getSerde();
	if (serdeLib == null && storageFormatDescriptor instanceof RCFileStorageFormatDescriptor) {
		serdeLib = hiveConf.getVar(HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE);
	}
	if (serdeLib != null) {
		sd.getSerdeInfo().setSerializationLib(serdeLib);
	}
}
 
Example 8
Source File: Hive012Binding.java    From parquet-mr with Apache License 2.0
/**
 * Initialize the mapWork variable in order to get all the partitions and start updating the jobconf
 *
 * @param job
 */
private void init(final JobConf job) {
  final String plan = HiveConf.getVar(job, HiveConf.ConfVars.PLAN);
  if (mapWork == null && plan != null && plan.length() > 0) {
    mapWork = Utilities.getMapWork(job);
    pathToPartitionInfo.clear();
    for (final Map.Entry<String, PartitionDesc> entry : mapWork.getPathToPartitionInfo().entrySet()) {
      pathToPartitionInfo.put(new Path(entry.getKey()).toUri().getPath().toString(), entry.getValue());
    }
  }
}
 
Example 9
Source File: Hive010Binding.java    From parquet-mr with Apache License 2.0
/**
 * Initialize the mrwork variable in order to get all the partitions and start updating the jobconf
 *
 * @param job
 */
private void init(final JobConf job) {
  final String plan = HiveConf.getVar(job, HiveConf.ConfVars.PLAN);
  if (mrwork == null && plan != null && plan.length() > 0) {
    mrwork = Utilities.getMapRedWork(job);
    pathToPartitionInfo.clear();
    for (final Map.Entry<String, PartitionDesc> entry : mrwork.getPathToPartitionInfo().entrySet()) {
      pathToPartitionInfo.put(new Path(entry.getKey()).toUri().getPath().toString(), entry.getValue());
    }
  }
}
 
Example 10
Source File: HiveAuthzBindingHook.java    From incubator-sentry with Apache License 2.0
private boolean filterWriteEntity(WriteEntity writeEntity)
    throws AuthorizationException {
  // skip URI validation for session scratch file URIs
  if (writeEntity.isTempURI()) {
    return true;
  }
  try {
    if (writeEntity.getTyp().equals(Type.DFS_DIR)
        || writeEntity.getTyp().equals(Type.LOCAL_DIR)) {
      HiveConf conf = SessionState.get().getConf();
      String warehouseDir = conf.getVar(ConfVars.METASTOREWAREHOUSE);
      URI scratchURI = new URI(PathUtils.parseDFSURI(warehouseDir,
        conf.getVar(HiveConf.ConfVars.SCRATCHDIR)));
      URI requestURI = new URI(PathUtils.parseDFSURI(warehouseDir,
        writeEntity.getLocation().getPath()));
      LOG.debug("scratchURI = " + scratchURI + ", requestURI = " + requestURI);
      if (PathUtils.impliesURI(scratchURI, requestURI)) {
        return true;
      }
      URI localScratchURI = new URI(PathUtils.parseLocalURI(conf.getVar(HiveConf.ConfVars.LOCALSCRATCHDIR)));
      URI localRequestURI = new URI(PathUtils.parseLocalURI(writeEntity.getLocation().getPath()));
      LOG.debug("localScratchURI = " + localScratchURI + ", localRequestURI = " + localRequestURI);
      if (PathUtils.impliesURI(localScratchURI, localRequestURI)) {
        return true;
      }
    }
  } catch (Exception e) {
    throw new AuthorizationException("Failed to extract uri details", e);
  }
  return false;
}
 
Example 11
Source File: NiFiOrcUtils.java    From localization_nifi with Apache License 2.0
public static OrcFlowFileWriter createWriter(OutputStream flowFileOutputStream,
                                             Path path,
                                             Configuration conf,
                                             TypeInfo orcSchema,
                                             long stripeSize,
                                             CompressionKind compress,
                                             int bufferSize) throws IOException {

    int rowIndexStride = HiveConf.getIntVar(conf, HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE);

    boolean addBlockPadding = HiveConf.getBoolVar(conf, HIVE_ORC_DEFAULT_BLOCK_PADDING);

    String versionName = HiveConf.getVar(conf, HIVE_ORC_WRITE_FORMAT);
    OrcFile.Version versionValue = (versionName == null)
            ? OrcFile.Version.CURRENT
            : OrcFile.Version.byName(versionName);

    OrcFile.EncodingStrategy encodingStrategy;
    String enString = conf.get(HiveConf.ConfVars.HIVE_ORC_ENCODING_STRATEGY.varname);
    if (enString == null) {
        encodingStrategy = OrcFile.EncodingStrategy.SPEED;
    } else {
        encodingStrategy = OrcFile.EncodingStrategy.valueOf(enString);
    }

    OrcFile.CompressionStrategy compressionStrategy;
    String compString = conf.get(HiveConf.ConfVars.HIVE_ORC_COMPRESSION_STRATEGY.varname);
    if (compString == null) {
        compressionStrategy = OrcFile.CompressionStrategy.SPEED;
    } else {
        compressionStrategy = OrcFile.CompressionStrategy.valueOf(compString);
    }

    float paddingTolerance;
    paddingTolerance = conf.getFloat(HiveConf.ConfVars.HIVE_ORC_BLOCK_PADDING_TOLERANCE.varname,
            HiveConf.ConfVars.HIVE_ORC_BLOCK_PADDING_TOLERANCE.defaultFloatVal);

    long blockSizeValue = HiveConf.getLongVar(conf, HIVE_ORC_DEFAULT_BLOCK_SIZE);

    double bloomFilterFpp = BloomFilterIO.DEFAULT_FPP;

    ObjectInspector inspector = OrcStruct.createObjectInspector(orcSchema);

    return new OrcFlowFileWriter(flowFileOutputStream,
            path,
            conf,
            inspector,
            stripeSize,
            compress,
            bufferSize,
            rowIndexStride,
            getMemoryManager(conf),
            addBlockPadding,
            versionValue,
            null, // no callback
            encodingStrategy,
            compressionStrategy,
            paddingTolerance,
            blockSizeValue,
            null, // no Bloom Filter column names
            bloomFilterFpp);
}
 
Example 12
Source File: NiFiOrcUtils.java    From nifi with Apache License 2.0
public static OrcFlowFileWriter createWriter(OutputStream flowFileOutputStream,
                                             Path path,
                                             Configuration conf,
                                             TypeInfo orcSchema,
                                             long stripeSize,
                                             CompressionKind compress,
                                             int bufferSize) throws IOException {

    int rowIndexStride = HiveConf.getIntVar(conf, HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE);

    boolean addBlockPadding = HiveConf.getBoolVar(conf, HIVE_ORC_DEFAULT_BLOCK_PADDING);

    String versionName = HiveConf.getVar(conf, HIVE_ORC_WRITE_FORMAT);
    OrcFile.Version versionValue = (versionName == null)
            ? OrcFile.Version.CURRENT
            : OrcFile.Version.byName(versionName);

    OrcFile.EncodingStrategy encodingStrategy;
    String enString = conf.get(HiveConf.ConfVars.HIVE_ORC_ENCODING_STRATEGY.varname);
    if (enString == null) {
        encodingStrategy = OrcFile.EncodingStrategy.SPEED;
    } else {
        encodingStrategy = OrcFile.EncodingStrategy.valueOf(enString);
    }

    OrcFile.CompressionStrategy compressionStrategy;
    String compString = conf.get(HiveConf.ConfVars.HIVE_ORC_COMPRESSION_STRATEGY.varname);
    if (compString == null) {
        compressionStrategy = OrcFile.CompressionStrategy.SPEED;
    } else {
        compressionStrategy = OrcFile.CompressionStrategy.valueOf(compString);
    }

    float paddingTolerance;
    paddingTolerance = conf.getFloat(HiveConf.ConfVars.HIVE_ORC_BLOCK_PADDING_TOLERANCE.varname,
            HiveConf.ConfVars.HIVE_ORC_BLOCK_PADDING_TOLERANCE.defaultFloatVal);

    long blockSizeValue = HiveConf.getLongVar(conf, HIVE_ORC_DEFAULT_BLOCK_SIZE);

    double bloomFilterFpp = BloomFilterIO.DEFAULT_FPP;

    ObjectInspector inspector = OrcStruct.createObjectInspector(orcSchema);

    return new OrcFlowFileWriter(flowFileOutputStream,
            path,
            conf,
            inspector,
            stripeSize,
            compress,
            bufferSize,
            rowIndexStride,
            getMemoryManager(conf),
            addBlockPadding,
            versionValue,
            null, // no callback
            encodingStrategy,
            compressionStrategy,
            paddingTolerance,
            blockSizeValue,
            null, // no Bloom Filter column names
            bloomFilterFpp);
}
 
Example 13
Source File: SentryAuthorizerUtil.java    From incubator-sentry with Apache License 2.0
/**
 * Converts a URI string to an AccessURI, resolving it against the warehouse directory
 *
 * @param uri the URI string to parse
 * @param isLocal whether the URI refers to the local file system
 * @throws URISyntaxException
 */
public static AccessURI parseURI(String uri, boolean isLocal) throws URISyntaxException {
  HiveConf conf = SessionState.get().getConf();
  String warehouseDir = conf.getVar(ConfVars.METASTOREWAREHOUSE);
  return new AccessURI(PathUtils.parseURI(warehouseDir, uri, isLocal));
}