Java Code Examples for org.apache.hadoop.mapred.JobConf#getCredentials()

The following examples show how to use org.apache.hadoop.mapred.JobConf#getCredentials(). Each example is taken from an open source project; the source file, originating project, and license are noted above the snippet.
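Before the project examples, a short orientation: JobConf#getCredentials() returns the Credentials object that is shipped with the job, so tokens or secret keys added to it become available to the job's tasks at runtime. The snippet below is a minimal sketch of that pattern, not taken from any of the projects listed; the alias and secret value are hypothetical placeholders.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class CredentialsSketch {

  // Copy the submitting user's tokens into the job's credentials and attach
  // a secret key under an alias (alias and secret value are placeholders).
  public static void addCurrentUserCredentials(JobConf jobConf) throws IOException {
    // Credentials stored on the JobConf travel with the job to the cluster.
    Credentials jobCredentials = jobConf.getCredentials();

    // Forward any tokens already held by the current user.
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    for (Token<? extends TokenIdentifier> token : currentUser.getTokens()) {
      jobCredentials.addToken(token.getService(), token);
    }

    // Secret keys can be attached under an alias as well.
    jobCredentials.addSecretKey(new Text("my.secret.alias"), "my-secret".getBytes());
  }
}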
Example 1
Source File: HiveTableOutputFormat.java    From flink with Apache License 2.0
public HiveTableOutputFormat(JobConf jobConf, ObjectPath tablePath, CatalogTable table, HiveTablePartition hiveTablePartition,
							Properties tableProperties, boolean overwrite) {
	super(jobConf.getCredentials());

	Preconditions.checkNotNull(table, "table cannot be null");
	Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
	Preconditions.checkNotNull(tableProperties, "Table properties cannot be null");

	HadoopUtils.mergeHadoopConf(jobConf);
	this.jobConf = jobConf;
	this.tablePath = tablePath;
	this.partitionColumns = table.getPartitionKeys();
	TableSchema tableSchema = table.getSchema();
	this.fieldNames = tableSchema.getFieldNames();
	this.fieldTypes = tableSchema.getFieldDataTypes();
	this.hiveTablePartition = hiveTablePartition;
	this.tableProperties = tableProperties;
	this.overwrite = overwrite;
	isPartitioned = partitionColumns != null && !partitionColumns.isEmpty();
	isDynamicPartition = isPartitioned && partitionColumns.size() > hiveTablePartition.getPartitionSpec().size();
	hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
			"Hive version is not defined");
}
 
Example 2
Source File: HiveTableInputFormat.java    From flink with Apache License 2.0
public HiveTableInputFormat(
		JobConf jobConf,
		CatalogTable catalogTable,
		List<HiveTablePartition> partitions,
		int[] projectedFields,
		long limit,
		String hiveVersion,
		boolean useMapRedReader) {
	super(jobConf.getCredentials());
	this.partitionKeys = catalogTable.getPartitionKeys();
	this.fieldTypes = catalogTable.getSchema().getFieldDataTypes();
	this.fieldNames = catalogTable.getSchema().getFieldNames();
	this.limit = limit;
	this.hiveVersion = hiveVersion;
	checkNotNull(catalogTable, "catalogTable can not be null.");
	this.partitions = checkNotNull(partitions, "partitions can not be null.");
	this.jobConf = new JobConf(jobConf);
	int rowArity = catalogTable.getSchema().getFieldCount();
	selectedFields = projectedFields != null ? projectedFields : IntStream.range(0, rowArity).toArray();
	this.useMapRedReader = useMapRedReader;
}
 
Example 3
Source File: MRHelpers.java    From incubator-tez with Apache License 2.0
/**
 * Generate old-api mapred InputFormat splits
 * @param jobConf JobConf required by InputFormat class
 * @param inputSplitDir Directory in which to generate splits information
 *
 * @return InputSplitInfo containing the split files' information and the
 * number of splits generated, which is used to determine the parallelism
 * of the map stage.
 *
 * @throws IOException
 */
private static InputSplitInfoDisk writeOldSplits(JobConf jobConf,
    Path inputSplitDir) throws IOException {
  
  org.apache.hadoop.mapred.InputSplit[] splits = 
      generateOldSplits(jobConf, null, 0);
  
  JobSplitWriter.createSplitFiles(inputSplitDir, jobConf,
      inputSplitDir.getFileSystem(jobConf), splits);

  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splits.length);
  for (int i = 0; i < splits.length; ++i) {
    locationHints.add(
        new TaskLocationHint(new HashSet<String>(
            Arrays.asList(splits[i].getLocations())), null));
  }

  return new InputSplitInfoDisk(
      JobSubmissionFiles.getJobSplitFile(inputSplitDir),
      JobSubmissionFiles.getJobSplitMetaFile(inputSplitDir),
      splits.length, locationHints, jobConf.getCredentials());
}
 
Example 4
Source File: MRInputHelpers.java    From tez with Apache License 2.0
/**
 * Generate old-api mapred InputFormat splits
 * @param jobConf JobConf required by InputFormat class
 * @param inputSplitDir Directory in which to generate splits information
 *
 * @return InputSplitInfo containing the split files' information and the
 * number of splits generated, which is used to determine the parallelism
 * of the map stage.
 *
 * @throws IOException
 */
private static InputSplitInfoDisk writeOldSplits(JobConf jobConf,
                                                 Path inputSplitDir) throws IOException {

  org.apache.hadoop.mapred.InputSplit[] splits =
      generateOldSplits(jobConf, false, true, 0);

  JobSplitWriter.createSplitFiles(inputSplitDir, jobConf,
      inputSplitDir.getFileSystem(jobConf), splits);

  List<TaskLocationHint> locationHints =
      new ArrayList<TaskLocationHint>(splits.length);
  for (int i = 0; i < splits.length; ++i) {
    locationHints.add(
        TaskLocationHint.createTaskLocationHint(new HashSet<String>(
            Arrays.asList(splits[i].getLocations())), null)
    );
  }

  return new InputSplitInfoDisk(
      JobSubmissionFiles.getJobSplitFile(inputSplitDir),
      JobSubmissionFiles.getJobSplitMetaFile(inputSplitDir),
      splits.length, locationHints, jobConf.getCredentials());
}
 
Example 5
Source File: HBaseTap.java    From SpyGlass with Apache License 2.0
private void obtainToken(JobConf conf) {
  if (User.isHBaseSecurityEnabled(conf)) {
    String user = conf.getUser();
    LOG.info("obtaining HBase token for: {}", user);
    try {
      UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
      user = currentUser.getUserName();
      Credentials credentials = conf.getCredentials();
      for (Token t : currentUser.getTokens()) {
        LOG.debug("Token {} is available", t);
        if ("HBASE_AUTH_TOKEN".equalsIgnoreCase(t.getKind().toString()))
          credentials.addToken(t.getKind(), t);
      }
    } catch (IOException e) {
      throw new TapException("Unable to obtain HBase auth token for " + user, e);
    }
  }
}
 
Example 6
Source File: HadoopInputFormatBase.java    From Flink-CEPplus with Apache License 2.0
public HadoopInputFormatBase(org.apache.hadoop.mapred.InputFormat<K, V> mapredInputFormat, Class<K> key, Class<V> value, JobConf job) {
	super(job.getCredentials());
	this.mapredInputFormat = mapredInputFormat;
	this.keyClass = key;
	this.valueClass = value;
	HadoopUtils.mergeHadoopConf(job);
	this.jobConf = job;
	ReflectionUtils.setConf(mapredInputFormat, jobConf);
}
 
Example 7
Source File: HadoopInputFormatBase.java    From flink with Apache License 2.0
public HadoopInputFormatBase(org.apache.hadoop.mapred.InputFormat<K, V> mapredInputFormat, Class<K> key, Class<V> value, JobConf job) {
	super(job.getCredentials());
	this.mapredInputFormat = mapredInputFormat;
	this.keyClass = key;
	this.valueClass = value;
	HadoopUtils.mergeHadoopConf(job);
	this.jobConf = job;
	ReflectionUtils.setConf(mapredInputFormat, jobConf);
}
 
Example 8
Source File: HiveTableInputFormat.java    From flink with Apache License 2.0
public HiveTableInputFormat(
		JobConf jobConf,
		CatalogTable catalogTable,
		List<HiveTablePartition> partitions) {
	super(jobConf.getCredentials());
	checkNotNull(catalogTable, "catalogTable can not be null.");
	this.partitions = checkNotNull(partitions, "partitions can not be null.");

	this.jobConf = new JobConf(jobConf);
	this.partitionColNames = catalogTable.getPartitionKeys();
	rowArity = catalogTable.getSchema().getFieldCount();
}
 
Example 9
Source File: HadoopInputFormatBase.java    From flink with Apache License 2.0
public HadoopInputFormatBase(org.apache.hadoop.mapred.InputFormat<K, V> mapredInputFormat, Class<K> key, Class<V> value, JobConf job) {
	super(job.getCredentials());
	this.mapredInputFormat = mapredInputFormat;
	this.keyClass = key;
	this.valueClass = value;
	HadoopUtils.mergeHadoopConf(job);
	this.jobConf = job;
	ReflectionUtils.setConf(mapredInputFormat, jobConf);
}
 
Example 10
Source File: HadoopOutputFormatBase.java    From Flink-CEPplus with Apache License 2.0
public HadoopOutputFormatBase(org.apache.hadoop.mapred.OutputFormat<K, V> mapredOutputFormat, JobConf job) {
	super(job.getCredentials());
	this.mapredOutputFormat = mapredOutputFormat;
	HadoopUtils.mergeHadoopConf(job);
	this.jobConf = job;
}
 
Example 11
Source File: HadoopOutputFormatBase.java    From flink with Apache License 2.0
public HadoopOutputFormatBase(org.apache.hadoop.mapred.OutputFormat<K, V> mapredOutputFormat, JobConf job) {
	super(job.getCredentials());
	this.mapredOutputFormat = mapredOutputFormat;
	HadoopUtils.mergeHadoopConf(job);
	this.jobConf = job;
}
 
Example 12
Source File: TestMRAppMaster.java    From hadoop with Apache License 2.0
@Test
public void testMRAppMasterCredentials() throws Exception {

  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);

  // Simulate credentials passed to AM via client->RM->NM
  Credentials credentials = new Credentials();
  byte[] identifier = "MyIdentifier".getBytes();
  byte[] password = "MyPassword".getBytes();
  Text kind = new Text("MyTokenKind");
  Text service = new Text("host:port");
  Token<? extends TokenIdentifier> myToken =
      new Token<TokenIdentifier>(identifier, password, kind, service);
  Text tokenAlias = new Text("myToken");
  credentials.addToken(tokenAlias, myToken);

  Text appTokenService = new Text("localhost:0");
  Token<AMRMTokenIdentifier> appToken =
      new Token<AMRMTokenIdentifier>(identifier, password,
          AMRMTokenIdentifier.KIND_NAME, appTokenService);
  credentials.addToken(appTokenService, appToken);
  
  Text keyAlias = new Text("mySecretKeyAlias");
  credentials.addSecretKey(keyAlias, "mySecretKey".getBytes());
  Token<? extends TokenIdentifier> storedToken =
      credentials.getToken(tokenAlias);

  JobConf conf = new JobConf();

  Path tokenFilePath = new Path(testDir.getAbsolutePath(), "tokens-file");
  Map<String, String> newEnv = new HashMap<String, String>();
  newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, tokenFilePath
    .toUri().getPath());
  setNewEnvironmentHack(newEnv);
  credentials.writeTokenStorageFile(tokenFilePath, conf);

  ApplicationId appId = ApplicationId.newInstance(12345, 56);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId =
      ContainerId.newContainerId(applicationAttemptId, 546);
  String userName = UserGroupInformation.getCurrentUser().getShortUserName();

  // Create staging dir, so MRAppMaster doesn't barf.
  File stagingDir =
      new File(MRApps.getStagingAreaDir(conf, userName).toString());
  stagingDir.mkdirs();

  // Set the login-user to null, as that is how a real-world MRAppMaster starts.
  // Because the login user is null, the token file is read by the UGI.
  UserGroupInformation.setLoginUser(null);

  MRAppMasterTest appMaster =
      new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
        System.currentTimeMillis(), false, true);
  MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);

  // Now validate the task credentials
  Credentials appMasterCreds = appMaster.getCredentials();
  Assert.assertNotNull(appMasterCreds);
  Assert.assertEquals(1, appMasterCreds.numberOfSecretKeys());
  Assert.assertEquals(1, appMasterCreds.numberOfTokens());

  // Validate the tokens - app token should not be present
  Token<? extends TokenIdentifier> usedToken =
      appMasterCreds.getToken(tokenAlias);
  Assert.assertNotNull(usedToken);
  Assert.assertEquals(storedToken, usedToken);

  // Validate the keys
  byte[] usedKey = appMasterCreds.getSecretKey(keyAlias);
  Assert.assertNotNull(usedKey);
  Assert.assertEquals("mySecretKey", new String(usedKey));

  // The credentials should also be added to conf so that the OutputCommitter
  // can access them - the app token should not be present
  Credentials confCredentials = conf.getCredentials();
  Assert.assertEquals(1, confCredentials.numberOfSecretKeys());
  Assert.assertEquals(1, confCredentials.numberOfTokens());
  Assert.assertEquals(storedToken, confCredentials.getToken(tokenAlias));
  Assert.assertEquals("mySecretKey",
    new String(confCredentials.getSecretKey(keyAlias)));
  
  // Verify the AM's ugi - app token should be present
  Credentials ugiCredentials = appMaster.getUgi().getCredentials();
  Assert.assertEquals(1, ugiCredentials.numberOfSecretKeys());
  Assert.assertEquals(2, ugiCredentials.numberOfTokens());
  Assert.assertEquals(storedToken, ugiCredentials.getToken(tokenAlias));
  Assert.assertEquals(appToken, ugiCredentials.getToken(appTokenService));
  Assert.assertEquals("mySecretKey",
    new String(ugiCredentials.getSecretKey(keyAlias)));


}
 
Example 13
Source File: TestMRAppMaster.java    From big-c with Apache License 2.0
@Test
public void testMRAppMasterCredentials() throws Exception {

  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);

  // Simulate credentials passed to AM via client->RM->NM
  Credentials credentials = new Credentials();
  byte[] identifier = "MyIdentifier".getBytes();
  byte[] password = "MyPassword".getBytes();
  Text kind = new Text("MyTokenKind");
  Text service = new Text("host:port");
  Token<? extends TokenIdentifier> myToken =
      new Token<TokenIdentifier>(identifier, password, kind, service);
  Text tokenAlias = new Text("myToken");
  credentials.addToken(tokenAlias, myToken);

  Text appTokenService = new Text("localhost:0");
  Token<AMRMTokenIdentifier> appToken =
      new Token<AMRMTokenIdentifier>(identifier, password,
          AMRMTokenIdentifier.KIND_NAME, appTokenService);
  credentials.addToken(appTokenService, appToken);
  
  Text keyAlias = new Text("mySecretKeyAlias");
  credentials.addSecretKey(keyAlias, "mySecretKey".getBytes());
  Token<? extends TokenIdentifier> storedToken =
      credentials.getToken(tokenAlias);

  JobConf conf = new JobConf();

  Path tokenFilePath = new Path(testDir.getAbsolutePath(), "tokens-file");
  Map<String, String> newEnv = new HashMap<String, String>();
  newEnv.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, tokenFilePath
    .toUri().getPath());
  setNewEnvironmentHack(newEnv);
  credentials.writeTokenStorageFile(tokenFilePath, conf);

  ApplicationId appId = ApplicationId.newInstance(12345, 56);
  ApplicationAttemptId applicationAttemptId =
      ApplicationAttemptId.newInstance(appId, 1);
  ContainerId containerId =
      ContainerId.newContainerId(applicationAttemptId, 546);
  String userName = UserGroupInformation.getCurrentUser().getShortUserName();

  // Create staging dir, so MRAppMaster doesn't barf.
  File stagingDir =
      new File(MRApps.getStagingAreaDir(conf, userName).toString());
  stagingDir.mkdirs();

  // Set the login-user to null, as that is how a real-world MRAppMaster starts.
  // Because the login user is null, the token file is read by the UGI.
  UserGroupInformation.setLoginUser(null);

  MRAppMasterTest appMaster =
      new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
        System.currentTimeMillis(), false, true);
  MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);

  // Now validate the task credentials
  Credentials appMasterCreds = appMaster.getCredentials();
  Assert.assertNotNull(appMasterCreds);
  Assert.assertEquals(1, appMasterCreds.numberOfSecretKeys());
  Assert.assertEquals(1, appMasterCreds.numberOfTokens());

  // Validate the tokens - app token should not be present
  Token<? extends TokenIdentifier> usedToken =
      appMasterCreds.getToken(tokenAlias);
  Assert.assertNotNull(usedToken);
  Assert.assertEquals(storedToken, usedToken);

  // Validate the keys
  byte[] usedKey = appMasterCreds.getSecretKey(keyAlias);
  Assert.assertNotNull(usedKey);
  Assert.assertEquals("mySecretKey", new String(usedKey));

  // The credentials should also be added to conf so that the OutputCommitter
  // can access them - the app token should not be present
  Credentials confCredentials = conf.getCredentials();
  Assert.assertEquals(1, confCredentials.numberOfSecretKeys());
  Assert.assertEquals(1, confCredentials.numberOfTokens());
  Assert.assertEquals(storedToken, confCredentials.getToken(tokenAlias));
  Assert.assertEquals("mySecretKey",
    new String(confCredentials.getSecretKey(keyAlias)));
  
  // Verify the AM's ugi - app token should be present
  Credentials ugiCredentials = appMaster.getUgi().getCredentials();
  Assert.assertEquals(1, ugiCredentials.numberOfSecretKeys());
  Assert.assertEquals(2, ugiCredentials.numberOfTokens());
  Assert.assertEquals(storedToken, ugiCredentials.getToken(tokenAlias));
  Assert.assertEquals(appToken, ugiCredentials.getToken(appTokenService));
  Assert.assertEquals("mySecretKey",
    new String(ugiCredentials.getSecretKey(keyAlias)));


}
 
Example 14
Source File: HadoopOutputFormatBase.java    From flink with Apache License 2.0
public HadoopOutputFormatBase(org.apache.hadoop.mapred.OutputFormat<K, V> mapredOutputFormat, JobConf job) {
	super(job.getCredentials());
	this.mapredOutputFormat = mapredOutputFormat;
	HadoopUtils.mergeHadoopConf(job);
	this.jobConf = job;
}