org.apache.hadoop.mapreduce.security.TokenCache Java Examples

The following examples show how to use org.apache.hadoop.mapreduce.security.TokenCache. They are drawn from a range of open-source projects; the source file and originating project are noted above each example.
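All of the examples below revolve around one call: TokenCache.obtainTokensForNamenodes(credentials, paths, conf), which fills a Credentials object with delegation tokens for every filesystem behind the given paths. As a quick orientation, here is a minimal, self-contained sketch of that call. The class name and paths are made up for illustration, and on a cluster without Kerberos security enabled the call is effectively a no-op.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;

public class TokenCacheSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Credentials credentials = new Credentials();

        // Hypothetical input and output locations on HDFS.
        Path[] paths = {
            new Path("hdfs:///user/example/input"),
            new Path("hdfs:///user/example/output")
        };

        // Obtains (and caches) a delegation token for each distinct
        // filesystem behind the given paths, storing it in credentials.
        TokenCache.obtainTokensForNamenodes(credentials, paths, conf);

        System.out.println("Tokens obtained: " + credentials.numberOfTokens());
    }
}

The project examples that follow differ mainly in where the Path[] comes from: a job's output directory, distributed-cache archives and files, or namenodes listed explicitly in the job configuration.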
Example #1
Source File: Utils.java    From flink with Apache License 2.0
public static void setTokensFor(ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
	Credentials credentials = new Credentials();
	// for HDFS
	TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
	// for HBase
	obtainTokenForHBase(credentials, conf);
	// for user
	UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

	Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
	for (Token<? extends TokenIdentifier> token : usrTok) {
		final Text id = new Text(token.getIdentifier());
		LOG.info("Adding user token " + id + " with " + token);
		credentials.addToken(id, token);
	}
	try (DataOutputBuffer dob = new DataOutputBuffer()) {
		credentials.writeTokenStorageToStream(dob);

		if (LOG.isDebugEnabled()) {
			LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
		}

		ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
		amContainer.setTokens(securityTokens);
	}
}
 
Example #2
Source File: ZephyrOutputFormat.java    From zephyr with Apache License 2.0
@Override
public void checkOutputSpecs(FileSystem ignored, JobConf job) throws FileAlreadyExistsException, InvalidJobConfException, IOException {
    // Ensure that the output directory is set and not already there
    Path outDir = getOutputPath(job);
    if (outDir == null && job.getNumReduceTasks() != 0) {
        throw new InvalidJobConfException("Output directory not set in JobConf.");
    }
    if (outDir != null) {
        FileSystem fs = outDir.getFileSystem(job);
        // normalize the output directory
        outDir = fs.makeQualified(outDir);
        setOutputPath(job, outDir);

        // get delegation token for the outDir's file system
        TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[]{outDir}, job);
        String jobUuid = job.get("zephyr.job.uuid");
        if (jobUuid == null)
            throw new InvalidJobConfException("This output format REQUIRES the value zephyr.job.uuid to be specified in the job configuration!");
        // // check its existence
        // if (fs.exists(outDir)) {
        // throw new FileAlreadyExistsException("Output directory " + outDir
        // + " already exists");
        // }
    }
}
 
Example #3
Source File: SparkDataSet.java    From spliceengine with GNU Affero General Public License v3.0
/**
 * Overridden to avoid throwing an exception if the specified directory
 * for export already exists.
 */
@Override
public void checkOutputSpecs(JobContext job) throws FileAlreadyExistsException, IOException {
    Path outDir = getOutputPath(job);
    if(outDir == null) {
        throw new InvalidJobConfException("Output directory not set.");
    } else {
        TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[]{outDir}, job.getConfiguration());
        /*
        if(outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
            System.out.println("Output dir already exists, no problem");
            throw new FileAlreadyExistsException("Output directory " + outDir + " already exists");
        }
        */
    }
}
 
Example #4
Source File: NativeSparkDataSet.java    From spliceengine with GNU Affero General Public License v3.0
/**
 * Overridden to avoid throwing an exception if the specified directory
 * for export already exists.
 */
@Override
public void checkOutputSpecs(JobContext job) throws FileAlreadyExistsException, IOException {
    Path outDir = getOutputPath(job);
    if(outDir == null) {
        throw new InvalidJobConfException("Output directory not set.");
    } else {
        TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[]{outDir}, job.getConfiguration());
        /*
        if(outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
            System.out.println("Output dir already exists, no problem");
            throw new FileAlreadyExistsException("Output directory " + outDir + " already exists");
        }
        */
    }
}
 
Example #5
Source File: Utils.java    From stratosphere with Apache License 2.0
public static void setTokensFor(ContainerLaunchContext amContainer, Path[] paths, Configuration conf) throws IOException {
	Credentials credentials = new Credentials();
	// for HDFS
	TokenCache.obtainTokensForNamenodes(credentials, paths, conf);
	// for user
	UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();
	
	Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
	for(Token<? extends TokenIdentifier> token : usrTok) {
		final Text id = new Text(token.getIdentifier());
		LOG.info("Adding user token "+id+" with "+token);
		credentials.addToken(id, token);
	}
	DataOutputBuffer dob = new DataOutputBuffer();
	credentials.writeTokenStorageToStream(dob);
	LOG.debug("Wrote tokens. Credentials buffer length: "+dob.getLength());
	
	ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
	amContainer.setTokens(securityTokens);
}
 
Example #6
Source File: ImportInputFormat.java    From emr-dynamodb-connector with Apache License 2.0
private List<InputSplit> getSplitsFromManifest(JobConf job) throws IOException {
  Path[] dirs = getInputPaths(job);
  if (dirs.length == 0) {
    throw new IOException("No input path specified in job");
  } else if (dirs.length > 1) {
    throw new IOException("Will only look for manifests in a single input directory (" + dirs
        .length + " directories provided).");
  }
  TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job);

  Path dir = dirs[0];

  FileSystem fs = dir.getFileSystem(job);
  if (!fs.getFileStatus(dir).isDirectory()) {
    throw new IOException("Input path not a directory: " + dir);
  }

  Path manifestPath = new Path(dir, ExportManifestOutputFormat.MANIFEST_FILENAME);
  if (!fs.isFile(manifestPath)) {
    return null;
  }

  return parseManifest(fs, manifestPath, job);
}
 
Example #7
Source File: CopyOutputFormat.java    From big-c with Apache License 2.0
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
 
Example #8
Source File: FileOutputFormat.java    From big-c with Apache License 2.0
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
 
Example #9
Source File: ClientDistributedCacheManager.java    From big-c with Apache License 2.0
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);
  
  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];
  
  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }
  
  if (tfiles != null) {
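    // i still equals tarchives.length here (or 0 if there were no archives),
    // so the cache-file paths are appended after the archive paths in ps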
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }
  
  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
 
Example #10
Source File: FileOutputFormat.java    From big-c with Apache License 2.0
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);
    
    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);
    
    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
 
Example #11
Source File: CopyOutputFormat.java    From hadoop with Apache License 2.0
/** {@inheritDoc} */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  Path workingPath = getWorkingDirectory(conf);
  if (workingPath == null) {
    throw new IllegalStateException("Working directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(),
                                      new Path[] {workingPath}, conf);
}
 
Example #12
Source File: FileOutputFormat.java    From hadoop with Apache License 2.0
public void checkOutputSpecs(JobContext job
                             ) throws FileAlreadyExistsException, IOException{
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());

  if (outDir.getFileSystem(job.getConfiguration()).exists(outDir)) {
    throw new FileAlreadyExistsException("Output directory " + outDir + 
                                         " already exists");
  }
}
 
Example #13
Source File: ClientDistributedCacheManager.java    From hadoop with Apache License 2.0
/**
 * For each archive or cache file - get the corresponding delegation token
 * @param job
 * @param credentials
 * @throws IOException
 */
public static void getDelegationTokens(Configuration job,
    Credentials credentials) throws IOException {
  URI[] tarchives = DistributedCache.getCacheArchives(job);
  URI[] tfiles = DistributedCache.getCacheFiles(job);
  
  int size = (tarchives!=null? tarchives.length : 0) + (tfiles!=null ? tfiles.length :0);
  Path[] ps = new Path[size];
  
  int i = 0;
  if (tarchives != null) {
    for (i=0; i < tarchives.length; i++) {
      ps[i] = new Path(tarchives[i].toString());
    }
  }
  
  if (tfiles != null) {
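    // i still equals tarchives.length here (or 0 if there were no archives),
    // so the cache-file paths are appended after the archive paths in ps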
    for(int j=0; j< tfiles.length; j++) {
      ps[i+j] = new Path(tfiles[j].toString());
    }
  }
  
  TokenCache.obtainTokensForNamenodes(credentials, ps, job);
}
 
Example #14
Source File: FileOutputFormat.java    From hadoop with Apache License 2.0
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);
    
    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);
    
    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
 
Example #15
Source File: Utils.java    From Flink-CEPplus with Apache License 2.0
public static void setTokensFor(ContainerLaunchContext amContainer, List<Path> paths, Configuration conf) throws IOException {
	Credentials credentials = new Credentials();
	// for HDFS
	TokenCache.obtainTokensForNamenodes(credentials, paths.toArray(new Path[0]), conf);
	// for HBase
	obtainTokenForHBase(credentials, conf);
	// for user
	UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

	Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
	for (Token<? extends TokenIdentifier> token : usrTok) {
		final Text id = new Text(token.getIdentifier());
		LOG.info("Adding user token " + id + " with " + token);
		credentials.addToken(id, token);
	}
	try (DataOutputBuffer dob = new DataOutputBuffer()) {
		credentials.writeTokenStorageToStream(dob);

		if (LOG.isDebugEnabled()) {
			LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());
		}

		ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
		amContainer.setTokens(securityTokens);
	}
}
 
Example #16
Source File: JobSubmitter.java    From hadoop with Apache License 2.0
private void populateTokenCache(Configuration conf, Credentials credentials) 
throws IOException{
  readTokensFromFiles(conf, credentials);
  // add the delegation tokens from configuration
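  // (MRJobConfig.JOB_NAMENODES is the "mapreduce.job.hdfs-servers" property)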
  String [] nameNodes = conf.getStrings(MRJobConfig.JOB_NAMENODES);
  LOG.debug("adding the following namenodes' delegation tokens:" + 
      Arrays.toString(nameNodes));
  if(nameNodes != null) {
    Path [] ps = new Path[nameNodes.length];
    for(int i=0; i< nameNodes.length; i++) {
      ps[i] = new Path(nameNodes[i]);
    }
    TokenCache.obtainTokensForNamenodes(credentials, ps, conf);
  }
}
 
Example #17
Source File: CopyOutputFormat.java    From circus-train with Apache License 2.0
/** @inheritDoc */
@Override
public void checkOutputSpecs(JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();

  Path workingPath = getCommitDirectory(conf);
  if (getCommitDirectory(conf) == null) {
    throw new IllegalStateException("Commit directory not configured");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(context.getCredentials(), new Path[] { workingPath }, conf);
}
 
Example #18
Source File: YarnChild.java    From hadoop with Apache License 2.0
/**
 * Utility method to check if the Encrypted Spill Key needs to be set into the
 * user credentials of the user running the Map / Reduce Task
 * @param task The Map / Reduce task to set the Encrypted Spill information in
 * @throws Exception
 */
public static void setEncryptedSpillKeyIfRequired(Task task) throws
        Exception {
  if ((task != null) && (task.getEncryptedSpillKey() != null) && (task
          .getEncryptedSpillKey().length > 1)) {
    Credentials creds =
            UserGroupInformation.getCurrentUser().getCredentials();
    TokenCache.setEncryptedSpillKey(task.getEncryptedSpillKey(), creds);
    UserGroupInformation.getCurrentUser().addCredentials(creds);
  }
}
 
Example #19
Source File: TeraOutputFormat.java    From incubator-tez with Apache License 2.0
@Override
public void checkOutputSpecs(JobContext job
                            ) throws InvalidJobConfException, IOException {
  // Ensure that the output directory is set
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, job.getConfiguration());
}
 
Example #20
Source File: SecurityHelper.java    From spork with Apache License 2.0
public static void populateTokenCache(Configuration conf,
        Credentials credentials) throws IOException {
    readTokensFromFiles(conf, credentials);
    // add the delegation tokens from configuration
    String[] nameNodes = conf.getStrings(MRJobConfig.JOB_NAMENODES);
    LOG.debug("adding the following namenodes' delegation tokens:"
            + Arrays.toString(nameNodes));
    if (nameNodes != null) {
        Path[] ps = new Path[nameNodes.length];
        for (int i = 0; i < nameNodes.length; i++) {
            ps[i] = new Path(nameNodes[i]);
        }
        TokenCache.obtainTokensForNamenodes(credentials, ps, conf);
    }
}
 
Example #21
Source File: TokenUtils.java    From incubator-gobblin with Apache License 2.0
private static void getOtherNamenodesToken(List<String> otherNamenodes, Configuration conf, Credentials cred)
    throws IOException {
  LOG.info(OTHER_NAMENODES + ": " + otherNamenodes);
  Path[] ps = new Path[otherNamenodes.size()];
  for (int i = 0; i < ps.length; i++) {
    ps[i] = new Path(otherNamenodes.get(i).trim());
  }
  TokenCache.obtainTokensForNamenodes(cred, ps, conf);
  LOG.info("Successfully fetched tokens for: " + otherNamenodes);
}
 
Example #22
Source File: CompactionTool.java    From hbase with Apache License 2.0
/**
 * Execute compaction, using a Map-Reduce job.
 */
private int doMapReduce(final FileSystem fs, final Set<Path> toCompactDirs,
    final boolean compactOnce, final boolean major) throws Exception {
  Configuration conf = getConf();
  conf.setBoolean(CONF_COMPACT_ONCE, compactOnce);
  conf.setBoolean(CONF_COMPACT_MAJOR, major);

  Job job = new Job(conf);
  job.setJobName("CompactionTool");
  job.setJarByClass(CompactionTool.class);
  job.setMapperClass(CompactionMapper.class);
  job.setInputFormatClass(CompactionInputFormat.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setMapSpeculativeExecution(false);
  job.setNumReduceTasks(0);

  // add dependencies (including HBase ones)
  TableMapReduceUtil.addDependencyJars(job);

  Path stagingDir = JobUtil.getQualifiedStagingDir(conf);
  FileSystem stagingFs = stagingDir.getFileSystem(conf);
  try {
    // Create input file with the store dirs
    Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime());
    List<Path> storeDirs = CompactionInputFormat.createInputFile(fs, stagingFs,
        inputPath, toCompactDirs);
    CompactionInputFormat.addInputPath(job, inputPath);

    // Initialize credential for secure cluster
    TableMapReduceUtil.initCredentials(job);
    // Despite the method name this will get delegation token for the filesystem
    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      storeDirs.toArray(new Path[0]), conf);

    // Start the MR Job and wait
    return job.waitForCompletion(true) ? 0 : 1;
  } finally {
    fs.delete(stagingDir, true);
  }
}
 
Example #23
Source File: JobImpl.java    From hadoop with Apache License 2.0
protected void setup(JobImpl job) throws IOException {

      String oldJobIDString = job.oldJobId.toString();
      String user = 
        UserGroupInformation.getCurrentUser().getShortUserName();
      Path path = MRApps.getStagingAreaDir(job.conf, user);
      if(LOG.isDebugEnabled()) {
        LOG.debug("startJobs: parent=" + path + " child=" + oldJobIDString);
      }

      job.remoteJobSubmitDir =
          FileSystem.get(job.conf).makeQualified(
              new Path(path, oldJobIDString));
      job.remoteJobConfFile =
          new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);

      // Prepare the TaskAttemptListener server for authentication of Containers
      // TaskAttemptListener gets the information via jobTokenSecretManager.
      JobTokenIdentifier identifier =
          new JobTokenIdentifier(new Text(oldJobIDString));
      job.jobToken =
          new Token<JobTokenIdentifier>(identifier, job.jobTokenSecretManager);
      job.jobToken.setService(identifier.getJobId());
      // Add it to the jobTokenSecretManager so that TaskAttemptListener server
      // can authenticate containers(tasks)
      job.jobTokenSecretManager.addTokenForJob(oldJobIDString, job.jobToken);
      LOG.info("Adding job token for " + oldJobIDString
          + " to jobTokenSecretManager");

      // If the job client did not setup the shuffle secret then reuse
      // the job token secret for the shuffle.
      if (TokenCache.getShuffleSecretKey(job.jobCredentials) == null) {
        LOG.warn("Shuffle secret key missing from job credentials."
            + " Using job token secret as shuffle secret.");
        TokenCache.setShuffleSecretKey(job.jobToken.getPassword(),
            job.jobCredentials);
      }
    }
 
Example #24
Source File: MRAppMaster.java    From hadoop with Apache License 2.0
private void processRecovery() throws IOException{
  if (appAttemptID.getAttemptId() == 1) {
    return;  // no need to recover on the first attempt
  }

  boolean recoveryEnabled = getConfig().getBoolean(
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE,
      MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE_DEFAULT);

  boolean recoverySupportedByCommitter = isRecoverySupported();

  // If a shuffle secret was not provided by the job client then this app
  // attempt will generate one.  However that disables recovery if there
  // are reducers as the shuffle secret would be app attempt specific.
  int numReduceTasks = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0);
  boolean shuffleKeyValidForRecovery =
      TokenCache.getShuffleSecretKey(jobCredentials) != null;

  if (recoveryEnabled && recoverySupportedByCommitter
      && (numReduceTasks <= 0 || shuffleKeyValidForRecovery)) {
    LOG.info("Recovery is enabled. "
        + "Will try to recover from previous life on best effort basis.");
    try {
      parsePreviousJobHistory();
    } catch (IOException e) {
      LOG.warn("Unable to parse prior job history, aborting recovery", e);
      // try to get just the AMInfos
      amInfos.addAll(readJustAMInfos());
    }
  } else {
    LOG.info("Will not try to recover. recoveryEnabled: "
          + recoveryEnabled + " recoverySupportedByCommitter: "
          + recoverySupportedByCommitter + " numReduceTasks: "
          + numReduceTasks + " shuffleKeyValidForRecovery: "
          + shuffleKeyValidForRecovery + " ApplicationAttemptID: "
          + appAttemptID.getAttemptId());
    // Get the amInfos anyways whether recovery is enabled or not
    amInfos.addAll(readJustAMInfos());
  }
}
 
Example #25
Source File: DistCpV1.java    From big-c with Apache License 2.0
/** Sanity check for srcPath */
private static void checkSrcPath(JobConf jobConf, List<Path> srcPaths) 
throws IOException {
  List<IOException> rslt = new ArrayList<IOException>();
  List<Path> unglobbed = new LinkedList<Path>();
  
  Path[] ps = new Path[srcPaths.size()];
  ps = srcPaths.toArray(ps);
  TokenCache.obtainTokensForNamenodes(jobConf.getCredentials(), ps, jobConf);
  
  
  for (Path p : srcPaths) {
    FileSystem fs = p.getFileSystem(jobConf);
    FileStatus[] inputs = fs.globStatus(p);
    
    if(inputs != null && inputs.length > 0) {
      for (FileStatus onePath: inputs) {
        unglobbed.add(onePath.getPath());
      }
    } else {
      rslt.add(new IOException("Input source " + p + " does not exist."));
    }
  }
  if (!rslt.isEmpty()) {
    throw new InvalidInputException(rslt);
  }
  srcPaths.clear();
  srcPaths.addAll(unglobbed);
}
 
Example #26
Source File: TeraOutputFormat.java    From big-c with Apache License 2.0
@Override
public void checkOutputSpecs(JobContext job
                            ) throws InvalidJobConfException, IOException {
  // Ensure that the output directory is set
  Path outDir = getOutputPath(job);
  if (outDir == null) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }

  final Configuration jobConf = job.getConfiguration();

  // get delegation token for outDir's file system
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outDir }, jobConf);

  final FileSystem fs = outDir.getFileSystem(jobConf);

  if (fs.exists(outDir)) {
    // existing output dir is considered empty iff its only content is the
    // partition file.
    //
    final FileStatus[] outDirKids = fs.listStatus(outDir);
    boolean empty = false;
    if (outDirKids != null && outDirKids.length == 1) {
      final FileStatus st = outDirKids[0];
      final String fname = st.getPath().getName();
      empty =
        !st.isDirectory() && TeraInputFormat.PARTITION_FILENAME.equals(fname);
    }
    if (TeraSort.getUseSimplePartitioner(job) || !empty) {
      throw new FileAlreadyExistsException("Output directory " + outDir
          + " already exists");
    }
  }
}
 
Example #27
Source File: TestMerger.java    From big-c with Apache License 2.0
@Test
public void testEncryptedMerger() throws Throwable {
  jobConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  TokenCache.setEncryptedSpillKey(new byte[16], credentials);
  UserGroupInformation.getCurrentUser().addCredentials(credentials);
  testInMemoryAndOnDiskMerger();
}
 
Example #28
Source File: JobSubmitter.java    From big-c with Apache License 2.0
private void populateTokenCache(Configuration conf, Credentials credentials) 
throws IOException{
  readTokensFromFiles(conf, credentials);
  // add the delegation tokens from configuration
  String [] nameNodes = conf.getStrings(MRJobConfig.JOB_NAMENODES);
  LOG.debug("adding the following namenodes' delegation tokens:" + 
      Arrays.toString(nameNodes));
  if(nameNodes != null) {
    Path [] ps = new Path[nameNodes.length];
    for(int i=0; i< nameNodes.length; i++) {
      ps[i] = new Path(nameNodes[i]);
    }
    TokenCache.obtainTokensForNamenodes(credentials, ps, conf);
  }
}
 
Example #29
Source File: TestMerger.java    From hadoop with Apache License 2.0
@Test
public void testEncryptedMerger() throws Throwable {
  jobConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
  Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
  TokenCache.setEncryptedSpillKey(new byte[16], credentials);
  UserGroupInformation.getCurrentUser().addCredentials(credentials);
  testInMemoryAndOnDiskMerger();
}