Java Code Examples for org.apache.hadoop.security.Credentials#getAllTokens()

The following examples show how to use org.apache.hadoop.security.Credentials#getAllTokens(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
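Before the project examples, here is a minimal, self-contained sketch of the most common pattern: loading a Credentials object from a token storage file and iterating over everything getAllTokens() returns. The file path is illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ListTokens {
  public static void main(String[] args) throws IOException {
    // Load a serialized token storage file (the path is illustrative).
    Credentials creds = Credentials.readTokenStorageFile(
        new Path("/tmp/container.tokens"), new Configuration());
    // getAllTokens() returns every token held by this Credentials
    // instance, regardless of kind or service.
    for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
      System.out.println(token.getKind() + " -> " + token.getService());
    }
  }
}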
Example 1
Source File: AutoHDFS.java    From jstorm with Apache License 2.0
public void addTokensToUGI(Subject subject) {
    if(subject != null) {
        Set<Credentials> privateCredentials = subject.getPrivateCredentials(Credentials.class);
        if (privateCredentials != null) {
            for (Credentials cred : privateCredentials) {
                Collection<Token<? extends TokenIdentifier>> allTokens = cred.getAllTokens();
                if (allTokens != null) {
                    for (Token<? extends TokenIdentifier> token : allTokens) {
                        try {
                            UserGroupInformation.getCurrentUser().addToken(token);
                            LOG.info("Added delegation tokens to UGI.");
                        } catch (IOException e) {
                            LOG.error("Exception while trying to add tokens to ugi", e);
                        }
                    }
                }
            }
        }
    }
}
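When every token should end up in the current UGI anyway, UserGroupInformation#addCredentials (used in Example 8 below) can replace the per-token loop. A minimal sketch, assuming the same subject handling as above:

// Sketch: add each Credentials object wholesale instead of token by token.
for (Credentials cred : subject.getPrivateCredentials(Credentials.class)) {
    UserGroupInformation.getCurrentUser().addCredentials(cred);
}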
 
Example 2
Source File: TestBinaryTokenFile.java    From hadoop with Apache License 2.0
private static void createBinaryTokenFile(Configuration conf) {
  // Fetch delegation tokens and store in binary token file.
  try {
    Credentials cred1 = new Credentials();
    Credentials cred2 = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(cred1, new Path[] { p1 },
        conf);
    for (Token<? extends TokenIdentifier> t : cred1.getAllTokens()) {
      cred2.addToken(new Text(DELEGATION_TOKEN_KEY), t);
    }
    DataOutputStream os = new DataOutputStream(new FileOutputStream(
        binaryTokenFileName.toString()));
    try {
      cred2.writeTokenStorageToStream(os);
    } finally {
      os.close();
    }
  } catch (IOException e) {
    Assert.fail("Exception " + e);
  }
}
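The binary file written here is in the same storage format that Credentials.readTokenStorageFile consumes (see Examples 9, 13, and 16), so reading the tokens back is, roughly:

// Sketch: restore the credentials written above.
Credentials restored = Credentials.readTokenStorageFile(
    new Path(binaryTokenFileName.toString()), conf);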
 
Example 3
Source File: ProxyUtils.java    From incubator-gobblin with Apache License 2.0
public static void cancelTokens(State state)
    throws IOException, InterruptedException, TException {
  Preconditions.checkArgument(state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION),
      "Missing required property " + ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
  Preconditions.checkArgument(state.contains(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER),
      "Missing required property " + ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
  Preconditions.checkArgument(state.contains(ConfigurationKeys.KERBEROS_REALM),
      "Missing required property " + ConfigurationKeys.KERBEROS_REALM);

  String superUser = state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
  String keytabLocation = state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
  String realm = state.getProp(ConfigurationKeys.KERBEROS_REALM);

  UserGroupInformation.loginUserFromKeytab(HostUtils.getPrincipalUsingHostname(superUser, realm), keytabLocation);
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  UserGroupInformation realUser = currentUser.getRealUser();
  Credentials credentials = realUser.getCredentials();
  for (Token<?> token : credentials.getAllTokens()) {
    if (token.getKind().equals(DelegationTokenIdentifier.HIVE_DELEGATION_KIND)) {
      log.info("Cancelling hive token");
      HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(new HiveConf());
      hiveClient.cancelDelegationToken(token.encodeToUrlString());
    }
  }
}
 
Example 4
Source File: ContainerManagerImpl.java    From big-c with Apache License 2.0
private Credentials parseCredentials(ContainerLaunchContext launchContext)
    throws IOException {
  Credentials credentials = new Credentials();
  // //////////// Parse credentials
  ByteBuffer tokens = launchContext.getTokens();

  if (tokens != null) {
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokens.rewind();
    buf.reset(tokens);
    credentials.readTokenStorageStream(buf);
    if (LOG.isDebugEnabled()) {
      for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
        LOG.debug(tk.getService() + " = " + tk.toString());
      }
    }
  }
  // //////////// End of parsing credentials
  return credentials;
}
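The write side of this round trip, serializing a Credentials object back into the ByteBuffer carried by a ContainerLaunchContext, is the mirror image (Example 10 below shows it in context):

// Sketch of the inverse: Credentials -> ByteBuffer.
DataOutputBuffer dob = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dob);
ByteBuffer tokenBuffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());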
 
Example 5
Source File: YarnTestBase.java    From Flink-CEPplus with Apache License 2.0
public static boolean verifyTokenKindInContainerCredentials(final Collection<String> tokens, final String containerId)
	throws IOException {
	File cwd = new File("target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
	if (!cwd.exists() || !cwd.isDirectory()) {
		return false;
	}

	File containerTokens = findFile(cwd.getAbsolutePath(), new FilenameFilter() {
		@Override
		public boolean accept(File dir, String name) {
			return name.equals(containerId + ".tokens");
		}
	});

	if (containerTokens != null) {
		LOG.info("Verifying tokens in {}", containerTokens.getAbsolutePath());

		Credentials tmCredentials = Credentials.readTokenStorageFile(containerTokens, new Configuration());

		Collection<Token<? extends TokenIdentifier>> userTokens = tmCredentials.getAllTokens();
		Set<String> tokenKinds = new HashSet<>(4);
		for (Token<? extends TokenIdentifier> token : userTokens) {
			tokenKinds.add(token.getKind().toString());
		}

		return tokenKinds.containsAll(tokens);
	} else {
		LOG.warn("Unable to find credential file for container {}", containerId);
		return false;
	}
}
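A hypothetical call from a test, checking that a launched container received an HDFS delegation token:

// Hypothetical usage; containerId comes from the test's own bookkeeping.
boolean hasHdfsToken = verifyTokenKindInContainerCredentials(
    Collections.singletonList("HDFS_DELEGATION_TOKEN"), containerId);
assertTrue(hasHdfsToken);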
 
Example 6
Source File: YarnTestBase.java    From flink with Apache License 2.0
public static boolean verifyTokenKindInContainerCredentials(final Collection<String> tokens, final String containerId)
	throws IOException {
	File cwd = new File("target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
	if (!cwd.exists() || !cwd.isDirectory()) {
		return false;
	}

	File containerTokens = findFile(cwd.getAbsolutePath(), new FilenameFilter() {
		@Override
		public boolean accept(File dir, String name) {
			return name.equals(containerId + ".tokens");
		}
	});

	if (containerTokens != null) {
		LOG.info("Verifying tokens in {}", containerTokens.getAbsolutePath());

		Credentials tmCredentials = Credentials.readTokenStorageFile(containerTokens, new Configuration());

		Collection<Token<? extends TokenIdentifier>> userTokens = tmCredentials.getAllTokens();
		Set<String> tokenKinds = new HashSet<>(4);
		for (Token<? extends TokenIdentifier> token : userTokens) {
			tokenKinds.add(token.getKind().toString());
		}

		return tokenKinds.containsAll(tokens);
	} else {
		LOG.warn("Unable to find credential file for container {}", containerId);
		return false;
	}
}
 
Example 7
Source File: JobSubmitter.java    From big-c with Apache License 2.0
private void printTokens(JobID jobId,
    Credentials credentials) throws IOException {
  LOG.info("Submitting tokens for job: " + jobId);
  for (Token<?> token: credentials.getAllTokens()) {
    LOG.info(token);
  }
}
 
Example 8
Source File: YarnContainerSecurityManager.java    From incubator-gobblin with Apache License 2.0
@VisibleForTesting
void addCredentials(Credentials credentials) throws IOException {
  for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
    LOGGER.info("updating " + token.toString());
  }
  UserGroupInformation.getCurrentUser().addCredentials(credentials);
}
 
Example 9
Source File: HadoopSecurityManager_H_1_0.java    From azkaban-plugins with Apache License 2.0
@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
    throws HadoopSecurityManagerException {
  // nntoken
  Credentials cred = null;
  try {
    cred =
        Credentials.readTokenStorageFile(new Path(tokenFile.toURI()),
            new Configuration());
    for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {
      logger.info("Got token: " + t.toString());
      logger.info("Token kind: " + t.getKind());
      logger.info("Token id: " + new String(t.getIdentifier()));
      logger.info("Token service: " + t.getService());
      if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
        logger.info("Cancelling hive token " + new String(t.getIdentifier()));
        cancelHiveToken(t, userToProxy);
      } else if (t.getKind().equals(new Text("MAPREDUCE_DELEGATION_TOKEN"))) {
        logger.info("Cancelling mr job tracker token "
            + new String(t.getIdentifier()));
        cancelMRJobTrackerToken(t, userToProxy);
      } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        logger.info("Cancelling namenode token "
            + new String(t.getIdentifier()));
        cancelNameNodeToken(t, userToProxy);
      } else {
        logger.info("unknown token type " + t.getKind());
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }

}
 
Example 10
Source File: YarnClientImpl.java    From hadoop with Apache License 2.0
private void addTimelineDelegationToken(
    ContainerLaunchContext clc) throws YarnException, IOException {
  Credentials credentials = new Credentials();
  DataInputByteBuffer dibb = new DataInputByteBuffer();
  ByteBuffer tokens = clc.getTokens();
  if (tokens != null) {
    dibb.reset(tokens);
    credentials.readTokenStorageStream(dibb);
    tokens.rewind();
  }
  // If the timeline delegation token is already in the CLC, no need to add
  // one more
  for (org.apache.hadoop.security.token.Token<? extends TokenIdentifier> token : credentials
      .getAllTokens()) {
    if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) {
      return;
    }
  }
  org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier>
      timelineDelegationToken = getTimelineDelegationToken();
  if (timelineDelegationToken == null) {
    return;
  }
  credentials.addToken(timelineService, timelineDelegationToken);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Add timline delegation token into credentials: "
        + timelineDelegationToken);
  }
  DataOutputBuffer dob = new DataOutputBuffer();
  credentials.writeTokenStorageToStream(dob);
  tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  clc.setTokens(tokens);
}
 
Example 11
Source File: YarnTestBase.java    From flink with Apache License 2.0
public static boolean verifyTokenKindInContainerCredentials(final Collection<String> tokens, final String containerId)
	throws IOException {
	File cwd = new File("target/" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
	if (!cwd.exists() || !cwd.isDirectory()) {
		return false;
	}

	File containerTokens = TestUtils.findFile(cwd.getAbsolutePath(), new FilenameFilter() {
		@Override
		public boolean accept(File dir, String name) {
			return name.equals(containerId + ".tokens");
		}
	});

	if (containerTokens != null) {
		LOG.info("Verifying tokens in {}", containerTokens.getAbsolutePath());

		Credentials tmCredentials = Credentials.readTokenStorageFile(containerTokens, new Configuration());

		Collection<Token<? extends TokenIdentifier>> userTokens = tmCredentials.getAllTokens();
		Set<String> tokenKinds = new HashSet<>(4);
		for (Token<? extends TokenIdentifier> token : userTokens) {
			tokenKinds.add(token.getKind().toString());
		}

		return tokenKinds.containsAll(tokens);
	} else {
		LOG.warn("Unable to find credential file for container {}", containerId);
		return false;
	}
}
 
Example 12
Source File: AutoHDFS.java    From jstorm with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
@SuppressWarnings("unchecked")
public void renew(Map<String, String> credentials, Map topologyConf) {
    try {
        Credentials credential = getCredentials(credentials);
        if (credential != null) {
            Configuration configuration = new Configuration();
            Collection<Token<? extends TokenIdentifier>> tokens = credential.getAllTokens();

            if (tokens != null && !tokens.isEmpty()) {
                for (Token token : tokens) {
                    // We need to re-login: some other thread might have logged into Hadoop using
                    // its credentials (e.g. AutoHBase might also be part of nimbus auto creds).
                    login(configuration);
                    long expiration = (Long) token.renew(configuration);
                    LOG.info("HDFS delegation token renewed, new expiration time {}", expiration);
                }
            } else {
                LOG.debug("No tokens found for credentials, skipping renewal.");
            }
        }
    } catch (Exception e) {
        LOG.warn("could not renew the credentials, one of the possible reason is tokens are beyond " +
                "renewal period so attempting to get new tokens.", e);
        populateCredentials(credentials, topologyConf);
    }
}
 
Example 13
Source File: DelegationTokenFetcher.java    From big-c with Apache License 2.0
private static Collection<Token<?>> readTokens(Path file, Configuration conf)
    throws IOException {
  Credentials creds = Credentials.readTokenStorageFile(file, conf);
  return creds.getAllTokens();
}
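A hypothetical call site for this helper, dumping the kind and service of each stored token (the path is illustrative):

// Hypothetical usage of the helper above.
Collection<Token<?>> tokens = readTokens(new Path("/tmp/job.tokens"), new Configuration());
for (Token<?> t : tokens) {
  System.out.println(t.getKind() + " for service " + t.getService());
}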
 
Example 14
Source File: UtilsTest.java    From flink with Apache License 2.0
@Test
public void testCreateTaskExecutorCredentials() throws Exception {
	File root = temporaryFolder.getRoot();
	File home = new File(root, "home");
	boolean created = home.mkdir();
	assertTrue(created);

	Configuration flinkConf = new Configuration();
	YarnConfiguration yarnConf = new YarnConfiguration();

	Map<String, String> env = new HashMap<>();
	env.put(YarnConfigKeys.ENV_APP_ID, "foo");
	env.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, home.getAbsolutePath());
	env.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, "");
	env.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, "");
	env.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, "foo");
	env.put(YarnConfigKeys.FLINK_DIST_JAR, new YarnLocalResourceDescriptor(
		"flink.jar",
		new Path(root.toURI()),
		0,
		System.currentTimeMillis(),
		LocalResourceVisibility.APPLICATION).toString());
	env = Collections.unmodifiableMap(env);

	File credentialFile = temporaryFolder.newFile("container_tokens");
	final Text amRmTokenKind = AMRMTokenIdentifier.KIND_NAME;
	final Text hdfsDelegationTokenKind = new Text("HDFS_DELEGATION_TOKEN");
	final Text service = new Text("test-service");
	Credentials amCredentials = new Credentials();
	amCredentials.addToken(amRmTokenKind, new Token<>(new byte[4], new byte[4], amRmTokenKind, service));
	amCredentials.addToken(hdfsDelegationTokenKind, new Token<>(new byte[4], new byte[4],
		hdfsDelegationTokenKind, service));
	amCredentials.writeTokenStorageFile(new org.apache.hadoop.fs.Path(credentialFile.getAbsolutePath()), yarnConf);

	TaskExecutorProcessSpec spec = TaskExecutorProcessUtils
		.newProcessSpecBuilder(flinkConf)
		.withTotalProcessMemory(MemorySize.parse("1g"))
		.build();
	ContaineredTaskManagerParameters tmParams = new ContaineredTaskManagerParameters(spec, new HashMap<>(1));
	Configuration taskManagerConf = new Configuration();

	String workingDirectory = root.getAbsolutePath();
	Class<?> taskManagerMainClass = YarnTaskExecutorRunner.class;
	ContainerLaunchContext ctx;

	final Map<String, String> originalEnv = System.getenv();
	try {
		Map<String, String> systemEnv = new HashMap<>(originalEnv);
		systemEnv.put("HADOOP_TOKEN_FILE_LOCATION", credentialFile.getAbsolutePath());
		CommonTestUtils.setEnv(systemEnv);
		ctx = Utils.createTaskExecutorContext(flinkConf, yarnConf, env, tmParams,
			"", workingDirectory, taskManagerMainClass, LOG);
	} finally {
		CommonTestUtils.setEnv(originalEnv);
	}

	Credentials credentials = new Credentials();
	try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(ctx.getTokens().array()))) {
		credentials.readTokenStorageStream(dis);
	}
	Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
	boolean hasHdfsDelegationToken = false;
	boolean hasAmRmToken = false;
	for (Token<? extends TokenIdentifier> token : tokens) {
		if (token.getKind().equals(amRmTokenKind)) {
			hasAmRmToken = true;
		} else if (token.getKind().equals(hdfsDelegationTokenKind)) {
			hasHdfsDelegationToken = true;
		}
	}
	assertTrue(hasHdfsDelegationToken);
	assertFalse(hasAmRmToken);
}
 
Example 15
Source File: DelegationTokenRenewer.java    From big-c with Apache License 2.0
private void handleAppSubmitEvent(DelegationTokenRenewerAppSubmitEvent evt)
    throws IOException, InterruptedException {
  ApplicationId applicationId = evt.getApplicationId();
  Credentials ts = evt.getCredentials();
  boolean shouldCancelAtEnd = evt.shouldCancelAtEnd();
  if (ts == null) {
    return; // nothing to add
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Registering tokens for renewal for:" +
        " appId = " + applicationId);
  }

  Collection<Token<?>> tokens = ts.getAllTokens();
  long now = System.currentTimeMillis();

  // find tokens for renewal, but don't add timers until we know
  // all renewable tokens are valid
  // At RM restart it is safe to assume that all the previously added tokens
  // are valid
  appTokens.put(applicationId,
    Collections.synchronizedSet(new HashSet<DelegationTokenToRenew>()));
  Set<DelegationTokenToRenew> tokenList = new HashSet<DelegationTokenToRenew>();
  boolean hasHdfsToken = false;
  for (Token<?> token : tokens) {
    if (token.isManaged()) {
      if (token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        LOG.info(applicationId + " found existing hdfs token " + token);
        hasHdfsToken = true;
      }

      DelegationTokenToRenew dttr = allTokens.get(token);
      if (dttr == null) {
        dttr = new DelegationTokenToRenew(Arrays.asList(applicationId), token,
            getConfig(), now, shouldCancelAtEnd, evt.getUser());
        try {
          renewToken(dttr);
        } catch (IOException ioe) {
          throw new IOException("Failed to renew token: " + dttr.token, ioe);
        }
      }
      tokenList.add(dttr);
    }
  }

  if (!tokenList.isEmpty()) {
    // Renewing token and adding it to timer calls are separated purposefully
    // If user provides incorrect token then it should not be added for
    // renewal.
    for (DelegationTokenToRenew dtr : tokenList) {
      DelegationTokenToRenew currentDtr =
          allTokens.putIfAbsent(dtr.token, dtr);
      if (currentDtr != null) {
        // another job beat us
        currentDtr.referringAppIds.add(applicationId);
        appTokens.get(applicationId).add(currentDtr);
      } else {
        appTokens.get(applicationId).add(dtr);
        setTimerForTokenRenewal(dtr);
      }
    }
  }

  if (!hasHdfsToken) {
    requestNewHdfsDelegationToken(Arrays.asList(applicationId), evt.getUser(),
      shouldCancelAtEnd);
  }
}
 
Example 16
Source File: HadoopSecurityManager_H_2_0.java    From azkaban-plugins with Apache License 2.0
@Override
public void cancelTokens(File tokenFile, String userToProxy, Logger logger)
    throws HadoopSecurityManagerException {
  // nntoken
  Credentials cred = null;
  try {
    cred =
        Credentials.readTokenStorageFile(new Path(tokenFile.toURI()),
            new Configuration());
    for (Token<? extends TokenIdentifier> t : cred.getAllTokens()) {

      logger.info("Got token: " + t.toString());
      logger.info("Token kind: " + t.getKind());
      logger.info("Token id: " + new String(t.getIdentifier()));
      logger.info("Token service: " + t.getService());

      if (t.getKind().equals(new Text("HIVE_DELEGATION_TOKEN"))) {
        logger.info("Cancelling hive token " + new String(t.getIdentifier()));
        cancelHiveToken(t, userToProxy);
      } else if (t.getKind().equals(new Text("RM_DELEGATION_TOKEN"))) {
        logger.info("Cancelling mr job tracker token "
            + new String(t.getIdentifier()));
        // cancelMRJobTrackerToken(t, userToProxy);
      } else if (t.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        logger.info("Cancelling namenode token "
            + new String(t.getIdentifier()));
        // cancelNameNodeToken(t, userToProxy);
      } else if (t.getKind().equals(new Text("MR_DELEGATION_TOKEN"))) {
        logger.info("Cancelling jobhistoryserver mr token "
            + new String(t.getIdentifier()));
        // cancelJhsToken(t, userToProxy);
      } else {
        logger.info("unknown token type " + t.getKind());
      }
    }
  } catch (Exception e) {
    throw new HadoopSecurityManagerException("Failed to cancel tokens "
        + e.getMessage() + e.getCause(), e);
  }

}
 
Example 17
Source File: TestTaskAttemptContainerRequest.java    From hadoop with Apache License 2.0
@Test
public void testAttemptContainerRequest() throws Exception {
  final Text SECRET_KEY_ALIAS = new Text("secretkeyalias");
  final byte[] SECRET_KEY = ("secretkey").getBytes();
  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(1);
  acls.put(ApplicationAccessType.VIEW_APP, "otheruser");
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  Path jobFile = mock(Path.class);

  EventHandler eventHandler = mock(EventHandler.class);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  when(taListener.getAddress()).thenReturn(new InetSocketAddress("localhost", 0));

  JobConf jobConf = new JobConf();
  jobConf.setClass("fs.file.impl", StubbedFS.class, FileSystem.class);
  jobConf.setBoolean("fs.file.impl.disable.cache", true);
  jobConf.set(JobConf.MAPRED_MAP_TASK_ENV, "");

  // setup UGI for security so tokens and keys are preserved
  jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(jobConf);

  Credentials credentials = new Credentials();
  credentials.addSecretKey(SECRET_KEY_ALIAS, SECRET_KEY);
  Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
      ("tokenid").getBytes(), ("tokenpw").getBytes(),
      new Text("tokenkind"), new Text("tokenservice"));

  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          mock(TaskSplitMetaInfo.class), jobConf, taListener,
          jobToken, credentials,
          new SystemClock(), null);

  jobConf.set(MRJobConfig.APPLICATION_ATTEMPT_ID, taImpl.getID().toString());

  ContainerLaunchContext launchCtx =
      TaskAttemptImpl.createContainerLaunchContext(acls,
          jobConf, jobToken, taImpl.createRemoteTask(),
          TypeConverter.fromYarn(jobId),
          mock(WrappedJvmID.class), taListener,
          credentials);

  Assert.assertEquals("ACLs mismatch", acls, launchCtx.getApplicationACLs());
  Credentials launchCredentials = new Credentials();

  DataInputByteBuffer dibb = new DataInputByteBuffer();
  dibb.reset(launchCtx.getTokens());
  launchCredentials.readTokenStorageStream(dibb);

  // verify all tokens specified for the task attempt are in the launch context
  for (Token<? extends TokenIdentifier> token : credentials.getAllTokens()) {
    Token<? extends TokenIdentifier> launchToken =
        launchCredentials.getToken(token.getService());
    Assert.assertNotNull("Token " + token.getService() + " is missing",
        launchToken);
    Assert.assertEquals("Token " + token.getService() + " mismatch",
        token, launchToken);
  }

  // verify the secret key is in the launch context
  Assert.assertNotNull("Secret key missing",
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS));
  Assert.assertTrue("Secret key mismatch", Arrays.equals(SECRET_KEY,
      launchCredentials.getSecretKey(SECRET_KEY_ALIAS)));
}
 
Example 18
Source File: DelegationTokenRenewer.java    From hadoop with Apache License 2.0
private void handleAppSubmitEvent(DelegationTokenRenewerAppSubmitEvent evt)
    throws IOException, InterruptedException {
  ApplicationId applicationId = evt.getApplicationId();
  Credentials ts = evt.getCredentials();
  boolean shouldCancelAtEnd = evt.shouldCancelAtEnd();
  if (ts == null) {
    return; // nothing to add
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Registering tokens for renewal for:" +
        " appId = " + applicationId);
  }

  Collection<Token<?>> tokens = ts.getAllTokens();
  long now = System.currentTimeMillis();

  // find tokens for renewal, but don't add timers until we know
  // all renewable tokens are valid
  // At RM restart it is safe to assume that all the previously added tokens
  // are valid
  appTokens.put(applicationId,
    Collections.synchronizedSet(new HashSet<DelegationTokenToRenew>()));
  Set<DelegationTokenToRenew> tokenList = new HashSet<DelegationTokenToRenew>();
  boolean hasHdfsToken = false;
  for (Token<?> token : tokens) {
    if (token.isManaged()) {
      if (token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
        LOG.info(applicationId + " found existing hdfs token " + token);
        hasHdfsToken = true;
      }

      DelegationTokenToRenew dttr = allTokens.get(token);
      if (dttr == null) {
        dttr = new DelegationTokenToRenew(Arrays.asList(applicationId), token,
            getConfig(), now, shouldCancelAtEnd, evt.getUser());
        try {
          renewToken(dttr);
        } catch (IOException ioe) {
          throw new IOException("Failed to renew token: " + dttr.token, ioe);
        }
      }
      tokenList.add(dttr);
    }
  }

  if (!tokenList.isEmpty()) {
    // Renewing token and adding it to timer calls are separated purposefully
    // If user provides incorrect token then it should not be added for
    // renewal.
    for (DelegationTokenToRenew dtr : tokenList) {
      DelegationTokenToRenew currentDtr =
          allTokens.putIfAbsent(dtr.token, dtr);
      if (currentDtr != null) {
        // another job beat us
        currentDtr.referringAppIds.add(applicationId);
        appTokens.get(applicationId).add(currentDtr);
      } else {
        appTokens.get(applicationId).add(dtr);
        setTimerForTokenRenewal(dtr);
      }
    }
  }

  if (!hasHdfsToken) {
    requestNewHdfsDelegationToken(Arrays.asList(applicationId), evt.getUser(),
      shouldCancelAtEnd);
  }
}
 
Example 19
Source File: TezChild.java    From tez with Apache License 2.0
public TezChild(Configuration conf, String host, int port, String containerIdentifier,
    String tokenIdentifier, int appAttemptNumber, String workingDir, String[] localDirs,
    Map<String, String> serviceProviderEnvMap,
    ObjectRegistryImpl objectRegistry, String pid,
    ExecutionContext executionContext,
    Credentials credentials, long memAvailable, String user, TezTaskUmbilicalProtocol umbilical,
    boolean updateSysCounters, HadoopShim hadoopShim) throws IOException, InterruptedException {
  this.defaultConf = conf;
  this.containerIdString = containerIdentifier;
  this.appAttemptNumber = appAttemptNumber;
  this.localDirs = localDirs;
  this.serviceProviderEnvMap = serviceProviderEnvMap;
  this.workingDir = workingDir;
  this.pid = pid;
  this.executionContext = executionContext;
  this.credentials = credentials;
  this.memAvailable = memAvailable;
  this.user = user;
  this.updateSysCounters = updateSysCounters;
  this.hadoopShim = hadoopShim;
  this.sharedExecutor = new TezSharedExecutor(defaultConf);

  getTaskMaxSleepTime = defaultConf.getInt(
      TezConfiguration.TEZ_TASK_GET_TASK_SLEEP_INTERVAL_MS_MAX,
      TezConfiguration.TEZ_TASK_GET_TASK_SLEEP_INTERVAL_MS_MAX_DEFAULT);

  amHeartbeatInterval = defaultConf.getInt(TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS,
      TezConfiguration.TEZ_TASK_AM_HEARTBEAT_INTERVAL_MS_DEFAULT);

  sendCounterInterval = defaultConf.getLong(
      TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS,
      TezConfiguration.TEZ_TASK_AM_HEARTBEAT_COUNTER_INTERVAL_MS_DEFAULT);

  maxEventsToGet = defaultConf.getInt(TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT,
      TezConfiguration.TEZ_TASK_MAX_EVENTS_PER_HEARTBEAT_DEFAULT);

  ExecutorService executor = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
      .setDaemon(true).setNameFormat("TezChild").build());
  this.executor = MoreExecutors.listeningDecorator(executor);

  this.objectRegistry = objectRegistry;


  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing with tokens:");
    for (Token<?> token : credentials.getAllTokens()) {
      LOG.debug("",token);
    }
  }

  UserGroupInformation taskOwner = UserGroupInformation.createRemoteUser(tokenIdentifier);
  Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);

  String auxiliaryService = defaultConf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID,
      TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
  serviceConsumerMetadata.put(auxiliaryService,
      TezCommonUtils.convertJobTokenToBytes(jobToken));

  if (umbilical == null) {
    final InetSocketAddress address = NetUtils.createSocketAddrForHost(host, port);
    SecurityUtil.setTokenService(jobToken, address);
    taskOwner.addToken(jobToken);
    this.umbilical = taskOwner.doAs(new PrivilegedExceptionAction<TezTaskUmbilicalProtocol>() {
      @Override
      public TezTaskUmbilicalProtocol run() throws Exception {
        return RPC.getProxy(TezTaskUmbilicalProtocol.class,
            TezTaskUmbilicalProtocol.versionID, address, defaultConf);
      }
    });
    ownUmbilical = true;
  } else {
    this.umbilical = umbilical;
    ownUmbilical = false;
  }
  TezCommonUtils.logCredentials(LOG, credentials, "tezChildInit");
}
 
Example 20
Source File: UtilsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCreateTaskExecutorCredentials() throws Exception {
	File root = temporaryFolder.getRoot();
	File home = new File(root, "home");
	boolean created = home.mkdir();
	assertTrue(created);

	Configuration flinkConf = new Configuration();
	YarnConfiguration yarnConf = new YarnConfiguration();

	Map<String, String> env = new HashMap<>();
	env.put(YarnConfigKeys.ENV_APP_ID, "foo");
	env.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, home.getAbsolutePath());
	env.put(YarnConfigKeys.ENV_CLIENT_SHIP_FILES, "");
	env.put(YarnConfigKeys.ENV_FLINK_CLASSPATH, "");
	env.put(YarnConfigKeys.ENV_HADOOP_USER_NAME, "foo");
	env.put(YarnConfigKeys.FLINK_JAR_PATH, root.toURI().toString());
	env = Collections.unmodifiableMap(env);

	File credentialFile = temporaryFolder.newFile("container_tokens");
	final Text amRmTokenKind = AMRMTokenIdentifier.KIND_NAME;
	final Text hdfsDelegationTokenKind = new Text("HDFS_DELEGATION_TOKEN");
	final Text service = new Text("test-service");
	Credentials amCredentials = new Credentials();
	amCredentials.addToken(amRmTokenKind, new Token<>(new byte[4], new byte[4], amRmTokenKind, service));
	amCredentials.addToken(hdfsDelegationTokenKind, new Token<>(new byte[4], new byte[4],
		hdfsDelegationTokenKind, service));
	amCredentials.writeTokenStorageFile(new org.apache.hadoop.fs.Path(credentialFile.getAbsolutePath()), yarnConf);

	ContaineredTaskManagerParameters tmParams = new ContaineredTaskManagerParameters(64,
		64, 16, 1, new HashMap<>(1));
	Configuration taskManagerConf = new Configuration();

	String workingDirectory = root.getAbsolutePath();
	Class<?> taskManagerMainClass = YarnTaskExecutorRunner.class;
	ContainerLaunchContext ctx;

	final Map<String, String> originalEnv = System.getenv();
	try {
		Map<String, String> systemEnv = new HashMap<>(originalEnv);
		systemEnv.put("HADOOP_TOKEN_FILE_LOCATION", credentialFile.getAbsolutePath());
		CommonTestUtils.setEnv(systemEnv);
		ctx = Utils.createTaskExecutorContext(flinkConf, yarnConf, env, tmParams,
			taskManagerConf, workingDirectory, taskManagerMainClass, LOG);
	} finally {
		CommonTestUtils.setEnv(originalEnv);
	}

	Credentials credentials = new Credentials();
	try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(ctx.getTokens().array()))) {
		credentials.readTokenStorageStream(dis);
	}
	Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
	boolean hasHdfsDelegationToken = false;
	boolean hasAmRmToken = false;
	for (Token<? extends TokenIdentifier> token : tokens) {
		if (token.getKind().equals(amRmTokenKind)) {
			hasAmRmToken = true;
		} else if (token.getKind().equals(hdfsDelegationTokenKind)) {
			hasHdfsDelegationToken = true;
		}
	}
	assertTrue(hasHdfsDelegationToken);
	assertFalse(hasAmRmToken);
}