Java Code Examples for org.apache.hadoop.security.token.Token#getPassword()

The following examples show how to use org.apache.hadoop.security.token.Token#getPassword(). They are taken from open source projects; the source file and originating project are noted above each example.
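
Before the project examples, here is a minimal, self-contained sketch of the API itself: a Token is constructed from (identifier, password, kind, service), and getPassword() returns the raw secret bytes. All values below are illustrative, not taken from any of the projects.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenPasswordSketch {
    public static void main(String[] args) {
        byte[] identifier = new byte[] {1, 2, 3};  // serialized TokenIdentifier bytes
        byte[] password = new byte[] {4, 5, 6};    // the shared secret
        Token<TokenIdentifier> token = new Token<>(
            identifier, password, new Text("MyTokenKind"), new Text("host:8020"));
        byte[] secret = token.getPassword();       // same bytes passed to the constructor
        System.out.println("password length = " + secret.length);
    }
}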
Example 1
Source File: HadoopUser.java    From elasticsearch-hadoop with Apache License 2.0
@Override
public EsToken getEsToken(String clusterName) {
    // An unset or unnamed cluster name won't have a token for it.
    if (clusterName == null || clusterName.equals("") || clusterName.equals(ClusterName.UNNAMED_CLUSTER_NAME)) {
        return null;
    }
    for (Token<? extends TokenIdentifier> token : ugi.getTokens()) {
        if (EsTokenIdentifier.KIND_NAME.equals(token.getKind()) && clusterName.equals(token.getService().toString())) {
            try {
                return new EsToken(new DataInputStream(new ByteArrayInputStream(token.getPassword())));
            } catch (IOException e) {
                throw new EsHadoopSerializationException("Could not read token information from UGI", e);
            }
        }
    }
    return null; // Token not found
}
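
Example 1's loop generalizes to any token kind: scan the UGI's credentials for a matching kind and service, then read the secret bytes. A hedged sketch of that generic lookup (the kind and service values are whatever your token uses):

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public final class TokenLookup {
    /** Returns the password of the first token matching kind and service, or null. */
    public static byte[] findPassword(UserGroupInformation ugi, Text kind, Text service) {
        for (Token<? extends TokenIdentifier> t : ugi.getTokens()) {
            if (kind.equals(t.getKind()) && service.equals(t.getService())) {
                return t.getPassword();
            }
        }
        return null; // no matching token in this UGI
    }
}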
 
Example 2
Source File: OzoneDelegationTokenSecretManager.java    From hadoop-ozone with Apache License 2.0
/**
 * Add a delegation token to the in-memory map of tokens.
 * @param token the delegation token to store
 * @param ozoneTokenIdentifier the identifier decoded from the token
 * @param tokenRenewInterval interval added to the token's issue date to
 *        compute the renew time
 * @return renewTime - the time at which the token must be renewed
 */
public long updateToken(Token<OzoneTokenIdentifier> token,
    OzoneTokenIdentifier ozoneTokenIdentifier, long tokenRenewInterval) {
  long renewTime = ozoneTokenIdentifier.getIssueDate() + tokenRenewInterval;
  TokenInfo tokenInfo = new TokenInfo(renewTime, token.getPassword(),
      ozoneTokenIdentifier.getTrackingId());
  currentTokens.put(ozoneTokenIdentifier, tokenInfo);
  return renewTime;
}
 
Example 3
Source File: OzoneDelegationTokenSecretManager.java    From hadoop-ozone with Apache License 2.0
public void updateRenewToken(Token<OzoneTokenIdentifier> token,
    OzoneTokenIdentifier ozoneTokenIdentifier, long expiryTime) {
  //TODO: Instead of having an in-memory map inside this class, we can use
  // the cache from the table and set the table cache clean-up policy to
  // NEVER. That way we don't need to maintain a separate in-memory map. To
  // do this work we need to merge the HA and non-HA code.
  TokenInfo tokenInfo = new TokenInfo(expiryTime, token.getPassword(),
      ozoneTokenIdentifier.getTrackingId());
  currentTokens.put(ozoneTokenIdentifier, tokenInfo);
}
 
Example 4
Source File: EsTokenIdentifier.java    From elasticsearch-hadoop with Apache License 2.0
@Override
public long renew(Token<?> token, Configuration conf) throws IOException, InterruptedException {
    if (!KIND_NAME.equals(token.getKind())) {
        throw new IOException("Could not renew token of invalid type [" + token.getKind().toString() + "]");
    }
    EsToken esToken = new EsToken(new DataInputStream(new ByteArrayInputStream(token.getPassword())));
    return esToken.getExpirationTime();
}
 
Example 5
Source File: EsTokenIdentifier.java    From elasticsearch-hadoop with Apache License 2.0
@Override
public void cancel(Token<?> token, Configuration conf) throws IOException, InterruptedException {
    if (!KIND_NAME.equals(token.getKind())) {
        throw new IOException("Could not renew token of invalid type [" + token.getKind().toString() + "]");
    }
    EsToken esToken = new EsToken(new DataInputStream(new ByteArrayInputStream(token.getPassword())));
    Settings settings = HadoopSettingsManager.loadFrom(conf);
    // Create a composite settings object so we can make some changes to the settings without affecting the underlying config
    CompositeSettings compositeSettings = new CompositeSettings(Collections.singletonList(settings));
    // Extract the cluster name from the esToken so that the rest client can locate it for auth purposes
    ClusterInfo info = new ClusterInfo(new ClusterName(esToken.getClusterName(), null), esToken.getMajorVersion());
    compositeSettings.setInternalClusterInfo(info);

    // The RestClient gets the ES token for authentication from the current subject, but the subject running this code
    // could be ANYONE. We don't want to hand the token to just anyone via their credentials, so we create a throwaway
    // subject and set the token on it. That way we authenticate with the API key, and once the cancellation is done,
    // the subject is discarded. We use the JDK user to sidestep the Hadoop library's static global user subject.
    InitializationUtils.setUserProviderIfNotSet(compositeSettings, JdkUserProvider.class, new NoOpLog());
    Subject subject = new Subject();
    JdkUser user = new JdkUser(subject, settings);
    user.addEsToken(esToken);
    user.doAs(new PrivilegedAction<Void>() {
        @Override
        public Void run() {
            RestClient client = null;
            try {
                // TODO: Does not support multiple clusters yet
                // the client will need to point to the cluster that this token is associated with in order to cancel it.
                client = createClient(compositeSettings);
                client.cancelToken(esToken);
            } finally {
                if (client != null) {
                    client.close();
                }
            }
            return null;
        }
    });
}
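
The throwaway-subject trick in Example 5 is plain JDK machinery, not anything elasticsearch-hadoop specific. A minimal sketch, assuming only java.security and javax.security.auth: put the secret into a fresh Subject so that only code run via doAs sees it, and let the Subject be garbage-collected afterwards.

import java.security.PrivilegedAction;
import javax.security.auth.Subject;

public class ThrowawaySubjectSketch {
    public static void main(String[] args) {
        Subject throwaway = new Subject();
        throwaway.getPrivateCredentials().add("illustrative-secret"); // stand-in for an EsToken
        Subject.doAs(throwaway, (PrivilegedAction<Void>) () -> {
            // Code here runs as the throwaway subject; only it can see the
            // private credential added above, and the ambient caller's own
            // credentials are never attached to it.
            return null;
        });
        // After doAs returns, nothing else references the subject or its secret.
    }
}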
 
Example 6
Source File: TestSecureOzoneCluster.java    From hadoop-ozone with Apache License 2.0
/**
 * Tests delegation token renewal.
 */
@Test
public void testDelegationTokenRenewal() throws Exception {
  GenericTestUtils
      .setLogLevel(LoggerFactory.getLogger(Server.class.getName()), INFO);
  LogCapturer omLogs = LogCapturer.captureLogs(OzoneManager.getLogger());

  // Setup secure OM for start.
  OzoneConfiguration newConf = new OzoneConfiguration(conf);
  int tokenMaxLifetime = 1000;
  newConf.setLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY, tokenMaxLifetime);
  setupOm(newConf);
  long omVersion =
      RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
  OzoneManager.setTestSecureOmFlag(true);
  // Start OM

  try {
    om.setCertClient(new CertificateClientTestImpl(conf));
    om.start();

    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    // Get first OM client which will authenticate via Kerberos
    omClient = new OzoneManagerProtocolClientSideTranslatorPB(
        OmTransportFactory.create(conf, ugi, null),
        RandomStringUtils.randomAscii(5));

    // Since the client is already connected, get a delegation token
    Token<OzoneTokenIdentifier> token = omClient.getDelegationToken(
        new Text("om"));

    // Check that the token is of the right kind and the renewer is the running OM instance
    assertNotNull(token);
    assertEquals("OzoneToken", token.getKind().toString());
    assertEquals(OmUtils.getOmRpcAddress(conf),
        token.getService().toString());

    // Renew delegation token
    long expiryTime = omClient.renewDelegationToken(token);
    assertTrue(expiryTime > 0);
    omLogs.clearOutput();

    // Test failure of delegation renewal
    // 1. When token maxExpiryTime exceeds
    Thread.sleep(tokenMaxLifetime);
    OMException ex = LambdaTestUtils.intercept(OMException.class,
        "TOKEN_EXPIRED",
        () -> omClient.renewDelegationToken(token));
    assertEquals(TOKEN_EXPIRED, ex.getResult());
    omLogs.clearOutput();

    // 2. When the renewer doesn't match (implicitly covers when the renewer
    // is null or empty)
    Token<OzoneTokenIdentifier> token2 = omClient.getDelegationToken(
        new Text("randomService"));
    assertNotNull(token2);
    LambdaTestUtils.intercept(OMException.class,
        "Delegation token renewal failed",
        () -> omClient.renewDelegationToken(token2));
    assertTrue(omLogs.getOutput().contains(" with non-matching " +
        "renewer randomService"));
    omLogs.clearOutput();

    // 3. Test tampered token
    OzoneTokenIdentifier tokenId = OzoneTokenIdentifier.readProtoBuf(
        token.getIdentifier());
    tokenId.setRenewer(new Text("om"));
    tokenId.setMaxDate(System.currentTimeMillis() * 2);
    Token<OzoneTokenIdentifier> tamperedToken = new Token<>(
        tokenId.getBytes(), token2.getPassword(), token2.getKind(),
        token2.getService());
    LambdaTestUtils.intercept(OMException.class,
        "Delegation token renewal failed",
        () -> omClient.renewDelegationToken(tamperedToken));
    assertTrue(omLogs.getOutput().contains("can't be found in " +
        "cache"));
    omLogs.clearOutput();

  } finally {
    om.stop();
    om.join();
  }
}
 
Example 7
Source File: TestClientToAMTokens.java    From hadoop with Apache License 2.0
private void verifyTamperedToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token, UserGroupInformation ugi,
    ClientToAMTokenIdentifier maliciousID) {
  Token<ClientToAMTokenIdentifier> maliciousToken =
      new Token<ClientToAMTokenIdentifier>(maliciousID.getBytes(),
        token.getPassword(), token.getKind(),
        token.getService());
  ugi.addToken(maliciousToken);

  try {
    ugi.doAs(new PrivilegedExceptionAction<Void>()  {
      @Override
      public Void run() throws Exception {
        try {
          CustomProtocol client =
              (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
                am.address, conf);
          client.ping();
          fail("Connection initiation with illegally modified "
              + "tokens is expected to fail.");
          return null;
        } catch (YarnException ex) {
          fail("Cannot get a YARN remote exception as "
              + "it will indicate RPC success");
          throw ex;
        }
      }
    });
  } catch (Exception e) {
    Assert.assertEquals(RemoteException.class.getName(), e.getClass()
        .getName());
    e = ((RemoteException)e).unwrapRemoteException();
    Assert
      .assertEquals(SaslException.class
        .getCanonicalName(), e.getClass().getCanonicalName());
    Assert.assertTrue(e
      .getMessage()
      .contains(
        "DIGEST-MD5: digest response format violation. "
            + "Mismatched response."));
    Assert.assertFalse(am.pinged);
  }
}
 
Example 8
Source File: YarnChild.java    From hadoop with Apache License 2.0
private static void configureTask(JobConf job, Task task,
    Credentials credentials, Token<JobTokenIdentifier> jt) throws IOException {
  job.setCredentials(credentials);
  
  ApplicationAttemptId appAttemptId =
      ConverterUtils.toContainerId(
          System.getenv(Environment.CONTAINER_ID.name()))
          .getApplicationAttemptId();
  LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId);
  // Set it in conf so that it can be used by the OutputCommitter.
  job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
      appAttemptId.getAttemptId());

  // set tcp nodelay
  job.setBoolean("ipc.client.tcpnodelay", true);
  job.setClass(MRConfig.TASK_LOCAL_OUTPUT_CLASS,
      YarnOutputFiles.class, MapOutputFile.class);
  // set the jobToken and shuffle secrets into task
  task.setJobTokenSecret(
      JobTokenSecretManager.createSecretKey(jt.getPassword()));
  byte[] shuffleSecret = TokenCache.getShuffleSecretKey(credentials);
  if (shuffleSecret == null) {
    LOG.warn("Shuffle secret missing from task credentials."
        + " Using job token secret as shuffle secret.");
    shuffleSecret = jt.getPassword();
  }
  task.setShuffleSecret(
      JobTokenSecretManager.createSecretKey(shuffleSecret));

  // setup the child's MRConfig.LOCAL_DIR.
  configureLocalDirs(task, job);

  // setup the child's attempt directories
  // Do the task-type specific localization
  task.localizeConfiguration(job);

  // Set up the DistributedCache related configs
  MRApps.setupDistributedCacheLocal(job);

  // Overwrite the localized task jobconf which is linked to in the current
  // work-dir.
  Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
  writeLocalJobFile(localTaskFile, job);
  task.setJobFile(localTaskFile.toString());
  task.setConf(job);
}
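
For context on where those secret bytes go next: a reduce-side fetcher derives the shuffle key the same way and uses it to sign shuffle URLs. A hedged sketch of that consumer side, assuming SecureShuffleUtils.generateHash (the URL string is illustrative):

import javax.crypto.SecretKey;
import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.security.token.Token;

public class ShuffleKeySketch {
    /** Derive the shuffle key from the job token password and sign a shuffle URL. */
    static String signShuffleUrl(Token<JobTokenIdentifier> jobToken, String urlToSign)
            throws java.io.IOException {
        SecretKey shuffleKey = JobTokenSecretManager.createSecretKey(jobToken.getPassword());
        return SecureShuffleUtils.generateHash(
            urlToSign.getBytes(java.nio.charset.StandardCharsets.UTF_8), shuffleKey);
    }
}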
 
Example 9
Source File: Application.java    From hadoop with Apache License 2.0
/**
 * Start the child process to handle the task for us.
 * @param conf the task's configuration
 * @param recordReader the fake record reader to update progress with
 * @param output the collector to send output to
 * @param reporter the reporter for the task
 * @param outputKeyClass the class of the output keys
 * @param outputValueClass the class of the output values
 * @throws IOException
 * @throws InterruptedException
 */
Application(JobConf conf, 
            RecordReader<FloatWritable, NullWritable> recordReader, 
            OutputCollector<K2,V2> output, Reporter reporter,
            Class<? extends K2> outputKeyClass,
            Class<? extends V2> outputValueClass
            ) throws IOException, InterruptedException {
  serverSocket = new ServerSocket(0);
  Map<String, String> env = new HashMap<String,String>();
  // add TMPDIR environment variable with the value of java.io.tmpdir
  env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
  env.put(Submitter.PORT, 
          Integer.toString(serverSocket.getLocalPort()));
  
  //Add token to the environment if security is enabled
  Token<JobTokenIdentifier> jobToken = TokenCache.getJobToken(conf
      .getCredentials());
  // This password is used as the shared secret key between this application
  // and the child pipes process
  byte[] password = jobToken.getPassword();
  String localPasswordFile = new File(".") + Path.SEPARATOR
      + "jobTokenPassword";
  writePasswordToLocalFile(localPasswordFile, password, conf);
  env.put("hadoop.pipes.shared.secret.location", localPasswordFile);
 
  List<String> cmd = new ArrayList<String>();
  String interpretor = conf.get(Submitter.INTERPRETOR);
  if (interpretor != null) {
    cmd.add(interpretor);
  }
  String executable = DistributedCache.getLocalCacheFiles(conf)[0].toString();
  if (!FileUtil.canExecute(new File(executable))) {
    // LinuxTaskController sets +x permissions on all distcache files already.
    // In case of DefaultTaskController, set permissions here.
    FileUtil.chmod(executable, "u+x");
  }
  cmd.add(executable);
  // wrap the command in a stdout/stderr capture
  // we are starting a map/reduce task of the pipes job; this is not a
  // cleanup attempt.
  TaskAttemptID taskid = 
    TaskAttemptID.forName(conf.get(MRJobConfig.TASK_ATTEMPT_ID));
  File stdout = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDOUT);
  File stderr = TaskLog.getTaskLogFile(taskid, false, TaskLog.LogName.STDERR);
  long logLength = TaskLog.getTaskLogLength(conf);
  cmd = TaskLog.captureOutAndError(null, cmd, stdout, stderr, logLength,
                                   false);
  
  process = runClient(cmd, env);
  clientSocket = serverSocket.accept();
  
  String challenge = getSecurityChallenge();
  String digestToSend = createDigest(password, challenge);
  String digestExpected = createDigest(password, digestToSend);
  
  handler = new OutputHandler<K2, V2>(output, reporter, recordReader, 
      digestExpected);
  K2 outputKey = (K2)
    ReflectionUtils.newInstance(outputKeyClass, conf);
  V2 outputValue = (V2) 
    ReflectionUtils.newInstance(outputValueClass, conf);
  downlink = new BinaryProtocol<K1, V1, K2, V2>(clientSocket, handler, 
                                outputKey, outputValue, conf);
  
  downlink.authenticate(digestToSend, challenge);
  waitForAuthentication();
  LOG.debug("Authentication succeeded");
  downlink.start();
  downlink.setJobConf(conf);
}
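
createDigest above is a private helper of Application; the general shape is an HMAC over the message, keyed by the token password. A hedged, JDK-only sketch of that shape (the real helper may use a different algorithm or encoding):

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class DigestSketch {
    /** HMAC the message with the token password and Base64-encode the result. */
    static String digest(byte[] password, String msg) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(new SecretKeySpec(password, "HmacSHA1"));
        return Base64.getEncoder().encodeToString(
            mac.doFinal(msg.getBytes(StandardCharsets.UTF_8)));
    }
}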
 
Example 10
Source File: TestDelegationToken.java    From hadoop with Apache License 2.0
@Test(timeout = 10000)
public void testRollMasterKey() throws Exception {
  TestDelegationTokenSecretManager dtSecretManager =
      new TestDelegationTokenSecretManager(800, 800, 1 * 1000, 3600000);
  try {
    dtSecretManager.startThreads();
    //generate a token and store the password
    Token<TestDelegationTokenIdentifier> token = generateDelegationToken(
        dtSecretManager, "SomeUser", "JobTracker");
    byte[] oldPasswd = token.getPassword();
    //store the length of the keys list
    int prevNumKeys = dtSecretManager.getAllKeys().length;
    
    dtSecretManager.rollMasterKey();
    Assert.assertTrue(dtSecretManager.isStoreNewMasterKeyCalled);

    //after rolling, the length of the keys list must increase
    int currNumKeys = dtSecretManager.getAllKeys().length;
    Assert.assertTrue(currNumKeys - prevNumKeys >= 1);
    
    //after rolling, the token that was generated earlier must
    //still be valid (retrievePassword will fail if the token
    //is not valid)
    ByteArrayInputStream bi = 
      new ByteArrayInputStream(token.getIdentifier());
    TestDelegationTokenIdentifier identifier = 
      dtSecretManager.createIdentifier();
    identifier.readFields(new DataInputStream(bi));
    byte[] newPasswd = 
      dtSecretManager.retrievePassword(identifier);
    //compare the passwords byte-for-byte (assertEquals on byte[] would only
    //compare references)
    Assert.assertArrayEquals(oldPasswd, newPasswd);
    // wait for keys to expire
    while(!dtSecretManager.isRemoveStoredMasterKeyCalled) {
      Thread.sleep(200);
    }
  } finally {
    dtSecretManager.stopThreads();
  }
}
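
As a side note, the manual ByteArrayInputStream/readFields sequence above can usually be collapsed with Token#decodeIdentifier(), which resolves the identifier class from the token kind. A hedged equivalent, assuming the identifier class is registered as a TokenIdentifier service:

// Equivalent to the readFields sequence in the test, in recent Hadoop releases:
TestDelegationTokenIdentifier identifier = token.decodeIdentifier();
byte[] newPasswd = dtSecretManager.retrievePassword(identifier);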
 