Java Code Examples for org.apache.hadoop.security.token.Token#renew()

The following examples show how to use org.apache.hadoop.security.token.Token#renew(). They are drawn from open source projects; the source file and originating project are noted above each example.
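Before the per-project examples, here is a minimal, self-contained sketch of the typical call pattern; the surrounding class, the renewer name, and the choice of FileSystem.get(conf) are assumptions made for illustration rather than code from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class TokenRenewSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    // On a Kerberos-secured cluster the file system can issue a delegation token;
    // the renewer name passed here is purely illustrative.
    FileSystem fs = FileSystem.get(conf);
    Token<?> token = fs.getDelegationToken("renewer");
    if (token != null) {
      // Token#renew contacts the token's issuing service and returns the token's
      // new expiration time.
      long newExpiration = token.renew(conf);
      System.out.println("Token renewed; new expiration time = " + newExpiration);
    }
  }
}

On an insecure cluster getDelegationToken typically returns null, which is why the sketch guards the renew call.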
Example 1
Source File: DFSClient.java    From hadoop with Apache License 2.0    5 votes
/**
 * Renew a delegation token
 * @param token the token to renew
 * @return the new expiration time
 * @throws InvalidToken
 * @throws IOException
 * @deprecated Use Token.renew instead.
 */
@Deprecated
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
    throws InvalidToken, IOException {
  LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
  try {
    return token.renew(conf);
  } catch (InterruptedException ie) {                                       
    throw new RuntimeException("caught interrupted", ie);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(InvalidToken.class,
                                   AccessControlException.class);
  }
}
 
Example 2
Source File: TestWebHDFSForHA.java    From hadoop with Apache License 2.0    5 votes
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

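    // Fail over: shut down the first NameNode and make the second one active;
    // renewing and cancelling the token must still work against the logical HA service.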
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 3
Source File: TestDelegationTokensWithHA.java    From hadoop with Apache License 2.0    5 votes
/**
 * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
 * exception if the URI is a logical URI. This bug fails the combination of
 * ha + mapred + security.
 */
@Test(timeout = 300000)
public void testDFSGetCanonicalServiceName() throws Exception {
  URI hAUri = HATestUtil.getLogicalUri(cluster);
  String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
      HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(haService, dfs.getCanonicalServiceName());
  final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(dfs, renewer);
  assertEquals(haService, token.getService().toString());
  // make sure the logical uri is handled correctly
  token.renew(dfs.getConf());
  token.cancel(dfs.getConf());
}
 
Example 4
Source File: TestDelegationTokensWithHA.java    From hadoop with Apache License 2.0    5 votes
@Test(timeout = 300000)
public void testHdfsGetCanonicalServiceName() throws Exception {
  Configuration conf = dfs.getConf();
  URI haUri = HATestUtil.getLogicalUri(cluster);
  AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
  String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
      HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(haService, afs.getCanonicalServiceName());
  Token<?> token = afs.getDelegationTokens(
      UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
  assertEquals(haService, token.getService().toString());
  // make sure the logical uri is handled correctly
  token.renew(conf);
  token.cancel(conf);
}
 
Example 5
Source File: DFSClient.java    From big-c with Apache License 2.0    5 votes
/**
 * Renew a delegation token
 * @param token the token to renew
 * @return the new expiration time
 * @throws InvalidToken
 * @throws IOException
 * @deprecated Use Token.renew instead.
 */
@Deprecated
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
    throws InvalidToken, IOException {
  LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
  try {
    return token.renew(conf);
  } catch (InterruptedException ie) {                                       
    throw new RuntimeException("caught interrupted", ie);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(InvalidToken.class,
                                   AccessControlException.class);
  }
}
 
Example 6
Source File: TestWebHDFSForHA.java    From big-c with Apache License 2.0    5 votes
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 7
Source File: TestDelegationTokensWithHA.java    From big-c with Apache License 2.0    5 votes
/**
 * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
 * exception if the URI is a logical URI. This bug fails the combination of
 * ha + mapred + security.
 */
@Test(timeout = 300000)
public void testDFSGetCanonicalServiceName() throws Exception {
  URI hAUri = HATestUtil.getLogicalUri(cluster);
  String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
      HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(haService, dfs.getCanonicalServiceName());
  final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(dfs, renewer);
  assertEquals(haService, token.getService().toString());
  // make sure the logical uri is handled correctly
  token.renew(dfs.getConf());
  token.cancel(dfs.getConf());
}
 
Example 8
Source File: TestDelegationTokensWithHA.java    From big-c with Apache License 2.0    5 votes
@Test(timeout = 300000)
public void testHdfsGetCanonicalServiceName() throws Exception {
  Configuration conf = dfs.getConf();
  URI haUri = HATestUtil.getLogicalUri(cluster);
  AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf);
  String haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
      HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(haService, afs.getCanonicalServiceName());
  Token<?> token = afs.getDelegationTokens(
      UserGroupInformation.getCurrentUser().getShortUserName()).get(0);
  assertEquals(haService, token.getService().toString());
  // make sure the logical uri is handled correctly
  token.renew(conf);
  token.cancel(conf);
}
 
Example 9
Source File: AutoHDFS.java    From jstorm with Apache License 2.0    5 votes
/**
 * {@inheritDoc}
 */
@Override
@SuppressWarnings("unchecked")
public void renew(Map<String, String> credentials, Map topologyConf) {
    try {
        Credentials credential = getCredentials(credentials);
        if (credential != null) {
            Configuration configuration = new Configuration();
            Collection<Token<? extends TokenIdentifier>> tokens = credential.getAllTokens();

            if (tokens != null && !tokens.isEmpty()) {
                for (Token token : tokens) {
                    // We need to re-login; some other thread might have logged into hadoop using
                    // their credentials (e.g. AutoHBase might also be part of nimbus auto creds).
                    login(configuration);
                    long expiration = token.renew(configuration);
                    LOG.info("HDFS delegation token renewed, new expiration time {}", expiration);
                }
            } else {
                LOG.debug("No tokens found for credentials, skipping renewal.");
            }
        }
    } catch (Exception e) {
        LOG.warn("could not renew the credentials, one of the possible reason is tokens are beyond " +
                "renewal period so attempting to get new tokens.", e);
        populateCredentials(credentials, topologyConf);
    }
}
 
Example 10
Source File: JobClient.java    From hadoop with Apache License 2.0    2 votes
/**
 * Renew a delegation token
 * @param token the token to renew
 * @return the new expiration time
 * @throws InvalidToken
 * @throws IOException
 * @deprecated Use {@link Token#renew} instead
 */
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException, 
                                          InterruptedException {
  return token.renew(getConf());
}
 
Example 11
Source File: Cluster.java    From hadoop with Apache License 2.0    2 votes
/**
 * Renew a delegation token
 * @param token the token to renew
 * @return the new expiration time
 * @throws InvalidToken
 * @throws IOException
 * @deprecated Use {@link Token#renew} instead
 */
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException,
                                          InterruptedException {
  return token.renew(getConf());
}
 
Example 12
Source File: JobClient.java    From big-c with Apache License 2.0    2 votes
/**
 * Renew a delegation token
 * @param token the token to renew
 * @return the new expiration time
 * @throws InvalidToken
 * @throws IOException
 * @deprecated Use {@link Token#renew} instead
 */
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException, 
                                          InterruptedException {
  return token.renew(getConf());
}
 
Example 13
Source File: Cluster.java    From big-c with Apache License 2.0    2 votes
/**
 * Renew a delegation token
 * @param token the token to renew
 * @return the new expiration time
 * @throws InvalidToken
 * @throws IOException
 * @deprecated Use {@link Token#renew} instead
 */
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
                                 ) throws InvalidToken, IOException,
                                          InterruptedException {
  return token.renew(getConf());
}