Java Code Examples for org.apache.hadoop.security.token.Token

The following are examples showing how to use org.apache.hadoop.security.token.Token, all extracted from open source projects.
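
Before the examples, here is a minimal, self-contained sketch of the Token API itself: constructing a token from raw identifier/password bytes, encoding it to a URL-safe string (the form used by WebHDFS and the RM web services), and decoding it back. The kind and service values below are made up for illustration.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a token from raw identifier/password bytes plus a kind and a service.
    // The kind and service strings here are placeholders, not real values.
    Token<TokenIdentifier> token = new Token<TokenIdentifier>(
        "some-identifier".getBytes(), "some-password".getBytes(),
        new Text("MY_TOKEN_KIND"), new Text("host.example.com:8020"));

    // Tokens serialize to a compact, URL-safe string...
    String encoded = token.encodeToUrlString();

    // ...and can be reconstructed from that string, as Example 28 below
    // does for RM delegation tokens.
    Token<TokenIdentifier> copy = new Token<TokenIdentifier>();
    copy.decodeFromUrlString(encoded);

    System.out.println("kind=" + copy.getKind() + ", service=" + copy.getService());
  }
}
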
Example 1
Project: hadoop   File: TestDelegationToken.java
@Test
public void testDelegationTokenSecretManager() throws Exception {
  Token<DelegationTokenIdentifier> token = generateDelegationToken(
      "SomeUser", "JobTracker");
  // Fake renewer should not be able to renew
  try {
    dtSecretManager.renewToken(token, "FakeRenewer");
    Assert.fail("should have failed");
  } catch (AccessControlException ace) {
    // PASS
  }
  dtSecretManager.renewToken(token, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(tokenId)));
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  LOG.info("Sleep to expire the token");
  Thread.sleep(6000);
  // Token should be expired
  try {
    dtSecretManager.retrievePassword(identifier);
    // Should not come here
    Assert.fail("Token should have expired");
  } catch (InvalidToken e) {
    // Success
  }
  dtSecretManager.renewToken(token, "JobTracker");
  LOG.info("Sleep beyond the max lifetime");
  Thread.sleep(5000);
  try {
    dtSecretManager.renewToken(token, "JobTracker");
    Assert.fail("should have been expired");
  } catch (InvalidToken it) {
    // PASS
  }
}
 
Example 2
Project: hadoop   File: BlockTokenSecretManager.java
/** Check if access should be allowed. userId is not checked if null. */
public void checkAccess(Token<BlockTokenIdentifier> token, String userId,
    ExtendedBlock block, AccessMode mode) throws InvalidToken {
  BlockTokenIdentifier id = new BlockTokenIdentifier();
  try {
    id.readFields(new DataInputStream(new ByteArrayInputStream(token
        .getIdentifier())));
  } catch (IOException e) {
    throw new InvalidToken(
        "Unable to de-serialize block token identifier for user=" + userId
            + ", block=" + block + ", access mode=" + mode);
  }
  checkAccess(id, userId, block, mode);
  if (!Arrays.equals(retrievePassword(id), token.getPassword())) {
    throw new InvalidToken("Block token with " + id.toString()
        + " doesn't have the correct token password");
  }
}
 
Example 3
Project: hadoop   File: RMDelegationTokenIdentifier.java
@SuppressWarnings("unchecked")
@Override
public long renew(Token<?> token, Configuration conf) throws IOException,
    InterruptedException {
  final ApplicationClientProtocol rmClient = getRmClient(token, conf);
  if (rmClient != null) {
    try {
      RenewDelegationTokenRequest request =
          Records.newRecord(RenewDelegationTokenRequest.class);
      request.setDelegationToken(convertToProtoToken(token));
      return rmClient.renewDelegationToken(request).getNextExpirationTime();
    } catch (YarnException e) {
      throw new IOException(e);
    } finally {
      RPC.stopProxy(rmClient);
    }
  } else {
    return localSecretManager.renewToken(
        (Token<RMDelegationTokenIdentifier>)token, getRenewer(token));
  }
}
 
Example 4
Project: hadoop   File: TestBinaryTokenFile.java
private static void createBinaryTokenFile(Configuration conf) {
  // Fetch delegation tokens and store in binary token file.
  try {
    Credentials cred1 = new Credentials();
    Credentials cred2 = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(cred1, new Path[] { p1 },
        conf);
    for (Token<? extends TokenIdentifier> t : cred1.getAllTokens()) {
      cred2.addToken(new Text(DELEGATION_TOKEN_KEY), t);
    }
    DataOutputStream os = new DataOutputStream(new FileOutputStream(
        binaryTokenFileName.toString()));
    try {
      cred2.writeTokenStorageToStream(os);
    } finally {
      os.close();
    }
  } catch (IOException e) {
    Assert.fail("Exception " + e);
  }
}
 
Example 5
Project: hadoop   File: AMRMTokenSelector.java
@SuppressWarnings("unchecked")
public Token<AMRMTokenIdentifier> selectToken(Text service,
    Collection<Token<? extends TokenIdentifier>> tokens) {
  if (service == null) {
    return null;
  }
  LOG.debug("Looking for a token with service " + service.toString());
  for (Token<? extends TokenIdentifier> token : tokens) {
    LOG.debug("Token kind is " + token.getKind().toString()
        + " and the token's service name is " + token.getService());
    if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())
        && checkService(service, token)) {
      return (Token<AMRMTokenIdentifier>) token;
    }
  }
  return null;
}
 
Example 6
Project: hadoop   File: TestDelegationTokensWithHA.java
private static void doRenewOrCancel(
    final Token<DelegationTokenIdentifier> token, final Configuration conf,
    final TokenTestAction action)
    throws IOException, InterruptedException {
  UserGroupInformation.createRemoteUser("JobTracker").doAs(
      new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          switch (action) {
          case RENEW:
            token.renew(conf);
            break;
          case CANCEL:
            token.cancel(conf);
            break;
          default:
            fail("bad action:" + action);
          }
          return null;
        }
      });
}
 
Example 7
Project: hadoop   File: TestApplicationMasterServiceProtocolOnHA.java
@Before
public void initialize() throws Exception {
  startHACluster(0, false, false, true);
  attemptId = this.cluster.createFakeApplicationAttemptId();
  amClient = ClientRMProxy
      .createRMProxy(this.conf, ApplicationMasterProtocol.class);

  Token<AMRMTokenIdentifier> appToken =
      this.cluster.getResourceManager().getRMContext()
        .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
  appToken.setService(ClientRMProxy.getAMRMTokenService(conf));
  UserGroupInformation.setLoginUser(UserGroupInformation
      .createRemoteUser(UserGroupInformation.getCurrentUser()
          .getUserName()));
  UserGroupInformation.getCurrentUser().addToken(appToken);
  syncToken(appToken);
}
 
Example 8
Project: hadoop   File: ViewFileSystemBaseTest.java
@Test
public void testGetDelegationTokensWithCredentials() throws IOException {
  Credentials credentials = new Credentials();
  List<Token<?>> delTokens =
      Arrays.asList(fsView.addDelegationTokens("sanjay", credentials));

  int expectedTokenCount = getExpectedDelegationTokenCountWithCredentials();

  Assert.assertEquals(expectedTokenCount, delTokens.size());
  Credentials newCredentials = new Credentials();
  for (int i = 0; i < expectedTokenCount / 2; i++) {
    Token<?> token = delTokens.get(i);
    newCredentials.addToken(token.getService(), token);
  }

  // Tokens already present in newCredentials are not fetched again, so only
  // the remaining (expectedTokenCount + 1) / 2 tokens are returned.
  List<Token<?>> delTokens2 =
      Arrays.asList(fsView.addDelegationTokens("sanjay", newCredentials));
  Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens2.size());
}
 
Example 9
Project: hadoop   File: HftpFileSystem.java
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(final Token<?> token) throws IOException {
  // update the kerberos credentials, if they are coming from a keytab
  UserGroupInformation connectUgi = ugi.getRealUser();
  if (connectUgi == null) {
    connectUgi = ugi;
  }
  try {
    return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
      @Override
      public Long run() throws Exception {
        InetSocketAddress serviceAddr = SecurityUtil
            .getTokenServiceAddr(token);
        return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
 
Example 10
Project: hadoop   File: HistoryClientService.java
@Override
public RenewDelegationTokenResponse renewDelegationToken(
    RenewDelegationTokenRequest request) throws IOException {
  if (!isAllowedDelegationTokenOp()) {
    throw new IOException(
        "Delegation Token can be renewed only with kerberos authentication");
  }

  org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
  Token<MRDelegationTokenIdentifier> token =
      new Token<MRDelegationTokenIdentifier>(
          protoToken.getIdentifier().array(), protoToken.getPassword()
              .array(), new Text(protoToken.getKind()), new Text(
              protoToken.getService()));

  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  long nextExpTime = jhsDTSecretManager.renewToken(token, user);
  RenewDelegationTokenResponse renewResponse = Records
      .newRecord(RenewDelegationTokenResponse.class);
  renewResponse.setNextExpirationTime(nextExpTime);
  return renewResponse;
}
 
Example 11
Project: hadoop   File: TestBlockToken.java
private static Server createMockDatanode(BlockTokenSecretManager sm,
    Token<BlockTokenIdentifier> token, Configuration conf)
    throws IOException, ServiceException {
  ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);

  BlockTokenIdentifier id = sm.createIdentifier();
  id.readFields(new DataInputStream(new ByteArrayInputStream(token
      .getIdentifier())));
  
  doAnswer(new GetLengthAnswer(sm, id)).when(mockDN)
      .getReplicaVisibleLength(any(RpcController.class),
          any(GetReplicaVisibleLengthRequestProto.class));

  RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
      ProtobufRpcEngine.class);
  BlockingService service = ClientDatanodeProtocolService
      .newReflectiveBlockingService(mockDN);
  return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
      .setInstance(service).setBindAddress(ADDRESS).setPort(0)
      .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
}
 
Example 12
Project: ditb   File: TokenUtil.java
/**
 * Get the authentication token of the user for the cluster specified in the configuration.
 * @return null if the user does not have the token, otherwise the auth token for the cluster.
 */
private static Token<AuthenticationTokenIdentifier> getAuthToken(Configuration conf, User user)
    throws IOException, InterruptedException {
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "TokenUtil-getAuthToken", null);
  try {
    String clusterId = ZKClusterId.readClusterIdZNode(zkw);
    if (clusterId == null) {
      throw new IOException("Failed to get cluster ID");
    }
    return new AuthenticationTokenSelector().selectToken(new Text(clusterId), user.getTokens());
  } catch (KeeperException e) {
    throw new IOException(e);
  } finally {
    zkw.close();
  }
}
 
Example 13
Project: hadoop   File: ClientRMService.java
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
    CancelDelegationTokenRequest request) throws YarnException {
  try {
    if (!isAllowedDelegationTokenOp()) {
      throw new IOException(
          "Delegation Token can be cancelled only with kerberos authentication");
    }
    org.apache.hadoop.yarn.api.records.Token protoToken = request.getDelegationToken();
    Token<RMDelegationTokenIdentifier> token = new Token<RMDelegationTokenIdentifier>(
        protoToken.getIdentifier().array(), protoToken.getPassword().array(),
        new Text(protoToken.getKind()), new Text(protoToken.getService()));

    String user = UserGroupInformation.getCurrentUser().getUserName();
    rmDTSecretManager.cancelToken(token, user);
    return Records.newRecord(CancelDelegationTokenResponse.class);
  } catch (IOException e) {
    throw RPCUtil.getRemoteException(e);
  }
}
 
Example 14
Project: hadoop   File: DistributedFileSystem.java
@Override
public Token<?>[] addDelegationTokens(
    final String renewer, Credentials credentials) throws IOException {
  Token<?>[] tokens = super.addDelegationTokens(renewer, credentials);
  if (dfs.isHDFSEncryptionEnabled()) {
    KeyProviderDelegationTokenExtension keyProviderDelegationTokenExtension =
        KeyProviderDelegationTokenExtension.
            createKeyProviderDelegationTokenExtension(dfs.getKeyProvider());
    Token<?>[] kpTokens = keyProviderDelegationTokenExtension.
        addDelegationTokens(renewer, credentials);
    if (tokens != null && kpTokens != null) {
      Token<?>[] all = new Token<?>[tokens.length + kpTokens.length];
      System.arraycopy(tokens, 0, all, 0, tokens.length);
      System.arraycopy(kpTokens, 0, all, tokens.length, kpTokens.length);
      tokens = all;
    } else {
      tokens = (tokens != null) ? tokens : kpTokens;
    }
  }
  return tokens;
}
 
Example 15
Project: hadoop   File: DFSTestUtil.java
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
    final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
  assertEquals(2, datanodes.length);
  final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
      datanodes.length, dfsClient);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
      NetUtils.getOutputStream(s, writeTimeout),
      HdfsConstants.SMALL_BUFFER_SIZE));
  final DataInputStream in = new DataInputStream(NetUtils.getInputStream(s));

  // send the request
  new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
      dfsClient.clientName, new DatanodeInfo[]{datanodes[1]},
      new StorageType[]{StorageType.DEFAULT});
  out.flush();

  return BlockOpResponseProto.parseDelimitedFrom(in);
}
 
Example 16
Project: hadoop   File: DelegationTokenRenewer.java
@VisibleForTesting
protected Token<?>[] obtainSystemTokensForUser(String user,
    final Credentials credentials) throws IOException, InterruptedException {
  // Get new hdfs tokens on behalf of this user
  UserGroupInformation proxyUser =
      UserGroupInformation.createProxyUser(user,
        UserGroupInformation.getLoginUser());
  Token<?>[] newTokens =
      proxyUser.doAs(new PrivilegedExceptionAction<Token<?>[]>() {
        @Override
        public Token<?>[] run() throws Exception {
          FileSystem fs = FileSystem.get(getConfig());
          try {
            return fs.addDelegationTokens(
                UserGroupInformation.getLoginUser().getUserName(),
                credentials);
          } finally {
            // Close the FileSystem created by the new proxy user,
            // So that we don't leave an entry in the FileSystem cache
            fs.close();
          }
        }
      });
  return newTokens;
}
 
Example 17
Project: hadoop   File: TestHttpFSWithKerberos.java
@Test
@TestDir
@TestJetty
@TestHdfs
public void testValidHttpFSAccess() throws Exception {
  createHttpFSServer();

  KerberosTestUtils.doAsClient(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      URL url = new URL(TestJettyHelper.getJettyURL(),
                        "/webhdfs/v1/?op=GETHOMEDIRECTORY");
      AuthenticatedURL aUrl = new AuthenticatedURL();
      AuthenticatedURL.Token aToken = new AuthenticatedURL.Token();
      HttpURLConnection conn = aUrl.openConnection(url, aToken);
      Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      return null;
    }
  });
}
 
Example 18
Project: hadoop-oss   File: TestFileSystemTokens.java
@Test
public void testFsWithDuplicateChildrenTokenExists() throws Exception {
  Credentials credentials = new Credentials();
  Text service = new Text("singleTokenFs1");
  Token<?> token = mock(Token.class);
  credentials.addToken(service, token);

  MockFileSystem fs = createFileSystemForServiceName(service);
  MockFileSystem multiFs =
      createFileSystemForServiceName(null, fs, new FilterFileSystem(fs));
  
  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, false);
  verifyTokenFetch(fs, false);
  
  assertEquals(1, credentials.numberOfTokens());
  assertSame(token, credentials.getToken(service));
}
 
Example 19
Project: hadoop-oss   File: TestFileSystemTokens.java
@Test
public void testFsWithMyOwnAndChildTokens() throws Exception {
  Credentials credentials = new Credentials();
  Text service1 = new Text("singleTokenFs1");
  Text service2 = new Text("singleTokenFs2");
  Text myService = new Text("multiTokenFs");
  Token<?> token = mock(Token.class);
  credentials.addToken(service2, token);

  MockFileSystem fs1 = createFileSystemForServiceName(service1);
  MockFileSystem fs2 = createFileSystemForServiceName(service2);
  MockFileSystem multiFs = createFileSystemForServiceName(myService, fs1, fs2);
  
  multiFs.addDelegationTokens(renewer, credentials);
  verifyTokenFetch(multiFs, true); // its own token and also of its children
  verifyTokenFetch(fs1, true);
  verifyTokenFetch(fs2, false);  // we had added its token to credentials 
  
  assertEquals(3, credentials.numberOfTokens());
  assertNotNull(credentials.getToken(myService));
  assertNotNull(credentials.getToken(service1));
  assertNotNull(credentials.getToken(service2));
}
 
Example 20
Project: hadoop-oss   File: TestFileSystemTokens.java
public static MockFileSystem createFileSystemForServiceName(
    final Text service, final FileSystem... children) throws IOException {
  final MockFileSystem fs = new MockFileSystem();
  final MockFileSystem mockFs = fs.getRawFileSystem();
  if (service != null) {
    when(mockFs.getCanonicalServiceName()).thenReturn(service.toString());
    when(mockFs.getDelegationToken(any(String.class))).thenAnswer(
      new Answer<Token<?>>() {
        @Override
        public Token<?> answer(InvocationOnMock invocation) throws Throwable {
          Token<?> token = new Token<TokenIdentifier>();
          token.setService(service);
          return token;
        }
      });
  }
  when(mockFs.getChildFileSystems()).thenReturn(children);
  return fs;
}
 
Example 21
Project: hadoop   File: TestSecurityTokenEditLog.java
@Override
public void run() {
  FSEditLog editLog = namesystem.getEditLog();

  for (int i = 0; i < numTransactions; i++) {
    try {
      String renewer = UserGroupInformation.getLoginUser().getUserName();
      Token<DelegationTokenIdentifier> token = namesystem
          .getDelegationToken(new Text(renewer));
      namesystem.renewDelegationToken(token);
      namesystem.cancelDelegationToken(token);
      editLog.logSync();
    } catch (IOException e) {
      System.out.println("Transaction " + i + " encountered exception " +
                         e);
    }
  }
}
 
Example 22
Project: hadoop-oss   File: NuCypherExtUtilClient.java
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtilsClient.cleanup(null, peer);
    }
  }
}
 
Example 23
Project: hadoop   File: TestWebHDFSForHA.java
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 24
Project: hadoop   File: BlockReaderLocalLegacy.java
private BlockReaderLocalLegacy(DFSClient.Conf conf, String hdfsfile,
    ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
    long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
    throws IOException {
  this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
      DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
      dataIn, startOffset, null);
}
 
Example 25
Project: hadoop   File: TestDelegationToken.java
@SuppressWarnings("unchecked")
private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
    throws Exception {
  Assert.assertNotNull(token);
  // should be able to use token.decodeIdentifier() but webhdfs isn't
  // registered with the service loader for token decoding
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenId));
  try {
    identifier.readFields(in);
  } finally {
    in.close();
  }
  Assert.assertNotNull(identifier);
  LOG.info("A valid token should have non-null password, and should be renewed successfully");
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
  ugi.doAs(
      new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          token.renew(config);
          token.cancel(config);
          return null;
        }
      });
}
 
Example 26
Project: hadoop   File: JobTokenSecretManager.java
/**
 * Add the job token of a job to the cache
 * @param jobId the job that owns the token
 * @param token the job token
 */
public void addTokenForJob(String jobId, Token<JobTokenIdentifier> token) {
  SecretKey tokenSecret = createSecretKey(token.getPassword());
  synchronized (currentJobTokens) {
    currentJobTokens.put(jobId, tokenSecret);
  }
}
 
Example 27
Project: hadoop   File: DelegationTokenAuthenticationHandler.java
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
  Map json = new LinkedHashMap();
  json.put(
      KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
      token.encodeToUrlString());
  Map response = new LinkedHashMap();
  response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON,
      json);
  return response;
}
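Given Hadoop's constants (DELEGATION_TOKEN_JSON is "Token" and DELEGATION_TOKEN_URL_STRING_JSON is "urlString"), the map built here serializes to JSON of the shape {"Token":{"urlString":"<encoded token>"}}, where the encoded value is the same URL-safe form produced by Token.encodeToUrlString().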
 
Example 28
Project: hadoop   File: RMWebServices.java
private Token<RMDelegationTokenIdentifier> extractToken(String encodedToken) {
  Token<RMDelegationTokenIdentifier> token =
      new Token<RMDelegationTokenIdentifier>();
  try {
    token.decodeFromUrlString(encodedToken);
  } catch (Exception ie) {
    String msg = "Could not decode encoded token";
    throw new BadRequestException(msg);
  }
  return token;
}
 
Example 29
Project: ditb   File: TokenProvider.java
@Override
public void getAuthenticationToken(RpcController controller,
                                   AuthenticationProtos.GetAuthenticationTokenRequest request,
                                   RpcCallback<AuthenticationProtos.GetAuthenticationTokenResponse> done) {
  AuthenticationProtos.GetAuthenticationTokenResponse.Builder response =
      AuthenticationProtos.GetAuthenticationTokenResponse.newBuilder();

  try {
    if (secretManager == null) {
      throw new IOException(
          "No secret manager configured for token authentication");
    }

    User currentUser = RpcServer.getRequestUser();
    UserGroupInformation ugi = null;
    if (currentUser != null) {
      ugi = currentUser.getUGI();
    }
    if (currentUser == null) {
      throw new AccessDeniedException("No authenticated user for request!");
    } else if (!isAllowedDelegationTokenOp(ugi)) {
      LOG.warn("Token generation denied for user="+currentUser.getName()
          +", authMethod="+ugi.getAuthenticationMethod());
      throw new AccessDeniedException(
          "Token generation only allowed for Kerberos authenticated clients");
    }

    Token<AuthenticationTokenIdentifier> token =
        secretManager.generateToken(currentUser.getName());
    response.setToken(ProtobufUtil.toToken(token)).build();
  } catch (IOException ioe) {
    ResponseConverter.setControllerException(controller, ioe);
  }
  done.run(response.build());
}
 
Example 30
Project: hadoop   File: DataNode.java
FileInputStream[] requestShortCircuitFdsForRead(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> token, int maxVersion) 
        throws ShortCircuitFdsUnsupportedException,
          ShortCircuitFdsVersionException, IOException {
  if (fileDescriptorPassingDisabledReason != null) {
    throw new ShortCircuitFdsUnsupportedException(
        fileDescriptorPassingDisabledReason);
  }
  checkBlockToken(blk, token, BlockTokenSecretManager.AccessMode.READ);
  int blkVersion = CURRENT_BLOCK_FORMAT_VERSION;
  if (maxVersion < blkVersion) {
    throw new ShortCircuitFdsVersionException("Your client is too old " +
      "to read this block!  Its format version is " + 
      blkVersion + ", but the highest format version you can read is " +
      maxVersion);
  }
  metrics.incrBlocksGetLocalPathInfo();
  FileInputStream[] fis = new FileInputStream[2];
  
  try {
    fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
    fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
  } catch (ClassCastException e) {
    LOG.debug("requestShortCircuitFdsForRead failed", e);
    throw new ShortCircuitFdsUnsupportedException("This DataNode's " +
        "FsDatasetSpi does not support short-circuit local reads");
  }
  return fis;
}
 
Example 31
Project: hadoop-oss   File: DelegationTokenManager.java
@SuppressWarnings("unchecked")
public UserGroupInformation verifyToken(
    Token<? extends AbstractDelegationTokenIdentifier> token)
        throws IOException {
  AbstractDelegationTokenIdentifier id = secretManager.decodeTokenIdentifier(token);
  secretManager.verifyToken(id, token.getPassword());
  return id.getUser();
}
 
Example 32
Project: hadoop   File: MiniRPCBenchmark.java
void connectToServerAndGetDelegationToken(
    final Configuration conf, final InetSocketAddress addr) throws IOException {
  MiniProtocol client = null;
  try {
    UserGroupInformation current = UserGroupInformation.getCurrentUser();
    UserGroupInformation proxyUserUgi = 
      UserGroupInformation.createProxyUserForTesting(
          MINI_USER, current, GROUP_NAMES);
    
    try {
      client =  proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
        @Override
        public MiniProtocol run() throws IOException {
          MiniProtocol p = RPC.getProxy(MiniProtocol.class,
              MiniProtocol.versionID, addr, conf);
          Token<TestDelegationTokenIdentifier> token;
          token = p.getDelegationToken(new Text(RENEWER));
          currentUgi = UserGroupInformation.createUserForTesting(MINI_USER, 
              GROUP_NAMES);
          SecurityUtil.setTokenService(token, addr);
          currentUgi.addToken(token);
          return p;
        }
      });
    } catch (InterruptedException e) {
      Assert.fail(Arrays.toString(e.getStackTrace()));
    }
  } finally {
    RPC.stopProxy(client);
  }
}
 
Example 33
Project: hadoop   File: TokenAspect.java
synchronized void initDelegationToken(UserGroupInformation ugi) {
  Token<?> token = selectDelegationToken(ugi);
  if (token != null) {
    LOG.debug("Found existing DT for " + token.getService());
    fs.setDelegationToken(token);
    hasInitedToken = true;
  }
}
 
Example 34
Project: hadoop   File: ClientDatanodeProtocolTranslatorPB.java
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
    long[] blockIds,
    List<Token<BlockTokenIdentifier>> tokens) throws IOException {
  List<TokenProto> tokensProtos = 
      new ArrayList<TokenProto>(tokens.size());
  for (Token<BlockTokenIdentifier> t : tokens) {
    tokensProtos.add(PBHelper.convert(t));
  }
  // Build the request
  GetHdfsBlockLocationsRequestProto request = 
      GetHdfsBlockLocationsRequestProto.newBuilder()
      .setBlockPoolId(blockPoolId)
      .addAllBlockIds(Longs.asList(blockIds))
      .addAllTokens(tokensProtos)
      .build();
  // Send the RPC
  GetHdfsBlockLocationsResponseProto response;
  try {
    response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
  // List of volumes in the response
  List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
  List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
  for (ByteString bs : volumeIdsByteStrings) {
    volumeIds.add(bs.toByteArray());
  }
  // Array of indexes into the list of volumes, one per block
  List<Integer> volumeIndexes = response.getVolumeIndexesList();
  // Parsed HdfsVolumeId values, one per block
  return new HdfsBlocksMetadata(blockPoolId, blockIds,
      volumeIds, volumeIndexes);
}
 
Example 35
Project: hadoop   File: DFSClient.java
@SuppressWarnings("unchecked")
@Override
public void cancel(Token<?> token, Configuration conf) throws IOException {
  Token<DelegationTokenIdentifier> delToken = 
      (Token<DelegationTokenIdentifier>) token;
  LOG.info("Cancelling " + 
           DelegationTokenIdentifier.stringifyToken(delToken));
  ClientProtocol nn = getNNProxy(delToken, conf);
  try {
    nn.cancelDelegationToken(delToken);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(InvalidToken.class,
        AccessControlException.class);
  }
}
 
Example 36
Project: dremio-oss   File: FileSystemWrapper.java
@Override
@Private
public Token<?> getDelegationToken(String renewer) throws IOException {
  try {
    return underlyingFs.getDelegationToken(renewer);
  } catch(FSError e) {
    throw propagateFSError(e);
  }
}
 
Example 37
Project: hadoop   File: TestDelegationTokensWithHA.java
@Test(timeout = 300000)
public void testDelegationTokenWithDoAs() throws Exception {
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(fs, "JobTracker");
  final UserGroupInformation longUgi = UserGroupInformation
      .createRemoteUser("JobTracker/[email protected]");
  final UserGroupInformation shortUgi = UserGroupInformation
      .createRemoteUser("JobTracker");
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      // try renew with long name
      token.renew(conf);
      return null;
    }
  });
  shortUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      token.renew(conf);
      return null;
    }
  });
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      token.cancel(conf);
      return null;
    }
  });
}
 
Example 38
Project: hadoop   File: TestEncryptionZones.java
/**
 * Tests obtaining delegation token from stored key
 */
@Test(timeout = 120000)
public void testDelegationToken() throws Exception {
  UserGroupInformation.createRemoteUser("JobTracker");
  DistributedFileSystem dfs = cluster.getFileSystem();
  KeyProvider keyProvider = Mockito.mock(KeyProvider.class,
      withSettings().extraInterfaces(
          DelegationTokenExtension.class,
          CryptoExtension.class));
  Mockito.when(keyProvider.getConf()).thenReturn(conf);
  byte[] testIdentifier = "Test identifier for delegation token".getBytes();

  Token<?> testToken = new Token(testIdentifier, new byte[0],
      new Text(), new Text());
  Mockito.when(((DelegationTokenExtension)keyProvider).
      addDelegationTokens(anyString(), (Credentials)any())).
      thenReturn(new Token<?>[] { testToken });

  dfs.getClient().setKeyProvider(keyProvider);

  Credentials creds = new Credentials();
  final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
  DistributedFileSystem.LOG.debug("Delegation tokens: " +
      Arrays.asList(tokens));
  Assert.assertEquals(2, tokens.length);
  Assert.assertEquals(tokens[1], testToken);
  Assert.assertEquals(1, creds.numberOfTokens());
}
 
Example 39
Project: hadoop   File: MapTaskImpl.java
public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
    Path remoteJobConfFile, JobConf conf,
    TaskSplitMetaInfo taskSplitMetaInfo,
    TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.MAP, partition, eventHandler, remoteJobConfFile,
      conf, taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.taskSplitMetaInfo = taskSplitMetaInfo;
}
 
Example 40
Project: hadoop   File: ViewFsBaseTest.java
/**
 * This default implementation applies when viewfs has mount points
 * into file systems, such as LocalFs, that do not have delegation tokens.
 * It should be overridden when there are mount points into HDFS.
 */
@Test
public void testGetDelegationTokens() throws IOException {
  List<Token<?>> delTokens = 
      fcView.getDelegationTokens(new Path("/"), "sanjay");
  Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
}