org.apache.hadoop.security.token.Token Java Examples
The following examples show how to use
org.apache.hadoop.security.token.Token.
Each example links back to its original project; the source file and license are noted above each snippet.
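Before working through the examples, it may help to see the Token API surface in isolation. The sketch below uses only methods that also appear in the examples that follow (the four-argument constructor, getKind(), getService(), and the URL-string codec); the identifier and password bytes, the kind EXAMPLE_KIND, and the service address are hypothetical placeholders, since real tokens are issued by a secret manager or fetched from a remote service.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

public class TokenBasics {
  public static void main(String[] args) throws Exception {
    // Hypothetical identifier/password bytes; real tokens come from a
    // secret manager or a service, as the examples below demonstrate.
    byte[] identifier = "my-identifier".getBytes();
    byte[] password = "my-password".getBytes();
    Token<?> token = new Token<>(identifier, password,
        new Text("EXAMPLE_KIND"), new Text("example-host:8020"));

    System.out.println("kind    = " + token.getKind());
    System.out.println("service = " + token.getService());

    // Tokens round-trip through a compact URL-safe string encoding.
    String encoded = token.encodeToUrlString();
    Token<?> decoded = new Token<>();
    decoded.decodeFromUrlString(encoded);
    System.out.println("round-trip ok = " + decoded.equals(token));
  }
}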
Example #1
Source File: StramClientUtils.java From Bats with Apache License 2.0
public void addRMDelegationToken(final String renewer, final Credentials credentials)
    throws IOException, YarnException {
  // Get the ResourceManager delegation token
  final org.apache.hadoop.yarn.api.records.Token rmDelegationToken =
      clientRM.getRMDelegationToken(new Text(renewer));

  Token<RMDelegationTokenIdentifier> token;
  // TODO: Use the utility method getRMDelegationTokenService in ClientRMProxy to remove the separate handling of
  // TODO: HA and non-HA cases when hadoop dependency is changed to hadoop 2.4 or above
  if (ConfigUtils.isRMHAEnabled(conf)) {
    LOG.info("Yarn Resource Manager HA is enabled");
    token = getRMHAToken(rmDelegationToken);
  } else {
    LOG.info("Yarn Resource Manager HA is not enabled");
    InetSocketAddress rmAddress = conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_ADDRESS,
        YarnConfiguration.DEFAULT_RM_PORT);
    token = ConverterUtils.convertFromYarn(rmDelegationToken, rmAddress);
  }

  LOG.info("RM dt {}", token);

  credentials.addToken(token.getService(), token);
}
Example #2
Source File: TestUserGroupInformation.java From hadoop with Apache License 2.0
/**
 * In some scenarios, such as HA, delegation tokens are associated with a
 * logical name. The tokens are cloned and are associated with the
 * physical address of the server where the service is provided.
 * This test ensures cloned delegated tokens are locally used
 * and are not returned in {@link UserGroupInformation#getCredentials()}
 */
@Test
public void testPrivateTokenExclusion() throws Exception {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  TestTokenIdentifier tokenId = new TestTokenIdentifier();
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(
      tokenId.getBytes(), "password".getBytes(), tokenId.getKind(), null);
  ugi.addToken(new Text("regular-token"), token);

  // Now add cloned private token
  ugi.addToken(new Text("private-token"),
      new Token.PrivateToken<TestTokenIdentifier>(token));
  ugi.addToken(new Text("private-token1"),
      new Token.PrivateToken<TestTokenIdentifier>(token));

  // Ensure only non-private tokens are returned
  Collection<Token<? extends TokenIdentifier>> tokens =
      ugi.getCredentials().getAllTokens();
  assertEquals(1, tokens.size());
}
Example #3
Source File: TestBinaryTokenFile.java From hadoop with Apache License 2.0
private static void createBinaryTokenFile(Configuration conf) {
  // Fetch delegation tokens and store in binary token file.
  try {
    Credentials cred1 = new Credentials();
    Credentials cred2 = new Credentials();
    TokenCache.obtainTokensForNamenodesInternal(cred1, new Path[] { p1 }, conf);
    for (Token<? extends TokenIdentifier> t : cred1.getAllTokens()) {
      cred2.addToken(new Text(DELEGATION_TOKEN_KEY), t);
    }
    DataOutputStream os = new DataOutputStream(new FileOutputStream(
        binaryTokenFileName.toString()));
    try {
      cred2.writeTokenStorageToStream(os);
    } finally {
      os.close();
    }
  } catch (IOException e) {
    Assert.fail("Exception " + e);
  }
}
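As a counterpart to the example above, the binary token file produced by writeTokenStorageToStream can be read back with Credentials.readTokenStorageFile. A minimal read-back sketch, assuming binaryTokenFileName and conf carry the same values used in createBinaryTokenFile; the helper name dumpBinaryTokenFile is hypothetical:

/** Read back and list the tokens written by createBinaryTokenFile above. */
private static void dumpBinaryTokenFile(Configuration conf) throws IOException {
  Credentials creds = Credentials.readTokenStorageFile(
      new Path(binaryTokenFileName.toString()), conf);
  for (Token<? extends TokenIdentifier> t : creds.getAllTokens()) {
    System.out.println("kind=" + t.getKind() + " service=" + t.getService());
  }
}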
Example #4
Source File: TestClientRMTokens.java From hadoop with Apache License 2.0
private long renewDelegationToken(final UserGroupInformation loggedInUser,
    final ApplicationClientProtocol clientRMService,
    final org.apache.hadoop.yarn.api.records.Token dToken)
    throws IOException, InterruptedException {
  long nextExpTime = loggedInUser.doAs(new PrivilegedExceptionAction<Long>() {
    @Override
    public Long run() throws YarnException, IOException {
      RenewDelegationTokenRequest request = Records
          .newRecord(RenewDelegationTokenRequest.class);
      request.setDelegationToken(dToken);
      return clientRMService.renewDelegationToken(request)
          .getNextExpirationTime();
    }
  });
  return nextExpTime;
}
Example #5
Source File: HadoopSecurityManager_H_2_0.java From azkaban-plugins with Apache License 2.0
private void cancelNameNodeToken(final Token<? extends TokenIdentifier> t,
    String userToProxy) throws HadoopSecurityManagerException {
  try {
    getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        cancelToken(t);
        return null;
      }

      private void cancelToken(Token<?> nt) throws IOException,
          InterruptedException {
        nt.cancel(conf);
      }
    });
  } catch (Exception e) {
    throw new HadoopSecurityManagerException("Failed to cancel token. "
        + e.getMessage() + e.getCause(), e);
  }
}
Example #6
Source File: TcpPeerServer.java From big-c with Apache License 2.0
public static Peer peerFromSocketAndKey(
    SaslDataTransferClient saslClient, Socket s,
    DataEncryptionKeyFactory keyFactory,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  try {
    peer = peerFromSocket(s);
    peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(null, peer);
    }
  }
}
Example #7
Source File: WebHdfsFileSystem.java From hadoop with Apache License 2.0
protected synchronized Token<?> getDelegationToken() throws IOException {
  if (canRefreshDelegationToken && delegationToken == null) {
    Token<?> token = tokenSelector.selectToken(
        new Text(getCanonicalServiceName()), ugi.getTokens());
    // ugi tokens are usually indicative of a task which can't
    // refetch tokens.  even if ugi has credentials, don't attempt
    // to get another token to match hdfs/rpc behavior
    if (token != null) {
      LOG.debug("Using UGI token: " + token);
      canRefreshDelegationToken = false;
    } else {
      token = getDelegationToken(null);
      if (token != null) {
        LOG.debug("Fetched new token: " + token);
      } else { // security is disabled
        canRefreshDelegationToken = false;
      }
    }
    setDelegationToken(token);
  }
  return delegationToken;
}
Example #8
Source File: TestApplicationMasterServiceProtocolOnHA.java From hadoop with Apache License 2.0
@Before
public void initialize() throws Exception {
  startHACluster(0, false, false, true);
  attemptId = this.cluster.createFakeApplicationAttemptId();
  amClient = ClientRMProxy
      .createRMProxy(this.conf, ApplicationMasterProtocol.class);

  Token<AMRMTokenIdentifier> appToken =
      this.cluster.getResourceManager().getRMContext()
          .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId);
  appToken.setService(ClientRMProxy.getAMRMTokenService(conf));
  UserGroupInformation.setLoginUser(UserGroupInformation
      .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
  UserGroupInformation.getCurrentUser().addToken(appToken);
  syncToken(appToken);
}
Example #9
Source File: ClientTokenUtil.java From hbase with Apache License 2.0
/**
 * Obtain and return an authentication token for the current user.
 * @param conn The HBase cluster connection
 * @throws IOException if a remote error or serialization problem occurs.
 * @return the authentication token instance
 */
@InterfaceAudience.Private
static Token<AuthenticationTokenIdentifier> obtainToken(Connection conn)
    throws IOException {
  Table meta = null;
  try {
    injectFault();

    meta = conn.getTable(TableName.META_TABLE_NAME);
    CoprocessorRpcChannel rpcChannel =
        meta.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
    AuthenticationProtos.GetAuthenticationTokenResponse response =
        service.getAuthenticationToken(null,
            AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());

    return toToken(response.getToken());
  } catch (ServiceException se) {
    throw ProtobufUtil.handleRemoteException(se);
  } finally {
    if (meta != null) {
      meta.close();
    }
  }
}
Example #10
Source File: DFSClient.java From hadoop with Apache License 2.0
@Override // RemotePeerFactory
public Peer newConnectedPeer(InetSocketAddress addr,
    Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
    throws IOException {
  Peer peer = null;
  boolean success = false;
  Socket sock = null;
  try {
    sock = socketFactory.createSocket();
    NetUtils.connect(sock, addr,
        getRandomLocalInterfaceAddr(),
        dfsClientConf.socketTimeout);
    peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
        blockToken, datanodeId);
    peer.setReadTimeout(dfsClientConf.socketTimeout);
    success = true;
    return peer;
  } finally {
    if (!success) {
      IOUtils.cleanup(LOG, peer);
      IOUtils.closeSocket(sock);
    }
  }
}
Example #11
Source File: DataXceiver.java From big-c with Apache License 2.0
@Override
public void transferBlock(final ExtendedBlock blk,
    final Token<BlockTokenIdentifier> blockToken,
    final String clientName,
    final DatanodeInfo[] targets,
    final StorageType[] targetStorageTypes) throws IOException {
  checkAccess(socketOut, true, blk, blockToken,
      Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
  previousOpClientName = clientName;
  updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);

  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  try {
    datanode.transferReplicaForPipelineRecovery(blk, targets,
        targetStorageTypes, clientName);
    writeResponse(Status.SUCCESS, null, out);
  } catch (IOException ioe) {
    LOG.info("transferBlock " + blk + " received exception " + ioe);
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeStream(out);
  }
}
Example #12
Source File: OzoneManagerProtocolClientSideTranslatorPB.java From hadoop-ozone with Apache License 2.0
/**
 * Cancel an existing delegation token.
 *
 * @param token delegation token
 */
@Override
public void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
    throws OMException {
  CancelDelegationTokenRequestProto req = CancelDelegationTokenRequestProto
      .newBuilder()
      .setToken(OMPBHelper.convertToTokenProto(token))
      .build();

  OMRequest omRequest = createOMRequest(Type.CancelDelegationToken)
      .setCancelDelegationTokenRequest(req)
      .build();

  try {
    handleError(submitRequest(omRequest));
  } catch (IOException e) {
    if (e instanceof OMException) {
      throw (OMException) e;
    }
    throw new OMException("Cancel delegation token failed.", e,
        TOKEN_ERROR_OTHER);
  }
}
Example #13
Source File: TestDelegationToken.java From hadoop with Apache License 2.0
@Test
public void testAddDelegationTokensDFSApi() throws Exception {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
  DistributedFileSystem dfs = cluster.getFileSystem();
  Credentials creds = new Credentials();
  final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(1, tokens.length);
  Assert.assertEquals(1, creds.numberOfTokens());
  checkTokenIdentifier(ugi, tokens[0]);

  final Token<?>[] tokens2 = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(0, tokens2.length); // already have token
  Assert.assertEquals(1, creds.numberOfTokens());
}
Example #14
Source File: SaslClientAuthenticationProviders.java From hbase with Apache License 2.0
/**
 * Returns the provider and token pair for SIMPLE authentication.
 *
 * This method is a "hack" while SIMPLE authentication for HBase does not flow through
 * the SASL codepath.
 */
public Pair<SaslClientAuthenticationProvider, Token<? extends TokenIdentifier>>
    getSimpleProvider() {
  Optional<SaslClientAuthenticationProvider> optional = providers.stream()
      .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider)
      .findFirst();
  return new Pair<>(optional.get(), null);
}
Example #15
Source File: TestDelegationTokenRemoteFetcher.java From hadoop with Apache License 2.0
@Override
public void handle(Channel channel, Token<DelegationTokenIdentifier> token,
    String serviceUrl) throws IOException {
  Assert.assertEquals(testToken, token);
  HttpResponse response = new DefaultHttpResponse(HTTP_1_1,
      HttpResponseStatus.METHOD_NOT_ALLOWED);
  channel.write(response).addListener(ChannelFutureListener.CLOSE);
}
Example #16
Source File: TokenUtil.java From elasticsearch-hadoop with Apache License 2.0
/**
 * Obtain an authentication token on behalf of the given user and add it to
 * the credentials for the given map reduce job. This version always obtains
 * a fresh authentication token instead of checking for existing ones on the
 * current user.
 *
 * @param client The Elasticsearch client
 * @param user The user for whom to obtain the token
 * @param job The job instance in which the token should be stored
 */
public static void obtainTokenForJob(final RestClient client, User user, Job job) {
  Token<EsTokenIdentifier> token = obtainToken(client, user);
  if (token == null) {
    throw new EsHadoopException("No token returned for user "
        + user.getKerberosPrincipal().getName());
  }
  Text clusterName = token.getService();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Obtained token " + EsTokenIdentifier.KIND_NAME.toString()
        + " for user " + user.getKerberosPrincipal().getName()
        + " on cluster " + clusterName.toString());
  }
  job.getCredentials().addToken(clusterName, token);
}
Example #17
Source File: DelegationTokenManager.java From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
public UserGroupInformation verifyToken(
    Token<? extends AbstractDelegationTokenIdentifier> token)
    throws IOException {
  AbstractDelegationTokenIdentifier id =
      secretManager.decodeTokenIdentifier(token);
  secretManager.verifyToken(id, token.getPassword());
  return id.getUser();
}
Example #18
Source File: MockAM.java From hadoop with Apache License 2.0
public RegisterApplicationMasterResponse registerAppAttempt(boolean wait)
    throws Exception {
  if (wait) {
    waitForState(RMAppAttemptState.LAUNCHED);
  }
  responseId = 0;
  final RegisterApplicationMasterRequest req =
      Records.newRecord(RegisterApplicationMasterRequest.class);
  req.setHost("");
  req.setRpcPort(1);
  req.setTrackingUrl("");
  if (ugi == null) {
    ugi = UserGroupInformation.createRemoteUser(attemptId.toString());
    Token<AMRMTokenIdentifier> token =
        context.getRMApps().get(attemptId.getApplicationId())
            .getRMAppAttempt(attemptId).getAMRMToken();
    ugi.addTokenIdentifier(token.decodeIdentifier());
  }
  try {
    return ugi.doAs(
        new PrivilegedExceptionAction<RegisterApplicationMasterResponse>() {
          @Override
          public RegisterApplicationMasterResponse run() throws Exception {
            return amRMProtocol.registerApplicationMaster(req);
          }
        });
  } catch (UndeclaredThrowableException e) {
    throw (Exception) e.getCause();
  }
}
Example #19
Source File: DelegationTokenManager.java From hadoop with Apache License 2.0
@SuppressWarnings("unchecked")
public UserGroupInformation verifyToken(
    Token<? extends AbstractDelegationTokenIdentifier> token)
    throws IOException {
  AbstractDelegationTokenIdentifier id =
      secretManager.decodeTokenIdentifier(token);
  secretManager.verifyToken(id, token.getPassword());
  return id.getUser();
}
Example #20
Source File: TestFailoverWithBlockTokensEnabled.java From hadoop with Apache License 2.0
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);

  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));

  DFSClient dfsClient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(
      new Answer<LocatedBlocks>() {
        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
          LocatedBlocks locatedBlocks = (LocatedBlocks) arg0.callRealMethod();
          for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            Token<BlockTokenIdentifier> token = lb.getBlockToken();
            BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
            // This will make the token invalid, since the password
            // won't match anymore
            id.setExpiryDate(Time.now() + 10);
            Token<BlockTokenIdentifier> newToken =
                new Token<BlockTokenIdentifier>(id.getBytes(),
                    token.getPassword(), token.getKind(),
                    token.getService());
            lb.setBlockToken(newToken);
          }
          return locatedBlocks;
        }
      }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
          Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyDfsClient);

  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
Example #21
Source File: PingServer.java From gcp-token-broker with Apache License 2.0
private static void checkCancelSessionToken(Configuration config, String sessionToken)
    throws IOException {
  try {
    Token<BrokerTokenIdentifier> token = getTokenBTI(sessionToken);
    BrokerTokenRenewer renewer = new BrokerTokenRenewer();
    renewer.cancel(token, config);
    System.out.println(CHECK_SUCCESS);
  } catch (Exception e) {
    System.out.println(CHECK_FAIL);
    e.printStackTrace(System.out);
    System.out.println();
  }
}
Example #22
Source File: OzoneDelegationTokenSecretManager.java From hadoop-ozone with Apache License 2.0
/**
 * Add a delegation token to the in-memory map of tokens.
 * @param token the delegation token to store
 * @param ozoneTokenIdentifier the identifier of the token
 * @param tokenRenewInterval the renew interval to add to the issue date
 * @return renewTime - if updated successfully, return renewTime
 */
public long updateToken(Token<OzoneTokenIdentifier> token,
    OzoneTokenIdentifier ozoneTokenIdentifier, long tokenRenewInterval) {
  long renewTime = ozoneTokenIdentifier.getIssueDate() + tokenRenewInterval;
  TokenInfo tokenInfo = new TokenInfo(renewTime, token.getPassword(),
      ozoneTokenIdentifier.getTrackingId());
  currentTokens.put(ozoneTokenIdentifier, tokenInfo);
  return renewTime;
}
Example #23
Source File: TestDelegationTokensWithHA.java From big-c with Apache License 2.0
/**
 * HDFS-3062: DistributedFileSystem.getCanonicalServiceName() throws an
 * exception if the URI is a logical URI. This bug fails the combination of
 * ha + mapred + security.
 */
@Test(timeout = 300000)
public void testDFSGetCanonicalServiceName() throws Exception {
  URI hAUri = HATestUtil.getLogicalUri(cluster);
  String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri,
      HdfsConstants.HDFS_URI_SCHEME).toString();
  assertEquals(haService, dfs.getCanonicalServiceName());
  final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(dfs, renewer);
  assertEquals(haService, token.getService().toString());
  // make sure the logical uri is handled correctly
  token.renew(dfs.getConf());
  token.cancel(dfs.getConf());
}
Example #24
Source File: LogUtils.java From incubator-tez with Apache License 2.0
public static void logCredentials(Log log, Credentials credentials, String identifier) {
  if (log.isDebugEnabled()) {
    StringBuilder sb = new StringBuilder();
    sb.append("#" + identifier + "Tokens=").append(credentials.numberOfTokens());
    if (credentials.numberOfTokens() > 0) {
      sb.append(", Services: ");
      for (Token<?> t : credentials.getAllTokens()) {
        sb.append(t.getService()).append(",");
      }
    }
    log.debug(sb.toString());
  }
}
Example #25
Source File: TokenUtils.java From incubator-gobblin with Apache License 2.0
/**
 * Fetches an hcat token as per the specified hive configuration and then stores the
 * token into the specified credential store.
 *
 * @param userToProxy the name of the user the token will be fetched for
 * @param hiveConf the configuration based off which the hive client will be initialized
 * @param tokenSignatureOverwrite an optional override for the service field of the token
 * @param hiveClient the metastore client used to request the delegation token
 */
private static Token<DelegationTokenIdentifier> fetchHcatToken(final String userToProxy,
    final HiveConf hiveConf, final String tokenSignatureOverwrite,
    final IMetaStoreClient hiveClient)
    throws IOException, TException, InterruptedException {

  LOG.info(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname));
  LOG.info(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname + ": "
      + hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname));

  final Token<DelegationTokenIdentifier> hcatToken = new Token<>();
  hcatToken.decodeFromUrlString(hiveClient.getDelegationToken(userToProxy,
      UserGroupInformation.getLoginUser().getShortUserName()));

  // Overwrite the value of the service property of the token if the signature
  // override is specified. If the service field is already set, do not overwrite it.
  if (hcatToken.getService().getLength() <= 0 && tokenSignatureOverwrite != null
      && tokenSignatureOverwrite.trim().length() > 0) {
    hcatToken.setService(new Text(tokenSignatureOverwrite.trim().toLowerCase()));
    LOG.info(HIVE_TOKEN_SIGNATURE_KEY + ":" + tokenSignatureOverwrite);
  }

  LOG.info("Created hive metastore token for user:" + userToProxy + " with kind["
      + hcatToken.getKind() + "]" + " and service[" + hcatToken.getService() + "]");
  return hcatToken;
}
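A typical caller would then stash the returned token in a Credentials object under its service name, mirroring the pattern from Example #1. A brief hedged usage sketch; the cred variable is a hypothetical Credentials instance supplied by the caller:

// Store the fetched hcat token for later job submission.
Token<DelegationTokenIdentifier> hcatToken =
    fetchHcatToken(userToProxy, hiveConf, tokenSignatureOverwrite, hiveClient);
cred.addToken(hcatToken.getService(), hcatToken);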
Example #26
Source File: WebHdfsFileSystem.java From hadoop with Apache License 2.0
@Override
public <T extends TokenIdentifier> void setDelegationToken(
    final Token<T> token) {
  synchronized (this) {
    delegationToken = token;
  }
}
Example #27
Source File: ReduceTaskImpl.java From big-c with Apache License 2.0
public ReduceTaskImpl(JobId jobId, int partition,
    EventHandler eventHandler, Path jobFile, JobConf conf,
    int numMapTasks, TaskAttemptListener taskAttemptListener,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    int appAttemptId, MRAppMetrics metrics, AppContext appContext) {
  super(jobId, TaskType.REDUCE, partition, eventHandler, jobFile, conf,
      taskAttemptListener, jobToken, credentials, clock,
      appAttemptId, metrics, appContext);
  this.numMapTasks = numMapTasks;
}
Example #28
Source File: BlockStorageLocationUtil.java From big-c with Apache License 2.0
VolumeBlockLocationCallable(Configuration configuration,
    DatanodeInfo datanode, String poolId, long[] blockIds,
    List<Token<BlockTokenIdentifier>> dnTokens, int timeout,
    boolean connectToDnViaHostname, Span parentSpan) {
  this.configuration = configuration;
  this.timeout = timeout;
  this.datanode = datanode;
  this.poolId = poolId;
  this.blockIds = blockIds;
  this.dnTokens = dnTokens;
  this.connectToDnViaHostname = connectToDnViaHostname;
  this.parentSpan = parentSpan;
}
Example #29
Source File: TestDelegationTokensWithHA.java From hadoop with Apache License 2.0
@Test(timeout = 300000)
public void testDelegationTokenWithDoAs() throws Exception {
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(fs, "JobTracker");
  final UserGroupInformation longUgi = UserGroupInformation
      .createRemoteUser("JobTracker/[email protected]");
  final UserGroupInformation shortUgi = UserGroupInformation
      .createRemoteUser("JobTracker");
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      // try renew with long name
      token.renew(conf);
      return null;
    }
  });
  shortUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      token.renew(conf);
      return null;
    }
  });
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      token.cancel(conf);
      return null;
    }
  });
}
Example #30
Source File: BlockTokenSecretManager.java From big-c with Apache License 2.0
/** Generate a block token for the current user */
public Token<BlockTokenIdentifier> generateToken(ExtendedBlock block,
    EnumSet<AccessMode> modes) throws IOException {
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  String userID = (ugi == null ? null : ugi.getShortUserName());
  return generateToken(userID, block, modes);
}