Java Code Examples for org.apache.hadoop.security.UserGroupInformation.addToken()

The following are Java code examples showing how to use the addToken() method of the org.apache.hadoop.security.UserGroupInformation class. The examples are taken from the open-source projects and files indicated in each heading.
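
Before the individual examples, here is a minimal sketch of the pattern most of them follow: create a UserGroupInformation (for instance with createRemoteUser()), attach the token with addToken(), and run the RPC work inside doAs() so the attached token is used for authentication. The class name AddTokenSketch, the method runWithToken() and the user name "someuser" are placeholders for illustration, not taken from any of the projects below.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class AddTokenSketch {
  // Placeholder helper: the caller is assumed to have obtained a token elsewhere.
  public static void runWithToken(final Token<? extends TokenIdentifier> token)
      throws Exception {
    // Create a UGI for the remote user and attach the token to its credentials.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someuser");
    ugi.addToken(token);

    // Execute the privileged action as that user; RPC clients created inside
    // doAs() can pick up the attached token for TOKEN (SASL) authentication.
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // ... create an RPC proxy and make calls here ...
        return null;
      }
    });
  }
}
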
Example 1
Project: hadoop   File: TestClientToAMTokens.java
private void verifyValidToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token) throws IOException,
    InterruptedException {
  UserGroupInformation ugi;
  ugi = UserGroupInformation.createRemoteUser("me");
  ugi.addToken(token);

  ugi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      CustomProtocol client =
          (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L, am.address,
            conf);
      client.ping();
      Assert.assertTrue(am.pinged);
      return null;
    }
  });
}
 
Example 2
Project: hadoop   File: TestClientRMTokens.java
private ApplicationClientProtocol getClientRMProtocolWithDT(
    org.apache.hadoop.yarn.api.records.Token token,
    final InetSocketAddress rmAddress, String user, final Configuration conf) {
  // Maybe consider converting to Hadoop token, serialize de-serialize etc
  // before trying to renew the token.

  UserGroupInformation ugi = UserGroupInformation
      .createRemoteUser(user);
  ugi.addToken(ConverterUtils.convertFromYarn(token, rmAddress));

  final YarnRPC rpc = YarnRPC.create(conf);
  ApplicationClientProtocol clientRMWithDT = ugi
      .doAs(new PrivilegedAction<ApplicationClientProtocol>() {
        @Override
        public ApplicationClientProtocol run() {
          return (ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class,
              rmAddress, conf);
        }
      });
  return clientRMWithDT;
}
 
Example 3
Project: hadoop   File: ContainerManagementProtocolProxy.java
@Private
@VisibleForTesting
protected ContainerManagementProtocol newProxy(final YarnRPC rpc,
    String containerManagerBindAddr, ContainerId containerId, Token token)
    throws InvalidToken {

  if (token == null) {
    throw new InvalidToken("No NMToken sent for "
        + containerManagerBindAddr);
  }
  
  final InetSocketAddress cmAddr =
      NetUtils.createSocketAddr(containerManagerBindAddr);
  LOG.info("Opening proxy : " + containerManagerBindAddr);
  // the user in createRemoteUser in this context has to be ContainerID
  UserGroupInformation user =
      UserGroupInformation.createRemoteUser(containerId
          .getApplicationAttemptId().toString());

  org.apache.hadoop.security.token.Token<NMTokenIdentifier> nmToken =
      ConverterUtils.convertFromYarn(token, cmAddr);
  user.addToken(nmToken);

  return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class,
    user, rpc, cmAddr);
}
 
Example 4
Project: hadoop   File: TestWebHdfsUrl.java
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
        ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
        86400000, 86400000, 86400000, 86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
    SecurityUtil.setTokenService(
        token, NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
 
Example 5
Project: hadoop   File: TestJHSSecurity.java
private MRClientProtocol getMRClientProtocol(Token token,
    final InetSocketAddress hsAddress, String user, final Configuration conf) {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
  ugi.addToken(ConverterUtils.convertFromYarn(token, hsAddress));

  final YarnRPC rpc = YarnRPC.create(conf);
  MRClientProtocol hsWithDT = ugi
      .doAs(new PrivilegedAction<MRClientProtocol>() {

        @Override
        public MRClientProtocol run() {
          return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
              hsAddress, conf);
        }
      });
  return hsWithDT;
}
 
Example 6
Project: hadoop   File: TestBlockToken.java
@Test
public void testBlockTokenRpc() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  
  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);

  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  final UserGroupInformation ticket = UserGroupInformation
      .createRemoteUser(block3.toString());
  ticket.addToken(token);

  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
        NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 7
Project: hadoop   File: TestSaslRPC.java
private void doDigestRpc(Server server, TestTokenSecretManager sm
                         ) throws Exception {
  server.start();

  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
      sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  TestSaslProtocol proxy = null;
  try {
    proxy = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, conf);
    AuthMethod authMethod = proxy.getAuthMethod();
    assertEquals(TOKEN, authMethod);
    //QOP must be auth
    assertEquals(expectedQop.saslQop,
                 RPC.getConnectionIdForProxy(proxy).getSaslQop());            
    proxy.ping();
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
 
Example 8
Project: ditb   File: TestTokenAuthentication.java
@Test
public void testTokenAuthentication() throws Exception {
  UserGroupInformation testuser =
      UserGroupInformation.createUserForTesting("testuser", new String[]{"testgroup"});

  testuser.setAuthenticationMethod(
      UserGroupInformation.AuthenticationMethod.TOKEN);
  final Configuration conf = TEST_UTIL.getConfiguration();
  UserGroupInformation.setConfiguration(conf);
  Token<AuthenticationTokenIdentifier> token =
      secretManager.generateToken("testuser");
  LOG.debug("Got token: " + token.toString());
  testuser.addToken(token);

  // verify the server authenticates us as this token user
  testuser.doAs(new PrivilegedExceptionAction<Object>() {
    public Object run() throws Exception {
      Configuration c = server.getConfiguration();
      RpcClient rpcClient = RpcClientFactory.createClient(c, clusterId.toString());
      ServerName sn =
          ServerName.valueOf(server.getAddress().getHostName(), server.getAddress().getPort(),
              System.currentTimeMillis());
      try {
        BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn,
            User.getCurrent(), HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
        AuthenticationProtos.AuthenticationService.BlockingInterface stub =
            AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
        AuthenticationProtos.WhoAmIResponse response =
            stub.whoAmI(null, AuthenticationProtos.WhoAmIRequest.getDefaultInstance());
        String myname = response.getUsername();
        assertEquals("testuser", myname);
        String authMethod = response.getAuthMethod();
        assertEquals("TOKEN", authMethod);
      } finally {
        rpcClient.close();
      }
      return null;
    }
  });
}
 
Example 9
Project: hadoop   File: TestAMAuthorization.java
@Test
public void testAuthorizedAccess() throws Exception {
  MyContainerManager containerManager = new MyContainerManager();
  rm =
      new MockRMWithAMS(conf, containerManager);
  rm.start();

  MockNM nm1 = rm.registerNode("localhost:1234", 5120);

  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(2);
  acls.put(ApplicationAccessType.VIEW_APP, "*");
  RMApp app = rm.submitApp(1024, "appname", "appuser", acls);

  nm1.nodeHeartbeat(true);

  int waitCount = 0;
  while (containerManager.containerTokens == null && waitCount++ < 20) {
    LOG.info("Waiting for AM Launch to happen..");
    Thread.sleep(1000);
  }
  Assert.assertNotNull(containerManager.containerTokens);

  RMAppAttempt attempt = app.getCurrentAppAttempt();
  ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
  waitForLaunchedState(attempt);

  // Create a client to the RM.
  final Configuration conf = rm.getConfig();
  final YarnRPC rpc = YarnRPC.create(conf);

  UserGroupInformation currentUser = UserGroupInformation
      .createRemoteUser(applicationAttemptId.toString());
  Credentials credentials = containerManager.getContainerCredentials();
  final InetSocketAddress rmBindAddress =
      rm.getApplicationMasterService().getBindAddress();
  Token<? extends TokenIdentifier> amRMToken =
      MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
        credentials.getAllTokens());
  currentUser.addToken(amRMToken);
  ApplicationMasterProtocol client = currentUser
      .doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
        @Override
        public ApplicationMasterProtocol run() {
          return (ApplicationMasterProtocol) rpc.getProxy(ApplicationMasterProtocol.class, rm
            .getApplicationMasterService().getBindAddress(), conf);
        }
      });

  RegisterApplicationMasterRequest request = Records
      .newRecord(RegisterApplicationMasterRequest.class);
  RegisterApplicationMasterResponse response =
      client.registerApplicationMaster(request);
  Assert.assertNotNull(response.getClientToAMTokenMasterKey());
  if (UserGroupInformation.isSecurityEnabled()) {
    Assert
      .assertTrue(response.getClientToAMTokenMasterKey().array().length > 0);
  }
  Assert.assertEquals("Register response has bad ACLs", "*",
      response.getApplicationACLs().get(ApplicationAccessType.VIEW_APP));
}
 
Example 10
Project: hadoop   File: AMRMClientImpl.java
private void updateAMRMToken(Token token) throws IOException {
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
      new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token
        .getIdentifier().array(), token.getPassword().array(), new Text(
        token.getKind()), new Text(token.getService()));
  // Preserve the token service sent by the RM when adding the token
  // to ensure we replace the previous token setup by the RM.
  // Afterwards we can update the service address for the RPC layer.
  UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
  currentUGI.addToken(amrmToken);
  amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
}
 
Example 11
Project: hadoop   File: LocalContainerAllocator.java
private void updateAMRMToken(Token token) throws IOException {
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken =
      new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token
        .getIdentifier().array(), token.getPassword().array(), new Text(
        token.getKind()), new Text(token.getService()));
  UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser();
  currentUGI.addToken(amrmToken);
  amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig()));
}
 
Example 12
Project: hadoop   File: DataNodeUGIProvider.java
private UserGroupInformation tokenUGI() throws IOException {
  Token<DelegationTokenIdentifier> token = params.delegationToken();
  ByteArrayInputStream buf =
    new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  DelegationTokenIdentifier id = new DelegationTokenIdentifier();
  id.readFields(in);
  UserGroupInformation ugi = id.getUser();
  ugi.addToken(token);
  return ugi;
}
 
Example 13
Project: hadoop   File: JspHelper.java
private static UserGroupInformation getTokenUGI(ServletContext context,
                                                HttpServletRequest request,
                                                String tokenString,
                                                Configuration conf)
                                                    throws IOException {
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString(tokenString);
  InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
  if (serviceAddress != null) {
    SecurityUtil.setTokenService(token, serviceAddress);
    token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
  }

  ByteArrayInputStream buf =
      new ByteArrayInputStream(token.getIdentifier());
  DataInputStream in = new DataInputStream(buf);
  DelegationTokenIdentifier id = new DelegationTokenIdentifier();
  id.readFields(in);
  if (context != null) {
    final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
    if (nn != null) {
      // Verify the token.
      nn.getNamesystem().verifyToken(id, token.getPassword());
    }
  }
  UserGroupInformation ugi = id.getUser();
  ugi.addToken(token);
  return ugi;
}
 
Example 14
Project: hadoop-oss   File: TestFileSystemCaching.java
@SuppressWarnings("unchecked")
@Test
public <T extends TokenIdentifier> void testCacheForUgi() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
  UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
  FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  //Since the UGIs are the same, we should have the same filesystem for both
  assertSame(fsA, fsA1);
  
  FileSystem fsB = ugiB.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  //Since the UGIs are different, we should end up with different filesystems
  //corresponding to the two UGIs
  assertNotSame(fsA, fsB);
  
  Token<T> t1 = mock(Token.class);
  UserGroupInformation ugiA2 = UserGroupInformation.createRemoteUser("foo");
  
  fsA = ugiA2.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Although the users in the UGI are same, they have different subjects
  // and so are different.
  assertNotSame(fsA, fsA1);
  
  ugiA.addToken(t1);
  
  fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Make sure that different UGI's with the same subject lead to the same
  // file system.
  assertSame(fsA, fsA1);
}
 
Example 15
Project: hadoop   File: TestFileSystemCaching.java
@SuppressWarnings("unchecked")
@Test
public <T extends TokenIdentifier> void testCacheForUgi() throws Exception {
  final Configuration conf = new Configuration();
  conf.set("fs.cachedfile.impl", FileSystem.getFileSystemClass("file", null).getName());
  UserGroupInformation ugiA = UserGroupInformation.createRemoteUser("foo");
  UserGroupInformation ugiB = UserGroupInformation.createRemoteUser("bar");
  FileSystem fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  FileSystem fsA1 = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  //Since the UGIs are the same, we should have the same filesystem for both
  assertSame(fsA, fsA1);
  
  FileSystem fsB = ugiB.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  //Since the UGIs are different, we should end up with different filesystems
  //corresponding to the two UGIs
  assertNotSame(fsA, fsB);
  
  Token<T> t1 = mock(Token.class);
  UserGroupInformation ugiA2 = UserGroupInformation.createRemoteUser("foo");
  
  fsA = ugiA2.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Although the users in the UGI are same, they have different subjects
  // and so are different.
  assertNotSame(fsA, fsA1);
  
  ugiA.addToken(t1);
  
  fsA = ugiA.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return FileSystem.get(new URI("cachedfile://a"), conf);
    }
  });
  // Make sure that different UGI's with the same subject lead to the same
  // file system.
  assertSame(fsA, fsA1);
}
 
Example 16
Project: hadoop   File: TestNMProxy.java
@Test(timeout = 20000)
public void testNMProxyRetry() throws Exception {
  containerManager.start();
  containerManager.setBlockNewContainerRequests(false);
  StartContainersRequest allRequests =
      Records.newRecord(StartContainersRequest.class);
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);

  org.apache.hadoop.yarn.api.records.Token nmToken =
      context.getNMTokenSecretManager().createNMToken(attemptId,
        context.getNodeId(), user);
  final InetSocketAddress address =
      conf.getSocketAddr(YarnConfiguration.NM_BIND_HOST,
        YarnConfiguration.NM_ADDRESS, YarnConfiguration.DEFAULT_NM_ADDRESS,
        YarnConfiguration.DEFAULT_NM_PORT);
  Token<NMTokenIdentifier> token =
      ConverterUtils.convertFromYarn(nmToken,
        SecurityUtil.buildTokenService(address));
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
  ugi.addToken(token);

  ContainerManagementProtocol proxy =
      NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, ugi,
        YarnRPC.create(conf), address);

  retryCount = 0;
  shouldThrowNMNotYetReadyException = false;
  proxy.startContainers(allRequests);
  Assert.assertEquals(5, retryCount);

  retryCount = 0;
  shouldThrowNMNotYetReadyException = false;
  proxy.stopContainers(Records.newRecord(StopContainersRequest.class));
  Assert.assertEquals(5, retryCount);

  retryCount = 0;
  shouldThrowNMNotYetReadyException = false;
  proxy.getContainerStatuses(Records
    .newRecord(GetContainerStatusesRequest.class));
  Assert.assertEquals(5, retryCount);

  retryCount = 0;
  shouldThrowNMNotYetReadyException = true;
  proxy.startContainers(allRequests);
  Assert.assertEquals(5, retryCount);
}
 
Example 17
Project: hadoop   File: TestClientToAMTokens.java
private void verifyTamperedToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token, UserGroupInformation ugi,
    ClientToAMTokenIdentifier maliciousID) {
  Token<ClientToAMTokenIdentifier> maliciousToken =
      new Token<ClientToAMTokenIdentifier>(maliciousID.getBytes(),
        token.getPassword(), token.getKind(),
        token.getService());
  ugi.addToken(maliciousToken);

  try {
    ugi.doAs(new PrivilegedExceptionAction<Void>()  {
      @Override
      public Void run() throws Exception {
        try {
          CustomProtocol client =
              (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
                am.address, conf);
          client.ping();
          fail("Connection initiation with illegally modified "
              + "tokens is expected to fail.");
          return null;
        } catch (YarnException ex) {
          fail("Cannot get a YARN remote exception as "
              + "it will indicate RPC success");
          throw ex;
        }
      }
    });
  } catch (Exception e) {
    Assert.assertEquals(RemoteException.class.getName(), e.getClass()
        .getName());
    e = ((RemoteException)e).unwrapRemoteException();
    Assert
      .assertEquals(SaslException.class
        .getCanonicalName(), e.getClass().getCanonicalName());
    Assert.assertTrue(e
      .getMessage()
      .contains(
        "DIGEST-MD5: digest response format violation. "
            + "Mismatched response."));
    Assert.assertFalse(am.pinged);
  }
}
 
Example 18
Project: hadoop   File: TestSchedulerUtils.java
@Test
public void testValidateResourceBlacklistRequest() throws Exception {

  MyContainerManager containerManager = new MyContainerManager();
  final MockRMWithAMS rm =
      new MockRMWithAMS(new YarnConfiguration(), containerManager);
  rm.start();

  MockNM nm1 = rm.registerNode("localhost:1234", 5120);

  Map<ApplicationAccessType, String> acls =
      new HashMap<ApplicationAccessType, String>(2);
  acls.put(ApplicationAccessType.VIEW_APP, "*");
  RMApp app = rm.submitApp(1024, "appname", "appuser", acls);

  nm1.nodeHeartbeat(true);

  RMAppAttempt attempt = app.getCurrentAppAttempt();
  ApplicationAttemptId applicationAttemptId = attempt.getAppAttemptId();
  waitForLaunchedState(attempt);

  // Create a client to the RM.
  final Configuration conf = rm.getConfig();
  final YarnRPC rpc = YarnRPC.create(conf);

  UserGroupInformation currentUser = 
      UserGroupInformation.createRemoteUser(applicationAttemptId.toString());
  Credentials credentials = containerManager.getContainerCredentials();
  final InetSocketAddress rmBindAddress =
      rm.getApplicationMasterService().getBindAddress();
  Token<? extends TokenIdentifier> amRMToken =
      MockRMWithAMS.setupAndReturnAMRMToken(rmBindAddress,
        credentials.getAllTokens());
  currentUser.addToken(amRMToken);
  ApplicationMasterProtocol client =
      currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
        @Override
        public ApplicationMasterProtocol run() {
          return (ApplicationMasterProtocol) rpc.getProxy(
            ApplicationMasterProtocol.class, rmBindAddress, conf);
        }
      });

  RegisterApplicationMasterRequest request = Records
      .newRecord(RegisterApplicationMasterRequest.class);
  client.registerApplicationMaster(request);

  ResourceBlacklistRequest blacklistRequest =
      ResourceBlacklistRequest.newInstance(
          Collections.singletonList(ResourceRequest.ANY), null);

  AllocateRequest allocateRequest =
      AllocateRequest.newInstance(0, 0.0f, null, null, blacklistRequest);
  boolean error = false;
  try {
    client.allocate(allocateRequest);
  } catch (InvalidResourceBlacklistRequestException e) {
    error = true;
  }

  rm.stop();
  
  Assert.assertTrue(
      "Did not catch InvalidResourceBlacklistRequestException", error);
}
 
Example 19
Project: hadoop   File: TestLocalContainerAllocator.java
@Test
public void testAMRMTokenUpdate() throws Exception {
  Configuration conf = new Configuration();
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
      ApplicationId.newInstance(1, 1), 1);
  AMRMTokenIdentifier oldTokenId = new AMRMTokenIdentifier(attemptId, 1);
  AMRMTokenIdentifier newTokenId = new AMRMTokenIdentifier(attemptId, 2);
  Token<AMRMTokenIdentifier> oldToken = new Token<AMRMTokenIdentifier>(
      oldTokenId.getBytes(), "oldpassword".getBytes(), oldTokenId.getKind(),
      new Text());
  Token<AMRMTokenIdentifier> newToken = new Token<AMRMTokenIdentifier>(
      newTokenId.getBytes(), "newpassword".getBytes(), newTokenId.getKind(),
      new Text());

  MockScheduler scheduler = new MockScheduler();
  scheduler.amToken = newToken;

  final LocalContainerAllocator lca =
      new StubbedLocalContainerAllocator(scheduler);
  lca.init(conf);
  lca.start();

  UserGroupInformation testUgi = UserGroupInformation.createUserForTesting(
      "someuser", new String[0]);
  testUgi.addToken(oldToken);
  testUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          lca.heartbeat();
          return null;
        }
  });
  lca.close();

  // verify there is only one AMRM token in the UGI and it matches the
  // updated token from the RM
  int tokenCount = 0;
  Token<? extends TokenIdentifier> ugiToken = null;
  for (Token<? extends TokenIdentifier> token : testUgi.getTokens()) {
    if (AMRMTokenIdentifier.KIND_NAME.equals(token.getKind())) {
      ugiToken = token;
      ++tokenCount;
    }
  }

  Assert.assertEquals("too many AMRM tokens", 1, tokenCount);
  Assert.assertArrayEquals("token identifier not updated",
      newToken.getIdentifier(), ugiToken.getIdentifier());
  Assert.assertArrayEquals("token password not updated",
      newToken.getPassword(), ugiToken.getPassword());
  Assert.assertEquals("AMRM token service not updated",
      new Text(ClientRMProxy.getAMRMTokenService(conf)),
      ugiToken.getService());
}
 
Example 20
Project: hadoop   File: TestSaslRPC.java
@Test
public void testPerConnectionConf() throws Exception {
  TestTokenSecretManager sm = new TestTokenSecretManager();
  final Server server = new RPC.Builder(conf)
      .setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
      .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
      .setSecretManager(sm).build();
  server.start();
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
      .getUserName()));
  Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
      sm);
  SecurityUtil.setTokenService(token, addr);
  current.addToken(token);

  Configuration newConf = new Configuration(conf);
  newConf.set(CommonConfigurationKeysPublic.
      HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");

  Client client = null;
  TestSaslProtocol proxy1 = null;
  TestSaslProtocol proxy2 = null;
  TestSaslProtocol proxy3 = null;
  int timeouts[] = {111222, 3333333};
  try {
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
    proxy1 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy1.getAuthMethod();
    client = WritableRpcEngine.getClient(newConf);
    Set<ConnectionId> conns = client.getConnectionIds();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // same conf, connection should be re-used
    proxy2 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy2.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 1, conns.size());
    // different conf, new connection should be set up
    newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
    proxy3 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, newConf);
    proxy3.getAuthMethod();
    assertEquals("number of connections in cache is wrong", 2, conns.size());
    // now verify the proxies have the correct connection ids and timeouts
    ConnectionId[] connsArray = {
        RPC.getConnectionIdForProxy(proxy1),
        RPC.getConnectionIdForProxy(proxy2),
        RPC.getConnectionIdForProxy(proxy3)
    };
    assertEquals(connsArray[0], connsArray[1]);
    assertEquals(connsArray[0].getMaxIdleTime(), timeouts[0]);
    assertFalse(connsArray[0].equals(connsArray[2]));
    assertNotSame(connsArray[2].getMaxIdleTime(), timeouts[1]);
  } finally {
    server.stop();
    // this is dirty, but clear out connection cache for next run
    if (client != null) {
      client.getConnectionIds().clear();
    }
    if (proxy1 != null) RPC.stopProxy(proxy1);
    if (proxy2 != null) RPC.stopProxy(proxy2);
    if (proxy3 != null) RPC.stopProxy(proxy3);
  }
}