Java Code Examples for org.apache.hadoop.security.SecurityUtil

The following examples show how to use org.apache.hadoop.security.SecurityUtil. These examples are extracted from open source projects; the original project and source file are noted above each example.
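Before the examples, here is a minimal, self-contained sketch of two of the most frequently used SecurityUtil calls, getServerPrincipal and buildTokenService; the principal and address in it are hypothetical values, not taken from any of the projects below.

// A minimal orientation sketch (not from any of the projects below).
// The principal "nn/_HOST@EXAMPLE.COM" and address "nn.example.com:8020"
// are hypothetical values.
import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;

public class SecurityUtilSketch {
  public static void main(String[] args) throws IOException {
    // Replace the _HOST placeholder with the local host's fully-qualified
    // domain name (a null hostname means "use the local host").
    String principal =
        SecurityUtil.getServerPrincipal("nn/_HOST@EXAMPLE.COM", (String) null);

    // Use host names rather than IPs in token service names, so the
    // hypothetical address below does not need to resolve.
    SecurityUtil.setTokenServiceUseIp(false);
    InetSocketAddress addr =
        NetUtils.createSocketAddrForHost("nn.example.com", 8020);
    Text service = SecurityUtil.buildTokenService(addr);

    System.out.println(principal + " -> " + service);
  }
}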
Example 1
Source Project: hadoop-ozone   Source File: HttpServer2.java    License: Apache License 2.0
private void initSpnego(ConfigurationSource conf, String hostName,
    String usernameConfKey, String keytabConfKey) throws IOException {
  Map<String, String> params = new HashMap<>();
  String principalInConf = conf.get(usernameConfKey);
  if (principalInConf != null && !principalInConf.isEmpty()) {
    params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
        principalInConf, hostName));
  }
  String httpKeytab = conf.get(keytabConfKey);
  if (httpKeytab != null && !httpKeytab.isEmpty()) {
    params.put("kerberos.keytab", httpKeytab);
  }
  params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
  defineFilter(webAppContext, SPNEGO_FILTER,
      AuthenticationFilter.class.getName(), params, null);
}
 
Example 2
Source Project: hadoop-ozone   Source File: OMKeyRequest.java    License: Apache License 2.0
private static EncryptedKeyVersion generateEDEK(OzoneManager ozoneManager,
    String ezKeyName) throws IOException {
  if (ezKeyName == null) {
    return null;
  }
  long generateEDEKStartTime = monotonicNow();
  EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws IOException {
          try {
            return ozoneManager.getKmsProvider()
                .generateEncryptedKey(ezKeyName);
          } catch (GeneralSecurityException e) {
            throw new IOException(e);
          }
        }
      });
  long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
  LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
  Preconditions.checkNotNull(edek);
  return edek;
}
 
Example 3
Source Project: hadoop-ozone   Source File: KeyManagerImpl.java    License: Apache License 2.0
private EncryptedKeyVersion generateEDEK(
    final String ezKeyName) throws IOException {
  if (ezKeyName == null) {
    return null;
  }
  long generateEDEKStartTime = monotonicNow();
  EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws IOException {
          try {
            return getKMSProvider().generateEncryptedKey(ezKeyName);
          } catch (GeneralSecurityException e) {
            throw new IOException(e);
          }
        }
      });
  long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
  LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
  Preconditions.checkNotNull(edek);
  return edek;
}
 
Example 4
Source Project: big-c   Source File: RpcProgramMountd.java    License: Apache License 2.0
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
 
Example 5
Source Project: hadoop-ozone   Source File: TestOzoneS3Util.java    License: Apache License 2.0
@Test
public void testBuildServiceNameForToken() {

  Collection<String> nodeIDList = OmUtils.getOMNodeIds(configuration,
      serviceID);

  configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
      serviceID, "om1"), "om1:9862");
  configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
      serviceID, "om2"), "om2:9862");
  configuration.set(OmUtils.addKeySuffixes(OMConfigKeys.OZONE_OM_ADDRESS_KEY,
      serviceID, "om3"), "om3:9862");

  String expectedOmServiceAddress = buildServiceAddress(nodeIDList);

  SecurityUtil.setConfiguration(configuration);
  String omserviceAddr = OzoneS3Util.buildServiceNameForToken(configuration,
      serviceID, nodeIDList);

  Assert.assertEquals(expectedOmServiceAddress, omserviceAddr);
}
 
Example 6
Source Project: attic-apex-core   Source File: StramClientUtils.java    License: Apache License 2.0
private Token<RMDelegationTokenIdentifier> getRMHAToken(org.apache.hadoop.yarn.api.records.Token rmDelegationToken)
{
  // Build a list of service addresses to form the service name
  ArrayList<String> services = new ArrayList<>();
  for (String rmId : ConfigUtils.getRMHAIds(conf)) {
    LOG.info("Yarn Resource Manager id: {}", rmId);
    // Set RM_ID to get the corresponding RM_ADDRESS
    services.add(SecurityUtil.buildTokenService(getRMHAAddress(rmId)).toString());
  }
  Text rmTokenService = new Text(Joiner.on(',').join(services));

  return new Token<>(
      rmDelegationToken.getIdentifier().array(),
      rmDelegationToken.getPassword().array(),
      new Text(rmDelegationToken.getKind()),
      rmTokenService);
}
 
Example 7
Source Project: pxf   Source File: SecureLogin.java    License: Apache License 2.0
/**
 * Returns the service principal name from the configuration if available,
 * or defaults to the system property for the default server for backwards
 * compatibility. If the principal name contains the _HOST element, it is replaced
 * with the name of the host where the service is running.
 *
 * @param serverName    the name of the server
 * @param configuration the hadoop configuration
 * @return the service principal for the given server and configuration
 */
String getServicePrincipal(String serverName, Configuration configuration) {
    // use system property as default for backward compatibility when only 1 Kerberized cluster was supported
    String defaultPrincipal = StringUtils.equalsIgnoreCase(serverName, "default") ?
            System.getProperty(CONFIG_KEY_SERVICE_PRINCIPAL) :
            null;
    String principal = configuration.get(CONFIG_KEY_SERVICE_PRINCIPAL, defaultPrincipal);
    try {
        principal = SecurityUtil.getServerPrincipal(principal, getLocalHostName(configuration));
        LOG.debug("Resolved Kerberos principal name to {} for server {}", principal, serverName);
        return principal;
    } catch (Exception e) {
        throw new IllegalStateException(
            String.format("Failed to determine local hostname for server %s : %s", serverName, e.getMessage()), e);
    }
}
 
Example 8
Source Project: big-c   Source File: SaslDataTransferTestCase.java    License: Apache License 2.0
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Example 9
Source Project: hadoop   Source File: ClientRMProxy.java    License: Apache License 2.0
@Unstable
public static Text getTokenService(Configuration conf, String address,
    String defaultAddr, int defaultPort) {
  if (HAUtil.isHAEnabled(conf)) {
    // Build a list of service addresses to form the service name
    ArrayList<String> services = new ArrayList<String>();
    YarnConfiguration yarnConf = new YarnConfiguration(conf);
    for (String rmId : HAUtil.getRMHAIds(conf)) {
      // Set RM_ID to get the corresponding RM_ADDRESS
      yarnConf.set(YarnConfiguration.RM_HA_ID, rmId);
      services.add(SecurityUtil.buildTokenService(
          yarnConf.getSocketAddr(address, defaultAddr, defaultPort))
          .toString());
    }
    return new Text(Joiner.on(',').join(services));
  }

  // Non-HA case - no need to set RM_ID
  return SecurityUtil.buildTokenService(conf.getSocketAddr(address,
    defaultAddr, defaultPort));
}
 
Example 10
Source Project: RDFS   Source File: TestConfiguredPolicy.java    License: Apache License 2.0
public void testConfiguredPolicy() throws Exception {
  Configuration conf = new Configuration();
  conf.set(KEY_1, AccessControlList.WILDCARD_ACL_VALUE);
  conf.set(KEY_2, USER1 + " " + GROUPS1[0]);
  
  ConfiguredPolicy policy = new ConfiguredPolicy(conf, new TestPolicyProvider());
  SecurityUtil.setPolicy(policy);
  
  Subject user1 = 
    SecurityUtil.getSubject(new UnixUserGroupInformation(USER1, GROUPS1));

  // Should succeed
  ServiceAuthorizationManager.authorize(user1, Protocol1.class);
  
  // Should fail
  Subject user2 = 
    SecurityUtil.getSubject(new UnixUserGroupInformation(USER2, GROUPS2));
  boolean failed = false;
  try {
    ServiceAuthorizationManager.authorize(user2, Protocol2.class);
  } catch (AuthorizationException ae) {
    failed = true;
  }
  assertTrue(failed);
}
 
Example 11
Source Project: crate   Source File: HdfsRepository.java    License: Apache License 2.0
private static String preparePrincipal(String originalPrincipal) {
    String finalPrincipal = originalPrincipal;
    // Don't worry about host name resolution if they don't have the _HOST pattern in the name.
    if (originalPrincipal.contains("_HOST")) {
        try {
            finalPrincipal = SecurityUtil.getServerPrincipal(originalPrincipal, getHostName());
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }

        if (originalPrincipal.equals(finalPrincipal) == false) {
            LOGGER.debug("Found service principal. Converted original principal name [{}] to server principal [{}]",
                originalPrincipal, finalPrincipal);
        }
    }
    return finalPrincipal;
}
 
Example 12
Source Project: hadoop   Source File: JournalNode.java    License: Apache License 2.0
/**
 * Start listening for edits via RPC.
 */
public void start() throws IOException {
  Preconditions.checkState(!isStarted(), "JN already running");
  
  validateAndCreateJournalDir(localDir);
  
  DefaultMetricsSystem.initialize("JournalNode");
  JvmMetrics.create("JournalNode",
      conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
      DefaultMetricsSystem.instance());

  InetSocketAddress socAddr = JournalNodeRpcServer.getAddress(conf);
  SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
  
  registerJNMXBean();
  
  httpServer = new JournalNodeHttpServer(conf, this);
  httpServer.start();

  httpServerURI = httpServer.getServerURI().toString();

  rpcServer = new JournalNodeRpcServer(conf, this);
  rpcServer.start();
}
 
Example 13
Source Project: streamx   Source File: TestWithSecureMiniDFSCluster.java    License: Apache License 2.0
private Configuration createSecureConfig(String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
  // see https://issues.apache.org/jira/browse/HDFS-7431
  conf.set(DFS_ENCRYPT_DATA_TRANSFER_KEY, "true");
  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Example 14
Source Project: hadoop   Source File: SaslDataTransferTestCase.java    License: Apache License 2.0
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Example 15
Source Project: hadoop   Source File: TestRMContainerAllocator.java    License: Apache License 2.0
@Override
public synchronized Allocation allocate(
    ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
    List<ContainerId> release, 
    List<String> blacklistAdditions, List<String> blacklistRemovals) {
  List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
  for (ResourceRequest req : ask) {
    ResourceRequest reqCopy = ResourceRequest.newInstance(req
        .getPriority(), req.getResourceName(), req.getCapability(), req
        .getNumContainers(), req.getRelaxLocality());
    askCopy.add(reqCopy);
  }
  SecurityUtil.setTokenServiceUseIp(false);
  lastAsk = ask;
  lastRelease = release;
  lastBlacklistAdditions = blacklistAdditions;
  lastBlacklistRemovals = blacklistRemovals;
  return super.allocate(
      applicationAttemptId, askCopy, release, 
      blacklistAdditions, blacklistRemovals);
}
 
Example 16
Source Project: hadoop   Source File: YARNRunner.java    License: Apache License 2.0
@VisibleForTesting
void addHistoryToken(Credentials ts) throws IOException, InterruptedException {
  /* check if we have a hsproxy, if not, no need */
  MRClientProtocol hsProxy = clientCache.getInitializedHSProxy();
  if (UserGroupInformation.isSecurityEnabled() && (hsProxy != null)) {
    /*
     * note that get delegation token was called. Again this is hack for oozie
     * to make sure we add history server delegation tokens to the credentials
     */
    RMDelegationTokenSelector tokenSelector = new RMDelegationTokenSelector();
    Text service = resMgrDelegate.getRMDelegationTokenService();
    if (tokenSelector.selectToken(service, ts.getAllTokens()) != null) {
      Text hsService = SecurityUtil.buildTokenService(hsProxy
          .getConnectAddress());
      if (ts.getToken(hsService) == null) {
        ts.addToken(hsService, getDelegationTokenFromHS(hsProxy));
      }
    }
  }
}
 
Example 17
Source Project: hadoop   Source File: MRDelegationTokenRenewer.java    License: Apache License 2.0
@Override
public long renew(Token<?> token, Configuration conf) throws IOException,
    InterruptedException {

  org.apache.hadoop.yarn.api.records.Token dToken =
      org.apache.hadoop.yarn.api.records.Token.newInstance(
        token.getIdentifier(), token.getKind().toString(),
        token.getPassword(), token.getService().toString());

  MRClientProtocol histProxy = instantiateHistoryProxy(conf,
      SecurityUtil.getTokenServiceAddr(token));
  try {
    RenewDelegationTokenRequest request = Records
        .newRecord(RenewDelegationTokenRequest.class);
    request.setDelegationToken(dToken);
    return histProxy.renewDelegationToken(request).getNextExpirationTime();
  } finally {
    stopHistoryProxy(histProxy);
  }

}
 
Example 18
Source Project: hadoop   Source File: RpcProgramMountd.java    License: Apache License 2.0
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
 
Example 19
Source Project: hadoop   Source File: HftpFileSystem.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(final Token<?> token) throws IOException {
  UserGroupInformation connectUgi = ugi.getRealUser();
  if (connectUgi == null) {
    connectUgi = ugi;
  }
  try {
    connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        InetSocketAddress serviceAddr = SecurityUtil
            .getTokenServiceAddr(token);
        DelegationTokenFetcher.cancelDelegationToken(connectionFactory,
            DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
            (Token<DelegationTokenIdentifier>) token);
        return null;
      }
    });
  } catch (InterruptedException e) {
    throw new IOException(e);
  }
}
 
Example 20
Source Project: hadoop   Source File: DelegationTokenSelector.java    License: Apache License 2.0
/**
 * Select the delegation token for hdfs.  The port will be rewritten to
 * the port of hdfs.service.host_$nnAddr, or the default rpc namenode port. 
 * This method should only be called by non-hdfs filesystems that do not
 * use the rpc port to acquire tokens.  Ex. webhdfs, hftp 
 * @param nnUri of the remote namenode
 * @param tokens as a collection
 * @param conf hadoop configuration
 * @return Token
 */
public Token<DelegationTokenIdentifier> selectToken(
    final URI nnUri, Collection<Token<?>> tokens,
    final Configuration conf) {
  // this guesses the remote cluster's rpc service port.
  // the current token design assumes it's the same as the local cluster's
  // rpc port unless a config key is set.  there should be a way to
  // automatically and correctly determine the value
  Text serviceName = SecurityUtil.buildTokenService(nnUri);
  final String nnServiceName = conf.get(SERVICE_NAME_KEY + serviceName);
  
  int nnRpcPort = NameNode.DEFAULT_PORT;
  if (nnServiceName != null) {
    nnRpcPort = NetUtils.createSocketAddr(nnServiceName, nnRpcPort).getPort(); 
  }
  // use original hostname from the uri to avoid unintentional host resolving
  serviceName = SecurityUtil.buildTokenService(
      NetUtils.createSocketAddrForHost(nnUri.getHost(), nnRpcPort));
  
  return selectToken(serviceName, tokens);
}
 
Example 21
Source Project: hadoop   Source File: NameNode.java    License: Apache License 2.0
private void startTrashEmptier(final Configuration conf) throws IOException {
  long trashInterval =
      conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
  if (trashInterval == 0) {
    return;
  } else if (trashInterval < 0) {
    throw new IOException("Cannot start trash emptier with negative interval."
        + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
  }
  
  // This may be called from the transitionToActive code path, in which
  // case the current user is the administrator, not the NN. The trash
  // emptier needs to run as the NN. See HDFS-3972.
  FileSystem fs = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
        }
      });
  this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
  this.emptier.setDaemon(true);
  this.emptier.start();
}
 
Example 22
Source Project: big-c   Source File: IPCLoggerChannel.java    License: Apache License 2.0
protected QJournalProtocol createProxy() throws IOException {
  final Configuration confCopy = new Configuration(conf);
  
  // Need to set NODELAY or else batches larger than MTU can trigger 
  // 40ms nagling delays.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
      true);
  
  RPC.setProtocolEngine(confCopy,
      QJournalProtocolPB.class, ProtobufRpcEngine.class);
  return SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<QJournalProtocol>() {
        @Override
        public QJournalProtocol run() throws IOException {
          RPC.setProtocolEngine(confCopy,
              QJournalProtocolPB.class, ProtobufRpcEngine.class);
          QJournalProtocolPB pbproxy = RPC.getProxy(
              QJournalProtocolPB.class,
              RPC.getProtocolVersion(QJournalProtocolPB.class),
              addr, confCopy);
          return new QJournalProtocolTranslatorPB(pbproxy);
        }
      });
}
 
Example 23
Source Project: hadoop-ozone   Source File: StorageContainerManager.java    License: Apache License 2.0
/**
 * Login as the configured user for SCM.
 *
 * @param conf the configuration source
 */
private void loginAsSCMUser(ConfigurationSource conf)
    throws IOException, AuthenticationException {
  if (LOG.isDebugEnabled()) {
    ScmConfig scmConfig = configuration.getObject(ScmConfig.class);
    LOG.debug("Ozone security is enabled. Attempting login for SCM user. "
            + "Principal: {}, keytab: {}",
        scmConfig.getKerberosPrincipal(),
        scmConfig.getKerberosKeytab());
  }

  Configuration hadoopConf =
      LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
  if (SecurityUtil.getAuthenticationMethod(hadoopConf).equals(
      AuthenticationMethod.KERBEROS)) {
    UserGroupInformation.setConfiguration(hadoopConf);
    InetSocketAddress socAddr = HddsServerUtil
        .getScmBlockClientBindAddress(conf);
    SecurityUtil.login(hadoopConf,
          ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
          ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
          socAddr.getHostName());
  } else {
    throw new AuthenticationException(SecurityUtil.getAuthenticationMethod(
        hadoopConf) + " authentication method is not supported. "
        + "SCM user login failed.");
  }
  LOG.info("SCM login successful.");
}
 
Example 24
Source Project: big-c   Source File: MRApp.java    License: Apache License 2.0
public static Token newContainerToken(NodeId nodeId,
    byte[] password, ContainerTokenIdentifier tokenIdentifier) {
  // RPC layer client expects ip:port as service for tokens
  InetSocketAddress addr =
      NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
  // NOTE: use SecurityUtil.setTokenService if this becomes a "real" token
  Token containerToken =
      Token.newInstance(tokenIdentifier.getBytes(),
        ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil
          .buildTokenService(addr).toString());
  return containerToken;
}
 
Example 25
Source Project: big-c   Source File: TestTokenAspect.java    License: Apache License 2.0
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  setConf(conf);
  this.uri = URI.create(name.getScheme() + "://" + name.getAuthority());
  tokenAspect = new TokenAspect<DummyFs>(this,
      SecurityUtil.buildTokenService(uri), TOKEN_KIND);
  if (emulateSecurityEnabled || UserGroupInformation.isSecurityEnabled()) {
    tokenAspect.initDelegationToken(ugi);
  }
}
 
Example 26
Source Project: RDFS   Source File: NameNode.java    License: Apache License 2.0
@Override
public void refreshServiceAcl() throws IOException {
  if (!serviceAuthEnabled) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }

  SecurityUtil.getPolicy().refresh();
}
 
Example 27
Source Project: big-c   Source File: NetUtils.java    License: Apache License 2.0
/**
 * Checks if {@code host} is a local host name and return {@link InetAddress}
 * corresponding to that address.
 * 
 * @param host the specified host
 * @return a valid local {@link InetAddress} or null
 * @throws SocketException if an I/O error occurs
 */
public static InetAddress getLocalInetAddress(String host)
    throws SocketException {
  if (host == null) {
    return null;
  }
  InetAddress addr = null;
  try {
    addr = SecurityUtil.getByName(host);
    if (NetworkInterface.getByInetAddress(addr) == null) {
      addr = null; // Not a local address
    }
  } catch (UnknownHostException ignore) { }
  return addr;
}
 
Example 28
Source Project: NNAnalytics   Source File: NameNodeLoader.java    License: Apache License 2.0
/** Reloads the Kerberos keytab if security enabled. */
public void reloadKeytab() {
  if (UserGroupInformation.isSecurityEnabled()) {
    try {
      SecurityUtil.login(
          conf,
          DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
          DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
          InetAddress.getLocalHost().getCanonicalHostName());
      UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
 
Example 29
Source Project: imhotep   Source File: KerberosUtils.java    License: Apache License 2.0
private static void with(String principal, String keytabPath) throws IOException {
    log.info("Setting keytab file of " + keytabPath + ", and principal to " + principal);
    checkArgument(!Strings.isNullOrEmpty(principal), "Unable to use a null/empty principal for keytab");
    checkArgument(!Strings.isNullOrEmpty(keytabPath), "Unable to use a null/empty keytab path");

    // do hostname substitution
    final String realPrincipal = SecurityUtil.getServerPrincipal(principal, (String) null);
    // actually login
    try {
        UserGroupInformation.loginUserFromKeytab(realPrincipal, keytabPath);
    } catch (IOException e) {
        checkKnownErrors(realPrincipal, e);
        throw e;
    }
}
 
Example 30
Source Project: big-c   Source File: BaseNMTokenSecretManager.java    License: Apache License 2.0
public static Token newInstance(byte[] password,
    NMTokenIdentifier identifier) {
  NodeId nodeId = identifier.getNodeId();
  // RPC layer client expects ip:port as service for tokens
  InetSocketAddress addr =
      NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort());
  Token nmToken =
      Token.newInstance(identifier.getBytes(),
        NMTokenIdentifier.KIND.toString(), password, SecurityUtil
          .buildTokenService(addr).toString());
  return nmToken;
}