Java Code Examples for org.apache.hadoop.security.SecurityUtil#doAsLoginUser()

The following examples show how to use org.apache.hadoop.security.SecurityUtil#doAsLoginUser(). The source file and originating project are noted above each example.
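All of the examples share the same pattern: wrap the privileged work in a PrivilegedExceptionAction and pass it to SecurityUtil.doAsLoginUser(), which runs the action as the process's login user (the identity acquired at service login, e.g. from a keytab) rather than as the current caller. The minimal sketch below illustrates that pattern; the DoAsLoginUserSketch class and openAsLoginUser method are illustrative names only, and FileSystem.get(conf) is just a placeholder for the privileged action.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.SecurityUtil;

public class DoAsLoginUserSketch {

  // Runs FileSystem.get(conf) as the login user instead of the current
  // caller. The privileged work goes inside run(); an IOException thrown
  // there propagates out of doAsLoginUser.
  public static FileSystem openAsLoginUser(final Configuration conf)
      throws IOException {
    return SecurityUtil.doAsLoginUser(
        new PrivilegedExceptionAction<FileSystem>() {
          @Override
          public FileSystem run() throws IOException {
            return FileSystem.get(conf);
          }
        });
  }
}
 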
Example 1
Source File: OMKeyRequest.java    From hadoop-ozone with Apache License 2.0
private static EncryptedKeyVersion generateEDEK(OzoneManager ozoneManager,
    String ezKeyName) throws IOException {
  if (ezKeyName == null) {
    return null;
  }
  long generateEDEKStartTime = monotonicNow();
  EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws IOException {
          try {
            return ozoneManager.getKmsProvider()
                .generateEncryptedKey(ezKeyName);
          } catch (GeneralSecurityException e) {
            throw new IOException(e);
          }
        }
      });
  long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
  LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
  Preconditions.checkNotNull(edek);
  return edek;
}
 
Example 2
Source File: KeyManagerImpl.java    From hadoop-ozone with Apache License 2.0
private EncryptedKeyVersion generateEDEK(
    final String ezKeyName) throws IOException {
  if (ezKeyName == null) {
    return null;
  }
  long generateEDEKStartTime = monotonicNow();
  EncryptedKeyVersion edek = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<EncryptedKeyVersion>() {
        @Override
        public EncryptedKeyVersion run() throws IOException {
          try {
            return getKMSProvider().generateEncryptedKey(ezKeyName);
          } catch (GeneralSecurityException e) {
            throw new IOException(e);
          }
        }
      });
  long generateEDEKTime = monotonicNow() - generateEDEKStartTime;
  LOG.debug("generateEDEK takes {} ms", generateEDEKTime);
  Preconditions.checkNotNull(edek);
  return edek;
}
 
Example 3
Source File: EditLogTailer.java    From hadoop with Apache License 2.0
public void catchupDuringFailover() throws IOException {
  Preconditions.checkState(tailerThread == null ||
      !tailerThread.isAlive(),
      "Tailer thread should not be running once failover starts");
  // Important to do tailing as the login user, in case the shared
  // edits storage is implemented by a JournalManager that depends
  // on security credentials to access the logs (eg QuorumJournalManager).
  SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try {
        // It is already under the full name system lock and the checkpointer
        // thread is already stopped. No need to acquire any other lock.
        doTailEdits();
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
      return null;
    }
  });
}
 
Example 4
Source File: NameNode.java    From hadoop with Apache License 2.0
private void startTrashEmptier(final Configuration conf) throws IOException {
  long trashInterval =
      conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
  if (trashInterval == 0) {
    return;
  } else if (trashInterval < 0) {
    throw new IOException("Cannot start trash emptier with negative interval."
        + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
  }
  
  // This may be called from the transitionToActive code path, in which
  // case the current user is the administrator, not the NN. The trash
  // emptier needs to run as the NN. See HDFS-3972.
  FileSystem fs = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
        }
      });
  this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
  this.emptier.setDaemon(true);
  this.emptier.start();
}
 
Example 5
Source File: IPCLoggerChannel.java    From hadoop with Apache License 2.0
protected QJournalProtocol createProxy() throws IOException {
  final Configuration confCopy = new Configuration(conf);
  
  // Need to set NODELAY or else batches larger than MTU can trigger 
  // 40ms nagling delays.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
      true);
  
  RPC.setProtocolEngine(confCopy,
      QJournalProtocolPB.class, ProtobufRpcEngine.class);
  return SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<QJournalProtocol>() {
        @Override
        public QJournalProtocol run() throws IOException {
          RPC.setProtocolEngine(confCopy,
              QJournalProtocolPB.class, ProtobufRpcEngine.class);
          QJournalProtocolPB pbproxy = RPC.getProxy(
              QJournalProtocolPB.class,
              RPC.getProtocolVersion(QJournalProtocolPB.class),
              addr, confCopy);
          return new QJournalProtocolTranslatorPB(pbproxy);
        }
      });
}
 
Example 6
Source File: EditLogTailer.java    From big-c with Apache License 2.0
public void catchupDuringFailover() throws IOException {
  Preconditions.checkState(tailerThread == null ||
      !tailerThread.isAlive(),
      "Tailer thread should not be running once failover starts");
  // Important to do tailing as the login user, in case the shared
  // edits storage is implemented by a JournalManager that depends
  // on security credentials to access the logs (eg QuorumJournalManager).
  SecurityUtil.doAsLoginUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try {
        // It is already under the full name system lock and the checkpointer
        // thread is already stopped. No need to acquire any other lock.
        doTailEdits();
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
      return null;
    }
  });
}
 
Example 7
Source File: NameNode.java    From big-c with Apache License 2.0
private void startTrashEmptier(final Configuration conf) throws IOException {
  long trashInterval =
      conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
  if (trashInterval == 0) {
    return;
  } else if (trashInterval < 0) {
    throw new IOException("Cannot start trash emptier with negative interval."
        + " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
  }
  
  // This may be called from the transitionToActive code path, in which
  // case the current user is the administrator, not the NN. The trash
  // emptier needs to run as the NN. See HDFS-3972.
  FileSystem fs = SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<FileSystem>() {
        @Override
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
        }
      });
  this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
  this.emptier.setDaemon(true);
  this.emptier.start();
}
 
Example 8
Source File: IPCLoggerChannel.java    From big-c with Apache License 2.0
protected QJournalProtocol createProxy() throws IOException {
  final Configuration confCopy = new Configuration(conf);
  
  // Need to set NODELAY or else batches larger than MTU can trigger 
  // 40ms nagling delays.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
      true);
  
  RPC.setProtocolEngine(confCopy,
      QJournalProtocolPB.class, ProtobufRpcEngine.class);
  return SecurityUtil.doAsLoginUser(
      new PrivilegedExceptionAction<QJournalProtocol>() {
        @Override
        public QJournalProtocol run() throws IOException {
          RPC.setProtocolEngine(confCopy,
              QJournalProtocolPB.class, ProtobufRpcEngine.class);
          QJournalProtocolPB pbproxy = RPC.getProxy(
              QJournalProtocolPB.class,
              RPC.getProtocolVersion(QJournalProtocolPB.class),
              addr, confCopy);
          return new QJournalProtocolTranslatorPB(pbproxy);
        }
      });
}
 
Example 9
Source File: OzoneManagerServiceProviderImpl.java    From hadoop-ozone with Apache License 2.0
/**
 * Method to obtain current OM DB Snapshot.
 * @return DBCheckpoint instance.
 */
@VisibleForTesting
DBCheckpoint getOzoneManagerDBSnapshot() {
  String snapshotFileName = RECON_OM_SNAPSHOT_DB + "_" +
      System.currentTimeMillis();
  File targetFile = new File(omSnapshotDBParentDir, snapshotFileName +
      ".tar.gz");
  try {
    SecurityUtil.doAsLoginUser(() -> {
      try (InputStream inputStream = reconUtils.makeHttpCall(
          connectionFactory, getOzoneManagerSnapshotUrl(),
          isOmSpengoEnabled())) {
        FileUtils.copyInputStreamToFile(inputStream, targetFile);
      }
      return null;
    });
    // Untar the checkpoint file.
    Path untarredDbDir = Paths.get(omSnapshotDBParentDir.getAbsolutePath(),
        snapshotFileName);
    reconUtils.untarCheckpointFile(targetFile, untarredDbDir);
    FileUtils.deleteQuietly(targetFile);

    // Currently, OM DB type is not configurable. Hence, defaulting to
    // RocksDB.
    return new RocksDBCheckpoint(untarredDbDir);
  } catch (IOException e) {
    LOG.error("Unable to obtain Ozone Manager DB Snapshot. ", e);
  }
  return null;
}
 
Example 10
Source File: TransferFsImageWrapper.java    From NNAnalytics with Apache License 2.0
/**
 * This is meant to download the latest FSImage without relying on FSNamesystem or other running
 * HDFS classes within NameNodeLoader.
 *
 * @throws IOException if FileSystem can not be initialized
 */
public void downloadMostRecentImage() throws IOException {
  FileSystem fileSystem = nameNodeLoader.getFileSystem();
  Configuration conf = nameNodeLoader.getConfiguration();
  String namespaceDirPath = conf.get(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  File namespaceDir = new File(namespaceDirPath, "current");
  SecurityUtil.login(
      conf,
      DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
      InetAddress.getLocalHost().getCanonicalHostName());
  InetSocketAddress addressOfActive = HAUtil.getAddressOfActive(fileSystem);
  URL infoServer =
      DFSUtil.getInfoServer(addressOfActive, conf, DFSUtil.getHttpClientScheme(conf)).toURL();
  SecurityUtil.doAsLoginUser(
      () -> {
        NamenodeProtocol nnProtocolProxy =
            NameNodeProxies.createNonHAProxy(
                    conf,
                    addressOfActive,
                    NamenodeProtocol.class,
                    UserGroupInformation.getLoginUser(),
                    true)
                .getProxy();
        NamespaceInfo namespaceInfo = nnProtocolProxy.versionRequest();
        String fileId = ImageServlet.getParamStringForMostRecentImage();
        NNStorage storage =
            new NNStorage(
                conf,
                FSNamesystem.getNamespaceDirs(conf),
                FSNamesystem.getNamespaceEditsDirs(conf));
        storage.format(namespaceInfo);
        MD5Hash md5 =
            TransferFsImage.getFileClient(
                infoServer, fileId, Lists.newArrayList(namespaceDir), storage, true);
        FSImageTransactionalStorageInspector inspector =
            new FSImageTransactionalStorageInspector(EnumSet.of(NNStorage.NameNodeFile.IMAGE));
        storage.inspectStorageDirs(inspector);
        File imageFile = inspector.getLatestImages().get(0).getFile();
        MD5FileUtils.saveMD5File(imageFile, md5);
        return null;
      });
}