java.security.PrivilegedExceptionAction Scala Examples

The following examples show how to use java.security.PrivilegedExceptionAction in Scala. Each example is taken from an open-source project; the header above each example names the original source file and project so you can trace it back to the full code.
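All of the examples share the same basic shape: obtain a UserGroupInformation (UGI) for the user you want to act as, wrap the privileged work in a PrivilegedExceptionAction, and pass it to UserGroupInformation.doAs. Before the project-specific code, here is a minimal, self-contained sketch of that pattern; the user name and the path are placeholders, not taken from any of the projects below.

import java.security.PrivilegedExceptionAction

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.UserGroupInformation

object DoAsSketch {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    // "alice" is a placeholder; createRemoteUser builds a UGI without authenticating.
    val ugi = UserGroupInformation.createRemoteUser("alice")

    // doAs runs the action with "alice" as the current Hadoop user and returns its result.
    // IOException, InterruptedException and RuntimeException thrown inside run() are
    // rethrown as-is; other checked exceptions surface wrapped in UndeclaredThrowableException.
    val exists = ugi.doAs(new PrivilegedExceptionAction[Boolean] {
      override def run(): Boolean = {
        val fs = FileSystem.get(conf)
        fs.exists(new Path("/tmp"))
      }
    })
    println(s"/tmp exists: $exists")
  }
}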
Example 1
Source File: IOEncryptionSuite.scala    From drizzle-spark    with Apache License 2.0
package org.apache.spark.deploy.yarn

import java.io._
import java.nio.charset.StandardCharsets
import java.security.PrivilegedExceptionAction
import java.util.UUID

import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Matchers}

import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.config._
import org.apache.spark.serializer._
import org.apache.spark.storage._

class IOEncryptionSuite extends SparkFunSuite with Matchers with BeforeAndAfterAll
  with BeforeAndAfterEach {
  private[this] val blockId = new TempShuffleBlockId(UUID.randomUUID())
  private[this] val conf = new SparkConf()
  private[this] val ugi = UserGroupInformation.createUserForTesting("testuser", Array("testgroup"))
  private[this] val serializer = new KryoSerializer(conf)

  override def beforeAll(): Unit = {
    System.setProperty("SPARK_YARN_MODE", "true")
    ugi.doAs(new PrivilegedExceptionAction[Unit]() {
      override def run(): Unit = {
        conf.set(IO_ENCRYPTION_ENABLED, true)
        val creds = new Credentials()
        SecurityManager.initIOEncryptionKey(conf, creds)
        SparkHadoopUtil.get.addCurrentUserCredentials(creds)
      }
    })
  }

  override def afterAll(): Unit = {
    SparkEnv.set(null)
    System.clearProperty("SPARK_YARN_MODE")
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    super.afterEach()
    conf.set("spark.shuffle.compress", false.toString)
    conf.set("spark.shuffle.spill.compress", false.toString)
  }

  test("IO encryption read and write") {
    ugi.doAs(new PrivilegedExceptionAction[Unit] {
      override def run(): Unit = {
        conf.set(IO_ENCRYPTION_ENABLED, true)
        conf.set("spark.shuffle.compress", false.toString)
        conf.set("spark.shuffle.spill.compress", false.toString)
        testYarnIOEncryptionWriteRead()
      }
    })
  }

  test("IO encryption read and write with shuffle compression enabled") {
    ugi.doAs(new PrivilegedExceptionAction[Unit] {
      override def run(): Unit = {
        conf.set(IO_ENCRYPTION_ENABLED, true)
        conf.set("spark.shuffle.compress", true.toString)
        conf.set("spark.shuffle.spill.compress", true.toString)
        testYarnIOEncryptionWriteRead()
      }
    })
  }

  private[this] def testYarnIOEncryptionWriteRead(): Unit = {
    val plainStr = "hello world"
    val outputStream = new ByteArrayOutputStream()
    val serializerManager = new SerializerManager(serializer, conf)
    val wrappedOutputStream = serializerManager.wrapStream(blockId, outputStream)
    wrappedOutputStream.write(plainStr.getBytes(StandardCharsets.UTF_8))
    wrappedOutputStream.close()

    val encryptedBytes = outputStream.toByteArray
    val encryptedStr = new String(encryptedBytes)
    assert(plainStr !== encryptedStr)

    val inputStream = new ByteArrayInputStream(encryptedBytes)
    val wrappedInputStream = serializerManager.wrapStream(blockId, inputStream)
    val decryptedBytes = new Array[Byte](1024)
    val len = wrappedInputStream.read(decryptedBytes)
    val decryptedStr = new String(decryptedBytes, 0, len, StandardCharsets.UTF_8)
    assert(decryptedStr === plainStr)
  }
} 
Example 2
Source File: CryptoStreamUtilsSuite.scala    From drizzle-spark    with Apache License 2.0
package org.apache.spark.security

import java.security.PrivilegedExceptionAction

import org.apache.hadoop.security.{Credentials, UserGroupInformation}

import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
import org.apache.spark.internal.config._
import org.apache.spark.security.CryptoStreamUtils._

class CryptoStreamUtilsSuite extends SparkFunSuite {
  val ugi = UserGroupInformation.createUserForTesting("testuser", Array("testgroup"))

  test("Crypto configuration conversion") {
    val sparkKey1 = s"${SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX}a.b.c"
    val sparkVal1 = "val1"
    val cryptoKey1 = s"${COMMONS_CRYPTO_CONF_PREFIX}a.b.c"

    val sparkKey2 = SPARK_IO_ENCRYPTION_COMMONS_CONFIG_PREFIX.stripSuffix(".") + "A.b.c"
    val sparkVal2 = "val2"
    val cryptoKey2 = s"${COMMONS_CRYPTO_CONF_PREFIX}A.b.c"
    val conf = new SparkConf()
    conf.set(sparkKey1, sparkVal1)
    conf.set(sparkKey2, sparkVal2)
    val props = CryptoStreamUtils.toCryptoConf(conf)
    assert(props.getProperty(cryptoKey1) === sparkVal1)
    assert(!props.containsKey(cryptoKey2))
  }

  test("Shuffle encryption is disabled by default") {
    ugi.doAs(new PrivilegedExceptionAction[Unit]() {
      override def run(): Unit = {
        val credentials = UserGroupInformation.getCurrentUser.getCredentials()
        val conf = new SparkConf()
        initCredentials(conf, credentials)
        assert(credentials.getSecretKey(SPARK_IO_TOKEN) === null)
      }
    })
  }

  test("Shuffle encryption key length should be 128 by default") {
    ugi.doAs(new PrivilegedExceptionAction[Unit]() {
      override def run(): Unit = {
        val credentials = UserGroupInformation.getCurrentUser.getCredentials()
        val conf = new SparkConf()
        conf.set(IO_ENCRYPTION_ENABLED, true)
        initCredentials(conf, credentials)
        val key = credentials.getSecretKey(SPARK_IO_TOKEN)
        assert(key !== null)
        val actual = key.length * (java.lang.Byte.SIZE)
        assert(actual === 128)
      }
    })
  }

  test("Initial credentials with key length in 256") {
    ugi.doAs(new PrivilegedExceptionAction[Unit]() {
      override def run(): Unit = {
        val credentials = UserGroupInformation.getCurrentUser.getCredentials()
        val conf = new SparkConf()
        conf.set(IO_ENCRYPTION_KEY_SIZE_BITS, 256)
        conf.set(IO_ENCRYPTION_ENABLED, true)
        initCredentials(conf, credentials)
        val key = credentials.getSecretKey(SPARK_IO_TOKEN)
        assert(key !== null)
        val actual = key.length * (java.lang.Byte.SIZE)
        assert(actual === 256)
      }
    })
  }

  test("Initial credentials with invalid key length") {
    ugi.doAs(new PrivilegedExceptionAction[Unit]() {
      override def run(): Unit = {
        val credentials = UserGroupInformation.getCurrentUser.getCredentials()
        val conf = new SparkConf()
        conf.set(IO_ENCRYPTION_KEY_SIZE_BITS, 328)
        conf.set(IO_ENCRYPTION_ENABLED, true)
        intercept[IllegalArgumentException] {
          initCredentials(conf, credentials)
        }
      }
    })
  }

  private[this] def initCredentials(conf: SparkConf, credentials: Credentials): Unit = {
    if (conf.get(IO_ENCRYPTION_ENABLED)) {
      SecurityManager.initIOEncryptionKey(conf, credentials)
    }
  }
} 
Example 3
Source File: HDFSUtils.scala    From Linkis    with Apache License 2.0
package com.webank.wedatasphere.linkis.hadoop.common.utils

import java.io.File
import java.nio.file.Paths
import java.security.PrivilegedExceptionAction

import com.webank.wedatasphere.linkis.common.conf.Configuration.hadoopConfDir
import com.webank.wedatasphere.linkis.hadoop.common.conf.HadoopConf._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.UserGroupInformation

object HDFSUtils {

  def getConfiguration(user: String): Configuration = getConfiguration(user, hadoopConfDir)

  def getConfiguration(user: String, hadoopConfDir: String): Configuration = {
    val confPath = new File(hadoopConfDir)
    if (!confPath.exists() || confPath.isFile) {
      throw new RuntimeException(s"Create hadoop configuration failed, path $hadoopConfDir does not exist.")
    }
    val conf = new Configuration()
    conf.addResource(new Path(Paths.get(hadoopConfDir, "core-site.xml").toAbsolutePath.toFile.getAbsolutePath))
    conf.addResource(new Path(Paths.get(hadoopConfDir, "hdfs-site.xml").toAbsolutePath.toFile.getAbsolutePath))
    conf.addResource(new Path(Paths.get(hadoopConfDir, "yarn-site.xml").toAbsolutePath.toFile.getAbsolutePath))
    conf
  }

  def getHDFSRootUserFileSystem: FileSystem = getHDFSRootUserFileSystem(getConfiguration(HADOOP_ROOT_USER.getValue))

  def getHDFSRootUserFileSystem(conf: org.apache.hadoop.conf.Configuration): FileSystem =
    getHDFSUserFileSystem(HADOOP_ROOT_USER.getValue, conf)

  def getHDFSUserFileSystem(userName: String): FileSystem = getHDFSUserFileSystem(userName, getConfiguration(userName))

  def getHDFSUserFileSystem(userName: String, conf: org.apache.hadoop.conf.Configuration): FileSystem =
    getUserGroupInformation(userName)
      .doAs(new PrivilegedExceptionAction[FileSystem]{
        override def run(): FileSystem = FileSystem.get(conf)
      })

  def getUserGroupInformation(userName: String): UserGroupInformation = {
    if (KERBEROS_ENABLE.getValue) {
      val path = new File(KEYTAB_FILE.getValue, userName + ".keytab").getPath
      val user = getKerberosUser(userName)
      UserGroupInformation.setConfiguration(getConfiguration(userName))
      UserGroupInformation.loginUserFromKeytabAndReturnUGI(user, path)
    } else {
      UserGroupInformation.createRemoteUser(userName)
    }
  }

  def getKerberosUser(userName: String): String = {
    var user = userName
    if (KEYTAB_HOST_ENABLED.getValue) {
      user = user + "/" + KEYTAB_HOST.getValue
    }
    user
  }

} 
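Given the configuration handling above, a hypothetical caller could obtain a per-user FileSystem and list a directory like this. The user name and path are placeholders, and this assumes a reachable HDFS plus a valid hadoopConfDir in the Linkis configuration.

import org.apache.hadoop.fs.Path

import com.webank.wedatasphere.linkis.hadoop.common.utils.HDFSUtils

object HDFSUtilsUsageSketch {
  def main(args: Array[String]): Unit = {
    // The FileSystem is created inside doAs for "hadoop": as a plain remote user,
    // or via a keytab login when Kerberos is enabled in the Linkis configuration.
    val fs = HDFSUtils.getHDFSUserFileSystem("hadoop")
    fs.listStatus(new Path("/user/hadoop")).foreach(status => println(status.getPath))
  }
}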
Example 4
Source File: RangerSparkTestUtils.scala    From spark-ranger    with Apache License 2.0
package org.apache.spark.sql

import java.security.PrivilegedExceptionAction

import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.sql.catalyst.optimizer.{RangerSparkMaskingExtension, RangerSparkRowFilterExtension}
import org.apache.spark.sql.execution.RangerSparkPlanOmitStrategy

object RangerSparkTestUtils {

  def injectRules(spark: SparkSession): Unit = {
    spark.extensions.injectOptimizerRule(RangerSparkRowFilterExtension)
    spark.extensions.injectOptimizerRule(RangerSparkMaskingExtension)
    spark.extensions.injectPlannerStrategy(RangerSparkPlanOmitStrategy)
  }

  def withUser[T](user: String)(f: => T): T = {
    val ugi = UserGroupInformation.createRemoteUser(user)
    ugi.doAs(new PrivilegedExceptionAction[T] {
      override def run(): T = f
    })
  }
} 
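In a test, withUser makes the enclosed block run under an arbitrary user name, which is what Ranger policies are keyed on. A minimal, hypothetical use that only checks the effective user:

import org.apache.hadoop.security.UserGroupInformation
import org.apache.spark.sql.RangerSparkTestUtils.withUser

object WithUserSketch {
  def main(args: Array[String]): Unit = {
    // Inside the block, the current Hadoop user is the freshly created remote UGI "bob".
    val effectiveUser = withUser("bob") {
      UserGroupInformation.getCurrentUser.getShortUserName
    }
    println(effectiveUser) // prints "bob"
  }
}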
Example 5
Source File: MetadataOperation.scala    From kyuubi    with Apache License 2.0
package yaooqinn.kyuubi.operation.metadata

import java.security.PrivilegedExceptionAction

import org.apache.commons.lang3.StringUtils
import org.apache.spark.KyuubiSparkUtil

import yaooqinn.kyuubi.KyuubiSQLException
import yaooqinn.kyuubi.cli.FetchOrientation
import yaooqinn.kyuubi.operation._
import yaooqinn.kyuubi.schema.{RowSet, RowSetBuilder}
import yaooqinn.kyuubi.session.KyuubiSession

abstract class MetadataOperation(session: KyuubiSession, opType: OperationType)
  extends AbstractOperation(session, opType) {

  setHasResultSet(true)

  override def cancel(): Unit = {
    setState(CANCELED)
    throw new UnsupportedOperationException("MetadataOperation.cancel()")
  }

  override def close(): Unit = {
    setState(CLOSED)
    cleanupOperationLog()
  }

  // Convert a JDBC-style schema pattern ("%" and "_" wildcards) into the
  // datanucleus format understood by the Hive metastore.
  protected def convertSchemaPattern(pattern: String): String = {
    if (StringUtils.isEmpty(pattern)) {
      convertPattern("%", datanucleusFormat = true)
    } else {
      convertPattern(pattern, datanucleusFormat = true)
    }
  }

  private def convertPattern(pattern: String, datanucleusFormat: Boolean): String = {
    val wStr = if (datanucleusFormat) "*" else ".*"
    pattern
      .replaceAll("([^\\\\])%", "$1" + wStr)
      .replaceAll("\\\\%", "%")
      .replaceAll("^%", wStr)
      .replaceAll("([^\\\\])_", "$1.")
      .replaceAll("\\\\_", "_")
      .replaceAll("^_", ".")
  }

  protected def execute(block: => Unit): Unit = {
    setState(RUNNING)
    try {
      session.ugi.doAs(new PrivilegedExceptionAction[Unit] {
        override def run(): Unit = block
      })
      setState(FINISHED)
    } catch {
      case e: Exception =>
        setState(ERROR)
        throw new KyuubiSQLException(KyuubiSparkUtil.findCause(e))
    }
  }

  override def getNextRowSet(order: FetchOrientation, rowSetSize: Long): RowSet = {
    assertState(FINISHED)
    validateDefaultFetchOrientation(order)
    val taken = iter.take(rowSetSize.toInt)
    RowSetBuilder.create(getResultSetSchema, taken.toSeq, getProtocolVersion)
  }

} 
Example 6
Source File: ExecuteStatementOperation.scala    From kyuubi    with Apache License 2.0
package yaooqinn.kyuubi.operation.statement

import java.security.PrivilegedExceptionAction
import java.util.UUID
import java.util.concurrent.RejectedExecutionException

import yaooqinn.kyuubi.KyuubiSQLException
import yaooqinn.kyuubi.operation._
import yaooqinn.kyuubi.session.KyuubiSession

abstract class ExecuteStatementOperation(
    session: KyuubiSession,
    statement: String,
    runAsync: Boolean)
  extends AbstractOperation(session, EXECUTE_STATEMENT) {

  protected val statementId: String = UUID.randomUUID().toString

  protected def execute(): Unit

  protected def onStatementError(id: String, message: String, trace: String): Unit = {
    error(
      s"""
         |Error executing query as ${session.getUserName},
         |$statement
         |Current operation state ${getStatus.getState},
         |$trace
       """.stripMargin)
    setState(ERROR)
  }

  protected def cleanup(state: OperationState): Unit = {
    setState(state)
    if (shouldRunAsync) {
      val backgroundHandle = getBackgroundHandle
      if (backgroundHandle != null) {
        backgroundHandle.cancel(true)
      }
    }
  }

  override protected def runInternal(): Unit = {
    setState(PENDING)
    setHasResultSet(true)
    val task = new Runnable() {
      override def run(): Unit = {
        try {
          session.ugi.doAs(new PrivilegedExceptionAction[Unit]() {
            registerCurrentOperationLog()
            override def run(): Unit = {
              try {
                execute()
              } catch {
                case e: KyuubiSQLException => setOperationException(e)
              }
            }
          })
        } catch {
          case e: Exception => setOperationException(new KyuubiSQLException(e))
        }
      }
    }

    if (shouldRunAsync) {
      try {
        // This submit blocks if no background threads are available to run this operation
        val backgroundHandle = session.getSessionMgr.submitBackgroundOperation(task)
        setBackgroundHandle(backgroundHandle)
      } catch {
        case rejected: RejectedExecutionException =>
          setState(ERROR)
          throw new KyuubiSQLException("The background threadpool cannot accept" +
            " new task for execution, please retry the operation", rejected)
      }
    } else {
      task.run()
    }
  }

  private def registerCurrentOperationLog(): Unit = {
    if (isOperationLogEnabled) {
      if (operationLog == null) {
        warn("Failed to get current OperationLog object of Operation: " +
          getHandle.getHandleIdentifier)
        isOperationLogEnabled = false
      } else {
        session.getSessionMgr.getOperationMgr.setOperationLog(operationLog)
      }
    }
  }

  override def shouldRunAsync: Boolean = runAsync

} 
Example 7
Source File: KyuubiHadoopUtil.scala    From kyuubi    with Apache License 2.0
package yaooqinn.kyuubi.utils

import java.security.PrivilegedExceptionAction
import java.util.EnumSet

import scala.collection.JavaConverters._
import scala.util.control.NonFatal

import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.api.records.YarnApplicationState._
import org.apache.hadoop.yarn.client.api.YarnClient
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.kyuubi.Logging
import org.apache.spark.KyuubiSparkUtil

private[kyuubi] object KyuubiHadoopUtil extends Logging {

  private def createYarnClient: YarnClient = {
    val c = YarnClient.createYarnClient()
    c.init(new YarnConfiguration())
    c.start()
    c
  }

  def killYarnAppByName(appName: String): Unit = {
    val client = createYarnClient
    client.getApplications(Set("SPARK").asJava, EnumSet.of(ACCEPTED, SUBMITTED, RUNNING)).asScala
      .filter(applicationReport => applicationReport.getName.equals(appName))
      .foreach { applicationReport =>
        client.killApplication(applicationReport.getApplicationId)
      }
  }

  def doAs[T](user: UserGroupInformation)(f: => T): T = {
    try {
      user.doAs(new PrivilegedExceptionAction[T] {
        override def run(): T = f
      })
    } catch {
      case NonFatal(e) => throw KyuubiSparkUtil.findCause(e)
    }
  }

  def doAsAndLogNonFatal(user: UserGroupInformation)(f: => Unit): Unit = {
    try {
      doAs(user)(f)
    } catch {
      case NonFatal(e) => error(s"Failed to operate as ${user.getShortUserName}", e)
    }
  }

  // Run the given block as the "real" user behind the current (possibly proxy) user,
  // falling back to the current user when no real user is set.
  def doAsRealUser[T](f: => T): T = {
    val currentUser = UserGroupInformation.getCurrentUser
    val realUser = Option(currentUser.getRealUser).getOrElse(currentUser)
    doAs(realUser)(f)
  }
} 
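Because doAs takes the block by name, call sites stay free of the anonymous-class boilerplate. A hypothetical call site is sketched below; note that KyuubiHadoopUtil is private[kyuubi], so the caller must live under the yaooqinn.kyuubi package, and the user name and application name are placeholders.

package yaooqinn.kyuubi.sketch

import org.apache.hadoop.security.UserGroupInformation

import yaooqinn.kyuubi.utils.KyuubiHadoopUtil

object KyuubiDoAsSketch {
  def main(args: Array[String]): Unit = {
    val ugi = UserGroupInformation.createRemoteUser("kyuubi")

    // Runs the block as "kyuubi"; failures are unwrapped via findCause and rethrown.
    val who = KyuubiHadoopUtil.doAs(ugi) {
      UserGroupInformation.getCurrentUser.getShortUserName
    }
    println(who) // prints "kyuubi"

    // Same pattern, but non-fatal failures are only logged, not propagated.
    KyuubiHadoopUtil.doAsAndLogNonFatal(ugi) {
      KyuubiHadoopUtil.killYarnAppByName("some-app") // needs a reachable YARN ResourceManager
    }
  }
}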
Example 8
Source File: SparkHadoopUtilSuite.scala    From Spark-2.3.1    with Apache License 2.0
package org.apache.spark.deploy

import java.security.PrivilegedExceptionAction

import scala.util.Random

import org.apache.hadoop.fs.FileStatus
import org.apache.hadoop.fs.permission.{FsAction, FsPermission}
import org.apache.hadoop.security.UserGroupInformation
import org.scalatest.Matchers

import org.apache.spark.SparkFunSuite

class SparkHadoopUtilSuite extends SparkFunSuite with Matchers {
  test("check file permission") {
    import FsAction._
    val testUser = s"user-${Random.nextInt(100)}"
    val testGroups = Array(s"group-${Random.nextInt(100)}")
    val testUgi = UserGroupInformation.createUserForTesting(testUser, testGroups)

    testUgi.doAs(new PrivilegedExceptionAction[Void] {
      override def run(): Void = {
        val sparkHadoopUtil = new SparkHadoopUtil

        // If file is owned by user and user has access permission
        var status = fileStatus(testUser, testGroups.head, READ_WRITE, READ_WRITE, NONE)
        sparkHadoopUtil.checkAccessPermission(status, READ) should be(true)
        sparkHadoopUtil.checkAccessPermission(status, WRITE) should be(true)

        // If file is owned by user but user has no access permission
        status = fileStatus(testUser, testGroups.head, NONE, READ_WRITE, NONE)
        sparkHadoopUtil.checkAccessPermission(status, READ) should be(false)
        sparkHadoopUtil.checkAccessPermission(status, WRITE) should be(false)

        val otherUser = s"test-${Random.nextInt(100)}"
        val otherGroup = s"test-${Random.nextInt(100)}"

        // If file is owned by user's group and user's group has access permission
        status = fileStatus(otherUser, testGroups.head, NONE, READ_WRITE, NONE)
        sparkHadoopUtil.checkAccessPermission(status, READ) should be(true)
        sparkHadoopUtil.checkAccessPermission(status, WRITE) should be(true)

        // If file is owned by user's group but user's group has no access permission
        status = fileStatus(otherUser, testGroups.head, READ_WRITE, NONE, NONE)
        sparkHadoopUtil.checkAccessPermission(status, READ) should be(false)
        sparkHadoopUtil.checkAccessPermission(status, WRITE) should be(false)

        // If file is owned by other user and this user has access permission
        status = fileStatus(otherUser, otherGroup, READ_WRITE, READ_WRITE, READ_WRITE)
        sparkHadoopUtil.checkAccessPermission(status, READ) should be(true)
        sparkHadoopUtil.checkAccessPermission(status, WRITE) should be(true)

        // If file is owned by other user but this user has no access permission
        status = fileStatus(otherUser, otherGroup, READ_WRITE, READ_WRITE, NONE)
        sparkHadoopUtil.checkAccessPermission(status, READ) should be(false)
        sparkHadoopUtil.checkAccessPermission(status, WRITE) should be(false)

        null
      }
    })
  }

  private def fileStatus(
      owner: String,
      group: String,
      userAction: FsAction,
      groupAction: FsAction,
      otherAction: FsAction): FileStatus = {
    new FileStatus(0L,
      false,
      0,
      0L,
      0L,
      0L,
      new FsPermission(userAction, groupAction, otherAction),
      owner,
      group,
      null)
  }
}
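One Scala-specific detail worth calling out in this last example: doAs is parameterized on the action's result type, so a PrivilegedExceptionAction[Void] must end with an explicit null, whereas the earlier examples use PrivilegedExceptionAction[Unit] and simply let run() fall off the end. A minimal side-by-side sketch (the test user is a placeholder):

import java.security.PrivilegedExceptionAction

import org.apache.hadoop.security.UserGroupInformation

object VoidVsUnitSketch {
  def main(args: Array[String]): Unit = {
    val ugi = UserGroupInformation.createUserForTesting("testuser", Array("testgroup"))

    // With Void, the only possible return value is null.
    ugi.doAs(new PrivilegedExceptionAction[Void] {
      override def run(): Void = { println("running as Void"); null }
    })

    // With Unit, no explicit return value is needed, which usually reads better from Scala.
    ugi.doAs(new PrivilegedExceptionAction[Unit] {
      override def run(): Unit = println("running as Unit")
    })
  }
}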