org.apache.spark.scheduler.SparkListenerApplicationEnd Scala Examples

The following examples show how to use org.apache.spark.scheduler.SparkListenerApplicationEnd. Each example comes from an open-source project; the source file and license are noted above each listing.
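Before the project examples, here is a minimal, self-contained sketch of the core pattern: register a SparkListener whose onApplicationEnd callback runs once when the application shuts down. The object name, app name, and local[*] master below are illustrative and not taken from either project.

import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
import org.apache.spark.sql.SparkSession

object ApplicationEndExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("application-end-example")
      .master("local[*]")
      .getOrCreate()

    // SparkListenerApplicationEnd carries the shutdown timestamp in
    // milliseconds since the epoch; the callback fires exactly once.
    spark.sparkContext.addSparkListener(new SparkListener {
      override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
        println(s"Application ended at ${applicationEnd.time}")
      }
    })

    spark.stop()
  }
}
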
Example 1
Source File: MVManagerInSpark.scala    From carbondata    with Apache License 2.0
package org.apache.carbondata.view

import java.util

import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
import org.apache.spark.sql.{CarbonEnv, CarbonUtils, SparkSession}

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.ThreadLocalSessionInfo
import org.apache.carbondata.core.view.MVManager

class MVManagerInSpark(session: SparkSession) extends MVManager {
  override def getDatabases: util.List[String] = {
    // Disable SQL rewrite on this thread while listing the databases.
    CarbonUtils.threadSet(CarbonCommonConstants.DISABLE_SQL_REWRITE, "true")
    try {
      val databaseList = session.catalog.listDatabases()
      val databaseNameList = new util.ArrayList[String]()
      for (database <- databaseList.collect()) {
        databaseNameList.add(database.name)
      }
      databaseNameList
    } finally {
      CarbonUtils.threadUnset(CarbonCommonConstants.DISABLE_SQL_REWRITE)
    }
  }

  override def getDatabaseLocation(databaseName: String): String = {
    CarbonEnv.getDatabaseLocation(databaseName, session)
  }
}

object MVManagerInSpark {

  private val MANAGER_MAP_BY_SESSION =
    new util.HashMap[SparkSession, MVManagerInSpark]()

  def get(session: SparkSession): MVManagerInSpark = {
    // Double-checked locking: at most one manager is created per SparkSession.
    var viewManager = MANAGER_MAP_BY_SESSION.get(session)
    if (viewManager == null) {
      MANAGER_MAP_BY_SESSION.synchronized {
        viewManager = MANAGER_MAP_BY_SESSION.get(session)
        if (viewManager == null) {
          viewManager = new MVManagerInSpark(session)
          MANAGER_MAP_BY_SESSION.put(session, viewManager)
          // Drop per-session Carbon state once the application ends.
          session.sparkContext.addSparkListener(new SparkListener {
            override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
              CarbonEnv.carbonEnvMap.remove(session)
              ThreadLocalSessionInfo.unsetAll()
            }
          })
        }
      }
    }
    viewManager
  }

} 
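The double-checked locking in MVManagerInSpark.get guarantees that at most one manager, and one cleanup listener, is created per SparkSession even under concurrent access. A usage sketch, assuming a local session (the session setup below is illustrative, not part of the carbondata source):

import org.apache.spark.sql.SparkSession
import org.apache.carbondata.view.MVManagerInSpark

// Illustrative session; a real deployment would use the
// Carbon-enabled session created by the application.
val session = SparkSession.builder()
  .appName("mv-manager-demo")
  .master("local[*]")
  .getOrCreate()

// Repeated calls with the same session return the cached instance.
val manager = MVManagerInSpark.get(session)
val databases = manager.getDatabases
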
Example 2
Source File: SparkSqlAdapter.scala    From carbondata    with Apache License 2.0
package org.apache.spark.sql

import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.execution.datasources.HadoopFsRelation
import org.apache.spark.sql.types.StructType

import org.apache.carbondata.core.util.ThreadLocalSessionInfo

object SparkSqlAdapter {

  def initSparkSQL(): Unit = {
    // No-op hook; nothing needs to be initialized here.
  }

  def getScanForSegments(
      @transient relation: HadoopFsRelation,
      output: Seq[Attribute],
      outputSchema: StructType,
      partitionFilters: Seq[Expression],
      dataFilters: Seq[Expression],
      tableIdentifier: Option[TableIdentifier]
  ): FileSourceScanExec = {
    // Thin wrapper over the FileSourceScanExec constructor, insulating
    // callers from constructor differences across Spark versions.
    FileSourceScanExec(
      relation,
      output,
      outputSchema,
      partitionFilters,
      dataFilters,
      tableIdentifier)
  }

  def addSparkSessionListener(sparkSession: SparkSession): Unit = {
    // Drop per-session Carbon state once the application ends.
    sparkSession.sparkContext.addSparkListener(new SparkListener {
      override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
        CarbonEnv.carbonEnvMap.remove(sparkSession)
        ThreadLocalSessionInfo.unsetAll()
      }
    })
  }
}
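A usage sketch for this adapter, assuming the listener is wired up once at startup (the session setup below is illustrative, not part of the carbondata source):

import org.apache.spark.sql.{SparkSession, SparkSqlAdapter}

val spark = SparkSession.builder()
  .appName("adapter-demo")
  .master("local[*]")
  .getOrCreate()

// Register the cleanup listener once; its onApplicationEnd callback
// clears Carbon's per-session state when the application stops.
SparkSqlAdapter.initSparkSQL()
SparkSqlAdapter.addSparkSessionListener(spark)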