org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException Scala Examples

The following examples show how to use org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException. Each example notes its source project and license.
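Before the examples, a minimal sketch of the typical usage pattern: Spark's catalog throws NoSuchDatabaseException (a subclass of AnalysisException) when an operation names a database that does not exist, so callers catch it to fail gracefully. The session setup and the database name below are illustrative, not taken from any of the projects that follow.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException

object NoSuchDatabaseDemo {
  def main(args: Array[String]): Unit = {
    // Hypothetical local session, purely for illustration.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("NoSuchDatabaseDemo")
      .getOrCreate()

    try {
      // Listing tables in a database that was never created raises
      // NoSuchDatabaseException.
      spark.catalog.listTables("no_such_db").collect()
    } catch {
      case e: NoSuchDatabaseException =>
        println(s"Caught expected failure: ${e.getMessage}")
    } finally {
      spark.stop()
    }
  }
}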
Example 1
Source File: ClientInterface.scala    From iolap   with Apache License 2.0
package org.apache.spark.sql.hive.client

import java.io.PrintStream
import java.util.{Map => JMap}

import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, NoSuchTableException}

private[hive] case class HiveDatabase(
    name: String,
    location: String)

private[hive] abstract class TableType { val name: String }
private[hive] case object ExternalTable extends TableType { override val name = "EXTERNAL_TABLE" }
private[hive] case object IndexTable extends TableType { override val name = "INDEX_TABLE" }
private[hive] case object ManagedTable extends TableType { override val name = "MANAGED_TABLE" }
private[hive] case object VirtualView extends TableType { override val name = "VIRTUAL_VIEW" }

// TODO: Use this for Tables and Partitions
private[hive] case class HiveStorageDescriptor(
    location: String,
    inputFormat: String,
    outputFormat: String,
    serde: String,
    serdeProperties: Map[String, String])

private[hive] case class HivePartition(
    values: Seq[String],
    storage: HiveStorageDescriptor)

private[hive] case class HiveColumn(name: String, hiveType: String, comment: String)
private[hive] case class HiveTable(
    specifiedDatabase: Option[String],
    name: String,
    schema: Seq[HiveColumn],
    partitionColumns: Seq[HiveColumn],
    properties: Map[String, String],
    serdeProperties: Map[String, String],
    tableType: TableType,
    location: Option[String] = None,
    inputFormat: Option[String] = None,
    outputFormat: Option[String] = None,
    serde: Option[String] = None,
    viewText: Option[String] = None) {

  @transient
  private[client] var client: ClientInterface = _

  private[client] def withClient(ci: ClientInterface): this.type = {
    client = ci
    this
  }

  def database: String = specifiedDatabase.getOrElse(sys.error("database not resolved"))

  def isPartitioned: Boolean = partitionColumns.nonEmpty

  def getAllPartitions: Seq[HivePartition] = client.getAllPartitions(this)

  // Hive does not support backticks when passing names to the client.
  def qualifiedName: String = s"$database.$name"
}


// The original file continues with the Hive metastore client trait; its
// many operations are elided here, keeping only what this snippet uses.
private[hive] trait ClientInterface {
  def getAllPartitions(hTable: HiveTable): Seq[HivePartition]

  def reset(): Unit
}
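To see how these pieces fit together, here is a toy sketch, not from the iolap project: a stub ClientInterface wired into a HiveTable via withClient, so getAllPartitions has something to delegate to. All names and values are invented, and because of the private[hive]/private[client] modifiers the sketch would have to live in the org.apache.spark.sql.hive.client package.

// A stub client that serves canned partitions; purely illustrative.
private[hive] class StubClient(partitions: Seq[HivePartition]) extends ClientInterface {
  override def getAllPartitions(hTable: HiveTable): Seq[HivePartition] = partitions
  override def reset(): Unit = ()
}

val events = HiveTable(
  specifiedDatabase = Some("default"),
  name             = "events",
  schema           = Seq(HiveColumn("id", "int", comment = "")),
  partitionColumns = Seq(HiveColumn("day", "string", comment = "")),
  properties       = Map.empty,
  serdeProperties  = Map.empty,
  tableType        = ManagedTable
).withClient(new StubClient(Nil))

assert(events.qualifiedName == "default.events") // database resolved
assert(events.isPartitioned)                     // partitioned by "day"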
Example 2
Source File: SparkSchemaProvider.scala    From mimir   with Apache License 2.0
package mimir.data

import com.typesafe.scalalogging.LazyLogging
import org.apache.spark.sql.{ DataFrame, SaveMode } 
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.analysis.{ UnresolvedRelation, NoSuchDatabaseException }
import org.apache.spark.sql.execution.command.{ DropTableCommand, CreateDatabaseCommand }

import mimir.Database
import mimir.algebra._
import mimir.exec.spark.{MimirSpark, RAToSpark, RowIndexPlan}

class SparkSchemaProvider(db: Database)
  extends LogicalPlanSchemaProvider
  with MaterializedTableProvider
  with LazyLogging
{

  // Both methods treat a missing database as an empty result rather than
  // letting NoSuchDatabaseException propagate to the caller.
  def listTables(): Seq[ID] =
  {
    try {
      MimirSpark.get.sparkSession
                .catalog
                .listTables( sparkDBName )
                .collect()
                .map { table => ID(table.name) }
    } catch {
      case _:NoSuchDatabaseException => {
        logger.warn(s"Couldn't find database!!! ($sparkDBName)")
        Seq()
      }
    }
  }

  def tableSchema(table: ID): Option[Seq[(ID, Type)]] =
  {
    try {
      if(MimirSpark.get.sparkSession.catalog.tableExists(sparkDBName, table.id)){
        Some(
          MimirSpark.get.sparkSession
                    .catalog
                    .listColumns(sparkDBName, table.id)
                    .collect()
                    .map { col => (
                        ID(col.name), 
                        RAToSpark.getMimirType( 
                          RAToSpark.dataTypeFromHiveDataTypeString(col.dataType))
                      ) }
        )
      } else { 
        logger.trace(s"$table doesn't exist")
        None 
      }
    } catch {
      case _:NoSuchDatabaseException => {
        logger.warn(s"Couldn't find database!!! ($sparkDBName)")
        None
      }
    }
  }

  def logicalplan(table: ID): LogicalPlan =
  {
    RowIndexPlan(
      UnresolvedRelation(TableIdentifier(table.id)), 
      tableSchema(table).get
    ).getPlan(db)
  }

  def createStoredTableAs(data: DataFrame, name: ID)
  {
    data.persist()
        .createOrReplaceTempView(name.id)
    data.write
        .mode(SaveMode.Overwrite)
        .saveAsTable(name.id)
  }

  def dropStoredTable(name: ID)
  {
    DropTableCommand(
      TableIdentifier(name.id, None),//Option(sparkDBName)), 
      ifExists = true, isView = false, purge = true
    ).run(MimirSpark.get.sparkSession)
  }
} 
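The pattern worth extracting from this provider is "treat a missing database as an empty answer": guard reads with tableExists and convert NoSuchDatabaseException into None or an empty Seq. A condensed sketch of that pattern, assuming only a SparkSession in scope; the helper name is invented, not mimir's API:

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException

// Invented helper: returns None when the table or its database is missing.
def columnTypes(spark: SparkSession, db: String, table: String): Option[Seq[(String, String)]] =
  try {
    if (spark.catalog.tableExists(db, table)) {
      Some(spark.catalog.listColumns(db, table)
                .collect()
                .map { col => (col.name, col.dataType) }
                .toSeq)
    } else { None }
  } catch {
    case _: NoSuchDatabaseException => None
  }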
Example 3
Source File: TestShowPartitions.scala    From carbondata   with Apache License 2.0
package org.apache.carbondata.spark.testsuite.partition

import org.apache.spark.sql.{AnalysisException, DataFrame, Row}
import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException
import org.scalatest.BeforeAndAfterAll

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.test.util.QueryTest

import org.apache.carbondata.spark.exception.ProcessMetaDataException

class TestShowPartition extends QueryTest with BeforeAndAfterAll {
  override def beforeAll = {

    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")

    sql("drop table if exists notPartitionTable")
    sql("""
                | CREATE TABLE notPartitionTable
                | (
                | vin String,
                | logdate Timestamp,
                | phonenumber Int,
                | country String,
                | area String
                | )
                | STORED AS carbondata
              """.stripMargin)

    sql(s"CREATE DATABASE if not exists partitionDB")

    sql("DROP TABLE IF EXISTS hiveTable")
    sql("""
       | create table hiveTable(id int, name string) partitioned by (city string)
       | row format delimited fields terminated by ','
       """.stripMargin)
    sql("alter table hiveTable add partition (city = 'Hangzhou')")

    sql(s"CREATE DATABASE if not exists hiveDB")
    sql("DROP TABLE IF EXISTS hiveDB.hiveTable")
    sql("""
       | create table hiveDB.hiveTable(id int, name string) partitioned by (city string)
       | row format delimited fields terminated by ','
       """.stripMargin)
    sql("alter table hiveDB.hiveTable add partition (city = 'Shanghai')")
  }

  test("show partition table: exception when show not partition table") {
    val errorMessage = intercept[AnalysisException] {
      sql("show partitions notPartitionTable").show()
    }
    assert(errorMessage.getMessage.contains(
      "SHOW PARTITIONS is not allowed on a table that is not partitioned"))
  }

  test("show partition table: hive partition table") {
    // EqualTo
    checkAnswer(sql("show partitions hiveTable"), Seq(Row("city=Hangzhou")))
    sql("use hiveDB").show()
    checkAnswer(sql("show partitions hiveTable"), Seq(Row("city=Shanghai")))
    sql("use default").show()
  }

  override def afterAll = {
    sql("use default")
    sql("drop table if exists notPartitionTable")
    sql("drop table if exists  hiveTable")
    try {
      sql("drop table if exists  hiveDB.hiveTable")
    } catch {
      case ex: NoSuchDatabaseException => print(ex.getMessage())
    }
    sql("DROP DATABASE if exists partitionDB")
    sql("DROP DATABASE if exists hiveDB")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
  }
}
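As a closing note, the same suite style can also assert on the exception type directly instead of only catching it in teardown. An illustrative test case in the same vein, not part of the carbondata suite:

test("using a missing database raises NoSuchDatabaseException") {
  intercept[NoSuchDatabaseException] {
    sql("use nonexistentDB")
  }
}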