java.sql.ResultSet Scala Examples

The following examples show how to use java.sql.ResultSet. They are taken from open-source Scala projects; the source file, project name, and license are noted above each example.
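Before the project examples, here is a minimal, self-contained sketch of the basic ResultSet cursor pattern (it assumes an in-memory H2 database; the table and values are illustrative and not taken from any of the projects below):

// ResultSetBasics.scala -- hypothetical warm-up example
import java.sql.{ DriverManager, ResultSet }

object ResultSetBasics extends App {
  // H2 in-memory database; any JDBC driver on the classpath would do
  val conn = DriverManager.getConnection("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1")
  val stmt = conn.createStatement()
  stmt.execute("CREATE TABLE person(id INT, name VARCHAR(64))")
  stmt.execute("INSERT INTO person VALUES (1, 'Ada'), (2, 'Alan')")

  // A ResultSet is a forward-moving cursor: next() advances it and returns
  // false when there are no more rows; columns are read by name or 1-based index.
  val rs: ResultSet = stmt.executeQuery("SELECT id, name FROM person ORDER BY id")
  while (rs.next())
    println(s"${rs.getInt("id")} -> ${rs.getString("name")}")

  rs.close()
  stmt.close()
  conn.close()
}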
Example 1
Source File: TestOracleDataTypeConverter.scala    From ohara   with Apache License 2.0
package oharastream.ohara.connector.jdbc.datatype

import java.sql.ResultSet

import oharastream.ohara.client.configurator.InspectApi.RdbColumn
import oharastream.ohara.common.rule.OharaTest
import org.junit.Test
import org.mockito.Mockito
import org.mockito.Mockito.when
import org.scalatest.matchers.should.Matchers._

class TestOracleDataTypeConverter extends OharaTest {
  @Test
  def testConverterCharValue(): Unit = {
    val resultSet: ResultSet = Mockito.mock(classOf[ResultSet])
    when(resultSet.getString("column1")).thenReturn("value1")
    val column                  = RdbColumn("column1", "CHAR", false)
    val oracleDataTypeConverter = new OracleDataTypeConverter()
    val result                  = oracleDataTypeConverter.converterValue(resultSet, column)
    result shouldBe "value1"
    result.isInstanceOf[String] shouldBe true
  }

  @Test
  def testConverterRawValue(): Unit = {
    val resultSet: ResultSet = Mockito.mock(classOf[ResultSet])
    when(resultSet.getBytes("column1")).thenReturn("aaaa".getBytes)
    val column                  = RdbColumn("column1", "RAW", false)
    val oracleDataTypeConverter = new OracleDataTypeConverter()
    val result                  = oracleDataTypeConverter.converterValue(resultSet, column)
    result.isInstanceOf[Array[Byte]] shouldBe true
    new String(result.asInstanceOf[Array[Byte]]) shouldBe "aaaa"
  }

  @Test
  def testConverterRawNullValue(): Unit = {
    val resultSet: ResultSet = Mockito.mock(classOf[ResultSet])
    when(resultSet.getBytes("column1")).thenReturn(null)
    val column                  = RdbColumn("column1", "RAW", false)
    val oracleDataTypeConverter = new OracleDataTypeConverter()
    val result                  = oracleDataTypeConverter.converterValue(resultSet, column)
    result.isInstanceOf[Array[Byte]] shouldBe true
    result.asInstanceOf[Array[Byte]].length shouldBe 0
  }

  @Test
  def testConverterSmallIntValue(): Unit = {
    val resultSet: ResultSet = Mockito.mock(classOf[ResultSet])
    when(resultSet.getInt("column1")).thenReturn(111)
    val column                  = RdbColumn("column1", "INT", false)
    val oracleDataTypeConverter = new OracleDataTypeConverter()
    val result                  = oracleDataTypeConverter.converterValue(resultSet, column)
    result.isInstanceOf[Integer] shouldBe true
    result.asInstanceOf[Integer] shouldBe 111
  }
} 
Example 2
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc.postgres

import java.sql.ResultSet

import io.getquill.context.jdbc.PrepareJdbcSpecBase
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._

  before {
    testContext.run(query[Product].delete)
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert) mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery) === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery) === withOrderedIds(productEntries)
  }
} 
Example 3
Source File: RichConnection.scala    From s4ds   with Apache License 2.0
// RichConnection.scala

import java.sql.{Connection, ResultSet}

class RichConnection(val underlying:Connection) {

  
  def withQuery[T](query:String)(f:ResultSet => T):T = {
    val statement = underlying.prepareStatement(query)
    val results = statement.executeQuery
    try {
      f(results) // loan the ResultSet to the client
    }
    finally {
      // Ensure all the resources get freed.
      results.close
      statement.close
    }
  }
} 
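A hedged usage sketch of the loan pattern above (the H2 connection string and the users table are illustrative additions, not part of the original example):

// RichConnectionDemo.scala -- hypothetical usage of RichConnection.withQuery
import java.sql.DriverManager

object RichConnectionDemo extends App {
  val conn = DriverManager.getConnection("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1")
  conn.createStatement().execute("CREATE TABLE users(name VARCHAR(64))")
  conn.createStatement().execute("INSERT INTO users VALUES ('Ada'), ('Alan')")

  val rich = new RichConnection(conn)
  // The ResultSet is only valid inside the function passed to withQuery,
  // so materialize whatever is needed before returning.
  val names: List[String] = rich.withQuery("SELECT name FROM users ORDER BY name") { rs =>
    val buf = scala.collection.mutable.ListBuffer.empty[String]
    while (rs.next()) buf += rs.getString("name")
    buf.toList
  }
  println(names) // List(Ada, Alan)
}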
Example 4
Source File: TableReader.scala    From DataQuality   with GNU Lesser General Public License v3.0
package it.agilelab.bigdata.DataQuality.utils.io.db.readers

import java.sql.{Connection, ResultSet}
import java.util.Properties

import org.apache.spark.sql.{DataFrame, SQLContext}


// Note: the enclosing trait declaration appears to have been truncated in this
// excerpt; it is reconstructed here from the PostgresReader subclass in Example 5.
trait TableReader {

  val connectionUrl: String

  def getConnection: Connection

  def runQuery[T](query: String, transformOutput: ResultSet => T): T

  def loadData(
      table: String,
      username: Option[String],
      password: Option[String])(implicit sqlContext: SQLContext): DataFrame = {
    val connectionProperties = new Properties()

    (username, password) match {
      case (Some(u), Some(p)) =>
        connectionProperties.put("user", u)
        connectionProperties.put("password", p)
      case _ =>
    }

    sqlContext.read.jdbc(connectionUrl, table, connectionProperties)
  }

} 
Example 5
Source File: PostgresReader.scala    From DataQuality   with GNU Lesser General Public License v3.0
package it.agilelab.bigdata.DataQuality.utils.io.db.readers

import java.sql.{Connection, DriverManager, ResultSet}

import it.agilelab.bigdata.DataQuality.sources.DatabaseConfig


case class PostgresReader(config: DatabaseConfig) extends TableReader {

  override val connectionUrl: String = "jdbc:postgresql://" + config.host

  override def runQuery[T](query: String,
                           transformOutput: ResultSet => T): T = {
    val connection = getConnection

    val statement = connection.createStatement()
    statement.setFetchSize(1000)

    val queryResult = statement.executeQuery(query)
    val result = transformOutput(queryResult)
    statement.close()
    result
  }

  override def getConnection: Connection = {
    val connectionProperties = new java.util.Properties()
    config.user match {
      case Some(user) => connectionProperties.put("user", user)
      case None       =>
    }
    config.password match {
      case Some(pwd) => connectionProperties.put("password", pwd)
      case None      =>
    }
    connectionProperties.put("driver", "org.postgresql.Driver")

    DriverManager.getConnection(connectionUrl, connectionProperties)
  }

} 
Example 6
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc.h2

import java.sql.ResultSet

import io.getquill.context.jdbc.PrepareJdbcSpecBase
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._

  before {
    testContext.run(query[Product].delete)
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert) mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery) === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery) === withOrderedIds(productEntries)
  }
} 
Example 7
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc.sqlite

import java.sql.ResultSet

import io.getquill.context.jdbc.PrepareJdbcSpecBase
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._

  before {
    testContext.run(query[Product].delete)
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert) mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery) === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery) === withOrderedIds(productEntries)
  }
} 
Example 8
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc.mysql

import java.sql.ResultSet

import io.getquill.context.jdbc.PrepareJdbcSpecBase
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._

  before {
    testContext.run(query[Product].delete)
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert) mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery) === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery) === withOrderedIds(productEntries)
  }
} 
Example 9
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc.sqlserver

import java.sql.ResultSet

import io.getquill.context.jdbc.PrepareJdbcSpecBase
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._

  before {
    testContext.run(query[Product].delete)
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])
  implicit val im = insertMeta[Product](_.id)

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))

    singleInsert(dataSource.getConnection)(prepareInsert) mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery) === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery) === withOrderedIds(productEntries)
  }
} 
Example 10
Source File: PrepareJdbcSpecBase.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc
import java.sql.{ Connection, PreparedStatement, ResultSet }

import io.getquill.context.sql.ProductSpec
import io.getquill.util.Using.Manager
import org.scalactic.Equality
import scala.util.{ Success, Failure }

trait PrepareJdbcSpecBase extends ProductSpec {

  implicit val productEq = new Equality[Product] {
    override def areEqual(a: Product, b: Any): Boolean = b match {
      case Product(_, desc, sku) => desc == a.description && sku == a.sku
      case _                     => false
    }
  }

  def productExtractor: ResultSet => Product

  def withOrderedIds(products: List[Product]) =
    products.zipWithIndex.map { case (product, id) => product.copy(id = id.toLong + 1) }

  def singleInsert(conn: => Connection)(prep: Connection => PreparedStatement) = {
    val flag = Manager { use =>
      val c = use(conn)
      val s = use(prep(c))
      s.execute()
    }
    flag match {
      case Success(value) => value
      case Failure(e)     => throw e
    }
  }

  def batchInsert(conn: => Connection)(prep: Connection => List[PreparedStatement]) = {
    val r = Manager { use =>
      val c = use(conn)
      val st = prep(c)
      appendExecuteSequence(st)
    }
    r.flatten match {
      case Success(value) => value
      case Failure(e)     => throw e
    }
  }

  def extractResults[T](conn: => Connection)(prep: Connection => PreparedStatement)(extractor: ResultSet => T) = {
    val r = Manager { use =>
      val c = use(conn)
      val st = use(prep(c))
      val rs = st.executeQuery()
      ResultSetExtractor(rs, extractor)
    }
    r match {
      case Success(v) => v
      case Failure(e) => throw e
    }
  }

  def extractProducts(conn: => Connection)(prep: Connection => PreparedStatement): List[Product] =
    extractResults(conn)(prep)(productExtractor)

  def appendExecuteSequence(actions: => List[PreparedStatement]) = {
    Manager { use =>
      actions.map { stmt =>
        val s = use(stmt)
        s.execute()
      }
    }
  }
} 
Example 11
Source File: PushDownJdbcRDD.scala    From gimel   with Apache License 2.0
package com.paypal.gimel.jdbc.utilities

import java.sql.{Connection, ResultSet}

import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.JdbcRDD
import org.apache.spark.sql.Row

import com.paypal.gimel.common.utilities.GenericUtils
import com.paypal.gimel.logger.Logger


class PushDownJdbcRDD(sc: SparkContext,
                      getConnection: () => Connection,
                      sql: String,
                      mapRow: ResultSet => Row = PushDownJdbcRDD.resultSetToRow)
  extends JdbcRDD[Row](sc, getConnection, sql, 0, 100, 1, mapRow)
    with Logging {

  override def compute(thePart: Partition,
                       context: TaskContext): Iterator[Row] = {
    val logger = Logger(this.getClass.getName)
    val functionName = s"[QueryHash: ${sql.hashCode}]"
    logger.info(s"Proceeding to execute push down query $functionName: $sql")
    val queryResult: String = GenericUtils.time(functionName, Some(logger)) {
      JDBCConnectionUtility.withResources(getConnection()) { connection =>
        JdbcAuxiliaryUtilities.executeQueryAndReturnResultString(
          sql,
          connection
        )
      }
    }
    Seq(Row(queryResult)).iterator
  }
}

object PushDownJdbcRDD {
  def resultSetToRow(rs: ResultSet): Row = {
    // JDBC column indices are 1-based, so read the first column with getString(1);
    // getString(0) would throw an SQLException at runtime.
    Row(rs.getString(1))
  }
} 
Example 12
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc.oracle

import java.sql.ResultSet

import io.getquill.context.jdbc.PrepareJdbcSpecBase
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._

  before {
    testContext.run(query[Product].delete)
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert) mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery) === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery) === withOrderedIds(productEntries)
  }
} 
Example 13
Source File: ResultSetExtractor.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc
import java.sql.ResultSet

import scala.annotation.tailrec

object ResultSetExtractor {

  private[getquill] final def apply[T](rs: ResultSet, extractor: ResultSet => T): List[T] =
    extractResult(rs, extractor, List())

  @tailrec
  private[getquill] final def extractResult[T](rs: ResultSet, extractor: ResultSet => T, acc: List[T]): List[T] =
    if (rs.next)
      extractResult(rs, extractor, extractor(rs) :: acc)
    else
      acc.reverse
} 
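ResultSetExtractor.apply is private[getquill], so it cannot be called from outside quill's own packages; the same accumulate-and-reverse pattern is easy to write standalone. A hedged, self-contained sketch (the H2 database and table are illustrative):

// ExtractAllDemo.scala -- hypothetical standalone version of the pattern above
import java.sql.{ DriverManager, ResultSet }
import scala.annotation.tailrec

object ExtractAllDemo extends App {
  def extractAll[T](rs: ResultSet)(extractor: ResultSet => T): List[T] = {
    @tailrec
    def loop(acc: List[T]): List[T] =
      if (rs.next()) loop(extractor(rs) :: acc) else acc.reverse
    loop(Nil)
  }

  val conn = DriverManager.getConnection("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1")
  val stmt = conn.createStatement()
  stmt.execute("CREATE TABLE t(id INT)")
  stmt.execute("INSERT INTO t VALUES (1), (2), (3)")
  val ids = extractAll(stmt.executeQuery("SELECT id FROM t ORDER BY id"))(_.getInt("id"))
  println(ids) // List(1, 2, 3)
}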
Example 14
Source File: SchemaModel.scala    From quill   with Apache License 2.0
package io.getquill.codegen.model

import java.sql.ResultSet

case class RawSchema[T, C](table: T, columns: Seq[C])

trait BasicTableMeta {
  def tableSchem: Option[String]
  def tableName: String
}

trait BasicColumnMeta {
  def columnName: String
}

case class JdbcTableMeta(
  tableCat:   Option[String],
  tableSchem: Option[String],
  tableName:  String,
  tableType:  Option[String]
) extends BasicTableMeta

object JdbcTableMeta {
  def fromResultSet(rs: ResultSet) = JdbcTableMeta(
    tableCat = Option(rs.getString("TABLE_CAT")),
    tableSchem = Option(rs.getString("TABLE_SCHEM")),
    tableName = rs.getString("TABLE_NAME"),
    tableType = Option(rs.getString("TABLE_TYPE"))
  )
}

case class JdbcColumnMeta(
  tableCat:   Option[String],
  tableSchem: Option[String],
  tableName:  String,
  columnName: String,
  dataType:   Int,
  typeName:   String,
  nullable:   Int,
  size:       Int
) extends BasicColumnMeta

object JdbcColumnMeta {
  def fromResultSet(rs: ResultSet) =
    JdbcColumnMeta(
      tableCat = Option(rs.getString("TABLE_CAT")),
      tableSchem = Option(rs.getString("TABLE_SCHEM")),
      tableName = rs.getString("TABLE_NAME"),
      columnName = rs.getString("COLUMN_NAME"),
      dataType = rs.getInt("DATA_TYPE"),
      typeName = rs.getString("TYPE_NAME"),
      nullable = rs.getInt("NULLABLE"),
      size = rs.getInt("COLUMN_SIZE")
    )
}

object SchemaModel {

} 
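A hedged sketch of feeding these mappers from JDBC DatabaseMetaData (the H2 connection and the manual accumulation loops are illustrative; quill's code generator drives this differently):

// SchemaMetaDemo.scala -- hypothetical usage of the fromResultSet mappers above
import java.sql.DriverManager
import scala.collection.mutable.ListBuffer

object SchemaMetaDemo extends App {
  val conn = DriverManager.getConnection("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1")
  conn.createStatement().execute("CREATE TABLE person(id INT, name VARCHAR(64))")

  // getTables/getColumns return ResultSets whose column labels match the ones
  // read by JdbcTableMeta.fromResultSet and JdbcColumnMeta.fromResultSet above.
  val tablesRs = conn.getMetaData.getTables(null, null, "%", Array("TABLE"))
  val tables = ListBuffer.empty[JdbcTableMeta]
  while (tablesRs.next()) tables += JdbcTableMeta.fromResultSet(tablesRs)

  val columnsRs = conn.getMetaData.getColumns(null, null, "PERSON", "%")
  val columns = ListBuffer.empty[JdbcColumnMeta]
  while (columnsRs.next()) columns += JdbcColumnMeta.fromResultSet(columnsRs)

  val schema = RawSchema(tables.head, columns.toList)
  println(schema)
}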
Example 15
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.h2

import java.sql.ResultSet

import io.getquill.PrepareMonixJdbcSpecBase
import monix.execution.Scheduler
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareMonixJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._
  implicit val scheduler = Scheduler.global

  before {
    testContext.run(query[Product].delete).runSyncUnsafe()
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert).runSyncUnsafe() mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).runSyncUnsafe().distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === withOrderedIds(productEntries)
  }
} 
Example 16
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.sqlite

import java.sql.ResultSet

import io.getquill.PrepareMonixJdbcSpecBase
import monix.execution.Scheduler
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareMonixJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._
  implicit val scheduler = Scheduler.global

  before {
    testContext.run(query[Product].delete).runSyncUnsafe()
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert).runSyncUnsafe() mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).runSyncUnsafe().distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === withOrderedIds(productEntries)
  }
} 
Example 17
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.mysql

import java.sql.ResultSet

import io.getquill.PrepareMonixJdbcSpecBase
import monix.execution.Scheduler
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareMonixJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._
  implicit val scheduler = Scheduler.global

  before {
    testContext.run(query[Product].delete).runSyncUnsafe()
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert).runSyncUnsafe() mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).runSyncUnsafe().distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === withOrderedIds(productEntries)
  }
} 
Example 18
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.sqlserver

import java.sql.ResultSet

import io.getquill.PrepareMonixJdbcSpecBase
import monix.execution.Scheduler
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareMonixJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._
  implicit val scheduler = Scheduler.global

  before {
    testContext.run(query[Product].delete).runSyncUnsafe()
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])
  implicit val im = insertMeta[Product](_.id)

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert).runSyncUnsafe() mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).runSyncUnsafe().distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === withOrderedIds(productEntries)
  }
} 
Example 19
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.oracle

import java.sql.ResultSet

import io.getquill.PrepareMonixJdbcSpecBase
import monix.execution.Scheduler
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareMonixJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._
  implicit val scheduler = Scheduler.global

  before {
    testContext.run(query[Product].delete).runSyncUnsafe()
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert).runSyncUnsafe() mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).runSyncUnsafe().distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === withOrderedIds(productEntries)
  }
} 
Example 20
Source File: JdbcFlow.scala    From fusion-data   with Apache License 2.0
package mass.connector.sql

import java.nio.charset.{ Charset, StandardCharsets }
import java.sql.ResultSet

import akka.NotUsed
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import fusion.jdbc.util.JdbcUtils

import scala.collection.immutable

case class JdbcResultSet(rs: ResultSet, values: immutable.IndexedSeq[AnyRef])

object JdbcFlow {
  def flowToText(valueSeparator: Char = ','): Flow[immutable.IndexedSeq[AnyRef], String, NotUsed] =
    Flow[immutable.IndexedSeq[AnyRef]].map { values =>
      val builder = new java.lang.StringBuilder()
      var i = 0
      while (i < values.length) {
        builder.append(values(i).toString)
        i += 1
        if (i < values.length) {
          builder.append(valueSeparator)
        }
      }
      builder.toString
    }

  def flowToSeq: Flow[ResultSet, immutable.IndexedSeq[AnyRef], NotUsed] =
    Flow[ResultSet].map { rs =>
      val metaData = rs.getMetaData
      (1 to rs.getMetaData.getColumnCount).map { i =>
        val typ = metaData.getColumnType(i)
        if (JdbcUtils.isString(typ)) {
          rs.getString(i)
        } else
          rs.getObject(i)
      }
    }

  def flowToByteString(
      valueSeparator: Char = ',',
      charset: Charset = StandardCharsets.UTF_8): Flow[immutable.IndexedSeq[AnyRef], ByteString, NotUsed] =
    Flow[immutable.IndexedSeq[AnyRef]].map { values =>
      val builder = ByteString.newBuilder
      var i = 0
      while (i < values.length) {
        builder.putBytes(values(i).toString.getBytes(charset))
        i += 1
        if (i < values.length) {
          builder.putByte(valueSeparator.toByte)
        }
      }
      builder.result()
    }

  def flowJdbcResultSet: Flow[ResultSet, JdbcResultSet, NotUsed] =
    Flow[ResultSet].map { rs =>
      val metaData = rs.getMetaData
      JdbcResultSet(rs, (1 to metaData.getColumnCount).map(i => rs.getObject(i)))
    }
} 
Example 21
Source File: JdbcSourceStage.scala    From fusion-data   with Apache License 2.0
package mass.connector.sql

import java.sql.{ Connection, PreparedStatement, ResultSet }

import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler }
import akka.stream.{ Attributes, Outlet, SourceShape }
import javax.sql.DataSource
import fusion.jdbc.ConnectionPreparedStatementCreator
import fusion.jdbc.util.JdbcUtils

import scala.util.control.NonFatal

class JdbcSourceStage(dataSource: DataSource, creator: ConnectionPreparedStatementCreator, fetchRowSize: Int)
    extends GraphStage[SourceShape[ResultSet]] {
  private val out: Outlet[ResultSet] = Outlet("JdbcSource.out")

  override def shape: SourceShape[ResultSet] = SourceShape(out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with OutHandler {
      var maybeConn =
        Option.empty[(Connection, Boolean, PreparedStatement, ResultSet)]

      setHandler(out, this)

      override def onPull(): Unit =
        maybeConn match {
          case Some((_, _, _, rs)) if rs.next() =>
            push(out, rs)
          case Some(_) =>
            completeStage()
          case None =>
            () // doing nothing, waiting for in preStart() to be completed
        }

      override def preStart(): Unit =
        try {
          val conn = dataSource.getConnection
          val autoCommit = conn.getAutoCommit
          conn.setAutoCommit(false)
          val stmt = creator(conn)
          val rs = stmt.executeQuery()
          //          rs.setFetchDirection(ResultSet.TYPE_FORWARD_ONLY)
          rs.setFetchSize(fetchRowSize)
          maybeConn = Option((conn, autoCommit, stmt, rs))
        } catch {
          case NonFatal(e) => failStage(e)
        }

      override def postStop(): Unit =
        for {
          (conn, autoCommit, stmt, rs) <- maybeConn
        } {
          JdbcUtils.closeResultSet(rs)
          JdbcUtils.closeStatement(stmt)
          conn.setAutoCommit(autoCommit)
          JdbcUtils.closeConnection(conn)
        }
    }
} 
Example 22
Source File: JdbcSource.scala    From fusion-data   with Apache License 2.0
package mass.connector.sql

import java.sql.ResultSet

import akka.NotUsed
import akka.stream.scaladsl.Source
import fusion.jdbc.ConnectionPreparedStatementCreator
import fusion.jdbc.util.JdbcUtils
import javax.sql.DataSource

object JdbcSource {
  def apply(sql: String, args: Iterable[Any], fetchRowSize: Int)(
      implicit dataSource: DataSource): Source[ResultSet, NotUsed] =
    Source.fromGraph(new JdbcSourceStage(dataSource, conn => {
      val stmt = conn.prepareStatement(sql, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
      JdbcUtils.setStatementParameters(stmt, args)
    }, fetchRowSize))

  def apply(creator: ConnectionPreparedStatementCreator, fetchRowSize: Int)(
      implicit dataSource: DataSource): Source[ResultSet, NotUsed] =
    Source.fromGraph(new JdbcSourceStage(dataSource, creator, fetchRowSize))
} 
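A hedged end-to-end sketch that combines JdbcSource with the flows from Example 20. It assumes Akka 2.6+ (so the implicit ActorSystem also supplies the materializer) and H2 on the classpath; the users table and connection URL are illustrative:

// JdbcStreamDemo.scala -- hypothetical pipeline built from Examples 20-22
import akka.actor.ActorSystem
import org.h2.jdbcx.JdbcDataSource

object JdbcStreamDemo extends App {
  implicit val system: ActorSystem = ActorSystem("jdbc-demo")

  val h2 = new JdbcDataSource()
  h2.setURL("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1")
  implicit val dataSource: javax.sql.DataSource = h2

  val setup = dataSource.getConnection
  setup.createStatement().execute("CREATE TABLE users(id INT, name VARCHAR(64))")
  setup.createStatement().execute("INSERT INTO users VALUES (1, 'Ada'), (2, 'Alan')")
  setup.close()

  JdbcSource("SELECT id, name FROM users ORDER BY id", args = Nil, fetchRowSize = 500)
    .via(JdbcFlow.flowToSeq)        // each ResultSet row -> immutable.IndexedSeq[AnyRef]
    .via(JdbcFlow.flowToText('|'))  // IndexedSeq[AnyRef] -> delimited String
    .runForeach(println)            // prints "1|Ada" then "2|Alan"
    .onComplete(_ => system.terminate())(system.dispatcher)
}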
Example 23
Source File: ProcessDefinitionMarshaller.scala    From sundial   with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}

import dao.postgres.common.ProcessDefinitionTable
import model.{
  EmailNotification,
  Notification,
  ProcessDefinition,
  ProcessOverlapAction
}
import util.JdbcUtil._

object ProcessDefinitionMarshaller {

  private val postgresJsonMarshaller = new PostgresJsonMarshaller

  def marshal(definition: ProcessDefinition,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import ProcessDefinitionTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_NAME => stmt.setString(index, definition.name)
        case COL_DESCRIPTION =>
          stmt.setString(index, definition.description.orNull)
        case COL_SCHEDULE =>
          stmt.setString(
            index,
            definition.schedule.map(PostgresJsonMarshaller.toJson).orNull)
        case COL_OVERLAP_ACTION =>
          stmt.setString(index, definition.overlapAction match {
            case ProcessOverlapAction.Wait      => OVERLAP_WAIT
            case ProcessOverlapAction.Terminate => OVERLAP_TERMINATE
          })
        case COL_TEAMS => stmt.setString(index, "[]")
        case COL_NOTIFICATIONS =>
          stmt.setString(
            index,
            postgresJsonMarshaller.toJson(definition.notifications))
        case COL_DISABLED   => stmt.setBoolean(index, definition.isPaused)
        case COL_CREATED_AT => stmt.setTimestamp(index, definition.createdAt)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): ProcessDefinition = {
    import ProcessDefinitionTable._
    ProcessDefinition(
      name = rs.getString(COL_NAME),
      description = Option(rs.getString(COL_DESCRIPTION)),
      schedule = Option(rs.getString(COL_SCHEDULE))
        .map(PostgresJsonMarshaller.toSchedule),
      overlapAction = rs.getString(COL_OVERLAP_ACTION) match {
        case OVERLAP_WAIT      => ProcessOverlapAction.Wait
        case OVERLAP_TERMINATE => ProcessOverlapAction.Terminate
      },
      notifications = this.getNotifications(rs),
      isPaused = rs.getBoolean(COL_DISABLED),
      createdAt = javaDate(rs.getTimestamp(COL_CREATED_AT))
    )
  }

  private def getNotifications(rs: ResultSet): Seq[Notification] = {
    import ProcessDefinitionTable._
    val teams = PostgresJsonMarshaller
      .toTeams(rs.getString(COL_TEAMS))
      .map(team => EmailNotification(team.name, team.email, team.notifyAction))
    val notifications =
      postgresJsonMarshaller.toNotifications(rs.getString(COL_NOTIFICATIONS))
    notifications ++ teams
  }

} 
Example 24
Source File: TaskDefinitionMarshaller.scala    From sundial   with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskDefinitionTable
import model.{TaskBackoff, TaskDefinition, TaskDependencies, TaskLimits}
import util.JdbcUtil._

object TaskDefinitionMarshaller {

  def marshal(definition: TaskDefinition,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskDefinitionTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_NAME    => stmt.setString(index, definition.name)
        case COL_PROC_ID => stmt.setObject(index, definition.processId)
        case COL_EXECUTABLE =>
          stmt.setString(index,
                         PostgresJsonMarshaller.toJson(definition.executable))
        case COL_MAX_ATTEMPTS =>
          stmt.setInt(index, definition.limits.maxAttempts)
        case COL_MAX_EXECUTION_TIME =>
          stmt.setObject(index,
                         definition.limits.maxExecutionTimeSeconds.orNull)
        case COL_BACKOFF_SECONDS =>
          stmt.setInt(index, definition.backoff.seconds)
        case COL_BACKOFF_EXPONENT =>
          stmt.setDouble(index, definition.backoff.exponent)
        case COL_REQUIRED_DEPS =>
          stmt.setArray(index,
                        makeStringArray(definition.dependencies.required))
        case COL_OPTIONAL_DEPS =>
          stmt.setArray(index,
                        makeStringArray(definition.dependencies.optional))
        case COL_REQUIRE_EXPLICIT_SUCCESS =>
          stmt.setBoolean(index, definition.requireExplicitSuccess)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): TaskDefinition = {
    import TaskDefinitionTable._
    TaskDefinition(
      name = rs.getString(COL_NAME),
      processId = rs.getObject(COL_PROC_ID).asInstanceOf[UUID],
      executable =
        PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      limits = TaskLimits(
        maxAttempts = rs.getInt(COL_MAX_ATTEMPTS),
        maxExecutionTimeSeconds = getIntOption(rs, COL_MAX_EXECUTION_TIME)
      ),
      backoff = TaskBackoff(
        seconds = rs.getInt(COL_BACKOFF_SECONDS),
        exponent = rs.getDouble(COL_BACKOFF_EXPONENT)
      ),
      dependencies = TaskDependencies(
        required = getStringArray(rs, COL_REQUIRED_DEPS).getOrElse(Seq.empty),
        optional = getStringArray(rs, COL_OPTIONAL_DEPS).getOrElse(Seq.empty)
      ),
      requireExplicitSuccess = rs.getBoolean(COL_REQUIRE_EXPLICIT_SUCCESS)
    )
  }

} 
Example 25
Source File: ProcessMarshaller.scala    From sundial   with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet, Timestamp}
import java.util.UUID
import dao.postgres.common.ProcessTable
import model.{Process, ProcessStatus}
import util.JdbcUtil._

object ProcessMarshaller {

  def unmarshalProcess(rs: ResultSet): Process = {
    import ProcessTable._
    Process(
      id = rs.getObject(COL_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_DEF_NAME),
      startedAt = javaDate(rs.getTimestamp(COL_STARTED)),
      status = rs.getString(COL_STATUS) match {
        case STATUS_SUCCEEDED =>
          ProcessStatus.Succeeded(javaDate(rs.getTimestamp(COL_ENDED_AT)))
        case STATUS_FAILED =>
          ProcessStatus.Failed(javaDate(rs.getTimestamp(COL_ENDED_AT)))
        case STATUS_RUNNING => ProcessStatus.Running()
      },
      taskFilter = getStringArray(rs, COL_TASK_FILTER)
    )
  }

  def marshalProcess(process: Process,
                     stmt: PreparedStatement,
                     columns: Seq[String],
                     startIndex: Int = 1)(implicit conn: Connection) = {
    import ProcessTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_ID => stmt.setObject(index, process.id)
        case COL_DEF_NAME =>
          stmt.setString(index, process.processDefinitionName)
        case COL_STARTED =>
          stmt.setTimestamp(index, new Timestamp(process.startedAt.getTime()))
        case COL_ENDED_AT =>
          stmt.setTimestamp(index, process.endedAt.getOrElse(null))
        case COL_STATUS =>
          stmt.setString(
            index,
            process.status match {
              case ProcessStatus.Succeeded(_) => STATUS_SUCCEEDED
              case ProcessStatus.Failed(_)    => STATUS_FAILED
              case ProcessStatus.Running()    => STATUS_RUNNING
            }
          )
        case COL_TASK_FILTER =>
          stmt.setArray(index,
                        process.taskFilter.map(makeStringArray).getOrElse(null))
      }
      index += 1
    }
  }

} 
Example 26
Source File: TaskDefinitionTemplateMarshaller.scala    From sundial   with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}

import dao.postgres.common.TaskDefinitionTemplateTable
import model._
import util.JdbcUtil._

object TaskDefinitionTemplateMarshaller {

  def marshal(definition: TaskDefinitionTemplate,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskDefinitionTemplateTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_NAME => stmt.setString(index, definition.name)
        case COL_PROC_DEF_NAME =>
          stmt.setString(index, definition.processDefinitionName)
        case COL_EXECUTABLE =>
          stmt.setString(index,
                         PostgresJsonMarshaller.toJson(definition.executable))
        case COL_MAX_ATTEMPTS =>
          stmt.setInt(index, definition.limits.maxAttempts)
        case COL_MAX_EXECUTION_TIME =>
          stmt.setObject(index,
                         definition.limits.maxExecutionTimeSeconds.orNull)
        case COL_BACKOFF_SECONDS =>
          stmt.setInt(index, definition.backoff.seconds)
        case COL_BACKOFF_EXPONENT =>
          stmt.setDouble(index, definition.backoff.exponent)
        case COL_REQUIRED_DEPS =>
          stmt.setArray(index,
                        makeStringArray(definition.dependencies.required))
        case COL_OPTIONAL_DEPS =>
          stmt.setArray(index,
                        makeStringArray(definition.dependencies.optional))
        case COL_REQUIRE_EXPLICIT_SUCCESS =>
          stmt.setBoolean(index, definition.requireExplicitSuccess)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): TaskDefinitionTemplate = {
    import TaskDefinitionTemplateTable._
    TaskDefinitionTemplate(
      name = rs.getString(COL_NAME),
      processDefinitionName = rs.getString(COL_PROC_DEF_NAME),
      executable =
        PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      limits = TaskLimits(
        maxAttempts = rs.getInt(COL_MAX_ATTEMPTS),
        maxExecutionTimeSeconds = getIntOption(rs, COL_MAX_EXECUTION_TIME)
      ),
      backoff = TaskBackoff(
        seconds = rs.getInt(COL_BACKOFF_SECONDS),
        exponent = rs.getDouble(COL_BACKOFF_EXPONENT)
      ),
      dependencies = TaskDependencies(
        required = getStringArray(rs, COL_REQUIRED_DEPS).getOrElse(Seq.empty),
        optional = getStringArray(rs, COL_OPTIONAL_DEPS).getOrElse(Seq.empty)
      ),
      requireExplicitSuccess = rs.getBoolean(COL_REQUIRE_EXPLICIT_SUCCESS)
    )
  }

} 
Example 27
Source File: ProcessTriggerRequestMarshaller.scala    From sundial   with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID

import dao.postgres.common.{ProcessTriggerRequestTable, TaskTriggerRequestTable}
import model.ProcessTriggerRequest
import util.JdbcUtil._

object ProcessTriggerRequestMarshaller {

  def marshal(request: ProcessTriggerRequest,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import ProcessTriggerRequestTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_REQUEST_ID => stmt.setObject(index, request.requestId)
        case COL_PROCESS_DEF_NAME =>
          stmt.setString(index, request.processDefinitionName)
        case COL_REQUESTED_AT => stmt.setTimestamp(index, request.requestedAt)
        case COL_STARTED_PROCESS_ID =>
          stmt.setObject(index, request.startedProcessId.orNull)
        case COL_TASK_FILTER =>
          stmt.setArray(index, request.taskFilter.map(makeStringArray).orNull)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): ProcessTriggerRequest = {
    import ProcessTriggerRequestTable._
    ProcessTriggerRequest(
      requestId = rs.getObject(COL_REQUEST_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROCESS_DEF_NAME),
      requestedAt = javaDate(rs.getTimestamp(COL_REQUESTED_AT)),
      startedProcessId =
        Option(rs.getObject(COL_STARTED_PROCESS_ID)).map(_.asInstanceOf[UUID]),
      taskFilter = getStringArray(rs, COL_TASK_FILTER)
    )
  }

} 
Example 28
Source File: TaskMarshaller.scala    From sundial   with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskTable
import model.{Task, TaskStatus}
import util.JdbcUtil._

object TaskMarshaller {

  def unmarshalTask(rs: ResultSet): Task = {
    import TaskTable._
    Task(
      id = rs.getObject(COL_ID).asInstanceOf[UUID],
      processId = rs.getObject(COL_PROCESS_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROC_DEF_NAME),
      taskDefinitionName = rs.getString(COL_TASK_DEF_NAME),
      executable =
        PostgresJsonMarshaller.toExecutable(rs.getString(COL_EXECUTABLE)),
      previousAttempts = rs.getInt(COL_ATTEMPTS),
      startedAt = javaDate(rs.getTimestamp(COL_STARTED)),
      status = rs.getString(COL_STATUS) match {
        case STATUS_SUCCEEDED =>
          TaskStatus.Success(javaDate(rs.getTimestamp(COL_ENDED_AT)))
        case STATUS_FAILED =>
          TaskStatus.Failure(javaDate(rs.getTimestamp(COL_ENDED_AT)),
                             Option(rs.getString(COL_REASON)))
        case STATUS_RUNNING => TaskStatus.Running()
      }
    )
  }

  def marshalTask(task: Task,
                  stmt: PreparedStatement,
                  columns: Seq[String],
                  startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_ID         => stmt.setObject(index, task.id)
        case COL_PROCESS_ID => stmt.setObject(index, task.processId)
        case COL_PROC_DEF_NAME =>
          stmt.setString(index, task.processDefinitionName)
        case COL_TASK_DEF_NAME => stmt.setString(index, task.taskDefinitionName)
        case COL_EXECUTABLE =>
          stmt.setString(index, PostgresJsonMarshaller.toJson(task.executable))
        case COL_ATTEMPTS => stmt.setInt(index, task.previousAttempts)
        case COL_STARTED  => stmt.setTimestamp(index, task.startedAt)
        case COL_STATUS =>
          stmt.setString(index, task.status match {
            case TaskStatus.Success(_)    => STATUS_SUCCEEDED
            case TaskStatus.Failure(_, _) => STATUS_FAILED
            case TaskStatus.Running()     => STATUS_RUNNING
          })
        case COL_REASON =>
          stmt.setString(index, task.status match {
            case TaskStatus.Failure(_, reasons) => reasons.mkString(",")
            case _                              => null
          })
        case COL_ENDED_AT =>
          stmt.setTimestamp(index, task.endedAt.getOrElse(null))
      }
      index += 1
    }
  }

} 
Example 29
Source File: TaskTriggerRequestMarshaller.scala    From sundial   with MIT License
package dao.postgres.marshalling

import java.sql.{Connection, PreparedStatement, ResultSet}
import java.util.UUID
import dao.postgres.common.TaskTriggerRequestTable
import model.TaskTriggerRequest
import util.JdbcUtil._

object TaskTriggerRequestMarshaller {

  def marshal(request: TaskTriggerRequest,
              stmt: PreparedStatement,
              columns: Seq[String],
              startIndex: Int = 1)(implicit conn: Connection) = {
    import TaskTriggerRequestTable._
    var index = startIndex
    columns.foreach { col =>
      col match {
        case COL_REQUEST_ID => stmt.setObject(index, request.requestId)
        case COL_PROCESS_DEF_NAME =>
          stmt.setString(index, request.processDefinitionName)
        case COL_TASK_DEF_NAME =>
          stmt.setString(index, request.taskDefinitionName)
        case COL_REQUESTED_AT => stmt.setTimestamp(index, request.requestedAt)
        case COL_STARTED_PROCESS_ID =>
          stmt.setObject(index, request.startedProcessId.orNull)
      }
      index += 1
    }
  }

  def unmarshal(rs: ResultSet): TaskTriggerRequest = {
    import TaskTriggerRequestTable._
    TaskTriggerRequest(
      requestId = rs.getObject(COL_REQUEST_ID).asInstanceOf[UUID],
      processDefinitionName = rs.getString(COL_PROCESS_DEF_NAME),
      taskDefinitionName = rs.getString(COL_TASK_DEF_NAME),
      requestedAt = javaDate(rs.getTimestamp(COL_REQUESTED_AT)),
      startedProcessId =
        Option(rs.getObject(COL_STARTED_PROCESS_ID)).map(_.asInstanceOf[UUID])
    )
  }

} 
Example 30
Source File: PostgresTaskLogsDao.scala    From sundial   with MIT License
package dao.postgres

import java.sql.{Connection, ResultSet}
import java.util.{Date, UUID}

import dao.TaskLogsDao
import model.TaskEventLog

import util.JdbcUtil._

class PostgresTaskLogsDao(implicit conn: Connection) extends TaskLogsDao {

  final val TABLE = "task_log"
  final val COL_ID = "task_log_id"
  final val COL_TASK_ID = "task_id"
  final val COL_WHEN = "when_" // 'when' is a reserved word in PostgreSQL
  final val COL_SOURCE = "source"
  final val COL_MESSAGE = "message"

  private def unmarshal(rs: ResultSet): TaskEventLog = {
    TaskEventLog(
      id = rs.getObject(COL_ID).asInstanceOf[UUID],
      taskId = rs.getObject(COL_TASK_ID).asInstanceOf[UUID],
      when = new Date(rs.getTimestamp(COL_WHEN).getTime()),
      source = rs.getString(COL_SOURCE),
      message = rs.getString(COL_MESSAGE)
    )
  }

  override def loadEventsForTask(taskId: UUID) = {
    val stmt =
      conn.prepareStatement(s"SELECT * FROM $TABLE WHERE $COL_TASK_ID = ?")
    stmt.setObject(1, taskId)
    stmt.executeQuery().map(unmarshal).toList
  }

  override def saveEvents(events: Seq[TaskEventLog]): Unit = {
    val sql =
      s"""
         |INSERT INTO $TABLE
         |($COL_ID, $COL_TASK_ID, $COL_WHEN, $COL_SOURCE, $COL_MESSAGE)
         |VALUES
         |(?, ?, ?, ?, ?)
       """.stripMargin
    val stmt = conn.prepareStatement(sql)
    events.foreach { event =>
      stmt.setObject(1, event.id)
      stmt.setObject(2, event.taskId)
      stmt.setTimestamp(3, new java.sql.Timestamp(event.when.getTime))
      stmt.setString(4, event.source)
      stmt.setString(5, event.message)
      stmt.addBatch()
    }
    stmt.executeBatch()
  }

} 
Example 31
Source File: JdbcUtil.scala    From sundial   with MIT License
package util

import java.sql.{Connection, Timestamp, ResultSet}
import java.util.Date
import scala.language.implicitConversions

object JdbcUtil {

  implicit def resultSetItr(resultSet: ResultSet): Stream[ResultSet] = {
    new Iterator[ResultSet] {
      def hasNext = resultSet.next()
      def next() = resultSet
    }.toStream
  }

  implicit def javaDate(ts: Timestamp): Date = {
    new Date(ts.getTime())
  }

  implicit def dateToTimestamp(date: Date) = {
    if (date != null)
      new Timestamp(date.getTime())
    else
      null
  }

  private def getNullable[T](rs: ResultSet, f: ResultSet => T): Option[T] = {
    val obj = f(rs)
    if (rs.wasNull()) {
      Option.empty
    } else {
      Some(obj)
    }
  }

  def getIntOption(rs: ResultSet, col: String) =
    getNullable(rs, rs => rs.getInt(col))

  def makeStringArray(seq: Seq[String])(implicit conn: Connection) = {
    conn.createArrayOf("varchar", seq.toArray[AnyRef])
  }

  def getStringArray(rs: ResultSet, col: String) = {
    Option(rs.getArray(col))
      .map(_.getArray().asInstanceOf[Array[String]].toList)
  }

} 
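A hedged usage sketch of these helpers (the H2 connection and people table are illustrative). The implicit resultSetItr conversion lets a ResultSet be mapped like a collection, and getIntOption distinguishes SQL NULL from 0 via wasNull():

// JdbcUtilDemo.scala -- hypothetical usage of the helpers above
import java.sql.DriverManager
import util.JdbcUtil._

object JdbcUtilDemo extends App {
  val conn = DriverManager.getConnection("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1")
  conn.createStatement().execute("CREATE TABLE people(name VARCHAR(64), age INT)")
  conn.createStatement().execute("INSERT INTO people VALUES ('Ada', 36), ('Alan', NULL)")

  val stmt = conn.prepareStatement("SELECT name, age FROM people ORDER BY name")
  // Same pattern as PostgresTaskLogsDao above: executeQuery().map(...).toList
  val ages: List[(String, Option[Int])] =
    stmt.executeQuery().map(rs => rs.getString("name") -> getIntOption(rs, "age")).toList
  println(ages) // List((Ada,Some(36)), (Alan,None))
}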
Example 32
Source File: JdbcShardedReadOptions.scala    From scio   with Apache License 2.0
package com.spotify.scio.jdbc.sharded

import java.sql.ResultSet
import com.spotify.scio.jdbc.JdbcConnectionOptions


final case class JdbcShardedReadOptions[T, S](
  connectionOptions: JdbcConnectionOptions,
  tableName: String,
  shardColumn: String,
  shard: Shard[S],
  rowMapper: ResultSet => T,
  fetchSize: Int = JdbcShardedReadOptions.DefaultFetchSize,
  numShards: Int = JdbcShardedReadOptions.DefaultNumShards
)

object JdbcShardedReadOptions {

  val DefaultFetchSize: Int = 100000
  val UnboundedFetchSize: Int = -1
  val DefaultNumShards: Int = 4

} 
Example 33
Source File: JdbcShardedSelectTest.scala    From scio   with Apache License 2.0
package com.spotify.scio.jdbc
package sharded

import java.sql.ResultSet

import com.spotify.scio._
import com.spotify.scio.io.TextIO
import com.spotify.scio.testing._

object JdbcShardedSelectJob {
  def main(cmdlineArgs: Array[String]): Unit = {
    val (opts, _) = ScioContext.parseArguments[CloudSqlOptions](cmdlineArgs)
    val sc = ScioContext(opts)
    sc.jdbcShardedSelect(getShardedReadOptions(opts))
      .map(_ + "J")
      .saveAsTextFile("output")
    sc.run()
    ()
  }

  def getShardedReadOptions(opts: CloudSqlOptions): JdbcShardedReadOptions[String, Long] =
    JdbcShardedReadOptions(
      connectionOptions = JdbcJob.getConnectionOptions(opts),
      tableName = "test_table",
      shard = Shard.range[Long],
      rowMapper = (rs: ResultSet) => rs.getString("id"),
      fetchSize = 100000,
      numShards = 8,
      shardColumn = "id"
    )
}

class JdbcShardedSelectTest extends PipelineSpec {

  it should "pass correct sharded JDBC read" in {
    val args = Array(
      "--cloudSqlUsername=john",
      "--cloudSqlPassword=secret",
      "--cloudSqlDb=mydb",
      "--cloudSqlInstanceConnectionName=project-id:zone:db-instance-name"
    )
    val (opts, _) = ScioContext.parseArguments[CloudSqlOptions](args)
    val readOpts = JdbcShardedSelectJob.getShardedReadOptions(opts)

    JobTest[JdbcShardedSelectJob.type]
      .args(args: _*)
      .input(JdbcShardedSelect(readOpts), Seq("a", "b", "c"))
      .output(TextIO("output")) { coll =>
        coll should containInAnyOrder(Seq("aJ", "bJ", "cJ"))
      }
      .run()
  }

} 
Example 34
Source File: PStatementTest.scala    From yoda-orm   with MIT License
package in.norbor.yoda.orm

import java.sql.{Connection, DriverManager, ResultSet, Timestamp}

import com.typesafe.scalalogging.LazyLogging
import in.norbor.yoda.implicits.JavaSqlImprovement._
import mocks.People
import org.joda.time.DateTime
import org.scalatest.funsuite.AnyFunSuite


class PStatementTest extends AnyFunSuite {

  Class.forName("org.h2.Driver")

  private implicit val conn: Connection = DriverManager.getConnection("jdbc:h2:~/test", "sa", "")

  test("0) apply") {

    val ps = PStatement("SELECT 1")(conn)
    assert(ps !== null)

    ps.equals(null)
    ps.canEqual(null)
    ps.hashCode
    ps.toString
    ps.productPrefix
    ps.productArity
    ps.productElement(0)
    ps.productIterator
    ps.copy()
  }

  test("0) query") {

    PStatement("DROP TABLE IF EXISTS yoda_sql; CREATE TABLE yoda_sql (id INTEGER);")
      .update
  }

  test("0) update") {

    val rs = PStatement("""select 1""")
      .query

    assert(rs !== null)
  }

  test("0) queryOne with non index parameter") {

    val result = PStatement("""select ?, ?, ?, ?, ?, ?, ?, ?""")
      .setBoolean(true)
      .setInt(1)
      .setLong(1L)
      .setDouble(1)
      .setString("YO")
      .setDateTime(DateTime.now)
      .setTimestamp(new Timestamp(System.currentTimeMillis))
      .setTimestamp(null)
      .queryOne(parse)

    assert(result.head._1 === true)
  }

  test("3) queryList with parse method") {

    val peoples = PStatement("""select 1 as id, 'Peerapat' as name, now() as born;""")
      .queryList(parsePeople)

    assert(peoples.head.id === 1)
    assert(peoples.head.name === "Peerapat")
    assert(peoples.head.born.getMillis <= DateTime.now.getMillis)
  }

  test("5) batch") {

    val insert = PStatement("INSERT INTO yoda_sql VALUES(?)")
      .setInt(1)
      .addBatch()
      .setInt(2)
      .addBatch()
      .executeBatch

    assert(insert.length === 2)
  }


  private def parse(rs: ResultSet): (Boolean, Int, Long, Double, String, DateTime, Timestamp) = (rs.getBoolean(1)
    , rs.getInt(2)
    , rs.getLong(3)
    , rs.getDouble(4)
    , rs.getString(5)
    , rs.getDateTime(6)
    , rs.getTimestamp(7)
  )

  private def parsePeople(rs: ResultSet): People = People(id = rs.getLong("id")
    , name = rs.getString("name")
    , born = rs.getDateTime("born")
  )

} 
Example 35
Source File: MemsqlRDD.scala    From memsql-spark-connector   with Apache License 2.0
package com.memsql.spark

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.memsql.spark.SQLGen.VariableList
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCOptions, JdbcUtils}
import org.apache.spark.sql.types._
import org.apache.spark.{InterruptibleIterator, Partition, SparkContext, TaskContext}

case class MemsqlRDD(query: String,
                     variables: VariableList,
                     options: MemsqlOptions,
                     schema: StructType,
                     expectedOutput: Seq[Attribute],
                     @transient val sc: SparkContext)
    extends RDD[Row](sc, Nil) {

  override protected def getPartitions: Array[Partition] =
    MemsqlQueryHelpers.GetPartitions(options, query, variables)

  override def compute(rawPartition: Partition, context: TaskContext): Iterator[Row] = {
    var closed                     = false
    var rs: ResultSet              = null
    var stmt: PreparedStatement    = null
    var conn: Connection           = null
    var partition: MemsqlPartition = rawPartition.asInstanceOf[MemsqlPartition]

    def tryClose(name: String, what: AutoCloseable): Unit = {
      try {
        if (what != null) { what.close() }
      } catch {
        case e: Exception => logWarning(s"Exception closing $name", e)
      }
    }

    def close(): Unit = {
      if (closed) { return }
      tryClose("resultset", rs)
      tryClose("statement", stmt)
      tryClose("connection", conn)
      closed = true
    }

    context.addTaskCompletionListener { context =>
      close()
    }

    conn = JdbcUtils.createConnectionFactory(partition.connectionInfo)()
    stmt = conn.prepareStatement(partition.query)
    JdbcHelpers.fillStatement(stmt, partition.variables)
    rs = stmt.executeQuery()

    var rowsIter = JdbcUtils.resultSetToRows(rs, schema)

    if (expectedOutput.nonEmpty) {
      val schemaDatatypes   = schema.map(_.dataType)
      val expectedDatatypes = expectedOutput.map(_.dataType)

      if (schemaDatatypes != expectedDatatypes) {
        val columnEncoders = schemaDatatypes.zip(expectedDatatypes).zipWithIndex.map {
          case ((_: StringType, _: NullType), _)     => ((_: Row) => null)
          case ((_: ShortType, _: BooleanType), i)   => ((r: Row) => r.getShort(i) != 0)
          case ((_: IntegerType, _: BooleanType), i) => ((r: Row) => r.getInt(i) != 0)
          case ((_: LongType, _: BooleanType), i)    => ((r: Row) => r.getLong(i) != 0)

          case ((l, r), i) => {
            options.assert(l == r, s"MemsqlRDD: unable to encode ${l} into ${r}")
            ((r: Row) => r.get(i))
          }
        }

        rowsIter = rowsIter
          .map(row => Row.fromSeq(columnEncoders.map(_(row))))
      }
    }

    CompletionIterator[Row, Iterator[Row]](new InterruptibleIterator[Row](context, rowsIter), close)
  }

} 
Example 36
Source File: ResultSetDataConverter.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.jdbc.source

import java.sql.ResultSet

import oharastream.ohara.client.configurator.InspectApi.RdbColumn
import oharastream.ohara.connector.jdbc.datatype.RDBDataTypeConverter
import oharastream.ohara.connector.jdbc.util.ColumnInfo


object ResultSetDataConverter {
  protected[source] def converterRecord(
    rdbDataTypeConverter: RDBDataTypeConverter,
    resultSet: ResultSet,
    columns: Seq[RdbColumn]
  ): Seq[ColumnInfo[_]] = {
    columns.map(column => {
      val resultValue: Any = rdbDataTypeConverter.converterValue(resultSet, column)
      // Setting data value to ColumnInfo case class
      ColumnInfo(column.name, column.dataType, resultValue)
    })
  }
} 
Example 37
Source File: QueryResultIterator.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.jdbc.source

import java.sql.ResultSet

import oharastream.ohara.client.configurator.InspectApi.RdbColumn
import oharastream.ohara.connector.jdbc.datatype.RDBDataTypeConverter
import oharastream.ohara.connector.jdbc.util.ColumnInfo

class QueryResultIterator(
  rdbDataTypeConverter: RDBDataTypeConverter,
  var resultSet: ResultSet,
  columns: Seq[RdbColumn]
) extends Iterator[Seq[ColumnInfo[_]]] {
  private[this] var cache: Seq[ColumnInfo[_]] = _

  
  override def hasNext: Boolean = {
    if (cache == null && resultSet.next())
      cache = ResultSetDataConverter.converterRecord(rdbDataTypeConverter, resultSet, columns)
    cache != null
  }

  override def next(): Seq[ColumnInfo[_]] = {
    if (!hasNext) throw new NoSuchElementException("Cache has no data")
    else
      try cache
      finally cache = null
  }
} 
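
Note: a brief sketch of how `QueryResultIterator` might be driven, reusing the imports from the file above; the converter, column metadata, and query text are placeholders, not part of the original connector code.

import java.sql.Connection

// Hypothetical usage: stream every row of a query through QueryResultIterator.
def readAll(
  conn: Connection,
  converter: RDBDataTypeConverter,
  columns: Seq[RdbColumn]
): List[Seq[ColumnInfo[_]]] = {
  val stmt = conn.createStatement()
  try {
    val rs = stmt.executeQuery("SELECT * FROM some_table")
    new QueryResultIterator(converter, rs, columns).toList // toList drains the iterator before close
  } finally stmt.close()
}
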
Example 38
Source File: V3__Recompute_Key_Hash.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

// Note: package name must correspond exactly to the flyway 'locations' setting, which defaults to
// 'db.migration.postgres' for postgres migrations
package db.migration.postgres

import java.sql.{Connection, ResultSet}

import anorm.{BatchSql, NamedParameter}
import com.daml.lf.data.Ref
import com.daml.lf.transaction.Node.GlobalKey
import com.daml.lf.value.Value.ContractId
import com.daml.platform.store.serialization.{KeyHasher, ValueSerializer}
import org.flywaydb.core.api.migration.{BaseJavaMigration, Context}

class V3__Recompute_Key_Hash extends BaseJavaMigration {

  // the number of contracts processed in a batch.
  private val batchSize = 10 * 1000

  def migrate(context: Context): Unit = {
    implicit val conn: Connection = context.getConnection
    updateKeyHashed(loadContractKeys)
  }

  private def loadContractKeys(
      implicit connection: Connection
  ): Iterator[(ContractId, GlobalKey)] = {

    val SQL_SELECT_CONTRACT_KEYS =
      """
      |SELECT
      |  contracts.id as contract_id,
      |  contracts.package_id as package_id,
      |  contracts.name as template_name,
      |  contracts.key as contract_key
      |FROM
      |  contracts
      |WHERE
      |  contracts.key is not null
    """.stripMargin

    val rows: ResultSet = connection.createStatement().executeQuery(SQL_SELECT_CONTRACT_KEYS)

    new Iterator[(ContractId, GlobalKey)] {

      var hasNext: Boolean = rows.next()

      def next(): (ContractId, GlobalKey) = {
        val contractId = ContractId.assertFromString(rows.getString("contract_id"))
        val templateId = Ref.Identifier(
          packageId = Ref.PackageId.assertFromString(rows.getString("package_id")),
          qualifiedName = Ref.QualifiedName.assertFromString(rows.getString("template_name"))
        )
        val key = ValueSerializer
          .deserializeValue(rows.getBinaryStream("contract_key"))
          .assertNoCid(coid => s"Found contract ID $coid in contract key")

        hasNext = rows.next()
        contractId -> GlobalKey(templateId, key.value)
      }
    }

  }

  private def updateKeyHashed(contractKeys: Iterator[(ContractId, GlobalKey)])(
      implicit conn: Connection): Unit = {

    val SQL_UPDATE_CONTRACT_KEYS_HASH =
      """
        |UPDATE
        |  contract_keys
        |SET
        |  value_hash = {valueHash}
        |WHERE
        |  contract_id = {contractId}
      """.stripMargin

    val statements = contractKeys.map {
      case (cid, key) =>
        Seq[NamedParameter]("contractId" -> cid.coid, "valueHash" -> KeyHasher.hashKeyString(key))
    }

    statements.toStream.grouped(batchSize).foreach { batch =>
      BatchSql(
        SQL_UPDATE_CONTRACT_KEYS_HASH,
        batch.head,
        batch.tail: _*
      ).execute()
    }
  }

} 
Example 39
Source File: JDBCSink.scala    From BigData-News   with Apache License 2.0 5 votes vote down vote up
package com.vita.spark

import java.sql.{Connection, ResultSet, SQLException, Statement}

import org.apache.log4j.{LogManager, Logger}
import org.apache.spark.sql.{ForeachWriter, Row}

/**
  * Handles writing data from Structured Streaming into MySQL.
  */
class JDBCSink(url: String, username: String, password: String) extends ForeachWriter[Row] {

  var statement: Statement = _
  var resultSet: ResultSet = _
  var connection: Connection = _

  override def open(partitionId: Long, version: Long): Boolean = {
    connection = new MySqlPool(url, username, password).getJdbcConn()
    statement = connection.createStatement()
    print("open")
    true
  }

  override def process(value: Row): Unit = {
    println("process step one")
    val titleName = value.getAs[String]("titleName").replaceAll("[\\[\\]]", "")
    val count = value.getAs[Long]("count")

    val querySql = "select 1 from webCount where titleName = '" + titleName + "'"
    val insertSql = "insert into webCount(titleName,count) values('" + titleName + "' , '" + count + "')"
    val updateSql = "update webCount set count = " + count + " where titleName = '" + titleName + "'"
    println("process step two")
    try {
      // check whether the connection is successful
      val resultSet = statement.executeQuery(querySql)
      if (resultSet.next()) {
        println("updateSql")
        statement.executeUpdate(updateSql)
      } else {
        println("insertSql")
        statement.execute(insertSql)
      }

    } catch {
      // more specific exception types must precede Exception/Throwable,
      // otherwise the later cases are unreachable
      case ex: SQLException =>
        println("SQLException")
      case ex: RuntimeException =>
        println("RuntimeException")
      case ex: Exception =>
        println("Exception")
      case ex: Throwable =>
        println("Throwable")
    }
  }

  override def close(errorOrNull: Throwable): Unit = {
    // close resources only when they were actually created
    if (statement != null) {
      statement.close()
    }
    if (connection != null) {
      connection.close()
    }
  }
} 
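
Note: `process` above assembles SQL by string concatenation, which is brittle with quoting and open to SQL injection. Below is a hedged sketch of the same insert-or-update logic using `PreparedStatement` placeholders; only the `webCount` table and its two columns are taken from the example, the helper itself is illustrative.

import java.sql.Connection

// Sketch: parameterized insert-or-update for the webCount table.
def upsertWebCount(connection: Connection, titleName: String, count: Long): Unit = {
  val select = connection.prepareStatement("SELECT 1 FROM webCount WHERE titleName = ?")
  select.setString(1, titleName)
  val rs = select.executeQuery()
  try {
    if (rs.next()) {
      val update = connection.prepareStatement("UPDATE webCount SET count = ? WHERE titleName = ?")
      update.setLong(1, count)
      update.setString(2, titleName)
      update.executeUpdate()
      update.close()
    } else {
      val insert = connection.prepareStatement("INSERT INTO webCount(titleName, count) VALUES (?, ?)")
      insert.setString(1, titleName)
      insert.setLong(2, count)
      insert.executeUpdate()
      insert.close()
    }
  } finally {
    rs.close()
    select.close()
  }
}
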
Example 40
Source File: PostgresInteropTest.scala    From spark-alchemy   with Apache License 2.0 5 votes vote down vote up
package com.swoop.alchemy.spark.expressions.hll

import java.sql.{DriverManager, ResultSet, Statement}

import com.swoop.alchemy.spark.expressions.hll.functions._
import com.swoop.test_utils.SparkSessionSpec
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.scalatest.{Matchers, WordSpec}


case class Postgres(user: String, database: String, port: Int) {
  val con_str = s"jdbc:postgresql://localhost:$port/$database?user=$user"

  def execute[T](query: String, handler: ResultSet => T): T =
    execute(stm => handler(stm.executeQuery(query)))

  def update(query: String): Unit =
    execute(_.executeUpdate(query))

  def sparkRead(schema: String, table: String)(implicit spark: SparkSession): DataFrame =
    spark.read
      .format("jdbc")
      .option("url", s"jdbc:postgresql:${database}")
      .option("dbtable", s"${schema}.${table}")
      .option("user", user)
      .load()

  def sparkWrite(schema: String, table: String)(df: DataFrame): Unit =
    df.write
      .format("jdbc")
      .option("url", s"jdbc:postgresql:${database}")
      .option("dbtable", s"${schema}.${table}")
      .option("user", user)
      .save()

  private def execute[T](fn: Statement => T): T = {
    val conn = DriverManager.getConnection(con_str)
    try {
      val stm = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
      fn(stm)
    } finally {
      conn.close()
    }
  }
}


class PostgresInteropTest extends WordSpec with Matchers with SparkSessionSpec {

  import testImplicits._

  lazy val pg = Postgres("postgres", "postgres", 5432)

  "Postgres interop" should {
    "calculate same results" in {
      // use Aggregate Knowledge (Postgres-compatible) HLL implementation
      spark.conf.set(IMPLEMENTATION_CONFIG_KEY, "AGKN")

      // init Postgres extension for database
      pg.update("CREATE EXTENSION IF NOT EXISTS hll;")

      // create some random not-entirely distinct rows
      val rand = new scala.util.Random(42)
      val n = 100000
      val randomDF = sc.parallelize(
        Seq.fill(n) {
          (rand.nextInt(24), rand.nextInt(n))
        }
      ).toDF("hour", "id").cache

      // create hll aggregates (by hour)
      val byHourDF = randomDF.groupBy("hour").agg(hll_init_agg("id", .39).as("hll_id")).cache

      // send hlls to postgres
      pg.update("DROP TABLE IF EXISTS spark_hlls CASCADE;")
      pg.sparkWrite("public", "spark_hlls")(byHourDF)

      // convert hll column from `bytea` to `hll` type
      pg.update(
        """
          |ALTER TABLE spark_hlls
          |ALTER COLUMN hll_id TYPE hll USING CAST (hll_id AS hll);
          |""".stripMargin
      )

      // re-aggregate all hours in Spark
      val distinctSpark = byHourDF.select(hll_cardinality(hll_merge(byHourDF("hll_id")))).as[Long].first()
      // re-aggregate all hours in Postgres
      val distinctPostgres = pg.execute(
        "SELECT CAST (hll_cardinality(hll_union_agg(hll_id)) as Integer) AS approx FROM spark_hlls",
        (rs) => {
          rs.next;
          rs.getInt("approx")
        }
      )

      distinctSpark should be(distinctPostgres)
    }
  }

} 
Example 41
Source File: JdbcPrimitives.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.jdbc

import java.sql.{Connection, DriverManager, ResultSet}

import com.sksamuel.exts.Logging
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.schema.StructType

trait JdbcPrimitives extends Logging {

  def connect(url: String): Connection = {
    logger.debug(s"Connecting to jdbc source $url...")
    val conn = DriverManager.getConnection(url)
    logger.debug(s"Connected to $url")
    conn
  }

  def schemaFor(dialect: JdbcDialect, rs: ResultSet): StructType = {
    val schema = JdbcSchemaFns.fromJdbcResultset(rs, dialect)
    logger.trace("Fetched schema:\n" + schema.show())
    schema
  }
} 
Example 42
Source File: JdbcSchemaFns.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.jdbc

import java.sql.{ResultSet, ResultSetMetaData}

import com.sksamuel.exts.Logging
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.schema.{Field, StructType}


object JdbcSchemaFns extends Logging {

  def fromJdbcResultset(rs: ResultSet, dialect: JdbcDialect): StructType = {

    val md = rs.getMetaData
    val columnCount = md.getColumnCount
    logger.trace(s"Resultset column count is $columnCount")

    val cols = (1 to columnCount).map { k =>
      Field(
          name = md.getColumnLabel(k),
          dataType = dialect.fromJdbcType(k, md),
          nullable = md.isNullable(k) == ResultSetMetaData.columnNullable
      )
    }

    StructType(cols.toList)
  }
} 
Example 43
Source File: JdbcTable.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.jdbc

import java.sql.{Connection, DatabaseMetaData, ResultSet}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import com.sksamuel.exts.jdbc.ResultSetIterator
import io.eels.component.jdbc.dialect.{GenericJdbcDialect, JdbcDialect}
import io.eels.schema.{Field, StructType}

case class JdbcTable(tableName: String,
                     dialect: JdbcDialect = new GenericJdbcDialect,
                     catalog: Option[String] = None,
                     dbSchema: Option[String] = None)
                    (implicit conn: Connection) extends Logging with JdbcPrimitives with Using {

  private val dbPrefix: String = if (dbSchema.nonEmpty) dbSchema.get + "." else ""
  private val databaseMetaData: DatabaseMetaData = conn.getMetaData
  private val tables = RsIterator(databaseMetaData.getTables(catalog.orNull, dbSchema.orNull, null, Array("TABLE", "VIEW")))
    .map(_.getString("TABLE_NAME"))

  val candidateTableName: String = tables.find(_.toLowerCase == tableName.toLowerCase).getOrElse(sys.error(s"$tableName not found!"))
  val primaryKeys: Seq[String] = RsIterator(databaseMetaData.getPrimaryKeys(catalog.orNull, dbSchema.orNull, candidateTableName))
    .map(_.getString("COLUMN_NAME")).toSeq

  val schema = StructType(
    JdbcSchemaFns
      .fromJdbcResultset(conn.createStatement().executeQuery(s"SELECT * FROM $dbPrefix$candidateTableName WHERE 1=0"), dialect)
      .fields
      .map { f =>
        Field(name = f.name,
          dataType = f.dataType,
          nullable = f.nullable,
          key = primaryKeys.contains(f.name),
          metadata = f.metadata)
      }
  )

  private case class RsIterator(rs: ResultSet) extends Iterator[ResultSet] {
    def hasNext: Boolean = rs.next()

    def next(): ResultSet = rs
  }

} 
Example 44
Source File: package.scala    From maha   with Apache License 2.0 5 votes vote down vote up
// Copyright 2017, Yahoo Holdings Inc.
// Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms.


package com.yahoo.maha

import java.sql.ResultSet

package object jdbc {

  type Seq[+A] = scala.collection.immutable.Seq[A]
  val Seq = scala.collection.immutable.Seq
  type List[+A] = scala.collection.immutable.List[A]
  val List = scala.collection.immutable.List

  implicit class RowData(rs: ResultSet) {
    def apply(columnNumber: Int): Any = rs.getObject(columnNumber)
    def apply(columnName: String): Any = rs.getObject(columnName)
    def toIterator[E](rowMapper: ResultSet => E): Iterator[E] = new Iterator[E] {
      override def hasNext: Boolean = rs.next()
      override def next(): E = rowMapper(rs)
    }
  }

} 
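
Note: a short, hypothetical usage of the `RowData` enrichment above; the connection URL, table, and column names are placeholders.

import java.sql.DriverManager
import com.yahoo.maha.jdbc._

case class User(id: Int, name: String)

// Iterate a ResultSet as case classes via the implicit RowData class.
val conn = DriverManager.getConnection("jdbc:h2:mem:demo")
val rs = conn.createStatement().executeQuery("SELECT id, name FROM users")
val users: List[User] =
  rs.toIterator(row => User(row("id").asInstanceOf[Int], row("name").asInstanceOf[String])).toList
conn.close()
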
Example 45
Source File: ThriftServerTest.scala    From Hive-JDBC-Proxy   with Apache License 2.0 5 votes vote down vote up
package com.enjoyyin.hive.proxy.jdbc.test

import java.sql.{Connection, DriverManager, ResultSet, Statement}

import com.enjoyyin.hive.proxy.jdbc.util.Utils


private object ThriftServerTest extends App {
  val sql = """show tables"""
  val test_url = "jdbc:hive2://localhost:10001/default"
  Class.forName("org.apache.hive.jdbc.HiveDriver")
  def test(index: Int) = {
    var conn: Connection = null
    var stmt: Statement = null
    var rs: ResultSet = null
    Utils.tryFinally {
      conn = DriverManager.getConnection(test_url, "hduser0009", "")
      stmt = conn.createStatement
      rs = stmt.executeQuery(sql)
      while(rs.next) {
        println ("Date: " + Utils.dateFormat(System.currentTimeMillis) + ", " + index + ".tables => " + rs.getObject(1))
      }
      println("Date: " + Utils.dateFormat(System.currentTimeMillis) + ", ready to close " + index)
    } {
      if(rs != null) Utils.tryIgnoreError(rs.close())
      if(stmt != null) Utils.tryIgnoreError(stmt.close())
      if(conn != null) Utils.tryIgnoreError(conn.close())
    }
  }
  (0 until 8).foreach(i => new Thread {
    setName("thread-" + i)
    override def run(): Unit = {
      Utils.tryCatch(test(i)) { t =>
        println("Date: " + Utils.dateFormat(System.currentTimeMillis) + ", " + i + " has occur an error.")
        t.printStackTrace()
      }
    }
  }.start())
} 
Example 46
Source File: ConnectionPool.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.jdbc

import java.sql.{Connection, PreparedStatement, ResultSet}

import wvlet.log.LogSupport
import wvlet.log.io.IOUtil.withResource

object ConnectionPool {
  def apply(config: DbConfig): ConnectionPool = {
    val pool: ConnectionPool = config.`type` match {
      case "sqlite" => new SQLiteConnectionPool(config)
      case other =>
        new GenericConnectionPool(config)
    }
    pool
  }

  def newFactory: ConnectionPoolFactory = new ConnectionPoolFactory()
}

trait ConnectionPool extends LogSupport with AutoCloseable {
  def config: DbConfig

  def withConnection[U](body: Connection => U): U
  def withTransaction[U](body: Connection => U): U = {
    withConnection { conn =>
      conn.setAutoCommit(false)
      var failed = false
      try {
        body(conn)
      } catch {
        case e: Throwable =>
          // Need to set the failed flag first because the rollback might fail
          failed = true
          conn.rollback()
          throw e
      } finally {
        if (failed == false) {
          conn.commit()
        }
      }
    }
  }

  def stop: Unit

  override def close(): Unit = stop

  def executeQuery[U](sql: String)(handler: ResultSet => U): U = {
    withConnection { conn =>
      withResource(conn.createStatement()) { stmt =>
        debug(s"execute query: ${sql}")
        withResource(stmt.executeQuery(sql)) { rs => handler(rs) }
      }
    }
  }
  def executeUpdate(sql: String): Int = {
    // TODO Add update retry
    withConnection { conn =>
      withResource(conn.createStatement()) { stmt =>
        debug(s"execute update: ${sql}")
        stmt.executeUpdate(sql)
      }
    }
  }

  def queryWith[U](preparedStatement: String)(body: PreparedStatement => Unit)(handler: ResultSet => U): U = {
    withConnection { conn =>
      withResource(conn.prepareStatement(preparedStatement)) { stmt =>
        body(stmt)
        debug(s"execute query: ${preparedStatement}")
        withResource(stmt.executeQuery) { rs => handler(rs) }
      }
    }
  }

  def updateWith(preparedStatement: String)(body: PreparedStatement => Unit): Unit = {
    withConnection { conn =>
      withResource(conn.prepareStatement(preparedStatement)) { stmt =>
        body(stmt)
        stmt.executeUpdate()
      }
    }
  }

} 
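
Note: a usage sketch for the pool's query helpers; the table and columns are hypothetical, and constructing the `ConnectionPool` from a `DbConfig` is elided (see `ConnectionPool.apply` above).

// Run a plain query and a parameterized query against an already-built pool.
def countUsers(pool: ConnectionPool): Int =
  pool.executeQuery("SELECT COUNT(*) FROM users") { rs =>
    rs.next()
    rs.getInt(1)
  }

def findUserName(pool: ConnectionPool, id: Int): Option[String] =
  pool.queryWith("SELECT name FROM users WHERE id = ?")(_.setInt(1, id)) { rs =>
    if (rs.next()) Some(rs.getString("name")) else None
  }
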
Example 47
Source File: HttpRecord.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.http.recorder
import java.sql.{Connection, ResultSet}
import java.time.Instant

import com.twitter.finagle.http.{Response, Status, Version}
import com.twitter.io.Buf
import wvlet.airframe.codec._
import wvlet.airframe.control.Control.withResource
import wvlet.airframe.http.recorder.HttpRecord.headerCodec
import wvlet.log.LogSupport


case class HttpRecord(
    session: String,
    requestHash: Int,
    method: String,
    destHost: String,
    path: String,
    requestHeader: Seq[(String, String)],
    requestBody: String,
    responseCode: Int,
    responseHeader: Seq[(String, String)],
    responseBody: String,
    createdAt: Instant
) {
  def summary: String = {
    s"${method}(${responseCode}) ${destHost}${path}: ${responseBody.substring(0, 30.min(responseBody.size))} ..."
  }

  def toResponse: Response = {
    val r = Response(Version.Http11, Status.fromCode(responseCode))

    responseHeader.foreach { x => r.headerMap.set(x._1, x._2) }

    // Decode binary contents with Base64
    val contentBytes = HttpRecordStore.decodeFromBase64(responseBody)
    r.content = Buf.ByteArray.Owned(contentBytes)
    r.contentLength = contentBytes.length
    r
  }

  def insertInto(tableName: String, conn: Connection): Unit = {
    withResource(conn.prepareStatement(s"""|insert into "${tableName}" values(
          |?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
          |)
      """.stripMargin)) { prep =>
      // TODO Implement this logic in JDBCResultSetCodec
      prep.setString(1, session)
      prep.setInt(2, requestHash)
      prep.setString(3, method)
      prep.setString(4, destHost)
      prep.setString(5, path)
      prep.setString(6, JSONCodec.toJson(headerCodec.toMsgPack(requestHeader)))
      prep.setString(7, requestBody)
      prep.setInt(8, responseCode)
      prep.setString(9, JSONCodec.toJson(headerCodec.toMsgPack(responseHeader)))
      prep.setString(10, responseBody)
      prep.setString(11, createdAt.toString)

      prep.execute()
    }
  }
}

object HttpRecord extends LogSupport {
  private[recorder] val headerCodec                               = MessageCodec.of[Seq[(String, String)]]
  private[recorder] val recordCodec                               = MessageCodec.of[HttpRecord]
  private[recorder] def createTableSQL(tableName: String): String =
    // TODO: Add a method to generate this SQL statement in airframe-codec
    s"""create table if not exists "${tableName}" (
       |  session string,
       |  requestHash string,
       |  method string,
       |  destHost string,
       |  path string,
       |  requestHeader string,
       |  requestBody string,
       |  responseCode int,
       |  responseHeader string,
       |  responseBody string,
       |  createdAt string
       |)
     """.stripMargin

  private[recorder] def read(rs: ResultSet): Seq[HttpRecord] = {
    val resultSetCodec = JDBCCodec(rs)
    resultSetCodec
      .mapMsgPackMapRows(msgpack => recordCodec.unpackBytes(msgpack))
      .filter(_.isDefined)
      .map(_.get)
      .toSeq
  }
} 
Example 48
Source File: Application.scala    From spring-scala-examples   with Apache License 2.0 5 votes vote down vote up
package hello

import java.sql.ResultSet

import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.boot.CommandLineRunner
import org.springframework.boot.SpringApplication
import org.springframework.boot.autoconfigure.SpringBootApplication
import org.springframework.jdbc.core.{JdbcTemplate, RowMapper}

import collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer

@SpringBootApplication
class Application extends CommandLineRunner {

  @Autowired
  var jdbcTemplate: JdbcTemplate = _


  val log: Logger = LoggerFactory.getLogger(classOf[Application])

  override def run(args: String*): Unit = {
    log.info("Creating tables")
    jdbcTemplate.execute("DROP TABLE customers IF EXISTS")
    jdbcTemplate.execute("CREATE TABLE customers(" +
      "id SERIAL, first_name VARCHAR(255), last_name VARCHAR(255))")

    val splitUpNames = ListBuffer("John Woo", "Jeff Dean", "Josh Bloch", "Josh Long").map(_.split(" "))
    splitUpNames.foreach(name => log.info("Inserting customer record for %s %s".format(name(0), name(1))))

    jdbcTemplate.batchUpdate("INSERT INTO customers(first_name, last_name) VALUES (?,?)", splitUpNames.asInstanceOf[mutable.Buffer[Array[AnyRef]]].asJava)

    log.info("Querying for customer records where first_name = 'Josh':")
    jdbcTemplate.query(
      "SELECT id, first_name, last_name FROM customers WHERE first_name = ?",
      Array("Josh").asInstanceOf[Array[AnyRef]],
      // no Java 8 Lambda support in Scala pre 2.12
      new RowMapper[Customer]{
        override def mapRow(rs: ResultSet, rowNum: Int): Customer = new Customer(rs.getLong("id"), rs.getString("first_name"), rs.getString("last_name"))
      })
      // Works in Scala 2.12
      // (rs: ResultSet, rowNum: Int) => new Customer(rs.getLong("id"), rs.getString("first_name"), rs.getString("last_name"))    )
      .asScala.foreach((customer:Customer) => log.info(customer.toString))
  }
}

object Application extends App {
  SpringApplication.run(classOf[Application], args:_*)
} 
Example 49
Source File: Database.scala    From schedoscope   with Apache License 2.0 5 votes vote down vote up
package org.schedoscope.test

import java.sql.{Connection, ResultSet, Statement}

import org.schedoscope.dsl.{FieldLike, View}
import org.schedoscope.schema.ddl.HiveQl

import scala.collection.mutable.{HashMap, ListBuffer}

class Database(conn: Connection, url: String) {

  def selectForViewByQuery(v: View, query: String, orderByField: Option[FieldLike[_]]): List[Map[String, Any]] = {
    val res = ListBuffer[Map[String, Any]]()
    var statement: Statement = null
    var rs: ResultSet = null

    try {
      statement = conn.createStatement()
      rs = statement.executeQuery(query)

      while (rs.next()) {
        val row = HashMap[String, Any]()
        v.fields.view.zipWithIndex.foreach(f => {
          row.put(f._1.n, ViewSerDe.deserializeField(f._1.t, rs.getString(f._2 + 1)))
        })
        res.append(row.toMap)
      }
    }
    finally {
      if (rs != null) try {
        rs.close()
      } catch {
        case _: Throwable =>
      }

      if (statement != null) try {
        statement.close()
      } catch {
        case _: Throwable =>
      }
    }

    orderByField match {
      case Some(f) => res.sortBy {
        _ (f.n) match {
          case null => ""
          case other => other.toString
        }
      } toList
      case None => res.toList
    }
  }

  def selectView(v: View, orderByField: Option[FieldLike[_]]): List[Map[String, Any]] =
    selectForViewByQuery(v, HiveQl.selectAll(v), orderByField)

} 
Example 50
Source File: BaseThriftIntegrationTestSuite.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.test.framework

import java.sql.{Connection, DriverManager, ResultSet}

class BaseThriftIntegrationTestSuite extends BaseIntegrationTestSuite {
  private var jdbcUri: String = _

  override def beforeAll(): Unit = {
    cluster = Cluster.get()
    // The JDBC endpoint must contain a valid value
    assert(cluster.jdbcEndpoint.isDefined)
    jdbcUri = cluster.jdbcEndpoint.get
  }

  def checkQuery(connection: Connection, query: String)(validate: ResultSet => Unit): Unit = {
    val ps = connection.prepareStatement(query)
    try {
      val rs = ps.executeQuery()
      try {
        validate(rs)
      } finally {
        rs.close()
      }
    } finally {
      ps.close()
    }
  }

  def withConnection[T](f: Connection => T): T = {
    val connection = DriverManager.getConnection(jdbcUri)
    try {
      f(connection)
    } finally {
      connection.close()
    }
  }
} 
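
Note: a sketch of a concrete suite built on the two helpers above; it assumes the base suite exposes ScalaTest-style `test` registration, and the query is purely illustrative.

// Hypothetical concrete suite combining withConnection and checkQuery.
class SimpleThriftQuerySuite extends BaseThriftIntegrationTestSuite {
  test("SELECT 1 returns a single row") {
    withConnection { conn =>
      checkQuery(conn, "SELECT 1") { rs =>
        assert(rs.next())
        assert(rs.getInt(1) == 1)
        assert(!rs.next())
      }
    }
  }
}
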
Example 51
Source File: TimeBasedDataService.scala    From kafka-jdbc-connector   with Apache License 2.0 5 votes vote down vote up
package com.agoda.kafka.connector.jdbc.services

import java.sql.{Connection, PreparedStatement, ResultSet, Timestamp}
import java.util.{Date, GregorianCalendar, TimeZone}

import com.agoda.kafka.connector.jdbc.JdbcSourceConnectorConstants
import com.agoda.kafka.connector.jdbc.models.DatabaseProduct
import com.agoda.kafka.connector.jdbc.models.DatabaseProduct.{MsSQL, MySQL}
import com.agoda.kafka.connector.jdbc.models.Mode.TimestampMode
import com.agoda.kafka.connector.jdbc.utils.DataConverter
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import scala.util.Try


case class TimeBasedDataService(databaseProduct: DatabaseProduct,
                                storedProcedureName: String,
                                batchSize: Int,
                                batchSizeVariableName: String,
                                timestampVariableName: String,
                                var timestampOffset: Long,
                                timestampFieldName: String,
                                topic: String,
                                keyFieldOpt: Option[String],
                                dataConverter: DataConverter,
                                calendar: GregorianCalendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"))
                               ) extends DataService {

  override def createPreparedStatement(connection: Connection): Try[PreparedStatement] = Try {
    val preparedStatement = databaseProduct match {
      case MsSQL => connection.prepareStatement(s"EXECUTE $storedProcedureName @$timestampVariableName = ?, @$batchSizeVariableName = ?")
      case MySQL => connection.prepareStatement(s"CALL $storedProcedureName (@$timestampVariableName := ?, @$batchSizeVariableName := ?)")
    }
    preparedStatement.setTimestamp(1, new Timestamp(timestampOffset), calendar)
    preparedStatement.setObject(2, batchSize)
    preparedStatement
  }

  override def extractRecords(resultSet: ResultSet, schema: Schema): Try[Seq[SourceRecord]] = Try {
    val sourceRecords = ListBuffer.empty[SourceRecord]
    var max = timestampOffset
    while (resultSet.next()) {
      dataConverter.convertRecord(schema, resultSet) map { record =>
        val time = record.get(timestampFieldName).asInstanceOf[Date].getTime
        max = if(time > max) {
          keyFieldOpt match {
            case Some(keyField) =>
              sourceRecords += new SourceRecord(
                Map(JdbcSourceConnectorConstants.STORED_PROCEDURE_NAME_KEY -> storedProcedureName).asJava,
                Map(TimestampMode.entryName -> time).asJava, topic, null, schema, record.get(keyField), schema, record
              )
            case None           =>
              sourceRecords += new SourceRecord(
                Map(JdbcSourceConnectorConstants.STORED_PROCEDURE_NAME_KEY -> storedProcedureName).asJava,
                Map(TimestampMode.entryName -> time).asJava, topic, schema, record
              )
          }
          time
        } else max
      }
    }
    timestampOffset = max
    sourceRecords
  }

  override def toString: String = {
    s"""
       |{
       |   "name" : "${this.getClass.getSimpleName}"
       |   "mode" : "${TimestampMode.entryName}"
       |   "stored-procedure.name" : "$storedProcedureName"
       |}
    """.stripMargin
  }
} 
Example 52
Source File: RDBDataTypeConverter.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.connector.jdbc.datatype

import java.sql.{Date, ResultSet, Time, Timestamp}
import java.util.Optional

import oharastream.ohara.client.configurator.InspectApi.RdbColumn
import oharastream.ohara.connector.jdbc.util.DateTimeUtils

trait RDBDataTypeConverter {
  
  def converterValue(resultSet: ResultSet, column: RdbColumn): Any = {
    val columnName             = column.name
    val typeName               = column.dataType.toUpperCase
    val dataType: DataTypeEnum = converterDataType(column)
    dataType match {
      case DataTypeEnum.INTEGER =>
        java.lang.Integer.valueOf(resultSet.getInt(columnName))
      case DataTypeEnum.LONG =>
        java.lang.Long.valueOf(resultSet.getLong(columnName))
      case DataTypeEnum.BOOLEAN =>
        java.lang.Boolean.valueOf(resultSet.getBoolean(columnName))
      case DataTypeEnum.FLOAT =>
        java.lang.Float.valueOf(resultSet.getFloat(columnName))
      case DataTypeEnum.DOUBLE =>
        java.lang.Double.valueOf(resultSet.getDouble(columnName))
      case DataTypeEnum.BIGDECIMAL =>
        Optional.ofNullable(resultSet.getBigDecimal(columnName)).orElseGet(() => new java.math.BigDecimal(0L))
      case DataTypeEnum.STRING =>
        Optional.ofNullable(resultSet.getString(columnName)).orElseGet(() => "null")
      case DataTypeEnum.DATE =>
        Optional.ofNullable(resultSet.getDate(columnName, DateTimeUtils.CALENDAR)).orElseGet(() => new Date(0))
      case DataTypeEnum.TIME =>
        Optional.ofNullable(resultSet.getTime(columnName, DateTimeUtils.CALENDAR)).orElseGet(() => new Time(0))
      case DataTypeEnum.TIMESTAMP =>
        Optional
          .ofNullable(resultSet.getTimestamp(columnName, DateTimeUtils.CALENDAR))
          .orElseGet(() => new Timestamp(0))
      case DataTypeEnum.BYTES =>
        Optional.ofNullable(resultSet.getBytes(columnName)).orElseGet(() => Array())
      case _ =>
        throw new UnsupportedOperationException(
          s"JDBC Source Connector not support ${typeName} data type in ${columnName} column for ${dataBaseProductName} implement."
        )
    }
  }
  protected[datatype] def dataBaseProductName: String

  protected[datatype] def converterDataType(column: RdbColumn): DataTypeEnum
} 
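
Note: a minimal sketch of a concrete converter for the trait above. Only `dataBaseProductName` and the type-name-to-enum mapping need to be supplied; the mapping shown is illustrative rather than the project's actual MySQL or Oracle mapping, and the sketch assumes it lives in the same `datatype` package.

package oharastream.ohara.connector.jdbc.datatype

import oharastream.ohara.client.configurator.InspectApi.RdbColumn

// Illustrative converter covering only a handful of SQL type names.
class SimpleDataTypeConverter extends RDBDataTypeConverter {
  override protected[datatype] def dataBaseProductName: String = "simple-db"

  override protected[datatype] def converterDataType(column: RdbColumn): DataTypeEnum =
    column.dataType.toUpperCase match {
      case "INT" | "INTEGER"  => DataTypeEnum.INTEGER
      case "BIGINT"           => DataTypeEnum.LONG
      case "CHAR" | "VARCHAR" => DataTypeEnum.STRING
      case "TIMESTAMP"        => DataTypeEnum.TIMESTAMP
      case other =>
        throw new UnsupportedOperationException(s"unsupported data type: $other")
    }
}
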
Example 53
Source File: PrepareMonixJdbcSpecBase.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill

import java.sql.{ Connection, PreparedStatement, ResultSet }

import io.getquill.context.jdbc.ResultSetExtractor
import io.getquill.context.sql.ProductSpec
import monix.eval.Task
import org.scalactic.Equality

trait PrepareMonixJdbcSpecBase extends ProductSpec {

  implicit val productEq = new Equality[Product] {
    override def areEqual(a: Product, b: Any): Boolean = b match {
      case Product(_, desc, sku) => desc == a.description && sku == a.sku
      case _                     => false
    }
  }

  def productExtractor: ResultSet => Product

  def withOrderedIds(products: List[Product]) =
    products.zipWithIndex.map { case (product, id) => product.copy(id = id.toLong + 1) }

  def singleInsert(conn: => Connection)(prep: Connection => Task[PreparedStatement]) = {
    Task(conn).bracket { conn =>
      prep(conn).bracket { stmt =>
        Task(stmt.execute())
      }(stmt => Task(stmt.close()))
    }(conn => Task(conn.close()))
  }

  def batchInsert(conn: => Connection)(prep: Connection => Task[List[PreparedStatement]]) = {
    Task(conn).bracket { conn =>
      prep(conn).flatMap(stmts =>
        Task.sequence(
          stmts.map(stmt =>
            Task(stmt).bracket { stmt =>
              Task(stmt.execute())
            }(stmt => Task(stmt.close())))
        ))
    }(conn => Task(conn.close()))
  }

  def extractResults[T](conn: => Connection)(prep: Connection => Task[PreparedStatement])(extractor: ResultSet => T) = {
    Task(conn).bracket { conn =>
      prep(conn).bracket { stmt =>
        Task(stmt.executeQuery()).bracket { rs =>
          Task(ResultSetExtractor(rs, extractor))
        }(rs => Task(rs.close()))
      }(stmt => Task(stmt.close()))
    }(conn => Task(conn.close()))
  }

  def extractProducts(conn: => Connection)(prep: Connection => Task[PreparedStatement]) =
    extractResults(conn)(prep)(productExtractor)
} 
Example 54
Source File: StreamResultsOrBlowUpSpec.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill.integration

import java.sql.{ Connection, ResultSet }

import io.getquill._
import io.getquill.context.monix.Runner
import monix.execution.Scheduler
import monix.execution.schedulers.CanBlock
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration


class StreamResultsOrBlowUpSpec extends Spec {

  case class Person(name: String, age: Int)

  private implicit val scheduler = Scheduler.io()

  // set to true in order to create a ResultSet type (i.e. a rewindable one)
  // that will force jdbc to load the entire ResultSet into memory and crash this test.
  val doBlowUp = false

  val ctx = new PostgresMonixJdbcContext(Literal, "testPostgresDB", Runner.default) {
    override protected def prepareStatementForStreaming(sql: String, conn: Connection, fetchSize: Option[Int]) = {
      val stmt =
        conn.prepareStatement(
          sql,
          if (doBlowUp) ResultSet.TYPE_SCROLL_SENSITIVE
          else ResultSet.TYPE_FORWARD_ONLY,
          ResultSet.CONCUR_READ_ONLY
        )
      fetchSize.foreach(stmt.setFetchSize(_))
      stmt
    }
  }
  import ctx.{ run => runQuill, _ }

  val numRows = 1000000L

  "stream a large result set without blowing up" in {
    val deletes = runQuill { query[Person].delete }
    deletes.runSyncUnsafe(Duration.Inf)(scheduler, CanBlock.permit)

    val inserts = quote {
      (numRows: Long) =>
        infix"""insert into person (name, age) select md5(random()::text), random()*10+1 from generate_series(1, ${numRows}) s(i)""".as[Insert[Int]]
    }

    runQuill(inserts(lift(numRows))).runSyncUnsafe(Duration.Inf)(scheduler, CanBlock.permit)

    // not sure why, but foreachL causes an OutOfMemory exception anyhow, and firstL causes a ResultSet Closed exception
    val result = stream(query[Person], 100)
      .zipWithIndex
      .foldLeftL(0L)({
        case (totalYears, (person, index)) => {
          // Need to print something out as we stream or Travis will think the build is stalled and kill it with the following message:
          // "No output has been received in the last 10m0s..."
          if (index % 10000 == 0) println(s"Streaming Test Row: ${index}")
          totalYears + person.age
        }
      })
      .runSyncUnsafe(Duration.Inf)(scheduler, CanBlock.permit)
    result should be > numRows
  }
} 
Example 55
Source File: DefaultJdbcSchemaReader.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill.codegen.jdbc.gen

import java.sql.{ Connection, ResultSet }

import io.getquill.codegen.jdbc.DatabaseTypes.{ DatabaseType, Oracle }
import io.getquill.codegen.jdbc.model.JdbcTypes.{ JdbcConnectionMaker, JdbcSchemaReader }
import io.getquill.codegen.model.{ JdbcColumnMeta, JdbcTableMeta, RawSchema }
import io.getquill.codegen.util.StringUtil._
import io.getquill.util.Using
import scala.util.{ Success, Failure }

import scala.annotation.tailrec
import scala.collection.immutable.List

class DefaultJdbcSchemaReader(
  databaseType: DatabaseType
) extends JdbcSchemaReader {

  @tailrec
  private def resultSetExtractor[T](rs: ResultSet, extractor: (ResultSet) => T, acc: List[T] = List()): List[T] = {
    if (!rs.next())
      acc.reverse
    else
      resultSetExtractor(rs, extractor, extractor(rs) :: acc)
  }

  private[getquill] def schemaPattern(schema: String) =
    databaseType match {
      case Oracle => schema // Oracle meta fetch takes minutes to hours if schema is not specified
      case _      => null
    }

  def jdbcEntityFilter(ts: JdbcTableMeta) =
    ts.tableType.existsInSetNocase("table", "view", "user table", "user view", "base table")

  private[getquill] def extractTables(connectionMaker: () => Connection): List[JdbcTableMeta] = {
    val output = Using.Manager { use =>
      val conn = use(connectionMaker())
      val schema = conn.getSchema
      val rs = use {
        conn.getMetaData.getTables(
          null,
          schemaPattern(schema),
          null,
          null
        )
      }
      resultSetExtractor(rs, rs => JdbcTableMeta.fromResultSet(rs))
    }
    val unfilteredJdbcEntities =
      output match {
        case Success(value) => value
        case Failure(e)     => throw e
      }

    unfilteredJdbcEntities.filter(jdbcEntityFilter(_))
  }

  private[getquill] def extractColumns(connectionMaker: () => Connection): List[JdbcColumnMeta] = {
    val output = Using.Manager { use =>
      val conn = use(connectionMaker())
      val schema = conn.getSchema
      val rs = use {
        conn.getMetaData.getColumns(
          null,
          schemaPattern(schema),
          null,
          null
        )
      }
      resultSetExtractor(rs, rs => JdbcColumnMeta.fromResultSet(rs))
    }
    output match {
      case Success(value) => value
      case Failure(e)     => throw e
    }
  }

  override def apply(connectionMaker: JdbcConnectionMaker): Seq[RawSchema[JdbcTableMeta, JdbcColumnMeta]] = {
    val tableMap =
      extractTables(connectionMaker)
        .map(t => ((t.tableCat, t.tableSchem, t.tableName), t))
        .toMap

    val columns = extractColumns(connectionMaker)
    val tableColumns =
      columns
        .groupBy(c => (c.tableCat, c.tableSchem, c.tableName))
        .map({ case (tup, cols) => tableMap.get(tup).map(RawSchema(_, cols)) })
        .collect({ case Some(tbl) => tbl })

    tableColumns.toSeq
  }
} 
Example 56
Source File: H2Utils.scala    From morpheus   with Apache License 2.0 5 votes vote down vote up
package org.opencypher.morpheus.testing.utils

import java.sql.{Connection, DriverManager, ResultSet, Statement}

import org.apache.spark.sql._
import org.opencypher.morpheus.api.io.sql.SqlDataSourceConfig

object H2Utils {

  implicit class ConnOps(conn: Connection) {
    def run[T](code: Statement => T): T = {
      val stmt = conn.createStatement()
      try { code(stmt) } finally { stmt.close() }
    }
    def execute(sql: String): Boolean = conn.run(_.execute(sql))
    def query(sql: String): ResultSet = conn.run(_.executeQuery(sql))
    def update(sql: String): Int = conn.run(_.executeUpdate(sql))
  }

  def withConnection[T](cfg: SqlDataSourceConfig.Jdbc)(code: Connection => T): T = {
    Class.forName(cfg.driver)
    val conn = (cfg.options.get("user"), cfg.options.get("password")) match {
      case (Some(user), Some(pass)) =>
        DriverManager.getConnection(cfg.url, user, pass)
      case _ =>
        DriverManager.getConnection(cfg.url)
    }
    try { code(conn) } finally { conn.close() }
  }

  implicit class DataFrameWriterOps(write: DataFrameWriter[Row]) {
    def maybeOption(key: String, value: Option[String]): DataFrameWriter[Row] =
      value.fold(write)(write.option(key, _))
  }

  implicit class DataFrameSqlOps(df: DataFrame) {

    def saveAsSqlTable(cfg: SqlDataSourceConfig.Jdbc, tableName: String): Unit =
      df.write
        .mode(SaveMode.Overwrite)
        .format("jdbc")
        .option("url", cfg.url)
        .option("driver", cfg.driver)
        .options(cfg.options)
        .option("dbtable", tableName)
        .save()
  }
} 
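
Note: a usage sketch for the helpers above; the `SqlDataSourceConfig.Jdbc` value is taken as given (its construction is not shown here), and the table is hypothetical.

import org.opencypher.morpheus.api.io.sql.SqlDataSourceConfig
import org.opencypher.morpheus.testing.utils.H2Utils._

// Create a table, insert a row, and read it back through the ConnOps enrichment.
def roundTrip(cfg: SqlDataSourceConfig.Jdbc): String =
  withConnection(cfg) { conn =>
    conn.execute("CREATE TABLE IF NOT EXISTS greetings (msg VARCHAR(32))")
    conn.update("INSERT INTO greetings VALUES ('hello')")
    val rs = conn.query("SELECT msg FROM greetings")
    rs.next()
    rs.getString("msg")
  }
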
Example 57
Source File: DataServiceTest.scala    From kafka-jdbc-connector   with Apache License 2.0 5 votes vote down vote up
package com.agoda.kafka.connector.jdbc.services

import java.sql.{Connection, PreparedStatement, ResultSet, ResultSetMetaData}

import com.agoda.kafka.connector.jdbc.utils.DataConverter
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord
import org.scalatest.mockito.MockitoSugar
import org.mockito.Mockito._
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.duration._
import scala.util.Success

class DataServiceTest extends WordSpec with Matchers with MockitoSugar {

  "Data Service" should {

    val spName = "stored-procedure"
    val connection = mock[Connection]
    val converter = mock[DataConverter]
    val sourceRecord1 = mock[SourceRecord]
    val sourceRecord2 = mock[SourceRecord]
    val resultSet = mock[ResultSet]
    val resultSetMetadata = mock[ResultSetMetaData]
    val preparedStatement = mock[PreparedStatement]
    val schema = mock[Schema]

    val dataService = new DataService {

      override def storedProcedureName: String = spName

      override protected def createPreparedStatement(connection: Connection) = Success(preparedStatement)

      override protected def extractRecords(resultSet: ResultSet, schema: Schema) = Success(Seq(sourceRecord1, sourceRecord2))

      override def dataConverter: DataConverter = converter
    }

    "get records" in {
      doNothing().when(preparedStatement).setQueryTimeout(1)
      when(preparedStatement.executeQuery).thenReturn(resultSet)
      when(resultSet.getMetaData).thenReturn(resultSetMetadata)
      when(converter.convertSchema(spName, resultSetMetadata)).thenReturn(Success(schema))

      dataService.getRecords(connection, 1.second) shouldBe Success(Seq(sourceRecord1, sourceRecord2))

      verify(preparedStatement).setQueryTimeout(1)
      verify(preparedStatement).executeQuery
      verify(resultSet).getMetaData
      verify(converter).convertSchema(spName, resultSetMetadata)
    }
  }
} 
Example 58
Source File: DataService.scala    From kafka-jdbc-connector   with Apache License 2.0 5 votes vote down vote up
package com.agoda.kafka.connector.jdbc.services

import java.sql.{Connection, PreparedStatement, ResultSet}

import com.agoda.kafka.connector.jdbc.utils.DataConverter
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord

import scala.concurrent.duration.Duration
import scala.util.Try

trait DataService {


  def getRecords(connection: Connection, timeout: Duration): Try[Seq[SourceRecord]] = {
    for {
      preparedStatement <- createPreparedStatement(connection)
      resultSet         <- executeStoredProcedure(preparedStatement, timeout)
      schema            <- dataConverter.convertSchema(storedProcedureName, resultSet.getMetaData)
      records           <- extractRecords(resultSet, schema)
    } yield records
  }

  protected def createPreparedStatement(connection: Connection): Try[PreparedStatement]

  protected def extractRecords(resultSet: ResultSet, schema: Schema): Try[Seq[SourceRecord]]

  private def executeStoredProcedure(preparedStatement: PreparedStatement, timeout: Duration): Try[ResultSet] = Try {
    preparedStatement.setQueryTimeout(timeout.toSeconds.toInt)
    preparedStatement.executeQuery
  }
} 
Example 59
Source File: PrepareJdbcSpec.scala    From quill   with Apache License 2.0 5 votes vote down vote up
package io.getquill.postgres

import java.sql.ResultSet

import io.getquill.PrepareMonixJdbcSpecBase
import monix.execution.Scheduler
import org.scalatest.BeforeAndAfter

class PrepareJdbcSpec extends PrepareMonixJdbcSpecBase with BeforeAndAfter {

  val context = testContext
  import testContext._
  implicit val scheduler = Scheduler.global

  before {
    testContext.run(query[Product].delete).runSyncUnsafe()
  }

  def productExtractor = (rs: ResultSet) => materializeQueryMeta[Product].extract(rs)
  val prepareQuery = prepare(query[Product])

  "single" in {
    val prepareInsert = prepare(query[Product].insert(lift(productEntries.head)))
    singleInsert(dataSource.getConnection)(prepareInsert).runSyncUnsafe() mustEqual false
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === List(productEntries.head)
  }

  "batch" in {
    val prepareBatchInsert = prepare(
      liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p))
    )

    batchInsert(dataSource.getConnection)(prepareBatchInsert).runSyncUnsafe().distinct mustEqual List(false)
    extractProducts(dataSource.getConnection)(prepareQuery).runSyncUnsafe() === withOrderedIds(productEntries)
  }
} 
Example 60
Source File: JdbcConnectorTest.scala    From bandar-log   with Apache License 2.0 5 votes vote down vote up
package com.aol.one.dwh.bandarlog.connectors

import java.sql.{Connection, DatabaseMetaData, ResultSet, Statement}

import com.aol.one.dwh.infra.config._
import com.aol.one.dwh.infra.sql.pool.HikariConnectionPool
import com.aol.one.dwh.infra.sql.{ListStringResultHandler, Setting, VerticaMaxValuesQuery}
import org.apache.commons.dbutils.ResultSetHandler
import org.mockito.Mockito.when
import org.scalatest.FunSuite
import org.scalatest.mock.MockitoSugar

class JdbcConnectorTest extends FunSuite with MockitoSugar {

  private val statement = mock[Statement]
  private val resultSet = mock[ResultSet]
  private val connectionPool = mock[HikariConnectionPool]
  private val connection = mock[Connection]
  private val databaseMetaData = mock[DatabaseMetaData]
  private val resultSetHandler = mock[ResultSetHandler[Long]]
  private val listStringResultHandler = mock[ListStringResultHandler]

  test("check run query result for numeric batch_id column") {
    val resultValue = 100L
    val table = Table("table", List("column"), None)
    val query = VerticaMaxValuesQuery(table)
    when(connectionPool.getConnection).thenReturn(connection)
    when(connectionPool.getName).thenReturn("connection_pool_name")
    when(connection.createStatement()).thenReturn(statement)
    when(statement.executeQuery("SELECT MAX(column) AS column FROM table")).thenReturn(resultSet)
    when(connection.getMetaData).thenReturn(databaseMetaData)
    when(databaseMetaData.getURL).thenReturn("connection_url")
    when(resultSetHandler.handle(resultSet)).thenReturn(resultValue)

    val result = new DefaultJdbcConnector(connectionPool).runQuery(query, resultSetHandler)

    assert(result == resultValue)
  }

  test("check run query result for date/time partitions") {
    val resultValue = Some(20190924L)
    val table = Table("table", List("year", "month", "day"), Some(List("yyyy", "MM", "dd")))
    val query = VerticaMaxValuesQuery(table)
    when(connectionPool.getConnection).thenReturn(connection)
    when(connectionPool.getName).thenReturn("connection_pool_name")
    when(connection.createStatement()).thenReturn(statement)
    when(statement.executeQuery("SELECT DISTINCT year, month, day FROM table")).thenReturn(resultSet)
    when(connection.getMetaData).thenReturn(databaseMetaData)
    when(databaseMetaData.getURL).thenReturn("connection_url")
    when(listStringResultHandler.handle(resultSet)).thenReturn(resultValue)

    val result = new DefaultJdbcConnector(connectionPool).runQuery(query, listStringResultHandler)

    assert(result == resultValue)
  }
}

class DefaultJdbcConnector(connectionPool: HikariConnectionPool) extends JdbcConnector(connectionPool) {
  override def applySetting(connection: Connection, statement: Statement, setting: Setting): Unit = {}
} 
Example 61
Source File: ListStringResultHandler.scala    From bandar-log   with Apache License 2.0 5 votes vote down vote up
package com.aol.one.dwh.infra.sql

import java.sql.ResultSet

import com.aol.one.dwh.infra.parser.StringToTimestampParser
import com.aol.one.dwh.infra.util.LogTrait
import org.apache.commons.dbutils.ResultSetHandler

class ListStringResultHandler(numberOfPartitions: Int, format: String) extends ResultSetHandler[Option[Long]] with LogTrait {

  override def handle(resultSet: ResultSet): Option[Long] = {

    val result = Iterator
      .continually(resultSet.next)
      .takeWhile(identity)
      .map(_ => getColumnValues(numberOfPartitions, resultSet))
      .toList

    parseValuesToTimestamp(result, format)
  }

  private def getColumnValues(numberOfPartitions: Int, resultSet: ResultSet): String = {
    (1 to numberOfPartitions)
      .map( index => resultSet.getString(index))
      .toList
      .mkString(":")
  }

  private def parseValuesToTimestamp(values: List[String], format: String): Option[Long] = {
    values
      .map(value => StringToTimestampParser.parse(value, format))
      .max
  }
} 
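
Note: because this is a commons-dbutils `ResultSetHandler`, it can also be driven directly by dbutils' `QueryRunner`. The handler joins column values with ":", so the format must match that layout. The connection URL, table, and partition columns below are hypothetical.

import java.sql.DriverManager
import org.apache.commons.dbutils.QueryRunner

// Read year/month/day partition values and parse the latest one into a timestamp.
val handler = new ListStringResultHandler(numberOfPartitions = 3, format = "yyyy:MM:dd")
val conn = DriverManager.getConnection("jdbc:vertica://host:5433/db", "user", "password")
try {
  val latest: Option[Long] =
    new QueryRunner().query(conn, "SELECT DISTINCT year, month, day FROM table", handler)
  println(latest)
} finally conn.close()
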
Example 62
Source File: Mappers.scala    From common4s   with Apache License 2.0 5 votes vote down vote up
package commons.mapper

import java.sql.ResultSet

import scala.collection.JavaConversions.{ mapAsJavaMap, mapAsScalaMap }
import scala.collection.concurrent.TrieMap


object Mappers {

	private val beanToMapMapperCache = new TrieMap[Class[_], BeanToMapMapper]
	private val mapToBeanMapperCache = new TrieMap[Class[_], MapToBeanMapper]
	private val autoConvertTypeMapToBeanMapperCache = new TrieMap[Class[_], MapToBeanMapper]
	private val resultSetMapperCache = new TrieMap[Class[_], ResultSetMapper]

	def beanToMap(any : AnyRef) : collection.Map[String, Any] = {
		val map = beanToMapMapperCache
			.getOrElseUpdate(any.getClass, BeanToMapMapper.createMapper(any.getClass))
			.map(any)

		mapAsScalaMap(map)
	}

	def mapToBean[T](map : collection.Map[String, Any])(implicit classTag : scala.reflect.ClassTag[T]) : T = {
		mapToBean(map, false)
	}

	def mapToBean[T](map : collection.Map[String, Any], autoConvert : Boolean)(implicit classTag : scala.reflect.ClassTag[T]) : T = {
		val clazz = classTag.runtimeClass

		val mapper =
			if (!autoConvert) mapToBeanMapperCache.getOrElseUpdate(clazz, MapToBeanMapper.createMapper(classTag.runtimeClass))
			else autoConvertTypeMapToBeanMapperCache.getOrElseUpdate(clazz, MapToBeanMapper.createMapper(classTag.runtimeClass, true))

		mapper.map(mapAsJavaMap(map)).asInstanceOf[T]
	}

	def resultSetToBean[T](rs : ResultSet)(implicit classTag : scala.reflect.ClassTag[T]) : T = {
		val clazz = classTag.runtimeClass
		resultSetMapperCache.getOrElseUpdate(clazz, ResultSetMapper.createMapper(clazz)).map(rs).asInstanceOf[T]
	}

	def resultSetToMap(rs : ResultSet) : collection.Map[String, Any] = {
		resultSetToBean[collection.Map[String, Any]](rs)
	}
} 
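
Note: a short, hypothetical usage of `Mappers.resultSetToMap`. Whether the mapper advances the cursor itself is not shown in this file, so the sketch assumes it reads the row the cursor currently points at and calls `rs.next()` first.

import java.sql.DriverManager
import commons.mapper.Mappers

val conn = DriverManager.getConnection("jdbc:h2:mem:demo")
val rs = conn.createStatement().executeQuery("SELECT 1 AS id, 'Alice' AS name")
if (rs.next()) {
  // map the current row into a column-name -> value map
  val row: collection.Map[String, Any] = Mappers.resultSetToMap(rs)
  println(row)
}
conn.close()
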
Example 63
Source File: DaoComponent.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.cake

import java.sql.{ResultSet, PreparedStatement}

import com.ivan.nikolov.cake.model.{Person, Class}

trait DaoComponent {
  this: DatabaseComponent =>
  
  val dao: Dao
  
  class Dao() {
    def getPeople: List[Person] = {
      val connection = databaseService.getConnection
      try {
        executeSelect(
          connection.prepareStatement("SELECT id, name, age FROM people")
        ) {
          rs =>
            readResultSet(rs) {
              row =>
                Person(row.getInt(1), row.getString(2), row.getInt(3))
            }
        }
      } finally {
        connection.close()
      }
    }
    
    def getClasses: List[Class] = {
      val connection = databaseService.getConnection
      try {
        executeSelect(
          connection.prepareStatement("SELECT id, name FROM classes")
        ) {
          rs =>
            readResultSet(rs) {
              row =>
                Class(row.getInt(1), row.getString(2))
            }
        }
      } finally {
        connection.close()
      }
    }
    
    def getPeopleInClass(className: String): List[Person] = {
      val connection = databaseService.getConnection
      try {
        val statement = connection.prepareStatement(
          """
            |SELECT p.id, p.name, p.age 
            |FROM people p 
            | JOIN people_classes pc ON p.id = pc.person_id
            | JOIN classes c ON c.id = pc.class_id
            |WHERE c.name = ? 
          """.stripMargin
        )
        statement.setString(1, className)
        executeSelect(
          statement
        ) {
          rs =>
            readResultSet(rs) {
              row =>
                Person(row.getInt(1), row.getString(2), row.getInt(3))
            }
        }
      } finally {
        connection.close()
      }
      
    }
      
    private def executeSelect[T](preparedStatement: PreparedStatement)(f: (ResultSet) => List[T]): List[T] =
      try {
        f(preparedStatement.executeQuery())
      } finally {
        preparedStatement.close()
      }
    
    private def readResultSet[T](rs: ResultSet)(f: ResultSet => T): List[T] =
      Iterator.continually((rs.next(), rs)).takeWhile(_._1).map {
        case (_, row) =>
          f(rs)
      }.toList
    
  }
} 
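
Note: with the cake pattern above, `Dao` becomes usable only once a concrete `DatabaseComponent` is mixed in. The wiring sketch below assumes `DatabaseComponent` declares the `databaseService` used in `Dao`; its concrete type is not shown in this file, so the value is left as a placeholder.

// Hypothetical wiring of the components into a single registry object.
object ComponentRegistry extends DaoComponent with DatabaseComponent {
  override val databaseService = ??? // placeholder: a service exposing getConnection
  override val dao: Dao = new Dao
}

// Application code would then call, e.g.:
// val everyone = ComponentRegistry.dao.getPeople
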
Example 64
Source File: DaoComponent.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.cake

import java.sql.{ResultSet, PreparedStatement}

import com.ivan.nikolov.cake.model.{Person, Class}

trait DaoComponent {
  this: DatabaseComponent =>
  
  val dao: Dao
  
  class Dao() {
    def getPeople: List[Person] = {
      val connection = databaseService.getConnection
      try {
        executeSelect(
          connection.prepareStatement("SELECT id, name, age FROM people")
        ) {
          rs =>
            readResultSet(rs) {
              row =>
                Person(row.getInt(1), row.getString(2), row.getInt(3))
            }
        }
      } finally {
        connection.close()
      }
    }
    
    def getClasses: List[Class] = {
      val connection = databaseService.getConnection
      try {
        executeSelect(
          connection.prepareStatement("SELECT id, name FROM classes")
        ) {
          rs =>
            readResultSet(rs) {
              row =>
                Class(row.getInt(1), row.getString(2))
            }
        }
      } finally {
        connection.close()
      }
    }
    
    def getPeopleInClass(className: String): List[Person] = {
      val connection = databaseService.getConnection
      try {
        val statement = connection.prepareStatement(
          """
            |SELECT p.id, p.name, p.age 
            |FROM people p 
            | JOIN people_classes pc ON p.id = pc.person_id
            | JOIN classes c ON c.id = pc.class_id
            |WHERE c.name = ? 
          """.stripMargin
        )
        statement.setString(1, className)
        executeSelect(
          statement
        ) {
          rs =>
            readResultSet(rs) {
              row =>
                Person(row.getInt(1), row.getString(2), row.getInt(3))
            }
        }
      } finally {
        connection.close()
      }
      
    }
      
    private def executeSelect[T](preparedStatement: PreparedStatement)(f: (ResultSet) => List[T]): List[T] =
      try {
        f(preparedStatement.executeQuery())
      } finally {
        preparedStatement.close()
      }
    
    private def readResultSet[T](rs: ResultSet)(f: ResultSet => T): List[T] =
      Iterator.continually((rs.next(), rs)).takeWhile(_._1).map {
        case (_, row) =>
          f(rs)
      }.toList
    
  }
} 
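The two DaoComponent listings above show only the DAO side of the cake pattern; the DatabaseComponent they mix in is not reproduced here. The stand-alone sketch below mirrors the same self-type wiring so the dependency injection is visible end to end. Everything in it (PersonDaoComponent, Registry, the stand-in DatabaseComponent, the H2 URL and the people table) is an assumption for illustration, not the book's actual code.

import java.sql.{Connection, DriverManager}

// Stand-in mirroring the shape the Dao code relies on: a component that
// exposes a databaseService able to hand out JDBC connections.
trait DatabaseComponent {
  val databaseService: DatabaseService

  trait DatabaseService {
    def getConnection: Connection
  }
}

trait PersonDaoComponent {
  this: DatabaseComponent => // same self-type technique as DaoComponent above

  val dao: PersonDao

  class PersonDao {
    def countPeople(): Int = {
      val connection = databaseService.getConnection
      try {
        val statement = connection.prepareStatement("SELECT COUNT(*) FROM people")
        try {
          val rs = statement.executeQuery()
          rs.next()
          rs.getInt(1)
        } finally statement.close()
      } finally connection.close()
    }
  }
}

// The "registry" object mixes both components together; the DAO reaches
// databaseService through the self-type, not through a constructor argument.
object Registry extends PersonDaoComponent with DatabaseComponent {
  override val databaseService: DatabaseService = new DatabaseService {
    // Hypothetical embedded-H2 URL; any JDBC URL exposing a people table works.
    override def getConnection: Connection =
      DriverManager.getConnection("jdbc:h2:mem:people;DB_CLOSE_DELAY=-1")
  }
  override val dao: PersonDao = new PersonDao
}

// Usage: Registry.dao.countPeople()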
Example 65
Source File: DaoServiceComponent.scala    From Scala-Design-Patterns-Second-Edition   with MIT License 5 votes vote down vote up
package com.ivan.nikolov.scheduler.dao

import java.sql.{Connection, ResultSet, PreparedStatement}

trait DaoService {
  def getConnection(): Connection
  
  def executeSelect[T](preparedStatement: PreparedStatement)(f: (ResultSet) => List[T]): List[T] =
    try {
      f(preparedStatement.executeQuery())
    } finally {
      preparedStatement.close()
    }

  def readResultSet[T](rs: ResultSet)(f: ResultSet => T): List[T] =
    Iterator.continually((rs.next(), rs)).takeWhile(_._1).map {
      case (_, row) =>
        f(rs)
    }.toList
}

trait DaoServiceComponent {
  this: DatabaseServiceComponent =>
  
  val daoService: DaoService
  
  class DaoServiceImpl extends DaoService {
    override def getConnection(): Connection = databaseService.getConnection
  }
} 
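A short usage sketch of the executeSelect and readResultSet helpers from the DaoService trait above. The anonymous DaoService, the JDBC URL, and the jobs table are assumptions for illustration; only the two helper methods and the abstract getConnection() come from the example.

import java.sql.{Connection, DriverManager}

import com.ivan.nikolov.scheduler.dao.DaoService

object DaoServiceUsage {
  // Stand-in service: only getConnection() is abstract in DaoService.
  private val daoService: DaoService = new DaoService {
    // Hypothetical embedded-database URL.
    override def getConnection(): Connection =
      DriverManager.getConnection("jdbc:h2:mem:scheduler;DB_CLOSE_DELAY=-1")
  }

  // executeSelect closes the statement, readResultSet walks the rows,
  // and the caller is responsible for closing the connection.
  def jobNames(): List[String] = {
    val connection = daoService.getConnection()
    try {
      daoService.executeSelect(
        connection.prepareStatement("SELECT name FROM jobs") // hypothetical table
      ) { rs =>
        daoService.readResultSet(rs)(_.getString(1))
      }
    } finally {
      connection.close()
    }
  }
}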
Example 67
Source File: TestResultSetDataConverter.scala    From ohara   with Apache License 2.0 4 votes vote down vote up
package oharastream.ohara.connector.jdbc.source

import java.sql.{ResultSet, Time, Timestamp}

import oharastream.ohara.client.configurator.InspectApi.RdbColumn
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.connector.jdbc.datatype.{MySQLDataTypeConverter, RDBDataTypeConverter}
import oharastream.ohara.connector.jdbc.util.{ColumnInfo, DateTimeUtils}
import org.junit.Test
import org.mockito.Mockito
import org.mockito.Mockito._
import org.scalatest.matchers.should.Matchers._

class TestResultSetDataConverter extends OharaTest {
  private[this] val VARCHAR: String   = "VARCHAR"
  private[this] val TIMESTAMP: String = "TIMESTAMP"
  private[this] val INT: String       = "INT"
  private[this] val DATE: String      = "DATE"
  private[this] val TIME: String      = "TIME"

  @Test
  def testConverterRecord(): Unit = {
    val resultSet: ResultSet = Mockito.mock(classOf[ResultSet])
    when(resultSet.getTimestamp("column1", DateTimeUtils.CALENDAR)).thenReturn(new Timestamp(0L))
    when(resultSet.getString("column2")).thenReturn("aaa")
    when(resultSet.getInt("column3")).thenReturn(10)

    val columnList = Seq(
      RdbColumn("column1", TIMESTAMP, true),
      RdbColumn("column2", VARCHAR, false),
      RdbColumn("column3", INT, false)
    )
    val dataTypeConverter: RDBDataTypeConverter = new MySQLDataTypeConverter()
    val result: Seq[ColumnInfo[_]]              = ResultSetDataConverter.converterRecord(dataTypeConverter, resultSet, columnList)
    result.head.columnName shouldBe "column1"
    result.head.columnType shouldBe TIMESTAMP
    result.head.value.toString shouldBe "1970-01-01 08:00:00.0"

    result(1).columnName shouldBe "column2"
    result(1).columnType shouldBe VARCHAR
    result(1).value shouldBe "aaa"

    result(2).columnName shouldBe "column3"
    result(2).columnType shouldBe INT
    result(2).value shouldBe 10
  }

  @Test
  def testNullValue(): Unit = {
    val resultSet: ResultSet = Mockito.mock(classOf[ResultSet])
    when(resultSet.getTimestamp("column1", DateTimeUtils.CALENDAR)).thenReturn(new Timestamp(0L))
    when(resultSet.getString("column2")).thenReturn(null)
    when(resultSet.getDate("column3")).thenReturn(null)
    when(resultSet.getTime("column4")).thenReturn(null)

    val columnList = Seq(
      RdbColumn("column1", TIMESTAMP, true),
      RdbColumn("column2", VARCHAR, false),
      RdbColumn("column3", DATE, false),
      RdbColumn("column4", TIME, false)
    )
    val dataTypeConverter: RDBDataTypeConverter = new MySQLDataTypeConverter()
    val result: Seq[ColumnInfo[_]]              = ResultSetDataConverter.converterRecord(dataTypeConverter, resultSet, columnList)
    result(1).columnName shouldBe "column2"
    result(1).columnType shouldBe VARCHAR
    result(1).value shouldBe "null"

    result(2).columnName shouldBe "column3"
    result(2).columnType shouldBe DATE
    result(2).value.toString shouldBe "1970-01-01"

    result(3).columnName shouldBe "column4"
    result(3).columnType shouldBe TIME
    result(3).value.toString shouldBe new Time(0).toString
  }
}