scala.math.abs Scala Examples

The following examples show how to use scala.math.abs. They are extracted from open-source projects; the header above each example names its source file, the project it comes from, and its license.
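
As a quick orientation before the examples: scala.math.abs is overloaded for Int, Long, Float, and Double, and the recurring idiom in the snippets below is an approximate-equality check for floating-point values. A minimal sketch (the object and helper names here are illustrative, not from any of the projects below):

import scala.math.abs

object AbsBasics extends App {
  println(abs(-3))   // 3   (Int overload)
  println(abs(-3.5)) // 3.5 (Double overload)

  // the pattern used throughout these examples: approximate equality
  def approxEqual(a: Double, b: Double, eps: Double = 1e-6): Boolean =
    abs(a - b) < eps

  println(approxEqual(0.1 + 0.2, 0.3)) // true, despite floating-point rounding
}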
Example 1
Source File: TanhSpec.scala    From BigDL    with Apache License 2.0
package com.intel.analytics.bigdl.nn

import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.nn.tf.TanhGrad
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}

import scala.math.abs

@com.intel.analytics.bigdl.tags.Parallel
class TanhSpec extends FlatSpec with Matchers {
  "A Tanh Module " should "generate correct output and grad" in {
    val module = new Tanh[Double]()
    val input = Tensor[Double](2, 2, 2)
    input(Array(1, 1, 1)) = -0.17020166106522
    input(Array(1, 1, 2)) = 0.57785657607019
    input(Array(1, 2, 1)) = -1.3404131438583
    input(Array(1, 2, 2)) = 1.0938102817163
    input(Array(2, 1, 1)) = 1.120370157063
    input(Array(2, 1, 2)) = -1.5014141565189
    input(Array(2, 2, 1)) = 0.3380249235779
    input(Array(2, 2, 2)) = -0.625677742064
    val gradOutput = Tensor[Double](2, 2, 2)
    gradOutput(Array(1, 1, 1)) = 0.79903302760795
    gradOutput(Array(1, 1, 2)) = 0.019753993256018
    gradOutput(Array(1, 2, 1)) = 0.63136631483212
    gradOutput(Array(1, 2, 2)) = 0.29849314852618
    gradOutput(Array(2, 1, 1)) = 0.94380705454387
    gradOutput(Array(2, 1, 2)) = 0.030344664584845
    gradOutput(Array(2, 2, 1)) = 0.33804601291195
    gradOutput(Array(2, 2, 2)) = 0.8807330634445
    val expectedOutput = Tensor[Double](2, 2, 2)
    expectedOutput(Array(1, 1, 1)) = -0.16857698275003
    expectedOutput(Array(1, 1, 2)) = 0.52110579963112
    expectedOutput(Array(1, 2, 1)) = -0.87177144344863
    expectedOutput(Array(1, 2, 2)) = 0.79826462420686
    expectedOutput(Array(2, 1, 1)) = 0.80769763073281
    expectedOutput(Array(2, 1, 2)) = -0.90540347425835
    expectedOutput(Array(2, 2, 1)) = 0.32571298952384
    expectedOutput(Array(2, 2, 2)) = -0.55506882753488
    val expectedGrad = Tensor[Double](2, 2, 2)
    expectedGrad(Array(1, 1, 1)) = 0.77632594793144
    expectedGrad(Array(1, 1, 2)) = 0.014389771607755
    expectedGrad(Array(1, 2, 1)) = 0.15153710218424
    expectedGrad(Array(1, 2, 2)) = 0.1082854310036
    expectedGrad(Array(2, 1, 1)) = 0.32809049064441
    expectedGrad(Array(2, 1, 2)) = 0.0054694603766104
    expectedGrad(Array(2, 2, 1)) = 0.3021830658283
    expectedGrad(Array(2, 2, 2)) = 0.6093779706637
    val inputOrg = input.clone()
    val gradOutputOrg = gradOutput.clone()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    expectedOutput.map(output, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    expectedGrad.map(gradInput, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    assert(input == inputOrg)
    assert(gradOutput == gradOutputOrg)
  }

  "A Tanh Module " should "be good in gradient check" in {
    val module = new Tanh[Double]()
    val input = Tensor[Double](2, 2, 2).rand()

    val checker = new GradientChecker(1e-4, 1e-2)
    checker.checkLayer[Double](module, input) should be(true)
  }
}

class TanhSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val module = TanhGrad[Float, Float]()

    val input = T(Tensor[Float](1, 5, 3, 4).rand(), Tensor[Float](1, 5, 3, 4).rand())

    runSerializationTest(module, input)
  }
} 
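
The expected values in this spec follow from the tanh derivative, d/dx tanh(x) = 1 - tanh(x)^2: the backward pass multiplies gradOutput elementwise by that factor. A framework-free re-derivation of the check on the first element (for illustration only, not BigDL code):

import scala.math.{abs, tanh}

object TanhCheck extends App {
  val x  = -0.17020166106522  // input(1, 1, 1)
  val dy = 0.79903302760795   // gradOutput(1, 1, 1)

  val y  = tanh(x)            // forward
  val dx = dy * (1 - y * y)   // backward: chain rule through tanh

  assert(abs(y - (-0.16857698275003)) < 1e-6) // expectedOutput(1, 1, 1)
  assert(abs(dx - 0.77632594793144) < 1e-6)   // expectedGrad(1, 1, 1)
}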
Example 2
Source File: SigmoidSpec.scala    From BigDL    with Apache License 2.0
package com.intel.analytics.bigdl.nn

import org.scalatest.FlatSpec
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest

import scala.math.abs
import scala.util.Random

@com.intel.analytics.bigdl.tags.Parallel
class SigmoidSpec extends FlatSpec {
  "A Sigmoid Module " should "generate correct output and grad" in {
    val module = new Sigmoid[Double]
    val input = Tensor[Double](2, 2, 2)
    input(Array(1, 1, 1)) = 0.063364277360961
    input(Array(1, 1, 2)) = 0.90631252736785
    input(Array(1, 2, 1)) = 0.22275671223179
    input(Array(1, 2, 2)) = 0.37516756891273
    input(Array(2, 1, 1)) = 0.99284988618456
    input(Array(2, 1, 2)) = 0.97488326719031
    input(Array(2, 2, 1)) = 0.94414822547697
    input(Array(2, 2, 2)) = 0.68123375508003
    val gradOutput = Tensor[Double](2, 2, 2)
    gradOutput(Array(1, 1, 1)) = 0.38652365817688
    gradOutput(Array(1, 1, 2)) = 0.034144022269174
    gradOutput(Array(1, 2, 1)) = 0.68105488433503
    gradOutput(Array(1, 2, 2)) = 0.41517980070785
    gradOutput(Array(2, 1, 1)) = 0.91740695876069
    gradOutput(Array(2, 1, 2)) = 0.35317355184816
    gradOutput(Array(2, 2, 1)) = 0.24361599306576
    gradOutput(Array(2, 2, 2)) = 0.65869987895712
    val expectedOutput = Tensor[Double](2, 2, 2)
    expectedOutput(Array(1, 1, 1)) = 0.51583577126786
    expectedOutput(Array(1, 1, 2)) = 0.71224499952187
    expectedOutput(Array(1, 2, 1)) = 0.55546003768115
    expectedOutput(Array(1, 2, 2)) = 0.59270705262321
    expectedOutput(Array(2, 1, 1)) = 0.72965046058394
    expectedOutput(Array(2, 1, 2)) = 0.72609176575892
    expectedOutput(Array(2, 2, 1)) = 0.71993681755829
    expectedOutput(Array(2, 2, 2)) = 0.66401400310487
    val expectedGrad = Tensor[Double](2, 2, 2)
    expectedGrad(Array(1, 1, 1)) = 0.096533985368059
    expectedGrad(Array(1, 1, 2)) = 0.0069978877068295
    expectedGrad(Array(1, 2, 1)) = 0.16816892172375
    expectedGrad(Array(1, 2, 2)) = 0.1002266468557
    expectedGrad(Array(2, 1, 1)) = 0.18096830763559
    expectedGrad(Array(2, 1, 2)) = 0.070240043677749
    expectedGrad(Array(2, 2, 1)) = 0.049119755820981
    expectedGrad(Array(2, 2, 2)) = 0.14695555224503
    val inputOrg = input.clone()
    val gradOutputOrg = gradOutput.clone()
    val output = module.forward(input)
    val gradInput = module.backward(input, gradOutput)
    expectedOutput.map(output, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    expectedGrad.map(gradInput, (v1, v2) => {
      assert(abs(v1 - v2) < 1e-6);
      v1
    })
    assert(input == inputOrg)
    assert(gradOutput == gradOutputOrg)
  }
}

class SigmoidSerialTest extends ModuleSerializationTest {
  override def test(): Unit = {
    val sigmoid = Sigmoid[Float]().setName("sigmoid")
    val input = Tensor[Float](10).apply1(_ => Random.nextFloat())
    runSerializationTest(sigmoid, input)
  }
} 
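
The same structure applies with the sigmoid: sigma(x) = 1 / (1 + e^(-x)), whose derivative is sigma(x) * (1 - sigma(x)). A minimal re-derivation of the first asserted values (illustrative, not BigDL code):

import scala.math.{abs, exp}

object SigmoidCheck extends App {
  def sigmoid(x: Double): Double = 1.0 / (1.0 + exp(-x))

  val x  = 0.063364277360961  // input(1, 1, 1)
  val dy = 0.38652365817688   // gradOutput(1, 1, 1)

  val y  = sigmoid(x)         // forward
  val dx = dy * y * (1 - y)   // backward: chain rule through sigmoid

  assert(abs(y - 0.51583577126786) < 1e-6)   // expectedOutput(1, 1, 1)
  assert(abs(dx - 0.096533985368059) < 1e-6) // expectedGrad(1, 1, 1)
}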
Example 3
Source File: DspTesterSpec.scala    From dsptools    with BSD 3-Clause "New" or "Revised" License
// See LICENSE for license details.

package dsptools

import DspTesterUtilities._
import org.scalatest.{FlatSpec, Matchers}
import scala.math.{pow, abs}

class DspTesterSpec {

}

class DspTesterUtilitiesSpec extends FlatSpec with Matchers {

  behavior of "Tester Converters"

  it should "convert positive and negative doubles to their BigInt, fixed point equivalents" in {

    def check_conversion(value: Double, totalWidth: Int, fractionalWidth: Int, verbose: Boolean = false): Unit = {
      if (verbose) { println(s"value = $value\ntotal width = $totalWidth\nfractional width = $fractionalWidth") }
      var bi = signedToBigIntUnsigned(value, totalWidth, fractionalWidth)
      if (verbose) { println(s"result = $bi") }
      // check sign, flip if necessary
      if (totalWidth > 0 && bi.testBit(totalWidth-1)) {
        bi = -1 * ((bi ^ ((BigInt(1) << totalWidth) - 1)) + 1)
      }
      val bid = bi.toDouble / (BigInt(1) << fractionalWidth).toDouble
      if (verbose) { println(s"back to double = $bid") }
      val comp = scala.math.abs(bid-value)
      if (verbose) { println(s"comp = $comp") }
      val ref = scala.math.pow(2, -fractionalWidth)
      if (verbose) { println(s"ref = $ref") }
      require(abs(bid-value) < pow(2, -fractionalWidth))
    }

    // integers
    var width = 14
    for (i <- -pow(2,width-1).toInt until pow(2,width-1).toInt) {
      check_conversion(i, width, 0)
    }

    // big integers
    width = 40
    for (i <- -pow(2,width-1).toInt to pow(2,width-1).toInt by pow(2, 20).toInt) {
      check_conversion(i, width, 0)
    }

    // total > fractional
    width = 19
    var fract = 8
    for (i <- -pow(2,width-fract-1) to pow(2,width-fract-1)-1 by 1.0/fract*0.9) {
      check_conversion(i, width, fract)
    }

    // total < fractional
    width = 11
    fract = 17
    for (i <- -pow(2,width-fract-1) to pow(2,width-fract-1)-1 by 1.0/fract*0.9) {
      check_conversion(i, width, fract)
    }

  }

  it should "fail to convert doubles to BigInts when not enough space is supplied" in {
    intercept[IllegalArgumentException] { signedToBigIntUnsigned(2.0, 4, 2) }
    intercept[IllegalArgumentException] { signedToBigIntUnsigned(-2.25, 4, 2) }
  }

} 
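
The conversion under test packs a signed fixed-point value into an unsigned BigInt holding its two's-complement bit pattern. A self-contained sketch of that idea (a simplified stand-in, not the dsptools implementation):

import scala.math.{pow, round}

object FixedPointSketch extends App {
  // Scale by 2^fractionalWidth, round, range-check against the signed
  // width, then wrap negatives into the unsigned range.
  def toFixed(value: Double, totalWidth: Int, fractionalWidth: Int): BigInt = {
    val scaled = BigInt(round(value * pow(2, fractionalWidth)))
    val bound  = BigInt(1) << (totalWidth - 1)
    require(scaled >= -bound && scaled < bound, s"$value does not fit in $totalWidth bits")
    if (scaled < 0) scaled + (BigInt(1) << totalWidth) else scaled
  }

  assert(toFixed( 1.5, 4, 2) == BigInt(6))  //  1.5 * 4 =  6 -> 0110
  assert(toFixed(-1.5, 4, 2) == BigInt(10)) // -6 wraps to 16 - 6 = 10 -> 1010
  // toFixed(2.0, 4, 2) throws: 8 is outside the signed 4-bit range, matching
  // the IllegalArgumentException cases in the spec above
}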
Example 4
Source File: FunSpec.scala    From zio    with Apache License 2.0
package zio.test

import scala.math.abs

import zio.test.Assertion._
import zio.{ random, ZIO }

object FunSpec extends ZIOBaseSpec {

  def spec = suite("FunSpec")(
    testM("fun converts effects into pure functions") {
      for {
        f <- Fun.make((n: Int) => random.nextIntBounded(n))
        n <- random.nextInt.map(abs(_))
      } yield assert(f(n))(equalTo(f(n)))
    },
    testM("fun does not have race conditions") {
      for {
        f       <- Fun.make((_: Int) => random.nextIntBounded(6))
        results <- ZIO.foreachPar(List.range(0, 1000))(n => ZIO.effectTotal((n % 6, f(n % 6))))
      } yield assert(results.distinct.length)(equalTo(6))
    },
    testM("fun is showable") {
      for {
        f <- Fun.make((_: String) => random.nextBoolean)
        p = f("Scala")
        q = f("Haskell")
      } yield {
        assert(f.toString)(equalTo(s"Fun(Scala -> $p, Haskell -> $q)")) ||
        assert(f.toString)(equalTo(s"Fun(Haskell -> $q, Scala -> $p)"))
      }
    },
    testM("fun is supported on Scala.js") {
      for {
        f <- Fun.make((_: Int) => ZIO.foreach(List.range(0, 100000))(ZIO.succeed(_)))
      } yield assert(f(1))(anything)
    }
  )
} 
Example 5
Source File: RowComparer.scala    From spark-fast-tests    with MIT License
package com.github.mrpowers.spark.fast.tests

import org.apache.spark.sql.Row

import java.sql.Timestamp
import scala.math.abs

object RowComparer {

  
  // Returns true when both Rows have the same length and all fields match,
  // comparing Float, Double, and Timestamp fields within tolerance `tol`.
  def areRowsEqual(r1: Row, r2: Row, tol: Double): Boolean = {
    if (r1.length != r2.length) {
      return false
    } else {
      (0 until r1.length).foreach(idx => {
        if (r1.isNullAt(idx) != r2.isNullAt(idx)) {
          return false
        }

        if (!r1.isNullAt(idx)) {
          val o1 = r1.get(idx)
          val o2 = r2.get(idx)
          o1 match {
            case b1: Array[Byte] =>
              if (!java.util.Arrays.equals(
                    b1,
                    o2.asInstanceOf[Array[Byte]]
                  )) {
                return false
              }

            case f1: Float =>
              if (java.lang.Float.isNaN(f1) !=
                    java.lang.Float.isNaN(o2.asInstanceOf[Float])) {
                return false
              }
              if (abs(f1 - o2.asInstanceOf[Float]) > tol) {
                return false
              }

            case d1: Double =>
              if (java.lang.Double.isNaN(d1) !=
                    java.lang.Double.isNaN(o2.asInstanceOf[Double])) {
                return false
              }
              if (abs(d1 - o2.asInstanceOf[Double]) > tol) {
                return false
              }

            case d1: java.math.BigDecimal =>
              if (d1.compareTo(o2.asInstanceOf[java.math.BigDecimal]) != 0) {
                return false
              }

            case t1: Timestamp =>
              if (abs(t1.getTime - o2.asInstanceOf[Timestamp].getTime) > tol) {
                return false
              }

            case _ =>
              if (o1 != o2) return false
          }
        }
      })
    }
    true
  }

} 
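
A usage sketch, assuming spark-sql and spark-fast-tests are on the classpath (the row values are illustrative):

import org.apache.spark.sql.Row
import com.github.mrpowers.spark.fast.tests.RowComparer

object RowComparerDemo extends App {
  val r1 = Row("hello", 1.499)
  val r2 = Row("hello", 1.501)

  // |1.499 - 1.501| = 0.002 is within the tolerance
  println(RowComparer.areRowsEqual(r1, r2, tol = 0.01))  // true
  // but exceeds a tighter one
  println(RowComparer.areRowsEqual(r1, r2, tol = 0.001)) // false
}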
Example 6
Source File: FeatureSelection.scala    From aerosolve    with Apache License 2.0
package com.airbnb.aerosolve.training

import java.io.BufferedWriter
import java.io.OutputStreamWriter
import java.util

import com.airbnb.aerosolve.core.{ModelRecord, ModelHeader, FeatureVector, Example}
import com.airbnb.aerosolve.core.models.LinearModel
import com.airbnb.aerosolve.core.util.Util
import com.typesafe.config.Config
import org.slf4j.{LoggerFactory, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD

import scala.collection.mutable.HashMap
import scala.collection.mutable.HashSet
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.Buffer
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.util.Random
import scala.math.abs
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.hadoop.conf.Configuration

object FeatureSelection {
  private final val log: Logger = LoggerFactory.getLogger("FeatureSelection")
  val allKey : (String, String) = ("$ALL", "$POS")

  // Given a RDD compute the pointwise mutual information between
  // the positive label and the discrete features.
  def pointwiseMutualInformation(examples : RDD[Example],
                                 config : Config,
                                 key : String,
                                 rankKey : String,
                                 posThreshold : Double,
                                 minPosCount : Double,
                                 newCrosses : Boolean) : RDD[((String, String), Double)] = {
    val pointwise = LinearRankerUtils.makePointwise(examples, config, key, rankKey)
    val features = pointwise
      .mapPartitions(part => {
      // The value tuple holds (count(feature), count(feature and positive))
      val output = scala.collection.mutable.HashMap[(String, String), (Double, Double)]()
      part.foreach(example =>{
        val featureVector = example.example.get(0)
        val isPos =
          if (featureVector.floatFeatures.get(rankKey).asScala.head._2 > posThreshold) 1.0
          else 0.0
        val all : (Double, Double) = output.getOrElse(allKey, (0.0, 0.0))
        output.put(allKey, (all._1 + 1.0, all._2 + 1.0 * isPos))

        val features : Array[(String, String)] =
          LinearRankerUtils.getFeatures(featureVector)
        if (newCrosses) {
          for (i <- features) {
            for (j <- features) {
              if (i._1 < j._1) {
                val key = ("%s<NEW>%s".format(i._1, j._1),
                           "%s<NEW>%s".format(i._2, j._2))
                val x = output.getOrElse(key, (0.0, 0.0))
                output.put(key, (x._1 + 1.0, x._2 + 1.0 * isPos))
              }
            }
          }
        }
        for (feature <- features) {
          val x = output.getOrElse(feature, (0.0, 0.0))
          output.put(feature, (x._1 + 1.0, x._2 + 1.0 * isPos))
        }
      })
      output.iterator
    })
    .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
    .filter(x => x._2._2 >= minPosCount)

    val allCount = features.filter(x => x._1.equals(allKey)).take(1).head

    features.map(x => {
      val prob = x._2._1 / allCount._2._1
      val probPos = x._2._2 / allCount._2._2
      (x._1, math.log(probPos / prob) / math.log(2.0))
    })
  }

  // Returns the maximum entropy per family
  def maxEntropy(input : RDD[((String, String), Double)]) : RDD[((String, String), Double)] = {
    input
      .map(x => (x._1._1, (x._1._2, x._2)))
      .reduceByKey((a, b) => if (math.abs(a._2) > math.abs(b._2)) a else b)
      .map(x => ((x._1, x._2._1), x._2._2))
  }
} 
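
The value returned by pointwiseMutualInformation is log2(P(feature | positive) / P(feature)). A toy computation with hypothetical counts shows the shape of the result:

import scala.math.log

object PmiSketch extends App {
  // Hypothetical counts in the same (count, positiveCount) layout:
  // 1000 examples, 200 positive; a feature occurs 100 times, 40 positive.
  val (allCount, allPos)         = (1000.0, 200.0)
  val (featureCount, featurePos) = (100.0, 40.0)

  val prob    = featureCount / allCount // P(feature)            = 0.1
  val probPos = featurePos / allPos     // P(feature | positive) = 0.2

  val pmi = log(probPos / prob) / log(2.0)
  println(pmi) // 1.0: the feature is twice as likely among positives
}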
Example 7
Source File: JoinedDifferences.scala    From diffy    with GNU Affero General Public License v3.0
package ai.diffy.analysis

import javax.inject.Inject

import com.twitter.util.Future
import scala.math.abs

object DifferencesFilterFactory {
  def apply(relative: Double, absolute: Double): JoinedField => Boolean = {
    (field: JoinedField) =>
      field.raw.differences > field.noise.differences &&
        field.relativeDifference > relative &&
        field.absoluteDifference > absolute
  }
}

case class JoinedDifferences @Inject() (raw: RawDifferenceCounter, noise: NoiseDifferenceCounter) {
  def endpoints: Future[Map[String, JoinedEndpoint]] = {
    raw.counter.endpoints map { _.keys } flatMap { eps =>
      Future.collect(
        eps map { ep =>
          endpoint(ep) map { ep -> _ }
        } toSeq
      ) map { _.toMap }
    }
  }

  def endpoint(endpoint: String): Future[JoinedEndpoint] = {
    Future.join(
      raw.counter.endpoint(endpoint),
      raw.counter.fields(endpoint),
      noise.counter.fields(endpoint)
    ) map { case (endpoint, rawFields, noiseFields) =>
      JoinedEndpoint(endpoint, rawFields, noiseFields)
    }
  }
}

case class JoinedEndpoint(
  endpoint: EndpointMetadata,
  original: Map[String, FieldMetadata],
  noise: Map[String, FieldMetadata])
{
  def differences = endpoint.differences
  def total = endpoint.total
  def fields: Map[String, JoinedField] = original map { case (path, field) =>
    path -> JoinedField(endpoint, field, noise.getOrElse(path, FieldMetadata.Empty))
  }
}

case class JoinedField(endpoint: EndpointMetadata, raw: FieldMetadata, noise: FieldMetadata) {
  // the percent difference out of the total # of requests
  def absoluteDifference = abs(raw.differences - noise.differences) / endpoint.total.toDouble * 100
  // the difference between this field's raw and noisy difference counts, as a percent of their combined total
  def relativeDifference = abs(raw.differences - noise.differences) / (raw.differences + noise.differences).toDouble * 100
} 
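
Both metrics reduce to simple arithmetic over the difference counts. A toy computation with hypothetical numbers:

import scala.math.abs

object JoinedFieldMath extends App {
  // Hypothetical counts: 30 raw differences, 10 noise differences,
  // out of 200 total requests for the endpoint.
  val (raw, noise, total) = (30.0, 10.0, 200.0)

  // percent of all requests: |30 - 10| / 200 * 100 = 10%
  val absoluteDifference = abs(raw - noise) / total * 100
  // percent of combined differences: |30 - 10| / 40 * 100 = 50%
  val relativeDifference = abs(raw - noise) / (raw + noise) * 100

  println((absoluteDifference, relativeDifference)) // (10.0, 50.0)
}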
Example 8
Source File: SplitCriterionSuite.scala    From streamDM    with Apache License 2.0
package org.apache.spark.streamdm.classifiers.trees

import org.scalatest.FunSuite
import org.apache.spark.streamdm.classifiers.trees._
import scala.math.{ log, abs }
import org.apache.spark.streamdm.utils.Utils.{ log2 }
class SplitCriterionSuite extends FunSuite {

  test("InfoGainSplitCriterion, test negtive function") {
    val ig: InfoGainSplitCriterion = new InfoGainSplitCriterion()
    assert(ig.hasNegative(Array(0, -1.0, 2)))
  }

  test("InfoGainSplitCriterion, test entropy of array") {
    val ig: InfoGainSplitCriterion = new InfoGainSplitCriterion()
    assert(ig.entropy(Array[Double](2.0, 4, 8)) == (log2(2.0 + 4 + 8) - (2.0 * log2(2.0) + 4 * log2(4.0) + 8 * log2(8.0)) / (2.0 + 4 + 8)))
    assert(ig.entropy(Array(0.5, 0.5)) == 1.0)
    assert(ig.entropy(Array(0.4, 0.6)) == 0.9709505944546686)
  }

  test("InfoGainSplitCriterion, test entropy of matrix ") {
    val ig: InfoGainSplitCriterion = new InfoGainSplitCriterion()
    assert(ig.entropy(Array(Array(1.0, 1, 1), Array(1.0, 1, 1))) == 1.5849625007211563)
  }

  test("InfoGainSplitCriterion, test nunFrac") {
    val ig: InfoGainSplitCriterion = new InfoGainSplitCriterion()
    assert(ig.numGTFrac(Array(new Array(0)), 0.01) == 0)
    assert(ig.numGTFrac(Array(Array(1, -1, 1), Array(-1, 1, 0)), 0.01) == 1)
    assert(ig.numGTFrac(Array(Array(1, 1, 1), Array(-1, 1, 1)), 0.01) == 2)
    assert(ig.numGTFrac(Array(Array(1, 2, 3, 4, 5), Array(5, 4, 3, 2, 1)), 0.01) == 2)
  }
  test("InfoGainSplitCriterion, test rangeMerit") {
    val ig: InfoGainSplitCriterion = new InfoGainSplitCriterion()
    assert(ig.rangeMerit(Array(1.0, 1)) == log2(2))
  }
  test("InfoGainSplitCriterion, test merit") {
    val ig: InfoGainSplitCriterion = new InfoGainSplitCriterion()
    assert(ig.merit(Array(1.0, 1, 1), Array(Array(1, -1, 1), Array(-1, 1, 0))) == Double.NegativeInfinity)
  }

  test("GiniSplitCriterion, test computeGini") {
    val gc: GiniSplitCriterion = new GiniSplitCriterion()
    assert(gc.computeGini(Array[Double](1, 1, 1), 3) == 1.0 - 3.0 * 1.0 / 9)
  }
  test("GiniSplitCriterion, test merit") {
    val gc: GiniSplitCriterion = new GiniSplitCriterion()
    assert(abs(gc.merit(Array(1.0 / 3, 1.0 / 3, 1.0 / 3),
      Array(Array(1.0 / 6, 1.0 / 6, 1.0 / 6), Array(1.0 / 6, 1.0 / 6, 1.0 / 6))) - 0.33333333333) < 0.000001)
  }

} 
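
The entropy values asserted above follow the standard formula H(p) = -sum(p_i * log2(p_i)) for a normalized distribution. A minimal re-derivation (not streamDM's implementation, which also handles raw counts):

import scala.math.log

object EntropySketch extends App {
  def log2(x: Double): Double = log(x) / log(2.0)

  def entropy(p: Seq[Double]): Double =
    -p.filter(_ > 0).map(x => x * log2(x)).sum

  println(entropy(Seq(0.5, 0.5))) // 1.0
  println(entropy(Seq(0.4, 0.6))) // ≈ 0.9709505944546686
}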
Example 9
Source File: GaussianEstimatorSuite.scala    From streamDM    with Apache License 2.0
package org.apache.spark.streamdm.classifiers.trees

import org.scalatest.FunSuite
import scala.math.{ abs }
class GaussianEstimatorSuite extends FunSuite {

  test("test observe,stddev,variance,merge") {
    val gs: GaussianEstimator = new GaussianEstimator()
    gs.observe(2.0, 0.5)
    gs.observe(1, 2)
    var gs2: GaussianEstimator = new GaussianEstimator()
    gs2 = gs2.merge(gs, true)
    assert(gs2.totalWeight() == 2.5)
    assert(gs2.getMean() == 1.2)
    assert(gs2.stdDev() == 0.5163977794943222)
    assert(gs2.variance() == 0.2666666666666666)
  }

  test("test variance") {
    val gs: GaussianEstimator = new GaussianEstimator()
    gs.observe(2.0, 0.5)
    gs.observe(1, 0.5)
    gs.observe(3, 0.5)
    var gs2: GaussianEstimator = new GaussianEstimator()
    gs2 = gs2.merge(gs, true)
    assert(gs2.variance() == 2.0)
  }

  test("test probalilityDensity") {
    val gs: GaussianEstimator = new GaussianEstimator()
    assert(gs.probabilityDensity(0) == 0)
    assert(gs.probabilityDensity(1) == 0)
    assert(gs.probabilityDensity(2) == 0)
    gs.observe(2.0, 1)
    var gs2: GaussianEstimator = new GaussianEstimator()
    gs2 = gs2.merge(gs, true)
    assert(gs2.probabilityDensity(0) == 0)
    assert(gs2.probabilityDensity(1) == 0)
    assert(gs2.probabilityDensity(2) == 1)
  }

} 
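
The asserted mean, variance, and standard deviation follow from weighted statistics with a (totalWeight - 1) correction in the variance. A re-derivation of the first test's numbers (illustrative, not the GaussianEstimator implementation):

import scala.math.sqrt

object WeightedStatsSketch extends App {
  val samples = Seq((2.0, 0.5), (1.0, 2.0)) // (value, weight)

  val w   = samples.map(_._2).sum                          // 2.5
  val sx  = samples.map { case (x, wi) => wi * x }.sum     // 3.0
  val sxx = samples.map { case (x, wi) => wi * x * x }.sum // 4.0

  val mean     = sx / w                        // 1.2
  val variance = (sxx - sx * sx / w) / (w - 1) // 0.4 / 1.5 ≈ 0.26667
  val stdDev   = sqrt(variance)                // ≈ 0.5163977794943222

  println((w, mean, variance, stdDev))
}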
Example 10
Source File: DataUtilsSpec.scala    From warp-core    with MIT License
package com.workday.warp.utils

import com.workday.warp.common.spec.WarpJUnitSpec
import com.workday.warp.junit.UnitTest

import scala.math.abs


class DataUtilsSpec extends WarpJUnitSpec {

  @UnitTest
  def zeroStdDev(): Unit = {
    val standardized: Iterable[Double] = DataUtils.standardize(List(1, 2, 3, 4))
    val diffs: Iterable[Double] = standardized.zip(
      List(-1.161895003862225, -0.3872983346207417, 0.3872983346207417, 1.161895003862225)
    ).map { case (d1, d2) => abs(d1 - d2) }

    val epsilon: Double = 0.0001
    if (diffs.exists(_ > epsilon)) {
      throw new RuntimeException(s"$diffs exceeded maximum allowed tolerance threshold")
    }

    DataUtils.standardize(List(1, 1, 1, 1)) should be (List(0, 0, 0, 0))
  }
}
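
The expected values imply z-scores computed with the sample (n - 1) standard deviation: for List(1, 2, 3, 4), the mean is 2.5 and the sample standard deviation is sqrt(5/3) ≈ 1.29099, so (1 - 2.5) / 1.29099 ≈ -1.161895. A sketch of that computation (a stand-in, not warp-core's DataUtils itself):

import scala.math.sqrt

object StandardizeSketch extends App {
  def standardize(xs: Seq[Double]): Seq[Double] = {
    val mean     = xs.sum / xs.size
    val variance = xs.map(x => (x - mean) * (x - mean)).sum / (xs.size - 1)
    val sd       = sqrt(variance)
    if (sd == 0) xs.map(_ => 0.0) else xs.map(x => (x - mean) / sd)
  }

  println(standardize(Seq(1.0, 2.0, 3.0, 4.0)))
  // ≈ List(-1.161895, -0.387298, 0.387298, 1.161895)
}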