breeze.numerics.abs Scala Examples

The following examples show how to use breeze.numerics.abs. Each snippet is taken from an open-source project; the source file, project, and license are listed above each example.
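As a quick orientation before the project examples: abs is a Breeze UFunc, so the same function works on plain scalars and element-wise on vectors and matrices. A minimal standalone sketch (not taken from any of the projects below):

import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.numerics.abs

object AbsSketch extends App {
  println(abs(-3.5))                                  // 3.5
  println(abs(DenseVector(-1.0, 2.0, -3.0)))          // DenseVector(1.0, 2.0, 3.0)
  println(abs(DenseMatrix((-1.0, 2.0), (3.0, -4.0)))) // element-wise over the matrix
}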
Example 1
Source File: SoftmaxWithCriterionSpec.scala    From BigDL   with Apache License 2.0
package com.intel.analytics.bigdl.nn

import breeze.numerics.abs
import com.intel.analytics.bigdl.tensor.{Storage, Tensor}
import org.scalatest.{FlatSpec, Matchers}

class SoftmaxWithCriterionSpec extends FlatSpec with Matchers {
  val inputArr = Array(-3.8623790740966796875, -5.576374053955078125,
    10.298772811889648438, 9.0803890228271484375,
    1.3665539026260375977, -0.44133603572845458984,
    -9.40171051025390625, 1.0564124584197998047, 13.553049087524414062,
    -13.990137100219726562, 0.38796663284301757812, 1.6085460186004638672,
    8.8876256942749023438, 2.3242428302764892578,
    -4.9687619209289550781, 3.7455892562866210938, 2.0669219493865966797,
    19.429233551025390625, 7.1232995986938476562,
    -10.957750320434570312, 4.5843319892883300781, 16.586359024047851562,
    -1.0300438404083251953, -21.75362396240234375,
    -2.7482614517211914062, 2.2115952968597412109, 0.85470116138458251953,
    1.8852581977844238281, -0.88053613901138305664, -21.679836273193359375)
  val targetArr = Array(2, 4, 2, 4, 1, 2)
  val input = Tensor(Storage(inputArr.map(x => x.toFloat))).resize(1, 5, 2, 3)
  val target = Tensor(Storage(targetArr.map(x => x.toFloat))).resize(1, 1, 2, 3)

  "SoftmaxWithCriterion forward" should "work properly" in {
    val normMode = NormMode.apply(2)
    val sfmLoss = new SoftmaxWithCriterion[Float](normalizeMode = normMode)
    val actOut = sfmLoss.forward(input, target)
    var sum = 0f
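    // each target label is ignored in exactly one of the five runs below, so the
    // five partial losses are expected to sum to four times the full loss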
    for (tar <- 1 to 5) {
      val res = new SoftmaxWithCriterion[Float](ignoreLabel = Some(tar),
        normalizeMode = normMode).forward(input, target)
      sum += res
    }
    assert(abs(actOut - 51.643194557149605828) < 1e-4)
    assert(abs(actOut * 4 - sum) < 1e-4)
  }

  "SoftmaxWithCriterion backward" should "work properly" in {
    val normMode = NormMode.apply(1)
    val sfmLoss = new SoftmaxWithCriterion[Float](normalizeMode = normMode, ignoreLabel = Some(1))
    val actOut = sfmLoss.forward(input, target)
    assert(abs(actOut - 10.073171615600585938) < 1e-4)

    val actGradInput = sfmLoss.backward(input, target)

    val expectedGradInput = Array(9.9112855878047412261e-07,
      6.813194340793415904e-05, 0.014867544174194335938,
      0.00021979543089400976896, 0, 9.3838581349814376154e-10,
      -0.40000000596046447754, 0.051752727478742599487,
      -0.014917778782546520233, 2.1019214065673766378e-14, 0,
      -0.40000000596046447754, 0.34149685502052307129,
      0.18388444185256958008, 3.4804334969606998129e-09,
      1.0596063475531991571e-06, 0, 0.40000000596046447754,
      0.058499157428741455078, -0.39999970793724060059,
      4.9033053073799237609e-05, -0.0002209901867900043726,
      0, 5.2068160617220955731e-19, 3.0198482363630319014e-06,
      0.16429440677165985107, 1.1768768217734759673e-06,
      1.6489701692989910953e-07, 0, 5.6055445173304457315e-19)

    assert(expectedGradInput.length == actGradInput.nElement())
    (actGradInput.storage().array() zip expectedGradInput).foreach(x => {
      // because in caffe, they use weight 2 for the loss
      assert(abs(x._1 - x._2 / 2.0) < 1e-4)
    })
  }
} 
Example 2
Source File: VLFeatSuite.scala    From keystone   with Apache License 2.0
package keystoneml.utils.external

import java.io.File

import breeze.linalg._
import breeze.numerics.abs
import org.scalatest.FunSuite
import keystoneml.pipelines.Logging
import keystoneml.utils.{ImageUtils, MatrixUtils, TestUtils}

class VLFeatSuite extends FunSuite with Logging {
  test("Load an Image and compute SIFT Features") {
    val testImage = TestUtils.loadTestImage("images/000012.jpg")
    val singleImage = ImageUtils.mapPixels(testImage, _/255.0)
    val grayImage = ImageUtils.toGrayScale(singleImage)

    val extLib = new VLFeat

    val stepSize = 3
    val binSize = 4
    val scales = 4
    val descriptorLength = 128
    val scaleStep = 0

    val rawDescDataShort = extLib.getSIFTs(grayImage.metadata.xDim, grayImage.metadata.yDim,
      stepSize, binSize, scales, scaleStep, grayImage.getSingleChannelAsFloatArray())

    assert(rawDescDataShort.length % descriptorLength == 0, "Resulting SIFTs must be 128-dimensional.")

    val numCols = rawDescDataShort.length/descriptorLength
    val result = new DenseMatrix(descriptorLength, numCols, rawDescDataShort.map(_.toDouble))

    // Compare with the output of running this image through vl_phow with matlab from the enceval package:
    // featpipem_addpaths;
    // im = im2single(imread('images/000012.jpg'));
    // featextr = featpipem.features.PhowExtractor();
    // featextr.step = 3;
    // [frames feats] = featextr.compute(im);
    // csvwrite('images/feats128.csv', feats)

    val testFeatures = csvread(new File(TestUtils.getTestResourceFileName("images/feats128.csv")))

    val diff = result - testFeatures

    // Because of subtle differences in the way image smoothing works in the VLFeat C library and the VLFeat matlab
    // library (vl_imsmooth_f vs. _vl_imsmooth_f), these two matrices will not be exactly the same.
    // Instead, we check that 99.5% of the matrix entries are off by at most 1.
    val absdiff = abs(diff).toDenseVector

    assert(absdiff.findAll(_ > 1.0).length.toDouble < 0.005*absdiff.length,
      "Fewer than 0.05% of entries may be different by more than 1.")
  }
} 
Example 3
Source File: LDAOptimizer.scala    From mleap   with Apache License 2.0
package ml.combust.mleap.core.clustering.optimization

import breeze.linalg.{Vector, sum, DenseMatrix => BDM, DenseVector => BDV, SparseVector => BSV}
import breeze.numerics.{abs, exp}
import breeze.stats.distributions.Gamma
import ml.combust.mleap.core.annotation.SparkCode
import ml.combust.mleap.core.clustering.LDAUtils


// Enclosing object for the method below; its declaration is missing from this
// excerpt, and the name is assumed from the Spark OnlineLDAOptimizer that this
// port mirrors.
private[clustering] object OnlineLDAOptimizer {

  private[clustering] def variationalTopicInference(
                                                     termCounts: Vector[Double],
                                                     expElogbeta: BDM[Double],
                                                     alpha: breeze.linalg.Vector[Double],
                                                     gammaShape: Double,
                                                     k: Int): (BDV[Double], BDM[Double], List[Int]) = {
    val (ids: List[Int], cts: Array[Double]) = termCounts match {
      case v: BDV[Double] => ((0 until v.size).toList, v.data)
      case v: BSV[Double] => (v.index.toList, v.data)
    }
    // Initialize the variational distribution q(theta|gamma) for the mini-batch
    val gammad: BDV[Double] =
      new Gamma(gammaShape, 1.0 / gammaShape).samplesVector(k)                   // K
    val expElogthetad: BDV[Double] = exp(LDAUtils.dirichletExpectation(gammad))  // K
    val expElogbetad = expElogbeta(ids, ::).toDenseMatrix                        // ids * K

    val phiNorm: BDV[Double] = expElogbetad * expElogthetad :+ 1e-100            // ids
    var meanGammaChange = 1D
    val ctsVector = new BDV[Double](cts)                                         // ids

    // Iterate between gamma and phi until convergence
    while (meanGammaChange > 1e-3) {
      val lastgamma = gammad.copy
      //        K                  K * ids               ids
      gammad := (expElogthetad :* (expElogbetad.t * (ctsVector :/ phiNorm))) :+ alpha
      expElogthetad := exp(LDAUtils.dirichletExpectation(gammad))
      // TODO: Keep more values in log space, and only exponentiate when needed.
      phiNorm := expElogbetad * expElogthetad :+ 1e-100
      meanGammaChange = sum(abs(gammad - lastgamma)) / k
    }

    val sstatsd = expElogthetad.asDenseMatrix.t * (ctsVector :/ phiNorm).asDenseMatrix
    (gammad, sstatsd, ids)
  }
} 
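For reference, the fixed-point loop above matches the per-document update of online variational Bayes for LDA (Hoffman, Blei, and Bach), which the Spark implementation this port follows is based on. Writing n_w for the word counts (ctsVector) and suppressing the document index:

    phiNorm_w = sum_k exp(E[log theta_k]) * exp(E[log beta_kw]) + 1e-100
    gamma_k  <- alpha_k + exp(E[log theta_k]) * sum_w exp(E[log beta_kw]) * n_w / phiNorm_w

The loop repeats until the mean absolute change in gamma falls below 1e-3.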
Example 4
Source File: ARDRBFKernelTest.scala    From spark-gp   with Apache License 2.0
package org.apache.spark.ml.commons.kernel

import breeze.linalg.{all, DenseMatrix => BDM, DenseVector => BDV}
import breeze.numerics.abs
import org.apache.spark.ml.linalg.Vectors
import org.scalatest.FunSuite

class ARDRBFKernelTest extends FunSuite {
  private val dataset = Array(Array(1d, 2d), Array(2d, 3d), Array(5d, 7d)).map(Vectors.dense)

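  // Perturbing every component of beta by the same scalar h gives a central
  // difference along the all-ones direction, i.e. the sum of the per-component
  // derivative matrices, matching the reduce(_ + _) on the analytical side.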
  private def computationalDerivative(beta: BDV[Double], h: Double): BDM[Double] = {
    val left = new ARDRBFKernel(beta - h)
    val right = new ARDRBFKernel(beta + h)

    left.setTrainingVectors(dataset)
    right.setTrainingVectors(dataset)

    (right.trainingKernel() - left.trainingKernel()) / (2 * h)
  }

  test("being called after `setTrainingVector`," +
    " `derivative` should return the correct kernel matrix derivative") {
    val beta = BDV[Double](0.2, 0.3)
    val ard = new ARDRBFKernel(beta)
    ard.setTrainingVectors(dataset)

    val analytical = ard.trainingKernelAndDerivative()._2.reduce(_ + _)
    val computational = computationalDerivative(beta, 1e-3)

    assert(all(abs(analytical - computational) <:< 1e-3))
  }

} 
Example 5
Source File: RBFKernelTest.scala    From spark-gp   with Apache License 2.0
package org.apache.spark.ml.commons.kernel

import breeze.linalg.{DenseMatrix, DenseVector, all}
import breeze.numerics.abs
import org.apache.spark.ml.linalg.Vectors
import org.scalatest.FunSuite

class RBFKernelTest extends FunSuite {
  test("Calling `trainingKernel` before `setTrainingVectors` " +
    "yields `TrainingVectorsNotInitializedException") {
    val rbf = new RBFKernel()

    assertThrows[TrainingVectorsNotInitializedException] {
      rbf.trainingKernel()
    }
  }

  test("Calling `derivative` before `setTrainingVectors` " +
    "yields `TrainingVectorsNotInitializedException") {
    val rbf = new RBFKernel()

    assertThrows[TrainingVectorsNotInitializedException] {
      rbf.trainingKernelAndDerivative()
    }
  }

  private val dataset = Array(Array(1d, 2d), Array(2d, 3d), Array(5d, 7d)).map(Vectors.dense)

  test("being called after `setTrainingVector`," +
    " `trainingKernel` should return the correct kernel matrix") {
    val rbf = new RBFKernel(math.sqrt(0.2))
    rbf.setTrainingVectors(dataset)

    val correctKernelMatrix = DenseMatrix((1.000000e+00, 6.737947e-03, 3.053624e-45),
                                          (6.737947e-03, 1.000000e+00, 7.187782e-28),
                                          (3.053624e-45, 7.187782e-28, 1.000000e+00))

    assert(all(abs(rbf.trainingKernel() - correctKernelMatrix) <:< 1e-4))
  }

  private def computationalDerivative(sigma: Double, h: Double) = {
    val rbfLeft = new RBFKernel(sigma - h)
    val rbfRight = new RBFKernel(sigma + h)

    rbfLeft.setTrainingVectors(dataset)
    rbfRight.setTrainingVectors(dataset)

    (rbfRight.trainingKernel() - rbfLeft.trainingKernel()) / (2 * h)
  }

  test("being called after `setTrainingVector`," +
    " `derivative` should return the correct kernel matrix derivative") {
    val rbf = new RBFKernel(0.2)
    rbf.setTrainingVectors(dataset)

    val analytical = rbf.trainingKernelAndDerivative()._2(0)
    val computational = computationalDerivative(0.2, 1e-3)

    assert(all(abs(analytical - computational) <:< 1e-3))
  }

  test("crossKernel returns correct kernel") {
    val rbf = new RBFKernel(math.sqrt(0.2))
    rbf.setTrainingVectors(dataset.drop(1))
    val crossKernel = rbf.crossKernel(dataset.take(1))
    val correctCrossKernel = DenseMatrix((6.737947e-03, 3.053624e-45))
    assert(all(abs(crossKernel - correctCrossKernel) <:< 1e-4))
  }

  test("crossKernel returns correct kernel if called on a single vector") {
    val rbf = new RBFKernel(math.sqrt(0.2))
    rbf.setTrainingVectors(dataset.drop(1))
    val crossKernel = rbf.crossKernel(dataset(0))
    val correctCrossKernel = DenseVector(6.737947e-03, 3.053624e-45).t
    assert(all(abs(crossKernel - correctCrossKernel) <:< 1e-4))
  }
} 
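The hard-coded matrices in these tests are consistent with the standard RBF kernel k(x, y) = exp(-||x - y||^2 / (2 * sigma^2)): with sigma^2 = 0.2, the entry for x = (1, 2) and y = (2, 3) is exp(-2 / 0.4) = exp(-5) ≈ 6.737947e-3. A quick self-contained check (hypothetical helper, not part of the spark-gp API):

import breeze.linalg.DenseVector

object RBFCheck extends App {
  // k(x, y) = exp(-||x - y||^2 / (2 * sigma^2))
  def rbf(x: DenseVector[Double], y: DenseVector[Double], sigma: Double): Double = {
    val d = x - y
    math.exp(-(d dot d) / (2 * sigma * sigma))
  }

  // ≈ 6.737947e-3, entry (0, 1) of correctKernelMatrix above
  println(rbf(DenseVector(1.0, 2.0), DenseVector(2.0, 3.0), math.sqrt(0.2)))
}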
Example 6
Source File: IntegratorTest.scala    From spark-gp   with Apache License 2.0
package org.apache.spark.ml.commons.util

import breeze.numerics.{abs, sigmoid, sqrt}
import breeze.stats.distributions.{Gaussian, RandBasis}
import org.apache.commons.math3.random.MersenneTwister
import org.scalatest.FunSuite


class IntegratorTest extends FunSuite {

  test("testExpectedOfFunctionOfNormal") {
    val f = (x: Double) => sigmoid(x)
    val integrator = new Integrator(100)
    val mean = 0.5
    val variance = 3
    val sd = sqrt(variance)

    val testResult = integrator.expectedOfFunctionOfNormal(mean, variance, f)

    val gg = new Gaussian(mean, sd)(new RandBasis(new MersenneTwister()))
    val mcIters = 100000
    val values = gg.sample(mcIters).map(f)
    val mcResult = values.sum / mcIters
    val mcSD = sqrt(values.map(_ - mcResult).map(x => x * x).sum / mcIters) / sqrt(mcIters)
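    // mcSD is the standard error of the Monte Carlo estimate; the quadrature
    // result should agree with the MC estimate to within three standard errors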
    assert(abs(mcResult - testResult) < 3 * mcSD)
  }

} 
Example 7
Source File: normDist.scala    From DynaML   with Apache License 2.0
package io.github.mandar2812.dynaml.algebra

import breeze.generic.UFunc
import breeze.linalg.sum
import breeze.numerics.{abs, pow}


object normDist extends UFunc {

  implicit object implDV extends Impl2[SparkVector, Double, Double] {
    def apply(a: SparkVector, p: Double) = {
      assert(p >= 1.0, "For an L_p norm to be computed p >= 1.0")
      math.pow(a._vector.values.map(x => math.pow(math.abs(x), p)).sum(), 1.0/p)
    }
  }
}

object normBDist extends UFunc {
  implicit object implBlockedDV extends Impl2[SparkBlockedVector, Double, Double] {
    def apply(a: SparkBlockedVector, p: Double) = {
      assert(p >= 1.0, "For an L_p norm to be computed p >= 1.0")
      math.pow(a._vector.values.map(x => sum(pow(abs(x), p))).sum(), 1.0/p)
    }
  }

  implicit object implPartitionedDV extends Impl2[PartitionedVector, Double, Double] {
    def apply(a: PartitionedVector, p: Double) = {
      assert(p >= 1.0, "For an L_p norm to be computed p >= 1.0")
      math.pow(a._data.map(_._2).map(x => sum(pow(abs(x), p))).sum, 1.0/p)
    }
  }


} 
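All three implementations compute the same quantity over differently distributed data; on a local Breeze vector the L_p norm reduces to a few lines. A minimal sketch (hypothetical helper, not part of DynaML):

import breeze.linalg.{DenseVector, sum}
import breeze.numerics.{abs, pow}

object LpNormSketch extends App {
  // ||x||_p = (sum_i |x_i|^p)^(1 / p), defined for p >= 1
  def lpNorm(x: DenseVector[Double], p: Double): Double = {
    require(p >= 1.0, "For an L_p norm to be computed p >= 1.0")
    math.pow(sum(pow(abs(x), p)), 1.0 / p)
  }

  println(lpNorm(DenseVector(3.0, -4.0), 2.0)) // 5.0
}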
Example 8
Source File: TensorLDAModelTest.scala    From spectrallda-tensorspark   with Apache License 2.0
package edu.uci.eecs.spectralLDA.algorithm

import breeze.linalg.{DenseMatrix, DenseVector, SparseVector, norm}
import breeze.numerics.abs
import org.scalatest._
import org.apache.spark.SparkContext
import edu.uci.eecs.spectralLDA.testharness.Context

class TensorLDAModelTest extends FlatSpec with Matchers {

  private val sc: SparkContext = Context.getSparkContext

  "Multinomial log-likelihood" should "be correct" in {
    val p = DenseVector[Double](0.2, 0.5, 0.3)
    val x1 = DenseVector[Double](20, 50, 30)
    val x2 = DenseVector[Double](40, 40, 20)

    abs(TensorLDAModel.multinomialLogLikelihood(p, x1) - (-4.697546)) should be <= 1e-6
    abs(TensorLDAModel.multinomialLogLikelihood(p, x2) - (-15.42038)) should be <= 1e-6
  }
} 
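The expected values in this test are consistent with the full multinomial log-likelihood, including the multinomial coefficient. A standalone sketch of that formula (a re-derivation for illustration, not TensorLDAModel's actual code):

import breeze.linalg.{DenseVector, sum}
import breeze.numerics.{lgamma, log}

object MultinomialLogLikelihoodSketch extends App {
  // log P(x | p) = lgamma(n + 1) - sum_i lgamma(x_i + 1) + sum_i x_i * log(p_i),
  // where n = sum_i x_i
  def multinomialLogLikelihood(p: DenseVector[Double], x: DenseVector[Double]): Double =
    lgamma(sum(x) + 1.0) - sum(lgamma(x :+ 1.0)) + sum(x :* log(p))

  val p = DenseVector(0.2, 0.5, 0.3)
  println(multinomialLogLikelihood(p, DenseVector(20.0, 50.0, 30.0))) // ≈ -4.697546
}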
Example 9
Source File: Norms.scala    From doddle-model   with Apache License 2.0
package io.picnicml.doddlemodel.preprocessing

import breeze.linalg.{Axis, max, sum}
import breeze.numerics.{abs, pow, sqrt}
import io.picnicml.doddlemodel.data.{Features, RealVector}

object Norms {

  sealed trait Norm {
    def apply(x: Features): RealVector
  }

  final case object L1Norm extends Norm {
    override def apply(x: Features): RealVector = sum(abs(x), Axis._1)
  }

  final case object L2Norm extends Norm {
    override def apply(x: Features): RealVector = sqrt(sum(pow(x, 2), Axis._1))
  }

  final case object MaxNorm extends Norm {
    override def apply(x: Features): RealVector = max(abs(x), Axis._1)
  }
}
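Each norm maps an n x d feature matrix to a length-n vector, one value per row (Axis._1 collapses the columns). A quick standalone illustration of the same three formulas, using plain Breeze types rather than the doddle-model Features alias:

import breeze.linalg.{Axis, DenseMatrix, max, sum}
import breeze.numerics.{abs, pow, sqrt}

object NormsSketch extends App {
  // two rows (samples), three columns (features); norms are computed per row
  val x = DenseMatrix((1.0, -2.0, 2.0), (0.0, 3.0, -4.0))

  println(sum(abs(x), Axis._1))          // L1:  DenseVector(5.0, 7.0)
  println(sqrt(sum(pow(x, 2), Axis._1))) // L2:  DenseVector(3.0, 5.0)
  println(max(abs(x), Axis._1))          // max: DenseVector(2.0, 4.0)
}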