com.amazonaws.auth.AWSCredentialsProvider Scala Examples

The following examples show how to use com.amazonaws.auth.AWSCredentialsProvider. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
Example 1
Source File: AmazonKinesis.scala    From aws-kinesis-scala   with Apache License 2.0 5 votes vote down vote up
package jp.co.bizreach.kinesis

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesis.{AmazonKinesisClientBuilder, AmazonKinesis => AWSKinesis}
import jp.co.bizreach.kinesis.action.PutRecordAction

object AmazonKinesis {

  /** Creates a client for the given region using the default credentials chain. */
  def apply()(implicit region: Regions): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard.withRegion(region).build())

  /** Creates a client with explicit credentials. */
  def apply(credentials: AWSCredentialsProvider)(implicit region: Regions): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withRegion(region)
      .build())

  /** Creates a client bound to an explicit endpoint (e.g. a local Kinesis emulator). */
  def apply(endpointConfiguration: EndpointConfiguration): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard
      .withEndpointConfiguration(endpointConfiguration)
      .build())

  /** Creates a client with a custom HTTP/client configuration. */
  def apply(config: ClientConfiguration)(implicit region: Regions): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard
      .withClientConfiguration(config)
      .withRegion(region)
      .build())

  /** Creates a client with a custom client configuration and an explicit endpoint. */
  def apply(config: ClientConfiguration, endpointConfiguration: EndpointConfiguration): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard
      .withClientConfiguration(config)
      .withEndpointConfiguration(endpointConfiguration)
      .build())

  /** Creates a client with explicit credentials and an explicit endpoint. */
  def apply(credentials: AWSCredentialsProvider, endpointConfiguration: EndpointConfiguration): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withEndpointConfiguration(endpointConfiguration)
      .build())

  /** Creates a client with explicit credentials and a custom client configuration. */
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration)(implicit region: Regions): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withRegion(region)
      .build())

  /** Creates a client with explicit credentials, client configuration and endpoint. */
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration, endpointConfiguration: EndpointConfiguration): AmazonKinesis =
    apply(AmazonKinesisClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withEndpointConfiguration(endpointConfiguration)
      .build())

  /** Wraps an already-built AWS SDK client with the [[PutRecordAction]] retry behaviour.
    * Every other overload delegates here, so wrapping happens in exactly one place.
    */
  def apply(client: AWSKinesis): AmazonKinesis =
    new AmazonKinesis(client) with PutRecordAction
}

// Thin synchronous wrapper around the AWS SDK Kinesis client. Instances must be
// created with a PutRecordAction mix-in (see the companion object), which
// supplies withPutsRetry.
class AmazonKinesis(client: AWSKinesis){
  self: PutRecordAction =>

  
  /** Sends the request's records via the mixed-in retry strategy.
    *
    * Records are re-wrapped into a fresh PutRecordsRequest per attempt so only
    * the entries handed back by withPutsRetry are resent. The Left/Right split
    * of the result comes from PutRecordAction (not visible here) — presumably
    * Left = failed entry, Right = succeeded entry; confirm against that trait.
    */
  def putRecordsWithRetry(request: PutRecordsRequest): Seq[Either[PutRecordsResultEntry, PutRecordsResultEntry]] = {
    withPutsRetry(request.records){ entry =>
      client.putRecords(PutRecordsRequest(request.streamName, entry))
    }
  }

  /** Releases the underlying AWS SDK client's resources. */
  def shutdown(): Unit = {
    client.shutdown()
  }
} 
Example 2
Source File: AwsConfig.scala    From cave   with MIT License 5 votes vote down vote up
package com.cave.metrics.data

import com.amazonaws.auth.{AWSCredentials, AWSCredentialsProvider, BasicAWSCredentials, ClasspathPropertiesFileCredentialsProvider}
import com.typesafe.config.Config

/** Endpoint/service/region triple shared by each AWS service section in AwsConfig. */
trait AmazonWebServiceConfig {
  def endpoint: String
  def service: String
  def region: String
}

/** Typed accessors over the application's "aws" configuration section.
  *
  * All members are lazy so that only the sections a component actually uses
  * need to be present in the underlying Typesafe Config.
  */
class AwsConfig(config: Config) {

  private lazy val awsConfig = config.resolve.getConfig("aws")
  private lazy val rdsConfig = awsConfig.resolve.getConfig("rds")

  private lazy val awsCredentialsConfig = awsConfig.getConfig("credentials")
  lazy val awsCredentialsProvider = createAwsCredentialsProvider(
    awsCredentialsConfig.getString("access-key"),
    awsCredentialsConfig.getString("secret-key"))

  // NOTE: intentionally no logging of the credentials here — the original code
  // printed the access key id to stdout at construction time, which both leaked
  // credential material into logs and forced eager evaluation of the provider.

  private lazy val kinesisConfig = awsConfig.getConfig("kinesis")
  lazy val awsKinesisConfig = makeAmazonWebServiceConfig(kinesisConfig)

  private lazy val awsKinesisStreamConfig = kinesisConfig.getConfig("stream")
  lazy val rawStreamName = awsKinesisStreamConfig.getString("raw")
  lazy val processedStreamName = awsKinesisStreamConfig.getString("processed")

  private lazy val sqsConfig = awsConfig.getConfig("sqs")
  lazy val awsSQSConfig = makeAmazonWebServiceConfig(sqsConfig)

  lazy val longPollTimeInSeconds = sqsConfig.getInt("longPollTimeInSeconds")

  private lazy val awsSqsQueuesConfig = sqsConfig.getConfig("queues")
  lazy val configurationChangesQueueName = awsSqsQueuesConfig.getString("config-changes")
  lazy val alarmScheduleQueueName = awsSqsQueuesConfig.getString("alarm-schedule")

  private lazy val autoScalingConfig = awsConfig.getConfig("autoscaling")
  lazy val awsAutoScalingConfig = makeAmazonWebServiceConfig(autoScalingConfig)

  private lazy val ec2Config = awsConfig.getConfig("ec2")
  lazy val awsEC2Config = makeAmazonWebServiceConfig(ec2Config)

  private lazy val snsConfig = awsConfig.getConfig("sns")
  lazy val awsSNSConfig = makeAmazonWebServiceConfig(snsConfig)

  private lazy val awsSnsTopicsConfig = snsConfig.getConfig("topics")
  lazy val configurationChangesTopicName = awsSnsTopicsConfig.getString("config-changes")

  lazy val rdsJdbcDatabaseClass = rdsConfig.getString("database-class")
  lazy val rdsJdbcDatabaseUrl = rdsConfig.getString("database-jdbc")
  lazy val rdsJdbcDatabaseServer = rdsConfig.getString("database-server")
  lazy val rdsJdbcDatabasePort = rdsConfig.getString("database-port")
  lazy val rdsJdbcDatabaseName = rdsConfig.getString("database-name")
  lazy val rdsJdbcDatabaseUser = rdsConfig.getString("database-user")
  lazy val rdsJdbcDatabasePassword = rdsConfig.getString("database-password")
  lazy val rdsJdbcDatabasePoolSize = rdsConfig.getInt("pool-size")
  lazy val rdsJdbcConnectionTimeout = rdsConfig.getInt("connection-timeout")

  lazy val leadershipTermTimeoutSeconds = config.getInt("leadershipTermTimeoutSeconds")
  lazy val leadershipTermLengthSeconds = config.getInt("leadershipTermLengthSeconds")

  /** Builds a credentials provider from the configured key pair.
    *
    * If either key is blank, falls back to the classpath-properties provider
    * (the import of ClasspathPropertiesFileCredentialsProvider indicates this
    * was the original intent — TODO confirm against the project's history; the
    * helper was missing from this excerpt and has been reconstructed).
    */
  private[this] def createAwsCredentialsProvider(accessKey: String, secretKey: String): AWSCredentialsProvider =
    if (accessKey == null || accessKey.isEmpty || secretKey == null || secretKey.isEmpty)
      new ClasspathPropertiesFileCredentialsProvider()
    else
      new AWSCredentialsProvider {
        override def getCredentials: AWSCredentials = new BasicAWSCredentials(accessKey, secretKey)
        override def refresh(): Unit = ()
      }

  /** Adapts a config section with endpoint/service/region keys into an AmazonWebServiceConfig. */
  private[this] def makeAmazonWebServiceConfig(config: Config) = new AmazonWebServiceConfig {
      override def endpoint: String = config.getString("endpoint")
      override def service: String = config.getString("service")
      override def region: String = config.getString("region")
    }
} 
Example 3
Source File: S3ConfigManager.scala    From teamcity-s3-plugin   with Apache License 2.0 5 votes vote down vote up
package com.gu.teamcity

import java.io.{File, PrintWriter}

import com.amazonaws.auth.{BasicAWSCredentials, AWSCredentialsProvider, AWSCredentials}
import jetbrains.buildServer.serverSide.ServerPaths
import org.json4s._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization
import org.json4s.native.Serialization._

// Plugin configuration persisted as JSON in <TeamCity config dir>/s3.json.
// Every field is optional so a partially-filled settings form can be saved.
case class S3Config(
  artifactBucket: Option[String], buildManifestBucket: Option[String], tagManifestBucket: Option[String],
  awsAccessKey: Option[String], awsSecretKey: Option[String]
)

// Loads and persists S3Config from the TeamCity config directory, and doubles
// as an AWSCredentialsProvider backed by the stored access/secret key pair.
class S3ConfigManager(paths: ServerPaths) extends AWSCredentialsProvider {
  implicit val formats = Serialization.formats(NoTypeHints)

  val configFile = new File(s"${paths.getConfigDir}/s3.json")

  // Initial state: parsed from disk when the file exists, otherwise None.
  // extractOpt also yields None on a malformed file rather than throwing.
  private[teamcity] var config: Option[S3Config] = {
    if (configFile.exists()) {
      parse(configFile).extractOpt[S3Config]
    } else None
  }

  def artifactBucket: Option[String] = config.flatMap(_.artifactBucket)
  def buildManifestBucket: Option[String] = config.flatMap(_.buildManifestBucket)
  def tagManifestBucket: Option[String] = config.flatMap(_.tagManifestBucket)

  // If the incoming config has no secret key but the access key is unchanged,
  // carry the previously stored secret forward instead of erasing it (lets the
  // settings form omit the secret on resubmission).
  private[teamcity] def update(config: S3Config): Unit = {
    this.config = Some(if (config.awsSecretKey.isEmpty && config.awsAccessKey == this.config.flatMap(_.awsAccessKey)) {
      config.copy(awsSecretKey = this.config.flatMap(_.awsSecretKey))
    } else config)
  }

  /** Applies `update` and writes the resulting config to disk as pretty JSON.
    * Synchronized so concurrent saves cannot interleave file writes.
    */
  def updateAndPersist(newConfig: S3Config): Unit = {
    synchronized {
      update(newConfig)
      val out = new PrintWriter(configFile, "UTF-8")
      try { writePretty(config, out) }
      finally { out.close }
    }
  }

  // Summary for display; the secret key is deliberately omitted.
  def details: Map[String, Option[String]] = Map(
    "artifactBucket" -> artifactBucket,
    "buildManifestBucket" -> buildManifestBucket,
    "tagManifestBucket" -> tagManifestBucket,
    "accessKey" -> config.flatMap(_.awsAccessKey)
  )

  // NOTE(review): returns null when no complete key pair is configured (the
  // author's "this is sad" acknowledgement); callers must tolerate null.
  override def getCredentials: AWSCredentials = (for {
    c <- config
    accessKey <- c.awsAccessKey
    secretKey <- c.awsSecretKey
  } yield new BasicAWSCredentials(accessKey, secretKey)).getOrElse(null) // Yes, this is sad

  override def refresh(): Unit = ()
}

object S3ConfigManager {
  // Element names referenced elsewhere in the plugin (usage not visible in
  // this excerpt — presumably settings/XML keys; confirm at call sites).
  val bucketElement = "bucket"
  val s3Element = "S3"
} 
Example 4
Source File: KinesisStreamConsumerConfig.scala    From gfc-aws-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.gilt.gfc.aws.kinesis.akka

import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{InitialPositionInStream, KinesisClientLibConfiguration}
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory
import com.gilt.gfc.aws.kinesis.client.KinesisClientEndpoints

import scala.concurrent.duration._

/** Configuration for an Akka-based Kinesis (KCL) stream consumer.
  *
  * Separate credential providers can be supplied for the Kinesis, DynamoDB
  * and CloudWatch clients; each defaults to the standard AWS provider chain.
  * Failover/idle-time defaults come from KinesisClientLibConfiguration.
  *
  * @tparam T message type handled by the consumer; not referenced in this
  *           class itself — presumably consumed by the processing side
  *           (confirm at call sites)
  */
case class KinesisStreamConsumerConfig[T](
  streamName: String,
  applicationName: String,
  kinesisCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
  dynamoCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
  cloudWatchCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain(),
  metricsFactory: IMetricsFactory = new NullMetricsFactory(),
  checkPointInterval: Duration = 5.minutes,
  retryConfig: RetryConfig = RetryConfig(1.second, 1.second, 3),
  initialPositionInStream: InitialPositionInStream = InitialPositionInStream.LATEST,
  regionName: Option[String] = None,
  dynamoDBKinesisAdapterClient: Option[AmazonDynamoDBStreamsAdapterClient] = None,
  kinesisClientEndpoints: Option[KinesisClientEndpoints] = None,
  failoverTimeoutMillis: Long = KinesisClientLibConfiguration.DEFAULT_FAILOVER_TIME_MILLIS,
  maxRecordsPerBatch: Option[Int] = None,
  idleTimeBetweenReads: FiniteDuration = KinesisClientLibConfiguration.DEFAULT_IDLETIME_BETWEEN_READS_MILLIS.millis
) {

  
  /** Returns a copy that uses the same credentials provider for all three AWS clients. */
  def withCommonCredentialsProvider(credentialsProvider: AWSCredentialsProvider): KinesisStreamConsumerConfig[T] =
    this.copy(
      kinesisCredentialsProvider = credentialsProvider,
      dynamoCredentialsProvider = credentialsProvider,
      cloudWatchCredentialsProvider = credentialsProvider
    )
} 
Example 5
Source File: KCLConfiguration.scala    From gfc-aws-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.gilt.gfc.aws.kinesis.client

import java.util.UUID
import scala.concurrent.duration._

import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{InitialPositionInStream, KinesisClientLibConfiguration}


  /** Builds a KinesisClientLibConfiguration for a KCL worker.
    *
    * The lease-table name is derived from "<applicationName>.<streamName>"
    * with any character outside [a-zA-Z0-9_.-] replaced by '-'. The worker id
    * combines HostName (defined elsewhere in this object) with a random UUID
    * so concurrent workers on one host stay distinct.
    */
  def apply(applicationName: String
            , streamName: String
            , kinesisCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()
            , dynamoCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()
            , cloudWatchCredentialsProvider: AWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()
            , regionName: Option[String] = None
            , initialPositionInStream: InitialPositionInStream = InitialPositionInStream.LATEST
            , endpointConfiguration: Option[KinesisClientEndpoints] = None
            , failoverTimeoutMillis: Long = KinesisClientLibConfiguration.DEFAULT_FAILOVER_TIME_MILLIS
            , maxRecordsPerBatch: Int = KinesisClientLibConfiguration.DEFAULT_MAX_RECORDS
            , idleTimeBetweenReads: FiniteDuration = KinesisClientLibConfiguration.DEFAULT_IDLETIME_BETWEEN_READS_MILLIS.millis): KinesisClientLibConfiguration = {

    // Sanitise into a valid DynamoDB table name.
    val dynamoTableName = (s"${applicationName}.${streamName}")
      .replaceAll("[^a-zA-Z0-9_.-]", "-")

    val conf = new KinesisClientLibConfiguration(
      dynamoTableName,
      streamName,
      kinesisCredentialsProvider,
      dynamoCredentialsProvider,
      cloudWatchCredentialsProvider,
      s"${HostName}:${UUID.randomUUID()}"
    ).withRegionName(regionName.orNull)
     .withInitialPositionInStream(initialPositionInStream)
     .withFailoverTimeMillis(failoverTimeoutMillis)
     .withMaxRecords(maxRecordsPerBatch)
     .withIdleTimeBetweenReadsInMillis(idleTimeBetweenReads.toMillis)

    // When explicit endpoints are supplied (e.g. for localstack), apply them;
    // otherwise return conf unchanged. fold's result is the return value.
    endpointConfiguration.fold(conf)( endpoints =>
      conf.withDynamoDBEndpoint(endpoints.dynamoDBEndpoint)
          .withKinesisEndpoint(endpoints.kinesisEndpoint)
    )

  }
} 
Example 6
Source File: AWSSigningJestClientFactory.scala    From haystack-traces   with Apache License 2.0 5 votes vote down vote up
package com.expedia.www.haystack.trace.commons.clients.es

import java.time.{LocalDateTime, ZoneId}

import com.expedia.www.haystack.trace.commons.config.entities.AWSRequestSigningConfiguration
import com.google.common.base.Supplier
import io.searchbox.client.JestClientFactory
import org.apache.http.impl.client.HttpClientBuilder
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder
import org.slf4j.LoggerFactory
import vc.inreach.aws.request.{AWSSigner, AWSSigningRequestInterceptor}
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.internal.StaticCredentialsProvider



/** Jest client factory that signs every Elasticsearch request with AWS SigV4.
  *
  * Credentials come from the static access/secret key pair in the config when
  * both are present, otherwise from the default AWS provider chain.
  */
class AWSSigningJestClientFactory(awsRequestSigningConfig: AWSRequestSigningConfiguration) extends JestClientFactory {
  private val LOGGER = LoggerFactory.getLogger(classOf[AWSSigningJestClientFactory])

  val awsSigner = new AWSSigner(getCredentialProvider, awsRequestSigningConfig.region, awsRequestSigningConfig.awsServiceName, new ClockSupplier)
  val requestInterceptor = new AWSSigningRequestInterceptor(awsSigner)

  override def configureHttpClient(builder: HttpClientBuilder): HttpClientBuilder = {
    builder.addInterceptorLast(requestInterceptor)
  }

  override def configureHttpClient(builder: HttpAsyncClientBuilder): HttpAsyncClientBuilder = {
    builder.addInterceptorLast(requestInterceptor)
  }

  /** Chooses the credential provider.
    *
    * Fix: the original called `secretKey.get` whenever accessKey was defined,
    * throwing NoSuchElementException if only the access key was configured.
    * Now both keys must be present for the static provider; otherwise we warn
    * (if half-configured) and fall back to the default chain.
    */
  def getCredentialProvider: AWSCredentialsProvider = {
    val staticProvider = for {
      accessKey <- awsRequestSigningConfig.accessKey
      secretKey <- awsRequestSigningConfig.secretKey
    } yield {
      LOGGER.info("using static aws credential provider with access and secret key for ES")
      new StaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)): AWSCredentialsProvider
    }
    staticProvider.getOrElse {
      if (awsRequestSigningConfig.accessKey.isDefined || awsRequestSigningConfig.secretKey.isDefined) {
        LOGGER.warn("incomplete static aws credentials (need both access and secret key); ignoring them")
      }
      LOGGER.info("using default credential provider chain for ES")
      new DefaultAWSCredentialsProviderChain
    }
  }
}

/** Supplies the current UTC wall-clock time to the AWS request signer. */
class ClockSupplier extends Supplier[LocalDateTime] {
  override def get(): LocalDateTime = LocalDateTime.now(ZoneId.of("UTC"))
}
Example 7
Source File: AuthUtilSpec.scala    From shield   with MIT License 5 votes vote down vote up
package shield.implicits

import com.amazonaws.auth.{AWSCredentials, AWSCredentialsProvider, AWSCredentialsProviderChain}
import org.specs2.mutable.Specification
import shield.aws.{AWSSigningConfig, AuthUtil}
import spray.http._


// Specs for the SigV4 signing helpers in shield.aws.AuthUtil.
class AuthUtilSpec extends Specification {
  // Set consistent times that will produce consistent results for the tests
  val d1 = "20160315T141234Z"
  val d2 = "20160315"

  // Create a new config; these values are typically found in application.conf.
  // Uses StaticCredentialProvider (below) so signatures are reproducible.
  val config = new AWSSigningConfig("example-elasticsearch-host", "us-west-1", "es", true, new AWSCredentialsProviderChain(new StaticCredentialProvider()))

  "AuthUtil" should {

    // Known SHA-256 digests of fixed inputs.
    "Use SHA256" in {
      println(AuthUtil.hashAsString("Hello world!"))
      AuthUtil.hashAsString("Hello world!") must be equalTo "c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a"
      AuthUtil.hashAsString("123$%^abcDEF") must be equalTo "3b43642576e2c2cf349f34ff7f10e700bf485e6982647a50e361e883a5aaafa2"
      AuthUtil.hashAsString("  _***~`  ") must be equalTo "0597e54e8278a8673f09842d03e4af3a2688d1a15a55a640968382a5311416b4"
    }

    "Create canonical request hash" in {
      val request = new HttpRequest(HttpMethods.GET, Uri("https://example-elasticsearch-host.com:80"), List(), HttpEntity(HttpData("Sample data for a sample request ~*)@#$) @#(((")))

      println(AuthUtil.createCanonicalHash(request, "example-elasticsearch-host"))
      AuthUtil.createCanonicalHash(request, "example-elasticsearch-host") must be equalTo "05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"
    }

    // The string-to-sign embeds the fixed dates, region, service and the
    // canonical request hash from the previous spec.
    "Create string to sign from a given canonical request" in {
      val canonicalRequestHash = "05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"

      AuthUtil.createStringToSign(d1, d2, config.region, config.service, canonicalRequestHash) must be equalTo "AWS4-HMAC-SHA256\n20160315\n20160315T141234Z/us-west-1/es/aws4_request\n05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"
    }

    // HMAC-SHA256 over the string-to-sign with the derived signing key.
    "Create a signature" in {
      val stringToSign = "AWS4-HMAC-SHA256\n20160315\n20160315T141234Z/us-west-1/es/aws4_request\n05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"
      val signature = AuthUtil.hmacSHA256AsString("AWS4-HMAC-SHA256\n20160315\n20160315T141234Z/us-west-1/es/aws4_request\n05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d", AuthUtil.createSignatureKey(config.getSecretKey(), d1, config.region, config.service))

      signature must be equalTo "68e811337b35141320236cf585a7fefad71d8948e4d1e9d5eb3583474d31eb6a"
    }
  }
}

//Create a static credential provider so that the access key and secret key stay the same for the purposes of testing
// Static credential provider so the access key and secret key stay the same
// across calls, keeping test signatures reproducible.
class StaticCredentialProvider extends AWSCredentialsProvider {

  override def getCredentials: AWSCredentials =
    new AWSCredentials {
      override def getAWSAccessKeyId: String = "AccessKeyId"

      override def getAWSSecretKey: String = "SuperSecretKey"
    }

  // Nothing to refresh: the credentials never change.
  override def refresh(): Unit = { }
}
Example 8
Source File: DynaliteContainer.scala    From testcontainers-scala   with MIT License 5 votes vote down vote up
package com.dimafeng.testcontainers

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import org.testcontainers.dynamodb.{DynaliteContainer => JavaDynaliteContainer}

/** Scala wrapper around the testcontainers Dynalite (DynamoDB emulator) container. */
case class DynaliteContainer(
  dockerImageName: String = DynaliteContainer.defaultDockerImageName
) extends SingleContainer[JavaDynaliteContainer] {

  // Underlying Java container, exposing Dynalite on port 4567.
  override val container: JavaDynaliteContainer = {
    val underlying = new JavaDynaliteContainer(dockerImageName)
    underlying.withExposedPorts(4567)
    underlying
  }

  /** Pre-configured DynamoDB client pointing at the running container. */
  def client: AmazonDynamoDB = container.getClient

  /** Endpoint configuration for building custom AWS clients against the container. */
  def endpointConfiguration: AwsClientBuilder.EndpointConfiguration =
    container.getEndpointConfiguration

  /** Dummy credentials accepted by the emulator. */
  def credentials: AWSCredentialsProvider = container.getCredentials
}

object DynaliteContainer {

  // Image used when the caller does not specify one.
  val defaultDockerImageName = "quay.io/testcontainers/dynalite:v1.2.1-1"

  /** Container definition for the lazy/per-suite container APIs. */
  case class Def(
    dockerImageName: String = DynaliteContainer.defaultDockerImageName
  ) extends ContainerDef {

    override type Container = DynaliteContainer

    override def createContainer(): DynaliteContainer =
      DynaliteContainer(dockerImageName)
  }
}
Example 9
Source File: LocalStackContainer.scala    From testcontainers-scala   with MIT License 5 votes vote down vote up
package com.dimafeng.testcontainers

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder
import org.testcontainers.containers.localstack.{LocalStackContainer => JavaLocalStackContainer}

/** Scala wrapper around the testcontainers LocalStack container. */
case class LocalStackContainer(
  tag: String = LocalStackContainer.defaultTag,
  services: Seq[LocalStackContainer.Service] = Seq.empty
) extends SingleContainer[JavaLocalStackContainer] {

  // Underlying Java container with the requested AWS services enabled.
  override val container: JavaLocalStackContainer = {
    val underlying = new JavaLocalStackContainer(tag)
    underlying.withServices(services: _*)
    underlying
  }

  /** Endpoint configuration for the given emulated AWS service. */
  def endpointConfiguration(service: LocalStackContainer.Service): AwsClientBuilder.EndpointConfiguration =
    container.getEndpointConfiguration(service)

  /** Dummy credentials accepted by LocalStack. */
  def defaultCredentialsProvider: AWSCredentialsProvider =
    container.getDefaultCredentialsProvider
}

object LocalStackContainer {

  // LocalStack image tag used when the caller does not specify one.
  val defaultTag = "0.9.4"

  type Service = JavaLocalStackContainer.Service

  /** Container definition for the lazy/per-suite container APIs. */
  case class Def(
    tag: String = LocalStackContainer.defaultTag,
    services: Seq[LocalStackContainer.Service] = Seq.empty
  ) extends ContainerDef {

    override type Container = LocalStackContainer

    override def createContainer(): LocalStackContainer =
      LocalStackContainer(tag, services)
  }
}
Example 10
Source File: ElasticLoadBalancing.scala    From ionroller   with MIT License 5 votes vote down vote up
package ionroller.aws

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancingClient
import com.amazonaws.services.elasticloadbalancing.model._

import scala.collection.JavaConverters._
import scalaz.Kleisli
import scalaz.concurrent.Task

/** Task/Kleisli wrappers over the classic (v1) Elastic Load Balancing client. */
object ElasticLoadBalancing {

  /** Builds an ELB client on the shared AWS executor. */
  def client(credentialsProvider: AWSCredentialsProvider) =
    Task(new AmazonElasticLoadBalancingClient(credentialsProvider))(awsExecutorService)

  /** Looks up a load balancer by name; None when it does not exist. */
  def getLoadBalancer(lb: String): Kleisli[Task, AWSClientCache, Option[LoadBalancerDescription]] =
    Kleisli { cache =>
      val request = new DescribeLoadBalancersRequest().withLoadBalancerNames(lb)
      Task(cache.elb.describeLoadBalancers(request).getLoadBalancerDescriptions.asScala.headOption)(awsExecutorService)
    }

  /** Registers the given EC2 instance ids with the load balancer; returns the ids reported back. */
  def registerInstances(lb: String, instances: Seq[String]): Kleisli[Task, AWSClientCache, Seq[String]] =
    Kleisli { cache =>
      val request = new RegisterInstancesWithLoadBalancerRequest()
        .withLoadBalancerName(lb)
        .withInstances(instances.map(new Instance(_)): _*)
      Task(cache.elb.registerInstancesWithLoadBalancer(request).getInstances.asScala.map(_.getInstanceId))(awsExecutorService)
    }

  /** Deregisters the given EC2 instance ids from the load balancer; returns the ids reported back. */
  def deregisterInstances(lb: String, instances: Seq[String]): Kleisli[Task, AWSClientCache, Seq[String]] =
    Kleisli { cache =>
      val request = new DeregisterInstancesFromLoadBalancerRequest()
        .withLoadBalancerName(lb)
        .withInstances(instances.map(new Instance(_)): _*)
      Task(cache.elb.deregisterInstancesFromLoadBalancer(request).getInstances.asScala.map(_.getInstanceId))(awsExecutorService)
    }

  /** Fetches the health state of every instance attached to the load balancer. */
  def describeInstanceHealth(lb: String): Kleisli[Task, AWSClientCache, Seq[InstanceState]] =
    Kleisli { cache =>
      Task {
        val request = new DescribeInstanceHealthRequest().withLoadBalancerName(lb)
        cache.elb.describeInstanceHealth(request).getInstanceStates.asScala.toSeq
      }(awsExecutorService)
    }
}
Example 11
Source File: AutoScaling.scala    From ionroller   with MIT License 5 votes vote down vote up
package ionroller.aws

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.autoscaling.model._
import com.amazonaws.services.autoscaling.{AmazonAutoScaling, AmazonAutoScalingClient}
import com.amazonaws.services.elasticloadbalancing.model.InstanceState
import com.typesafe.scalalogging.StrictLogging

import scala.collection.JavaConverters._
import scala.concurrent.duration.FiniteDuration
import scalaz._
import scalaz.concurrent.Task

// Task/Kleisli wrappers over the classic (v1) Auto Scaling client.
object AutoScaling extends StrictLogging {
  /** Builds an AutoScaling client on the shared AWS executor. */
  val client: Kleisli[Task, AWSCredentialsProvider, AmazonAutoScaling] = {
    Kleisli { credentialsProvider =>
      Task(new AmazonAutoScalingClient(credentialsProvider))(awsExecutorService)
    }
  }

  /** Describes the named ASGs (or all ASGs when `asgs` is empty), following
    * nextToken pagination until the token is absent or repeats. The
    * `t != token` guard prevents an infinite loop if AWS returns the same
    * token twice.
    */
  def getAutoScalingGroupDetails(asgs: Seq[String]): Kleisli[Task, AWSClientCache, List[AutoScalingGroup]] = {
    Kleisli { client =>

      // One page per call: accumulates results so far plus the next token.
      def go(asgsSoFar: List[AutoScalingGroup], token: Option[String]): Task[List[AutoScalingGroup]] = Task.delay {
        val req =
          if (asgs.isEmpty)
            new DescribeAutoScalingGroupsRequest()
          else
            new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(asgs: _*)

        token foreach { t => req.setNextToken(t) }
        val response = client.asg.describeAutoScalingGroups(req)
        (asgsSoFar ::: response.getAutoScalingGroups.asScala.toList, Option(response.getNextToken))
      } flatMap {
        case (events, t @ Some(newToken)) if t != token =>
          logger.debug(s"Needed multiple getAutoScalingGroups calls, token=$token newToken=$t")
          go(events, t)
        case (events, _) =>
          Task.now(events)
      }

      Task.fork(go(List.empty, None))(awsExecutorService)
    }
  }

  /** Picks one healthy ASG instance that is not yet registered with the ELB
    * (sorted for determinism); None when every healthy instance is registered.
    */
  def getUnregisteredInstance(elbInstances: Seq[InstanceState], asg: AutoScalingGroup): Option[String] = {
    val lbInstances = elbInstances.map(_.getInstanceId).toSet
    val asgInstances = asg.getInstances.asScala
    val healthyAsgInstances = asgInstances.filter(_.getHealthStatus == "Healthy").map(_.getInstanceId).toSet

    (healthyAsgInstances -- lbInstances).toSeq.sorted.headOption
  }

  /** Attaches the load balancer to the auto scaling group. */
  def attachElb(asg: AutoScalingGroup, lb: String): Kleisli[Task, AWSClientCache, AttachLoadBalancersResult] = {
    Kleisli { cache =>
      val attachRequest = new AttachLoadBalancersRequest().withAutoScalingGroupName(asg.getAutoScalingGroupName).withLoadBalancerNames(lb)
      Task(cache.asg.attachLoadBalancers(attachRequest))(awsExecutorService)
    }
  }

  /** Detaches the load balancer from the auto scaling group. */
  def detachElb(asg: AutoScalingGroup, lb: String): Kleisli[Task, AWSClientCache, DetachLoadBalancersResult] = {
    Kleisli { cache =>
      val detachRequest = new DetachLoadBalancersRequest().withAutoScalingGroupName(asg.getAutoScalingGroupName).withLoadBalancerNames(lb)
      Task(cache.asg.detachLoadBalancers(detachRequest))(awsExecutorService)
    }
  }

  /** Sets health-check type and grace period on every named ASG, in parallel
    * (gatherUnordered); the individual results are discarded.
    */
  def updateElbHealthCheck(asgs: Seq[String], healthCheckType: String, gracePeriod: FiniteDuration): Kleisli[Task, AWSClientCache, Unit] = {
    Kleisli { cache =>

      for {
        _ <- Task.gatherUnordered(
          asgs map { name =>
            Task(
              cache.asg.updateAutoScalingGroup(
                new UpdateAutoScalingGroupRequest()
                  .withAutoScalingGroupName(name)
                  .withHealthCheckType(healthCheckType)
                  .withHealthCheckGracePeriod(gracePeriod.toSeconds.toInt)
              )
            )(awsExecutorService)
          }
        )
      } yield ()
    }
  }
} 
Example 12
Source File: AWSClientCache.scala    From ionroller   with MIT License 5 votes vote down vote up
package ionroller.aws

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.autoscaling.AmazonAutoScaling
import com.amazonaws.services.ec2.AmazonEC2Client
import com.amazonaws.services.elasticbeanstalk.AWSElasticBeanstalk
import com.amazonaws.services.elasticloadbalancing.AmazonElasticLoadBalancing
import com.amazonaws.services.route53.AmazonRoute53
import com.amazonaws.services.s3.AmazonS3

import scalaz.concurrent.Task
import scalaz.{Kleisli, Nondeterminism}

// Immutable bundle of per-role AWS clients, built once per role by the
// companion object's getCache and shared thereafter.
class AWSClientCache(
  val role: String,
  val credentialsProvider: AWSCredentialsProvider,
  val route53: AmazonRoute53,
  val elasticBeanstalk: AWSElasticBeanstalk,
  val s3: AmazonS3,
  val asg: AmazonAutoScaling,
  val elb: AmazonElasticLoadBalancing
)

object AWSClientCache {
  // Role name -> client bundle. ConcurrentHashMap for safe concurrent reads.
  private[ionroller] val cache: java.util.concurrent.ConcurrentHashMap[String, AWSClientCache] = new java.util.concurrent.ConcurrentHashMap

  /** Returns the cached client bundle for a role, building (and caching) it
    * on first use. The five service clients are constructed concurrently via
    * Nondeterminism.apply5.
    *
    * NOTE(review): the check-then-act (get, then put inside the Task) is not
    * atomic — two concurrent callers for a new role may both build bundles and
    * the last put wins. Benign (extra client construction) but worth knowing.
    */
  val getCache: Kleisli[Task, String, AWSClientCache] = {
    Kleisli { role =>
      Option(cache.get(role)) match {
        case None =>
          for {
            credentials <- CredentialsProvider(role)
            route53Client = Route53.client(credentials)
            elasticBeanstalkClient = ElasticBeanstalk.client(credentials)
            s3Client = S3.client(credentials)
            asgClient = AutoScaling.client(credentials)
            elbClient = ElasticLoadBalancing.client(credentials)
            newItem <- Nondeterminism[Task].apply5(route53Client, elasticBeanstalkClient, s3Client, asgClient, elbClient) {
              case (r53, eb, s3, asg, elb) =>
                val newEntry = new AWSClientCache(role, credentials, r53, eb, s3, asg, elb)
                cache.put(role, newEntry)
                newEntry
            }
          } yield newItem

        case Some(e) => Task.now(e)
      }
    }
  }
} 
Example 13
Source File: AmazonKinesisFirehose.scala    From aws-kinesis-scala   with Apache License 2.0 5 votes vote down vote up
package jp.co.bizreach.kinesisfirehose

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesisfirehose.{AmazonKinesisFirehose => AWSKinesisFirehose,
  AmazonKinesisFirehoseClientBuilder}
import jp.co.bizreach.kinesisfirehose.action.PutRecordAction

object AmazonKinesisFirehose {

  /** Creates a client for the given region using the default credentials chain. */
  def apply()(implicit region: Regions): AmazonKinesisFirehose =
    apply(AmazonKinesisFirehoseClientBuilder.standard.withRegion(region).build())

  /** Creates a client with explicit credentials. */
  def apply(credentials: AWSCredentialsProvider)(implicit region: Regions): AmazonKinesisFirehose =
    apply(AmazonKinesisFirehoseClientBuilder.standard
      .withCredentials(credentials)
      .withRegion(region)
      .build())

  /** Creates a client with a custom HTTP/client configuration. */
  def apply(config: ClientConfiguration)(implicit region: Regions): AmazonKinesisFirehose =
    apply(AmazonKinesisFirehoseClientBuilder.standard
      .withClientConfiguration(config)
      .withRegion(region)
      .build())

  /** Creates a client with explicit credentials and a custom client configuration. */
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration)(implicit region: Regions): AmazonKinesisFirehose =
    apply(AmazonKinesisFirehoseClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withRegion(region)
      .build())

  /** Wraps an already-built AWS SDK client with the [[PutRecordAction]] retry behaviour.
    * Every other overload delegates here.
    */
  def apply(client: AWSKinesisFirehose): AmazonKinesisFirehose =
    new AmazonKinesisFirehose(client) with PutRecordAction
}

// Thin synchronous wrapper around the AWS SDK Firehose client. Instances must
// be created with a PutRecordAction mix-in (see companion object), which
// supplies withPutBatchRetry.
class AmazonKinesisFirehose(client: AWSKinesisFirehose) {
  self: PutRecordAction =>

  
  /** Sends the request's records via the mixed-in batch retry strategy.
    *
    * Records are re-wrapped into a fresh PutRecordBatchRequest per attempt so
    * only the entries handed back by withPutBatchRetry are resent. The
    * Left/Right split comes from PutRecordAction (not visible here) —
    * presumably Left = failed entry, Right = succeeded entry; confirm there.
    */
  def putRecordBatchWithRetry(request: PutRecordBatchRequest): Seq[Either[PutRecordBatchResponseEntry, PutRecordBatchResponseEntry]] = {
    withPutBatchRetry(request.records){ entry =>
      client.putRecordBatch(PutRecordBatchRequest(request.deliveryStreamName, entry))
    }
  }

  /** Releases the underlying AWS SDK client's resources. */
  def shutdown(): Unit = {
    client.shutdown()
  }

} 
Example 14
Source File: AmazonKinesisAsync.scala    From aws-kinesis-scala   with Apache License 2.0 5 votes vote down vote up
package jp.co.bizreach.kinesis

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.handlers.AsyncHandler
import com.amazonaws.regions.Regions
import com.amazonaws.services.kinesis.{AmazonKinesisAsync => AWSKinesisAsync, AmazonKinesisAsyncClientBuilder}
import com.amazonaws.services.kinesis.model.{
  PutRecordRequest => AWSPutRecordRequest,
  PutRecordResult => AWSPutRecordResult,
  PutRecordsRequest => AWSPutRecordsRequest,
  PutRecordsResult => AWSPutRecordsResult}
import jp.co.bizreach.kinesis.action.PutRecordAction

import scala.concurrent._

/** Factory methods for [[AmazonKinesisAsync]] clients. */
object AmazonKinesisAsync {

  /** Default async client for the implicit region. */
  def apply()(implicit region: Regions): AmazonKinesisAsync = {
    val aws = AmazonKinesisAsyncClientBuilder.standard.withRegion(region).build()
    new AmazonKinesisAsync(aws) with PutRecordAction
  }

  /** Async client with explicit credentials. */
  def apply(credentials: AWSCredentialsProvider)(implicit region: Regions): AmazonKinesisAsync = {
    val aws = AmazonKinesisAsyncClientBuilder.standard
      .withCredentials(credentials)
      .withRegion(region)
      .build()
    new AmazonKinesisAsync(aws) with PutRecordAction
  }

  /** Async client with a custom client configuration. */
  def apply(config: ClientConfiguration)(implicit region: Regions): AmazonKinesisAsync = {
    val aws = AmazonKinesisAsyncClientBuilder.standard
      .withClientConfiguration(config)
      .withRegion(region)
      .build()
    new AmazonKinesisAsync(aws) with PutRecordAction
  }

  /** Async client with explicit credentials and a custom client configuration. */
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration)(implicit region: Regions): AmazonKinesisAsync = {
    val aws = AmazonKinesisAsyncClientBuilder.standard
      .withCredentials(credentials)
      .withClientConfiguration(config)
      .withRegion(region)
      .build()
    new AmazonKinesisAsync(aws) with PutRecordAction
  }

  /** Wraps an already-constructed AWS async client. */
  def apply(client: AWSKinesisAsync): AmazonKinesisAsync =
    new AmazonKinesisAsync(client) with PutRecordAction
}

/**
 * Thin asynchronous Scala wrapper around the AWS Kinesis client.
 * Must be mixed with [[PutRecordAction]], which supplies the retry logic.
 */
class AmazonKinesisAsync(client: AWSKinesisAsync) {
  self: PutRecordAction =>

  /**
   * Sends records asynchronously via `withPutsAsyncRetry`, bridging the AWS
   * callback API into a Scala Future with a Promise.
   * @return one Either per record (Left/Right semantics defined by
   *         PutRecordAction — presumably Left = failed after retries; confirm there)
   */
  def putRecordsAsyncWithRetry(request: PutRecordsRequest)
                              (implicit ec: ExecutionContext): Future[Seq[Either[PutRecordsResultEntry, PutRecordsResultEntry]]] =
    withPutsAsyncRetry(request.records) { chunk =>
      val promise = Promise[PutRecordsResult]()
      val handler = new AsyncHandler[AWSPutRecordsRequest, AWSPutRecordsResult] {
        override def onError(e: Exception): Unit = promise.failure(e)
        override def onSuccess(req: AWSPutRecordsRequest, res: AWSPutRecordsResult): Unit = promise.success(res)
      }
      client.putRecordsAsync(PutRecordsRequest(request.streamName, chunk), handler)
      promise.future
    }

  /** Releases resources held by the underlying AWS client. */
  def shutdown(): Unit = client.shutdown()
}
Example 15
Source File: BufferedAmazonKinesis.scala    From aws-kinesis-scala   with Apache License 2.0 5 votes vote down vote up
package jp.co.bizreach.kinesis

import java.util.concurrent.{TimeUnit, Executors}

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Regions

/** Factory methods for [[BufferedAmazonKinesis]]. */
object BufferedAmazonKinesis {

  /** Buffered client over a default-configured [[AmazonKinesis]]. */
  def apply(amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis =
    new BufferedAmazonKinesis(AmazonKinesis(), amount, interval)

  /** Buffered client with explicit credentials. */
  def apply(credentials: AWSCredentialsProvider, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis =
    new BufferedAmazonKinesis(AmazonKinesis(credentials), amount, interval)

  /** Buffered client with a custom client configuration. */
  def apply(config: ClientConfiguration, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis =
    new BufferedAmazonKinesis(AmazonKinesis(config), amount, interval)

  /** Buffered client with explicit credentials and a custom client configuration. */
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis =
    new BufferedAmazonKinesis(AmazonKinesis(credentials, config), amount, interval)

  /** Buffered client over an already-constructed [[AmazonKinesis]]. */
  def apply(client: AmazonKinesis, amount: Int, interval: Long): BufferedAmazonKinesis =
    new BufferedAmazonKinesis(client, amount, interval)
}

// TODO Would like to provide DiskBufferClient also
/**
 * Buffers put requests in memory and flushes up to `amount` of them to
 * Kinesis every `interval` milliseconds from a single background thread.
 *
 * @param client   underlying Kinesis client used for the actual puts
 * @param amount   maximum number of buffered requests sent per flush
 * @param interval flush period in milliseconds
 */
class BufferedAmazonKinesis(client: AmazonKinesis, amount: Int, interval: Long) {

  // Only PutRecordRequest / PutRecordsRequest are ever enqueued (see putRecord/putRecords).
  private val queue = new java.util.concurrent.ConcurrentLinkedQueue[Any]

  private val scheduler = Executors.newSingleThreadScheduledExecutor()
  scheduler.scheduleAtFixedRate(new BufferedKinesisSendTask(), 0, interval, TimeUnit.MILLISECONDS)

  /** Buffers a single-record put; sent on the next flush. */
  def putRecord(request: PutRecordRequest): Unit = queue.add(request)

  /** Buffers a multi-record put; sent on the next flush. */
  def putRecords(request: PutRecordsRequest): Unit = queue.add(request)

  /** Stops the flush thread and shuts down the underlying client. */
  def shutdown(): Unit = {
    scheduler.shutdownNow()
    client.shutdown()
  }

  /** Error callback for failed flushes; override to customize. */
  def error(e: Exception): Unit = {
    e.printStackTrace()
  }

  private class BufferedKinesisSendTask extends Runnable {

    override def run(): Unit = {
      try {
        // poll() is atomic, so drain with it directly: the previous
        // `queue.size() != 0` guard both raced with poll() (a concurrent
        // drain could slip a null into the batch, causing a MatchError)
        // and was O(n) per element on ConcurrentLinkedQueue.
        val requests = Iterator.continually(queue.poll()).take(amount).takeWhile(_ != null).toList
        requests.foreach {
          case r: PutRecordRequest  => client.putRecord(r)
          case r: PutRecordsRequest => client.putRecords(r)
          case _                    => // defensive: ignore unexpected entries instead of throwing
        }
      } catch {
        case e: Exception => error(e)
      }
    }
  }

}
Example 16
Source File: Credentials.scala    From spark-select   with Apache License 2.0 5 votes vote down vote up
package io.minio.spark.select

import java.net.URI

// For BasicAWSCredentials
import com.amazonaws.auth.AWSCredentials
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.auth.BasicSessionCredentials
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain

import org.apache.hadoop.conf.Configuration

private[spark] object Credentials {

  /** Wraps fixed credentials in a provider whose refresh is a no-op. */
  private def staticCredentialsProvider(credentials: AWSCredentials): AWSCredentialsProvider = {
    new AWSCredentialsProvider {
      override def getCredentials: AWSCredentials = credentials
      override def refresh(): Unit = {}
    }
  }

  /**
   * Resolves AWS credentials for an s3/s3a location.
   *
   * Resolution order: user-info embedded in the URI ("access:secret"), then
   * Hadoop configuration (fs.s3a.access.key / fs.s3a.secret.key, plus
   * fs.s3a.session.token for session credentials), and finally the
   * DefaultAWSCredentialsProviderChain.
   *
   * @throws IllegalArgumentException if the URI scheme is not s3 or s3a
   */
  def load(location: Option[String], hadoopConfiguration: Configuration): AWSCredentialsProvider = {
    val uri = new URI(location.getOrElse(""))
    val uriScheme = uri.getScheme

    uriScheme match {
      case "s3" | "s3a" =>
        // This matches what S3A does, with one exception: we don't
        // support anonymous credentials. First, try to parse from URI:
        Option(uri.getUserInfo).flatMap { userInfo =>
          if (userInfo.contains(":")) {
            // Split at the first ':' only — a secret key containing ':'
            // previously caused a MatchError on the 2-element extractor.
            val Array(accessKey, secretKey) = userInfo.split(":", 2)
            Some(staticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
          } else {
            None
          }
        }.orElse {
          val accessKey = hadoopConfiguration.get("fs.s3a.access.key", null)
          val secretKey = hadoopConfiguration.get("fs.s3a.secret.key", null)
          val sessionToken = hadoopConfiguration.get("fs.s3a.session.token", null)
          if (accessKey != null && secretKey != null) {
            if (sessionToken != null) {
              Some(staticCredentialsProvider(new BasicSessionCredentials(accessKey, secretKey, sessionToken)))
            } else {
              Some(staticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
            }
          } else {
            None
          }
        }.getOrElse {
          // Finally, fall back on the instance profile provider
          new DefaultAWSCredentialsProviderChain()
        }
      case other =>
        throw new IllegalArgumentException(s"Unrecognized scheme $other; expected s3, or s3a")
    }
  }
}
Example 17
Source File: SqsClientSettingsSpec.scala    From akka-stream-sqs   with Apache License 2.0 5 votes vote down vote up
package me.snov.akka.sqs.client

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.typesafe.config.ConfigFactory
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.mockito.MockitoSugar._

/** Tests for SqsSettings parsing of Typesafe Config blocks. */
class SqsClientSettingsSpec extends FlatSpec with Matchers {

  // Builds SqsSettings from an HOCON snippet, injecting mocked AWS
  // credentials and client configuration.
  private def settingsOf(hocon: String): SqsSettings =
    SqsSettings(
      ConfigFactory.parseString(hocon).getConfig("reactive-sqs"),
      Some(mock[AWSCredentialsProvider]),
      Some(mock[ClientConfiguration])
    )

  it should "parse configuration" in {
    val settings = settingsOf(
      """
        reactive-sqs {
          endpoint = "http://localhost:9324/"
          region = "eu-west-1"
          queue-url = "http://localhost:9324/queue/queue1"
          max-number-of-messages = 10
          visibility-timeout = 60
          wait-time-seconds = 5
        }
      """)

    settings.endpoint.get.getServiceEndpoint shouldBe "http://localhost:9324/"
    settings.endpoint.get.getSigningRegion shouldBe "eu-west-1"
    settings.queueUrl shouldBe "http://localhost:9324/queue/queue1"
    settings.maxNumberOfMessages shouldBe 10
    settings.visibilityTimeout shouldBe Some(60)
    settings.waitTimeSeconds shouldBe 5
  }

  it should "support optional parameters" in {
    val settings = settingsOf(
      """
        reactive-sqs {
          queue-url = "http://localhost:9324/queue/queue1"
          wait-time-seconds = 5
        }
      """)

    settings.endpoint shouldBe None
    settings.queueUrl shouldBe "http://localhost:9324/queue/queue1"
    settings.maxNumberOfMessages shouldBe 10
    settings.visibilityTimeout shouldBe None
    settings.waitTimeSeconds shouldBe 5
  }
}
Example 18
Source File: SqsClientSpec.scala    From akka-stream-sqs   with Apache License 2.0 5 votes vote down vote up
package me.snov.akka.sqs.client

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.sqs.{AmazonSQS, AmazonSQSAsync}
import com.amazonaws.services.sqs.model.{ReceiveMessageRequest, ReceiveMessageResult}
import org.scalatest.mockito.MockitoSugar.mock
import org.scalatest.{FlatSpec, Matchers}
import org.mockito.Mockito._
import org.mockito.ArgumentMatchers._

/** Tests verifying that SqsClient delegates receive calls to the underlying AWS SQS client. */
class SqsClientSpec extends FlatSpec with Matchers {

  it should "call AWS client" in {

    val awsClient = mock[AmazonSQSAsync]

    // Settings that inject the mocked AWS client directly, bypassing client construction.
    val sqsClientSettings = SqsSettings(
      awsCredentialsProvider = Some(mock[AWSCredentialsProvider]),
      awsClientConfiguration = Some(mock[ClientConfiguration]),
      awsClient = Some(awsClient),
      queueUrl = ""
    )
    val sqsClient = SqsClient(sqsClientSettings)
    val receiveMessageResult = mock[ReceiveMessageResult]

    // Stub: any receive request returns the mocked result.
    when(awsClient.receiveMessage(any[ReceiveMessageRequest])).thenReturn(receiveMessageResult)

    sqsClient.receiveMessage()

    // The wrapper must have read the messages off the AWS result.
    verify(receiveMessageResult).getMessages
  }

  it should "pass parameters with ReceiveMessageRequest" in {

    val awsClient = mock[AmazonSQSAsync]

    val sqsClientSettings = SqsSettings(
      queueUrl = "",
      maxNumberOfMessages = 9,
      waitTimeSeconds = 7,
      awsCredentialsProvider = Some(mock[AWSCredentialsProvider]),
      awsClientConfiguration = Some(mock[ClientConfiguration]),
      awsClient = Some(awsClient),
      visibilityTimeout = Some(75)
    )
    val sqsClient = SqsClient(sqsClientSettings)
    val receiveMessageResult = mock[ReceiveMessageResult]

    // Stub keyed on the exact request: only matches if the client forwards
    // every setting (queue URL, max messages, visibility timeout, wait time).
    val receiveMessageRequest = new ReceiveMessageRequest()
        .withQueueUrl("")
        .withMaxNumberOfMessages(9)
        .withVisibilityTimeout(75)
        .withWaitTimeSeconds(7)

    when(awsClient.receiveMessage(receiveMessageRequest)).thenReturn(receiveMessageResult)

    sqsClient.receiveMessage()

    verify(receiveMessageResult).getMessages
  }
}
Example 19
Source File: SqsSettings.scala    From akka-stream-sqs   with Apache License 2.0 5 votes vote down vote up
package me.snov.akka.sqs.client

import akka.actor.ActorSystem
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.AmazonSQSAsync
import com.typesafe.config.Config
import collection.JavaConverters._

/** Factories for [[SqsSettings]], from explicit values, an ActorSystem, or Typesafe Config. */
object SqsSettings {
  private val defaultAWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()
  private val defaultAWSClientConfiguration = new ClientConfiguration()
  private val defaultMaxNumberOfMessages = 10
  private val defaultWaitTimeSeconds = 10
  private val configurationRoot = "akka-stream-sqs"

  /** Builds settings from explicit values, falling back to defaults for credentials/config. */
  def apply(
             queueUrl: String,
             maxNumberOfMessages: Int = defaultMaxNumberOfMessages,
             waitTimeSeconds: Int = defaultWaitTimeSeconds,
             awsCredentialsProvider: Option[AWSCredentialsProvider] = None,
             awsClientConfiguration: Option[ClientConfiguration] = None,
             awsClient: Option[AmazonSQSAsync] = None,
             endpoint: Option[EndpointConfiguration] = None,
             visibilityTimeout: Option[Int] = None,
             messageAttributes: Seq[String] = List()
           ): SqsSettings =
    new SqsSettings(
      queueUrl = queueUrl,
      maxNumberOfMessages = maxNumberOfMessages,
      waitTimeSeconds = waitTimeSeconds,
      awsClient = awsClient,
      endpoint = endpoint,
      awsCredentialsProvider = awsCredentialsProvider.getOrElse(defaultAWSCredentialsProvider),
      awsClientConfiguration = awsClientConfiguration.getOrElse(defaultAWSClientConfiguration),
      visibilityTimeout = visibilityTimeout,
      messageAttributes = messageAttributes
    )

  /** Reads settings from the actor system's `akka-stream-sqs` config section. */
  def apply(system: ActorSystem): SqsSettings = apply(system, None, None)

  def apply(
             system: ActorSystem,
             awsCredentialsProvider: Option[AWSCredentialsProvider],
             awsClientConfiguration: Option[ClientConfiguration]
           ): SqsSettings =
    apply(system.settings.config.getConfig(configurationRoot), awsCredentialsProvider, awsClientConfiguration)

  def apply(config: Config): SqsSettings = apply(config, None, None)

  /** Reads settings from a Config block; missing optional paths fall back to defaults. */
  def apply(
             config: Config,
             awsCredentialsProvider: Option[AWSCredentialsProvider],
             awsClientConfiguration: Option[ClientConfiguration]
           ): SqsSettings = {
    // Helpers for the optional-path pattern on Typesafe Config.
    def intOr(path: String, default: Int): Int =
      if (config.hasPath(path)) config.getInt(path) else default
    def intOpt(path: String): Option[Int] =
      if (config.hasPath(path)) Some(config.getInt(path)) else None

    // Endpoint is only configured when BOTH endpoint and region are present.
    val endpoint =
      if (config.hasPath("endpoint") && config.hasPath("region"))
        Some(new EndpointConfiguration(config.getString("endpoint"), config.getString("region")))
      else None

    val attributes: Seq[String] =
      if (config.hasPath("message-attributes")) config.getStringList("message-attributes").asScala
      else List()

    apply(
      queueUrl = config.getString("queue-url"),
      maxNumberOfMessages = intOr("max-number-of-messages", defaultMaxNumberOfMessages),
      waitTimeSeconds = intOr("wait-time-seconds", defaultWaitTimeSeconds),
      awsCredentialsProvider = awsCredentialsProvider,
      awsClientConfiguration = awsClientConfiguration,
      endpoint = endpoint,
      visibilityTimeout = intOpt("visibility-timeout"),
      messageAttributes = attributes
    )
  }
}

/**
 * Immutable settings for the SQS client.
 *
 * @param queueUrl               URL of the queue to consume
 * @param maxNumberOfMessages    max messages per receive call
 * @param waitTimeSeconds        long-poll wait time
 * @param awsClient              pre-built client to use instead of constructing one
 * @param endpoint               custom endpoint/signing region, if any
 * @param awsCredentialsProvider credentials (defaulted by the companion when absent)
 * @param awsClientConfiguration low-level client configuration
 * @param visibilityTimeout      per-receive visibility timeout, if configured
 * @param messageAttributes      attribute names to request with each message
 */
case class SqsSettings(queueUrl: String,
                       maxNumberOfMessages: Int,
                       waitTimeSeconds: Int,
                       awsClient: Option[AmazonSQSAsync],
                       endpoint: Option[EndpointConfiguration],
                       awsCredentialsProvider: AWSCredentialsProvider,
                       awsClientConfiguration: ClientConfiguration,
                       visibilityTimeout: Option[Int],
                       messageAttributes: Seq[String]) 
Example 20
Source File: KinesisTestConsumer.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis.common

import java.util.Collections

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord
import com.amazonaws.services.kinesis.model._
import com.amazonaws.services.kinesis.{AmazonKinesisAsyncClient, _}
import com.weightwatchers.reactive.kinesis.consumer.KinesisConsumer.ConsumerConf

import scala.collection.JavaConverters._
import scala.concurrent.duration.FiniteDuration

object KinesisTestConsumer {

  // NOTE(review): the code below references a `client` value that is not
  // defined anywhere in this excerpt — presumably a Kinesis client field or
  // factory was stripped when this example was extracted; confirm against
  // the original KinesisTestConsumer.scala.

  /**
   * Reads up to `batchSize` records from each shard of the stream (starting
   * at TRIM_HORIZON) and de-aggregates KPL user records into UTF-8 strings.
   */
  def retrieveRecords(streamName: String, batchSize: Int): List[String] = {
    getShards(streamName)
      .flatMap { shard =>
        val getRecordsRequest = new GetRecordsRequest
        getRecordsRequest.setShardIterator(getShardIterator(streamName, shard))
        getRecordsRequest.setLimit(batchSize)
        client.getRecords(getRecordsRequest).getRecords.asScala.toList
      }
      .flatMap { record: Record =>
        // A single Kinesis record may contain several aggregated KPL user records.
        UserRecord
          .deaggregate(Collections.singletonList(record))
          .asScala
          .map { ur =>
            new String(ur.getData.array(), java.nio.charset.StandardCharsets.UTF_8)
          }
      }
  }

  // Iterator positioned at the oldest available record of the shard.
  private def getShardIterator(streamName: String, shard: Shard) = {
    client
      .getShardIterator(streamName, shard.getShardId, "TRIM_HORIZON")
      .getShardIterator
  }

  // All shards of the stream, via DescribeStream.
  private def getShards(streamName: String) = {
    client
      .describeStream(streamName)
      .getStreamDescription
      .getShards
      .asScala
      .toList
  }

  def shutdown(): Unit = client.shutdown()

} 
Example 21
Source File: EagerBeanstalkDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.elasticbeanstalk.AWSElasticBeanstalkClient
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.elasticbeanstalk.model.EnvironmentStatus
import com.amazonaws.services.elasticbeanstalk.model.ApplicationDescription
import com.amazonaws.services.elasticbeanstalk.model.DeleteApplicationRequest
import com.amazonaws.services.elasticbeanstalk.model.EnvironmentDescription
import com.amazonaws.services.elasticbeanstalk.model.TerminateEnvironmentRequest
import aws.daleks.util.Humid

/**
 * Dalek that terminates all non-terminated Elastic Beanstalk environments
 * and then deletes all Beanstalk applications in the region.
 */
class EagerBeanstalkDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val beanstalk = withRegion(new AWSElasticBeanstalkClient(credentials), region)

  def exterminate = {
    val TERMINATED = EnvironmentStatus.Terminated.toString()
    // Skip environments that are already terminated.
    val envs = beanstalk.describeEnvironments().getEnvironments().asScala filter { e =>
      !TERMINATED.equalsIgnoreCase(e.getStatus())
    }

    // Best effort: a failing describeApplications must not abort environment
    // termination below.
    val apps = try {
      beanstalk.describeApplications.getApplications asScala
    } catch {
      case e: Exception => {
        // Fixed typo in log message: "fectch" -> "fetch".
        println("Could not fetch beanstalk applications: " + e.getMessage());
        List.empty
      }
    }

    envs foreach exterminateEnv

    apps foreach exterminateApp
  }

  /** Terminates a single environment together with its resources. */
  def exterminateEnv(env: EnvironmentDescription) =
    try {
      val envName = env.getEnvironmentName()
      println(s"** Exterminating Beanstalk Environment ${envName} [${env.getStatus()} ] ")
      Humid {
      beanstalk.terminateEnvironment(new TerminateEnvironmentRequest()
        .withEnvironmentName(envName)
        .withTerminateResources(true))
      }
    } catch {
      case e: Exception => println(s"! Failed to exterminate Beanstalk Environment ${env.getEnvironmentName()} [id: ${env.getEnvironmentId} ]: ${e.getMessage()}");
    }

  /** Deletes a single Beanstalk application. */
  def exterminateApp(app: ApplicationDescription) =
    try {
      println("** Exterminating Beanstalk Application " + app.getApplicationName())
      Humid {
      beanstalk.deleteApplication(new DeleteApplicationRequest().withApplicationName(app.getApplicationName()))
      }
    } catch {
      case e: Exception => println(s"! Failed to exterminate Beanstalk Application ${app.getApplicationName()}: ${e.getMessage()}")
    }
} 
Example 22
Source File: EagerS3Dalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.regions.ServiceAbbreviations
import scala.collection.JavaConverters._
import com.amazonaws.services.s3.model.{ Region => S3Region }
import com.amazonaws.services.s3.model.S3ObjectSummary
import com.amazonaws.services.s3.model.Bucket
import aws.daleks.util.Humid

/**
 * Dalek that deletes this account's S3 buckets located in the given region,
 * including their objects, versions, and bucket policies. Buckets whose
 * names start with "logs", "billing", or "share" are spared.
 */
class EagerS3Dalek(implicit region: Region, credentials: AWSCredentialsProvider)
  extends Dalek {

  // S3 client explicitly pointed at the region's S3 endpoint.
  val s3 = {
    val s3 = new AmazonS3Client(credentials);
    val endpoint = region.getServiceEndpoint(ServiceAbbreviations.S3);
    s3.setEndpoint(endpoint);
    withRegion(s3, region)
  }

  // Candidate buckets: not spared by name prefix, and actually located in
  // this region (the bucket listing is account-wide).
  def buckets = (s3.listBuckets.asScala).filter { bucket =>
    val name = bucket.getName
    val keep =  name.startsWith("logs")  || name.startsWith("billing") || name.startsWith("share")
    !keep
  }.filter { bucket =>
    val locStr = s3.getBucketLocation(bucket.getName)
    val bucketRegion = S3Region.fromValue(locStr).toAWSRegion()
    bucketRegion.equals(region)
  }

  // Deletes objects, then versions, then the bucket policy and the bucket
  // itself — the bucket must be emptied before deleteBucket can succeed.
  def exterminate = buckets foreach { bucket =>
    val bucketName = bucket.getName
    //TODO: Support > 1000 Objects
    val objects = s3.listObjects(bucketName).getObjectSummaries.asScala.par
    objects.foreach { o =>
      println("** Exterminating S3 Object " + bucket.getName + "/" + o.getKey);
      Humid {
        s3.deleteObject(o.getBucketName, o.getKey)
      }
    }
    // Empty prefix: list and delete ALL versions in the bucket.
    val versions = s3.listVersions(bucketName, "").getVersionSummaries().asScala.par
    versions.foreach { v =>
      println("** Exterminating S3 Version " + bucket.getName + "/" + v.getKey() + " v" + v.getVersionId);
      Humid {
        s3.deleteVersion(bucketName, v.getKey, v.getVersionId)
      }
    }

    try {
      println("** Exterminating S3 Bucket Policy " + bucket.getName)
      Humid { s3.deleteBucketPolicy(bucket.getName()) }
      println("** Exterminating S3 Bucket " + bucket.getName)
      Humid { s3.deleteBucket(bucket.getName) }
    } catch {
      case e: Exception => println(s"! Failed to exterminate S3 Bucket ${bucket.getName}: ${e.getMessage()}")
    }
  }

} 
Example 23
Source File: EagerSQSDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.DeleteQueueRequest
import aws.daleks.util.Humid

/** Dalek that deletes every SQS queue in the region. */
class EagerSQSDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val sqs = withRegion(new AmazonSQSClient(credentials), region)

  def exterminate = {
    val queues = sqs.listQueues.getQueueUrls asScala

    queues foreach { q =>
      // Fixed typo in log message: "Esterminating" -> "Exterminating".
      println("Exterminating SQS Queue " + q)
      Humid {
        sqs.deleteQueue(new DeleteQueueRequest().withQueueUrl(q))
      }
    }
  }
} 
Example 24
Source File: EagerEMRDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.DeleteQueueRequest
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClient
import com.amazonaws.services.elasticmapreduce.model.TerminateJobFlowsRequest
import aws.daleks.util.Humid

/** Dalek that terminates every EMR cluster (job flow) in the region. */
class EagerEMRDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val emr = withRegion(new AmazonElasticMapReduceClient(credentials), region)

  def exterminate = {
    val clusterIds = emr.listClusters.getClusters.asScala.map(_.getId())

    clusterIds foreach { id =>
      try {
        info(this, s"Exterminating Clusters $id")
        // TerminateJobFlows takes a list of flow ids; we submit one at a time
        // so a single failure doesn't abort the rest.
        val request = new TerminateJobFlowsRequest
        request.setJobFlowIds(List(id).asJava)
        Humid {
          emr.terminateJobFlows(request)
        }
      } catch {
        case e: Exception => println(s"! Failed to exterminate Clusters ${id}: ${e.getMessage()}")
      }
    }
  }
} 
Example 25
Source File: EagerDynamoDBDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.regions.ServiceAbbreviations
import scala.collection.JavaConverters._
import com.amazonaws.services.s3.model.{ Region => S3Region }
import com.amazonaws.services.s3.model.S3ObjectSummary
import com.amazonaws.services.s3.model.Bucket
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import aws.daleks.util.Humid

/** Dalek that deletes every DynamoDB table in the region. */
class EagerDynamoDBDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val dynamo = withRegion(new AmazonDynamoDBClient(credentials), region)

  def exterminate = {
    val tables: Seq[String] = dynamo.listTables.getTableNames asScala

    tables foreach { t =>
      // Fixed typo in log message: "DyanmoDB" -> "DynamoDB".
      info(this,s"Exterminating DynamoDB Table ${t}")
      Humid { dynamo.deleteTable(t) }
    }

  }
} 
Example 26
Source File: EagerSNSDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.DeleteQueueRequest
import com.amazonaws.services.sns.AmazonSNSClient
import scala.collection.JavaConverters._
import aws.daleks.util.Humid

/** Dalek that deletes every SNS topic in the region. */
class EagerSNSDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val sns = withRegion(new AmazonSNSClient(credentials), region)

  def exterminate = {
    for (topic <- sns.listTopics.getTopics.asScala) {
      println("** Exterminating SNS Topic " + topic.getTopicArn())
      Humid {
        sns.deleteTopic(topic.getTopicArn())
      }
    }
  }
} 
Example 27
Source File: EagerRDSDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.services.rds.AmazonRDSClient
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.rds.model.DeleteDBInstanceRequest
import aws.daleks.util.Humid

/** Dalek that deletes every RDS database instance in the region, skipping the final snapshot. */
class EagerRDSDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val rds = withRegion(new AmazonRDSClient(credentials), region)

  def exterminate = {
    rds.describeDBInstances.getDBInstances.asScala foreach { db =>
      println("** Exterminating RDS Database " + db.getDBInstanceIdentifier)
      val request = new DeleteDBInstanceRequest
      request.setDBInstanceIdentifier(db.getDBInstanceIdentifier())
      // No final snapshot: the instance is destroyed without a backup.
      request.setSkipFinalSnapshot(true)
      Humid {
        rds.deleteDBInstance(request)
      }
    }
  }
} 
Example 28
Source File: EagerElastiCacheDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.DeleteQueueRequest
import com.amazonaws.services.elasticache.AmazonElastiCacheClient
import com.amazonaws.services.elasticache.model.DeleteCacheClusterRequest
import aws.daleks.util.Humid

/** Dalek that deletes every ElastiCache cluster in the region. */
class EagerElastiCacheDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val ecache = withRegion(new AmazonElastiCacheClient(credentials), region)

  def exterminate = {
    for (cluster <- ecache.describeCacheClusters.getCacheClusters.asScala) {
      val clusterId = cluster.getCacheClusterId
      try {
        info(this, "Exterminating Cache Cluster " + clusterId)
        Humid {
          ecache.deleteCacheCluster(new DeleteCacheClusterRequest().withCacheClusterId(clusterId))
        }
      } catch {
        case e: Exception => println(s"! Failed to exterminate Cache Cluster ${clusterId}: ${e.getMessage()}")
      }
    }
  }
} 
Example 29
Source File: EagerCloudFormationDalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import scala.collection.JavaConverters._
import com.amazonaws.services.sqs.AmazonSQSClient
import com.amazonaws.services.sqs.model.DeleteQueueRequest
import com.amazonaws.services.cloudformation.AmazonCloudFormationClient
import com.amazonaws.services.cloudformation.model.DeleteStackRequest
import aws.daleks.util.Humid

/** Dalek that deletes every CloudFormation stack in the region. */
class EagerCloudFormationDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val cloudformation = withRegion(new AmazonCloudFormationClient(credentials), region)

  def exterminate = {
    val stacks = cloudformation.describeStacks.getStacks asScala

    stacks foreach { stack =>
      try {
        info(this, s"** Exterminating CloudFormation Stack ${stack.getStackName()}")
        Humid {
          cloudformation.deleteStack(new DeleteStackRequest().withStackName(stack.getStackName()))
        }
      } catch {
        // Fixed copy-paste error: the failure message previously said
        // "Beanstalk Application" although this deletes CloudFormation stacks.
        case e: Exception => println(s"! Failed to exterminate CloudFormation Stack ${stack.getStackName}: ${e.getMessage()}")
      }
    }
  }
} 
Example 30
Source File: EagerRoute53Dalek.scala    From aws-training-demo   with Apache License 2.0 5 votes vote down vote up
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.route53.AmazonRoute53Client
import scala.collection.JavaConverters._

/**
 * Dalek for Route 53 hosted zones. Zone deletion is NOT implemented yet —
 * the loop only logs each zone (record sets would need to be removed first,
 * see the commented-out listing below).
 */
class EagerRoute53Dalek(implicit credentials: AWSCredentialsProvider) extends Dalek {
  // Note: unlike the other Daleks, no region is applied to this client.
  val r53 = new AmazonRoute53Client(credentials)

  /** All hosted zones in the account. */
  def zones = r53.listHostedZones.getHostedZones.asScala

  def exterminate = {
    println("Exterminating Hosted Zones")
    zones.foreach { z =>
      try {
        println("** Exterminating HostedZone " + z.getName)
        // val records = r53.listResourceRecordSets(new ListResourceRecordSetsRequest().withHostedZoneId(z.getId())).getResourceRecordSets() asScala
        // records.foreach
        // TODO
      } catch {
        case e: Exception => println(s"! Failed to exterminate Zone ${z.getName()}: ${e.getMessage()}")
      }
    }

  }
}