com.amazonaws.services.s3.AmazonS3Client Scala Examples

The following examples show how to use com.amazonaws.services.s3.AmazonS3Client. Each example notes the open-source project it comes from, along with its source file and license.
Example 1
Source File: EagerDynamoDBDalek.scala    From aws-training-demo   with Apache License 2.0
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.regions.ServiceAbbreviations
import scala.collection.JavaConverters._
import com.amazonaws.services.s3.model.{ Region => S3Region }
import com.amazonaws.services.s3.model.S3ObjectSummary
import com.amazonaws.services.s3.model.Bucket
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import aws.daleks.util.Humid

class EagerDynamoDBDalek(implicit region: Region, credentials: AWSCredentialsProvider) extends Dalek {
  val dynamo = withRegion(new AmazonDynamoDBClient(credentials), region)

  def exterminate = {
    val tables: Seq[String] = dynamo.listTables.getTableNames.asScala

    tables foreach { t =>
      info(this, s"Exterminating DynamoDB Table ${t}")
      Humid { dynamo.deleteTable(t) }
    }

  }
} 
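Note that listTables returns at most 100 table names per call, so the sweep above can miss tables in large accounts. A minimal paging sketch against the same SDK (the helper name is ours):

import scala.annotation.tailrec
import scala.collection.JavaConverters._
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient

// Sketch: follow getLastEvaluatedTableName until DynamoDB reports no more pages.
def allTableNames(dynamo: AmazonDynamoDBClient): Seq[String] = {
  @tailrec
  def loop(start: Option[String], acc: Vector[String]): Vector[String] = {
    val result = start.fold(dynamo.listTables())(dynamo.listTables(_))
    val names = acc ++ result.getTableNames.asScala
    Option(result.getLastEvaluatedTableName) match {
      case Some(next) => loop(Some(next), names)
      case None => names
    }
  }
  loop(None, Vector.empty)
}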
Example 2
Source File: EagerS3Dalek.scala    From aws-training-demo   with Apache License 2.0
package aws.daleks.eager

import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Region
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.regions.ServiceAbbreviations
import scala.collection.JavaConverters._
import com.amazonaws.services.s3.model.{ Region => S3Region }
import com.amazonaws.services.s3.model.S3ObjectSummary
import com.amazonaws.services.s3.model.Bucket
import aws.daleks.util.Humid

class EagerS3Dalek(implicit region: Region, credentials: AWSCredentialsProvider)
  extends Dalek {

  val s3 = {
    val s3 = new AmazonS3Client(credentials)
    val endpoint = region.getServiceEndpoint(ServiceAbbreviations.S3)
    s3.setEndpoint(endpoint)
    withRegion(s3, region)
  }

  def buckets = (s3.listBuckets.asScala).filter { bucket =>
    val name = bucket.getName
    val keep = name.startsWith("logs") || name.startsWith("billing") || name.startsWith("share")
    !keep
  }.filter { bucket =>
    val locStr = s3.getBucketLocation(bucket.getName)
    val bucketRegion = S3Region.fromValue(locStr).toAWSRegion()
    bucketRegion.equals(region)
  }

  def exterminate = buckets foreach { bucket =>
    val bucketName = bucket.getName
    //TODO: Support > 1000 Objects
    val objects = s3.listObjects(bucketName).getObjectSummaries.asScala.par
    objects.foreach { o =>
      println("** Exterminating S3 Object " + bucket.getName + "/" + o.getKey);
      Humid {
        s3.deleteObject(o.getBucketName, o.getKey)
      }
    }
    val versions = s3.listVersions(bucketName, "").getVersionSummaries().asScala.par
    versions.foreach { v =>
      println("** Exterminating S3 Version " + bucket.getName + "/" + v.getKey() + " v" + v.getVersionId);
      Humid {
        s3.deleteVersion(bucketName, v.getKey, v.getVersionId)
      }
    }

    try {
      println("** Exterminating S3 Bucket Policy " + bucketName)
      Humid { s3.deleteBucketPolicy(bucketName) }
      println("** Exterminating S3 Bucket " + bucketName)
      Humid { s3.deleteBucket(bucketName) }
    } catch {
      case e: Exception => println(s"! Failed to exterminate S3 Bucket ${bucketName}: ${e.getMessage}")
    }
  }

} 
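The TODO in exterminate flags the real gap: listObjects returns at most 1,000 summaries per page. A hedged sketch of draining a bucket with the SDK's standard isTruncated / listNextBatchOfObjects pairing (the helper name is ours):

import scala.collection.JavaConverters._
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.S3ObjectSummary

// Sketch: keep fetching the next batch while the listing reports truncation.
def allObjectSummaries(s3: AmazonS3Client, bucketName: String): Seq[S3ObjectSummary] = {
  var listing = s3.listObjects(bucketName)
  val summaries = Vector.newBuilder[S3ObjectSummary]
  summaries ++= listing.getObjectSummaries.asScala
  while (listing.isTruncated) {
    listing = s3.listNextBatchOfObjects(listing)
    summaries ++= listing.getObjectSummaries.asScala
  }
  summaries.result()
}

The version listing has the same limit; listNextBatchOfVersions plays the analogous role there.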
Example 3
Source File: S3Client.scala    From akka-persistence-s3   with MIT License
package akka.persistence.s3

import java.io.InputStream

import com.amazonaws.auth.{ BasicAWSCredentials, DefaultAWSCredentialsProviderChain }
import com.amazonaws.services.s3.{ S3ClientOptions, AmazonS3Client }
import com.amazonaws.services.s3.model._

import scala.concurrent.{ Future, ExecutionContext }

trait S3Client {
  val s3ClientConfig: S3ClientConfig

  lazy val client: AmazonS3Client = {
    val client =
      if (s3ClientConfig.awsUseDefaultCredentialsProviderChain)
        new AmazonS3Client(new DefaultAWSCredentialsProviderChain).withRegion(s3ClientConfig.region)
      else
        new AmazonS3Client(new BasicAWSCredentials(s3ClientConfig.awsKey, s3ClientConfig.awsSecret))

    s3ClientConfig.endpoint.foreach { endpoint =>
      client.withEndpoint(endpoint)
      ()
    }
    client.setS3ClientOptions(new S3ClientOptions()
      .withPathStyleAccess(s3ClientConfig.options.pathStyleAccess)
      .withChunkedEncodingDisabled(s3ClientConfig.options.chunkedEncodingDisabled))
    client
  }

  def createBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Bucket] = Future {
    client.createBucket(bucketName)
  }

  def deleteBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteBucket(bucketName)
  }

  def putObject(bucketName: String, key: String, input: InputStream, metadata: ObjectMetadata)(implicit ec: ExecutionContext): Future[PutObjectResult] = Future {
    client.putObject(new PutObjectRequest(bucketName, key, input, metadata))
  }

  def getObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[S3Object] = Future {
    client.getObject(new GetObjectRequest(bucketName, key))
  }

  def listObjects(request: ListObjectsRequest)(implicit ec: ExecutionContext): Future[ObjectListing] = Future {
    client.listObjects(request)
  }

  def deleteObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteObject(bucketName, key)
  }

  def deleteObjects(request: DeleteObjectsRequest)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteObjects(request)
  }
} 
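A usage sketch: mix the trait into a concrete instance and sequence the Futures. Here myConfig stands in for a real S3ClientConfig, whose construction is project-specific, so treat the values as assumptions:

import scala.concurrent.ExecutionContext.Implicits.global

// Sketch: myConfig is a placeholder for an S3ClientConfig loaded from configuration.
val s3 = new S3Client { val s3ClientConfig = myConfig }

for {
  bucket <- s3.createBucket("journal-bucket")
  _ <- s3.deleteObject(bucket.getName, "journal/snapshot-0")
} yield ()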
Example 4
Source File: S3Sandbox.scala    From redshift-fake-driver   with Apache License 2.0
package jp.ne.opt.redshiftfake

import java.net.URI

import com.amazonaws.auth.{AWSCredentials, BasicAWSCredentials}
import com.amazonaws.regions.RegionUtils
import com.amazonaws.services.s3.AmazonS3Client
import org.gaul.s3proxy.{AuthenticationType, S3Proxy}
import org.jclouds.ContextBuilder
import org.jclouds.blobstore.BlobStoreContext
import org.scalatest.{BeforeAndAfterAll, Suite}

trait S3Sandbox extends BeforeAndAfterAll { this: Suite =>

  val dummyCredentials: Credentials.WithKey
  val s3Endpoint: String

  var s3Proxy: S3Proxy = _

  override def beforeAll(): Unit = {
    val blobContext: BlobStoreContext = ContextBuilder
      .newBuilder("transient")
      .build(classOf[BlobStoreContext])

    s3Proxy = S3Proxy.builder
      .blobStore(blobContext.getBlobStore)
      .awsAuthentication(AuthenticationType.AWS_V4, dummyCredentials.accessKeyId, dummyCredentials.secretAccessKey)
      .endpoint(URI.create(s3Endpoint))
      .build
    s3Proxy.start()
  }

  override def afterAll(): Unit = {
    s3Proxy.stop()
  }

  def createS3Client(s3Region: String): AmazonS3Client = {
    val credentials: AWSCredentials = new BasicAWSCredentials(dummyCredentials.accessKeyId, dummyCredentials.secretAccessKey)
    val client = new AmazonS3Client(credentials)
    client.setRegion(RegionUtils.getRegion(s3Region))
    client.setEndpoint(s3Endpoint)

    client
  }
} 
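A suite mixing in the sandbox supplies the two abstract members. This sketch assumes Credentials.WithKey is a case class over the key pair and uses dummy values throughout:

import org.scalatest.FunSuite

// Sketch: exercises the in-memory S3Proxy that S3Sandbox starts in beforeAll.
class S3RoundTripTest extends FunSuite with S3Sandbox {
  val dummyCredentials = Credentials.WithKey("AKIADUMMYACCESSKEY", "dummySecretAccessKey")
  val s3Endpoint = "http://127.0.0.1:8080"

  test("a created bucket is listed") {
    val client = createS3Client("us-east-1")
    client.createBucket("fixtures")
    assert(client.listBuckets().size() == 1)
  }
}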
Example 5
Source File: S3EventMigrationHandler.scala    From flyway-awslambda   with MIT License
package crossroad0201.aws.flywaylambda

import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.lambda.runtime.events.S3Event
import com.amazonaws.services.lambda.runtime.{Context, RequestHandler}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client}

import scala.util.{Failure, Success}

class S3EventMigrationHandler extends RequestHandler[S3Event, Unit] with S3MigrationHandlerBase {

  override def handleRequest(event: S3Event, context: Context): Unit = {
    val logger = context.getLogger

    implicit val s3Client: AmazonS3 = new AmazonS3Client().withRegion(Region.getRegion(Regions.fromName(event.getRecords.get(0).getAwsRegion)))

    logger.log(s"Flyway migration start. by ${event.getRecords.get(0).getEventName} s3://${event.getRecords.get(0).getS3.getBucket.getName}/${event.getRecords.get(0).getS3.getObject.getKey}")

    val s3 = event.getRecords.get(0).getS3
    val migrationPrefix = {
      val objectKey = s3.getObject.getKey
      objectKey.substring(0, objectKey.lastIndexOf("/"))
    }

    migrate(s3.getBucket.getName, migrationPrefix)(context, s3Client) match {
      case Success(r) => logger.log(r)
      case Failure(e) => e.printStackTrace()
    }
  }

} 
Example 6
Source File: InvokeMigrationHandler.scala    From flyway-awslambda   with MIT License
package crossroad0201.aws.flywaylambda

import java.io.{BufferedOutputStream, InputStream, OutputStream, PrintWriter}

import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.lambda.runtime.{Context, RequestStreamHandler}
import com.amazonaws.services.s3.{AmazonS3, AmazonS3Client}

import scala.io.{BufferedSource, Codec}
import scala.util.{Failure, Success, Try}

class InvokeMigrationHandler extends RequestStreamHandler with S3MigrationHandlerBase {
  type BucketName = String
  type Prefix = String
  type ConfFileName = String

  override def handleRequest(input: InputStream, output: OutputStream, context: Context): Unit = {
    def parseInput: Try[(BucketName, Prefix, ConfFileName)] = Try {
      import spray.json._
      import DefaultJsonProtocol._

      val json = new BufferedSource(input)(Codec("UTF-8")).mkString
      val jsObj = JsonParser(json).asJsObject
      jsObj.getFields(
        "bucket_name",
        "prefix"
      ) match {
        case Seq(JsString(b), JsString(p)) => {
          jsObj.getFields(
            "flyway_conf"
          ) match {
            case Seq(JsString(c)) => (b, p, c)
            case _ => (b, p, "flyway.conf")
          }
        }
        case _ => throw new IllegalArgumentException(s"Missing required key [bucket_name, prefix]. - $json")
      }
    }

    val logger = context.getLogger

    implicit val s3Client: AmazonS3 = new AmazonS3Client().withRegion(Region.getRegion(Regions.fromName(sys.env("AWS_REGION"))))

    (for {
      i <- parseInput
      _ = { logger.log(s"Flyway migration start. by invoke lambda function(${i._1}, ${i._2}, ${i._3}).") }
      r <- migrate(i._1, i._2, i._3)(context, s3Client)
    } yield r) match {
      case Success(r) =>
        logger.log(r)
        val b = r.getBytes("UTF-8")
        val bout = new BufferedOutputStream(output)
        bout.write(b)
        bout.flush()
      case Failure(e) =>
        e.printStackTrace()
        val w = new PrintWriter(output)
        w.write(e.toString)
        w.flush()
    }
  }

} 
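For reference, this is the JSON shape parseInput expects when the function is invoked directly. The bucket and prefix values are illustrative; flyway_conf may be omitted and defaults to "flyway.conf":

// Sketch of the invocation payload handleRequest reads from the input stream.
val payload =
  """{
    |  "bucket_name": "my-flyway-bucket",
    |  "prefix": "migrations/production",
    |  "flyway_conf": "flyway.conf"
    |}""".stripMargin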
Example 7
Source File: S3Persisted.scala    From fotm-info   with MIT License
package info.fotm.util

import java.io.File

import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model._
import com.amazonaws.util.IOUtils
import com.twitter.bijection.Bijection

import scala.util.Try

class S3Persisted[T](bucket: String, path: String)(implicit serializer: Bijection[T, Array[Byte]]) extends Persisted[T] {

  val s3client = new AmazonS3Client()

  override def save(state: T): Try[Unit] = Try {
    val fileName = "tmpstorage.txt"
    val fileIO = new FilePersisted[T](fileName)(serializer)
    fileIO.save(state)

    val tmpFile = new File(fileName)
    s3client.putObject(new PutObjectRequest(bucket, path, tmpFile))
    tmpFile.delete()
  }

  override def fetch(): Try[T] = {
    val request = new GetObjectRequest(bucket, path)
    Try(s3client.getObject(request)).map { s3object =>
      val objectData: S3ObjectInputStream = s3object.getObjectContent
      val bytes = IOUtils.toByteArray(objectData) // byte-exact read; decoding via Source.mkString corrupts binary payloads
      objectData.close()
      serializer.inverse(bytes)
    }
  }
} 
Example 8
Source File: S3KVPersisted.scala    From fotm-info   with MIT License
package info.fotm.util

import java.io.ByteArrayInputStream

import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.{GetObjectRequest, ObjectListing, ObjectMetadata, S3ObjectInputStream}
import com.amazonaws.util.IOUtils
import com.twitter.bijection.Bijection

import scala.collection.JavaConverters._
import scala.collection.breakOut
import scala.util.Try

class S3KVPersisted[K, V](bucket: String, keyPathBijection: Bijection[K, String])
                         (implicit valueSerializer: Bijection[V, Array[Byte]])
  extends Persisted[Map[K, V]] {

  val s3client = new AmazonS3Client()

  override def save(state: Map[K, V]): Try[Unit] = Try {
    for ((k, v) <- state) {
      val path: String = keyPathBijection(k)
      val bytes = valueSerializer(v)
      val stream = new ByteArrayInputStream(bytes)
      val meta = new ObjectMetadata()
      meta.setContentLength(bytes.length)
      s3client.putObject(bucket, path, stream, meta)
    }
  }

  override def fetch(): Try[Map[K, V]] = Try {
    val listing: ObjectListing = s3client.listObjects(bucket)
    val bucketEntries = listing.getObjectSummaries.asScala.toList
    val s3keys = bucketEntries.map(_.getKey)

    val result: Map[K, V] = (
      for (s3key <- s3keys) yield {
        println(s"Loading $s3key...")
        val request = new GetObjectRequest(bucket, s3key)
        val s3object = s3client.getObject(request)
        val objectData: S3ObjectInputStream = s3object.getObjectContent
        val bytes = IOUtils.toByteArray(objectData)
        objectData.close()
        println(s"Loaded $s3key! Deserializing...")
        val k = keyPathBijection.inverse(s3key)
        val v = valueSerializer.inverse(bytes)
        println(s"Done with $s3key.")
        (k, v)
      })(breakOut)

    result
  }
} 
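Constructing a store takes a key-to-path bijection plus an implicit value serializer. A sketch for string keys and UTF-8 string values, built with Bijection.build (the bucket name and "entries/" prefix are made up):

import com.twitter.bijection.Bijection

// Sketch: keys live under an "entries/" prefix; values round-trip through UTF-8.
val keyPath: Bijection[String, String] =
  Bijection.build[String, String](k => s"entries/$k")(_.stripPrefix("entries/"))
implicit val utf8Value: Bijection[String, Array[Byte]] =
  Bijection.build[String, Array[Byte]](_.getBytes("UTF-8"))(new String(_, "UTF-8"))

val store = new S3KVPersisted[String, String]("my-state-bucket", keyPath)
store.save(Map("alpha" -> "1", "beta" -> "2"))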
Example 9
Source File: S3ObjectWatcher.scala    From shield   with MIT License
package shield.actors.config

import akka.actor.{Actor, ActorLogging}
import com.amazonaws.services.s3.AmazonS3Client

sealed trait S3ObjectWatcherMessage
case object Refresh extends S3ObjectWatcherMessage
case class ChangedContents(contents: String) extends S3ObjectWatcherMessage


class S3ObjectWatcher(bucketName: String, configFilename: String) extends Actor with ActorLogging {
  val s3Client = new AmazonS3Client()
  var lastContents = ""

  def receive = {
    case Refresh =>
      val s3Object = s3Client.getObject(bucketName, configFilename)
      val newContents =
        try scala.io.Source.fromInputStream(s3Object.getObjectContent).mkString
        finally s3Object.close() // return the HTTP connection to the pool

      if (newContents != lastContents) {
        log.info("Detected change in s3 file contents")
        log.debug(s"Fetched from s3: $newContents")
        context.parent ! ChangedContents(newContents)
        lastContents = newContents
      }
  }
} 
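Nothing sends Refresh on its own; the enclosing system is expected to poll. A minimal wiring sketch (the actor names and the 30-second interval are ours):

import akka.actor.{ActorSystem, Props}
import scala.concurrent.duration._

val system = ActorSystem("shield")
import system.dispatcher // the scheduler needs an ExecutionContext

val watcher = system.actorOf(
  Props(new S3ObjectWatcher("config-bucket", "shield-config.json")), "s3-watcher")

// Poll S3 every 30 seconds; ChangedContents messages go to the watcher's parent.
system.scheduler.schedule(0.seconds, 30.seconds, watcher, Refresh)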
Example 10
Source File: S3DiffUploader.scala    From shield   with MIT License
package shield.aws

import java.io.{ByteArrayInputStream, InputStream}
import java.nio.charset.StandardCharsets

import akka.actor.{Actor, ActorLogging, Props}
import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.ObjectMetadata
import shield.actors.listeners.ComparisonDiffFile

object S3DiffUploader {
  def props(bucket: String, folder: String) : Props = Props(new S3DiffUploader(bucket, folder))
}

class S3DiffUploader(bucket: String, folder: String) extends Actor with ActorLogging {
  val s3Client = new AmazonS3Client()
  val charset = StandardCharsets.UTF_8
  val stripped = folder.stripPrefix("/").stripSuffix("/")
  val prefix = if (stripped.isEmpty) {
    stripped
  } else {
    stripped + "/"
  }

  def receive = {
    case file: ComparisonDiffFile =>
      val metadata = new ObjectMetadata()
      metadata.setContentLength(file.contents.length)
      s3Client.putObject(bucket, s"$prefix${file.fileName}", new ByteArrayInputStream(file.contents), metadata)
  }
} 
Example 11
Source File: S3.scala    From teamcity-s3-plugin   with Apache License 2.0
package com.gu.teamcity

import java.io.{InputStream, File}

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentialsProviderChain, DefaultAWSCredentialsProviderChain}
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.{ObjectMetadata, PutObjectRequest, CannedAccessControlList}
import com.amazonaws.services.s3.transfer.TransferManager
import jetbrains.buildServer.serverSide.SBuild

import scala.util.{Success, Try}

class S3(config: S3ConfigManager) {
  val credentialsProvider = {
    val provider = new AWSCredentialsProviderChain(config, new DefaultAWSCredentialsProviderChain())
    provider.setReuseLastProvider(false)
    provider
  }

  val transferManager = new TransferManager(
    new AmazonS3Client(credentialsProvider, new ClientConfiguration().withMaxErrorRetry(2))
  )

  def upload(bucket: String, build: SBuild, fileName: String, contents: InputStream, fileSize: Long): Try[Unit] =
    Try {
      val uploadDirectory = s"${S3Plugin.cleanFullName(build)}/${build.getBuildNumber}"
      val metadata = {
        val md = new ObjectMetadata()
        md.setContentLength(fileSize)
        md
      }
      val req = new PutObjectRequest(bucket, s"$uploadDirectory/$fileName", contents, metadata)
      req.withCannedAcl(CannedAccessControlList.BucketOwnerFullControl)
      val upload = transferManager.upload(req)
      upload.waitForUploadResult()
    }

  def upload(bucket: String, build: SBuild, fileName: String, file: File): Try[Unit] =
    Try {
      val uploadDirectory = s"${S3Plugin.cleanFullName(build)}/${build.getBuildNumber}"
      val req = new PutObjectRequest(bucket, s"$uploadDirectory/$fileName", file)
      req.withCannedAcl(CannedAccessControlList.BucketOwnerFullControl)
      val upload = transferManager.upload(req)
      upload.waitForUploadResult()
    }

} 
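One operational note: TransferManager owns a thread pool, so the plugin should release it on teardown. A hedged sketch (the method name is ours; shutdownNow is the SDK call):

// Sketch: stop TransferManager's threads; passing true also shuts down the wrapped S3 client.
def shutdown(): Unit = transferManager.shutdownNow(true)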
Example 12
Source File: RedshiftReaderM.scala    From SqlShift   with MIT License
package com.databricks.spark.redshift

import com.amazonaws.auth.AWSCredentials
import com.amazonaws.services.s3.AmazonS3Client
import org.apache.spark.SparkContext
import org.apache.spark.sql.sources.BaseRelation
import org.apache.spark.sql.{DataFrame, SQLContext}

object RedshiftReaderM {

    val endpoint = "s3.ap-south-1.amazonaws.com"

    def getS3Client(provider: AWSCredentials): AmazonS3Client = {
        val client = new AmazonS3Client(provider)
        client.setEndpoint(endpoint)
        client
    }

    def getDataFrameForConfig(configs: Map[String, String], sparkContext: SparkContext, sqlContext: SQLContext): DataFrame = {
        val source: DefaultSource = new DefaultSource(new JDBCWrapper(), getS3Client)
        val br: BaseRelation = source.createRelation(sqlContext, configs)
        sqlContext.baseRelationToDataFrame(br)
    }
} 
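getDataFrameForConfig hands the map straight to spark-redshift's DefaultSource, so the usual spark-redshift option keys apply. A call sketch with placeholder connection values, assuming sparkContext and sqlContext are already in scope:

// Sketch: "url", "dbtable" and "tempdir" are standard spark-redshift options.
val configs = Map(
  "url" -> "jdbc:redshift://example-host:5439/dev?user=user&password=pass",
  "dbtable" -> "public.events",
  "tempdir" -> "s3n://my-temp-bucket/redshift/"
)
val df = RedshiftReaderM.getDataFrameForConfig(configs, sparkContext, sqlContext)
df.show()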
Example 13
Source File: S3Brain.scala    From sumobot   with Apache License 2.0
package com.sumologic.sumobot.brain

import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.util.Properties

import akka.actor.{Actor, Props}
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.s3.{AmazonS3Client, AmazonS3ClientBuilder}
import com.amazonaws.services.s3.model.ObjectMetadata
import com.sumologic.sumobot.brain.Brain._

import scala.collection.JavaConverters._
import scala.collection.immutable

object S3Brain {
  def props(credentials: AWSCredentials,
            bucket: String,
            s3Key: String): Props = Props(classOf[S3Brain], credentials, bucket, s3Key)
}

class S3Brain(credentials: AWSCredentials,
              bucket: String,
              s3Key: String) extends Actor {

  private val s3Client = AmazonS3ClientBuilder.standard()
    .withCredentials(new AWSStaticCredentialsProvider(credentials)).build

  private var brainContents: Map[String, String] = loadFromS3()

  override def receive: Receive = {
    case Store(key, value) =>
      brainContents += (key -> value)
      saveToS3(brainContents)

    case Remove(key) =>
      brainContents -= key
      saveToS3(brainContents)

    case Retrieve(key) =>
      brainContents.get(key) match {
        case Some(value) => sender() ! ValueRetrieved(key, value)
        case None => sender() ! ValueMissing(key)
      }

    case ListValues(prefix) =>
      sender() ! ValueMap(brainContents.filter(_._1.startsWith(prefix)))
  }

  private def loadFromS3(): Map[String, String] = {
    if (s3Client.doesBucketExistV2(bucket)) {
      val props = new Properties()
      props.load(s3Client.getObject(bucket, s3Key).getObjectContent)
      immutable.Map(props.asScala.toSeq: _*)
    } else {
      Map.empty
    }
  }

  private def saveToS3(contents: Map[String, String]): Unit = {
    if (!s3Client.doesBucketExistV2(bucket)) {
      s3Client.createBucket(bucket)
    }

    val props = new Properties()
    props.putAll(contents.asJava)
    val out = new ByteArrayOutputStream()
    props.store(out, "")
    out.flush()
    out.close()
    val in = new ByteArrayInputStream(out.toByteArray)
    s3Client.putObject(bucket, s3Key, in, new ObjectMetadata())
  }
}
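Wiring the brain into a running system is a single actorOf call; a sketch with dummy credentials (the bucket and key names are made up):

import akka.actor.ActorSystem
import com.amazonaws.auth.BasicAWSCredentials
import com.sumologic.sumobot.brain.Brain.Store

val system = ActorSystem("sumobot")
val brain = system.actorOf(
  S3Brain.props(new BasicAWSCredentials("accessKey", "secretKey"), "sumobot-brain", "brain.properties"),
  "brain")

brain ! Store("greeting", "hello") // persisted back to S3 on every write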