com.amazonaws.services.s3.model.PutObjectRequest Java Examples

The following examples show how to use com.amazonaws.services.s3.model.PutObjectRequest. Each example is taken from an open source project; the source file, project, and license are noted above it.
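Before the project examples, here is a minimal sketch of the two most commonly used PutObjectRequest constructors; the client setup, bucket, key, and file paths are hypothetical placeholders.

private void putObjectExamples(AmazonS3 s3) {
    // 1. Upload a local file: the SDK derives the content length (and, from the
    //    file extension, usually the content type) from the file itself.
    s3.putObject(new PutObjectRequest("my-bucket", "docs/report.txt", new File("/tmp/report.txt")));

    // 2. Upload from a stream: set the content length yourself, otherwise the
    //    SDK has to buffer the stream in memory to compute it.
    byte[] bytes = "hello".getBytes(StandardCharsets.UTF_8);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    s3.putObject(new PutObjectRequest("my-bucket", "docs/hello.txt",
        new ByteArrayInputStream(bytes), metadata));
}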
Example #1
Source File: S3FileManagerImpl.java    From entrada with GNU General Public License v3.0
private boolean uploadFile(File src, S3Details dstDetails, boolean archive) {
  PutObjectRequest request = new PutObjectRequest(dstDetails.getBucket(),
      FileUtil.appendPath(dstDetails.getKey(), src.getName()), src);
  ObjectMetadata meta = new ObjectMetadata();

  if (archive) {
    meta.setHeader(Headers.STORAGE_CLASS,
        StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
  }

  if (encrypt) {
    meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }

  request.setMetadata(meta);
  try {
    amazonS3.putObject(request);
    return true;
  } catch (Exception e) {
    log.error("Error while uploading file: {}", src, e);
  }

  return false;
}
 
Example #2
Source File: S3DataManagerTest.java    From aws-codebuild-jenkins-plugin with Apache License 2.0
@Test
public void testUploadLocalSourceWithNoSSEAlgorithm() throws Exception {
    File file = new File(mockWorkspaceDir + "/source-file");
    FileUtils.write(file, "contents");

    PutObjectResult mockedResponse = new PutObjectResult();
    mockedResponse.setVersionId("some-version-id");
    when(s3Client.putObject(any(PutObjectRequest.class))).thenReturn(mockedResponse);
    S3DataManager d = new S3DataManager(s3Client, s3InputBucketName, s3InputKeyName, "", file.getPath(), "");

    ArgumentCaptor<PutObjectRequest> savedPutObjectRequest = ArgumentCaptor.forClass(PutObjectRequest.class);
    UploadToS3Output result = d.uploadSourceToS3(listener, testWorkSpace);
    assertEquals(result.getSourceLocation(), s3InputBucketName + "/" + s3InputKeyName);

    verify(s3Client).putObject(savedPutObjectRequest.capture());
    assertEquals(savedPutObjectRequest.getValue().getBucketName(), s3InputBucketName);
    assertEquals(savedPutObjectRequest.getValue().getKey(), s3InputKeyName);
    assertEquals(savedPutObjectRequest.getValue().getMetadata().getContentMD5(), S3DataManager.getZipMD5(file));
    assertEquals(savedPutObjectRequest.getValue().getMetadata().getContentLength(), file.length());
    assertNull(savedPutObjectRequest.getValue().getMetadata().getSSEAlgorithm());
}
 
Example #3
Source File: S3RecordReaderModuleAppTest.java    From attic-apex-malhar with Apache License 2.0
@Before
public void setup() throws Exception
{
  client = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey));
  client.createBucket(testMeta.bucketKey);
  inputDir = testMeta.baseDirectory + File.separator + "input";

  File file1 = new File(inputDir + File.separator + FILE_1);
  File file2 = new File(inputDir + File.separator + FILE_2);

  FileUtils.writeStringToFile(file1, FILE_1_DATA);
  FileUtils.writeStringToFile(file2, FILE_2_DATA);

  client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_1, file1));
  client.putObject(new PutObjectRequest(testMeta.bucketKey, "input/" + FILE_2, file2));
  files = SCHEME + "://" + accessKey + ":" + secretKey + "@" + testMeta.bucketKey + "/input";
}
 
Example #4
Source File: MultipartUploadLiveTest.java    From tutorials with MIT License
@Test
public void whenUploadingFileWithTransferManager_thenVerifyUploadRequested() {
    File file = mock(File.class);
    PutObjectResult s3Result = mock(PutObjectResult.class);

    when(amazonS3.putObject(anyString(), anyString(), (File) any())).thenReturn(s3Result);
    when(file.getName()).thenReturn(KEY_NAME);

    PutObjectRequest request = new PutObjectRequest(BUCKET_NAME, KEY_NAME, file);
    request.setGeneralProgressListener(progressListener);

    Upload upload = tm.upload(request);

    assertThat(upload).isNotNull();
    verify(amazonS3).putObject(request);
}
 
Example #5
Source File: S3BlobStorage.java    From mojito with Apache License 2.0
void put(String name, byte[] content, Retention retention, ObjectMetadata objectMetadata) {

        Preconditions.checkNotNull(objectMetadata);
        objectMetadata.setContentLength(content.length);

        PutObjectRequest putRequest = new PutObjectRequest(
                s3BlobStorageConfigurationProperties.getBucket(),
                getFullName(name),
                new ByteArrayInputStream(content),
                objectMetadata);

        List<Tag> tags = new ArrayList<Tag>();
        tags.add(new Tag("retention", retention.toString()));

        putRequest.setTagging(new ObjectTagging(tags));

        amazonS3.putObject(putRequest);
    }
 
Example #6
Source File: S3AFileSystem.java    From big-c with Apache License 2.0
private void createEmptyObject(final String bucketName, final String objectName)
    throws AmazonClientException, AmazonServiceException {
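  // An input stream that is always at end-of-stream (read() returns -1),
  // providing the zero-byte body for the empty object.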
  final InputStream im = new InputStream() {
    @Override
    public int read() throws IOException {
      return -1;
    }
  };

  final ObjectMetadata om = new ObjectMetadata();
  om.setContentLength(0L);
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
  putObjectRequest.setCannedAcl(cannedACL);
  s3.putObject(putObjectRequest);
  statistics.incrementWriteOps(1);
}
 
Example #7
Source File: TestPutS3Object.java    From nifi with Apache License 2.0
@Test
public void testPutSinglePart() {
    runner.setProperty("x-custom-prop", "hello");
    prepareTest();

    runner.run(1);

    ArgumentCaptor<PutObjectRequest> captureRequest = ArgumentCaptor.forClass(PutObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).putObject(captureRequest.capture());
    PutObjectRequest request = captureRequest.getValue();
    assertEquals("test-bucket", request.getBucketName());

    runner.assertAllFlowFilesTransferred(PutS3Object.REL_SUCCESS, 1);

    List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(PutS3Object.REL_SUCCESS);
    MockFlowFile ff0 = flowFiles.get(0);

    ff0.assertAttributeEquals(CoreAttributes.FILENAME.key(), "testfile.txt");
    ff0.assertAttributeEquals(PutS3Object.S3_ETAG_ATTR_KEY, "test-etag");
    ff0.assertAttributeEquals(PutS3Object.S3_VERSION_ATTR_KEY, "test-version");
}
 
Example #8
Source File: S3FastLoader.java    From pocket-etl with Apache License 2.0
private void writeBufferToS3(byte[] toWrite, int limit) {
    try (EtlProfilingScope scope = new EtlProfilingScope(parentMetrics, "S3FastLoader.writeToS3")) {
        InputStream inputStream = new ByteArrayInputStream(toWrite, 0, limit);
        String s3Key = s3PartFileKeyGenerator.apply(++fileSequenceNumber);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(limit);
        PutObjectRequest putObjectRequest = new PutObjectRequest(s3Bucket, s3Key, inputStream, metadata);

        if (sseKmsArn != null && !sseKmsArn.isEmpty()) {
            putObjectRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(sseKmsArn));
        }

        try {
            amazonS3.putObject(putObjectRequest);
            emitSuccessAndFailureMetrics(scope, true);
        } catch (AmazonClientException e) {
            logger.error(e);
            scope.addCounter(e.getClass().getSimpleName(), 1);
            emitSuccessAndFailureMetrics(scope, false);
            throw new UnrecoverableStreamFailureException("Exception caught trying to write object to S3: ", e);
        }
    }
}
 
Example #9
Source File: S3DaoTest.java    From herd with Apache License 2.0
@Test
public void testTagObjects()
{
    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(S3_BUCKET_NAME);

    // Create an S3 object summary.
    S3ObjectSummary s3ObjectSummary = new S3ObjectSummary();
    s3ObjectSummary.setKey(TARGET_S3_KEY);

    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Put a file in S3.
    s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), new ObjectMetadata()), null);

    // Tag the file with an S3 object tag.
    s3Dao.tagObjects(params, new S3FileTransferRequestParamsDto(), Collections.singletonList(s3ObjectSummary), tag);

    // Validate that the object got tagged.
    GetObjectTaggingResult getObjectTaggingResult = s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, TARGET_S3_KEY), null);
    assertEquals(Collections.singletonList(tag), getObjectTaggingResult.getTagSet());
}
 
Example #10
Source File: AwsUploadRepository.java    From konker-platform with Apache License 2.0
public String upload(InputStream is, String fileKey, String fileName, String suffix, Boolean isPublic) throws Exception {
    validateFile(is, suffix);
    if (isPublic == null) {
        isPublic = Boolean.TRUE;
    }
    if ((is != null) && (fileKey != null)) {
        try {
            byte[] bytes = IOUtils.toByteArray(is);
            s3Client.putObject(
                    new PutObjectRequest(
                            s3BucketConfig.getName(),
                            fileKey,
                            new ByteArrayInputStream(bytes),
                            S3ObjectMetadata.getObjectMetadata(bytes)
                    ).withCannedAcl(isPublic ? CannedAccessControlList.PublicRead : CannedAccessControlList.AuthenticatedRead)
            );
            return fileName + '.' + suffix;
        } catch (AmazonServiceException | IOException e) {
            throw new BusinessException(Validations.INVALID_S3_BUCKET_CREDENTIALS.getCode());
        } finally {
            is.close();
        }
    } else {
        throw new BusinessException(Validations.INVALID_PARAMETERS.getCode());
    }
}
 
Example #11
Source File: FileHelper.java    From datacollector with Apache License 2.0
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
  final PutObjectRequest putObjectRequest = new PutObjectRequest(
      bucket,
      fileName,
      is,
      metadata
  );
  final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
  Upload upload = transferManager.upload(putObjectRequest);
  upload.addProgressListener((ProgressListener) progressEvent -> {
    switch (progressEvent.getEventType()) {
      case TRANSFER_STARTED_EVENT:
        LOG.debug("Started uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_COMPLETED_EVENT:
        LOG.debug("Completed uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_FAILED_EVENT:
        LOG.debug("Failed uploading object {} into Amazon S3", object);
        break;
      default:
        break;
    }
  });
  return upload;
}
 
Example #12
Source File: S3DaoTest.java    From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestored()
{
    // Put a 1 byte already restored Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Validate the file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(params);
}
 
Example #13
Source File: S3DaoTest.java    From herd with Apache License 2.0
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated()
{
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
 
Example #14
Source File: S3Archiver.java    From chancery with Apache License 2.0
private void upload(@NotNull File src, @NotNull String key, @NotNull CallbackPayload payload) {
    log.info("Uploading {} to {} in {}", src, key, bucketName);
    final PutObjectRequest request = new PutObjectRequest(bucketName, key, src);
    final ObjectMetadata metadata = request.getMetadata();
    final String commitId = payload.getAfter();
    if (commitId != null) {
        metadata.addUserMetadata("commit-id", commitId);
    }
    final DateTime timestamp = payload.getTimestamp();
    if (timestamp != null) {
        metadata.addUserMetadata("hook-timestamp",
                ISODateTimeFormat.basicTime().print(timestamp));
    }

    final TimerContext time = uploadTimer.time();
    try {
        s3Client.putObject(request);
    } catch (Exception e) {
        log.error("Couldn't upload to {} in {}", key, bucketName, e);
        throw e;
    } finally {
        time.stop();
    }
    log.info("Uploaded to {} in {}", key, bucketName);
}
 
Example #15
Source File: RedshiftManifestEmitter.java    From amazon-kinesis-connectors with Apache License 2.0
/**
 * Generates a manifest file and writes it to Amazon S3.
 *
 * @param fileName Name of the manifest file (Amazon S3 key)
 * @param records Records used to generate the manifest file
 * @return the file name (Amazon S3 key) the manifest was written to
 * @throws IOException if the manifest file cannot be generated
 */
private String writeManifestToS3(String fileName, List<String> records) throws IOException {
    String fileContents = generateManifestFile(records);
    // upload generated manifest file
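    // Note: null metadata means no content length is supplied, so the SDK may
    // buffer the whole stream in memory to determine it before uploading.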
    PutObjectRequest putObjectRequest =
            new PutObjectRequest(s3Bucket, fileName, new ByteArrayInputStream(fileContents.getBytes()), null);
    s3Client.putObject(putObjectRequest);
    return fileName;
}
 
Example #16
Source File: TestS3EncryptionStrategies.java    From nifi with Apache License 2.0
@Before
public void setup() {
    byte[] keyRawBytes = new byte[32];
    SecureRandom secureRandom = new SecureRandom();
    secureRandom.nextBytes(keyRawBytes);
    randomKeyMaterial = Base64.encodeBase64String(keyRawBytes);

    metadata = new ObjectMetadata();
    putObjectRequest = new PutObjectRequest("", "", "");
    initUploadRequest = new InitiateMultipartUploadRequest("", "");
    getObjectRequest = new GetObjectRequest("", "");
    uploadPartRequest = new UploadPartRequest();
}
 
Example #17
Source File: S3DaoTest.java    From herd with Apache License 2.0
@Test
public void testTagVersionsS3BucketWithVersioningDisabled()
{
    // Create an S3 object tag.
    Tag tag = new Tag(S3_OBJECT_TAG_KEY, S3_OBJECT_TAG_VALUE);

    // Put S3 objects in S3 bucket that has versioning disabled.
    for (int i = 0; i < 2; i++)
    {
        s3Operations.putObject(new PutObjectRequest(S3_BUCKET_NAME, TARGET_S3_KEY + i, new ByteArrayInputStream(new byte[1]), new ObjectMetadata()), null);
    }

    // List S3 versions that match the test prefix.
    ListVersionsRequest listVersionsRequest = new ListVersionsRequest().withBucketName(S3_BUCKET_NAME).withPrefix(TARGET_S3_KEY);
    VersionListing versionListing = s3Operations.listVersions(listVersionsRequest, null);
    assertEquals(2, CollectionUtils.size(versionListing.getVersionSummaries()));
    for (int i = 0; i < 2; i++)
    {
        assertNull(versionListing.getVersionSummaries().get(i).getVersionId());
    }

    // Create an S3 file transfer request parameters DTO to access S3 objects.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(S3_BUCKET_NAME);

    // Tag listed S3 versions with an S3 object tag.
    s3Dao.tagVersions(params, new S3FileTransferRequestParamsDto(), versionListing.getVersionSummaries(), tag);

    // Validate that both S3 objects got tagged.
    for (int i = 0; i < 2; i++)
    {
        GetObjectTaggingResult getObjectTaggingResult =
            s3Operations.getObjectTagging(new GetObjectTaggingRequest(S3_BUCKET_NAME, TARGET_S3_KEY + i, null), null);
        assertEquals(Collections.singletonList(tag), getObjectTaggingResult.getTagSet());
    }
}
 
Example #18
Source File: AmazonS3Config.java    From ReCiter with Apache License 2.0
/**
 * Creates an empty "folder" in an S3 bucket.
 * @param bucketName the bucket in which to create the folder
 * @param folderName the folder name, without the trailing slash
 * @param client the Amazon S3 client to use
 */
public static void createFolder(String bucketName, String folderName, AmazonS3 client) {
	final String SUFFIX = "/";
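	// S3 has no real directories: a zero-byte object whose key ends in "/"
	// is what the S3 console displays as a folder.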
	
	// create meta-data for your folder and set content-length to 0
	ObjectMetadata metadata = new ObjectMetadata();
	metadata.setContentLength(0);
	// create empty content
	InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
	// create a PutObjectRequest passing the folder name suffixed by /
	PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
				folderName + SUFFIX, emptyContent, metadata);
	// send request to S3 to create folder
	client.putObject(putObjectRequest);
}
 
Example #19
Source File: COSAPIClient.java    From stocator with Apache License 2.0
/**
 * PUT an object directly (i.e. not via the transfer manager).
 * @param putObjectRequest the request
 * @return the upload initiated
 * @throws IOException on problems
 */
PutObjectResult putObject(PutObjectRequest putObjectRequest)
    throws IOException {
  try {
    PutObjectResult result = mClient.putObject(putObjectRequest);
    return result;
  } catch (AmazonClientException e) {
    throw translateException("put", putObjectRequest.getKey(), e);
  }
}
 
Example #20
Source File: S3ArtifactStoreTest.java    From gocd-s3-artifacts with Apache License 2.0
@Test
public void shouldUseStandardIAStorageClassAsDefault() {
    S3ArtifactStore store = new S3ArtifactStore(mockClient, "foo-bar");
    store.setStorageClass("standard-ia");
    store.put(new PutObjectRequest("foo-bar", "key", new File("/tmp/baz")));
    verify(mockClient, times(1)).putObject(putCaptor.capture());
    PutObjectRequest putRequest = putCaptor.getValue();
    assertThat(putRequest.getStorageClass(), is("STANDARD_IA"));
}
 
Example #21
Source File: S3Uploader.java    From ache with Apache License 2.0
public String upload(String keyName, byte[] content) throws IOException {
    try {
        InputStream is = new ByteArrayInputStream(content);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(content.length);
        PutObjectRequest put = new PutObjectRequest(this.bucketName, keyName, is, metadata);
        s3client.putObject(put);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which " +
                "means your request made it " +
                "to Amazon S3, but was rejected with an error response" +
                " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which " +
                "means the client encountered " +
                "an internal error while trying to " +
                "communicate with S3, " +
                "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    return "https://s3.amazonaws.com/" + this.bucketName + "/" + keyName;
}
 
Example #22
Source File: S3PseudoLock.java    From exhibitor with Apache License 2.0
@Override
protected void createFile(String key, byte[] contents) throws Exception
{
    ObjectMetadata      metadata = new ObjectMetadata();
    metadata.setContentLength(contents.length);
    PutObjectRequest    request = new PutObjectRequest(bucket, key, new ByteArrayInputStream(contents), metadata);
    client.putObject(request);
}
 
Example #23
Source File: ExtendedClientConfigurationTest.java    From amazon-sqs-java-extended-client-lib with Apache License 2.0
@Test
public void testLargePayloadSupportEnabled() {

    AmazonS3 s3 = mock(AmazonS3.class);
    when(s3.putObject(isA(PutObjectRequest.class))).thenReturn(null);

    ExtendedClientConfiguration extendedClientConfiguration = new ExtendedClientConfiguration();
    extendedClientConfiguration.setLargePayloadSupportEnabled(s3, s3BucketName);

    Assert.assertTrue(extendedClientConfiguration.isLargePayloadSupportEnabled());
    Assert.assertNotNull(extendedClientConfiguration.getAmazonS3Client());
    Assert.assertEquals(s3BucketName, extendedClientConfiguration.getS3BucketName());

}
 
Example #24
Source File: S3ArtifactStoreTest.java    From gocd-s3-artifacts with Apache License 2.0
@Test
public void shouldUseReducedRedundancyStorageClass() {
    S3ArtifactStore store = new S3ArtifactStore(mockClient, "foo-bar");
    store.setStorageClass("rrs");
    store.put(new PutObjectRequest("foo-bar", "key", new File("/tmp/baz")));
    verify(mockClient, times(1)).putObject(putCaptor.capture());
    PutObjectRequest putRequest = putCaptor.getValue();
    assertThat(putRequest.getStorageClass(), is("REDUCED_REDUNDANCY"));
}
 
Example #25
Source File: S3DaoTest.java    From herd with Apache License 2.0
@Test
public void testPrepareMetadataAssertSetKmsHeaders()
{
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);

    try
    {
        String s3BucketName = "s3BucketName";
        String s3KeyPrefix = "s3KeyPrefix";
        String kmsKeyId = "kmsKeyId";

        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        s3FileTransferRequestParamsDto.setS3KeyPrefix(s3KeyPrefix);
        s3FileTransferRequestParamsDto.setKmsKeyId(kmsKeyId);

        when(mockS3Operations.putObject(any(), any())).then(new Answer<PutObjectResult>()
        {
            @Override
            public PutObjectResult answer(InvocationOnMock invocation) throws Throwable
            {
                PutObjectRequest putObjectRequest = invocation.getArgument(0);
                ObjectMetadata metadata = putObjectRequest.getMetadata();
                assertEquals("aws:kms", metadata.getSSEAlgorithm());
                assertEquals(kmsKeyId, metadata.getRawMetadata().get(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID));
                return new PutObjectResult();
            }
        });

        s3Dao.createDirectory(s3FileTransferRequestParamsDto);
    }
    finally
    {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
 
Example #26
Source File: S3DataManagerTest.java    From aws-codebuild-jenkins-plugin with Apache License 2.0
@Test
public void testUploadSourceSubdir() throws Exception {
    File subdir = new File(mockWorkspaceDir + "/subdir");
    subdir.mkdirs();

    ArgumentCaptor<PutObjectRequest> savedPutObjectRequest = ArgumentCaptor.forClass(PutObjectRequest.class);
    UploadToS3Output result = createDefaultSource("", "subdir").uploadSourceToS3(listener, testWorkSpace);
    assertEquals(result.getSourceLocation(), s3InputBucketName + "/" + s3InputKeyName);

    verify(s3Client).putObject(savedPutObjectRequest.capture());
    assertEquals(savedPutObjectRequest.getValue().getBucketName(), s3InputBucketName);
    assertEquals(savedPutObjectRequest.getValue().getKey(), s3InputKeyName);
    assertEquals(savedPutObjectRequest.getValue().getMetadata().getSSEAlgorithm(), sseAlgorithm);
}
 
Example #27
Source File: S3DataManagerTest.java    From aws-codebuild-jenkins-plugin with Apache License 2.0
private S3DataManager createDefaultSource(String localSourcePath, String workspaceSubdir) {
    this.s3ARNs.put("main", "ARN1/bucket/thing.zip"); //put one item in s3ARNs so exception doesn't happen.

    PutObjectResult mockedResponse = new PutObjectResult();
    mockedResponse.setVersionId("some-version-id");
    when(s3Client.putObject(any(PutObjectRequest.class))).thenReturn(mockedResponse);
    return new S3DataManager(s3Client, s3InputBucketName, s3InputKeyName, sseAlgorithm, localSourcePath, workspaceSubdir);
}
 
Example #28
Source File: S3ConnectivityCheckTest.java    From data-highway with Apache License 2.0
@Test
public void checkS3() {
  Mockito.when(
      s3.putObject(
          Mockito.any(PutObjectRequest.class)))
      .thenReturn(new PutObjectResult());

  underTest.checkS3Put(s3, "bucket", "key");
}
 
Example #29
Source File: AmazonSQSExtendedClientTest.java    From amazon-sqs-java-extended-client-lib with Apache License 2.0 5 votes vote down vote up
@Test
public void testWhenSendMessageWithSetMessageSizeThresholdThenThresholdIsHonored() {
    int messageLength = ARBITRATY_SMALLER_THRESSHOLD * 2;
    String messageBody = generateStringWithLength(messageLength);
    ExtendedClientConfiguration extendedClientConfiguration = new ExtendedClientConfiguration()
            .withLargePayloadSupportEnabled(mockS3, S3_BUCKET_NAME).withMessageSizeThreshold(ARBITRATY_SMALLER_THRESSHOLD);

    AmazonSQS sqsExtended = spy(new AmazonSQSExtendedClient(mock(AmazonSQSClient.class), extendedClientConfiguration));

    SendMessageRequest messageRequest = new SendMessageRequest(SQS_QUEUE_URL, messageBody);
    sqsExtended.sendMessage(messageRequest);
    verify(mockS3, times(1)).putObject(isA(PutObjectRequest.class));
}
 
Example #30
Source File: S3BlobContainer.java    From crate with Apache License 2.0
/**
 * Uploads a blob using a single upload request
 */
void executeSingleUpload(final S3BlobStore blobStore,
                         final String blobName,
                         final InputStream input,
                         final long blobSize) throws IOException {

    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }

    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());

    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}