Java Code Examples for com.amazonaws.services.s3.model.ObjectMetadata#setSSEAlgorithm()

The following examples show how to use com.amazonaws.services.s3.model.ObjectMetadata#setSSEAlgorithm(). Each example is taken from an open-source project; the source file, project, and license are noted above each listing.
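In its simplest form, the method marks an upload for SSE-S3 (AES-256) server-side encryption before the request is sent. The following is a minimal sketch, not taken from any of the projects below; the bucket name, key, and file path are placeholders:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class SetSseAlgorithmSketch {
  public static void main(String[] args) {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

    // Ask S3 to encrypt the object at rest with SSE-S3 (AES-256).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

    PutObjectRequest request =
        new PutObjectRequest("example-bucket", "example-key", new File("example.txt"))
            .withMetadata(metadata);
    s3.putObject(request);
  }
}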
Example 1
Source File: S3FileManagerImpl.java    From entrada with GNU General Public License v3.0
private boolean uploadFile(File src, S3Details dstDetails, boolean archive) {
  PutObjectRequest request = new PutObjectRequest(dstDetails.getBucket(),
      FileUtil.appendPath(dstDetails.getKey(), src.getName()), src);
  ObjectMetadata meta = new ObjectMetadata();

  if (archive) {
    meta
        .setHeader(Headers.STORAGE_CLASS,
            StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
  }

  if (encrypt) {
    meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }

  request.setMetadata(meta);
  try {
    amazonS3.putObject(request);
    return true;
  } catch (Exception e) {
    log.error("Error while uploading file: {}", src, e);
  }

  return false;
}
 
Example 2
Source File: S3DataTransferer.java    From oodt with Apache License 2.0
@Override
public void transferProduct(Product product) throws DataTransferException, IOException {
  for (Reference ref : product.getProductReferences()) {
    String origRef = stripProtocol(ref.getOrigReference(), false);
    String dataStoreRef = stripProtocol(ref.getDataStoreReference(), true);
    try {
      PutObjectRequest request = new PutObjectRequest(
          bucketName, dataStoreRef, new File(origRef));
      if (encrypt) {
        ObjectMetadata requestMetadata = new ObjectMetadata();
        requestMetadata.setSSEAlgorithm(AES_256_SERVER_SIDE_ENCRYPTION);
        request.setMetadata(requestMetadata);
      }
      s3Client.putObject(request);
    } catch (AmazonClientException e) {
      throw new DataTransferException(String.format(
          "Failed to upload product reference %s to S3 at %s", origRef,
          dataStoreRef), e);
    }
  }
}
 
Example 3
Source File: S3MultipartUpload.java    From data-highway with Apache License 2.0
String start() {
  bytes = 0;
  InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucket, key);

  if (enableServerSideEncryption) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    request.setObjectMetadata(objectMetadata);
  }

  String uploadId = s3.initiateMultipartUpload(request).getUploadId();
  stopwatch.start();
  log.info("Starting upload to s3://{}/{}.", bucket, key);
  return uploadId;
}
 
Example 4
Source File: S3S3Copier.java    From circus-train with Apache License 2.0
private void applyObjectMetadata(CopyObjectRequest copyObjectRequest) {
  if (s3s3CopierOptions.isS3ServerSideEncryption()) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    copyObjectRequest.setNewObjectMetadata(objectMetadata);
  }
}
 
Example 5
Source File: CopyMapper.java    From circus-train with Apache License 2.0
private S3UploadDescriptor describeUpload(FileStatus sourceFileStatus, Path targetPath) throws IOException {
  URI targetUri = targetPath.toUri();
  String bucketName = PathUtil.toBucketName(targetUri);
  String key = PathUtil.toBucketKey(targetUri);

  Path sourcePath = sourceFileStatus.getPath();

  ObjectMetadata metadata = new ObjectMetadata();
  metadata.setContentLength(sourceFileStatus.getLen());
  if (conf.getBoolean(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION)) {
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  }
  return new S3UploadDescriptor(sourcePath, bucketName, key, metadata);
}
 
Example 6
Source File: S3BaseUploadCallable.java    From jobcacher-plugin with MIT License
protected ObjectMetadata buildMetadata(File file) throws IOException {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(Mimetypes.getInstance().getMimetype(file.getName()));
    metadata.setContentLength(file.length());
    metadata.setLastModified(new Date(file.lastModified()));

    if (storageClass != null && !storageClass.isEmpty()) {
        metadata.setHeader("x-amz-storage-class", storageClass);
    }
    if (useServerSideEncryption) {
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    for (Map.Entry<String, String> entry : userMetadata.entrySet()) {
        final String key = entry.getKey().toLowerCase();
        switch (key) {
            case "cache-control":
                metadata.setCacheControl(entry.getValue());
                break;
            case "expires":
                try {
                    final Date expires = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z").parse(entry.getValue());
                    metadata.setHttpExpiresDate(expires);
                } catch (ParseException e) {
                    metadata.addUserMetadata(entry.getKey(), entry.getValue());
                }
                break;
            case "content-encoding":
                metadata.setContentEncoding(entry.getValue());
                break;
            case "content-type":
                metadata.setContentType(entry.getValue());
                // no break: control falls through to default, so content-type is also stored as user metadata
            default:
                metadata.addUserMetadata(entry.getKey(), entry.getValue());
                break;
        }
    }
    return metadata;
}
 
Example 7
Source File: AbstractS3IT.java    From nifi with Apache License 2.0
protected void putTestFileEncrypted(String key, File file) throws AmazonS3Exception, FileNotFoundException {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, key, new FileInputStream(file), objectMetadata);

    client.putObject(putRequest);
}
 
Example 8
Source File: S3WritableByteChannel.java    From beam with Apache License 2.0
S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
    throws IOException {
  this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
  this.options = checkNotNull(options);
  this.path = checkNotNull(path, "path");
  checkArgument(
      atMostOne(
          options.getSSECustomerKey() != null,
          options.getSSEAlgorithm() != null,
          options.getSSEAwsKeyManagementParams() != null),
      "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
          + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
  // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
  checkArgument(
      options.getS3UploadBufferSizeBytes()
          >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
      "S3UploadBufferSizeBytes must be at least %s bytes",
      S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
  this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
  eTags = new ArrayList<>();

  ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentType(contentType);
  if (options.getSSEAlgorithm() != null) {
    objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
  }
  InitiateMultipartUploadRequest request =
      new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
          .withStorageClass(options.getS3StorageClass())
          .withObjectMetadata(objectMetadata);
  request.setSSECustomerKey(options.getSSECustomerKey());
  request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());
  InitiateMultipartUploadResult result;
  try {
    result = amazonS3.initiateMultipartUpload(request);
  } catch (AmazonClientException e) {
    throw new IOException(e);
  }
  uploadId = result.getUploadId();
}
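Here the algorithm is supplied through Beam's S3Options rather than hard-coded. A minimal sketch of how those options might be configured for SSE-S3, assuming the S3Options interface from Beam's amazon-web-services module (the surrounding pipeline setup is omitted):

import org.apache.beam.sdk.io.aws.options.S3Options;
import org.apache.beam.sdk.options.PipelineOptionsFactory;

import com.amazonaws.services.s3.model.ObjectMetadata;

static S3Options sseS3Options() {
  S3Options options = PipelineOptionsFactory.as(S3Options.class);
  // At most one of SSE-C, SSE-S3, or SSE-KMS may be configured; this selects SSE-S3.
  options.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  return options;
}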
 
Example 9
Source File: S3DaoImpl.java    From herd with Apache License 2.0
/**
 * Prepares the object metadata for server side encryption and reduced redundancy storage.
 *
 * @param params the parameters.
 * @param metadata the metadata to prepare.
 */
private void prepareMetadata(final S3FileTransferRequestParamsDto params, ObjectMetadata metadata)
{
    // Set the server side encryption
    if (params.getKmsKeyId() != null)
    {
        /*
         * TODO Use proper way to set KMS once AWS provides a way.
         * We are modifying the raw headers directly since TransferManager's uploadFileList operation does not provide a way to set a KMS key ID.
         * This would normally cause some issues when uploading where an MD5 checksum validation exception will be thrown, even though the object is
         * correctly uploaded.
         * To get around this, a system property defined at
         * com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY must be set.
         */
        metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
        metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, params.getKmsKeyId().trim());
    }
    else
    {
        metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }

    // If specified, set the metadata to use RRS.
    if (Boolean.TRUE.equals(params.isUseRrs()))
    {
        // TODO: For upload File, we can set RRS on the putObjectRequest. For uploadDirectory, this is the only
        // way to do it. However, setHeader() is flagged as For Internal Use Only
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString());
    }
}
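The raw-header workaround above is needed because TransferManager's uploadFileList operation does not expose a KMS key ID. For a single PutObjectRequest, the SDK accepts the key directly through SSEAwsKeyManagementParams. A minimal sketch, not taken from herd; the client, bucket, key, file, and KMS key ID are passed in as assumptions:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams;

static void putWithKms(AmazonS3 s3, String bucket, String key, File file, String kmsKeyId) {
  // The SDK sets the SSE-KMS headers itself; no ObjectMetadata changes are required here.
  PutObjectRequest request = new PutObjectRequest(bucket, key, file)
      .withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsKeyId));
  s3.putObject(request);
}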
 
Example 10
Source File: S3BlobContainer.java    From crate with Apache License 2.0
/**
 * Uploads a blob using a single upload request
 */
void executeSingleUpload(final S3BlobStore blobStore,
                         final String blobName,
                         final InputStream input,
                         final long blobSize) throws IOException {

    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }

    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());

    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
 
Example 11
Source File: OldS3NotebookRepo.java    From zeppelin with Apache License 2.0
@Override
public void save(Note note, AuthenticationInfo subject) throws IOException {
  String json = note.toJson();
  String key = user + "/" + "notebook" + "/" + note.getId() + "/" + "note.json";

  File file = File.createTempFile("note", "json");
  try {
    Writer writer = new OutputStreamWriter(new FileOutputStream(file));
    writer.write(json);
    writer.close();

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file);

    if (useServerSideEncryption) {
      // Request server-side encryption.
      ObjectMetadata objectMetadata = new ObjectMetadata();
      objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
      putRequest.setMetadata(objectMetadata);
    }
    if (objectCannedAcl != null) {
      putRequest.withCannedAcl(objectCannedAcl);
    }
    s3client.putObject(putRequest);
  }
  catch (AmazonClientException ace) {
    throw new IOException("Unable to store note in S3: " + ace, ace);
  }
  finally {
    FileUtils.deleteQuietly(file);
  }
}
 
Example 12
Source File: S3NotebookRepo.java    From zeppelin with Apache License 2.0
@Override
public void save(Note note, AuthenticationInfo subject) throws IOException {
  String json = note.toJson();
  String key = rootFolder + "/" + buildNoteFileName(note);
  File file = File.createTempFile("note", "zpln");
  try {
    Writer writer = new OutputStreamWriter(new FileOutputStream(file));
    writer.write(json);
    writer.close();
    PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file);
    if (useServerSideEncryption) {
      // Request server-side encryption.
      ObjectMetadata objectMetadata = new ObjectMetadata();
      objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
      putRequest.setMetadata(objectMetadata);
    }
    if (objectCannedAcl != null) {
      putRequest.withCannedAcl(objectCannedAcl);
    }
    s3client.putObject(putRequest);
  }
  catch (AmazonClientException ace) {
    throw new IOException("Fail to store note: " + note.getPath() + " in S3", ace);
  }
  finally {
    FileUtils.deleteQuietly(file);
  }
}
 
Example 13
Source File: TestFetchS3Object.java    From nifi with Apache License 2.0
@Test
public void testGetObject() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "request-key");
    runner.enqueue(new byte[0], attrs);

    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setKey("response-key");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    metadata.setContentDisposition("key/path/to/file.txt");
    metadata.setContentType("text/plain");
    metadata.setContentMD5("testMD5hash");
    Date expiration = new Date();
    metadata.setExpirationTime(expiration);
    metadata.setExpirationTimeRuleId("testExpirationRuleId");
    Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("userKey1", "userValue1");
    userMetadata.put("userKey2", "userValue2");
    metadata.setUserMetadata(userMetadata);
    metadata.setSSEAlgorithm("testAlgorithm");
    Mockito.when(metadata.getETag()).thenReturn("test-etag");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);

    runner.run(1);

    ArgumentCaptor<GetObjectRequest> captureRequest = ArgumentCaptor.forClass(GetObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).getObject(captureRequest.capture());
    GetObjectRequest request = captureRequest.getValue();
    assertEquals("request-bucket", request.getBucketName());
    assertEquals("request-key", request.getKey());
    assertFalse(request.isRequesterPays());
    assertNull(request.getVersionId());

    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals("s3.bucket", "response-bucket-name");
    ff.assertAttributeEquals(CoreAttributes.FILENAME.key(), "file.txt");
    ff.assertAttributeEquals(CoreAttributes.PATH.key(), "key/path/to");
    ff.assertAttributeEquals(CoreAttributes.ABSOLUTE_PATH.key(), "key/path/to/file.txt");
    ff.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain");
    ff.assertAttributeEquals("hash.value", "testMD5hash");
    ff.assertAttributeEquals("hash.algorithm", "MD5");
    ff.assertAttributeEquals("s3.etag", "test-etag");
    ff.assertAttributeEquals("s3.expirationTime", String.valueOf(expiration.getTime()));
    ff.assertAttributeEquals("s3.expirationTimeRuleId", "testExpirationRuleId");
    ff.assertAttributeEquals("userKey1", "userValue1");
    ff.assertAttributeEquals("userKey2", "userValue2");
    ff.assertAttributeEquals("s3.sseAlgorithm", "testAlgorithm");
    ff.assertContentEquals("Some Content");
}
 
Example 14
Source File: ServerSideS3EncryptionStrategy.java    From nifi with Apache License 2.0
@Override
public void configureInitiateMultipartUploadRequest(InitiateMultipartUploadRequest request, ObjectMetadata objectMetadata, String keyValue) {
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
}
 
Example 15
Source File: ServerSideS3EncryptionStrategy.java    From nifi with Apache License 2.0
@Override
public void configurePutObjectRequest(PutObjectRequest request, ObjectMetadata objectMetadata, String keyValue) {
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
}
 
Example 16
Source File: S3UploadManager.java    From secor with Apache License 2.0
private void enableS3Encryption(PutObjectRequest uploadRequest) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    uploadRequest.setMetadata(objectMetadata);
}
 
Example 17
Source File: S3ManagedEncrypter.java    From nexus-public with Eclipse Public License 1.0
private void setEncryption(final Supplier<ObjectMetadata> getter, final Consumer<ObjectMetadata> setter) {
  ObjectMetadata objectMetadata = ofNullable(getter.get()).orElse(new ObjectMetadata());
  objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
  setter.accept(objectMetadata);
}
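A hypothetical call site (the request variables here are assumed) would pass the request's own metadata accessors, which is what lets one helper serve put and copy requests even though their getter/setter names differ:

setEncryption(putObjectRequest::getMetadata, putObjectRequest::setMetadata);
setEncryption(copyObjectRequest::getNewObjectMetadata, copyObjectRequest::setNewObjectMetadata);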
 
Example 18
Source File: S3CopyStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public String run() throws Exception {
	final String fromBucket = this.step.getFromBucket();
	final String toBucket = this.step.getToBucket();
	final String fromPath = this.step.getFromPath();
	final String toPath = this.step.getToPath();
	final String kmsId = this.step.getKmsId();
	final Map<String, String> metadatas = new HashMap<>();
	final CannedAccessControlList acl = this.step.getAcl();
	final String cacheControl = this.step.getCacheControl();
	final String contentType = this.step.getContentType();
	final String sseAlgorithm = this.step.getSseAlgorithm();
	final S3ClientOptions s3ClientOptions = this.step.createS3ClientOptions();
	final EnvVars envVars = this.getContext().get(EnvVars.class);

	if (this.step.getMetadatas() != null && this.step.getMetadatas().length != 0) {
		for (String metadata : this.step.getMetadatas()) {
			if (metadata.split(":").length == 2) {
				metadatas.put(metadata.split(":")[0], metadata.split(":")[1]);
			}
		}
	}

	Preconditions.checkArgument(fromBucket != null && !fromBucket.isEmpty(), "From bucket must not be null or empty");
	Preconditions.checkArgument(fromPath != null && !fromPath.isEmpty(), "From path must not be null or empty");
	Preconditions.checkArgument(toBucket != null && !toBucket.isEmpty(), "To bucket must not be null or empty");
	Preconditions.checkArgument(toPath != null && !toPath.isEmpty(), "To path must not be null or empty");

	TaskListener listener = Execution.this.getContext().get(TaskListener.class);
	listener.getLogger().format("Copying s3://%s/%s to s3://%s/%s%n", fromBucket, fromPath, toBucket, toPath);

	CopyObjectRequest request = new CopyObjectRequest(fromBucket, fromPath, toBucket, toPath);

	// Add metadata
	if (metadatas.size() > 0 || (cacheControl != null && !cacheControl.isEmpty()) || (contentType != null && !contentType.isEmpty()) || (sseAlgorithm != null && !sseAlgorithm.isEmpty())) {
		ObjectMetadata metas = new ObjectMetadata();
		if (metadatas.size() > 0) {
			metas.setUserMetadata(metadatas);
		}
		if (cacheControl != null && !cacheControl.isEmpty()) {
			metas.setCacheControl(cacheControl);
		}
		if (contentType != null && !contentType.isEmpty()) {
			metas.setContentType(contentType);
		}
		if (sseAlgorithm != null && !sseAlgorithm.isEmpty()) {
			metas.setSSEAlgorithm(sseAlgorithm);
		}
		request.withNewObjectMetadata(metas);
	}

	// Add acl
	if (acl != null) {
		request.withCannedAccessControlList(acl);
	}

	// Add kms
	if (kmsId != null && !kmsId.isEmpty()) {
		listener.getLogger().format("Using KMS: %s%n", kmsId);
		request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsId));
	}

	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(s3ClientOptions.createAmazonS3ClientBuilder(), envVars))
			.build();
	try {
		final Copy copy = mgr.copy(request);
		copy.addProgressListener((ProgressListener) progressEvent -> {
			if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
				listener.getLogger().println("Finished: " + copy.getDescription());
			}
		});
		copy.waitForCompletion();
	}
	finally{
		mgr.shutdownNow();
	}

	listener.getLogger().println("Copy complete");
	return String.format("s3://%s/%s", toBucket, toPath);
}
 
Example 19
Source File: TestFetchS3Object.java    From nifi with Apache License 2.0
@Test
public void testGetObjectWithRequesterPays() throws IOException {
    runner.setProperty(FetchS3Object.REGION, "us-east-1");
    runner.setProperty(FetchS3Object.BUCKET, "request-bucket");
    runner.setProperty(FetchS3Object.REQUESTER_PAYS, "true");
    final Map<String, String> attrs = new HashMap<>();
    attrs.put("filename", "request-key");
    runner.enqueue(new byte[0], attrs);

    S3Object s3ObjectResponse = new S3Object();
    s3ObjectResponse.setBucketName("response-bucket-name");
    s3ObjectResponse.setKey("response-key");
    s3ObjectResponse.setObjectContent(new StringInputStream("Some Content"));
    ObjectMetadata metadata = Mockito.spy(ObjectMetadata.class);
    metadata.setContentDisposition("key/path/to/file.txt");
    metadata.setContentType("text/plain");
    metadata.setContentMD5("testMD5hash");
    Date expiration = new Date();
    metadata.setExpirationTime(expiration);
    metadata.setExpirationTimeRuleId("testExpirationRuleId");
    Map<String, String> userMetadata = new HashMap<>();
    userMetadata.put("userKey1", "userValue1");
    userMetadata.put("userKey2", "userValue2");
    metadata.setUserMetadata(userMetadata);
    metadata.setSSEAlgorithm("testAlgorithm");
    Mockito.when(metadata.getETag()).thenReturn("test-etag");
    s3ObjectResponse.setObjectMetadata(metadata);
    Mockito.when(mockS3Client.getObject(Mockito.any())).thenReturn(s3ObjectResponse);

    runner.run(1);

    ArgumentCaptor<GetObjectRequest> captureRequest = ArgumentCaptor.forClass(GetObjectRequest.class);
    Mockito.verify(mockS3Client, Mockito.times(1)).getObject(captureRequest.capture());
    GetObjectRequest request = captureRequest.getValue();
    assertEquals("request-bucket", request.getBucketName());
    assertEquals("request-key", request.getKey());
    assertTrue(request.isRequesterPays());
    assertNull(request.getVersionId());

    runner.assertAllFlowFilesTransferred(FetchS3Object.REL_SUCCESS, 1);
    final List<MockFlowFile> ffs = runner.getFlowFilesForRelationship(FetchS3Object.REL_SUCCESS);
    MockFlowFile ff = ffs.get(0);
    ff.assertAttributeEquals("s3.bucket", "response-bucket-name");
    ff.assertAttributeEquals(CoreAttributes.FILENAME.key(), "file.txt");
    ff.assertAttributeEquals(CoreAttributes.PATH.key(), "key/path/to");
    ff.assertAttributeEquals(CoreAttributes.ABSOLUTE_PATH.key(), "key/path/to/file.txt");
    ff.assertAttributeEquals(CoreAttributes.MIME_TYPE.key(), "text/plain");
    ff.assertAttributeEquals("hash.value", "testMD5hash");
    ff.assertAttributeEquals("hash.algorithm", "MD5");
    ff.assertAttributeEquals("s3.etag", "test-etag");
    ff.assertAttributeEquals("s3.expirationTime", String.valueOf(expiration.getTime()));
    ff.assertAttributeEquals("s3.expirationTimeRuleId", "testExpirationRuleId");
    ff.assertAttributeEquals("userKey1", "userValue1");
    ff.assertAttributeEquals("userKey2", "userValue2");
    ff.assertAttributeEquals("s3.sseAlgorithm", "testAlgorithm");
    ff.assertContentEquals("Some Content");
}
 
Example 20
Source File: S3Utils.java    From CloverETL-Engine with GNU Lesser General Public License v2.1
/**
 * CLO-7293:
 * 
 * Creates new {@link ObjectMetadata}, sets SSE algorithm, if configured.
 * 
 * @return new {@link ObjectMetadata}
 */
private static ObjectMetadata createPutObjectMetadata() {
	ObjectMetadata metadata = new ObjectMetadata();
	metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
	return metadata;
}