com.amazonaws.services.s3.model.CopyPartResult Java Examples

The following examples show how to use com.amazonaws.services.s3.model.CopyPartResult. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
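Before the project examples, here is a minimal, self-contained sketch of the API flow that produces CopyPartResult objects: initiate a multipart upload, issue one CopyPartRequest per byte range, collect each part's ETag from the returned CopyPartResult, and complete the upload. The bucket and key names, the 5 MiB part size, and the use of AmazonS3ClientBuilder.defaultClient() are illustrative assumptions, not taken from the examples below.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CopyPartRequest;
import com.amazonaws.services.s3.model.CopyPartResult;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;

import java.util.ArrayList;
import java.util.List;

public class MultipartCopySketch {

  public static void main(String[] args) {
    // Hypothetical names; substitute your own bucket and keys.
    String bucket = "example-bucket";
    String sourceKey = "source-object";
    String destinationKey = "destination-object";
    long partSize = 5L * 1024 * 1024; // 5 MiB, the S3 minimum for all but the last part

    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    long objectSize = s3.getObjectMetadata(bucket, sourceKey).getContentLength();

    String uploadId = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(bucket, destinationKey)).getUploadId();
    try {
      List<PartETag> partETags = new ArrayList<>();
      int partNumber = 1;
      for (long offset = 0; offset < objectSize; offset += partSize, partNumber++) {
        long lastByte = Math.min(offset + partSize, objectSize) - 1;
        // copyPart performs a server-side copy of the byte range and returns a
        // CopyPartResult carrying the part number and ETag.
        CopyPartResult result = s3.copyPart(new CopyPartRequest()
            .withSourceBucketName(bucket)
            .withSourceKey(sourceKey)
            .withDestinationBucketName(bucket)
            .withDestinationKey(destinationKey)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFirstByte(offset)
            .withLastByte(lastByte));
        partETags.add(result.getPartETag());
      }
      s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
          bucket, destinationKey, uploadId, partETags));
    } catch (RuntimeException e) {
      // Abort on failure so S3 does not retain orphaned parts for this upload.
      s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, destinationKey, uploadId));
      throw e;
    }
  }
}

Example #2 below implements essentially the same loop with logging and a configurable chunk size.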
Example #1
Source File: S3FileSystemTest.java    From beam with Apache License 2.0
private void testMultipartCopy(S3Options options) {
  S3FileSystem s3FileSystem = buildMockedS3FileSystem(options);

  S3ResourceId sourcePath = S3ResourceId.fromUri("s3://bucket/from");
  S3ResourceId destinationPath = S3ResourceId.fromUri("s3://bucket/to");

  InitiateMultipartUploadResult initiateMultipartUploadResult =
      new InitiateMultipartUploadResult();
  initiateMultipartUploadResult.setUploadId("upload-id");
  if (getSSECustomerKeyMd5(options) != null) {
    initiateMultipartUploadResult.setSSECustomerKeyMd5(getSSECustomerKeyMd5(options));
  }
  when(s3FileSystem
          .getAmazonS3Client()
          .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
      .thenReturn(initiateMultipartUploadResult);
  assertEquals(
      getSSECustomerKeyMd5(options),
      s3FileSystem
          .getAmazonS3Client()
          .initiateMultipartUpload(
              new InitiateMultipartUploadRequest(
                  destinationPath.getBucket(), destinationPath.getKey()))
          .getSSECustomerKeyMd5());

  ObjectMetadata sourceObjectMetadata = new ObjectMetadata();
  sourceObjectMetadata.setContentLength((long) (options.getS3UploadBufferSizeBytes() * 1.5));
  sourceObjectMetadata.setContentEncoding("read-seek-efficient");
  if (getSSECustomerKeyMd5(options) != null) {
    sourceObjectMetadata.setSSECustomerKeyMd5(getSSECustomerKeyMd5(options));
  }
  assertGetObjectMetadata(
      s3FileSystem,
      createObjectMetadataRequest(sourcePath, options),
      options,
      sourceObjectMetadata);

  CopyPartResult copyPartResult1 = new CopyPartResult();
  copyPartResult1.setETag("etag-1");
  CopyPartResult copyPartResult2 = new CopyPartResult();
  copyPartResult1.setETag("etag-2");
  if (getSSECustomerKeyMd5(options) != null) {
    copyPartResult1.setSSECustomerKeyMd5(getSSECustomerKeyMd5(options));
    copyPartResult2.setSSECustomerKeyMd5(getSSECustomerKeyMd5(options));
  }
  CopyPartRequest copyPartRequest = new CopyPartRequest();
  copyPartRequest.setSourceSSECustomerKey(options.getSSECustomerKey());
  when(s3FileSystem.getAmazonS3Client().copyPart(any(CopyPartRequest.class)))
      .thenReturn(copyPartResult1)
      .thenReturn(copyPartResult2);
  assertEquals(
      getSSECustomerKeyMd5(options),
      s3FileSystem.getAmazonS3Client().copyPart(copyPartRequest).getSSECustomerKeyMd5());

  s3FileSystem.multipartCopy(sourcePath, destinationPath, sourceObjectMetadata);

  verify(s3FileSystem.getAmazonS3Client(), times(1))
      .completeMultipartUpload(any(CompleteMultipartUploadRequest.class));
}
 
Example #2
Source File: MultipartCopier.java    From nexus-public with Eclipse Public License 1.0
private void copyMultiPart(final AmazonS3 s3,
                           final String bucket,
                           final String sourcePath,
                           final String destinationPath,
                           final long length) {
  checkState(length > 0);
  String uploadId = null;
  try {
    long remaining = length;
    long offset = 0;

    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, destinationPath);
    uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();

    log.debug("Starting multipart copy {} to key {} from key {}", uploadId, destinationPath, sourcePath);

    List<CopyPartResult> results = new ArrayList<>();
    for (int partNumber = 1; ; partNumber++) {
      if (remaining <= 0) {
        break;
      }
      else {
        long partSize = min(remaining, chunkSize);
        log.trace("Copying chunk {} for {} from byte {} to {}, size {}", partNumber, uploadId, offset,
            offset + partSize - 1, partSize);
        CopyPartRequest part = new CopyPartRequest()
            .withSourceBucketName(bucket)
            .withSourceKey(sourcePath)
            .withDestinationBucketName(bucket)
            .withDestinationKey(destinationPath)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFirstByte(offset)
            .withLastByte(offset + partSize - 1);
        results.add(s3.copyPart(part));
        offset += partSize;
        remaining -= partSize;
      }
    }
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        .withBucketName(bucket)
        .withKey(destinationPath)
        .withUploadId(uploadId)
        .withPartETags(results.stream().map(r -> new PartETag(r.getPartNumber(), r.getETag())).collect(toList()));
    s3.completeMultipartUpload(compRequest);
    log.debug("Copy {} complete", uploadId);
  }
  catch (SdkClientException e) {
    if (uploadId != null) {
      try {
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, destinationPath, uploadId));
      }
      catch (Exception inner) {
        log.error("Error aborting S3 multipart copy to bucket {} with key {}", bucket, destinationPath,
            log.isDebugEnabled() ? inner : null);
      }
    }
    throw e;
  }
}
 
Example #3
Source File: DummyS3Client.java    From ignite with Apache License 2.0
/** Unsupported Operation. */
@Override public CopyPartResult copyPart(CopyPartRequest cpPartReq) throws SdkClientException {
    throw new UnsupportedOperationException("Operation not supported");
}
 
Example #4
Source File: AwsSdkTest.java    From s3proxy with Apache License 2.0
@Test
public void testMultipartCopy() throws Exception {
    // B2 requires two parts to issue an MPU
    assumeTrue(!blobStoreType.equals("b2"));

    String sourceBlobName = "testMultipartCopy-source";
    String targetBlobName = "testMultipartCopy-target";

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, sourceBlobName,
            BYTE_SOURCE.openStream(), metadata);

    InitiateMultipartUploadRequest initiateRequest =
            new InitiateMultipartUploadRequest(containerName,
                    targetBlobName);
    InitiateMultipartUploadResult initResult =
            client.initiateMultipartUpload(initiateRequest);
    String uploadId = initResult.getUploadId();

    CopyPartRequest copyRequest = new CopyPartRequest()
            .withDestinationBucketName(containerName)
            .withDestinationKey(targetBlobName)
            .withSourceBucketName(containerName)
            .withSourceKey(sourceBlobName)
            .withUploadId(uploadId)
            .withFirstByte(0L)
            .withLastByte(BYTE_SOURCE.size() - 1)
            .withPartNumber(1);
    CopyPartResult copyPartResult = client.copyPart(copyRequest);

    CompleteMultipartUploadRequest completeRequest =
            new CompleteMultipartUploadRequest(
                    containerName, targetBlobName, uploadId,
                    ImmutableList.of(copyPartResult.getPartETag()));
    client.completeMultipartUpload(completeRequest);

    S3Object object = client.getObject(containerName, targetBlobName);
    assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
            BYTE_SOURCE.size());
    try (InputStream actual = object.getObjectContent();
            InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
 
Example #5
Source File: AmazonS3Mock.java    From Scribengin with GNU Affero General Public License v3.0
@Override
public CopyPartResult copyPart(CopyPartRequest copyPartRequest) throws AmazonClientException, AmazonServiceException {
  // TODO Auto-generated method stub
  return null;
}