com.amazonaws.services.s3.model.UploadPartRequest Java Examples

The following examples show how to use com.amazonaws.services.s3.model.UploadPartRequest. Each example is drawn from an open source project; the source file, project, and license are noted in the header above the code.
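
Before the individual examples, here is a minimal end-to-end sketch of where UploadPartRequest fits in the multipart upload flow (initiate, upload each part, complete, or abort on failure), condensed from the file-based examples below. The bucket, key, and file are caller-supplied placeholders, and the 5 MB part size is the S3 minimum for every part except the last; treat this as an illustrative sketch rather than production code.

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

public class MultipartUploadSketch {

  public static void upload(AmazonS3 s3, String bucket, String key, File file) {
    // 1. Initiate the upload; the returned upload id must accompany every
    //    subsequent part, complete, and abort call.
    String uploadId = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(bucket, key)).getUploadId();

    List<PartETag> partETags = new ArrayList<>();
    long partSize = 5 * 1024 * 1024; // 5 MB
    long filePosition = 0;

    try {
      // 2. Upload the file in parts; part numbers start at 1.
      for (int partNumber = 1; filePosition < file.length(); partNumber++) {
        long size = Math.min(partSize, file.length() - filePosition);
        UploadPartRequest request = new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFile(file)
            .withFileOffset(filePosition)
            .withPartSize(size);
        // Keep each part's ETag; S3 needs the full list to assemble the object.
        partETags.add(s3.uploadPart(request).getPartETag());
        filePosition += size;
      }

      // 3. Complete the upload so S3 stitches the parts into one object.
      s3.completeMultipartUpload(
          new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
    } catch (AmazonClientException e) {
      // 4. On failure, abort so S3 discards the parts already uploaded.
      s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
      throw e;
    }
  }
}

Most of the examples below are variations on step 2, differing mainly in how the part data is sourced (a file plus offset, or an in-memory stream) and in whether parts are uploaded synchronously or handed to an executor.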
Example #1
Source File: S3MultipartUploadTest.java    From data-highway with Apache License 2.0
@Test
public void upload() {
  ArgumentCaptor<UploadPartRequest> request = ArgumentCaptor.forClass(UploadPartRequest.class);
  UploadPartResult response = mock(UploadPartResult.class);
  PartETag partETag = mock(PartETag.class);
  when(response.getPartETag()).thenReturn(partETag);
  when(s3.uploadPart(request.capture())).thenReturn(response);
  InputStream inputStream = mock(InputStream.class);
  S3Part part = new S3Part(1, 2, "md5", inputStream);

  PartETag result = underTest.upload(UPLOAD_ID, part);

  assertThat(result, is(partETag));
  assertThat(request.getValue().getBucketName(), is(BUCKET));
  assertThat(request.getValue().getKey(), is(KEY));
  assertThat(request.getValue().getPartNumber(), is(1));
  assertThat(request.getValue().getPartSize(), is(2L));
  assertThat(request.getValue().getMd5Digest(), is("md5"));
  assertThat(request.getValue().getInputStream(), is(inputStream));
}
 
Example #2
Source File: S3AFastOutputStream.java    From big-c with Apache License 2.0
public void uploadPartAsync(ByteArrayInputStream inputStream,
    int partSize) {
  final int currentPartNumber = partETagsFutures.size() + 1;
  final UploadPartRequest request =
      new UploadPartRequest().withBucketName(bucket).withKey(key)
          .withUploadId(uploadId).withInputStream(inputStream)
          .withPartNumber(currentPartNumber).withPartSize(partSize);
  request.setGeneralProgressListener(progressListener);
  ListenableFuture<PartETag> partETagFuture =
      executorService.submit(new Callable<PartETag>() {
        @Override
        public PartETag call() throws Exception {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
                uploadId);
          }
          return client.uploadPart(request).getPartETag();
        }
      });
  partETagsFutures.add(partETagFuture);
}
 
Example #3
Source File: S3WritableByteChannel.java    From beam with Apache License 2.0
private void flush() throws IOException {
  uploadBuffer.flip();
  ByteArrayInputStream inputStream = new ByteArrayInputStream(uploadBuffer.array());

  UploadPartRequest request =
      new UploadPartRequest()
          .withBucketName(path.getBucket())
          .withKey(path.getKey())
          .withUploadId(uploadId)
          .withPartNumber(partNumber++)
          .withPartSize(uploadBuffer.remaining())
          .withMD5Digest(Base64.encodeAsString(md5.digest()))
          .withInputStream(inputStream);
  request.setSSECustomerKey(options.getSSECustomerKey());

  UploadPartResult result;
  try {
    result = amazonS3.uploadPart(request);
  } catch (AmazonClientException e) {
    throw new IOException(e);
  }
  uploadBuffer.clear();
  md5.reset();
  eTags.add(result.getPartETag());
}
 
Example #4
Source File: ParallelUploader.java    From nexus-public with Eclipse Public License 1.0
private List<PartETag> uploadChunks(final AmazonS3 s3,
                                    final String bucket,
                                    final String key,
                                    final String uploadId,
                                    final ChunkReader chunkReader)
    throws IOException
{
  List<PartETag> tags = new ArrayList<>();
  Optional<Chunk> chunk;

  while ((chunk = chunkReader.readChunk(chunkSize)).isPresent()) {
    UploadPartRequest request = new UploadPartRequest()
        .withBucketName(bucket)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartNumber(chunk.get().chunkNumber)
        .withInputStream(new ByteArrayInputStream(chunk.get().data, 0, chunk.get().dataLength))
        .withPartSize(chunk.get().dataLength);

    tags.add(s3.uploadPart(request).getPartETag());
  }

  return tags;
}
 
Example #5
Source File: SimpleStorageResource.java    From spring-cloud-aws with Apache License 2.0
@Override
public UploadPartResult call() throws Exception {
	try {
		return this.amazonS3.uploadPart(new UploadPartRequest()
				.withBucketName(this.bucketName).withKey(this.key)
				.withUploadId(this.uploadId)
				.withInputStream(new ByteArrayInputStream(this.content))
				.withPartNumber(this.partNumber).withLastPart(this.last)
				.withPartSize(this.contentLength));
	}
	finally {
		// Release the memory, as the callable may still live inside the
		// CompletionService, which would otherwise cause excessive memory usage.
		this.content = null;
	}
}
 
Example #6
Source File: TestStandardS3EncryptionService.java    From nifi with Apache License 2.0
@Test
public void testRequests() {
    final ObjectMetadata metadata = new ObjectMetadata();
    final GetObjectRequest getObjectRequest = new GetObjectRequest("", "");
    final InitiateMultipartUploadRequest initUploadRequest = new InitiateMultipartUploadRequest("", "");
    final PutObjectRequest putObjectRequest = new PutObjectRequest("", "", "");
    final UploadPartRequest uploadPartRequest = new UploadPartRequest();

    service.configureGetObjectRequest(getObjectRequest, metadata);
    Assert.assertNull(getObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureUploadPartRequest(uploadPartRequest, metadata);
    Assert.assertNull(uploadPartRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configurePutObjectRequest(putObjectRequest, metadata);
    Assert.assertNull(putObjectRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());

    service.configureInitiateMultipartUploadRequest(initUploadRequest, metadata);
    Assert.assertNull(initUploadRequest.getSSECustomerKey());
    Assert.assertNull(metadata.getSSEAlgorithm());
}
 
Example #7
Source File: OutputS3.java    From crate with Apache License 2.0
private void doUploadIfNeeded() throws IOException {
    if (currentPartBytes >= PART_SIZE) {
        final ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
        final int currentPart = partNumber;
        final long currentPartSize = currentPartBytes;

        outputStream.close();
        outputStream = new ByteArrayOutputStream();
        partNumber++;
        pendingUploads.add(CompletableFutures.supplyAsync(() -> {
            UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(key)
                .withPartNumber(currentPart)
                .withPartSize(currentPartSize)
                .withUploadId(multipartUpload.getUploadId())
                .withInputStream(inputStream);
            return client.uploadPart(uploadPartRequest).getPartETag();
        }, executor));
        currentPartBytes = 0;
    }
}
 
Example #8
Source File: S3MultipartUpload.java    From data-highway with Apache License 2.0
PartETag upload(String uploadId, S3Part part) {
  Object[] logParams = new Object[] { part.getSize(), part.getNumber(), bucket, key };
  log.debug("Uploading {} bytes for part {} to s3://{}/{}.", logParams);
  UploadPartRequest request = new UploadPartRequest()
      .withUploadId(uploadId)
      .withBucketName(bucket)
      .withKey(key)
      .withPartNumber(part.getNumber())
      .withPartSize(part.getSize())
      .withMD5Digest(part.getMd5())
      .withInputStream(part.getInputStream());
  UploadPartResult result = s3.uploadPart(request);
  log.debug("Uploaded {} bytes for part {} to s3://{}/{}.", logParams);
  bytes += part.getSize();
  return result.getPartETag();
}
 
Example #9
Source File: TestS3EncryptionStrategies.java    From nifi with Apache License 2.0
@Before
public void setup() {
    byte[] keyRawBytes = new byte[32];
    SecureRandom secureRandom = new SecureRandom();
    secureRandom.nextBytes(keyRawBytes);
    randomKeyMaterial = Base64.encodeBase64String(keyRawBytes);

    metadata = new ObjectMetadata();
    putObjectRequest = new PutObjectRequest("", "", "");
    initUploadRequest = new InitiateMultipartUploadRequest("", "");
    getObjectRequest = new GetObjectRequest("", "");
    uploadPartRequest = new UploadPartRequest();
}
 
Example #10
Source File: S3DataOutputStream.java    From stratosphere with Apache License 2.0
private void uploadPartAndFlushBuffer() throws IOException {

	boolean operationSuccessful = false;

	if (this.uploadId == null) {
		this.uploadId = initiateMultipartUpload();
	}

	try {

		if (this.partNumber >= MAX_PART_NUMBER) {
			throw new IOException("Cannot upload any more data: maximum part number reached");
		}

		final InputStream inputStream = new InternalUploadInputStream(this.buf, this.bytesWritten);
		final UploadPartRequest request = new UploadPartRequest();
		request.setBucketName(this.bucket);
		request.setKey(this.object);
		request.setInputStream(inputStream);
		request.setUploadId(this.uploadId);
		request.setPartSize(this.bytesWritten);
		request.setPartNumber(this.partNumber++);

		final UploadPartResult result = this.s3Client.uploadPart(request);
		this.partETags.add(result.getPartETag());

		this.bytesWritten = 0;
		operationSuccessful = true;

	} catch (AmazonServiceException e) {
		throw new IOException(StringUtils.stringifyException(e));
	} finally {
		if (!operationSuccessful) {
			abortUpload();
		}
	}
}
 
Example #11
Source File: PublisherToolsTest.java    From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Test
public void uploadFileSuccess() throws IOException {
    TestUtils.initializeTestingFolders();

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            null, // No custom encryption key
            mockS3Client,
            null); // Listener

    final InOrder inOrder = inOrder(mockS3Client);
    inOrder.verify(mockS3Client, times(1)).initiateMultipartUpload(initiateCaptor.capture());
    // Total size is less than 5MB, should only be one upload
    inOrder.verify(mockS3Client, times(1)).uploadPart(any(UploadPartRequest.class));
    inOrder.verify(mockS3Client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
 
Example #12
Source File: PublisherToolsTest.java    From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Before
public void setUp() {
    MockitoAnnotations.initMocks(this);

    when(mockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
            .thenReturn(mockUploadResult);
    when(mockS3Client.uploadPart(any(UploadPartRequest.class))).thenReturn(mockPartRequest);
    when(mockUploadResult.getUploadId()).thenReturn("123");
    when(mockArtifact.getLocation()).thenReturn(mockLocation);
    when(mockLocation.getS3Location()).thenReturn(s3ArtifactLocation);
    when(s3ArtifactLocation.getBucketName()).thenReturn("Bucket");
    when(s3ArtifactLocation.getObjectKey()).thenReturn("Key");

    outContent = TestUtils.setOutputStream();
}
 
Example #13
Source File: PublisherCallableTest.java    From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
@Test
public void uploadsArtifactToS3() throws IOException {
    // when
    publisher.invoke(workspace, null);

    // then
    final InOrder inOrder = inOrder(clientFactory, awsClients, s3Client);
    inOrder.verify(clientFactory).getAwsClient(ACCESS_KEY, SECRET_KEY, PROXY_HOST, PROXY_PORT, REGION, PLUGIN_VERSION);
    inOrder.verify(awsClients).getCodePipelineClient();
    inOrder.verify(awsClients).getS3Client(credentialsProviderCaptor.capture());
    inOrder.verify(s3Client).initiateMultipartUpload(initiateMultipartUploadRequestCaptor.capture());
    inOrder.verify(s3Client).uploadPart(uploadPartRequestCaptor.capture());

    final com.amazonaws.auth.AWSSessionCredentials credentials
        = (com.amazonaws.auth.AWSSessionCredentials) credentialsProviderCaptor.getValue().getCredentials();
    assertEquals(JOB_ACCESS_KEY, credentials.getAWSAccessKeyId());
    assertEquals(JOB_SECRET_KEY, credentials.getAWSSecretKey());
    assertEquals(JOB_SESSION_TOKEN, credentials.getSessionToken());

    verify(codePipelineClient).getJobDetails(getJobDetailsRequestCaptor.capture());
    assertEquals(JOB_ID, getJobDetailsRequestCaptor.getValue().getJobId());

    final InitiateMultipartUploadRequest initRequest = initiateMultipartUploadRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, initRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, initRequest.getKey());

    final UploadPartRequest uploadRequest = uploadPartRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, uploadRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, uploadRequest.getKey());
    assertEquals(UPLOAD_ID, uploadRequest.getUploadId());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());
}
 
Example #14
Source File: PublisherTools.java    From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
public static void uploadFile(
        final File file,
        final Artifact artifact,
        final CompressionType compressionType,
        final EncryptionKey encryptionKey,
        final AmazonS3 amazonS3,
        final BuildListener listener) throws IOException {

    LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file);

    final String bucketName = artifact.getLocation().getS3Location().getBucketName();
    final String objectKey  = artifact.getLocation().getS3Location().getObjectKey();
    final List<PartETag> partETags = new ArrayList<>();

    final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName,
            objectKey,
            createObjectMetadata(compressionType))
        .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey));

    final InitiateMultipartUploadResult initiateMultipartUploadResult
            = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);

    final long contentLength = file.length();
    long filePosition = 0;
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB

    for (int i = 1; filePosition < contentLength; i++) {
        partSize = Math.min(partSize, (contentLength - filePosition));

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(objectKey)
                .withUploadId(initiateMultipartUploadResult.getUploadId())
                .withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);

        partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag());

        filePosition += partSize;
    }

    final CompleteMultipartUploadRequest completeMultipartUpload
            = new CompleteMultipartUploadRequest(
                bucketName,
                objectKey,
                initiateMultipartUploadResult.getUploadId(),
                partETags);

    amazonS3.completeMultipartUpload(completeMultipartUpload);

    LoggingHelper.log(listener, "Upload successful");
}
 
Example #15
Source File: COSBlockOutputStream.java    From stocator with Apache License 2.0
/**
 * Upload a block of data. This takes the block and queues it for
 * asynchronous upload.
 *
 * @param block block to upload
 * @throws IOException upload failure
 */
private void uploadBlockAsync(final COSDataBlocks.DataBlock block) throws IOException {
  LOG.debug("Queueing upload of {}", block);
  final int size = block.dataSize();
  final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
  final int currentPartNumber = partETagsFutures.size() + 1;
  final UploadPartRequest request = writeOperationHelper.newUploadPartRequest(uploadId,
      currentPartNumber, size,
      uploadData.getUploadStream(), uploadData.getFile());

  ListenableFuture<PartETag> partETagFuture = executorService.submit(new Callable<PartETag>() {
    @Override
    public PartETag call() throws Exception {
      // this is the queued upload operation
      LOG.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
      // do the upload
      PartETag partETag;
      try {
        partETag = fs.uploadPart(request).getPartETag();
        LOG.debug("Completed upload of {} to part {}", block, partETag.getETag());
      } finally {
        // close the stream and block
        closeAll(LOG, uploadData, block);
      }
      return partETag;
    }
  });
  partETagsFutures.add(partETagFuture);
}
 
Example #16
Source File: COSAPIClient.java    From stocator with Apache License 2.0
/**
 * Create and initialize a part request of a multipart upload.
 * Exactly one of: {@code uploadStream} or {@code sourceFile}
 * must be specified.
 * @param uploadId ID of ongoing upload
 * @param partNumber current part number of the upload
 * @param size amount of data
 * @param uploadStream source of data to upload
 * @param sourceFile optional source file
 * @return the request
 */
UploadPartRequest newUploadPartRequest(String uploadId,
    int partNumber, int size, InputStream uploadStream, File sourceFile) {
  Preconditions.checkNotNull(uploadId);
  // exactly one source must be set; xor verifies this
  Preconditions.checkArgument((uploadStream != null) ^ (sourceFile != null),
      "Data source");
  Preconditions.checkArgument(size > 0, "Invalid partition size %s", size);
  Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000,
      "partNumber must be between 1 and 10000 inclusive, but is %s",
      partNumber);

  LOG.debug("Creating part upload request for {} #{} size {}",
      uploadId, partNumber, size);
  UploadPartRequest request = new UploadPartRequest()
      .withBucketName(mBucket)
      .withKey(key)
      .withUploadId(uploadId)
      .withPartNumber(partNumber)
      .withPartSize(size);
  if (uploadStream != null) {
    // there's an upload stream. Bind to it.
    request.setInputStream(uploadStream);
  } else {
    request.setFile(sourceFile);
  }
  return request;
}
 
Example #17
Source File: COSAPIClient.java    From stocator with Apache License 2.0
/**
 * Upload part of a multi-partition file.
 * <i>Important: this call does not close any input stream in the request.</i>
 * @param request request
 * @return the result of the operation
 * @throws AmazonClientException on problems
 */
public UploadPartResult uploadPart(UploadPartRequest request)
    throws AmazonClientException {
  return mClient.uploadPart(request);
}
 
Example #18
Source File: OutputS3.java    From crate with Apache License 2.0
@Override
public void close() throws IOException {
    UploadPartRequest uploadPartRequest = new UploadPartRequest()
        .withBucketName(bucketName)
        .withKey(key)
        .withPartNumber(partNumber)
        .withPartSize(outputStream.size())
        .withUploadId(multipartUpload.getUploadId())
        .withInputStream(new ByteArrayInputStream(outputStream.toByteArray()));
    UploadPartResult uploadPartResult = client.uploadPart(uploadPartRequest);

    List<PartETag> partETags;
    try {
        partETags = CompletableFutures.allAsList(pendingUploads).get();
    } catch (InterruptedException | ExecutionException e) {
        throw new IOException(e);
    }
    partETags.add(uploadPartResult.getPartETag());
    client.completeMultipartUpload(
        new CompleteMultipartUploadRequest(
            bucketName,
            key,
            multipartUpload.getUploadId(),
            partETags)
    );
    super.close();
}
 
Example #19
Source File: S3TransporterTest.java    From bender with Apache License 2.0
@Test
public void testContextBasedFilename()
    throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  TestContext context = new TestContext();
  context.setAwsRequestId("request_id");
  transport.sendBatch(buffer, partitions, context);
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("basepath/request_id.bz2", argument.getValue().getKey());
}
 
Example #20
Source File: S3TransporterTest.java    From bender with Apache License 2.0
@Test
public void testGzFilename() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename.gz");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
}
 
Example #21
Source File: TestUtil.java    From s3committer with Apache License 2.0
private static UploadPartResult newResult(UploadPartRequest request,
                                          String etag) {
  UploadPartResult result = new UploadPartResult();
  result.setPartNumber(request.getPartNumber());
  result.setETag(etag);
  return result;
}
 
Example #22
Source File: S3OutputStream.java    From nifi-minifi with Apache License 2.0
public void uploadPart(ByteArrayInputStream inputStream, int partSize) {
  int currentPartNumber = partETags.size() + 1;
  UploadPartRequest request = new UploadPartRequest()
                                        .withBucketName(bucket)
                                        .withKey(key)
                                        .withUploadId(uploadId)
                                        .withInputStream(inputStream)
                                        .withPartNumber(currentPartNumber)
                                        .withPartSize(partSize)
                                        .withGeneralProgressListener(progressListener);
  log.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
  partETags.add(s3.uploadPart(request).getPartETag());
}
 
Example #23
Source File: S3TransporterTest.java    From bender with Apache License 2.0
private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = spy(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  return mockClient;
}
 
Example #24
Source File: S3Util.java    From s3committer with Apache License 2.0
public static PendingUpload multipartUpload(
    AmazonS3 client, File localFile, String partition,
    String bucket, String key, long uploadPartSize) {

  InitiateMultipartUploadResult initiate = client.initiateMultipartUpload(
      new InitiateMultipartUploadRequest(bucket, key));
  String uploadId = initiate.getUploadId();

  boolean threw = true;
  try {
    Map<Integer, String> etags = Maps.newLinkedHashMap();

    long offset = 0;
    long numParts = (localFile.length() / uploadPartSize +
        ((localFile.length() % uploadPartSize) > 0 ? 1 : 0));

    Preconditions.checkArgument(numParts > 0,
        "Cannot upload 0 byte file: " + localFile);

    for (int partNumber = 1; partNumber <= numParts; partNumber += 1) {
      long size = Math.min(localFile.length() - offset, uploadPartSize);
      UploadPartRequest part = new UploadPartRequest()
          .withBucketName(bucket)
          .withKey(key)
          .withPartNumber(partNumber)
          .withUploadId(uploadId)
          .withFile(localFile)
          .withFileOffset(offset)
          .withPartSize(size)
          .withLastPart(partNumber == numParts);

      UploadPartResult partResult = client.uploadPart(part);
      PartETag etag = partResult.getPartETag();
      etags.put(etag.getPartNumber(), etag.getETag());

      offset += uploadPartSize;
    }

    PendingUpload pending = new PendingUpload(
        partition, bucket, key, uploadId, etags);

    threw = false;

    return pending;

  } finally {
    if (threw) {
      try {
        client.abortMultipartUpload(
            new AbortMultipartUploadRequest(bucket, key, uploadId));
      } catch (AmazonClientException e) {
        LOG.error("Failed to abort multi-part upload", e);
      }
    }
  }
}
 
Example #25
Source File: AwsSdkTest.java    From s3proxy with Apache License 2.0
@Test
public void testMultipartUploadAbort() throws Exception {
    String blobName = "multipart-upload-abort";
    ByteSource byteSource = TestUtils.randomByteSource().slice(
            0, MINIMUM_MULTIPART_SIZE);

    InitiateMultipartUploadResult result = client.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(containerName, blobName));

    // TODO: google-cloud-storage and openstack-swift cannot list multipart
    // uploads
    MultipartUploadListing multipartListing = client.listMultipartUploads(
            new ListMultipartUploadsRequest(containerName));
    if (blobStoreType.equals("azureblob")) {
        // Azure does not create a manifest during initiate multi-part
        // upload.  Instead the first part creates this.
        assertThat(multipartListing.getMultipartUploads()).isEmpty();
    } else {
        assertThat(multipartListing.getMultipartUploads()).hasSize(1);
    }

    PartListing partListing = client.listParts(new ListPartsRequest(
            containerName, blobName, result.getUploadId()));
    assertThat(partListing.getParts()).isEmpty();

    client.uploadPart(new UploadPartRequest()
            .withBucketName(containerName)
            .withKey(blobName)
            .withUploadId(result.getUploadId())
            .withPartNumber(1)
            .withPartSize(byteSource.size())
            .withInputStream(byteSource.openStream()));

    multipartListing = client.listMultipartUploads(
            new ListMultipartUploadsRequest(containerName));
    assertThat(multipartListing.getMultipartUploads()).hasSize(1);

    partListing = client.listParts(new ListPartsRequest(
            containerName, blobName, result.getUploadId()));
    assertThat(partListing.getParts()).hasSize(1);

    client.abortMultipartUpload(new AbortMultipartUploadRequest(
            containerName, blobName, result.getUploadId()));

    multipartListing = client.listMultipartUploads(
            new ListMultipartUploadsRequest(containerName));
    if (blobStoreType.equals("azureblob")) {
        // Azure does not support explicit abort.  It automatically
        // removes incomplete multi-part uploads after 7 days.
        assertThat(multipartListing.getMultipartUploads()).hasSize(1);
    } else {
        assertThat(multipartListing.getMultipartUploads()).isEmpty();
    }

    ObjectListing listing = client.listObjects(containerName);
    assertThat(listing.getObjectSummaries()).isEmpty();
}
 
Example #26
Source File: HadoopS3AccessHelper.java    From flink with Apache License 2.0
@Override
public UploadPartResult uploadPart(String key, String uploadId, int partNumber, File inputFile, long length) throws IOException {
	final UploadPartRequest uploadRequest = s3accessHelper.newUploadPartRequest(
		key, uploadId, partNumber, MathUtils.checkedDownCast(length), null, inputFile, 0L);
	return s3accessHelper.uploadPart(uploadRequest);
}
 
Example #27
Source File: StandardS3EncryptionService.java    From nifi with Apache License 2.0
@Override
public void configureUploadPartRequest(UploadPartRequest request, ObjectMetadata objectMetadata) {
    encryptionStrategy.configureUploadPartRequest(request, objectMetadata, keyValue);
}
 
Example #28
Source File: ServerSideCEncryptionStrategy.java    From nifi with Apache License 2.0
@Override
public void configureUploadPartRequest(UploadPartRequest request, ObjectMetadata objectMetadata, String keyValue) {
    SSECustomerKey customerKey = new SSECustomerKey(keyValue);
    request.setSSECustomerKey(customerKey);
}
 
Example #29
Source File: TestUtil.java    From s3committer with Apache License 2.0
public List<UploadPartRequest> getParts() {
  return parts;
}