Java Code Examples for com.amazonaws.services.s3.AmazonS3#completeMultipartUpload()

The following examples show how to use com.amazonaws.services.s3.AmazonS3#completeMultipartUpload(). Each example is taken from an open-source project; the project and source file are noted above each snippet so you can follow them back to the original code.
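
Before the project examples, the typical call sequence is: initiate the upload, upload the parts, then call completeMultipartUpload() with the collected PartETags. The following is a minimal, self-contained sketch of that flow, not code from any of the projects below; the bucket name, key, file path, and class name are placeholders, and the error handling is deliberately simplified.

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

public class MultipartUploadSketch {

    public static void main(String[] args) {
        // Placeholder bucket, key, and file; replace with your own values.
        String bucket = "example-bucket";
        String key = "example-key";
        File file = new File("/path/to/local/file");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // 1. Initiate the multipart upload and keep the upload ID.
        String uploadId = s3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(bucket, key)).getUploadId();

        List<PartETag> partETags = new ArrayList<>();
        long partSize = 5L * 1024 * 1024; // 5 MB minimum for every part except the last
        long contentLength = file.length();
        long filePosition = 0;

        try {
            // 2. Upload the file in consecutive parts, collecting each part's ETag.
            for (int partNumber = 1; filePosition < contentLength; partNumber++) {
                long currentPartSize = Math.min(partSize, contentLength - filePosition);
                UploadPartRequest uploadPartRequest = new UploadPartRequest()
                        .withBucketName(bucket)
                        .withKey(key)
                        .withUploadId(uploadId)
                        .withPartNumber(partNumber)
                        .withFile(file)
                        .withFileOffset(filePosition)
                        .withPartSize(currentPartSize);
                partETags.add(s3.uploadPart(uploadPartRequest).getPartETag());
                filePosition += currentPartSize;
            }

            // 3. Complete the upload; S3 assembles the parts into a single object.
            s3.completeMultipartUpload(
                    new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
        } catch (RuntimeException e) {
            // On failure, abort so the already-uploaded parts do not linger and incur storage costs.
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            throw e;
        }
    }
}

The project examples below follow the same pattern, differing mainly in how parts are produced (parallel workers, streamed chunks, or server-side copies) and in how failures are reported.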
Example 1
Source File: ParallelRequester.java    From nexus-public with Eclipse Public License 1.0
protected void parallelRequests(final AmazonS3 s3,
                                final String bucket,
                                final String key,
                                final Supplier<IOFunction<String, List<PartETag>>> operations)
{
  InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key);
  String uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();

  CompletionService<List<PartETag>> completionService = new ExecutorCompletionService<>(executorService);
  try {
    for (int i = 0; i < parallelism; i++) {
      completionService.submit(() -> operations.get().apply(uploadId));
    }

    List<PartETag> partETags = new ArrayList<>();
    for (int i = 0; i < parallelism; i++) {
      partETags.addAll(completionService.take().get());
    }

    s3.completeMultipartUpload(new CompleteMultipartUploadRequest()
        .withBucketName(bucket)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartETags(partETags));
  }
  catch (InterruptedException interrupted) {
    s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
    Thread.currentThread().interrupt();
  }
  catch (CancellationException | ExecutionException ex) {
    s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
    throw new BlobStoreException(
        format("Error executing parallel requests for bucket:%s key:%s with uploadId:%s", bucket, key, uploadId), ex,
        null);
  }
}
 
Example 2
Source File: PublisherTools.java    From aws-codepipeline-plugin-for-jenkins with Apache License 2.0
public static void uploadFile(
        final File file,
        final Artifact artifact,
        final CompressionType compressionType,
        final EncryptionKey encryptionKey,
        final AmazonS3 amazonS3,
        final BuildListener listener) throws IOException {

    LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file);

    final String bucketName = artifact.getLocation().getS3Location().getBucketName();
    final String objectKey  = artifact.getLocation().getS3Location().getObjectKey();
    final List<PartETag> partETags = new ArrayList<>();

    final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName,
            objectKey,
            createObjectMetadata(compressionType))
        .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey));

    final InitiateMultipartUploadResult initiateMultipartUploadResult
            = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);

    final long contentLength = file.length();
    long filePosition = 0;
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB

    for (int i = 1; filePosition < contentLength; i++) {
        partSize = Math.min(partSize, (contentLength - filePosition));

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(objectKey)
                .withUploadId(initiateMultipartUploadResult.getUploadId())
                .withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);

        partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag());

        filePosition += partSize;
    }

    final CompleteMultipartUploadRequest completeMultipartUpload
            = new CompleteMultipartUploadRequest(
                bucketName,
                objectKey,
                initiateMultipartUploadResult.getUploadId(),
                partETags);

    amazonS3.completeMultipartUpload(completeMultipartUpload);

    LoggingHelper.log(listener, "Upload successful");
}
 
Example 3
Source File: S3Util.java    From s3committer with Apache License 2.0
public static void finishCommit(AmazonS3 client,
                                PendingUpload commit) {
  client.completeMultipartUpload(commit.newCompleteRequest());
}
 
Example 4
Source File: MultipartUploader.java    From nexus-public with Eclipse Public License 1.0
private void uploadMultiPart(final AmazonS3 s3,
                             final String bucket,
                             final String key,
                             final InputStream firstChunk,
                             final InputStream restOfContents)
    throws IOException {
  checkState(firstChunk.available() > 0);
  String uploadId = null;
  try {
    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, key);
    uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();

    log.debug("Starting multipart upload {} to key {} in bucket {}", uploadId, key, bucket);

    List<UploadPartResult> results = new ArrayList<>();
    for (int partNumber = 1; ; partNumber++) {
      InputStream chunk = partNumber == 1 ? firstChunk : readChunk(restOfContents);
      if (chunk.available() == 0) {
        break;
      }
      else {
        log.debug("Uploading chunk {} for {} of {} bytes", partNumber, uploadId, chunk.available());
        UploadPartRequest part = new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withInputStream(chunk)
            .withPartSize(chunk.available());
        results.add(s3.uploadPart(part));
      }
    }
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        .withBucketName(bucket)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartETags(results);
    s3.completeMultipartUpload(compRequest);
    log.debug("Upload {} complete", uploadId);
    uploadId = null;
  }
  finally {
    if (uploadId != null) {
      try {
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
      }
      catch(Exception e) {
        log.error("Error aborting S3 multipart upload to bucket {} with key {}", bucket, key,
            log.isDebugEnabled() ? e : null);
      }
    }
  }
}
 
Example 5
Source File: MultipartCopier.java    From nexus-public with Eclipse Public License 1.0
private void copyMultiPart(final AmazonS3 s3,
                           final String bucket,
                           final String sourcePath,
                           final String destinationPath,
                           final long length) {
  checkState(length > 0);
  String uploadId = null;
  try {
    long remaining = length;
    long offset = 0;

    InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(bucket, destinationPath);
    uploadId = s3.initiateMultipartUpload(initiateRequest).getUploadId();

    log.debug("Starting multipart copy {} to key {} from key {}", uploadId, destinationPath, sourcePath);

    List<CopyPartResult> results = new ArrayList<>();
    for (int partNumber = 1; ; partNumber++) {
      if (remaining <= 0) {
        break;
      }
      else {
        long partSize = min(remaining, chunkSize);
        log.trace("Copying chunk {} for {} from byte {} to {}, size {}", partNumber, uploadId, offset,
            offset + partSize - 1, partSize);
        CopyPartRequest part = new CopyPartRequest()
            .withSourceBucketName(bucket)
            .withSourceKey(sourcePath)
            .withDestinationBucketName(bucket)
            .withDestinationKey(destinationPath)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFirstByte(offset)
            .withLastByte(offset + partSize - 1);
        results.add(s3.copyPart(part));
        offset += partSize;
        remaining -= partSize;
      }
    }
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest()
        .withBucketName(bucket)
        .withKey(destinationPath)
        .withUploadId(uploadId)
        .withPartETags(results.stream().map(r -> new PartETag(r.getPartNumber(), r.getETag())).collect(toList()));
    s3.completeMultipartUpload(compRequest);
    log.debug("Copy {} complete", uploadId);
  }
  catch(SdkClientException e) {
    if (uploadId != null) {
      try {
        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, destinationPath, uploadId));
      }
      catch(Exception inner) {
        log.error("Error aborting S3 multipart copy to bucket {} with key {}", bucket, destinationPath,
            log.isDebugEnabled() ? inner : null);
      }
    }
    throw e;
  }
}
 
Example 6
Source File: AwsUtils.java    From studio with GNU General Public License v3.0
public static void uploadStream(String inputBucket, String inputKey, AmazonS3 s3Client, int partSize,
                                String filename, InputStream content) throws AwsException {
    List<PartETag> etags = new LinkedList<>();
    InitiateMultipartUploadResult initResult = null;
    try {
        int partNumber = 1;
        long totalBytes = 0;

        MimetypesFileTypeMap mimeMap = new MimetypesFileTypeMap();
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentType(mimeMap.getContentType(filename));

        InitiateMultipartUploadRequest initRequest =
                new InitiateMultipartUploadRequest(inputBucket, inputKey, meta);
        initResult = s3Client.initiateMultipartUpload(initRequest);
        byte[] buffer = new byte[partSize];
        int read;

        logger.debug("Starting upload for file '{}'", filename);

        while (0 < (read = IOUtils.read(content, buffer))) {
            totalBytes += read;
            if (logger.isTraceEnabled()) {
                logger.trace("Uploading part {} with size {} - total: {}", partNumber, read, totalBytes);
            }
            ByteArrayInputStream bais = new ByteArrayInputStream(buffer, 0, read);
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withUploadId(initResult.getUploadId())
                    .withBucketName(inputBucket)
                    .withKey(inputKey)
                    .withInputStream(bais)
                    .withPartNumber(partNumber)
                    .withPartSize(read)
                    .withLastPart(read < partSize);
            etags.add(s3Client.uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }

        if (totalBytes == 0) {
            // If the file is empty, use the simple upload instead of the multipart
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(inputBucket, inputKey, initResult.getUploadId()));

            s3Client.putObject(inputBucket, inputKey, new ByteArrayInputStream(new byte[0]), meta);
        } else {
            CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(inputBucket,
                    inputKey, initResult.getUploadId(), etags);

            s3Client.completeMultipartUpload(completeRequest);
        }

        logger.debug("Upload completed for file '{}'", filename);

    } catch (Exception e) {
        if (initResult != null) {
            s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(inputBucket, inputKey,
                    initResult.getUploadId()));
        }
        throw new AwsException("Upload of file '" + filename + "' failed", e);
    }
}