com.amazonaws.services.s3.transfer.Upload Java Examples

The following examples show how to use com.amazonaws.services.s3.transfer.Upload, the asynchronous transfer handle returned by the AWS SDK for Java (v1) TransferManager. Each example is drawn from an open-source project; the source file and license are noted above each one.
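Before the individual examples, a minimal end-to-end sketch may help orient the reader. It shows the typical Upload lifecycle: build a TransferManager around an AmazonS3 client, start an asynchronous upload, then block for completion. The bucket, key, and file names below are placeholders, not taken from any example on this page.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

import java.io.File;

public class UploadQuickStart {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            // upload() returns immediately; the transfer runs on the TransferManager's threads
            Upload upload = tm.upload("my-bucket", "my-key", new File("local-file.txt"));
            upload.waitForCompletion();  // blocks until success, or throws on failure
        } finally {
            tm.shutdownNow(false);  // false: keep the injected S3 client usable
        }
    }
}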
Example #1
Source File: MultipartUploadLiveTest.java    From tutorials with MIT License
@Test
public void whenUploadingFileWithTransferManager_thenVerifyUploadRequested() {
    File file = mock(File.class);
    PutObjectResult s3Result = mock(PutObjectResult.class);

    when(amazonS3.putObject(anyString(), anyString(), (File) any())).thenReturn(s3Result);
    when(file.getName()).thenReturn(KEY_NAME);

    PutObjectRequest request = new PutObjectRequest(BUCKET_NAME, KEY_NAME, file);
    request.setGeneralProgressListener(progressListener);

    Upload upload = tm.upload(request);

    assertThat(upload).isNotNull();
    verify(amazonS3).putObject(request);
}
 
Example #2
Source File: AWSUploader.java    From halvade with GNU General Public License v3.0
public void Upload(String key, InputStream input, long size) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    if (SSE)
        meta.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    meta.setContentLength(size);
    Upload upload = tm.upload(existingBucketName, key, input, meta);

    try {
        // Block and wait for the upload to finish
        upload.waitForCompletion();
        Logger.DEBUG("Upload complete.");
    } catch (AmazonClientException amazonClientException) {
        Logger.DEBUG("Unable to upload file, upload was aborted.");
        Logger.EXCEPTION(amazonClientException);
    }
}
 
Example #3
Source File: S3Publisher.java    From hollow-reference-implementation with Apache License 2.0
/**
 * Write a list of all of the state versions to S3.
 * @param newVersion the version to insert into the index
 */
private synchronized void updateSnapshotIndex(Long newVersion) {
    /// insert the new version into the list
    int idx = Collections.binarySearch(snapshotIndex, newVersion);
    int insertionPoint = Math.abs(idx) - 1;
    snapshotIndex.add(insertionPoint, newVersion);

    /// build a binary representation of the list -- gap encoded variable-length integers
    byte[] idxBytes = buidGapEncodedVarIntSnapshotIndex();

    /// indicate the Content-Length
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader("Content-Length", (long) idxBytes.length);

    /// upload the new file content.
    try (InputStream is = new ByteArrayInputStream(idxBytes)) {
        Upload upload = s3TransferManager.upload(bucketName, getSnapshotIndexObjectName(blobNamespace), is, metadata);

        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
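The comment above mentions "gap encoded variable-length integers," but the body of buidGapEncodedVarIntSnapshotIndex() is not shown. As a rough illustration: gap encoding stores each sorted version as a delta from its predecessor, and each delta as a varint. The sketch below is hypothetical (7-bits-per-byte varints, fully qualified java.io/java.util types for self-containment); it is not the project's actual wire format.

// Hypothetical sketch of gap + varint encoding; not the project's exact format.
private static byte[] gapEncode(java.util.List<Long> sortedVersions) {
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    long previous = 0;
    for (long version : sortedVersions) {
        long gap = version - previous;  // deltas stay small when versions are clustered
        previous = version;
        while ((gap & ~0x7FL) != 0) {   // emit low 7 bits per byte; high bit means "more"
            out.write((int) ((gap & 0x7FL) | 0x80L));
            gap >>>= 7;
        }
        out.write((int) gap);
    }
    return out.toByteArray();
}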
 
Example #4
Source File: FileHelper.java    From datacollector with Apache License 2.0
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
  final PutObjectRequest putObjectRequest = new PutObjectRequest(
      bucket,
      fileName,
      is,
      metadata
  );
  final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
  Upload upload = transferManager.upload(putObjectRequest);
  upload.addProgressListener((ProgressListener) progressEvent -> {
    switch (progressEvent.getEventType()) {
      case TRANSFER_STARTED_EVENT:
        LOG.debug("Started uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_COMPLETED_EVENT:
        LOG.debug("Completed uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_FAILED_EVENT:
        LOG.debug("Failed uploading object {} into Amazon S3", object);
        break;
      default:
        break;
    }
  });
  return upload;
}
 
Example #5
Source File: Uploads.java    From jobcacher-plugin with MIT License
public void finishUploading() throws InterruptedException {
    for (Map.Entry<File, Upload> startedUpload : startedUploads.entrySet()) {
        finishUploading(startedUpload.getKey(), startedUpload.getValue());
    }

    startedUploads.clear();
}
 
Example #6
Source File: MultipartUpload.java    From tutorials with MIT License
public static void main(String[] args) throws Exception {
    String existingBucketName = "baeldung-bucket";
    String keyName = "my-picture.jpg";
    String filePath = "documents/my-picture.jpg";

    AmazonS3 amazonS3 = AmazonS3ClientBuilder
            .standard()
            .withCredentials(new DefaultAWSCredentialsProviderChain())
            .withRegion(Regions.DEFAULT_REGION)
            .build();

    int maxUploadThreads = 5;

    TransferManager tm = TransferManagerBuilder
            .standard()
            .withS3Client(amazonS3)
            .withMultipartUploadThreshold((long) (5 * 1024 * 1024))
            .withExecutorFactory(() -> Executors.newFixedThreadPool(maxUploadThreads))
            .build();

    ProgressListener progressListener =
            progressEvent -> System.out.println("Transferred bytes: " + progressEvent.getBytesTransferred());

    PutObjectRequest request = new PutObjectRequest(existingBucketName, keyName, new File(filePath));

    request.setGeneralProgressListener(progressListener);

    Upload upload = tm.upload(request);

    try {
        upload.waitForCompletion();
        System.out.println("Upload complete.");
    } catch (AmazonClientException e) {
        System.out.println("Error occurred while uploading file");
        e.printStackTrace();
    }
}
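Because tm.upload() returns a handle rather than a result, a long transfer can also be paused and resumed instead of only awaited. The following is a sketch, not part of the original example; it assumes the tm and request variables from above and uses the SDK's Upload.pause() / TransferManager.resumeUpload() API.

// Sketch: pause a large transfer and resume it later.
Upload upload = tm.upload(request);

// pause() captures resumable state; it throws PauseException if the
// transfer cannot be paused (for example, a small single-part upload).
PersistableUpload persistableUpload = upload.pause();

// ... later, possibly after persisting and reloading that state:
Upload resumed = tm.resumeUpload(persistableUpload);
resumed.waitForCompletion();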
 
Example #7
Source File: S3OutputStreamWrapper.java    From streams with Apache License 2.0
private void addFile() throws Exception {

    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();

    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);

    metadata.addUserMetadata("writer", "org.apache.streams");

    for (String s : metaData.keySet()) {
      metadata.addUserMetadata(s, metaData.get(s));
    }

    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
      upload.waitForUploadResult();

      is.close();
      transferManager.shutdownNow(false);
      LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
    } catch (Exception ignored) {
      LOGGER.trace("Ignoring", ignored);
    }
}
 
Example #8
Source File: AmazonS3Storage.java    From thunderbit with GNU Affero General Public License v3.0
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException (ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}
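The same listener-to-promise bridge works with plain java.util.concurrent types outside of Play. A sketch under that assumption (variable names are illustrative; the usual com.amazonaws.event imports are assumed):

// Illustrative: complete a CompletableFuture from transfer events.
CompletableFuture<Void> future = new CompletableFuture<>();
Upload upload = transferManager.upload(bucketName, key, file);
upload.addProgressListener((ProgressListener) progressEvent -> {
    if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
        future.complete(null);
    } else if (progressEvent.getEventType() == ProgressEventType.TRANSFER_FAILED_EVENT) {
        future.completeExceptionally(new IllegalStateException("transfer failed: " + progressEvent));
    }
});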
 
Example #9
Source File: UploadMetadata.java    From datacollector with Apache License 2.0
public UploadMetadata(
  Upload upload,
  String bucket,
  List<Record> records,
  List<EventRecord> events
) {
  this.upload = upload;
  this.bucket = bucket;
  this.records = records;
  this.events = events;
}
 
Example #10
Source File: TestS3Accessor.java    From datacollector with Apache License 2.0
@Test
public void testUploader() throws Exception {
  S3Accessor accessor = new S3Accessor(null, null, null, null);
  accessor = Mockito.spy(accessor);
  Mockito.doReturn(true).when(accessor).hasTransferManager();
  S3Accessor.EncryptionMetadataBuilder metadataBuilder = Mockito.mock(S3Accessor.EncryptionMetadataBuilder.class);
  ObjectMetadata metadata = new ObjectMetadata();
  Mockito.when(metadataBuilder.build()).thenReturn(metadata);
  Mockito.doReturn(metadataBuilder).when(accessor).getEncryptionMetadataBuilder();

  TransferManager transferManager = Mockito.mock(TransferManager.class);
  Mockito.doReturn(transferManager).when(accessor).getTransferManager();
  Upload upload = Mockito.mock(Upload.class);
  Mockito.when(transferManager.upload(Mockito.any(PutObjectRequest.class))).thenReturn(upload);

  S3Accessor.Uploader uploader = accessor.getUploader();
  Assert.assertNotNull(uploader);

  InputStream is = Mockito.mock(InputStream.class);

  Upload uploadGot = uploader.upload("b", "k", is);

  Assert.assertEquals(upload, uploadGot);

  ArgumentCaptor<PutObjectRequest> captor = ArgumentCaptor.forClass(PutObjectRequest.class);
  Mockito.verify(transferManager, Mockito.times(1)).upload(captor.capture());
  PutObjectRequest request = captor.getValue();
  Assert.assertEquals("b", request.getBucketName());
  Assert.assertEquals("k", request.getKey());
  Assert.assertEquals(is, request.getInputStream());
  Assert.assertEquals(metadata, request.getMetadata());
}
 
Example #11
Source File: S3Accessor.java    From datacollector with Apache License 2.0
public Uploader getUploader() {
  Utils.checkState(hasTransferManager(), "transferManager not available");
  return (bucket, key, is) -> {
    Utils.checkNotNull(bucket, "bucket");
    Utils.checkNotNull(key, "key");
    Utils.checkNotNull(is, "is");
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, is, getEncryptionMetadataBuilder().build());
    Upload upload = getTransferManager().upload(putObjectRequest);
    upload.addProgressListener(new UploaderProgressListener(bucket + key));
    return upload;
  };
}
 
Example #12
Source File: S3Resource.java    From micro-server with Apache License 2.0
@GET
@Path("/put")
public String put() {
    Try<Upload, Throwable> operation = writer.put("hello", "world");
    if(operation.isSuccess())
        return "added";
    return operation.failureGet().orElse(null).getMessage();
}
 
Example #13
Source File: FileController.java    From full-teaching with Apache License 2.0
private void productionFileSaver(String keyName, String folderName, File f) throws InterruptedException {
    String bucketName = this.bucketAWS + "/" + folderName;
    TransferManager tm = new TransferManager(this.amazonS3);
    // TransferManager processes all transfers asynchronously, so this call returns immediately
    Upload upload = tm.upload(bucketName, keyName, f);
    try {
        // Block and wait for the upload to finish
        upload.waitForCompletion();
        System.out.println("Upload completed");
    } catch (AmazonClientException amazonClientException) {
        System.out.println("Unable to upload file, upload was aborted.");
        amazonClientException.printStackTrace();
    }
}
 
Example #14
Source File: Uploads.java    From jobcacher-plugin with MIT License
public void startUploading(TransferManager manager, File file, InputStream inputStream, Destination dest, ObjectMetadata metadata) throws AmazonServiceException {
    final PutObjectRequest request = new PutObjectRequest(dest.bucketName, dest.objectName, inputStream, metadata);

    // Set the buffer size (ReadLimit) equal to the multipart upload size,
    // allowing us to resend data if the connection breaks.
    request.getRequestClientOptions().setReadLimit(MULTIPART_UPLOAD_THRESHOLD);
    manager.getConfiguration().setMultipartUploadThreshold((long) MULTIPART_UPLOAD_THRESHOLD);

    final Upload upload = manager.upload(request);
    startedUploads.put(file, upload);
    openedStreams.put(file, inputStream);
}
 
Example #15
Source File: Uploads.java    From jobcacher-plugin with MIT License
private void finishUploading(File file, Upload upload) throws InterruptedException {
    if (upload == null) {
        LOGGER.info("File: " + file.getName() + " was already uploaded");
        return;
    }
    try {
        upload.waitForCompletion();
    }
    finally {
        closeStream(file, openedStreams.remove(file));
    }
}
 
Example #16
Source File: S3Publisher.java    From hollow-reference-implementation with Apache License 2.0
private void uploadFile(File file, String s3ObjectName, ObjectMetadata metadata) {
    try (InputStream is = new BufferedInputStream(new FileInputStream(file))) {
        Upload upload = s3TransferManager.upload(bucketName, s3ObjectName, is, metadata);

        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
 
Example #17
Source File: S3AOutputStream.java    From big-c with Apache License 2.0
public ProgressableProgressListener(Upload upload, Progressable progress, 
  FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
 
Example #18
Source File: XferMgrUpload.java    From aws-doc-sdk-examples with Apache License 2.0
public static void uploadFile(String file_path, String bucket_name,
                              String key_prefix, boolean pause) {
    System.out.println("file: " + file_path +
            (pause ? " (pause)" : ""));

    String key_name = null;
    if (key_prefix != null) {
        key_name = key_prefix + '/' + file_path;
    } else {
        key_name = file_path;
    }

    // snippet-start:[s3.java1.s3_xfer_mgr_upload.single]
    File f = new File(file_path);
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        Upload xfer = xfer_mgr.upload(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        //  or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_upload.single]
}
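XferMgrProgress itself is not reproduced on this page. The "loop with Transfer.isDone()" alternative mentioned in the comments amounts to polling the handle; below is a stand-in sketch using only the public Transfer API, not the real helper (the polling interval is arbitrary):

// Stand-in for XferMgrProgress.showTransferProgress: poll until done.
static void showTransferProgress(Transfer xfer) throws InterruptedException {
    while (!xfer.isDone()) {
        TransferProgress progress = xfer.getProgress();
        System.out.printf("%.1f%% of %d bytes%n",
                progress.getPercentTransferred(), progress.getTotalBytesToTransfer());
        Thread.sleep(200);  // arbitrary polling interval
    }
    System.out.println("Final state: " + xfer.getState());
}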
 
Example #19
Source File: S3AOutputStream.java    From hadoop with Apache License 2.0
public ProgressableProgressListener(Upload upload, Progressable progress, 
  FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
 
Example #20
Source File: UploadMetadata.java    From datacollector with Apache License 2.0
public Upload getUpload() {
  return upload;
}
 
Example #21
Source File: S3Utils.java    From cloudstack with Apache License 2.0
public static Upload putObject(final ClientOptions clientOptions, final PutObjectRequest req) {
    LOGGER.debug(format("Sending stream as S3 object %1$s in bucket %2$s using PutObjectRequest", req.getKey(), req.getBucketName()));

    return getTransferManager(clientOptions).upload(req);
}
 
Example #22
Source File: S3Utils.java    From cloudstack with Apache License 2.0
public static Upload putObject(final ClientOptions clientOptions, final InputStream sourceStream, final String bucketName, final String key) {
    LOGGER.debug(format("Sending stream as S3 object %1$s in bucket %2$s", key, bucketName));

    return getTransferManager(clientOptions).upload(bucketName, key, sourceStream, null);
}
 
Example #23
Source File: S3Utils.java    From cloudstack with Apache License 2.0
public static Upload putFile(final ClientOptions clientOptions, final File sourceFile, final String bucketName, final String key) {
    LOGGER.debug(format("Sending file %1$s as S3 object %2$s in bucket %3$s", sourceFile.getName(), key, bucketName));

    return getTransferManager(clientOptions).upload(bucketName, key, sourceFile);
}
 
Example #24
Source File: S3AOutputStream.java    From hadoop with Apache License 2.0
@Override
public synchronized void close() throws IOException {
  if (closed) {
    return;
  }

  backupStream.close();
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
  }


  try {
    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    Upload upload = transfers.upload(putObjectRequest);

    ProgressableProgressListener listener = 
      new ProgressableProgressListener(upload, progress, statistics);
    upload.addProgressListener(listener);

    upload.waitForUploadResult();

    long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
    if (statistics != null && delta != 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
      }
      statistics.incrementBytesWritten(delta);
    }

    // This will delete unnecessary fake parent directories
    fs.finishedWrite(key);
  } catch (InterruptedException e) {
    throw new IOException(e);
  } finally {
    if (!backupFile.delete()) {
      LOG.warn("Could not delete temporary s3a file: {}", backupFile);
    }
    super.close();
    closed = true;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' upload complete");
  }
}
 
Example #25
Source File: S3UploadHandle.java    From secor with Apache License 2.0
public S3UploadHandle(Upload u) {
    mUpload = u;
}
 
Example #26
Source File: AwsS3Storage.java    From ecs-sync with Apache License 2.0
@Override
void putObject(SyncObject obj, String targetKey) {
    ObjectMetadata om;
    if (options.isSyncMetadata()) om = s3MetaFromSyncMeta(obj.getMetadata());
    else om = new ObjectMetadata();

    if (obj.getMetadata().isDirectory()) om.setContentType(TYPE_DIRECTORY);

    PutObjectRequest req;
    File file = (File) obj.getProperty(AbstractFilesystemStorage.PROP_FILE);
    S3ProgressListener progressListener = null;
    if (obj.getMetadata().isDirectory()) {
        req = new PutObjectRequest(config.getBucketName(), targetKey, new ByteArrayInputStream(new byte[0]), om);
    } else if (file != null) {
        req = new PutObjectRequest(config.getBucketName(), targetKey, file).withMetadata(om);
        progressListener = new ByteTransferListener(obj);
    } else {
        InputStream stream = obj.getDataStream();
        if (options.isMonitorPerformance())
            stream = new ProgressInputStream(stream, new PerformanceListener(getWriteWindow()));
        req = new PutObjectRequest(config.getBucketName(), targetKey, stream, om);
    }

    if (options.isSyncAcl())
        req.setAccessControlList(s3AclFromSyncAcl(obj.getAcl(), options.isIgnoreInvalidAcls()));

    TransferManager xferManager = null;
    try {
        // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
        // and abort if it fails
        xferManager = TransferManagerBuilder.standard()
                .withS3Client(s3)
                .withExecutorFactory(() -> Executors.newFixedThreadPool(config.getMpuThreadCount()))
                .withMultipartUploadThreshold((long) config.getMpuThresholdMb() * 1024 * 1024)
                .withMinimumUploadPartSize((long) config.getMpuPartSizeMb() * 1024 * 1024)
                .withShutDownThreadPools(true)
                .build();

        // directly update

        final Upload upload = xferManager.upload(req, progressListener);
        try {
            String eTag = time((Callable<String>) () -> upload.waitForUploadResult().getETag(), OPERATION_MPU);
            log.debug("Wrote {}, etag: {}", targetKey, eTag);
        } catch (Exception e) {
            log.error("upload exception", e);
            if (e instanceof RuntimeException) throw (RuntimeException) e;
            throw new RuntimeException("upload thread was interrupted", e);
        }
    } finally {
        // NOTE: apparently if we do not reference xferManager again after the upload() call (as in this finally
        // block), the JVM will for some crazy reason determine it is eligible for GC and call finalize(), which
        // shuts down the thread pool, fails the upload, and gives absolutely no indication of what's going on...
        if (xferManager != null) xferManager.shutdownNow(false);
    }
}
 
Example #27
Source File: S3AFileSystem.java    From hadoop with Apache License 2.0
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
 
Example #28
Source File: S3AOutputStream.java    From big-c with Apache License 2.0
@Override
public synchronized void close() throws IOException {
  if (closed) {
    return;
  }

  backupStream.close();
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
  }


  try {
    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    Upload upload = transfers.upload(putObjectRequest);

    ProgressableProgressListener listener = 
      new ProgressableProgressListener(upload, progress, statistics);
    upload.addProgressListener(listener);

    upload.waitForUploadResult();

    long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
    if (statistics != null && delta != 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
      }
      statistics.incrementBytesWritten(delta);
    }

    // This will delete unnecessary fake parent directories
    fs.finishedWrite(key);
  } catch (InterruptedException e) {
    throw new IOException(e);
  } finally {
    if (!backupFile.delete()) {
      LOG.warn("Could not delete temporary s3a file: {}", backupFile);
    }
    super.close();
    closed = true;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' upload complete");
  }
}
 
Example #29
Source File: S3AFileSystem.java    From big-c with Apache License 2.0
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
 
Example #30
Source File: S3UploadStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();
	final MultipleFileUpload fileUpload;
	ObjectMetadataProvider metadatasProvider = (file, meta) -> {
		if (meta != null) {
			if (RemoteListUploader.this.metadatas != null && RemoteListUploader.this.metadatas.size() > 0) {
				meta.setUserMetadata(RemoteListUploader.this.metadatas);
			}
			if (RemoteListUploader.this.acl != null) {
				meta.setHeader(Headers.S3_CANNED_ACL, RemoteListUploader.this.acl);
			}
			if (RemoteListUploader.this.cacheControl != null && !RemoteListUploader.this.cacheControl.isEmpty()) {
				meta.setCacheControl(RemoteListUploader.this.cacheControl);
			}
			if (RemoteListUploader.this.contentEncoding != null && !RemoteListUploader.this.contentEncoding.isEmpty()) {
				meta.setContentEncoding(RemoteListUploader.this.contentEncoding);
			}
			if (RemoteListUploader.this.contentType != null && !RemoteListUploader.this.contentType.isEmpty()) {
				meta.setContentType(RemoteListUploader.this.contentType);
			}
			if (RemoteListUploader.this.sseAlgorithm != null && !RemoteListUploader.this.sseAlgorithm.isEmpty()) {
				meta.setSSEAlgorithm(RemoteListUploader.this.sseAlgorithm);
			}
			if (RemoteListUploader.this.kmsId != null && !RemoteListUploader.this.kmsId.isEmpty()) {
				final SSEAwsKeyManagementParams sseAwsKeyManagementParams = new SSEAwsKeyManagementParams(RemoteListUploader.this.kmsId);
				meta.setSSEAlgorithm(sseAwsKeyManagementParams.getAwsKmsKeyId());
				meta.setHeader(
						Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
						sseAwsKeyManagementParams.getAwsKmsKeyId()
				);
			}

		}
	};

	ObjectTaggingProvider objectTaggingProvider = (uploadContext) -> {
		List<Tag> tagList = new ArrayList<Tag>();

		//add tags
		if(tags != null){
			for (Map.Entry<String, String> entry : tags.entrySet()) {
				Tag tag = new Tag(entry.getKey(), entry.getValue());
				tagList.add(tag);
			}
		}
		return new ObjectTagging(tagList);
	};

	try {
		fileUpload = mgr.uploadFileList(this.bucket, this.path, localFile, this.fileList, metadatasProvider, objectTaggingProvider);
		for (final Upload upload : fileUpload.getSubTransfers()) {
			upload.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteListUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
				}
			});
		}
		fileUpload.waitForCompletion();
	}
	finally {
		mgr.shutdownNow();
	}
	return null;
}