Java Code Examples for com.amazonaws.services.s3.transfer.TransferManager#shutdownNow()

The following examples show how to use com.amazonaws.services.s3.transfer.TransferManager#shutdownNow(). They are drawn from open source projects; the source file, originating project, and license are listed above each example.
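Most of the examples follow the same pattern: build a TransferManager, start a transfer, wait for it to finish, and call shutdownNow() in a finally block so the transfer threads are released even when the transfer fails. The minimal sketch below illustrates that pattern; the bucket name, key, and file path are placeholders rather than values from any of the projects, and imports are omitted as in the examples.

TransferManager tm = TransferManagerBuilder.standard().build();
try {
    // Start an upload and block until it completes.
    Upload upload = tm.upload("my-bucket", "path/to/key", new File("/tmp/data.bin"));
    upload.waitForCompletion();
} catch (InterruptedException e) {
    // Restore the interrupt flag if the wait is interrupted.
    Thread.currentThread().interrupt();
} finally {
    // The no-argument shutdownNow() also shuts down the underlying Amazon S3 client.
    tm.shutdownNow();
}

Example 1 below shows the same pattern applied to a directory download.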
Example 1
Source File: S3UtilProgram.java    From Flink-CEPplus with Apache License 2.0
private static void downloadByFullPathAndFileNamePrefix(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3prefix = params.getRequired("s3prefix");
	final String localFolder = params.getRequired("localFolder");
	final String s3filePrefix = params.get("s3filePrefix", "");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	Predicate<String> keyPredicate = getKeyFilterByFileNamePrefix(s3filePrefix);
	KeyFilter keyFilter = s3filePrefix.isEmpty() ? KeyFilter.INCLUDE_ALL :
		objectSummary -> keyPredicate.test(objectSummary.getKey());
	try {
		tx.downloadDirectory(bucket, s3prefix, new File(localFolder), keyFilter).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 2
Source File: XferMgrUpload.java    From aws-doc-sdk-examples with Apache License 2.0
public static void uploadDir(String dir_path, String bucket_name,
                             String key_prefix, boolean recursive, boolean pause) {
    System.out.println("directory: " + dir_path + (recursive ?
            " (recursive)" : "") + (pause ? " (pause)" : ""));

    // snippet-start:[s3.java1.s3_xfer_mgr_upload.directory]
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadDirectory(bucket_name,
                key_prefix, new File(dir_path), recursive);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_upload.directory]
}
 
Example 3
Source File: XferMgrCopy.java    From aws-doc-sdk-examples with Apache License 2.0
public static void copyObjectSimple(String from_bucket, String from_key,
                                    String to_bucket, String to_key) {
    // snippet-start:[s3.java1.s3_xfer_mgr_copy.copy_object]
    System.out.println("Copying s3 object: " + from_key);
    System.out.println("      from bucket: " + from_bucket);
    System.out.println("     to s3 object: " + to_key);
    System.out.println("        in bucket: " + to_bucket);

    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        Copy xfer = xfer_mgr.copy(from_bucket, from_key, to_bucket, to_key);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_copy.copy_object]
}
 
Example 4
Source File: XferMgrDownload.java    From aws-doc-sdk-examples with Apache License 2.0
public static void downloadFile(String bucket_name, String key_name,
                                String file_path, boolean pause) {
    System.out.println("Downloading to file: " + file_path +
            (pause ? " (pause)" : ""));

    // snippet-start:[s3.java1.s3_xfer_mgr_download.single]
    File f = new File(file_path);
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        Download xfer = xfer_mgr.download(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_download.single]
}
 
Example 5
Source File: XferMgrDownload.java    From aws-doc-sdk-examples with Apache License 2.0
public static void downloadDir(String bucket_name, String key_prefix,
                               String dir_path, boolean pause) {
    System.out.println("downloading to directory: " + dir_path +
            (pause ? " (pause)" : ""));

    // snippet-start:[s3.java1.s3_xfer_mgr_download.directory]
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();

    try {
        MultipleFileDownload xfer = xfer_mgr.downloadDirectory(
                bucket_name, key_prefix, new File(dir_path));
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_download.directory]
}
 
Example 6
Source File: XferMgrProgress.java    From dlp-dataflow-deidentification with Apache License 2.0
public static void uploadDirWithSubprogress(
    String dir_path, String bucket_name, String key_prefix, boolean recursive, boolean pause) {
  System.out.println(
      "directory: " + dir_path + (recursive ? " (recursive)" : "") + (pause ? " (pause)" : ""));

  TransferManager xfer_mgr = new TransferManager();
  try {
    MultipleFileUpload multi_upload =
        xfer_mgr.uploadDirectory(bucket_name, key_prefix, new File(dir_path), recursive);
    // loop with Transfer.isDone()
    XferMgrProgress.showMultiUploadProgress(multi_upload);
    // or block with Transfer.waitForCompletion()
    XferMgrProgress.waitForCompletion(multi_upload);
  } catch (AmazonServiceException e) {
    System.err.println(e.getErrorMessage());
    System.exit(1);
  }
  xfer_mgr.shutdownNow();
}
 
Example 7
Source File: S3UtilProgram.java    From flink with Apache License 2.0
private static void downloadByFullPathAndFileNamePrefix(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3prefix = params.getRequired("s3prefix");
	final String localFolder = params.getRequired("localFolder");
	final String s3filePrefix = params.get("s3filePrefix", "");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	Predicate<String> keyPredicate = getKeyFilterByFileNamePrefix(s3filePrefix);
	KeyFilter keyFilter = s3filePrefix.isEmpty() ? KeyFilter.INCLUDE_ALL :
		objectSummary -> keyPredicate.test(objectSummary.getKey());
	try {
		tx.downloadDirectory(bucket, s3prefix, new File(localFolder), keyFilter).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 8
Source File: S3Callable.java    From jobcacher-plugin with MIT License
/**
 * Override this if you don't want a transfer manager
 */
@Override
public T invoke(File f, VirtualChannel channel) throws IOException, InterruptedException {
    TransferManager transferManager = new TransferManager(helper.client());

    try {
        return invoke(transferManager, f, channel);
    } finally {
        transferManager.shutdownNow();
    }
}
 
Example 9
Source File: S3DownloadStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();

	if (this.path == null || this.path.isEmpty() || this.path.endsWith("/")) {
		try {
			final MultipleFileDownload fileDownload = mgr.downloadDirectory(this.bucket, this.path, localFile);
			fileDownload.waitForCompletion();
			RemoteDownloader.this.taskListener.getLogger().println("Finished: " + fileDownload.getDescription());
		}
		finally {
			mgr.shutdownNow();
		}
		return null;
	} else {
		try {
			final Download download = mgr.download(this.bucket, this.path, localFile);
			download.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteDownloader.this.taskListener.getLogger().println("Finished: " + download.getDescription());
				}
			});
			download.waitForCompletion();
		}
		finally {
			mgr.shutdownNow();
		}
		return null;
	}
}
 
Example 10
Source File: S3UtilProgram.java    From flink with Apache License 2.0
private static void downloadFile(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3file = params.getRequired("s3file");
	final String localFile = params.getRequired("localFile");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	try {
		tx.download(bucket, s3file, new File(localFile)).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 11
Source File: S3UtilProgram.java    From Flink-CEPplus with Apache License 2.0
private static void downloadFile(ParameterTool params) {
	final String bucket = params.getRequired("bucket");
	final String s3file = params.getRequired("s3file");
	final String localFile = params.getRequired("localFile");
	TransferManager tx = TransferManagerBuilder.defaultTransferManager();
	try {
		tx.download(bucket, s3file, new File(localFile)).waitForCompletion();
	} catch (InterruptedException e) {
		System.out.println("Transfer interrupted");
	} finally {
		tx.shutdownNow();
	}
}
 
Example 12
Source File: XferMgrUpload.java    From aws-doc-sdk-examples with Apache License 2.0
public static void uploadFileList(String[] file_paths, String bucket_name,
                                  String key_prefix, boolean pause) {
    System.out.println("file list: " + Arrays.toString(file_paths) +
            (pause ? " (pause)" : ""));
    // convert the file paths to a list of File objects (required by the
    // uploadFileList method)
    // snippet-start:[s3.java1.s3_xfer_mgr_upload.list_of_files]
    ArrayList<File> files = new ArrayList<File>();
    for (String path : file_paths) {
        files.add(new File(path));
    }

    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadFileList(bucket_name,
                key_prefix, new File("."), files);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_upload.list_of_files]
}
 
Example 13
Source File: XferMgrUpload.java    From aws-doc-sdk-examples with Apache License 2.0
public static void uploadFile(String file_path, String bucket_name,
                              String key_prefix, boolean pause) {
    System.out.println("file: " + file_path +
            (pause ? " (pause)" : ""));

    String key_name = null;
    if (key_prefix != null) {
        key_name = key_prefix + '/' + file_path;
    } else {
        key_name = file_path;
    }

    // snippet-start:[s3.java1.s3_xfer_mgr_upload.single]
    File f = new File(file_path);
    TransferManager xfer_mgr = TransferManagerBuilder.standard().build();
    try {
        Upload xfer = xfer_mgr.upload(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        //  or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
    // snippet-end:[s3.java1.s3_xfer_mgr_upload.single]
}
 
Example 14
Source File: BackgroundUploader.java    From openbd-core with GNU General Public License v3.0
private void cleanupMultiPartUploads(AmazonS3 s3Client, String bucket) {
	TransferManager tm = new TransferManager(s3Client);
	try {
		tm.abortMultipartUploads(bucket, new Date(System.currentTimeMillis() - DateUtil.DAY_MS));
	} catch (AmazonClientException amazonClientException) {
		cfEngine.log("AmazonS3Write.BackgroundUploader.cleanupMultiPartUploads():" + amazonClientException.getMessage());
	}
	tm.shutdownNow(true);
}
 
Example 15
Source File: S3OutputStream.java    From CloverETL-Engine with GNU Lesser General Public License v2.1
private void upload() throws IOException {
	if (uploaded) {
		return;
	}
	try {
		uploaded = true;
		os.close();
		os = null;
		
		if (!S3InputStream.isS3File(url)) {
			throw new IllegalArgumentException("Not an Amazon S3 host");
		}
		
		String accessKey = S3InputStream.getAccessKey(url);
		String secretKey = S3InputStream.getSecretKey(url);
		String path = url.getFile();
		if (path.startsWith("/")) {
			path = path.substring(1);
		}
		
		AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
		AmazonS3Client service = new AmazonS3Client(credentials);
		transferManager = new TransferManager(service);
		TransferManagerConfiguration config = new TransferManagerConfiguration();
		config.setMultipartUploadThreshold(MULTIPART_UPLOAD_THRESHOLD);
		config.setMinimumUploadPartSize(MULTIPART_UPLOAD_THRESHOLD);
		transferManager.setConfiguration(config);
		
		String bucket = S3InputStream.getBucket(url);

		// CLO-4724:
		S3Utils.uploadFile(transferManager, tempFile, bucket, path);
		
	} finally {
		tempFile.delete();
		if (transferManager != null) {
			transferManager.shutdownNow();
		}
	}
}
 
Example 16
Source File: S3OutputStreamWrapper.java    From streams with Apache License 2.0
private void addFile() throws Exception {

    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();

    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);

    metadata.addUserMetadata("writer", "org.apache.streams");

    for (String s : metaData.keySet()) {
      metadata.addUserMetadata(s, metaData.get(s));
    }

    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
      upload.waitForUploadResult();

      is.close();
      transferManager.shutdownNow(false);
      LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
    } catch (Exception ignored) {
      LOGGER.trace("Ignoring", ignored);
    }
  }
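Note the shutdownNow(false) call above: the single-argument overload shutdownNow(boolean shutDownS3Client) controls whether the underlying AmazonS3 client is shut down along with the transfer threads. The no-argument shutdownNow() (and shutdownNow(true), as in Example 14) also shuts down the client, so pass false when the client is shared and still needed elsewhere, as in this example and Example 20.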
 
Example 17
Source File: S3StorageDriver.java    From dcos-cassandra-service with Apache License 2.0
@Override
public void upload(BackupRestoreContext ctx) throws Exception {
    final String localLocation = ctx.getLocalLocation();
    final String backupName = ctx.getName();
    final String nodeId = ctx.getNodeId();
    final String key = getPrefixKey(ctx) + "/" + nodeId;
    LOGGER.info("Backup key: " + key);
    final TransferManager tx = getS3TransferManager(ctx);
    final File dataDirectory = new File(localLocation);

    try {
        // Ex: data/<keyspace>/<cf>/snapshots/</snapshot-dir>/<files>
        for (File keyspaceDir : dataDirectory.listFiles()) {
            if (keyspaceDir.isFile()) {
                // Skip any files in the data directory.
                // Only enter keyspace directory.
                continue;
            }
            LOGGER.info("Entering keyspace: {}", keyspaceDir.getName());
            for (File cfDir : getColumnFamilyDir(keyspaceDir)) {
                LOGGER.info("Entering column family dir: {}", cfDir.getName());
                File snapshotDir = new File(cfDir, "snapshots");
                File backupDir = new File(snapshotDir, backupName);
                if (!StorageUtil.isValidBackupDir(keyspaceDir, cfDir, snapshotDir, backupDir)) {
                    LOGGER.info("Skipping directory: {}",
                            snapshotDir.getAbsolutePath());
                    continue;
                }
                LOGGER.info(
                        "Valid backup directories. KeyspaceDir: {} | ColumnFamilyDir: {} | SnapshotDir: {} | BackupName: {}",
                        keyspaceDir.getAbsolutePath(), cfDir.getAbsolutePath(),
                        snapshotDir.getAbsolutePath(), backupName);

                final Optional<File> snapshotDirectory = StorageUtil.getValidSnapshotDirectory(
                        snapshotDir, backupName);
                LOGGER.info("Valid snapshot directory: {}",
                        snapshotDirectory.isPresent());
                if (snapshotDirectory.isPresent()) {
                    // Upload this directory
                    LOGGER.info("Going to upload directory: {}",
                            snapshotDirectory.get().getAbsolutePath());

                    uploadDirectory(
                            tx,
                            getBucketName(ctx),
                            key,
                            keyspaceDir.getName(),
                            cfDir.getName(),
                            snapshotDirectory.get());
                } else {
                    LOGGER.warn(
                            "Snapshots directory: {} doesn't contain the current backup directory: {}",
                            snapshotDir.getName(), backupName);
                }
            }
        }
        LOGGER.info("Done uploading snapshots for backup: {}", backupName);
    } catch (Exception e) {
        LOGGER.info("Failed uploading snapshots for backup: {}, error: {}", backupName, e);
        throw new Exception(e);
    } finally {
        tx.shutdownNow();
    }
}
 
Example 18
Source File: S3CopyStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public String run() throws Exception {
	final String fromBucket = this.step.getFromBucket();
	final String toBucket = this.step.getToBucket();
	final String fromPath = this.step.getFromPath();
	final String toPath = this.step.getToPath();
	final String kmsId = this.step.getKmsId();
	final Map<String, String> metadatas = new HashMap<>();
	final CannedAccessControlList acl = this.step.getAcl();
	final String cacheControl = this.step.getCacheControl();
	final String contentType = this.step.getContentType();
	final String sseAlgorithm = this.step.getSseAlgorithm();
	final S3ClientOptions s3ClientOptions = this.step.createS3ClientOptions();
	final EnvVars envVars = this.getContext().get(EnvVars.class);

	if (this.step.getMetadatas() != null && this.step.getMetadatas().length != 0) {
		for (String metadata : this.step.getMetadatas()) {
			if (metadata.split(":").length == 2) {
				metadatas.put(metadata.split(":")[0], metadata.split(":")[1]);
			}
		}
	}

	Preconditions.checkArgument(fromBucket != null && !fromBucket.isEmpty(), "From bucket must not be null or empty");
	Preconditions.checkArgument(fromPath != null && !fromPath.isEmpty(), "From path must not be null or empty");
	Preconditions.checkArgument(toBucket != null && !toBucket.isEmpty(), "To bucket must not be null or empty");
	Preconditions.checkArgument(toPath != null && !toPath.isEmpty(), "To path must not be null or empty");

	TaskListener listener = Execution.this.getContext().get(TaskListener.class);
	listener.getLogger().format("Copying s3://%s/%s to s3://%s/%s%n", fromBucket, fromPath, toBucket, toPath);

	CopyObjectRequest request = new CopyObjectRequest(fromBucket, fromPath, toBucket, toPath);

	// Add metadata
	if (metadatas.size() > 0 || (cacheControl != null && !cacheControl.isEmpty()) || (contentType != null && !contentType.isEmpty()) || (sseAlgorithm != null && !sseAlgorithm.isEmpty())) {
		ObjectMetadata metas = new ObjectMetadata();
		if (metadatas.size() > 0) {
			metas.setUserMetadata(metadatas);
		}
		if (cacheControl != null && !cacheControl.isEmpty()) {
			metas.setCacheControl(cacheControl);
		}
		if (contentType != null && !contentType.isEmpty()) {
			metas.setContentType(contentType);
		}
		if (sseAlgorithm != null && !sseAlgorithm.isEmpty()) {
			metas.setSSEAlgorithm(sseAlgorithm);
		}
		request.withNewObjectMetadata(metas);
	}

	// Add acl
	if (acl != null) {
		request.withCannedAccessControlList(acl);
	}

	// Add kms
	if (kmsId != null && !kmsId.isEmpty()) {
		listener.getLogger().format("Using KMS: %s%n", kmsId);
		request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsId));
	}

	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(s3ClientOptions.createAmazonS3ClientBuilder(), envVars))
			.build();
	try {
		final Copy copy = mgr.copy(request);
		copy.addProgressListener((ProgressListener) progressEvent -> {
			if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
				listener.getLogger().println("Finished: " + copy.getDescription());
			}
		});
		copy.waitForCompletion();
	}
	finally {
		mgr.shutdownNow();
	}

	listener.getLogger().println("Copy complete");
	return String.format("s3://%s/%s", toBucket, toPath);
}
 
Example 19
Source File: S3UploadStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();
	final MultipleFileUpload fileUpload;
	ObjectMetadataProvider metadatasProvider = (file, meta) -> {
		if (meta != null) {
			if (RemoteListUploader.this.metadatas != null && RemoteListUploader.this.metadatas.size() > 0) {
				meta.setUserMetadata(RemoteListUploader.this.metadatas);
			}
			if (RemoteListUploader.this.acl != null) {
				meta.setHeader(Headers.S3_CANNED_ACL, RemoteListUploader.this.acl);
			}
			if (RemoteListUploader.this.cacheControl != null && !RemoteListUploader.this.cacheControl.isEmpty()) {
				meta.setCacheControl(RemoteListUploader.this.cacheControl);
			}
			if (RemoteListUploader.this.contentEncoding != null && !RemoteListUploader.this.contentEncoding.isEmpty()) {
				meta.setContentEncoding(RemoteListUploader.this.contentEncoding);
			}
			if (RemoteListUploader.this.contentType != null && !RemoteListUploader.this.contentType.isEmpty()) {
				meta.setContentType(RemoteListUploader.this.contentType);
			}
			if (RemoteListUploader.this.sseAlgorithm != null && !RemoteListUploader.this.sseAlgorithm.isEmpty()) {
				meta.setSSEAlgorithm(RemoteListUploader.this.sseAlgorithm);
			}
			if (RemoteListUploader.this.kmsId != null && !RemoteListUploader.this.kmsId.isEmpty()) {
				final SSEAwsKeyManagementParams sseAwsKeyManagementParams = new SSEAwsKeyManagementParams(RemoteListUploader.this.kmsId);
				meta.setSSEAlgorithm(sseAwsKeyManagementParams.getAwsKmsKeyId());
				meta.setHeader(
						Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
						sseAwsKeyManagementParams.getAwsKmsKeyId()
				);
			}

		}
	};

	ObjectTaggingProvider objectTaggingProvider = (uploadContext) -> {
		List<Tag> tagList = new ArrayList<Tag>();

		// add tags
		if (tags != null) {
			for (Map.Entry<String, String> entry : tags.entrySet()) {
				Tag tag = new Tag(entry.getKey(), entry.getValue());
				tagList.add(tag);
			}
		}
		return new ObjectTagging(tagList);
	};

	try {
		fileUpload = mgr.uploadFileList(this.bucket, this.path, localFile, this.fileList, metadatasProvider, objectTaggingProvider);
		for (final Upload upload : fileUpload.getSubTransfers()) {
			upload.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteListUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
				}
			});
		}
		fileUpload.waitForCompletion();
	}
	finally {
		mgr.shutdownNow();
	}
	return null;
}
 
Example 20
Source File: AwsS3Storage.java    From ecs-sync with Apache License 2.0
@Override
void putObject(SyncObject obj, String targetKey) {
    ObjectMetadata om;
    if (options.isSyncMetadata()) om = s3MetaFromSyncMeta(obj.getMetadata());
    else om = new ObjectMetadata();

    if (obj.getMetadata().isDirectory()) om.setContentType(TYPE_DIRECTORY);

    PutObjectRequest req;
    File file = (File) obj.getProperty(AbstractFilesystemStorage.PROP_FILE);
    S3ProgressListener progressListener = null;
    if (obj.getMetadata().isDirectory()) {
        req = new PutObjectRequest(config.getBucketName(), targetKey, new ByteArrayInputStream(new byte[0]), om);
    } else if (file != null) {
        req = new PutObjectRequest(config.getBucketName(), targetKey, file).withMetadata(om);
        progressListener = new ByteTransferListener(obj);
    } else {
        InputStream stream = obj.getDataStream();
        if (options.isMonitorPerformance())
            stream = new ProgressInputStream(stream, new PerformanceListener(getWriteWindow()));
        req = new PutObjectRequest(config.getBucketName(), targetKey, stream, om);
    }

    if (options.isSyncAcl())
        req.setAccessControlList(s3AclFromSyncAcl(obj.getAcl(), options.isIgnoreInvalidAcls()));

    TransferManager xferManager = null;
    try {
        // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
        // and abort if it fails
        xferManager = TransferManagerBuilder.standard()
                .withS3Client(s3)
                .withExecutorFactory(() -> Executors.newFixedThreadPool(config.getMpuThreadCount()))
                .withMultipartUploadThreshold((long) config.getMpuThresholdMb() * 1024 * 1024)
                .withMinimumUploadPartSize((long) config.getMpuPartSizeMb() * 1024 * 1024)
                .withShutDownThreadPools(true)
                .build();

        // directly update

        final Upload upload = xferManager.upload(req, progressListener);
        try {
            String eTag = time((Callable<String>) () -> upload.waitForUploadResult().getETag(), OPERATION_MPU);
            log.debug("Wrote {}, etag: {}", targetKey, eTag);
        } catch (Exception e) {
            log.error("upload exception", e);
            if (e instanceof RuntimeException) throw (RuntimeException) e;
            throw new RuntimeException("upload thread was interrupted", e);
        }
    } finally {
        // NOTE: apparently if we do not reference xferManager again after the upload() call (as in this finally
        // block), the JVM will for some crazy reason determine it is eligible for GC and call finalize(), which
        // shuts down the thread pool, fails the upload, and gives absolutely no indication of what's going on...
        if (xferManager != null) xferManager.shutdownNow(false);
    }
}