com.amazonaws.event.ProgressListener Java Examples

The following examples show how to use com.amazonaws.event.ProgressListener. Each example is taken from an open source project; the source file, project, and license are noted in the heading above it.
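
Every example below follows the same basic pattern: implement the listener's single progressChanged(ProgressEvent) callback, attach it to a request or transfer, and react to the event type or byte count. Here is a minimal, self-contained sketch of that pattern; the bucket name, key, and file path are placeholders rather than values from any project below.

import java.io.File;

import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class ProgressListenerSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 amazonS3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager transferManager = TransferManagerBuilder.standard()
                .withS3Client(amazonS3)
                .build();

        // ProgressListener has a single abstract method, so a lambda works.
        ProgressListener listener = progressEvent -> {
            if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
                System.out.println("Transfer completed");
            } else {
                // getBytesTransferred() is the byte delta carried by this event.
                System.out.println("Transferred bytes: " + progressEvent.getBytesTransferred());
            }
        };

        // Placeholder bucket, key, and local file.
        Upload upload = transferManager.upload("my-bucket", "my-key", new File("/tmp/example.bin"));
        upload.addProgressListener(listener);

        try {
            upload.waitForCompletion();
        } finally {
            transferManager.shutdownNow();
        }
    }
}

Several of the examples cast a lambda to (ProgressListener) when calling addProgressListener; the cast disambiguates the call where addProgressListener is also overloaded with a deprecated legacy listener type, so the compiler cannot infer the target interface on its own.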
Example #1
Source File: PrestoS3FileSystem.java    From presto with Apache License 2.0
private ProgressListener createProgressListener(Transfer transfer)
{
    return new ProgressListener()
    {
        private ProgressEventType previousType;
        private double previousTransferred;

        @Override
        public synchronized void progressChanged(ProgressEvent progressEvent)
        {
            ProgressEventType eventType = progressEvent.getEventType();
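            // Log each event type once per transition, to avoid flooding the log.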
            if (previousType != eventType) {
                log.debug("Upload progress event (%s/%s): %s", bucket, key, eventType);
                previousType = eventType;
            }

            double transferred = transfer.getProgress().getPercentTransferred();
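            // Log the percentage only when it has advanced at least 10 points since the last log.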
            if (transferred >= (previousTransferred + 10.0)) {
                log.debug("Upload percentage (%s/%s): %.0f%%", bucket, key, transferred);
                previousTransferred = transferred;
            }
        }
    };
}
 
Example #2
Source File: FileHelper.java    From datacollector with Apache License 2.0
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
  final PutObjectRequest putObjectRequest = new PutObjectRequest(
      bucket,
      fileName,
      is,
      metadata
  );
  final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
  Upload upload = transferManager.upload(putObjectRequest);
  upload.addProgressListener((ProgressListener) progressEvent -> {
    switch (progressEvent.getEventType()) {
      case TRANSFER_STARTED_EVENT:
        LOG.debug("Started uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_COMPLETED_EVENT:
        LOG.debug("Completed uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_FAILED_EVENT:
        LOG.debug("Failed uploading object {} into Amazon S3", object);
        break;
      default:
        break;
    }
  });
  return upload;
}
 
Example #3
Source File: S3AFileSystem.java    From hadoop with Apache License 2.0
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
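      // Count each completed part of the multipart copy as one write operation.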
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
 
Example #4
Source File: S3AFileSystem.java    From big-c with Apache License 2.0
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
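      // Count each completed part of the multipart copy as one write operation.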
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
 
Example #5
Source File: MultipartUpload.java    From tutorials with MIT License
public static void main(String[] args) throws Exception {
    String existingBucketName = "baeldung-bucket";
    String keyName = "my-picture.jpg";
    String filePath = "documents/my-picture.jpg";

    AmazonS3 amazonS3 = AmazonS3ClientBuilder
            .standard()
            .withCredentials(new DefaultAWSCredentialsProviderChain())
            .withRegion(Regions.DEFAULT_REGION)
            .build();

    int maxUploadThreads = 5;

    TransferManager tm = TransferManagerBuilder
            .standard()
            .withS3Client(amazonS3)
            .withMultipartUploadThreshold((long) (5 * 1024 * 1024))
            .withExecutorFactory(() -> Executors.newFixedThreadPool(maxUploadThreads))
            .build();

    ProgressListener progressListener =
            progressEvent -> System.out.println("Transferred bytes: " + progressEvent.getBytesTransferred());

    PutObjectRequest request = new PutObjectRequest(existingBucketName, keyName, new File(filePath));

    request.setGeneralProgressListener(progressListener);

    Upload upload = tm.upload(request);

    try {
        upload.waitForCompletion();
        System.out.println("Upload complete.");
    } catch (AmazonClientException e) {
        System.out.println("Error occurred while uploading file");
        e.printStackTrace();
    }
}
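
Note that ProgressEvent.getBytesTransferred() reports the byte count carried by each individual event, not a running total; Example #8 below accumulates these deltas for exactly that reason.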
 
Example #6
Source File: S3DownloadStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();

	if (this.path == null || this.path.isEmpty() || this.path.endsWith("/")) {
		try {
			final MultipleFileDownload fileDownload = mgr.downloadDirectory(this.bucket, this.path, localFile);
			fileDownload.waitForCompletion();
			RemoteDownloader.this.taskListener.getLogger().println("Finished: " + fileDownload.getDescription());
		}
		finally {
			mgr.shutdownNow();
		}
		return null;
	} else {
		try {
			final Download download = mgr.download(this.bucket, this.path, localFile);
			download.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteDownloader.this.taskListener.getLogger().println("Finished: " + download.getDescription());
				}
			});
			download.waitForCompletion();
		}
		finally {
			mgr.shutdownNow();
		}
		return null;
	}
}
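
Only the single-file branch attaches a ProgressListener; the directory branch relies on waitForCompletion() and reports once at the end. In both branches the TransferManager is shut down in a finally block so its worker threads do not leak.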
 
Example #7
Source File: S3BroadcastManager.java    From kickflip-android-sdk with Apache License 2.0
public void queueUpload(final String bucket, final String key, final File file, boolean lastUpload) {
    if (VERBOSE) Log.i(TAG, "Queueing upload " + key);

    final PutObjectRequest por = new PutObjectRequest(bucket, key, file);
    por.setGeneralProgressListener(new ProgressListener() {
        final String url = "https://" + bucket + ".s3.amazonaws.com/" + key;
        private long uploadStartTime;

        @Override
        public void progressChanged(com.amazonaws.event.ProgressEvent progressEvent) {
            try {
                if (progressEvent.getEventCode() == ProgressEvent.STARTED_EVENT_CODE) {
                    uploadStartTime = System.currentTimeMillis();
                } else if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE) {
                    long uploadDurationMillis = System.currentTimeMillis() - uploadStartTime;
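                    // Average throughput across the whole upload, in bytes per second.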
                    int bytesPerSecond = (int) (file.length() / (uploadDurationMillis / 1000.0));
                    if (VERBOSE)
                        Log.i(TAG, "Uploaded " + file.length() / 1000.0 + " KB in " + (uploadDurationMillis) + "ms (" + bytesPerSecond / 1000.0 + " KBps)");
                    mBroadcaster.onS3UploadComplete(new S3UploadEvent(file, url, bytesPerSecond));
                } else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
                    Log.w(TAG, "Upload failed for " + url);
                }
            } catch (Exception excp) {
                Log.e(TAG, "ProgressListener error");
                excp.printStackTrace();
            }
        }
    });
    por.setCannedAcl(CannedAccessControlList.PublicRead);
    for (WeakReference<S3RequestInterceptor> ref : mInterceptors) {
        S3RequestInterceptor interceptor = ref.get();
        if (interceptor != null) {
            interceptor.interceptRequest(por);
        }
    }
    mQueue.add(new Pair<>(por, lastUpload));
}
 
Example #8
Source File: S3ScanWriter.java    From emodb with Apache License 2.0
/**
 * Starts an asynchronous upload and returns a ListenableFuture for handling the result.
 */
synchronized ListenableFuture<String> upload() {
    // Reset values from possible prior attempt
    _attempts += 1;
    _bytesTransferred = 0;

    // Separate the future returned to the caller from the future generated by submitting the
    // putObject request.  If the writer is closed then uploadFuture may be canceled before it executes,
    // in which case it may not trigger any callbacks.  To ensure there is always a callback resultFuture is
    // tracked independently and, in the event that the upload is aborted, gets set on abort().
    _resultFuture = SettableFuture.create();

    _uploadFuture = _uploadService.submit(new Runnable() {
        @Override
        public void run() {
            try {
                ProgressListener progressListener = new ProgressListener() {
                    @Override
                    public void progressChanged(ProgressEvent progressEvent) {
                        // getBytesTransferred() returns zero for all events not pertaining to the file transfer
                        _bytesTransferred += progressEvent.getBytesTransferred();
                    }
                };

                PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, _key, _file);
                putObjectRequest.setGeneralProgressListener(progressListener);
                PutObjectResult result = _amazonS3.putObject(putObjectRequest);
                _resultFuture.set(result.getETag());
            } catch (Throwable t) {
                _resultFuture.setException(t);
            }
        }
    });

    return _resultFuture;
}
 
Example #9
Source File: S3FileManagerImpl.java    From entrada with GNU General Public License v3.0
private boolean uploadDirectory(File location, S3Details dstDetails, boolean archive) {

    ObjectMetadataProvider metaDataProvider = (file, meta) -> {

      if (archive) {
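        // Store archived uploads under the configured archive storage class.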
        meta
            .setHeader(Headers.STORAGE_CLASS,
                StorageClass.fromValue(StringUtils.upperCase(archiveStorageClass)));
      }

      if (encrypt) {
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
      }
    };

    MultipleFileUpload upload = transferManager
        .uploadDirectory(dstDetails.getBucket(), dstDetails.getKey(), location, true,
            metaDataProvider);
    if (log.isDebugEnabled()) {
      ProgressListener progressListener = progressEvent -> log
          .debug("S3 Transferred bytes: " + progressEvent.getBytesTransferred());
      upload.addProgressListener(progressListener);
    }

    try {
      upload.waitForCompletion();
      return true;
    } catch (Exception e) {
      log.error("Error while uploading directory: {}", location, e);
    }

    return false;
  }
 
Example #10
Source File: S3Manager.java    From datacollector with Apache License 2.0
String uploadToS3(String name, File file) throws IOException {
  long start = System.currentTimeMillis();
  long fileLength = file.length() / (1000 * 1000);
  String bucket = getBucket(pipelineEmrConfigs.getS3StagingUri());
  String path = getPath(pipelineEmrConfigs.getS3StagingUri()) + "/" + pipelineId + "/" + uniquePrefix;
  String s3Uri = "s3://" + bucket + "/" + path + "/" + name;
  try {

    // Upload
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, path + "/" + name, file);
    putObjectRequest.setGeneralProgressListener(new ProgressListener() {
      long counter;
      long tick = -1;
      @Override
      public void progressChanged(ProgressEvent progressEvent) {
        counter += progressEvent.getBytesTransferred();
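        // Log progress roughly once per 100 MB transferred.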
        if (counter / (100 * 1000000) > tick) {
          tick++;
          LOG.debug(
              "Uploading '{}' {}/{} MB, {} secs",
              s3Uri,
              counter / (1000 * 1000),
              fileLength,
              (System.currentTimeMillis() - start) / 1000
          );
        }
      }
    });

    getS3TransferManager().upload(putObjectRequest).waitForCompletion();

    LOG.info("Uploaded file at: {}", s3Uri);
    return s3Uri;
  } catch (SdkBaseException|InterruptedException ex) {
    throw new IOException(ex);
  }
}
 
Example #11
Source File: AmazonS3Storage.java    From thunderbit with GNU Affero General Public License v3.0
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException (ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}
 
Example #12
Source File: AmazonS3SinkMockTests.java    From spring-cloud-stream-app-starters with Apache License 2.0
@Test
@Override
public void test() throws Exception {
	AmazonS3 amazonS3Client = TestUtils.getPropertyValue(this.s3MessageHandler, "transferManager.s3",
			AmazonS3.class);

	File file = this.temporaryFolder.newFile("foo.mp3");
	Message<?> message = MessageBuilder.withPayload(file)
			.build();

	this.channels.input().send(message);

	ArgumentCaptor<PutObjectRequest> putObjectRequestArgumentCaptor =
			ArgumentCaptor.forClass(PutObjectRequest.class);
	verify(amazonS3Client, atLeastOnce()).putObject(putObjectRequestArgumentCaptor.capture());

	PutObjectRequest putObjectRequest = putObjectRequestArgumentCaptor.getValue();
	assertThat(putObjectRequest.getBucketName(), equalTo(S3_BUCKET));
	assertThat(putObjectRequest.getKey(), equalTo("foo.mp3"));
	assertNotNull(putObjectRequest.getFile());
	assertNull(putObjectRequest.getInputStream());

	ObjectMetadata metadata = putObjectRequest.getMetadata();
	assertThat(metadata.getContentMD5(), equalTo(Md5Utils.md5AsBase64(file)));
	assertThat(metadata.getContentLength(), equalTo(0L));
	assertThat(metadata.getContentType(), equalTo("audio/mpeg"));

	ProgressListener listener = putObjectRequest.getGeneralProgressListener();
	S3ProgressPublisher.publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);

	assertTrue(this.transferCompletedLatch.await(10, TimeUnit.SECONDS));
	assertTrue(this.aclLatch.await(10, TimeUnit.SECONDS));

	ArgumentCaptor<SetObjectAclRequest> setObjectAclRequestArgumentCaptor =
			ArgumentCaptor.forClass(SetObjectAclRequest.class);
	verify(amazonS3Client).setObjectAcl(setObjectAclRequestArgumentCaptor.capture());

	SetObjectAclRequest setObjectAclRequest = setObjectAclRequestArgumentCaptor.getValue();

	assertThat(setObjectAclRequest.getBucketName(), equalTo(S3_BUCKET));
	assertThat(setObjectAclRequest.getKey(), equalTo("foo.mp3"));
	assertNull(setObjectAclRequest.getAcl());
	assertThat(setObjectAclRequest.getCannedAcl(), equalTo(CannedAccessControlList.PublicReadWrite));
}
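
No real transfer runs here: the test captures the request's general progress listener and uses S3ProgressPublisher.publishProgress to fire TRANSFER_COMPLETED_EVENT at it, which lets the test assert on the completion and ACL latches without touching S3.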
 
Example #13
Source File: Request.java    From dynamodb-transactions with Apache License 2.0
@JsonIgnore
public abstract void setGeneralProgressListener(ProgressListener progressListener);
 
Example #14
Source File: Request.java    From dynamodb-transactions with Apache License 2.0
@JsonIgnore
public abstract ProgressListener getGeneralProgressListener();
 
Example #15
Source File: S3UploadStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public Void invoke(File localFile, VirtualChannel channel) throws IOException, InterruptedException {
	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(this.amazonS3ClientOptions.createAmazonS3ClientBuilder(), this.envVars))
			.build();
	final MultipleFileUpload fileUpload;
	ObjectMetadataProvider metadatasProvider = (file, meta) -> {
		if (meta != null) {
			if (RemoteListUploader.this.metadatas != null && RemoteListUploader.this.metadatas.size() > 0) {
				meta.setUserMetadata(RemoteListUploader.this.metadatas);
			}
			if (RemoteListUploader.this.acl != null) {
				meta.setHeader(Headers.S3_CANNED_ACL, RemoteListUploader.this.acl);
			}
			if (RemoteListUploader.this.cacheControl != null && !RemoteListUploader.this.cacheControl.isEmpty()) {
				meta.setCacheControl(RemoteListUploader.this.cacheControl);
			}
			if (RemoteListUploader.this.contentEncoding != null && !RemoteListUploader.this.contentEncoding.isEmpty()) {
				meta.setContentEncoding(RemoteListUploader.this.contentEncoding);
			}
			if (RemoteListUploader.this.contentType != null && !RemoteListUploader.this.contentType.isEmpty()) {
				meta.setContentType(RemoteListUploader.this.contentType);
			}
			if (RemoteListUploader.this.sseAlgorithm != null && !RemoteListUploader.this.sseAlgorithm.isEmpty()) {
				meta.setSSEAlgorithm(RemoteListUploader.this.sseAlgorithm);
			}
			if (RemoteListUploader.this.kmsId != null && !RemoteListUploader.this.kmsId.isEmpty()) {
				final SSEAwsKeyManagementParams sseAwsKeyManagementParams = new SSEAwsKeyManagementParams(RemoteListUploader.this.kmsId);
				// Record the KMS encryption scheme ("aws:kms"); the key id itself goes in the header below.
				meta.setSSEAlgorithm(sseAwsKeyManagementParams.getEncryption());
				meta.setHeader(
						Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
						sseAwsKeyManagementParams.getAwsKmsKeyId()
				);
			}

		}
	};

	ObjectTaggingProvider objectTaggingProvider =(uploadContext) -> {
		List<Tag> tagList = new ArrayList<Tag>();

		//add tags
		if(tags != null){
			for (Map.Entry<String, String> entry : tags.entrySet()) {
				Tag tag = new Tag(entry.getKey(), entry.getValue());
				tagList.add(tag);
			}
		}
		return new ObjectTagging(tagList);
	};

	try {
		fileUpload = mgr.uploadFileList(this.bucket, this.path, localFile, this.fileList, metadatasProvider, objectTaggingProvider);
		for (final Upload upload : fileUpload.getSubTransfers()) {
			upload.addProgressListener((ProgressListener) progressEvent -> {
				if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
					RemoteListUploader.this.taskListener.getLogger().println("Finished: " + upload.getDescription());
				}
			});
		}
		fileUpload.waitForCompletion();
	}
	finally {
		mgr.shutdownNow();
	}
	return null;
}
 
Example #16
Source File: S3CopyStep.java    From pipeline-aws-plugin with Apache License 2.0
@Override
public String run() throws Exception {
	final String fromBucket = this.step.getFromBucket();
	final String toBucket = this.step.getToBucket();
	final String fromPath = this.step.getFromPath();
	final String toPath = this.step.getToPath();
	final String kmsId = this.step.getKmsId();
	final Map<String, String> metadatas = new HashMap<>();
	final CannedAccessControlList acl = this.step.getAcl();
	final String cacheControl = this.step.getCacheControl();
	final String contentType = this.step.getContentType();
	final String sseAlgorithm = this.step.getSseAlgorithm();
	final S3ClientOptions s3ClientOptions = this.step.createS3ClientOptions();
	final EnvVars envVars = this.getContext().get(EnvVars.class);

	if (this.step.getMetadatas() != null && this.step.getMetadatas().length != 0) {
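		// Metadata entries are expected in "key:value" form; anything else is ignored.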
		for (String metadata : this.step.getMetadatas()) {
			if (metadata.split(":").length == 2) {
				metadatas.put(metadata.split(":")[0], metadata.split(":")[1]);
			}
		}
	}

	Preconditions.checkArgument(fromBucket != null && !fromBucket.isEmpty(), "From bucket must not be null or empty");
	Preconditions.checkArgument(fromPath != null && !fromPath.isEmpty(), "From path must not be null or empty");
	Preconditions.checkArgument(toBucket != null && !toBucket.isEmpty(), "To bucket must not be null or empty");
	Preconditions.checkArgument(toPath != null && !toPath.isEmpty(), "To path must not be null or empty");

	TaskListener listener = Execution.this.getContext().get(TaskListener.class);
	listener.getLogger().format("Copying s3://%s/%s to s3://%s/%s%n", fromBucket, fromPath, toBucket, toPath);

	CopyObjectRequest request = new CopyObjectRequest(fromBucket, fromPath, toBucket, toPath);

	// Add metadata
	if (metadatas.size() > 0 || (cacheControl != null && !cacheControl.isEmpty()) || (contentType != null && !contentType.isEmpty()) || (sseAlgorithm != null && !sseAlgorithm.isEmpty())) {
		ObjectMetadata metas = new ObjectMetadata();
		if (metadatas.size() > 0) {
			metas.setUserMetadata(metadatas);
		}
		if (cacheControl != null && !cacheControl.isEmpty()) {
			metas.setCacheControl(cacheControl);
		}
		if (contentType != null && !contentType.isEmpty()) {
			metas.setContentType(contentType);
		}
		if (sseAlgorithm != null && !sseAlgorithm.isEmpty()) {
			metas.setSSEAlgorithm(sseAlgorithm);
		}
		request.withNewObjectMetadata(metas);
	}

	// Add acl
	if (acl != null) {
		request.withCannedAccessControlList(acl);
	}

	// Add kms
	if (kmsId != null && !kmsId.isEmpty()) {
		listener.getLogger().format("Using KMS: %s%n", kmsId);
		request.withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(kmsId));
	}

	TransferManager mgr = TransferManagerBuilder.standard()
			.withS3Client(AWSClientFactory.create(s3ClientOptions.createAmazonS3ClientBuilder(), envVars))
			.build();
	try {
		final Copy copy = mgr.copy(request);
		copy.addProgressListener((ProgressListener) progressEvent -> {
			if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
				listener.getLogger().println("Finished: " + copy.getDescription());
			}
		});
		copy.waitForCompletion();
	}
	finally{
		mgr.shutdownNow();
	}

	listener.getLogger().println("Copy complete");
	return String.format("s3://%s/%s", toBucket, toPath);
}
 
Example #17
Source File: S3AFileSystem.java    From big-c with Apache License 2.0
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
 
Example #18
Source File: S3AFileSystem.java    From hadoop with Apache License 2.0
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}