Java Code Examples for com.amazonaws.services.s3.model.PutObjectRequest#setCannedAcl()

The following examples show how to use com.amazonaws.services.s3.model.PutObjectRequest#setCannedAcl(). Each example is taken from an open-source project; the source file and its license are noted above the code.
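
Before the project examples, here is a minimal self-contained sketch of the call itself; the bucket, key, and file names are placeholders, not taken from any of the projects below. setCannedAcl() attaches one of the predefined CannedAccessControlList grants to the request before it is sent. Note that buckets with ACLs disabled (Object Ownership set to "bucket owner enforced") reject requests that carry a canned ACL.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.File;

public class CannedAclExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Placeholder bucket, key, and file names.
        PutObjectRequest request =
            new PutObjectRequest("example-bucket", "docs/report.pdf", new File("report.pdf"));

        // PublicRead grants anonymous read access to the uploaded object.
        request.setCannedAcl(CannedAccessControlList.PublicRead);

        s3.putObject(request);
    }
}
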
Example 1
Source File: S3AFileSystem.java    From hadoop with Apache License 2.0
private void createEmptyObject(final String bucketName, final String objectName)
    throws AmazonClientException, AmazonServiceException {
  final InputStream im = new InputStream() {
    @Override
    public int read() throws IOException {
      return -1;
    }
  };

  final ObjectMetadata om = new ObjectMetadata();
  om.setContentLength(0L);
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
  putObjectRequest.setCannedAcl(cannedACL);
  s3.putObject(putObjectRequest);
  statistics.incrementWriteOps(1);
}
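
The cannedACL value passed above appears to be a CannedAccessControlList field that the file system resolves once at initialization. A sketch of how such a field might be derived from a configuration string; this is illustrative only, not the actual S3AFileSystem code:

import com.amazonaws.services.s3.model.CannedAccessControlList;

final class CannedAclResolver {
    // Maps a configured name such as "PublicRead" or "BucketOwnerFullControl"
    // to the matching enum constant; unknown names raise IllegalArgumentException.
    static CannedAccessControlList resolve(String configured) {
        if (configured == null || configured.isEmpty()) {
            return null; // assumed default: leave the request without an explicit ACL
        }
        return CannedAccessControlList.valueOf(configured);
    }
}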
 
Example 2
Source File: UploadThread.java    From aws-photosharing-example with Apache License 2.0
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);

    meta_data.setContentLength(p_size);

    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult res = s3Client.putObject(putObjectRequest);       
}
 
Example 3
Source File: S3AFastOutputStream.java    From hadoop with Apache License 2.0
private void putObject() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket,
        key);
  }
  final ObjectMetadata om = createDefaultMetadata();
  om.setContentLength(buffer.size());
  final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
      new ByteArrayInputStream(buffer.toByteArray()), om);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setGeneralProgressListener(progressListener);
  ListenableFuture<PutObjectResult> putObjectResult =
      executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
          return client.putObject(putObjectRequest);
        }
      });
  //wait for completion
  try {
    putObjectResult.get();
  } catch (InterruptedException ie) {
    LOG.warn("Interrupted object upload:" + ie, ie);
    Thread.currentThread().interrupt();
  } catch (ExecutionException ee) {
    throw new IOException("Regular upload failed", ee.getCause());
  }
}
 
Example 4
Source File: S3BroadcastManager.java    From kickflip-android-sdk with Apache License 2.0
public void queueUpload(final String bucket, final String key, final File file, boolean lastUpload) {
    if (VERBOSE) Log.i(TAG, "Queueing upload " + key);

    final PutObjectRequest por = new PutObjectRequest(bucket, key, file);
    por.setGeneralProgressListener(new ProgressListener() {
        final String url = "https://" + bucket + ".s3.amazonaws.com/" + key;
        private long uploadStartTime;

        @Override
        public void progressChanged(com.amazonaws.event.ProgressEvent progressEvent) {
            try {
                if (progressEvent.getEventCode() == ProgressEvent.STARTED_EVENT_CODE) {
                    uploadStartTime = System.currentTimeMillis();
                } else if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE) {
                    long uploadDurationMillis = System.currentTimeMillis() - uploadStartTime;
                    int bytesPerSecond = (int) (file.length() / (uploadDurationMillis / 1000.0));
                    if (VERBOSE)
                        Log.i(TAG, "Uploaded " + file.length() / 1000.0 + " KB in " + (uploadDurationMillis) + "ms (" + bytesPerSecond / 1000.0 + " KBps)");
                    mBroadcaster.onS3UploadComplete(new S3UploadEvent(file, url, bytesPerSecond));
                } else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
                    Log.w(TAG, "Upload failed for " + url);
                }
            } catch (Exception excp) {
                Log.e(TAG, "ProgressListener error");
                excp.printStackTrace();
            }
        }
    });
    por.setCannedAcl(CannedAccessControlList.PublicRead);
    for (WeakReference<S3RequestInterceptor> ref : mInterceptors) {
        S3RequestInterceptor interceptor = ref.get();
        if (interceptor != null) {
            interceptor.interceptRequest(por);
        }
    }
    mQueue.add(new Pair<>(por, lastUpload));
}
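
The integer event codes checked above are deprecated in later v1 SDK releases; an equivalent completion check using ProgressEventType might look like this (a sketch, not part of the kickflip project):

// ProgressListener has a single abstract method, so a lambda suffices.
por.setGeneralProgressListener(progressEvent -> {
    if (progressEvent.getEventType() == com.amazonaws.event.ProgressEventType.TRANSFER_COMPLETED_EVENT) {
        // Upload finished: compute throughput, notify listeners, etc.
    }
});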
 
Example 5
Source File: BlockRepoSaver.java    From jelectrum with MIT License
private void saveFile(String key, ByteString data, int cache_seconds)
{
  ObjectMetadata omd = new ObjectMetadata();
  omd.setCacheControl("max-age=" + cache_seconds);
  omd.setContentLength(data.size());


  PutObjectRequest put = new PutObjectRequest(bucket, key, data.newInput(), omd);
  put.setCannedAcl(CannedAccessControlList.PublicRead);
  put.setStorageClass(com.amazonaws.services.s3.model.StorageClass.StandardInfrequentAccess.toString());

  s3.putObject(put);

}
 
Example 6
Source File: S3BlobContainer.java    From crate with Apache License 2.0
/**
 * Uploads a blob using a single upload request
 */
void executeSingleUpload(final S3BlobStore blobStore,
                         final String blobName,
                         final InputStream input,
                         final long blobSize) throws IOException {

    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }

    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());

    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
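
PutObjectRequest also offers fluent with*() setters, so the same request can be assembled in a single expression. A brief equivalent of the request-building lines above, reusing the names from this example:

final PutObjectRequest putRequest =
    new PutObjectRequest(blobStore.bucket(), blobName, input, md)
        .withStorageClass(blobStore.getStorageClass())
        .withCannedAcl(blobStore.getCannedACL());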
 
Example 7
Source File: S3AOutputStream.java    From hadoop with Apache License 2.0
@Override
public synchronized void close() throws IOException {
  if (closed) {
    return;
  }

  backupStream.close();
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
  }


  try {
    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
      om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    Upload upload = transfers.upload(putObjectRequest);

    ProgressableProgressListener listener = 
      new ProgressableProgressListener(upload, progress, statistics);
    upload.addProgressListener(listener);

    upload.waitForUploadResult();

    long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
    if (statistics != null && delta != 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
      }
      statistics.incrementBytesWritten(delta);
    }

    // This will delete unnecessary fake parent directories
    fs.finishedWrite(key);
  } catch (InterruptedException e) {
    throw new IOException(e);
  } finally {
    if (!backupFile.delete()) {
      LOG.warn("Could not delete temporary s3a file: {}", backupFile);
    }
    super.close();
    closed = true;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("OutputStream for key '" + key + "' upload complete");
  }
}
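
Here transfers appears to be a TransferManager, which runs the upload (multipart when the file is large) behind the Upload handle. A minimal sketch of constructing one; the part-size value is illustrative, not taken from the hadoop code:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

class TransferManagerFactory {
    // Wraps an existing client; uploads submitted through the manager may be
    // split into parts and executed on an internal thread pool.
    static TransferManager buildTransfers(AmazonS3 s3) {
        return TransferManagerBuilder.standard()
            .withS3Client(s3)
            .withMinimumUploadPartSize(8L * 1024 * 1024) // illustrative 8 MB part size
            .build();
    }
}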
 
Example 8
Source File: S3AFileSystem.java    From hadoop with Apache License 2.0
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
 
Example 9
Source File: Write.java    From openbd-core with GNU General Public License v3.0
private void writeData( AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata, StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl, String aes256key, Map<String, String> customheaders ) throws Exception {
	if ( mimetype == null ) {
		if ( data.getDataType() == cfData.CFBINARYDATA )
			mimetype = "application/unknown";
		else if ( cfData.isSimpleValue( data ) )
			mimetype = "text/plain";
		else
			mimetype = "application/json";

		// Check to see if the mime type is in the metadata
		if ( metadata != null && metadata.containsKey( "Content-Type" ) )
			mimetype = metadata.get( "Content-Type" );
	}


	InputStream ios = null;
	long size = 0;
	if ( data.getDataType() == cfData.CFSTRINGDATA ) {
		// Use the encoded byte length so ContentLength matches what is streamed;
		// String#length() can differ from the byte count for non-ASCII text.
		byte[] bytes = data.getString().getBytes();
		ios = new java.io.ByteArrayInputStream( bytes );
		size = bytes.length;
	} else if ( data.getDataType() == cfData.CFBINARYDATA ) {
		ios = new java.io.ByteArrayInputStream( ( (cfBinaryData) data ).getByteArray() );
		size = ( (cfBinaryData) data ).getLength();
	} else {
		serializejson json = new serializejson();
		StringBuilder out = new StringBuilder();
		json.encodeJSON( out, data, false, CaseType.MAINTAIN, DateType.LONG );
		mimetype = "application/json";
		byte[] bytes = out.toString().getBytes();
		ios = new java.io.ByteArrayInputStream( bytes );
		size = bytes.length;
	}


	// Setup the object data
	ObjectMetadata omd = new ObjectMetadata();
	if ( metadata != null )
		omd.setUserMetadata( metadata );

	omd.setContentType( mimetype );
	omd.setContentLength( size );

	AmazonS3 s3Client = getAmazonS3( amazonKey );

	// Let us run around the number of attempts
	int attempts = 0;
	while ( attempts < retry ) {
		try {

			PutObjectRequest por = new PutObjectRequest( bucket, key, ios, omd );
			por.setStorageClass( storage );

			if ( aes256key != null && !aes256key.isEmpty() )
				por.setSSECustomerKey( new SSECustomerKey( aes256key ) );

			if ( acl != null && !acl.isEmpty() )
				por.setCannedAcl( amazonKey.getAmazonCannedAcl( acl ) );

			if ( customheaders != null && !customheaders.isEmpty() ) {
				Iterator<String> it = customheaders.keySet().iterator();
				while ( it.hasNext() ) {
					String k = it.next();
					por.putCustomRequestHeader( k, customheaders.get( k ) );
				}
			}

			s3Client.putObject( por );
			break;

		} catch ( Exception e ) {
			cfEngine.log( "Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt=" + ( attempts + 1 ) + "; exception=" + e.getMessage() + ")" );
			attempts++;

			if ( attempts == retry )
				throw e;
			else
				Thread.sleep( retryseconds * 1000 );
		}
	}
}
 
Example 10
Source File: Write.java    From openbd-core with GNU General Public License v3.0
private void writeFile( AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata, StorageClass storage, String localpath, int retry, int retryseconds, boolean deletefile, boolean background, String callback, String callbackdata, String appname, String acl, String aes256key, Map<String, String> customheaders ) throws Exception {
	File localFile = new File( localpath );
	if ( !localFile.isFile() )
		throw new Exception( "The file specified does not exist: " + localpath );

	// Push this to the background loader to handle and return immediately
	if ( background ) {
		BackgroundUploader.acceptFile( amazonKey, bucket, key, metadata, storage, localpath, retry, retryseconds, deletefile, callback, callbackdata, appname, acl, aes256key, customheaders );
		return;
	}


	// Setup the object data
	ObjectMetadata omd = new ObjectMetadata();
	if ( metadata != null )
		omd.setUserMetadata( metadata );

	AmazonS3 s3Client = getAmazonS3( amazonKey );

	// Let us run around the number of attempts
	int attempts = 0;
	while ( attempts < retry ) {
		try {

			PutObjectRequest por = new PutObjectRequest( bucket, key, localFile );
			por.setMetadata( omd );
			por.setStorageClass( storage );

			if ( acl != null && !acl.isEmpty() )
				por.setCannedAcl( amazonKey.getAmazonCannedAcl( acl ) );

			if ( aes256key != null && !aes256key.isEmpty() )
				por.setSSECustomerKey( new SSECustomerKey( aes256key ) );

			if ( customheaders != null && !customheaders.isEmpty() ) {
				Iterator<String> it = customheaders.keySet().iterator();
				while ( it.hasNext() ) {
					String k = it.next();
					por.putCustomRequestHeader( k, customheaders.get( k ) );
				}
			}

			s3Client.putObject( por );
			break;

		} catch ( Exception e ) {
			cfEngine.log( "Failed: AmazonS3Write(bucket=" + bucket + "key=" + key + "; file=" + localFile + "; attempt=" + ( attempts + 1 ) + "; exception=" + e.getMessage() + ")" );
			attempts++;

			if ( attempts == retry )
				throw e;
			else
				Thread.sleep( retryseconds * 1000 );
		}
	}


	// delete the file now that it is a success
	if ( deletefile )
		localFile.delete();
}
 
Example 11
Source File: S3Upload.java    From nfscan with MIT License
/**
 * Uploads a new object to the specified Amazon S3 bucket.
 *
 * @param bucketName   bucket name
 * @param key          object key
 * @param file         file you want to upload
 * @param willBePublic whether or not this file should be accessible publicly
 * @throws AmazonClientException  If any errors are encountered in the client while making the
 *                                request or handling the response.
 * @throws AmazonServiceException If any errors occurred in Amazon S3 while processing the
 *                                request.
 */
protected void startUpload(String bucketName, String key, File file, boolean willBePublic) throws AmazonClientException, AmazonServiceException {
    PutObjectRequest putObj = new PutObjectRequest(bucketName, key, file);
    if (willBePublic) {
        putObj.setCannedAcl(CannedAccessControlList.PublicRead);
    }
    amazonS3.putObject(putObj);
}