com.amazonaws.AmazonClientException Java Examples
The following examples show how to use com.amazonaws.AmazonClientException.
Each example is taken from an open-source project; the source file, project, and license are noted above the code.
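Before the project examples, here is a minimal sketch of the usual handling pattern, not drawn from any of the projects below. In the AWS SDK for Java 1.x, AmazonServiceException extends AmazonClientException, so service-side rejections can be caught separately from purely client-side failures such as network errors; the bucket and key names here are placeholders.

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class AmazonClientExceptionDemo {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        try {
            // Placeholder bucket/key, used only to trigger a request.
            s3.getObjectMetadata("example-bucket", "example-key");
        } catch (AmazonServiceException ase) {
            // The request reached AWS but was rejected; catch the subclass first.
            System.err.println("Service error " + ase.getStatusCode() + ": " + ase.getErrorCode());
        } catch (AmazonClientException ace) {
            // The request never produced a response (e.g. connectivity or client-side problems).
            System.err.println("Client error: " + ace.getMessage());
        }
    }
}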
Example #1
Source File: GlueTestClientFactory.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
private static ClientConfiguration createGatewayTimeoutRetryableConfiguration() {
    ClientConfiguration retryableConfig = new ClientConfiguration();
    RetryPolicy.RetryCondition retryCondition = new PredefinedRetryPolicies.SDKDefaultRetryCondition() {
        @Override
        public boolean shouldRetry(AmazonWebServiceRequest originalRequest, AmazonClientException exception,
                int retriesAttempted) {
            if (super.shouldRetry(originalRequest, exception, retriesAttempted)) {
                return true;
            }
            if (exception != null && exception instanceof AmazonServiceException) {
                AmazonServiceException ase = (AmazonServiceException) exception;
                if (ase.getStatusCode() == SC_GATEWAY_TIMEOUT) {
                    return true;
                }
            }
            return false;
        }
    };
    RetryPolicy retryPolicy = new RetryPolicy(retryCondition, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY,
            PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY, true);
    retryableConfig.setRetryPolicy(retryPolicy);
    return retryableConfig;
}
Example #2
Source File: S3WritableByteChannel.java From beam with Apache License 2.0
private void flush() throws IOException {
    uploadBuffer.flip();
    ByteArrayInputStream inputStream = new ByteArrayInputStream(uploadBuffer.array());

    UploadPartRequest request =
        new UploadPartRequest()
            .withBucketName(path.getBucket())
            .withKey(path.getKey())
            .withUploadId(uploadId)
            .withPartNumber(partNumber++)
            .withPartSize(uploadBuffer.remaining())
            .withMD5Digest(Base64.encodeAsString(md5.digest()))
            .withInputStream(inputStream);
    request.setSSECustomerKey(options.getSSECustomerKey());

    UploadPartResult result;
    try {
        result = amazonS3.uploadPart(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadBuffer.clear();
    md5.reset();
    eTags.add(result.getPartETag());
}
Example #3
Source File: COSBlockOutputStream.java From stocator with Apache License 2.0
/**
 * This completes a multipart upload. Sometimes it fails; here retries are
 * handled to avoid losing all data on a transient failure.
 *
 * @param partETags list of partial uploads
 * @throws IOException on any problem
 */
private CompleteMultipartUploadResult complete(List<PartETag> partETags) throws IOException {
    int retryCount = 0;
    AmazonClientException lastException;
    String operation = String.format("Completing multi-part upload for key '%s',"
            + " id '%s' with %s partitions ", key, uploadId, partETags.size());
    do {
        try {
            LOG.debug(operation);
            return writeOperationHelper.completeMultipartUpload(uploadId, partETags);
        } catch (AmazonClientException e) {
            lastException = e;
        }
    } while (shouldRetry(operation, lastException, retryCount++));
    // this point is only reached if the operation failed more than
    // the allowed retry count
    throw translateException(operation, key, lastException);
}
Example #4
Source File: ElbResource.java From Baragon with Apache License 2.0
@GET
@NoAuth
@Path("/{elbName}/instances")
public List<InstanceState> getInstancesByElb(@PathParam("elbName") String elbName) {
    if (config.isPresent()) {
        try {
            DescribeInstanceHealthRequest request = new DescribeInstanceHealthRequest(elbName);
            DescribeInstanceHealthResult result = elbClient.describeInstanceHealth(request);
            return result.getInstanceStates();
        } catch (AmazonClientException exn) {
            throw new BaragonWebException(String.format("AWS Client Error %s", exn));
        }
    } else {
        throw new BaragonWebException("ElbSync and related actions are not currently enabled");
    }
}
Example #5
Source File: PrimitiveS3OperationHandler.java From CloverETL-Engine with GNU Lesser General Public License v2.1
/**
 * Deletes a regular file.
 */
@Override
public boolean deleteFile(URI target) throws IOException {
    target = target.normalize();
    PooledS3Connection connection = null;
    try {
        connection = connect(target);
        AmazonS3 service = connection.getService();
        String[] path = getPath(target);
        try {
            service.deleteObject(path[0], path[1]);
            return true;
        } catch (AmazonClientException e) {
            throw new IOException(e);
        }
    } finally {
        disconnect(connection);
    }
}
Example #6
Source File: AWSHelper.java From attic-stratos with Apache License 2.0
/**
 * Deletes the load balancer with the name provided. Useful when a cluster,
 * with which this load balancer was associated, is removed.
 *
 * @param loadBalancerName to be deleted
 * @param region of the load balancer
 */
public void deleteLoadBalancer(String loadBalancerName, String region) {
    log.info("Deleting load balancer " + loadBalancerName);
    DeleteLoadBalancerRequest deleteLoadBalancerRequest = new DeleteLoadBalancerRequest();
    deleteLoadBalancerRequest.setLoadBalancerName(loadBalancerName);
    try {
        elbClient.setEndpoint(String.format(Constants.ELB_ENDPOINT_URL_FORMAT, region));
        elbClient.deleteLoadBalancer(deleteLoadBalancerRequest);
        log.info("Deleted load balancer " + loadBalancerName);
    } catch (AmazonClientException e) {
        log.error("Could not delete load balancer : " + loadBalancerName, e);
    }
}
Example #7
Source File: S3ClientHelper.java From crate with Apache License 2.0
public AWSCredentials getCredentials() {
    try {
        return super.getCredentials();
    } catch (AmazonClientException ace) {
        // allow for anonymous access
        return ANONYMOUS_CREDENTIALS;
    }
}
Example #8
Source File: InstanceProfileCredentialsProvider.java From bazel with Apache License 2.0
/**
 * {@inheritDoc}
 *
 * @throws AmazonClientException if {@link SDKGlobalConfiguration#isEc2MetadataDisabled()} is true
 */
@Override
public AWSCredentials getCredentials() {
    if (SDKGlobalConfiguration.isEc2MetadataDisabled()) {
        throw new AmazonClientException("AWS_EC2_METADATA_DISABLED is set to true, not loading credentials from EC2 Instance "
                + "Metadata service");
    }
    AWSCredentials creds = credentialsFetcher.getCredentials();
    shouldRefresh = true;
    return creds;
}
Example #9
Source File: SqsMessageQueue.java From Cheddar with Apache License 2.0
@Override
public void sendDelayedMessage(final T message, final int delaySeconds) throws MessageSendException {
    try {
        sqsQueueResource.sendDelayedMessage(toSqsMessageBody(message), delaySeconds);
    } catch (final AmazonClientException e) {
        throw new MessageSendException("Unable to send message on SQS queue:["
                + sqsQueueResource.getQueueName() + "]", e);
    }
}
Example #10
Source File: S3WritableByteChannel.java From beam with Apache License 2.0
S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
        throws IOException {
    this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
    this.options = checkNotNull(options);
    this.path = checkNotNull(path, "path");
    checkArgument(
        atMostOne(
            options.getSSECustomerKey() != null,
            options.getSSEAlgorithm() != null,
            options.getSSEAwsKeyManagementParams() != null),
        "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
            + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
    // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
    checkArgument(
        options.getS3UploadBufferSizeBytes()
            >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
        "S3UploadBufferSizeBytes must be at least %s bytes",
        S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
    this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
    eTags = new ArrayList<>();

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    if (options.getSSEAlgorithm() != null) {
        objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
    }

    InitiateMultipartUploadRequest request =
        new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
            .withStorageClass(options.getS3StorageClass())
            .withObjectMetadata(objectMetadata);
    request.setSSECustomerKey(options.getSSECustomerKey());
    request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());

    InitiateMultipartUploadResult result;
    try {
        result = amazonS3.initiateMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadId = result.getUploadId();
}
Example #11
Source File: AmazonS3Storage.java From thunderbit with GNU Affero General Public License v3.0
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}
Example #12
Source File: Ec2MetadataRegionProvider.java From spring-cloud-aws with Apache License 2.0
protected Region getCurrentRegion() {
    try {
        InstanceInfo instanceInfo = EC2MetadataUtils.getInstanceInfo();
        return instanceInfo != null && instanceInfo.getRegion() != null
                ? RegionUtils.getRegion(instanceInfo.getRegion())
                : null;
    } catch (AmazonClientException e) {
        return null;
    }
}
Example #13
Source File: PrimitiveS3OperationHandler.java From CloverETL-Engine with GNU Lesser General Public License v2.1
/**
 * Creates a regular file.
 * Fails if the parent directory does not exist.
 */
@Override
public boolean createFile(URI target) throws IOException {
    target = target.normalize();
    PooledS3Connection connection = null;
    try {
        connection = connect(target);
        AmazonS3 service = connection.getService();
        URI parentUri = URIUtils.getParentURI(target);
        if (parentUri != null) {
            Info parentInfo = info(parentUri, connection);
            if (parentInfo == null) {
                throw new IOException("Parent dir does not exist");
            }
        }
        String[] path = getPath(target);
        if (path.length == 1) {
            throw new IOException("Cannot write to the root directory");
        }
        try {
            S3Utils.createEmptyObject(service, path[0], path[1]);
            return true;
        } catch (AmazonClientException e) {
            throw S3Utils.getIOException(e);
        }
    } finally {
        disconnect(connection);
    }
}
Example #14
Source File: UsergridAwsCredentials.java From usergrid with Apache License 2.0
public String getAWSSecretKeyJson(Map<String, Object> jsonObject) {
    String secretKey = (String) jsonObject.get(SDKGlobalConfiguration.SECRET_KEY_ENV_VAR);
    if (StringUtils.isEmpty(secretKey)) {
        secretKey = (String) jsonObject.get(SDKGlobalConfiguration.ALTERNATE_SECRET_KEY_ENV_VAR);
    }
    if (StringUtils.isEmpty(secretKey)) {
        throw new AmazonClientException("Could not get aws secret key from json object.");
    }
    return StringUtils.trim(secretKey);
}
Example #15
Source File: MockPutCloudWatchMetric.java From localization_nifi with Apache License 2.0
protected PutMetricDataResult putMetricData(PutMetricDataRequest metricDataRequest) throws AmazonClientException {
    putMetricDataCallCount++;
    actualNamespace = metricDataRequest.getNamespace();
    actualMetricData = metricDataRequest.getMetricData();

    if (throwException != null) {
        throw throwException;
    }
    return result;
}
Example #16
Source File: AmazonS3Mock.java From Scribengin with GNU Affero General Public License v3.0
@Override
public void setBucketNotificationConfiguration(String bucketName,
        BucketNotificationConfiguration bucketNotificationConfiguration)
        throws AmazonClientException, AmazonServiceException {
    // TODO Auto-generated method stub
}
Example #17
Source File: ElbManager.java From Baragon with Apache License 2.0
public AgentCheckInResponse attemptRemoveAgent(BaragonAgentMetadata agent, Optional<BaragonGroup> group,
        String groupName, boolean isStatusCheck) throws AmazonClientException {
    TrafficSourceState state = TrafficSourceState.DONE;
    long maxWaitTime = 0L;
    Optional<String> maybeExceptions = Optional.absent();
    if (isElbEnabledAgent(agent, group, groupName)) {
        boolean anyCompatible = false;
        StringBuilder message = new StringBuilder();
        for (TrafficSource source : group.get().getTrafficSources()) {
            if (source.getRegisterBy() == RegisterBy.PRIVATE_IP && !agent.getEc2().getPrivateIp().isPresent()) {
                message.append(String.format("No private ip present to register by for source %s ", source.getName()));
                continue;
            } else if (source.getRegisterBy() == RegisterBy.INSTANCE_ID && !agent.getEc2().getInstanceId().isPresent()) {
                message.append(String.format("No instance id present to register by for source %s ", source.getName()));
                continue;
            }
            anyCompatible = true;

            String id = source.getRegisterBy() == RegisterBy.PRIVATE_IP
                    ? agent.getEc2().getPrivateIp().get()
                    : agent.getEc2().getInstanceId().get();
            Instance instance = source.getRegisterBy() == RegisterBy.PRIVATE_IP
                    ? null
                    : new Instance(agent.getEc2().getInstanceId().get());
            AgentCheckInResponse response = isStatusCheck
                    ? getLoadBalancer(source.getType()).checkRemovedInstance(id, source.getName(), agent.getAgentId())
                    : getLoadBalancer(source.getType()).removeInstance(instance, id, source.getName(), agent.getAgentId());

            if (response.getState().ordinal() > state.ordinal()) {
                state = response.getState();
            }
            if (response.getExceptionMessage().isPresent()) {
                maybeExceptions = Optional.of(maybeExceptions.or("") + response.getExceptionMessage().get() + "\n");
            }
            if (response.getWaitTime() > maxWaitTime) {
                maxWaitTime = response.getWaitTime();
            }
        }
        if (!anyCompatible) {
            return new AgentCheckInResponse(TrafficSourceState.ERROR, Optional.of(message.toString()), maxWaitTime);
        }
    }
    return new AgentCheckInResponse(state, maybeExceptions, maxWaitTime);
}
Example #18
Source File: AbstractServiceUnavailableOperationRetrier.java From spring-data-simpledb with MIT License
/**
 * @return the recognized exception, or null; exceptions that are not recognized are rethrown
 */
private AmazonClientException tryExecute() {
    try {
        execute();
    } catch (AmazonClientException clientException) {
        if (isServiceUnavailableException(clientException)) {
            return clientException;
        }
        throw clientException;
    }
    return null;
}
Example #19
Source File: S3AFileSystem.java From big-c with Apache License 2.0
private void createFakeDirectory(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    if (!objectName.endsWith("/")) {
        createEmptyObject(bucketName, objectName + "/");
    } else {
        createEmptyObject(bucketName, objectName);
    }
}
Example #20
Source File: MockKinesisClient.java From presto-kinesis with Apache License 2.0
@Override
public GetShardIteratorResult getShardIterator(GetShardIteratorRequest getShardIteratorRequest)
        throws AmazonServiceException, AmazonClientException {
    ShardIterator iter = ShardIterator.fromStreamAndShard(getShardIteratorRequest.getStreamName(),
            getShardIteratorRequest.getShardId());
    if (iter != null) {
        InternalStream theStream = this.getStream(iter.streamId);
        if (theStream != null) {
            String seqAsString = getShardIteratorRequest.getStartingSequenceNumber();
            if (seqAsString != null && !seqAsString.isEmpty()
                    && getShardIteratorRequest.getShardIteratorType().equals("AFTER_SEQUENCE_NUMBER")) {
                int sequence = Integer.parseInt(seqAsString);
                iter.recordIndex = sequence + 1;
            } else {
                iter.recordIndex = 100;
            }

            GetShardIteratorResult result = new GetShardIteratorResult();
            return result.withShardIterator(iter.makeString());
        } else {
            throw new AmazonClientException("Unknown stream or bad shard iterator!");
        }
    } else {
        throw new AmazonClientException("Bad stream or shard iterator!");
    }
}
Example #21
Source File: AbstractKinesisOutputOperator.java From attic-apex-malhar with Apache License 2.0
private void addRecord(T tuple) {
    try {
        Pair<String, V> keyValue = tupleToKeyValue(tuple);
        PutRecordsRequestEntry putRecordsEntry = new PutRecordsRequestEntry();
        putRecordsEntry.setData(ByteBuffer.wrap(getRecord(keyValue.second)));
        putRecordsEntry.setPartitionKey(keyValue.first);
        putRecordsRequestEntryList.add(putRecordsEntry);
    } catch (AmazonClientException e) {
        throw new RuntimeException(e);
    }
}
Example #22
Source File: S3BlobContainer.java From crate with Apache License 2.0
/**
 * Uploads a blob using a single upload request
 */
void executeSingleUpload(final S3BlobStore blobStore,
                         final String blobName,
                         final InputStream input,
                         final long blobSize) throws IOException {
    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }

    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());

    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
Example #23
Source File: AwsAutoScalingUtil.java From vertx-deploy-tools with Apache License 2.0
public Observable<AwsState> pollForInstanceState() {
    try {
        return Observable.from(asyncClient.describeAutoScalingInstancesAsync(
                new DescribeAutoScalingInstancesRequest().withInstanceIds(instanceId)))
                .flatMap(result -> {
                    Optional<String> optState = result.getAutoScalingInstances().stream()
                            .filter(i -> i.getInstanceId().equals(instanceId))
                            .map(AutoScalingInstanceDetails::getLifecycleState)
                            .findFirst();
                    return just(optState.map(AwsState::map).orElse(AwsState.UNKNOWN));
                });
    } catch (AmazonClientException e) {
        LOG.error(LogConstants.ERROR_EXECUTING_REQUEST, e);
        throw new AwsException(e);
    }
}
Example #24
Source File: AWSHelper.java From attic-stratos with Apache License 2.0
/**
 * Checks if the security group is already present in the given region. If
 * yes, then returns its group id. If not present, then returns null.
 *
 * @param groupName to be checked for presence.
 * @param region
 * @return id of the security group
 */
public String getSecurityGroupId(String groupName, String region) {
    if (groupName == null || groupName.isEmpty()) {
        return null;
    }
    DescribeSecurityGroupsRequest describeSecurityGroupsRequest = new DescribeSecurityGroupsRequest();
    if (AWSExtensionContext.getInstance().isOperatingInVPC()) {
        if (getVpcIds().size() > 0) {
            // vpc id filter
            Set<Filter> filters = getFilters(getVpcIds().iterator().next(), lbSecurityGroupName);
            describeSecurityGroupsRequest.setFilters(filters);
        } else {
            List<String> groupNames = new ArrayList<String>();
            groupNames.add(groupName);
            describeSecurityGroupsRequest.setGroupNames(groupNames);
        }
    }
    try {
        ec2Client.setEndpoint(String.format(Constants.EC2_ENDPOINT_URL_FORMAT, region));
        DescribeSecurityGroupsResult describeSecurityGroupsResult =
                ec2Client.describeSecurityGroups(describeSecurityGroupsRequest);
        List<SecurityGroup> securityGroups = describeSecurityGroupsResult.getSecurityGroups();
        if (securityGroups != null && securityGroups.size() > 0) {
            return securityGroups.get(0).getGroupId();
        } else {
            log.warn("Could not find security group id for group " + groupName);
        }
    } catch (AmazonClientException e) {
        log.debug("Could not describe security groups.", e);
    }
    return null;
}
Example #25
Source File: S3Client.java From Scribengin with GNU Affero General Public License v3.0
public void deleteBucket(String bucketName, boolean recursive) throws AmazonClientException, AmazonServiceException {
    if (recursive) {
        deleteKeyWithPrefix(bucketName, "");
    }
    DeleteBucketRequest request = new DeleteBucketRequest(bucketName);
    s3Client.deleteBucket(request);
}
Example #26
Source File: FailingAmazonDynamoDBClient.java From dynamodb-transactions with Apache License 2.0
@Override
public UpdateItemResult updateItem(UpdateItemRequest updateItemRequest)
        throws AmazonServiceException, AmazonClientException {
    if (requestsToFail.contains(updateItemRequest)) {
        throw new FailedYourRequestException();
    }
    return super.updateItem(updateItemRequest);
}
Example #27
Source File: TransactionDynamoDBFacade.java From dynamodb-transactions with Apache License 2.0
@Override
public BatchGetItemResult batchGetItem(
        Map<String, KeysAndAttributes> requestItems,
        String returnConsumedCapacity) throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Use the underlying client instance instead");
}
Example #28
Source File: AmazonSNSStub.java From aws-java-sdk-stubs with Apache License 2.0
@Override
public CreatePlatformApplicationResult createPlatformApplication(
        final CreatePlatformApplicationRequest createPlatformApplicationRequest)
        throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException();
}
Example #29
Source File: CreateSpeechGetRequestMarshaller.java From ivona-speechcloud-sdk-java with Apache License 2.0
public Request<CreateSpeechRequest> marshall(CreateSpeechRequest createSpeechRequest) {
    if (createSpeechRequest == null) {
        throw new AmazonClientException("null createSpeechRequest passed to marshall(...)");
    }

    Request<CreateSpeechRequest> request = new DefaultRequest<CreateSpeechRequest>(createSpeechRequest,
            IvonaSpeechCloudClient.SERVICE_NAME);
    setRequestParameters(request, createSpeechRequest);
    request.setHttpMethod(HttpMethodName.GET);
    request.setResourcePath(RESOURCE_PATH);

    return request;
}
Example #30
Source File: AmazonSQSMessagingClientWrapperTest.java From amazon-sqs-java-messaging-lib with Apache License 2.0
@Test(expected = JMSException.class)
public void testQueueExistsThrowAmazonClientException() throws JMSException {
    GetQueueUrlRequest getQueueUrlRequest = new GetQueueUrlRequest(QUEUE_NAME);
    doThrow(new AmazonClientException("ace"))
            .when(amazonSQSClient).getQueueUrl(eq(getQueueUrlRequest));

    wrapper.queueExists(QUEUE_NAME);
}