Java Code Examples for com.amazonaws.AmazonClientException

The following examples show how to use com.amazonaws.AmazonClientException. These examples are extracted from open source projects.
Example 1
Project: aws-glue-data-catalog-client-for-apache-hive-metastore   File: GlueTestClientFactory.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds a {@link ClientConfiguration} whose retry policy retries on all the SDK's
 * default retryable conditions and additionally on HTTP 504 (gateway timeout)
 * service errors.
 *
 * @return a client configuration with the gateway-timeout-aware retry policy installed
 */
private static ClientConfiguration createGatewayTimeoutRetryableConfiguration() {
  ClientConfiguration retryableConfig = new ClientConfiguration();
  RetryPolicy.RetryCondition retryCondition = new PredefinedRetryPolicies.SDKDefaultRetryCondition() {
    @Override
    public boolean shouldRetry(AmazonWebServiceRequest originalRequest, AmazonClientException exception,
                               int retriesAttempted) {
      // Defer to the SDK's default retry conditions first.
      if (super.shouldRetry(originalRequest, exception, retriesAttempted)) {
        return true;
      }
      // instanceof is null-safe, so no separate null check is needed.
      if (exception instanceof AmazonServiceException) {
        AmazonServiceException ase = (AmazonServiceException) exception;
        if (ase.getStatusCode() == SC_GATEWAY_TIMEOUT) {
          return true;
        }
      }
      return false;
    }
  };
  RetryPolicy retryPolicy = new RetryPolicy(retryCondition, PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY,
                                                   PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY, true);
  retryableConfig.setRetryPolicy(retryPolicy);
  return retryableConfig;
}
 
Example 2
Project: Baragon   File: ElbResource.java    License: Apache License 2.0 6 votes vote down vote up
@GET
@NoAuth
@Path("/{elbName}/instances")
public List<InstanceState> getInstancesByElb(@PathParam("elbName") String elbName) {
  // Guard clause: ELB endpoints are only usable when an ELB config is present.
  if (!config.isPresent()) {
    throw new BaragonWebException("ElbSync and related actions are not currently enabled");
  }
  try {
    DescribeInstanceHealthRequest healthRequest = new DescribeInstanceHealthRequest(elbName);
    DescribeInstanceHealthResult healthResult = elbClient.describeInstanceHealth(healthRequest);
    return healthResult.getInstanceStates();
  } catch (AmazonClientException exn) {
    throw new BaragonWebException(String.format("AWS Client Error %s", exn));
  }
}
 
Example 3
Project: stocator   File: COSBlockOutputStream.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * This completes a multipart upload. Sometimes it fails; here retries are
 * handled to avoid losing all data on a transient failure.
 *
 * @param partETags list of partial uploads
 * @return the result of the successful completion call
 * @throws IOException on any problem, translated from the last AWS exception
 */
private CompleteMultipartUploadResult complete(List<PartETag> partETags) throws IOException {
  int retryCount = 0;
  AmazonClientException lastException;
  String operation = String.format("Completing multi-part upload for key '%s',"
      + " id '%s' with %s partitions ",
      key, uploadId, partETags.size());
  do {
    try {
      LOG.debug(operation);
      // Success exits the retry loop immediately.
      return writeOperationHelper.completeMultipartUpload(uploadId, partETags);
    } catch (AmazonClientException e) {
      // Remember the failure; shouldRetry() below decides whether to loop again.
      lastException = e;
    }
  }
  while (shouldRetry(operation, lastException, retryCount++));
  // this point is only reached if the operation failed more than
  // the allowed retry count
  throw translateException(operation, key, lastException);
}
 
Example 4
Project: beam   File: S3WritableByteChannel.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Uploads the current buffer contents as the next part of the in-progress
 * multipart upload, then resets the buffer and MD5 digest for the next part.
 *
 * @throws IOException if the part upload fails (wraps the AWS client exception)
 */
private void flush() throws IOException {
  uploadBuffer.flip();
  ByteArrayInputStream inputStream = new ByteArrayInputStream(uploadBuffer.array());

  UploadPartRequest request =
      new UploadPartRequest()
          .withBucketName(path.getBucket())
          .withKey(path.getKey())
          .withUploadId(uploadId)
          .withPartNumber(partNumber++)
          .withPartSize(uploadBuffer.remaining())
          .withMD5Digest(Base64.encodeAsString(md5.digest()))
          .withInputStream(inputStream);
  request.setSSECustomerKey(options.getSSECustomerKey());

  UploadPartResult result;
  try {
    result = amazonS3.uploadPart(request);
  } catch (AmazonClientException e) {
    throw new IOException(e);
  }
  // Only reset local state after the part upload succeeded, so a failed
  // flush does not silently discard buffered data.
  uploadBuffer.clear();
  md5.reset();
  eTags.add(result.getPartETag());
}
 
Example 5
Project: attic-stratos   File: AWSHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Deletes the load balancer with the name provided. Useful when a cluster,
 * with which this load balancer was associated, is removed.
 *
 * @param loadBalancerName to be deleted
 * @param region           of the load balancer
 */
public void deleteLoadBalancer(String loadBalancerName, String region) {

    log.info("Deleting load balancer " + loadBalancerName);

    DeleteLoadBalancerRequest deleteLoadBalancerRequest = new DeleteLoadBalancerRequest();
    deleteLoadBalancerRequest.setLoadBalancerName(loadBalancerName);

    try {
        // Point the client at the region-specific ELB endpoint before deleting.
        elbClient.setEndpoint(String.format(
                Constants.ELB_ENDPOINT_URL_FORMAT, region));

        elbClient.deleteLoadBalancer(deleteLoadBalancerRequest);
        log.info("Deleted load balancer " + loadBalancerName);
    } catch (AmazonClientException e) {
        // Deliberately best-effort: a failed delete is logged, not rethrown.
        log.error("Could not delete load balancer : " + loadBalancerName, e);
    }
}
 
Example 6
Project: CloverETL-Engine   File: PrimitiveS3OperationHandler.java    License: GNU Lesser General Public License v2.1 6 votes vote down vote up
/**
 * Deletes a regular file.
 */
@Override
public boolean deleteFile(URI target) throws IOException {
	target = target.normalize();
	PooledS3Connection connection = null;
	try {
		connection = connect(target);
		AmazonS3 s3 = connection.getService();
		// getPath() splits the URI into { bucket, key }.
		String[] bucketAndKey = getPath(target);
		try {
			s3.deleteObject(bucketAndKey[0], bucketAndKey[1]);
		} catch (AmazonClientException e) {
			throw new IOException(e);
		}
		return true;
	} finally {
		// Always return the pooled connection, even on failure.
		disconnect(connection);
	}
}
 
Example 7
Project: beam   File: S3WritableByteChannel.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a writable channel backed by an S3 multipart upload, validating the
 * SSE options (at most one of SSE-C, SSE-S3, SSE-KMS) and the minimum upload
 * buffer size before initiating the upload.
 */
S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
    throws IOException {
  this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
  this.options = checkNotNull(options);
  this.path = checkNotNull(path, "path");
  // The three server-side-encryption modes are mutually exclusive.
  checkArgument(
      atMostOne(
          options.getSSECustomerKey() != null,
          options.getSSEAlgorithm() != null,
          options.getSSEAwsKeyManagementParams() != null),
      "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
          + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
  // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
  checkArgument(
      options.getS3UploadBufferSizeBytes()
          >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
      "S3UploadBufferSizeBytes must be at least %s bytes",
      S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
  this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
  eTags = new ArrayList<>();

  ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentType(contentType);
  if (options.getSSEAlgorithm() != null) {
    objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
  }
  InitiateMultipartUploadRequest request =
      new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
          .withStorageClass(options.getS3StorageClass())
          .withObjectMetadata(objectMetadata);
  request.setSSECustomerKey(options.getSSECustomerKey());
  request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());
  InitiateMultipartUploadResult result;
  try {
    result = amazonS3.initiateMultipartUpload(request);
  } catch (AmazonClientException e) {
    throw new IOException(e);
  }
  // Upload id identifies this multipart upload in all subsequent part requests.
  uploadId = result.getUploadId();
}
 
Example 8
Project: herd   File: S3DaoTest.java    License: Apache License 2.0 5 votes vote down vote up
// Verifies that an AmazonClientException thrown by listObjects is translated
// into an IllegalStateException with a descriptive message. The real
// S3Operations is swapped for a mock via reflection and restored afterwards.
@Test
public void testListDirectoryAssertHandleAmazonClientException()
{
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);

    try
    {
        String s3BucketName = "s3BucketName";
        String s3KeyPrefix = "s3KeyPrefix";

        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = new S3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(s3BucketName);
        s3FileTransferRequestParamsDto.setS3KeyPrefix(s3KeyPrefix);
        boolean ignoreZeroByteDirectoryMarkers = true;

        // Force the underlying S3 call to fail.
        when(mockS3Operations.listObjects(any(), any())).thenThrow(new AmazonClientException("message"));

        try
        {
            s3Dao.listDirectory(s3FileTransferRequestParamsDto, ignoreZeroByteDirectoryMarkers);
            fail();
        }
        catch (Exception e)
        {
            assertEquals(IllegalStateException.class, e.getClass());
            assertEquals("Failed to list keys with prefix \"s3KeyPrefix\" from bucket \"s3BucketName\". Reason: message", e.getMessage());
        }
    }
    finally
    {
        // Restore the real implementation so other tests are unaffected.
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
    }
}
 
Example 9
Project: thunderbit   File: AmazonS3Storage.java    License: GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Asynchronously uploads the file at {@code path} to S3 under {@code key}.
 * The returned promise completes when the transfer finishes and fails when
 * the transfer fails or the upload cannot be started.
 */
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
        // Fail the promise and release transfer threads; otherwise the caller's
        // future would never complete and the TransferManager would leak.
        transferManager.shutdownNow();
        promise.failure(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
        transferManager.shutdownNow();
        promise.failure(ace);
    }

    return F.Promise.wrap(promise.future());
}
 
Example 10
Project: crate   File: S3ClientHelper.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Returns credentials from the parent provider, falling back to anonymous
 * credentials when the parent cannot resolve any.
 */
public AWSCredentials getCredentials() {
    try {
        return super.getCredentials();
    } catch (AmazonClientException ace) {
        // allow for anonymous access
        return ANONYMOUS_CREDENTIALS;
    }
}
 
Example 11
Project: Cheddar   File: SqsMessageQueue.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Sends the given message to the SQS queue with the given visibility delay.
 *
 * @param message the message to serialize and send
 * @param delaySeconds delay before the message is delivered
 * @throws MessageSendException if the underlying SQS call fails
 */
@Override
public void sendDelayedMessage(final T message, final int delaySeconds) throws MessageSendException {
    try {
        sqsQueueResource.sendDelayedMessage(toSqsMessageBody(message), delaySeconds);
    } catch (final AmazonClientException e) {
        throw new MessageSendException("Unable to send message on SQS queue:[" + sqsQueueResource.getQueueName()
                + "]", e);
    }
}
 
Example 12
Project: spring-cloud-aws   File: AwsCloudEnvironmentCheckUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Detects whether the JVM is running on AWS by probing the EC2 instance
 * metadata service for an instance id; the result is cached after the
 * first call.
 */
public static boolean isRunningOnCloudEnvironment() {
	if (isCloudEnvironment == null) {
		try {
			isCloudEnvironment = EC2MetadataUtils
					.getData(EC2_METADATA_ROOT + "/instance-id", 1) != null;
		}
		catch (AmazonClientException e) {
			// Metadata service unreachable -> not running on AWS.
			isCloudEnvironment = false;
		}
	}
	return isCloudEnvironment;
}
 
Example 13
Project: reinvent2013-mobile-photo-share   File: DeviceAuthentication.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Deletes the specified UID from the identity table.
 * 
 * @param uid
 *            Unique device identifier
 * @throws DataAccessException
 *             if the DynamoDB delete call fails
 */
public void deleteDevice(String uid) throws DataAccessException {
    HashMap<String, AttributeValue> key = new HashMap<String, AttributeValue>();
    key.put(ATTRIBUTE_UID, new AttributeValue().withS(uid));

    DeleteItemRequest deleteItemRequest = new DeleteItemRequest()
            .withTableName(DEVICE_TABLE)
            .withKey(key);

    try {
        ddb.deleteItem(deleteItemRequest);
    } catch (AmazonClientException e) {
        throw new DataAccessException("Failed to delete device: " + uid, e);
    }
}
 
Example 14
Project: Scribengin   File: AmazonS3Mock.java    License: GNU Affero General Public License v3.0 5 votes vote down vote up
@Override
public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
  // Mock lookup: an object "exists" iff its key was registered for the bucket.
  List<String> bucketKeys = files.get(bucketName);
  if (bucketKeys == null || !bucketKeys.contains(key)) {
    return null;
  }
  return new S3Object();
}
 
Example 15
Project: spring-cloud-aws   File: Ec2MetadataRegionProvider.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolves the AWS region of the current EC2 instance from instance
 * metadata, or returns null when metadata is unavailable or carries
 * no region.
 */
protected Region getCurrentRegion() {
	try {
		InstanceInfo instanceInfo = EC2MetadataUtils.getInstanceInfo();
		return instanceInfo != null && instanceInfo.getRegion() != null
				? RegionUtils.getRegion(instanceInfo.getRegion()) : null;
	}
	catch (AmazonClientException e) {
		// Metadata lookup failed (e.g. not running on EC2).
		return null;
	}

}
 
Example 16
Project: CloverETL-Engine   File: PrimitiveS3OperationHandler.java    License: GNU Lesser General Public License v2.1 5 votes vote down vote up
/**
 * Creates a regular file.
 * Fails if the parent directory does not exist.
 *
 * @param target URI of the file to create; normalized before use
 * @return true when the empty object was created
 * @throws IOException if the parent is missing, the target is the bucket
 *         root, or the S3 call fails
 */
@Override
public boolean createFile(URI target) throws IOException {
	target = target.normalize();
	PooledS3Connection connection = null;
	try {
		connection = connect(target);
		AmazonS3 service = connection.getService();
		URI parentUri = URIUtils.getParentURI(target);
		if (parentUri != null) {
			// Enforce the documented precondition: parent must already exist.
			Info parentInfo = info(parentUri, connection);
			if (parentInfo == null) {
				throw new IOException("Parent dir does not exist");
			}
		}
		String[] path = getPath(target);
		if (path.length == 1) {
			// Only a bucket name was given, no object key.
			throw new IOException("Cannot write to the root directory");
		}
		try {
			S3Utils.createEmptyObject(service, path[0], path[1]);
			return true;
		} catch (AmazonClientException e) {
			throw S3Utils.getIOException(e);
		}
	} finally {
		// Always return the pooled connection, even on failure.
		disconnect(connection);
	}
}
 
Example 17
Project: usergrid   File: UsergridAwsCredentials.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Extracts the AWS secret key from the given JSON map, checking the primary
 * environment-variable name first and then the alternate name; fails when
 * neither is present.
 */
public String getAWSSecretKeyJson(Map<String,Object> jsonObject){
    // Prefer the primary key name; fall back to the alternate.
    String candidate = (String) jsonObject.get( SDKGlobalConfiguration.SECRET_KEY_ENV_VAR );
    if (StringUtils.isEmpty(candidate)) {
        candidate = (String) jsonObject.get( SDKGlobalConfiguration.ALTERNATE_SECRET_KEY_ENV_VAR );
        if (StringUtils.isEmpty(candidate)) {
            throw new AmazonClientException("Could not get aws secret key from json object.");
        }
    }
    return StringUtils.trim( candidate );
}
 
Example 18
Project: localization_nifi   File: MockPutCloudWatchMetric.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Test double: records call count and request details, then either throws
 * the configured exception (when set) or returns the preset result.
 */
protected PutMetricDataResult putMetricData(PutMetricDataRequest metricDataRequest) throws AmazonClientException {
    putMetricDataCallCount++;
    actualNamespace = metricDataRequest.getNamespace();
    actualMetricData = metricDataRequest.getMetricData();

    if (throwException != null) {
        throw throwException;
    }

    return result;
}
 
Example 19
Project: bazel   File: InstanceProfileCredentialsProvider.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * {@inheritDoc}
 *
 * @throws AmazonClientException if {@link SDKGlobalConfiguration#isEc2MetadataDisabled()} is true
 */
@Override
public AWSCredentials getCredentials() {
    // Refuse to contact the instance metadata service when it has been
    // explicitly disabled via AWS_EC2_METADATA_DISABLED.
    if (SDKGlobalConfiguration.isEc2MetadataDisabled()) {
        throw new AmazonClientException("AWS_EC2_METADATA_DISABLED is set to true, not loading credentials from EC2 Instance "
                                     + "Metadata service");
    }
    AWSCredentials creds = credentialsFetcher.getCredentials();
    shouldRefresh = true;
    return creds;
}
 
Example 20
Project: Scribengin   File: AmazonS3Mock.java    License: GNU Affero General Public License v3.0 5 votes vote down vote up
/** Unimplemented mock stub: silently ignores notification configuration. */
@Override
public void setBucketNotificationConfiguration(String bucketName,
    BucketNotificationConfiguration bucketNotificationConfiguration) throws AmazonClientException,
    AmazonServiceException {
  // TODO Auto-generated method stub

}
 
Example 21
Project: Baragon   File: ElbManager.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Removes (or, when {@code isStatusCheck} is true, checks removal of) the agent
 * from every traffic source of its group, aggregating the worst state, the
 * longest wait time, and any exception messages across all sources.
 */
public AgentCheckInResponse attemptRemoveAgent(BaragonAgentMetadata agent, Optional<BaragonGroup> group, String groupName, boolean isStatusCheck) throws AmazonClientException {
  TrafficSourceState state = TrafficSourceState.DONE;
  long maxWaitTime = 0L;
  Optional<String> maybeExceptions = Optional.absent();
  if (isElbEnabledAgent(agent, group, groupName)) {
    boolean anyCompatible = false;
    StringBuilder message = new StringBuilder();
    for (TrafficSource source : group.get().getTrafficSources()) {
      // Skip sources whose registration identifier is not available on this agent.
      if (source.getRegisterBy() == RegisterBy.PRIVATE_IP && !agent.getEc2().getPrivateIp().isPresent()) {
        message.append(String.format("No private ip present to register by for source %s ", source.getName()));
        continue;
      } else if (source.getRegisterBy() == RegisterBy.INSTANCE_ID && !agent.getEc2().getInstanceId().isPresent()) {
        message.append(String.format("No instance id present to register by for source %s ", source.getName()));
        continue;
      }
      anyCompatible = true;
      String id = source.getRegisterBy() == RegisterBy.PRIVATE_IP ? agent.getEc2().getPrivateIp().get() : agent.getEc2().getInstanceId().get();
      Instance instance = source.getRegisterBy() == RegisterBy.PRIVATE_IP ? null : new Instance(agent.getEc2().getInstanceId().get());
      AgentCheckInResponse response = isStatusCheck ?
          getLoadBalancer(source.getType()).checkRemovedInstance(id, source.getName(), agent.getAgentId()) :
          getLoadBalancer(source.getType()).removeInstance(instance, id, source.getName(), agent.getAgentId());
      // Keep the "worst" state seen so far (enum ordinal ordering).
      if (response.getState().ordinal() > state.ordinal()) {
        state = response.getState();
      }
      if (response.getExceptionMessage().isPresent()) {
        maybeExceptions = Optional.of(maybeExceptions.or("") + response.getExceptionMessage().get() + "\n");
      }
      if (response.getWaitTime() > maxWaitTime) {
        maxWaitTime = response.getWaitTime();
      }
    }
    // No source could be used at all -> report an error with the reasons collected.
    if (!anyCompatible) {
      return new AgentCheckInResponse(TrafficSourceState.ERROR, Optional.of(message.toString()), maxWaitTime);
    }
  }
  return new AgentCheckInResponse(state, maybeExceptions, maxWaitTime);
}
 
Example 22
Project: spring-data-simpledb   File: AbstractServiceUnavailableOperationRetrier.java    License: MIT License 5 votes vote down vote up
/**
 * @return recognized exception or null, throws further not recognized exception
 */
private AmazonClientException tryExecute() {
    try {
        execute();
        // Completed without error: nothing to report.
        return null;
    } catch (AmazonClientException caught) {
        // Only service-unavailable failures are "recognized" and returned
        // to the retrier; anything else propagates.
        if (!isServiceUnavailableException(caught)) {
            throw caught;
        }
        return caught;
    }
}
 
Example 23
Project: big-c   File: S3AFileSystem.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a zero-byte directory-marker object; the marker key always carries
 * a trailing slash, appended here when the caller omitted it.
 */
private void createFakeDirectory(final String bucketName, final String objectName)
    throws AmazonClientException, AmazonServiceException {
  final String directoryKey = objectName.endsWith("/") ? objectName : objectName + "/";
  createEmptyObject(bucketName, directoryKey);
}
 
Example 24
Project: presto-kinesis   File: MockKinesisClient.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Mock implementation: builds a shard iterator from the requested stream and
 * shard. For AFTER_SEQUENCE_NUMBER requests the iterator starts one past the
 * given sequence number; otherwise it starts at index 100.
 */
@Override
public GetShardIteratorResult getShardIterator(GetShardIteratorRequest getShardIteratorRequest) throws AmazonServiceException, AmazonClientException
{
    ShardIterator iter = ShardIterator.fromStreamAndShard(getShardIteratorRequest.getStreamName(), getShardIteratorRequest.getShardId());
    if (iter != null) {
        InternalStream theStream = this.getStream(iter.streamId);
        if (theStream != null) {
            String seqAsString = getShardIteratorRequest.getStartingSequenceNumber();
            if (seqAsString != null && !seqAsString.isEmpty() && getShardIteratorRequest.getShardIteratorType().equals("AFTER_SEQUENCE_NUMBER")) {
                int sequence = Integer.parseInt(seqAsString);
                // Resume reading just past the supplied sequence number.
                iter.recordIndex = sequence + 1;
            }
            else {
                iter.recordIndex = 100;
            }

            GetShardIteratorResult result = new GetShardIteratorResult();
            return result.withShardIterator(iter.makeString());
        }
        else {
            throw new AmazonClientException("Unknown stream or bad shard iterator!");
        }
    }
    else {
        throw new AmazonClientException("Bad stream or shard iterator!");
    }
}
 
Example 25
Project: attic-apex-malhar   File: AbstractKinesisOutputOperator.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Converts the tuple to a Kinesis PutRecords entry and appends it to the
 * pending batch; any AWS client failure is rethrown as unchecked.
 */
private void addRecord(T tuple)
{
  try {
    Pair<String, V> keyValue = tupleToKeyValue(tuple);
    PutRecordsRequestEntry putRecordsEntry = new PutRecordsRequestEntry();
    putRecordsEntry.setData(ByteBuffer.wrap(getRecord(keyValue.second)));
    putRecordsEntry.setPartitionKey(keyValue.first);
    putRecordsRequestEntryList.add(putRecordsEntry);
  } catch (AmazonClientException e) {
    throw new RuntimeException(e);
  }
}
 
Example 26
Project: crate   File: S3BlobContainer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Uploads a blob using a single upload request
 *
 * @param blobStore store providing the client, bucket, ACL and encryption settings
 * @param blobName  key under which the blob is stored
 * @param input     stream containing the blob data
 * @param blobSize  exact number of bytes in {@code input}
 * @throws IOException if the size limits are exceeded or the upload fails
 */
void executeSingleUpload(final S3BlobStore blobStore,
                         final String blobName,
                         final InputStream input,
                         final long blobSize) throws IOException {

    // Extra safety checks
    if (blobSize > MAX_FILE_SIZE.getBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than " + MAX_FILE_SIZE);
    }
    if (blobSize > blobStore.bufferSizeInBytes()) {
        throw new IllegalArgumentException("Upload request size [" + blobSize + "] can't be larger than buffer size");
    }

    final ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(blobSize);
    if (blobStore.serverSideEncryption()) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    final PutObjectRequest putRequest = new PutObjectRequest(blobStore.bucket(), blobName, input, md);
    putRequest.setStorageClass(blobStore.getStorageClass());
    putRequest.setCannedAcl(blobStore.getCannedACL());

    // clientReference is released by try-with-resources even when putObject throws.
    try (AmazonS3Reference clientReference = blobStore.clientReference()) {
        clientReference.client().putObject(putRequest);
    } catch (final AmazonClientException e) {
        throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
    }
}
 
Example 27
Project: vertx-deploy-tools   File: AwsAutoScalingUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Asynchronously looks up this instance's auto-scaling lifecycle state,
 * mapping it to an AwsState (UNKNOWN when the instance is not found);
 * synchronous AWS client failures are logged and rethrown as AwsException.
 */
public Observable<AwsState> pollForInstanceState() {
    try {
        return Observable.from(asyncClient.describeAutoScalingInstancesAsync(new DescribeAutoScalingInstancesRequest().withInstanceIds(instanceId)))
                .flatMap(result -> {
                    Optional<String> optState = result.getAutoScalingInstances().stream().filter(i -> i.getInstanceId().equals(instanceId)).map(AutoScalingInstanceDetails::getLifecycleState).findFirst();
                    return just(optState.map(AwsState::map).orElse(AwsState.UNKNOWN));
                });
    } catch (AmazonClientException e) {
        LOG.error(LogConstants.ERROR_EXECUTING_REQUEST, e);
        throw new AwsException(e);
    }
}
 
Example 28
Project: attic-stratos   File: AWSHelper.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Checks if the security group is already present in the given region. If
 * yes, then returns its group id. If not, present the returns null.
 *
 * @param groupName to be checked for presence.
 * @param region
 * @return id of the security group
 */
public String getSecurityGroupId(String groupName, String region) {
    if (groupName == null || groupName.isEmpty()) {
        return null;
    }

    DescribeSecurityGroupsRequest describeSecurityGroupsRequest = new DescribeSecurityGroupsRequest();
    if (AWSExtensionContext.getInstance().isOperatingInVPC()) {
        if (getVpcIds().size() > 0) {
            // vpc id filter
            // NOTE(review): this path filters by the lbSecurityGroupName field,
            // not the groupName parameter — looks inconsistent with the
            // non-VPC path below; confirm this is intentional.
            Set<Filter> filters = getFilters(getVpcIds().iterator().next(), lbSecurityGroupName);
            describeSecurityGroupsRequest.setFilters(filters);
        } else {
            List<String> groupNames = new ArrayList<String>();
            groupNames.add(groupName);
            describeSecurityGroupsRequest.setGroupNames(groupNames);
        }
    }

    try {
        // Point the client at the region-specific EC2 endpoint first.
        ec2Client.setEndpoint(String.format(
                Constants.EC2_ENDPOINT_URL_FORMAT, region));

        DescribeSecurityGroupsResult describeSecurityGroupsResult = ec2Client
                .describeSecurityGroups(describeSecurityGroupsRequest);

        List<SecurityGroup> securityGroups = describeSecurityGroupsResult
                .getSecurityGroups();

        if (securityGroups != null && securityGroups.size() > 0) {
            // First match wins.
            return securityGroups.get(0).getGroupId();
        } else {
            log.warn("Could not find security group id for group " + groupName);
        }
    } catch (AmazonClientException e) {
        log.debug("Could not describe security groups.", e);
    }

    return null;
}
 
Example 29
Project: Scribengin   File: S3Client.java    License: GNU Affero General Public License v3.0 5 votes vote down vote up
/**
 * Deletes an S3 bucket; when {@code recursive} is true, all keys in the
 * bucket are deleted first.
 */
public void deleteBucket(String bucketName, boolean recursive) throws AmazonClientException, AmazonServiceException {
  if(recursive) {
    deleteKeyWithPrefix(bucketName, "");
  }
  DeleteBucketRequest request = new DeleteBucketRequest(bucketName) ;
  s3Client.deleteBucket(request);
}
 
Example 30
Project: dynamodb-transactions   File: FailingAmazonDynamoDBClient.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Test double: fails with FailedYourRequestException when the request was
 * registered as a failure, otherwise delegates to the real client.
 */
@Override
public UpdateItemResult updateItem(UpdateItemRequest updateItemRequest) throws AmazonServiceException,
    AmazonClientException {
    if(requestsToFail.contains(updateItemRequest)) {
        throw new FailedYourRequestException();
    }
    return super.updateItem(updateItemRequest);
}
 
Example 31
Project: dynamodb-transactions   File: TransactionDynamoDBFacade.java    License: Apache License 2.0 5 votes vote down vote up
/** Not supported by this facade; callers must use the underlying client. */
@Override
public BatchGetItemResult batchGetItem(
        Map<String, KeysAndAttributes> requestItems,
        String returnConsumedCapacity) throws AmazonServiceException,
        AmazonClientException {
    throw new UnsupportedOperationException("Use the underlying client instance instead");
}
 
Example 32
Project: aws-java-sdk-stubs   File: AmazonSNSStub.java    License: Apache License 2.0 5 votes vote down vote up
/** Unimplemented stub: always throws UnsupportedOperationException. */
@Override
public CreatePlatformApplicationResult createPlatformApplication(
    final CreatePlatformApplicationRequest createPlatformApplicationRequest)
    throws AmazonServiceException,
    AmazonClientException {
  throw new UnsupportedOperationException();
}
 
Example 33
Project: ivona-speechcloud-sdk-java   File: CreateSpeechGetRequestMarshaller.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Marshals a CreateSpeechRequest into an HTTP GET request against the
 * speech-cloud resource path.
 *
 * @throws AmazonClientException if the request to marshal is null
 */
public Request<CreateSpeechRequest> marshall(CreateSpeechRequest createSpeechRequest) {
    if (createSpeechRequest == null) {
        throw new AmazonClientException("null createSpeechRequest passed to marshall(...)");
    }

    Request<CreateSpeechRequest> request = new DefaultRequest<CreateSpeechRequest>(createSpeechRequest,
            IvonaSpeechCloudClient.SERVICE_NAME);
    setRequestParameters(request, createSpeechRequest);
    request.setHttpMethod(HttpMethodName.GET);
    request.setResourcePath(RESOURCE_PATH);

    return request;
}
 
Example 34
Project: amazon-sqs-java-messaging-lib   File: AmazonSQSMessagingClientWrapperTest.java    License: Apache License 2.0 5 votes vote down vote up
// Verifies that queueExists translates an AmazonClientException from
// getQueueUrl into a JMSException (asserted via the expected attribute).
@Test(expected = JMSException.class)
public void testQueueExistsThrowAmazonClientException() throws JMSException {

    GetQueueUrlRequest getQueueUrlRequest = new GetQueueUrlRequest(QUEUE_NAME);
    doThrow(new AmazonClientException("ace"))
            .when(amazonSQSClient).getQueueUrl(eq(getQueueUrlRequest));

    wrapper.queueExists(QUEUE_NAME);
}
 
Example 35
Project: aws-java-sdk-stubs   File: AmazonEC2Stub.java    License: Apache License 2.0 4 votes vote down vote up
/** Unimplemented stub: always throws UnsupportedOperationException. */
@Override
public RequestSpotFleetResult requestSpotFleet(RequestSpotFleetRequest arg0) throws AmazonServiceException, AmazonClientException {
  throw new UnsupportedOperationException();
}
 
Example 36
Project: tajo   File: MockAmazonS3.java    License: Apache License 2.0 4 votes vote down vote up
/** Mock: putObject is not supported and always fails with TajoInternalError. */
@Override
public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata)
  throws AmazonClientException {
  throw new TajoInternalError(new UnsupportedException());
}
 
Example 37
Project: nifi   File: PutS3Object.java    License: Apache License 2.0 4 votes vote down vote up
protected MultipartUploadListing getS3AgeoffListAndAgeoffLocalState(final ProcessContext context, final AmazonS3Client s3, final long now, String bucket) {
    final long ageoff_interval = context.getProperty(MULTIPART_S3_AGEOFF_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    final Long maxAge = context.getProperty(MULTIPART_S3_MAX_AGE).asTimePeriod(TimeUnit.MILLISECONDS);
    final long ageCutoff = now - maxAge;

    final List<MultipartUpload> ageoffList = new ArrayList<>();
    if ((lastS3AgeOff.get() < now - ageoff_interval) && s3BucketLock.tryLock()) {
        try {

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
            MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                long uploadTime = upload.getInitiated().getTime();
                if (uploadTime < ageCutoff) {
                    ageoffList.add(upload);
                }
            }

            // ageoff any local state
            ageoffLocalState(ageCutoff);
            lastS3AgeOff.set(System.currentTimeMillis());
        } catch(AmazonClientException e) {
            if (e instanceof AmazonS3Exception
                    && ((AmazonS3Exception)e).getStatusCode() == 403
                    && ((AmazonS3Exception) e).getErrorCode().equals("AccessDenied")) {
                getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} " +
                        "** The configured user does not have the s3:ListBucketMultipartUploads permission " +
                        "for this bucket, S3 ageoff cannot occur without this permission.  Next ageoff check " +
                        "time is being advanced by interval to prevent checking on every upload **",
                        new Object[]{bucket, e.getMessage()});
                lastS3AgeOff.set(System.currentTimeMillis());
            } else {
                getLogger().error("Error checking S3 Multipart Upload list for {}: {}",
                        new Object[]{bucket, e.getMessage()});
            }
        } finally {
            s3BucketLock.unlock();
        }
    }
    MultipartUploadListing result = new MultipartUploadListing();
    result.setBucketName(bucket);
    result.setMultipartUploads(ageoffList);
    return result;
}
 
Example 38
Project: tajo   File: MockAmazonS3.java    License: Apache License 2.0 4 votes vote down vote up
@Override
public Bucket createBucket(String bucketName, String region)
  throws AmazonClientException {
  throw new TajoInternalError(new UnsupportedException());
}
 
Example 39
Project: Scribengin   File: AmazonS3Mock.java    License: GNU Affero General Public License v3.0 4 votes vote down vote up
@Override
public void deleteObject(DeleteObjectRequest deleteObjectRequest) throws AmazonClientException,
    AmazonServiceException {
  // TODO Auto-generated method stub

}
 
Example 40
Project: Scribengin   File: AmazonS3Mock.java    License: GNU Affero General Public License v3.0 4 votes vote down vote up
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request)
    throws AmazonClientException, AmazonServiceException {
  // TODO Auto-generated method stub
  return null;
}