com.amazonaws.AmazonServiceException Java Examples

The following examples show how to use com.amazonaws.AmazonServiceException. They are drawn from open-source projects; you can go to the original project or source file by following the links above each example.
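AmazonServiceException is thrown when a request reaches AWS but the service rejects or fails to process it; the exception carries the HTTP status code, the AWS error code, the error type, and the request ID, all of which appear throughout the examples below. As a quick orientation, here is a minimal hypothetical sketch (the client setup, bucket, and key are assumptions, not taken from any example) of catching the exception and inspecting those fields:

final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
try {
    s3.getObjectMetadata("example-bucket", "example-key"); // hypothetical bucket and key
} catch (AmazonServiceException ase) {
    // the call reached AWS but was rejected; the exception says why
    System.err.println("Status code: " + ase.getStatusCode()); // e.g. 404
    System.err.println("Error code:  " + ase.getErrorCode());  // e.g. NoSuchKey
    System.err.println("Error type:  " + ase.getErrorType());  // Client, Service or Unknown
    System.err.println("Request ID:  " + ase.getRequestId());  // useful when contacting AWS support
} catch (AmazonClientException ace) {
    // the call never got a response from AWS (e.g. a network failure)
    System.err.println("Client-side failure: " + ace.getMessage());
}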
Example #1
Source File: InventoryUtilTest.java    From pacbot with Apache License 2.0
/**
 * Fetch S3 info test exception.
 *
 * @throws Exception the exception
 */
@SuppressWarnings("static-access")
@Test
public void fetchS3InfoTestTest_Exception() throws Exception {
    
    mockStatic(AmazonS3ClientBuilder.class);
    AmazonS3 amazonS3Client = PowerMockito.mock(AmazonS3.class);
    AmazonS3ClientBuilder amazonS3ClientBuilder = PowerMockito.mock(AmazonS3ClientBuilder.class);
    AWSStaticCredentialsProvider awsStaticCredentialsProvider = PowerMockito.mock(AWSStaticCredentialsProvider.class);
    PowerMockito.whenNew(AWSStaticCredentialsProvider.class).withAnyArguments().thenReturn(awsStaticCredentialsProvider);
    when(amazonS3ClientBuilder.standard()).thenReturn(amazonS3ClientBuilder);
    when(amazonS3ClientBuilder.withCredentials(anyObject())).thenReturn(amazonS3ClientBuilder);
    when(amazonS3ClientBuilder.withRegion(anyString())).thenReturn(amazonS3ClientBuilder);
    when(amazonS3ClientBuilder.build()).thenReturn(amazonS3Client);
    
    List<Bucket> s3buckets = new ArrayList<>();
    Bucket bucket = new Bucket();
    bucket.setName("name");
    s3buckets.add(bucket);
    when(amazonS3Client.listBuckets()).thenReturn(s3buckets);
    
    when(amazonS3Client.getBucketLocation(anyString())).thenThrow(new AmazonServiceException("Error"));
    assertThat(inventoryUtil.fetchS3Info(new BasicSessionCredentials("awsAccessKey", "awsSecretKey", "sessionToken"), 
            "skipRegions", "account","accountName").size(), is(0));
}
 
Example #2
Source File: AmazonS3Storage.java    From thunderbit with GNU Affero General Public License v3.0
@Inject
public AmazonS3Storage (Configuration configuration) {
    bucketName = configuration.getString("storage.s3.bucket", "thunderbit");

    String accessKey = configuration.getString("storage.s3.accesskey");
    String secretKey = configuration.getString("storage.s3.secretkey");
    credentials = new BasicAWSCredentials(accessKey, secretKey);

    AmazonS3 amazonS3 = new AmazonS3Client(credentials);

    if (configuration.getBoolean("storage.s3.createBucket", true)) {
        try {
            if (!(amazonS3.doesBucketExist(bucketName))) {
                amazonS3.createBucket(new CreateBucketRequest(bucketName));
            }

            String bucketLocation = amazonS3.getBucketLocation(new GetBucketLocationRequest(bucketName));
            logger.info("Amazon S3 bucket created at " + bucketLocation);
        } catch (AmazonServiceException ase) {
            logAmazonServiceException(ase);
        } catch (AmazonClientException ace) {
            logAmazonClientException(ace);
        }
    }
}
 
Example #3
Source File: MockCloudStore.java    From athenz with Apache License 2.0
@Override
AWSSecurityTokenServiceClient getTokenServiceClient() {
    if (exceptionStatusCode != 0) {
        if (amazonException) {
            AmazonServiceException ex = new AmazonServiceException("Error");
            ex.setStatusCode(exceptionStatusCode);
            throw ex;
        } else {
            throw new IllegalArgumentException("Error");
        }
    } else {
        AWSSecurityTokenServiceClient client = Mockito.mock(AWSSecurityTokenServiceClient.class);
        Mockito.when(client.assumeRole(Mockito.any(AssumeRoleRequest.class))).thenReturn(assumeRoleResult);
        Mockito.when(client.getCallerIdentity(Mockito.any(GetCallerIdentityRequest.class))).thenReturn(callerIdentityResult);
        return client;
    }
}
 
Example #4
Source File: S3AFileSystem.java    From hadoop with Apache License 2.0
private void createEmptyObject(final String bucketName, final String objectName)
    throws AmazonClientException, AmazonServiceException {
  final InputStream im = new InputStream() {
    @Override
    public int read() throws IOException {
      return -1;
    }
  };

  final ObjectMetadata om = new ObjectMetadata();
  om.setContentLength(0L);
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
  putObjectRequest.setCannedAcl(cannedACL);
  s3.putObject(putObjectRequest);
  statistics.incrementWriteOps(1);
}
 
Example #5
Source File: GlueHiveMetastore.java    From presto with Apache License 2.0
@Override
public List<String> getAllDatabases()
{
    try {
        return stats.getGetAllDatabases().call(() -> {
            List<String> databaseNames = new ArrayList<>();
            String nextToken = null;

            do {
                GetDatabasesResult result = glueClient.getDatabases(new GetDatabasesRequest().withCatalogId(catalogId).withNextToken(nextToken));
                nextToken = result.getNextToken();
                result.getDatabaseList().forEach(database -> databaseNames.add(database.getName()));
            }
            while (nextToken != null);

            return databaseNames;
        });
    }
    catch (AmazonServiceException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
}
 
Example #6
Source File: GlueHiveMetastore.java    From presto with Apache License 2.0
@Override
public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData)
{
    Table table = getExistingTable(identity, databaseName, tableName);

    try {
        stats.getDropTable().call(() ->
                glueClient.deleteTable(new DeleteTableRequest()
                        .withCatalogId(catalogId)
                        .withDatabaseName(databaseName)
                        .withName(tableName)));
    }
    catch (AmazonServiceException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }

    String tableLocation = table.getStorage().getLocation();
    if (deleteData && isManagedTable(table) && !isNullOrEmpty(tableLocation)) {
        deleteDir(hdfsContext, hdfsEnvironment, new Path(tableLocation), true);
    }
}
 
Example #7
Source File: DynamicQueueUrlDestinationResolverTest.java    From spring-cloud-aws with Apache License 2.0
@Test
void testPotentiallyNoAccessToPerformGetQueueUrl() throws Exception {
	AmazonSQS amazonSqs = mock(AmazonSQS.class);
	AmazonServiceException exception = new QueueDoesNotExistException(
			"AWS.SimpleQueueService.NonExistentQueue");
	exception.setErrorCode("AWS.SimpleQueueService.NonExistentQueue");
	exception.setErrorMessage(
			"The specified queue does not exist or you do not have access to it.");
	String queueUrl = "noAccessGetQueueUrlName";
	when(amazonSqs.getQueueUrl(new GetQueueUrlRequest(queueUrl)))
			.thenThrow(exception);
	DynamicQueueUrlDestinationResolver dynamicQueueDestinationResolver = new DynamicQueueUrlDestinationResolver(
			amazonSqs);
	try {
		dynamicQueueDestinationResolver.resolveDestination(queueUrl);
	}
	catch (DestinationResolutionException e) {
		assertThat(e.getMessage()).startsWith(
				"The queue does not exist or no access to perform action sqs:GetQueueUrl.");
	}
}
 
Example #8
Source File: GlueHiveMetastore.java    From presto with Apache License 2.0
@Override
public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List<String> parts, boolean deleteData)
{
    Table table = getExistingTable(identity, databaseName, tableName);
    Partition partition = getPartition(identity, table, parts)
            .orElseThrow(() -> new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), parts));

    try {
        stats.getDropPartition().call(() ->
                glueClient.deletePartition(new DeletePartitionRequest()
                        .withCatalogId(catalogId)
                        .withDatabaseName(databaseName)
                        .withTableName(tableName)
                        .withPartitionValues(parts)));
    }
    catch (AmazonServiceException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }

    String partLocation = partition.getStorage().getLocation();
    if (deleteData && isManagedTable(table) && !isNullOrEmpty(partLocation)) {
        deleteDir(hdfsContext, hdfsEnvironment, new Path(partLocation), true);
    }
}
 
Example #9
Source File: ACloudFormationTest.java    From aws-cf-templates with Apache License 2.0
private List<StackEvent> getStackEvents(final String stackName) {
    final List<StackEvent> events = new ArrayList<>();
    String nextToken = null;
    do {
        try {
            final DescribeStackEventsResult res = this.cf.describeStackEvents(new DescribeStackEventsRequest().withStackName(stackName).withNextToken(nextToken));
            events.addAll(res.getStackEvents());
            nextToken = res.getNextToken();
        } catch (final AmazonServiceException e) {
            if (e.getErrorMessage().equals("Stack [" + stackName + "] does not exist")) {
                nextToken = null;
            } else {
                throw e;
            }
        }
    } while (nextToken != null);
    Collections.reverse(events);
    return events;
}
 
Example #10
Source File: S3.java    From rdf-delta with Apache License 2.0
/** Test whether the bucket exists and is accessible. */
public static boolean bucketExists(AmazonS3 client, String bucketName) {
    try {
        HeadBucketRequest request = new HeadBucketRequest(bucketName);
        HeadBucketResult result = client.headBucket(request);
        return true;
    }
    catch (AmazonServiceException awsEx) {
        switch (awsEx.getStatusCode()) {
            case HttpSC.NOT_FOUND_404 :
                return false;
            case HttpSC.FORBIDDEN_403 :
                break;
            case HttpSC.MOVED_PERMANENTLY_301 : { // Moved permanently.
                System.err.println("301 Location: " + awsEx.getHttpHeaders().get(HttpNames.hLocation));
                break;
            }
        }
        throw awsEx;
    }
}
 
Example #11
Source File: S3Backuper.java    From cassandra-backup with Apache License 2.0
@Override
public FreshenResult freshenRemoteObject(final RemoteObjectReference object) throws InterruptedException {
    final String canonicalPath = ((S3RemoteObjectReference) object).canonicalPath;

    final CopyObjectRequest copyRequest = new CopyObjectRequest(request.storageLocation.bucket,
                                                                canonicalPath,
                                                                request.storageLocation.bucket,
                                                                canonicalPath).withStorageClass(StorageClass.Standard);

    try {
        // attempt to refresh existing object in the bucket via an inplace copy
        transferManager.copy(copyRequest).waitForCompletion();
        return FreshenResult.FRESHENED;

    } catch (final AmazonServiceException e) {
        // AWS S3 under certain access policies can't return NoSuchKey (404)
        // instead, it returns AccessDenied (403) — handle it the same way
        if (e.getStatusCode() != 404 && e.getStatusCode() != 403) {
            throw e;
        }

        // the freshen failed because the file/key didn't exist
        return FreshenResult.UPLOAD_REQUIRED;
    }
}
 
Example #12
Source File: XferMgrProgress.java    From dlp-dataflow-deidentification with Apache License 2.0
public static void uploadDirWithSubprogress(
    String dir_path, String bucket_name, String key_prefix, boolean recursive, boolean pause) {
  System.out.println(
      "directory: " + dir_path + (recursive ? " (recursive)" : "") + (pause ? " (pause)" : ""));

  TransferManager xfer_mgr = new TransferManager();
  try {
    MultipleFileUpload multi_upload =
        xfer_mgr.uploadDirectory(bucket_name, key_prefix, new File(dir_path), recursive);
    // loop with Transfer.isDone()
    XferMgrProgress.showMultiUploadProgress(multi_upload);
    // or block with Transfer.waitForCompletion()
    XferMgrProgress.waitForCompletion(multi_upload);
  } catch (AmazonServiceException e) {
    System.err.println(e.getErrorMessage());
    System.exit(1);
  }
  xfer_mgr.shutdownNow();
}
 
Example #13
Source File: S3FeaturesDemoTest.java    From Scribengin with GNU Affero General Public License v3.0
public void listingObjects(String bucketName, String key) throws AmazonServiceException {
  /**
   * List objects in your bucket by prefix - There are many options for
   * listing the objects in your bucket. Keep in mind that buckets with many
   * objects might truncate their results when listing their objects, so be
   * sure to check if the returned object listing is truncated, and use the
   * AmazonS3.listNextBatchOfObjects(...) operation to retrieve additional
   * results.
   */
  System.out.println("Listing objects");
  ListObjectsRequest request = new ListObjectsRequest().withBucketName(bucketName).withPrefix("My");
  ObjectListing objectListing = s3Client.listObjects(request);
  for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
    System.out.println(" - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
  }
  System.out.println();
}
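
The Javadoc in the example above warns that listings may be truncated, yet the loop prints only the first batch of results. A hedged sketch of the recommended follow-up, assuming the same s3Client field and bucket name, keeps fetching pages with AmazonS3.listNextBatchOfObjects(...) until the listing is no longer truncated:

ObjectListing listing = s3Client.listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
while (true) {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        System.out.println(" - " + summary.getKey() + "  (size = " + summary.getSize() + ")");
    }
    if (!listing.isTruncated()) {
        break; // all matching objects have been listed
    }
    listing = s3Client.listNextBatchOfObjects(listing); // fetch the next page of results
}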
 
Example #14
Source File: AbstractDynamoDBProcessor.java    From localization_nifi with Apache License 2.0
protected List<FlowFile> processServiceException(final ProcessSession session, List<FlowFile> flowFiles,
        AmazonServiceException exception) {
    List<FlowFile> failedFlowFiles = new ArrayList<>();
    for (FlowFile flowFile : flowFiles) {
        Map<String,String> attributes = new HashMap<>();
        attributes.put(DYNAMODB_ERROR_EXCEPTION_MESSAGE, exception.getMessage() );
        attributes.put(DYNAMODB_ERROR_CODE, exception.getErrorCode() );
        attributes.put(DYNAMODB_ERROR_MESSAGE, exception.getErrorMessage() );
        attributes.put(DYNAMODB_ERROR_TYPE, exception.getErrorType().name() );
        attributes.put(DYNAMODB_ERROR_SERVICE, exception.getServiceName() );
        attributes.put(DYNAMODB_ERROR_RETRYABLE, Boolean.toString(exception.isRetryable()));
        attributes.put(DYNAMODB_ERROR_REQUEST_ID, exception.getRequestId() );
        attributes.put(DYNAMODB_ERROR_STATUS_CODE, Integer.toString(exception.getStatusCode()) );
        flowFile = session.putAllAttributes(flowFile, attributes);
        failedFlowFiles.add(flowFile);
    }
    return failedFlowFiles;
}
 
Example #15
Source File: AmazonS3FileSystem.java    From iaf with Apache License 2.0
@Override
public InputStream readFile(S3Object f) throws FileSystemException, IOException {
	try {
		final S3Object file = s3Client.getObject(bucketName, f.getKey());
		final S3ObjectInputStream is = file.getObjectContent();

		return is;
	} catch (AmazonServiceException e) {
		throw new FileSystemException(e);
	}
}
 
Example #16
Source File: DynamoDBPersistenceService.java    From openhab1-addons with Eclipse Public License 2.0
@Override
public void run() {
    logger.debug("Error storing object to dynamo, unprocessed items: {}. Retrying with exponential back-off",
            unprocessedItems);
    lastException = null;
    while (!unprocessedItems.isEmpty() && retry < WAIT_MILLIS_IN_RETRIES.length) {
        if (!sleep()) {
            // Interrupted
            return;
        }
        retry++;
        try {
            BatchWriteItemOutcome outcome = DynamoDBPersistenceService.this.db.getDynamoDB()
                    .batchWriteItemUnprocessed(unprocessedItems);
            unprocessedItems = outcome.getUnprocessedItems();
            lastException = null;
        } catch (AmazonServiceException e) {
            if (e instanceof ResourceNotFoundException) {
                logger.debug(
                        "DynamoDB query raised unexpected exception: {}. This might happen if table was recently created",
                        e.getMessage());
            } else {
                logger.debug("DynamoDB query raised unexpected exception: {}.", e.getMessage());
            }
            lastException = e;
            continue;
        }
    }
    if (unprocessedItems.isEmpty()) {
        logger.debug("After {} retries successfully wrote all unprocessed items", retry);
    } else {
        logger.warn(
                "Even after retries failed to write some items. Last exception: {} {}, unprocessed items: {}",
                lastException == null ? "null" : lastException.getClass().getName(),
                lastException == null ? "null" : lastException.getMessage(), unprocessedItems);
    }
}
 
Example #17
Source File: SqsExtractorTest.java    From pocket-etl with Apache License 2.0
@Test
public void nextRetriesThreeTimesBeforeThrowingUnrecoverableStreamFailureExceptionInCaseOfServiceException() {
    when(mockAmazonSQS.receiveMessage(any(ReceiveMessageRequest.class))).thenThrow(new AmazonServiceException(SAMPLE_EXCEPTION));

    try {
        sqsExtractor.next();
    } catch (UnrecoverableStreamFailureException ignored) {}

    verify(mockAmazonSQS, times(3)).receiveMessage(any(ReceiveMessageRequest.class));
}
 
Example #18
Source File: Uploads.java    From jobcacher-plugin with MIT License
public void startUploading(TransferManager manager, File file, InputStream inputStream, Destination dest, ObjectMetadata metadata) throws AmazonServiceException {
    final PutObjectRequest request = new PutObjectRequest(dest.bucketName, dest.objectName, inputStream, metadata);

    // Set the buffer size (ReadLimit) equal to the multipart upload size,
    // allowing us to resend data if the connection breaks.
    request.getRequestClientOptions().setReadLimit(MULTIPART_UPLOAD_THRESHOLD);
    manager.getConfiguration().setMultipartUploadThreshold((long) MULTIPART_UPLOAD_THRESHOLD);

    final Upload upload = manager.upload(request);
    startedUploads.put(file, upload);
    openedStreams.put(file, inputStream);
}
 
Example #19
Source File: AmazonSQSMessagingClientWrapper.java    From amazon-sqs-java-messaging-lib with Apache License 2.0
/**
 * Create generic error message for <code>AmazonServiceException</code>. Message include
 * Action, RequestId, HTTPStatusCode, and AmazonErrorCode.
 */
private String logAndGetAmazonServiceException(AmazonServiceException ase, String action) {
    String errorMessage = "AmazonServiceException: " + action + ". RequestId: " + ase.getRequestId() +
                          "\nHTTPStatusCode: " + ase.getStatusCode() + " AmazonErrorCode: " +
                          ase.getErrorCode();
    LOG.error(errorMessage, ase);
    return errorMessage;
}
 
Example #20
Source File: AwsPublicKeyConnector.java    From cloudbreak with Apache License 2.0
private boolean exists(AmazonEC2Client client, String publicKeyId) {
    try {
        client.describeKeyPairs(new DescribeKeyPairsRequest().withKeyNames(publicKeyId));
        LOGGER.debug("Key-pair already exists: {}", publicKeyId);
        return true;
    } catch (AmazonServiceException e) {
        LOGGER.debug("Key-pair does not exist: {}", publicKeyId);
    }
    return false;
}
 
Example #21
Source File: LambdaWrapperTest.java    From cloudformation-cli-java-plugin with Apache License 2.0
@Test
public void invokeHandler_throwsAmazonServiceException_returnsServiceException() throws IOException {
    // exceptions are caught consistently by LambdaWrapper
    wrapper.setInvokeHandlerException(new AmazonServiceException("some error"));

    wrapper.setTransformResponse(resourceHandlerRequest);

    try (final InputStream in = loadRequestStream("create.request.json");
        final OutputStream out = new ByteArrayOutputStream()) {
        final Context context = getLambdaContext();

        wrapper.handleRequest(in, out, context);

        // verify initialiseRuntime was called and initialised dependencies
        verifyInitialiseRuntime();

        // all metrics should be published, once for a single invocation
        verify(providerMetricsPublisher, times(1)).publishInvocationMetric(any(Instant.class), eq(Action.CREATE));
        verify(providerMetricsPublisher, times(1)).publishDurationMetric(any(Instant.class), eq(Action.CREATE), anyLong());

        // failure metric should be published
        verify(providerMetricsPublisher, times(1)).publishExceptionMetric(any(Instant.class), any(),
            any(AmazonServiceException.class), any(HandlerErrorCode.class));

        // verify that model validation occurred for CREATE/UPDATE/DELETE
        verify(validator, times(1)).validateObject(any(JSONObject.class), any(JSONObject.class));

        // verify output response
        verifyHandlerResponse(out,
            ProgressEvent.<TestModel, TestContext>builder().errorCode(HandlerErrorCode.GeneralServiceException)
                .status(OperationStatus.FAILED)
                .message("some error (Service: null; Status Code: 0; Error Code: null; Request ID: null)").build());
    }
}
 
Example #22
Source File: S3FileManagerImpl.java    From entrada with GNU General Public License v3.0
@Override
public boolean exists(String location) {
  Optional<S3Details> details = S3Details.from(location);
  if (!details.isPresent()) {
    return false;
  }

  try {
    return amazonS3.doesObjectExist(details.get().getBucket(), details.get().getKey());
  } catch (AmazonServiceException e) {
    log.error("Error while checking if {} exists", location, e);
  }

  return false;
}
 
Example #23
Source File: VmManagerTest.java    From SeleniumGridScaler with GNU General Public License v2.0
@Test
// Test that if a fallback subnet is specified, that the request for new nodes will fallback successfully and nodes will be spun up
public void testSubnetFallsBackSuccessfully() throws NodesCouldNotBeStartedException {
    MockAmazonEc2Client client = new MockAmazonEc2Client(null);
    AmazonServiceException exception = new AmazonServiceException("message");
    exception.setErrorCode("InsufficientInstanceCapacity");
    client.setThrowDescribeInstancesError(exception);
    RunInstancesResult runInstancesResult = new RunInstancesResult();
    Reservation reservation = new Reservation();
    reservation.setInstances(Arrays.asList(new Instance()));
    runInstancesResult.setReservation(reservation);
    client.setRunInstances(runInstancesResult);
    Properties properties = new Properties();
    String region = "east", uuid="uuid",browser="chrome",os="linux";
    Integer threadCount = 5,maxSessions=5;
    MockManageVm manageEC2 = new MockManageVm(client,properties,region);
    String userData = "userData";
    String securityGroup="securityGroup",subnetId="subnetId",keyName="keyName",windowsImage="windowsImage",fallBackSubnet="fallback";
    properties.setProperty(region + "_security_group",securityGroup);
    properties.setProperty(region + "_subnet_id", subnetId);
    properties.setProperty(region + "_subnet_fallback_id_1", fallBackSubnet);
    properties.setProperty(region + "_key_name", keyName);
    properties.setProperty(region + "_windows_node_ami", windowsImage);
    manageEC2.setUserData(userData);
    List<Instance> instances = manageEC2.launchNodes(uuid, os, browser, null, threadCount, maxSessions);
    // reaching this point without a NodesCouldNotBeStartedException means the fallback subnet was used
}
 
Example #24
Source File: GetBucketPolicy.java    From aws-doc-sdk-examples with Apache License 2.0
public static void main(String[] args) {
    final String USAGE = "\n" +
            "Usage:\n" +
            "    GetBucketPolicy <bucket>\n\n" +
            "Where:\n" +
            "    bucket - the bucket to get the policy from.\n\n" +
            "Example:\n" +
            "    GetBucketPolicy testbucket\n\n";

    if (args.length < 1) {
        System.out.println(USAGE);
        System.exit(1);
    }

    String bucket_name = args[0];
    String policy_text = null;

    System.out.format("Getting policy for bucket: \"%s\"\n\n", bucket_name);

    final AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(Regions.DEFAULT_REGION).build();
    try {
        BucketPolicy bucket_policy = s3.getBucketPolicy(bucket_name);
        policy_text = bucket_policy.getPolicyText();
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }

    if (policy_text == null) {
        System.out.println("The specified bucket has no bucket policy.");
    } else {
        System.out.println("Returned policy:");
        System.out.println("----");
        System.out.println(policy_text);
        System.out.println("----\n");
    }

    System.out.println("Done!");
}
 
Example #25
Source File: AmazonSQSMessagingClientWrapperTest.java    From amazon-sqs-java-messaging-lib with Apache License 2.0
@Test(expected = JMSException.class)
public void testGetQueueUrlThrowAmazonServiceException() throws JMSException {

    GetQueueUrlRequest getQueueUrlRequest = new GetQueueUrlRequest(QUEUE_NAME);
    doThrow(new AmazonServiceException("ase"))
            .when(amazonSQSClient).getQueueUrl(eq(getQueueUrlRequest));

    wrapper.getQueueUrl(QUEUE_NAME);
}
 
Example #26
Source File: AWSCatalogMetastoreClient.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
private void renamePartitionInCatalog(String databaseName, String tableName,
                                      List<String> partitionValues, org.apache.hadoop.hive.metastore.api.Partition newPartition)
      throws InvalidOperationException, MetaException, TException {
  try {
    glueClient.updatePartition(
        new UpdatePartitionRequest()
        .withDatabaseName(databaseName)
        .withTableName(tableName)
        .withPartitionValueList(partitionValues)
        .withPartitionInput(GlueInputConverter.convertToPartitionInput(newPartition)));
  } catch (AmazonServiceException e) {
    throw CatalogToHiveConverter.wrapInHiveException(e);
  }
}
 
Example #27
Source File: S3AFileSystem.java    From big-c with Apache License 2.0
private void deleteUnnecessaryFakeDirectories(Path f) throws IOException {
  while (true) {
    try {
      String key = pathToKey(f);
      if (key.isEmpty()) {
        break;
      }

      S3AFileStatus status = getFileStatus(f);

      if (status.isDirectory() && status.isEmptyDirectory()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Deleting fake directory " + key + "/");
        }
        s3.deleteObject(bucket, key + "/");
        statistics.incrementWriteOps(1);
      }
    } catch (FileNotFoundException | AmazonServiceException e) {
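      // ignore: the path is already gone or cannot be queried; move on to the parent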
    }

    if (f.isRoot()) {
      break;
    }

    f = f.getParent();
  }
}
 
Example #28
Source File: TransactionDynamoDBFacade.java    From dynamodb-transactions with Apache License 2.0
@Override
public PutItemResult putItem(String tableName,
        Map<String, AttributeValue> item) throws AmazonServiceException,
        AmazonClientException {
    return putItem(new PutItemRequest()
            .withTableName(tableName)
            .withItem(item));
}
 
Example #29
Source File: EC2.java    From h2o-2 with Apache License 2.0
private List<Instance> wait(AmazonEC2Client ec2, List<String> ids) {
  System.out.println("Establishing ssh connections, make sure security group '" //
      + securityGroup + "' allows incoming TCP 22.");
  boolean tagsDone = false;
  for( ;; ) {
    try {
      if( !tagsDone ) {
        CreateTagsRequest createTagsRequest = new CreateTagsRequest();
        createTagsRequest.withResources(ids).withTags(new Tag("Name", NAME));
        ec2.createTags(createTagsRequest);
        tagsDone = true;
      }
      DescribeInstancesRequest request = new DescribeInstancesRequest();
      request.withInstanceIds(ids);
      DescribeInstancesResult result = ec2.describeInstances(request);
      List<Reservation> reservations = result.getReservations();
      List<Instance> instances = new ArrayList<Instance>();
      for( Reservation reservation : reservations )
        for( Instance instance : reservation.getInstances() )
          if( ip(instance) != null )
            instances.add(instance);
      if( instances.size() == ids.size() ) {
        // Try to connect to SSH port on each box
        if( canConnect(instances) )
          return instances;
      }
    } catch( AmazonServiceException xe ) {
      // Ignore and retry
    }
    try {
      Thread.sleep(500);
    } catch( InterruptedException e ) {
      throw Log.errRTExcept(e);
    }
  }
}
 
Example #30
Source File: TransactionManagerDynamoDBFacade.java    From dynamodb-transactions with Apache License 2.0
@Override
public CreateTableResult createTable(
        List<AttributeDefinition> attributeDefinitions, String tableName,
        List<KeySchemaElement> keySchema,
        ProvisionedThroughput provisionedThroughput)
        throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Use the underlying client instance instead");
}