Java Code Examples for com.amazonaws.services.s3.AmazonS3#listObjects()

The following examples show how to use com.amazonaws.services.s3.AmazonS3#listObjects(). Each example is taken from an open-source project; the source file and license are noted above each snippet.
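
Most of the snippets below follow the same basic pattern: build a ListObjectsRequest (or pass the bucket name directly), call listObjects(), and, because a single listing returns at most 1,000 keys, page through truncated results with listNextBatchOfObjects(). The following is a minimal, self-contained sketch of that pattern, not taken from any of the projects below; the bucket name and prefix are placeholders, and credentials and region are assumed to come from the default provider chain.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListObjectsSketch {
    public static void main(String[] args) {
        // Credentials and region are resolved from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();

        // "example-bucket" and "example/prefix/" are placeholders.
        ListObjectsRequest request = new ListObjectsRequest()
                .withBucketName("example-bucket")
                .withPrefix("example/prefix/");

        ObjectListing listing = s3.listObjects(request);
        while (true) {
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                System.out.println(summary.getKey() + " (" + summary.getSize() + " bytes)");
            }
            if (!listing.isTruncated()) {
                break;
            }
            // Each listing carries the marker needed to fetch the next page.
            listing = s3.listNextBatchOfObjects(listing);
        }
    }
}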
Example 1
Source File: S3Source.java    From sequenceiq-samples with Apache License 2.0
@Override
protected void doStart() {
    AWSCredentials myCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3Client = new AmazonS3Client(myCredentials);
    ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket);
    ObjectListing objectListing = s3Client.listObjects(listObjectsRequest);
    ChannelProcessor channelProcessor = getChannelProcessor();
    for (S3ObjectSummary s3ObjectSummary : objectListing.getObjectSummaries()) {
        String file = s3ObjectSummary.getKey();
        LOGGER.info("Read the content of {}", file);
        GetObjectRequest objectRequest = new GetObjectRequest(bucket, file);
        S3Object objectPortion = s3Client.getObject(objectRequest);
        try {
            long startTime = System.currentTimeMillis();
            processLines(channelProcessor, objectPortion.getObjectContent());
            LOGGER.info("Processing of {} took {} ms", file, System.currentTimeMillis() - startTime);
        } catch (IOException e) {
            LOGGER.warn("Cannot process the {}, skipping", file, e);
        }
    }
}
 
Example 2
Source File: InfectedFileCache.java    From aws-s3-virusscan with Apache License 2.0
public List<InfectedFile> getFiles() {
    final List<InfectedFile> files = new ArrayList<>();
    if (Config.has(Config.Key.INFECTED_FILES_BUCKET_NAME)) {
        final AmazonS3 s3local = AmazonS3ClientBuilder.standard().withCredentials(this.credentialsProvider).withRegion(Config.get(Config.Key.INFECTED_FILES_BUCKET_REGION)).build();
        ObjectListing objectListing = s3local.listObjects(Config.get(Config.Key.INFECTED_FILES_BUCKET_NAME));
        while (true) {
            objectListing.getObjectSummaries().forEach((summary) -> {
                final S3Object object = s3local.getObject(summary.getBucketName(), summary.getKey());
                final byte[] content;
                try {
                    content = IOUtils.toByteArray(object.getObjectContent());
                } catch (final IOException e) {
                    throw new RuntimeException(e);
                }
                files.add(new InfectedFile(summary.getKey(), content, object.getObjectMetadata().getContentType()));
            });
            if (objectListing.isTruncated()) {
                objectListing = s3local.listNextBatchOfObjects(objectListing);
            } else {
                break;
            }
        }
    }
    return files;
}
 
Example 3
Source File: S3CheckpointSpiSelfTest.java    From ignite with Apache License 2.0
/**
 * @throws Exception If error.
 */
@Override protected void afterSpiStopped() throws Exception {
    AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(),
        IgniteS3TestSuite.getSecretKey());

    AmazonS3 s3 = new AmazonS3Client(cred);

    String bucketName = S3CheckpointSpi.BUCKET_NAME_PREFIX + "unit-test-bucket";

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries())
                s3.deleteObject(bucketName, sum.getKey());

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
}
 
Example 4
Source File: TestUtils.java    From digdag with Apache License 2.0
public static void s3DeleteRecursively(AmazonS3 s3, String bucket, String prefix)
        throws Exception
{
    ListObjectsRequest request = new ListObjectsRequest()
            .withBucketName(bucket)
            .withPrefix(prefix);

    while (true) {
        ObjectListing listing = s3.listObjects(request);
        String[] keys = listing.getObjectSummaries().stream().map(S3ObjectSummary::getKey).toArray(String[]::new);
        for (String key : keys) {
            logger.info("delete s3://{}/{}", bucket, key);
        }
        retryExecutor()
                .retryIf(e -> e instanceof AmazonServiceException)
                .run(() -> s3.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keys)));
        if (listing.getNextMarker() == null) {
            break;
        }
    }
}
 
Example 5
Source File: ImportS3.java    From h2o-2 with Apache License 2.0
@Override
protected Response serve() {
  String bucket = _bucket.value();
  Log.info("ImportS3 processing (" + bucket + ")");
  JsonObject json = new JsonObject();
  JsonArray succ = new JsonArray();
  JsonArray fail = new JsonArray();
  AmazonS3 s3 = PersistS3.getClient();
  ObjectListing currentList = s3.listObjects(bucket);
  processListing(currentList, succ, fail);
  while(currentList.isTruncated()){
    currentList = s3.listNextBatchOfObjects(currentList);
    processListing(currentList, succ, fail);
  }
  json.add(NUM_SUCCEEDED, new JsonPrimitive(succ.size()));
  json.add(SUCCEEDED, succ);
  json.add(NUM_FAILED, new JsonPrimitive(fail.size()));
  json.add(FAILED, fail);
  DKV.write_barrier();
  Response r = Response.done(json);
  r.setBuilder(SUCCEEDED + "." + KEY, new KeyCellBuilder());
  return r;
}
 
Example 6
Source File: LocalstackContainerTest.java    From testcontainers-java with MIT License
@Test
public void s3TestOverBridgeNetwork() throws IOException {
    AmazonS3 s3 = AmazonS3ClientBuilder
        .standard()
        .withEndpointConfiguration(localstack.getEndpointConfiguration(S3))
        .withCredentials(localstack.getDefaultCredentialsProvider())
        .build();

    final String bucketName = "foo";
    s3.createBucket(bucketName);
    s3.putObject(bucketName, "bar", "baz");

    final List<Bucket> buckets = s3.listBuckets();
    final Optional<Bucket> maybeBucket = buckets.stream().filter(b -> b.getName().equals(bucketName)).findFirst();
    assertTrue("The created bucket is present", maybeBucket.isPresent());
    final Bucket bucket = maybeBucket.get();

    assertEquals("The created bucket has the right name", bucketName, bucket.getName());

    final ObjectListing objectListing = s3.listObjects(bucketName);
    assertEquals("The created bucket has 1 item in it", 1, objectListing.getObjectSummaries().size());

    final S3Object object = s3.getObject(bucketName, "bar");
    final String content = IOUtils.toString(object.getObjectContent(), Charset.forName("UTF-8"));
    assertEquals("The object can be retrieved", "baz", content);
}
 
Example 7
Source File: MCAWS.java    From aws-big-data-blog with Apache License 2.0
public static void listBucketItems(String bucketName) {
    System.out.println("Connecting to AWS");
    System.out.println("Listing files in bucket " + bucketName);
    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    System.out.println("Listing objects");
    ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
            .withBucketName(bucketName));
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println(" - " + objectSummary.getKey() + "  " +
                "(size = " + objectSummary.getSize() + ")");
    }
    System.out.println();
}
 
Example 8
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static List<S3ObjectSummary> listObjects(AmazonS3 client, String bucket) {
  ObjectListing objects = client.listObjects(bucket);
  List<S3ObjectSummary> result = new ArrayList<>(objects.getObjectSummaries().size());
  for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
    if (objectSummary.getKey().endsWith("/") || objectSummary.getKey().endsWith("_$folder$")) {
      continue;
    }
    result.add(objectSummary);
  }
  return result;
}
 
Example 9
Source File: S3UploadAllCallable.java    From jobcacher-plugin with MIT License
private Map<String,S3ObjectSummary> lookupExistingCacheEntries(AmazonS3 s3) {
    Map<String,S3ObjectSummary> summaries = new HashMap<>();

    ObjectListing listing = s3.listObjects(bucketName, pathPrefix);
    do {
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            summaries.put(summary.getKey(), summary);
        }
        listing = listing.isTruncated() ? s3.listNextBatchOfObjects(listing) : null;
    } while (listing != null);

    return summaries;
}
 
Example 10
Source File: AWSSdkClient.java    From incubator-gobblin with Apache License 2.0
/***
 * Get list of S3 objects within an S3 bucket qualified by prefix path
 *
 * @param bucketName S3 bucket name
 * @param prefix S3 prefix to object
 * @return List of {@link S3ObjectSummary} objects within the bucket qualified by prefix path
 */
public List<S3ObjectSummary> listS3Bucket(String bucketName,
    String prefix) {

  final AmazonS3 amazonS3 = getS3Client();

  final ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
      .withBucketName(bucketName)
      .withPrefix(prefix);

  final ObjectListing objectListing = amazonS3.listObjects(listObjectsRequest);
  LOGGER.info("S3 bucket listing for bucket: " + bucketName + " with prefix: " + prefix + " is: " + objectListing);

  return objectListing.getObjectSummaries();
}
 
Example 11
Source File: AmazonBucketClientImpl.java    From molgenis with GNU Lesser General Public License v3.0
private String getMostRecentMatchingKey(AmazonS3 s3Client, String bucketName, String regex) {
  ObjectListing objectListing = s3Client.listObjects(bucketName);
  TreeMap<Date, String> keys = new TreeMap<>();
  for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
    if (objectSummary.getKey().matches(regex)) {
      keys.put(objectSummary.getLastModified(), objectSummary.getKey());
    }
  }
  if (keys.size() == 0)
    throw new MolgenisDataException("No key matching regular expression: " + regex);
  return keys.lastEntry().getValue();
}
 
Example 12
Source File: S3BackupRestoreTest.java    From cassandra-backup with Apache License 2.0
@Test
@Ignore
public void testDownloadOfRemoteManifest() throws Exception {
    S3BucketService s3BucketService = new S3BucketService(getTransferManagerFactory(), getBackupOperationRequest());

    try {
        s3BucketService.create(BUCKET_NAME);

        AmazonS3 amazonS3Client = getTransferManagerFactory().build(getBackupOperationRequest()).getAmazonS3Client();

        amazonS3Client.putObject(BUCKET_NAME, "cluster/dc/node/manifests/snapshot-name-" + BUCKET_NAME, "hello");

        Thread.sleep(5000);

        ObjectListing objectListing = amazonS3Client.listObjects(BUCKET_NAME);

        objectListing.getObjectSummaries().forEach(summary -> System.out.println(summary.getKey()));

        RestoreOperationRequest restoreOperationRequest = new RestoreOperationRequest();
        restoreOperationRequest.storageLocation = new StorageLocation("s3://" + BUCKET_NAME + "/cluster/dc/node");

        S3Restorer s3Restorer = new S3Restorer(getTransferManagerFactory(), new FixedTasksExecutorSupplier(), restoreOperationRequest);

        final Path downloadedFile = s3Restorer.downloadFileToDir(Paths.get("/tmp"), Paths.get("manifests"), new Predicate<String>() {
            @Override
            public boolean test(final String s) {
                return s.contains("manifests/snapshot-name");
            }
        });

        assertTrue(Files.exists(downloadedFile));
    } finally {
        s3BucketService.delete(BUCKET_NAME);
        deleteDirectory(Paths.get(target("commitlog_download_dir")));
    }
}
 
Example 13
Source File: SpringLocalstackDockerRunnerTest.java    From spring-localstack with Apache License 2.0
@Test
public void testS3() throws Exception {
    AmazonS3 client = amazonDockerClientsHolder.amazonS3();

    client.createBucket("test-bucket");
    List<Bucket> bucketList = client.listBuckets();

    assertThat(bucketList.size(), is(1));

    File file = File.createTempFile("localstack", "s3");
    file.deleteOnExit();

    try (FileOutputStream stream = new FileOutputStream(file)) {
        String content = "HELLO WORLD!";
        stream.write(content.getBytes());
    }

    PutObjectRequest request = new PutObjectRequest("test-bucket", "testData", file);
    client.putObject(request);

    ObjectListing listing = client.listObjects("test-bucket");
    assertThat(listing.getObjectSummaries().size(), is(1));

    S3Object s3Object = client.getObject("test-bucket", "testData");
    String resultContent = IOUtils.toString(s3Object.getObjectContent());

    assertThat(resultContent, is("HELLO WORLD!"));
}
 
Example 14
Source File: BucketClass.java    From cloudExplorer with GNU General Public License v3.0
String getObjectInfo(String key, String access_key,
        String secret_key, String bucket,
        String endpoint, String process
) {
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials,
            new ClientConfiguration());
    if (endpoint.contains("amazonaws.com")) {
        String aws_endpoint = s3Client.getBucketLocation(new GetBucketLocationRequest(bucket));
        if (aws_endpoint.contains("US")) {
            s3Client.setEndpoint("https://s3.amazonaws.com");
        } else if (aws_endpoint.contains("us-west")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else if (aws_endpoint.contains("eu-west")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else if (aws_endpoint.contains("ap-")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else if (aws_endpoint.contains("sa-east-1")) {
            s3Client.setEndpoint("https://s3-" + aws_endpoint + ".amazonaws.com");
        } else {
            s3Client.setEndpoint("https://s3." + aws_endpoint + ".amazonaws.com");
        }
    } else {
        s3Client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
        s3Client.setEndpoint(endpoint);
    }
    objectlist = null;

    try {
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket);
        ObjectListing objectListing;
        do {
            objectListing = s3Client.listObjects(listObjectsRequest);

            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                if (process.contains("checkmd5")) {
                    if (objectSummary.getKey().contains(key)) {
                        objectlist = objectSummary.getETag();
                        break;
                    }
                }
                if (process.contains("objectsize")) {
                    if (objectSummary.getKey().contains(key)) {
                        objectlist = String.valueOf(objectSummary.getSize());
                        break;
                    }
                }

                if (process.contains("objectdate")) {
                    if (objectSummary.getKey().contains(key)) {
                        objectlist = String.valueOf(objectSummary.getLastModified());
                        break;
                    }

                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());

    } catch (Exception listBucket) {
        //  mainFrame.jTextArea1.append("\n" + listBucket.getMessage());
    }

    return objectlist;
}
 
Example 15
Source File: S3Samples.java    From aws-sdk-java-samples with Apache License 2.0
public static void main(String[] args) throws IOException {
    //BEGIN_SAMPLE:AmazonS3.CreateClient
    //TITLE:Create an S3 client
    //DESCRIPTION:Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users)
    AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();
    Region usWest2 = com.amazonaws.regions.Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    //END_SAMPLE

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        System.out.println("Creating bucket " + bucketName + "\n");

        //BEGIN_SAMPLE:AmazonS3.CreateBucket
        //TITLE:Create an S3 bucket
        //DESCRIPTION:Amazon S3 bucket names are globally unique, so once a bucket name has been taken by any user, you can't create another bucket with that same name.
        s3.createBucket(bucketName);
        //END_SAMPLE


        System.out.println("Listing buckets");
        //BEGIN_SAMPLE:AmazonS3.ListBuckets
        //TITLE:List buckets
        //DESCRIPTION:List the buckets in your account
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();
        //END_SAMPLE


        System.out.println("Uploading a new object to S3 from a file\n");
        //BEGIN_SAMPLE:AmazonS3.PutObject
        //TITLE:Upload an object to a bucket
        //DESCRIPTION:You can easily upload a file to S3, or upload an InputStream directly if you know the length of the data in the stream.
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));
        //END_SAMPLE
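        // Hedged aside (not in the original sample): the DESCRIPTION above also mentions uploading
        // directly from an InputStream when the content length is known. The key "MyStreamKey" and the
        // imports for ByteArrayInputStream, StandardCharsets and ObjectMetadata are assumptions here.
        byte[] streamData = "Hello from an InputStream".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata streamMetadata = new ObjectMetadata();
        streamMetadata.setContentLength(streamData.length);
        s3.putObject(bucketName, "MyStreamKey", new ByteArrayInputStream(streamData), streamMetadata);
        // Delete the extra object so the rest of the sample (including deleteBucket) is unaffected.
        s3.deleteObject(bucketName, "MyStreamKey");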

        System.out.println("Downloading an object");
        //BEGIN_SAMPLE:AmazonS3.GetObject
        //TITLE:Download an S3 object.
        //DESCRIPTION:When you download an object, you get all of the object's metadata and a stream from which to read the contents.
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        //END_SAMPLE
        System.out.println("Content-Type: "  + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());


        System.out.println("Listing objects");

        //BEGIN_SAMPLE:AmazonS3.ListObjects
        //TITLE:List S3 objects in bucket
        //DESCRIPTION:List objects in your bucket by prefix. Keep in mind that listing a bucket with many objects can return truncated results, so be sure to check whether the returned object listing is truncated.
        ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
                .withBucketName(bucketName)
                .withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + "  " +
                    "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();
        //END_SAMPLE
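        // Hedged continuation (not in the original sample): the DESCRIPTION above advises checking
        // for truncation, which the loop above does not do. A minimal follow-up, reusing the
        // objectListing variable:
        while (objectListing.isTruncated()) {
            objectListing = s3.listNextBatchOfObjects(objectListing);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                System.out.println(" - " + objectSummary.getKey() + "  " +
                        "(size = " + objectSummary.getSize() + ")");
            }
        }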


        System.out.println("Deleting an object\n");

        //BEGIN_SAMPLE:AmazonS3.DeleteObject
        //TITLE:Delete an S3 object
        //DESCRIPTION:Unless versioning has been turned on for your bucket, there is no way to undelete an object, so use caution when deleting objects.
        s3.deleteObject(bucketName, key);
        //END_SAMPLE


        System.out.println("Deleting bucket " + bucketName + "\n");

        //BEGIN_SAMPLE:AmazonS3.DeleteBucket
        //TITLE:Delete an S3 bucket
        //DESCRIPTION:A bucket must be completely empty before it can be deleted, so remember to delete any objects from your buckets before you try to delete them.
        s3.deleteBucket(bucketName);
        //END_SAMPLE
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
 
Example 16
Source File: AmazonS3Util.java    From datacollector with Apache License 2.0
/**
 * Lists objects from AmazonS3 in lexicographical order
 *
 * @param s3Client AmazonS3 client used to list the objects
 * @param s3ConfigBean S3 source configuration (bucket, common prefix and prefix pattern)
 * @param pathMatcher glob patterns to match file names against
 * @param s3Offset current offset which provides the key name of the previous object
 * @param fetchSize number of objects to fetch in one go
 * @return list of at most fetchSize {@link S3ObjectSummary} objects in lexicographical order
 * @throws AmazonClientException
 */
static List<S3ObjectSummary> listObjectsLexicographically(
    AmazonS3 s3Client,
    S3ConfigBean s3ConfigBean,
    AntPathMatcher pathMatcher,
    S3Offset s3Offset,
    int fetchSize
) {
  // Incrementally scan objects after the marker (s3Offset).
  List<S3ObjectSummary> list = new ArrayList<>(fetchSize);

  ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
  listObjectsRequest.setBucketName(s3ConfigBean.s3Config.bucket);
  listObjectsRequest.setPrefix(s3ConfigBean.s3Config.commonPrefix);
  listObjectsRequest.setMaxKeys(BATCH_SIZE);

  if (s3Offset.getKey() != null) {
    if (!s3Offset.getKey().isEmpty() && parseOffset(s3Offset) != -1) {
      S3ObjectSummary currentObjectSummary = getObjectSummary(s3Client, s3ConfigBean.s3Config.bucket, s3Offset.getKey());
      list.add(currentObjectSummary);
    }
    listObjectsRequest.setMarker(s3Offset.getKey());
  }

  ObjectListing objectListing = s3Client.listObjects(listObjectsRequest);

  while (true) {
    for (S3ObjectSummary s : objectListing.getObjectSummaries()) {
      String fullPrefix = s.getKey();
      String remainingPrefix = fullPrefix.substring(s3ConfigBean.s3Config.commonPrefix.length(), fullPrefix.length());
      if (!remainingPrefix.isEmpty()) {
        if (pathMatcher.match(s3ConfigBean.s3FileConfig.prefixPattern, remainingPrefix)) {
          list.add(s);
        }
        // We've got enough objects.
        if (list.size() == fetchSize) {
          return list;
        }
      }
    }
    // Listing is complete. No more objects to be listed.
    if (!objectListing.isTruncated()) {
      break;
    }
    objectListing = s3Client.listNextBatchOfObjects(objectListing);
  }

  return list;
}
 
Example 17
Source File: S3ChangeLogStore.java    From athenz with Apache License 2.0
/**
 * List the objects in the ZTS bucket. If the mod time is specified as 0,
 * we list all objects; otherwise, we only list objects
 * that are newer than the specified timestamp.
 * @param s3 AWS S3 client object
 * @param domains collection to be updated to include domain names
 * @param modTime only include domains newer than this timestamp
 */
void listObjects(AmazonS3 s3, Collection<String> domains, long modTime) {
    
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("listObjects: Retrieving domains from {} with mod time > {}",
                s3BucketName, modTime);
    }
    
    ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
            .withBucketName(s3BucketName));
    
    String objectName;
    while (objectListing != null) {
        
        // process each entry in our result set and add the domain
        // name to our return list

        final List<S3ObjectSummary> objectSummaries = objectListing.getObjectSummaries();
        boolean listTruncated = objectListing.isTruncated();
        
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("listObjects: retrieved {} objects, more objects available - {}",
                    objectSummaries.size(), listTruncated);
        }
        
        for (S3ObjectSummary objectSummary : objectSummaries) {
            
            // if mod time is specified then make sure we automatically skip
            // any domains older than the specified value
            
            if (modTime > 0 && objectSummary.getLastModified().getTime() <= modTime) {
                continue;
            }
            
            // for now skip any folders/objects that start with '.'
            
            objectName = objectSummary.getKey();
            if (objectName.charAt(0) == '.') {
                continue;
            }
            domains.add(objectName);
        }
        
        // check if the object listing is truncated or not (break out in this case)
        // technically we can skip this call and just call listNextBatchOfObjects
        // since that returns null if the object listing is not truncated but 
        // this direct check here makes the logic easier to follow
        
        if (!listTruncated) {
            break;
        }
        
        objectListing = s3.listNextBatchOfObjects(objectListing);
    }
}
 
Example 18
Source File: S3OperationsImpl.java    From herd with Apache License 2.0
@Override
public ObjectListing listObjects(ListObjectsRequest listObjectsRequest, AmazonS3 s3Client)
{
    return s3Client.listObjects(listObjectsRequest);
}
 
Example 19
Source File: CloudFormationClient.java    From herd-mdl with Apache License 2.0
/**
 * Delete the stack {@link #stackName}
 */
public void deleteStack() throws Exception {

    CFTStackInfo cftStackInfo = getStackInfo();
    String rootStackId = cftStackInfo.stackId(); // Use the stack id to track the delete operation
    LOGGER.info("rootStackId   =   " + rootStackId);

    // Go through the stack and pick up resources that we want
    // to finalize before deleting the stack.
    List<String> s3BucketIds = new ArrayList<>();

    DescribeStacksResult describeStacksResult = amazonCloudFormation.describeStacks();
    for (Stack currentStack : describeStacksResult.getStacks()) {
        if (rootStackId.equals(currentStack.getRootId()) || rootStackId
                .equals(currentStack.getStackId())) {
            LOGGER.info("stackId   =   " + currentStack.getStackId());
            DescribeStackResourcesRequest describeStackResourcesRequest = new DescribeStackResourcesRequest();
            describeStackResourcesRequest.setStackName(currentStack.getStackName());
            List<StackResource> stackResources = amazonCloudFormation
                    .describeStackResources(describeStackResourcesRequest).getStackResources();
            for (StackResource stackResource : stackResources) {
                if (!stackResource.getResourceStatus()
                        .equals(ResourceStatus.DELETE_COMPLETE.toString())) {
                    if (stackResource.getResourceType().equals("AWS::S3::Bucket")) {
                        s3BucketIds.add(stackResource.getPhysicalResourceId());
                    }
                }
            }
        }
    }

    // Now empty S3 buckets, clean up will be done when the stack is deleted
    AmazonS3 amazonS3 = AmazonS3ClientBuilder.standard().withRegion(Regions.getCurrentRegion().getName())
            .withCredentials(new InstanceProfileCredentialsProvider(true)).build();
    for (String s3BucketPhysicalId : s3BucketIds) {
        String s3BucketName = s3BucketPhysicalId;
        if (!amazonS3.doesBucketExistV2(s3BucketName)) {
            continue;
        }
        LOGGER.info("Empyting S3 bucket, " + s3BucketName);
        ObjectListing objectListing = amazonS3.listObjects(s3BucketName);
        while (true) {
            for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
                amazonS3.deleteObject(s3BucketName, summary.getKey());
            }
            if (objectListing.isTruncated()) {
                objectListing = amazonS3.listNextBatchOfObjects(objectListing);
            }
            else {
                break;
            }
        }
    }

    //Proceed with the regular stack deletion operation
    DeleteStackRequest deleteRequest = new DeleteStackRequest();
    deleteRequest.setStackName(stackName);
    amazonCloudFormation.deleteStack(deleteRequest);
    LOGGER.info("Stack deletion initiated");

    CFTStackStatus cftStackStatus = waitForCompletionAndGetStackStatus(amazonCloudFormation,
            rootStackId);
    LOGGER.info(
            "Stack deletion completed, the stack " + stackName + " completed with " + cftStackStatus);

    // Throw exception if failed
    if (!cftStackStatus.getStackStatus().equals(StackStatus.DELETE_COMPLETE.toString())) {
        throw new Exception(
                "deleteStack operation failed for stack " + stackName + " - " + cftStackStatus);
    }
}
 
Example 20
Source File: ListAws.java    From bidder with Apache License 2.0
public static void main(String[] args) throws Exception {

    BasicAWSCredentials creds = new BasicAWSCredentials(accessKey, secretAccessKey);
    AmazonS3 s3 = AmazonS3ClientBuilder.standard().withCredentials(new AWSStaticCredentialsProvider(creds)).build();

    ObjectListing listing = s3.listObjects(new ListObjectsRequest().withBucketName(s3_bucket));

    processDirectory(s3, listing, s3_bucket);
}