Java Code Examples for com.amazonaws.services.s3.model.AmazonS3Exception#getStatusCode()

The following examples show how to use com.amazonaws.services.s3.model.AmazonS3Exception#getStatusCode(). They are taken from open-source projects; the source file and originating project are noted above each example.
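Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: call an S3 operation, catch AmazonS3Exception, and branch on getStatusCode(), which carries the HTTP status of the failed request. The class name, bucket, and key below are placeholders for illustration only and are not taken from any of the projects listed.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AmazonS3Exception;

public class GetStatusCodeDemo {

    // Returns true if the object exists, false on a 404, and rethrows any other failure.
    static boolean objectExists(AmazonS3 s3, String bucket, String key) {
        try {
            s3.getObjectMetadata(bucket, key);
            return true;
        } catch (AmazonS3Exception e) {
            // getStatusCode() exposes the HTTP status of the failed request (e.g. 404, 403, 301, 503).
            if (e.getStatusCode() == 404) {
                return false;
            }
            throw e;
        }
    }

    public static void main(String[] args) {
        // Placeholder bucket and key; replace with real values before running.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        System.out.println(objectExists(s3, "example-bucket", "example/key.txt"));
    }
}

The examples below apply the same check to drive retries, region redirects, and permission handling; several of them also consult getErrorCode() when the HTTP status alone is ambiguous.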
Example 1
Source File: AwsObjectStorageConnector.java    From cloudbreak with Apache License 2.0
@Override
public ObjectStorageMetadataResponse getObjectStorageMetadata(ObjectStorageMetadataRequest request) {
    AwsCredentialView awsCredentialView = new AwsCredentialView(request.getCredential());
    try {
        AmazonS3 s3Client = awsClient.createS3Client(awsCredentialView);
        String bucketLocation = fixBucketLocation(s3Client.getBucketLocation(request.getObjectStoragePath()));
        return ObjectStorageMetadataResponse.builder()
                .withRegion(bucketLocation)
                .withStatus(ResponseStatus.OK)
                .build();
    } catch (AmazonS3Exception e) {
        // HACK let's assume that if the user gets back 403 Access Denied it is because s/he does not have the s3:GetBucketLocation permission.
        // It is also true though that if the bucket indeed exists but it is in another account or otherwise denied from the requesting user,
        // the same error code will be returned. However, this hack is mainly for QAAS.
        if (e.getStatusCode() != ACCESS_DENIED_ERROR_CODE) {
            throw new CloudConnectorException(String.format("Cannot get object storage location for %s. "
                    + "Provider error message: %s", request.getObjectStoragePath(), e.getErrorMessage()), e);
        }
        return ObjectStorageMetadataResponse.builder()
                .withStatus(ResponseStatus.ACCESS_DENIED)
                .build();
    }
}
 
Example 2
Source File: AmazonS3Provider.java    From emodb with Apache License 2.0
public String getRegionForBucket(String bucket) {
    // Just querying for the location for a bucket can be done with the local client
    AmazonS3 client = getLocalS3Client();
    try {
        String region = client.getBucketLocation(bucket);
        if ("US".equals(region)) {
            // GetBucketLocation requests return null for us-east-1 which the SDK then replaces with "US".
            // So change it to the actual region.
            region = "us-east-1";
        }
        return region;
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == Response.Status.NOT_FOUND.getStatusCode()) {
            // If the bucket doesn't exist then return null
            return null;
        }
        throw e;
    }
}
 
Example 3
Source File: S3ScanWriter.java    From emodb with Apache License 2.0
@Override
protected boolean writeScanCompleteFile(URI fileUri, byte[] contents)
        throws IOException {
    String bucket = fileUri.getHost();
    String key = getKeyFromPath(fileUri);

    try {
        // The following will throw an exception unless the file already exists
        _amazonS3.getObjectMetadata(bucket, key);
        return false;
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() != Response.Status.NOT_FOUND.getStatusCode()) {
            // Expected case is not found, meaning the file does not exist
            // All other cases are some unexpected error
            throw new IOException(e);
        }
    }

    uploadContents(bucket, key, contents);
    return true;
}
 
Example 4
Source File: S3Service.java    From modeldb with Apache License 2.0
@Override
public void commitMultipart(String s3Key, String uploadId, List<PartETag> partETags)
    throws ModelDBException {
  // Validate bucket
  Boolean exist = doesBucketExist(bucketName);
  if (!exist) {
    throw new ModelDBException("Bucket does not exists", io.grpc.Status.Code.INTERNAL);
  }
  CompleteMultipartUploadRequest completeMultipartUploadRequest =
      new CompleteMultipartUploadRequest(bucketName, s3Key, uploadId, partETags);
  try {
    CompleteMultipartUploadResult result =
        s3Client.completeMultipartUpload(completeMultipartUploadRequest);
    LOGGER.info("upload result: {}", result);
  } catch (AmazonS3Exception e) {
    if (e.getStatusCode() == HttpStatusCodes.STATUS_CODE_BAD_REQUEST) {
      LOGGER.info("message: {} additional details: {}", e.getMessage(), e.getAdditionalDetails());
      throw new ModelDBException(e.getErrorMessage(), io.grpc.Status.Code.FAILED_PRECONDITION);
    }
    throw e;
  }
}
 
Example 5
Source File: SimpleStorageResource.java    From spring-cloud-aws with Apache License 2.0
private ObjectMetadata getObjectMetadata() {
	if (this.objectMetadata == null) {
		try {
			GetObjectMetadataRequest metadataRequest = new GetObjectMetadataRequest(
					this.bucketName, this.objectName);
			if (this.versionId != null) {
				metadataRequest.setVersionId(this.versionId);
			}
			this.objectMetadata = this.amazonS3.getObjectMetadata(metadataRequest);
		}
		catch (AmazonS3Exception e) {
			// Catch 404 (object not found) and 301 (bucket not found, moved
			// permanently)
			if (e.getStatusCode() == 404 || e.getStatusCode() == 301) {
				this.objectMetadata = null;
			}
			else {
				throw e;
			}
		}
	}
	return this.objectMetadata;
}
 
Example 6
Source File: WallRideResourceTemplateResource.java    From wallride with Apache License 2.0
public Reader reader() throws IOException {
	// Will never return null; throws an IOException instead if the resource is not found
	try {
		final InputStream inputStream = this.resource.getInputStream();
		if (!StringUtils.isEmptyOrWhitespace(this.characterEncoding)) {
			return new BufferedReader(new InputStreamReader(new BufferedInputStream(inputStream), this.characterEncoding));
		}

		return new BufferedReader(new InputStreamReader(new BufferedInputStream(inputStream)));
	} catch (AmazonS3Exception e) {
		if (e.getStatusCode() == 404) {
			throw new IOException(e);
		}
		throw e;
	}
}
 
Example 7
Source File: AmazonS3ProxyFactory.java    From spring-cloud-aws with Apache License 2.0
@Override
public Object invoke(MethodInvocation invocation) throws Throwable {
	try {
		return invocation.proceed();
	}
	catch (AmazonS3Exception e) {
		// 301 Moved Permanently: the bucket is served from a different region, so
		// retry the same call against a client bound to the redirect location.
		if (301 == e.getStatusCode()) {
			AmazonS3 redirectClient = buildAmazonS3ForRedirectLocation(
					this.amazonS3, e);
			return ReflectionUtils.invokeMethod(invocation.getMethod(),
					redirectClient, invocation.getArguments());
		}
		else {
			throw e;
		}
	}
}
 
Example 8
Source File: S3RateLimiter.java    From emodb with Apache License 2.0
private boolean isRequestRateExceededException(Throwable t) {
    if (t instanceof AmazonS3Exception) {
        AmazonS3Exception e = (AmazonS3Exception) t;
        // Several ways AWS communicates rate limit exceeded: 503 status codes and "SlowDown" error codes.
        // Check for either.
        return e.getStatusCode() == HttpStatus.SC_SERVICE_UNAVAILABLE ||
                (e.getErrorCode() != null && e.getErrorCode().toLowerCase().contains("slowdown"));

    }
    return false;
}
 
Example 9
Source File: StashReader.java    From emodb with Apache License 2.0
private static String determineEndpointForBucket(String bucket, AWSCredentialsProvider credentialsProvider,
                                                 @Nullable ClientConfiguration s3Config, String rootPath) {

    // Guess us-east-1. If wrong, AWS will return a redirect with the correct endpoint.
    AmazonS3 s3 = createS3ClientForRegion(Regions.US_EAST_1.getName(), credentialsProvider, s3Config);
    if (rootPath.startsWith("/")) {
        rootPath = rootPath.substring(1);
    }
    if (!rootPath.endsWith("/")) {
        rootPath = rootPath + "/";
    }

    try {
        // Any request will work but presumably the client has list access for stash so perform a list.
        s3.listObjects(new ListObjectsRequest()
                .withBucketName(bucket)
                .withPrefix(rootPath)
                .withDelimiter("/")
                .withMaxKeys(1));

        // If this didn't error out then the presumed us-east-1 region was correct
        return  "s3.us-east-1.amazonaws.com";
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == 301 /* MOVED_PERMANENTLY */) {
            String endPoint = e.getAdditionalDetails().get("Endpoint");
            // The end point is prefixed with the bucket name, so strip it
            return endPoint.substring(bucket.length() + 1);
        }

        throw e;
    }
}
 
Example 10
Source File: S3BucketService.java    From cassandra-backup with Apache License 2.0
@Override
public void create(final String bucketName) {
    if (!doesExist(bucketName)) {
        try {
            transferManager.getAmazonS3Client().createBucket(bucketName);
        } catch (final AmazonS3Exception ex) {
            if (ex.getStatusCode() == 409 && "BucketAlreadyOwnedByYou".equals(ex.getErrorCode())) {
                logger.warn(ex.getErrorMessage());
            } else {
                throw new S3ModuleException(format("Unable to create bucket %s", bucketName), ex);
            }
        }
    }
}
 
Example 11
Source File: ScholarBucketPaperSource.java    From science-parse with Apache License 2.0
private S3Object getS3Object(final String paperId) {
    final String key = paperId.substring(0, 4) + "/" + paperId.substring(4) + ".pdf";

    for(int bucketIndex = 0; bucketIndex < buckets.length; ++bucketIndex) {
        try {
            return s3.getObject(buckets[bucketIndex], key);
        } catch (final AmazonS3Exception e) {
            if(bucketIndex < buckets.length - 1 && e.getStatusCode() == 404)
                continue;   // Try again with the next bucket.

            final AmazonS3Exception rethrown =
                new AmazonS3Exception(
                    String.format(
                        "Error for key s3://%s/%s",
                        bucket,
                        key),
                    e);
            rethrown.setExtendedRequestId(e.getExtendedRequestId());
            rethrown.setErrorCode(e.getErrorCode());
            rethrown.setErrorType(e.getErrorType());
            rethrown.setRequestId(e.getRequestId());
            rethrown.setServiceName(e.getServiceName());
            rethrown.setStatusCode(e.getStatusCode());
            throw rethrown;
        }
    }

    throw new IllegalStateException("We should never get here.");
}
 
Example 12
Source File: AwsS3ObjectStorage.java    From james-project with Apache License 2.0
private boolean needToCreateBucket(Throwable th) {
    if (th instanceof AmazonS3Exception) {
        AmazonS3Exception s3Exception = (AmazonS3Exception) th;
        return NOT_FOUND_STATUS_CODE == s3Exception.getStatusCode()
            && BUCKET_NOT_FOUND_ERROR_CODE.equals(s3Exception.getErrorCode());
    }

    return false;
}
 
Example 13
Source File: PathMatchingSimpleStorageResourcePatternResolver.java    From spring-cloud-aws with Apache License 2.0
private void findAllResourcesThatMatches(String bucketName, Set<Resource> resources,
		String prefix, String keyPattern) {
	ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
			.withBucketName(bucketName).withPrefix(prefix);
	ObjectListing objectListing = null;

	do {
		try {
			if (objectListing == null) {
				objectListing = this.amazonS3.listObjects(listObjectsRequest);
			}
			else {
				objectListing = this.amazonS3.listNextBatchOfObjects(objectListing);
			}
			Set<Resource> newResources = getResourcesFromObjectSummaries(bucketName,
					keyPattern, objectListing.getObjectSummaries());
			if (!newResources.isEmpty()) {
				resources.addAll(newResources);
			}
		}
		catch (AmazonS3Exception e) {
			if (301 != e.getStatusCode()) {
				throw e;
			}
		}
	}
	while (objectListing != null && objectListing.isTruncated());
}
 
Example 14
Source File: S3DaoImpl.java    From herd with Apache License 2.0
@Override
public void restoreObjects(final S3FileTransferRequestParamsDto params, int expirationInDays, String archiveRetrievalOption)
{
    LOGGER.info("Restoring a list of objects in S3... s3KeyPrefix=\"{}\" s3BucketName=\"{}\" s3KeyCount={}", params.getS3KeyPrefix(),
        params.getS3BucketName(), params.getFiles().size());

    if (!CollectionUtils.isEmpty(params.getFiles()))
    {
        // Initialize the key used in the error message within the catch block.
        String key = params.getFiles().get(0).getPath().replaceAll("\\\\", "/");

        try
        {
            // Create an S3 client.
            AmazonS3Client s3Client = getAmazonS3(params);

            // Create a restore object request.
            RestoreObjectRequest requestRestore = new RestoreObjectRequest(params.getS3BucketName(), null, expirationInDays);
            // Make Bulk the default archive retrieval option if the option is not provided
            requestRestore.setGlacierJobParameters(new GlacierJobParameters().withTier(
                StringUtils.isNotEmpty(archiveRetrievalOption) ? archiveRetrievalOption : Tier.Bulk.toString()));

            try
            {
                for (File file : params.getFiles())
                {
                    key = file.getPath().replaceAll("\\\\", "/");
                    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(params.getS3BucketName(), key, s3Client);

                    // Request a restore for objects that are not already being restored.
                    if (BooleanUtils.isNotTrue(objectMetadata.getOngoingRestore()))
                    {
                        requestRestore.setKey(key);

                        try
                        {
                            // Try the S3 restore operation on this file.
                            s3Operations.restoreObject(requestRestore, s3Client);
                        }
                        catch (AmazonS3Exception amazonS3Exception)
                        {
                            // If this exception has a status code of 409, log the information and continue to the next file.
                            if (amazonS3Exception.getStatusCode() == HttpStatus.SC_CONFLICT)
                            {
                                LOGGER.info("Restore already in progress for file with s3Key=\"{}\".", key);
                            }
                            // Else, we need to propagate the exception to the next level of try/catch block.
                            else
                            {
                                throw new Exception(amazonS3Exception);
                            }
                        }
                    }
                }
            }
            finally
            {
                s3Client.shutdown();
            }
        }
        catch (Exception e)
        {
            if (StringUtils.contains(e.getMessage(), "Retrieval option is not supported by this storage class"))
            {
                throw new IllegalArgumentException(String
                    .format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. Reason: %s", key, params.getS3BucketName(),
                        e.getMessage()), e);
            }
            else
            {
                throw new IllegalStateException(String
                    .format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. Reason: %s", key, params.getS3BucketName(),
                        e.getMessage()), e);
            }
        }
    }
}
 
Example 15
Source File: S3ConfigProvider.java    From exhibitor with Apache License 2.0
private boolean isNotFoundError(AmazonS3Exception e)
{
    // S3 can answer 403 Forbidden instead of 404 Not Found when the caller lacks
    // permission to list the bucket, so both status codes are treated as "not found" here.
    return (e.getStatusCode() == 404) || (e.getStatusCode() == 403);
}
 
Example 16
Source File: S3FileSystem.java    From dremio-oss with Apache License 2.0
@Override
protected ContainerHolder toContainerHolder() throws IOException {

  return new ContainerHolder(bucketName, new FileSystemSupplier() {
    @Override
    public FileSystem create() throws IOException {
      final String targetEndpoint;
      Optional<String> endpoint = getEndpoint(getConf());

      if (isCompatMode() && endpoint.isPresent()) {
        // if this is compatibility mode and we have an endpoint, just use that.
        targetEndpoint = endpoint.get();
      } else {
        try {
          final String bucketRegion = s3.getBucketLocation(bucketName);
          final String fallbackEndpoint = endpoint.orElseGet(() -> String.format("%ss3.%s.amazonaws.com", getHttpScheme(getConf()), bucketRegion));

          String regionEndpoint = fallbackEndpoint;
          try {
            Region region = Region.fromValue(bucketRegion);
            com.amazonaws.regions.Region awsRegion = region.toAWSRegion();
            if (awsRegion != null) {
              regionEndpoint = awsRegion.getServiceEndpoint("s3");
            }
          } catch (IllegalArgumentException iae) {
            // try heuristic mapping if not found
            regionEndpoint = fallbackEndpoint;
            logger.warn("Unknown or unmapped region {} for bucket {}. Will use following endpoint: {}",
              bucketRegion, bucketName, regionEndpoint);
          }
          // regionEndpoint could be null because there is no mapping from Region to an AWS region,
          // or there is no such region in the map of endpoints (not sure if the latter is possible)
          if (regionEndpoint == null) {
            logger.error("Could not get AWSRegion for bucket {}. Will use following fs.s3a.endpoint: {} ",
              bucketName, fallbackEndpoint);
          }
          targetEndpoint = (regionEndpoint != null) ? regionEndpoint : fallbackEndpoint;

        } catch (AmazonS3Exception aex) {
          if (aex.getStatusCode() == 403) {
            throw UserException.permissionError(aex)
              .message(S3_PERMISSION_ERROR_MSG)
              .build(logger);
          }
          throw aex;
        }
      }

      String location = S3_URI_SCHEMA + bucketName + "/";
      final Configuration bucketConf = new Configuration(parentConf);
      bucketConf.set(ENDPOINT, targetEndpoint);
      return fsCache.get(new Path(location).toUri(), bucketConf, S3ClientKey.UNIQUE_PROPS);
    }
  });
}
 
Example 17
Source File: TOCPayloadValidator.java    From s3-bucket-loader with Apache License 2.0
public TocPathOpResult validateOnS3(TOCPayload payload) {
	
	if (s3Client == null || s3BucketName == null) {
		throw new RuntimeException("Cannot validateOnS3(), TOCPayloadValidator is not configured w/ s3Client or bucket name");
	}

	try {
		String keyToCheck = toc2Key(payload.tocInfo.getPath(),payload.tocInfo.isDirectory);
		logger.debug("validateOnS3() " + keyToCheck);
		
		ObjectMetadata md = s3Client.getObjectMetadata(getS3BucketName(), keyToCheck);
		
		// size does not match!
		if (payload.tocInfo.size != md.getContentLength()) {
			
			logger.error("validateOnS3() S3 object length does not match! " +
					"" + keyToCheck + " expected:" + payload.tocInfo.size + " actual:" + md.getContentLength());;
					
			return new TocPathOpResult(payload.mode, false, payload.tocInfo.getPath(),
							"s3.check.content.length", "expected:"+ payload.tocInfo.size + " actual:"+md.getContentLength());

		} 
		
		// SUCCESS (no 404 so size matches and it exists)
		return new TocPathOpResult(payload.mode, true, payload.tocInfo.getPath(), "s3.check", "ok");
		
	} catch(AmazonS3Exception e) {
		
		// 404
		if (e.getStatusCode() == 404) {
			
			logger.error("validateOnS3() " + payload.tocInfo.getPath() + " s3check returned 404");
			
			return new TocPathOpResult(payload.mode, false, payload.tocInfo.getPath(),
					"s3.check.404", "key not found 404 at " + this.getS3BucketName());
			
		// other error
		} else {
			
			logger.error("validateOnS3() " + payload.tocInfo.getPath() + " unexpected error: " + e.getMessage(),e);
			
			return new TocPathOpResult(payload.mode, false, payload.tocInfo.getPath(),
					"s3.check.error", "error getting object metadata: " + e.getMessage());
		}
	}
	
}