com.amazonaws.services.s3.AmazonS3 Java Examples
The following examples show how to use com.amazonaws.services.s3.AmazonS3. Each example is drawn from an open-source project; the source file and license are noted above the code.
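Before the examples, here is a minimal sketch of obtaining an AmazonS3 instance. The region below is a placeholder assumption; credentials are resolved from the default provider chain (environment variables, ~/.aws/credentials, instance profile, and so on):

import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class S3ClientExample {
    public static void main(String[] args) {
        // Region is a placeholder; pick the region your buckets live in.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withRegion(Regions.US_EAST_1)
                .build();
        // Quick sanity check: list the buckets visible to these credentials.
        s3.listBuckets().forEach(b -> System.out.println(b.getName()));
    }
}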
Example #1
Source File: AwsS3BucketService.java From pacbot with Apache License 2.0
public boolean uploadFile(final AmazonS3 amazonS3, MultipartFile fileToUpload, String s3BucketName, String key) {
    try {
        File file = AdminUtils.convert(fileToUpload);
        long size = fileToUpload.getSize();
        String contentType = fileToUpload.getContentType();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(contentType);
        metadata.setContentLength(size);
        // PublicRead makes the uploaded object readable by anyone; only use it for content meant to be public.
        PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, key, file)
                .withCannedAcl(CannedAccessControlList.PublicRead);
        amazonS3.putObject(putObjectRequest);
        return Boolean.TRUE;
    } catch (IOException exception) {
        log.error(UNEXPECTED_ERROR_OCCURRED, exception);
    }
    return Boolean.FALSE;
}
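A hypothetical caller might look like the sketch below. The awsS3BucketService instance, MockMultipartFile (from spring-test), and the bucket and key names are illustrative assumptions, not part of the original project:

// Hypothetical caller; MockMultipartFile stands in for a real uploaded file.
byte[] csvBytes = "id,name\n1,example".getBytes(StandardCharsets.UTF_8);
MultipartFile fileToUpload = new MockMultipartFile("report.csv", "report.csv", "text/csv", csvBytes);
boolean uploaded = awsS3BucketService.uploadFile(amazonS3, fileToUpload, "my-bucket", "reports/report.csv");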
Example #2
Source File: S3Source.java From sequenceiq-samples with Apache License 2.0
@Override
protected void doStart() {
    AWSCredentials myCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3Client = new AmazonS3Client(myCredentials);
    ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket);
    ObjectListing objectListing = s3Client.listObjects(listObjectsRequest);
    ChannelProcessor channelProcessor = getChannelProcessor();
    for (S3ObjectSummary s3ObjectSummary : objectListing.getObjectSummaries()) {
        String file = s3ObjectSummary.getKey();
        LOGGER.info("Read the content of {}", file);
        GetObjectRequest objectRequest = new GetObjectRequest(bucket, file);
        S3Object objectPortion = s3Client.getObject(objectRequest);
        try {
            long startTime = System.currentTimeMillis();
            processLines(channelProcessor, objectPortion.getObjectContent());
            LOGGER.info("Processing of {} took {} ms", file, System.currentTimeMillis() - startTime);
        } catch (IOException e) {
            LOGGER.warn("Cannot process the {}, skipping", file, e);
        }
    }
}
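Note that listObjects returns at most one page of results (up to 1,000 keys), so the example above only processes the first page of the bucket. A sketch of the usual pagination loop, reusing the s3Client and bucket from the example, looks like this:

ObjectListing listing = s3Client.listObjects(new ListObjectsRequest().withBucketName(bucket));
do {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        // process summary.getKey() ...
    }
    // Fetch the next page, if any; listNextBatchOfObjects carries over the marker from the previous page.
    listing = listing.isTruncated() ? s3Client.listNextBatchOfObjects(listing) : null;
} while (listing != null);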
Example #3
Source File: AmazonS3OAuthStateService.java From java-slack-sdk with MIT License
@Override
public boolean isAvailableInDatabase(String state) {
    AmazonS3 s3 = this.createS3Client();
    S3Object s3Object = getObject(s3, getKey(state));
    if (s3Object == null) {
        return false;
    }
    String millisToExpire = null;
    try {
        millisToExpire = IOUtils.toString(s3Object.getObjectContent());
        return Long.valueOf(millisToExpire) > System.currentTimeMillis();
    } catch (IOException e) {
        log.error("Failed to load a state data for state: {}", state, e);
        return false;
    } catch (NumberFormatException ne) {
        log.error("Invalid state value detected - state: {}, millisToExpire: {}", state, millisToExpire);
        return false;
    }
}
Example #4
Source File: S3Util.java From teamcity-s3-artifact-storage-plugin with Apache License 2.0
public static <T> T withClientCorrectingRegion(@NotNull final AmazonS3 s3Client,
                                               @NotNull final Map<String, String> settings,
                                               @NotNull final WithS3<T, AmazonS3Exception> withCorrectedClient) {
    try {
        return withCorrectedClient.run(s3Client);
    } catch (AmazonS3Exception awsException) {
        final String correctRegion = extractCorrectedRegion(awsException);
        if (correctRegion != null) {
            LOGGER.debug("Running operation with corrected S3 region [" + correctRegion + "]", awsException);
            final HashMap<String, String> correctedSettings = new HashMap<>(settings);
            correctedSettings.put(REGION_NAME_PARAM, correctRegion);
            return withS3Client(correctedSettings, withCorrectedClient);
        } else {
            throw awsException;
        }
    }
}
Example #5
Source File: S3.java From rdf-delta with Apache License 2.0
public static AmazonS3 buildS3(LocalServerConfig configuration) {
    String region = configuration.getProperty(pRegion);
    String endpoint = configuration.getProperty(pEndpoint);
    String credentialsFile = configuration.getProperty(pCredentialFile);
    String credentialsProfile = configuration.getProperty(pCredentialProfile);

    AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard();
    if ( endpoint == null )
        builder.withRegion(region);
    else {
        // Needed for S3mock
        builder.withPathStyleAccessEnabled(true);
        builder.withEndpointConfiguration(new EndpointConfiguration(endpoint, region));
        builder.withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()));
    }
    if ( credentialsFile != null )
        builder.withCredentials(new ProfileCredentialsProvider(credentialsFile, credentialsProfile));
    return builder.build();
}
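The same builder pattern can be pointed directly at a locally running S3 mock for tests; the endpoint URL and region below are placeholder assumptions:

// Placeholder endpoint/region for a locally running S3 mock.
AmazonS3 local = AmazonS3ClientBuilder.standard()
        .withPathStyleAccessEnabled(true) // mock servers usually do not support virtual-host-style buckets
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration("http://localhost:9090", "us-east-1"))
        .withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials()))
        .build();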
Example #6
Source File: S3Configuration.java From data-highway with Apache License 2.0
@Bean
AbortableOutputStreamFactory s3OutputStreamFactory(
        AmazonS3 s3,
        @Value("${s3.bucket}") String bucket,
        @Value("${s3.partSize:5242880}") int partSize, // default 5 mebibytes
        @Value("${s3.enableServerSideEncryption:false}") boolean enableServerSideEncryption,
        @Value("${s3.retry.maxAttempts:3}") int maxAttempts,
        @Value("${s3.retry.sleepSeconds:1}") int sleepSeconds,
        @Value("${s3.async.poolSize:3}") int poolSize,
        @Value("${s3.async.queueSize:3}") int queueSize) {
    return key -> S3MultipartOutputStream
            .builder()
            .s3(s3, bucket, key)
            .partSize(partSize)
            .enableServerSideEncryption(enableServerSideEncryption)
            .retries(maxAttempts, sleepSeconds)
            .async(poolSize, queueSize)
            .build();
}
Example #7
Source File: PrimitiveS3OperationHandler.java From CloverETL-Engine with GNU Lesser General Public License v2.1
/**
 * Deletes a regular file.
 */
@Override
public boolean deleteFile(URI target) throws IOException {
    target = target.normalize();
    PooledS3Connection connection = null;
    try {
        connection = connect(target);
        AmazonS3 service = connection.getService();
        String[] path = getPath(target);
        try {
            service.deleteObject(path[0], path[1]);
            return true;
        } catch (AmazonClientException e) {
            throw new IOException(e);
        }
    } finally {
        disconnect(connection);
    }
}
Example #8
Source File: S3ChangeLogStore.java From athenz with Apache License 2.0
SignedDomain getSignedDomain(AmazonS3 s3, String domainName) {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("getSignedDomain with S3: {}", domainName);
    }

    SignedDomain signedDomain = null;
    try {
        S3Object object = s3.getObject(s3BucketName, domainName);
        try (S3ObjectInputStream s3is = object.getObjectContent()) {
            signedDomain = jsonMapper.readValue(s3is, SignedDomain.class);
        }
    } catch (Exception ex) {
        LOGGER.error("AWSS3ChangeLog: getSignedDomain - unable to get domain {} error: {}",
                domainName, ex.getMessage());
    }
    return signedDomain;
}
Example #9
Source File: S3Validator.java From halyard with Apache License 2.0
@Override
public void validate(ConfigProblemSetBuilder ps, S3PersistentStore n) {
    if (!StringUtils.isEmpty(n.getEndpoint())) {
        return;
    }
    try {
        AWSCredentialsProvider credentialsProvider = AwsAccountValidator.getAwsCredentialsProvider(
                n.getAccessKeyId(), secretSessionManager.decrypt(n.getSecretAccessKey()));
        S3Config s3Config = new S3Config();
        S3MetadataStorageProperties s3Properties = new S3MetadataStorageProperties();
        s3Properties.setBucket(n.getBucket());
        s3Properties.setRootFolder(n.getRootFolder());
        s3Properties.setRegion(n.getRegion());
        AmazonS3 s3Client = s3Config.awsS3MetadataClient(credentialsProvider, s3Properties);
        new S3Config().s3StorageService(s3Client, s3Properties);
    } catch (Exception e) {
        ps.addProblem(Problem.Severity.ERROR,
                "Failed to ensure the required bucket \"" + n.getBucket() + "\" exists: " + e.getMessage());
    }
}
Example #10
Source File: StashSplitIterator.java From emodb with Apache License 2.0
StashSplitIterator(AmazonS3 s3, String bucket, String key) {
    InputStream rawIn = new RestartingS3InputStream(s3, bucket, key);
    try {
        // File is gzipped
        // Note:
        // Because the content may be concatenated gzip files we cannot use the default GZIPInputStream.
        // GzipCompressorInputStream supports concatenated gzip files.
        GzipCompressorInputStream gzipIn = new GzipCompressorInputStream(rawIn, true);
        _in = new BufferedReader(new InputStreamReader(gzipIn, Charsets.UTF_8));
        // Create a line reader
        _reader = new LineReader(_in);
    } catch (Exception e) {
        try {
            Closeables.close(rawIn, true);
        } catch (IOException ignore) {
            // Won't happen, already caught and logged
        }
        throw Throwables.propagate(e);
    }
}
Example #11
Source File: S3Encrypt.java From aws-doc-sdk-examples with Apache License 2.0
/**
 * Uses AES/GCM with AESWrap key wrapping to encrypt the key. Uses v2 metadata schema. The only difference
 * between this and {@link #authenticatedEncryption_CustomerManagedKey()} is that attempting to retrieve an
 * object not encrypted with AES/GCM will throw an exception instead of falling back to encryption-only or
 * plaintext GET.
 */
// snippet-start:[s3.java1.s3_encrypt.strict_authenticated_encryption]
public void strictAuthenticatedEncryption_CustomerManagedKey() throws NoSuchAlgorithmException {
    // snippet-start:[s3.java1.s3_encrypt.strict_authenticated_encryption_build]
    SecretKey secretKey = KeyGenerator.getInstance("AES").generateKey();
    AmazonS3Encryption s3Encryption = AmazonS3EncryptionClientBuilder
            .standard()
            .withRegion(Regions.US_WEST_2)
            .withCryptoConfiguration(new CryptoConfiguration(CryptoMode.StrictAuthenticatedEncryption))
            .withEncryptionMaterials(new StaticEncryptionMaterialsProvider(new EncryptionMaterials(secretKey)))
            .build();
    AmazonS3 s3NonEncrypt = AmazonS3ClientBuilder.standard().withRegion(Regions.DEFAULT_REGION).build();
    // snippet-end:[s3.java1.s3_encrypt.strict_authenticated_encryption_build]

    s3Encryption.putObject(BUCKET_NAME, ENCRYPTED_KEY, "some contents");
    s3NonEncrypt.putObject(BUCKET_NAME, NON_ENCRYPTED_KEY, "some other contents");
    System.out.println(s3Encryption.getObjectAsString(BUCKET_NAME, ENCRYPTED_KEY));
    try {
        s3Encryption.getObjectAsString(BUCKET_NAME, NON_ENCRYPTED_KEY);
    } catch (SecurityException e) {
        // Strict authenticated encryption will throw an exception if an object is not encrypted with AES/GCM
        System.err.println(NON_ENCRYPTED_KEY + " was not encrypted with AES/GCM");
    }
}
Example #12
Source File: AmazonS3SourceConfiguration.java From spring-cloud-stream-app-starters with Apache License 2.0
@Bean
public S3InboundFileSynchronizer s3InboundFileSynchronizer(AmazonS3 amazonS3, ResourceIdResolver resourceIdResolver) {
    S3SessionFactory s3SessionFactory = new S3SessionFactory(amazonS3, resourceIdResolver);
    S3InboundFileSynchronizer synchronizer = new S3InboundFileSynchronizer(s3SessionFactory);
    synchronizer.setDeleteRemoteFiles(this.s3SourceProperties.isDeleteRemoteFiles());
    synchronizer.setPreserveTimestamp(this.s3SourceProperties.isPreserveTimestamp());
    String remoteDir = this.s3SourceProperties.getRemoteDir();
    synchronizer.setRemoteDirectory(remoteDir);
    synchronizer.setRemoteFileSeparator(this.s3SourceProperties.getRemoteFileSeparator());
    synchronizer.setTemporaryFileSuffix(this.s3SourceProperties.getTmpFileSuffix());
    if (StringUtils.hasText(this.s3SourceProperties.getFilenamePattern())) {
        synchronizer.setFilter(new S3SimplePatternFileListFilter(this.s3SourceProperties.getFilenamePattern()));
    }
    else if (this.s3SourceProperties.getFilenameRegex() != null) {
        synchronizer.setFilter(new S3RegexPatternFileListFilter(this.s3SourceProperties.getFilenameRegex()));
    }
    return synchronizer;
}
Example #13
Source File: AmazonS3InstallationServiceTest.java From java-slack-sdk with MIT License
@Test
public void initializer() {
    AWSCredentials credentials = mock(AWSCredentials.class);
    when(credentials.getAWSAccessKeyId()).thenReturn("valid key");
    AmazonS3 s3 = mock(AmazonS3.class);
    when(s3.doesBucketExistV2(anyString())).thenReturn(true);
    AmazonS3InstallationService service = new AmazonS3InstallationService("test-bucket") {
        @Override
        protected AWSCredentials getCredentials() {
            return credentials;
        }

        @Override
        protected AmazonS3 createS3Client() {
            return s3;
        }
    };
    service.initializer().accept(null);
}
Example #14
Source File: S3Connection.java From components with Apache License 2.0
private static String getEndpoint(AmazonS3 s3client, String bucket) {
    String bucketLocation = null;
    try {
        bucketLocation = s3client.getBucketLocation(bucket);
    } catch (IllegalArgumentException e) {
        // java.lang.IllegalArgumentException: Cannot create enum from eu-west-2 value!
        // An SDK that predates a region cannot map its name to an enum, so recover the
        // region name from the exception message instead.
        String info = e.getMessage();
        if (info == null || info.isEmpty()) {
            throw e;
        }
        Pattern regex = Pattern.compile("[a-zA-Z]+-[a-zA-Z]+-[1-9]");
        Matcher matcher = regex.matcher(info);
        if (matcher.find()) {
            bucketLocation = matcher.group(0);
        } else {
            throw e;
        }
    }
    String region = S3RegionUtil.getBucketRegionFromLocation(bucketLocation);
    return S3RegionUtil.regionToEndpoint(region);
}
Example #15
Source File: S3BlockSpiller.java From aws-athena-query-federation with Apache License 2.0
/**
 * Constructs a new S3BlockSpiller.
 *
 * @param amazonS3 AmazonS3 client to use for writing to S3.
 * @param spillConfig The spill config for this instance. Includes things like encryption key, S3 path, etc.
 * @param allocator The BlockAllocator to use when creating blocks.
 * @param schema The schema for blocks that should be written.
 * @param constraintEvaluator The ConstraintEvaluator that should be used to constrain writes.
 * @param maxRowsPerCall The max number of rows to allow callers to write in one call.
 */
public S3BlockSpiller(AmazonS3 amazonS3,
        SpillConfig spillConfig,
        BlockAllocator allocator,
        Schema schema,
        ConstraintEvaluator constraintEvaluator,
        int maxRowsPerCall) {
    this.amazonS3 = requireNonNull(amazonS3, "amazonS3 was null");
    this.spillConfig = requireNonNull(spillConfig, "spillConfig was null");
    this.allocator = requireNonNull(allocator, "allocator was null");
    this.schema = requireNonNull(schema, "schema was null");
    this.blockCrypto = (spillConfig.getEncryptionKey() != null)
            ? new AesGcmBlockCrypto(allocator)
            : new NoOpBlockCrypto(allocator);
    asyncSpillPool = (spillConfig.getNumSpillThreads() <= 0)
            ? null
            : Executors.newFixedThreadPool(spillConfig.getNumSpillThreads());
    this.maxRowsPerCall = maxRowsPerCall;
    this.constraintEvaluator = constraintEvaluator;
}
Example #16
Source File: S3ReadableSeekableByteChannel.java From beam with Apache License 2.0
S3ReadableSeekableByteChannel(AmazonS3 amazonS3, S3ResourceId path, S3Options options) throws IOException {
    this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
    checkNotNull(path, "path");
    this.options = checkNotNull(options, "options");
    if (path.getSize().isPresent()) {
        contentLength = path.getSize().get();
        this.path = path;
    } else {
        try {
            contentLength = amazonS3.getObjectMetadata(path.getBucket(), path.getKey()).getContentLength();
        } catch (AmazonClientException e) {
            throw new IOException(e);
        }
        this.path = path.withSize(contentLength);
    }
}
Example #17
Source File: StashReaderTest.java From emodb with Apache License 2.0
@Test
public void testGetTableMetadata() throws Exception {
    AmazonS3 s3 = mock(AmazonS3.class);
    when(s3.getObject(argThat(getsObject("stash-bucket", "stash/test/_LATEST")))).thenAnswer(new Answer<S3Object>() {
        @Override
        public S3Object answer(InvocationOnMock invocation) throws Throwable {
            S3Object s3Object = new S3Object();
            s3Object.setObjectContent(new ByteArrayInputStream("2015-01-01-00-00-00".getBytes(Charsets.UTF_8)));
            return s3Object;
        }
    });
    when(s3.listObjects(argThat(listObjectRequest("stash-bucket", "stash/test/2015-01-01-00-00-00/test~table/", null))))
            .thenAnswer(objectListingAnswer(null, "test~table-split0.gz", "test~table-split1.gz", "test~table-split2.gz"));

    StandardStashReader reader = new StandardStashReader(URI.create("s3://stash-bucket/stash/test"), s3, 0);
    StashTableMetadata tableMetadata = reader.getTableMetadata("test:table");

    assertEquals(tableMetadata.getBucket(), "stash-bucket");
    assertEquals(tableMetadata.getPrefix(), "stash/test/2015-01-01-00-00-00/test~table/");
    assertEquals(tableMetadata.getTableName(), "test:table");
    assertEquals(tableMetadata.getSize(), 300);

    List<StashFileMetadata> files = tableMetadata.getFiles();
    assertEquals(3, files.size());
    for (int i = 0; i < 3; i++) {
        StashFileMetadata fileMetadata = files.get(i);
        assertEquals(fileMetadata.getBucket(), "stash-bucket");
        assertEquals(fileMetadata.getKey(),
                String.format("stash/test/2015-01-01-00-00-00/test~table/test~table-split%d.gz", i));
        assertEquals(fileMetadata.getSize(), 100);
    }
}
Example #18
Source File: S3FilesResource.java From airpal with Apache License 2.0
@Inject
public S3FilesResource(AmazonS3 s3Client, @Named("s3Bucket") String outputBucket) {
    this.s3Client = s3Client;
    this.outputBucket = outputBucket;
}
Example #19
Source File: CircusTrainHdfsS3IntegrationTest.java From circus-train with Apache License 2.0
private AmazonS3 newS3Client(String tableUri) {
    AmazonS3URI base = toAmazonS3URI(URI.create(tableUri));
    S3S3CopierOptions s3s3CopierOptions = new S3S3CopierOptions(ImmutableMap
            .<String, Object>builder()
            .put(S3S3CopierOptions.Keys.S3_ENDPOINT_URI.keyName(), s3Proxy.getUri().toString())
            .build());
    return s3ClientFactory.newInstance(base, s3s3CopierOptions);
}
Example #20
Source File: S3Manager.java From ReplicaDB with Apache License 2.0
private void putObjectToS3(ResultSet resultSet, int taskId, AmazonS3 s3Client, String bucketName) throws SQLException {
    ResultSetMetaData rsmd = resultSet.getMetaData();
    int columnCount = rsmd.getColumnCount();

    // Get content column index
    int rowContentColumnIndex = 0;
    for (int i = 1; i <= columnCount; i++) {
        if (rsmd.getColumnName(i).toUpperCase().equals(rowContentColumnName.toUpperCase())) {
            rowContentColumnIndex = i;
        }
    }

    ObjectMetadata binMetadata = new ObjectMetadata();
    while (resultSet.next()) {
        switch (rsmd.getColumnType(rowContentColumnIndex)) {
            case Types.BINARY:
            case Types.BLOB:
            case Types.CLOB:
                s3Client.putObject(bucketName, resultSet.getString(rowKeyColumnName),
                        resultSet.getBinaryStream(rowContentColumnName), binMetadata);
                break;
            case Types.SQLXML:
                throw new IllegalArgumentException("SQLXML Data Type is not supported. You should convert it to BLOB or CLOB");
            default:
                s3Client.putObject(bucketName, resultSet.getString(rowKeyColumnName),
                        resultSet.getString(rowContentColumnName));
                break;
        }
    }
}
Example #21
Source File: AwsPrivateKeyStore.java From athenz with Apache License 2.0
private static AmazonS3 initAmazonS3() {
    String s3Region = System.getProperty(ATHENZ_PROP_AWS_S3_REGION);
    ///CLOVER:OFF
    if (null != s3Region && !s3Region.isEmpty()) {
        return AmazonS3ClientBuilder.standard().withRegion(s3Region).build();
    }
    return AmazonS3ClientBuilder.defaultClient();
    ///CLOVER:ON
}
Example #22
Source File: TestUtils.java From digdag with Apache License 2.0
public static void s3Put(AmazonS3 s3, String bucket, String key, String resource) throws IOException {
    logger.info("put {} -> s3://{}/{}", resource, bucket, key);
    URL resourceUrl = Resources.getResource(resource);
    byte[] bytes = Resources.toByteArray(resourceUrl);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    s3.putObject(bucket, key, new ByteArrayInputStream(bytes), metadata);
}
Example #23
Source File: S3BlobStorage.java From mojito with Apache License 2.0
public S3BlobStorage(AmazonS3 amazonS3, S3BlobStorageConfigurationProperties s3BlobStorageConfigurationProperties) {
    Preconditions.checkNotNull(amazonS3);
    Preconditions.checkNotNull(s3BlobStorageConfigurationProperties);
    this.amazonS3 = amazonS3;
    this.s3BlobStorageConfigurationProperties = s3BlobStorageConfigurationProperties;
}
Example #24
Source File: MySqlRecordHandlerTest.java From aws-athena-query-federation with Apache License 2.0
@Before
public void setup() {
    this.amazonS3 = Mockito.mock(AmazonS3.class);
    this.secretsManager = Mockito.mock(AWSSecretsManager.class);
    this.athena = Mockito.mock(AmazonAthena.class);
    this.connection = Mockito.mock(Connection.class);
    this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
    Mockito.when(this.jdbcConnectionFactory.getConnection(Mockito.mock(JdbcCredentialProvider.class)))
            .thenReturn(this.connection);
    jdbcSplitQueryBuilder = new MySqlQueryStringBuilder("`");
    final DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog",
            JdbcConnectionFactory.DatabaseEngine.MYSQL, "mysql://jdbc:mysql://hostname/user=A&password=B");

    this.mySqlRecordHandler = new MySqlRecordHandler(databaseConnectionConfig, amazonS3, secretsManager, athena,
            jdbcConnectionFactory, jdbcSplitQueryBuilder);
}
Example #25
Source File: SpringCloudStreamAwsApplicationsITCase.java From spring-cloud-stream-app-starters with Apache License 2.0
@Test
public void testS3Source() throws IOException, InterruptedException {
    String bucket = "TestBucket";
    String key = "foo";
    String content = "Spring Cloud Stream AWS S3 Source test";

    this.applicationContext = SpringApplication.run(S3SourceBootConfiguration.class,
            "--s3.remoteDir=" + bucket,
            "--file.consumer.mode=lines",
            "--file.consumer.with-markers=false");

    ResourceIdResolver resourceIdResolver = this.applicationContext.getBean(ResourceIdResolver.class);
    AmazonS3 amazonS3 = this.applicationContext.getBean(AmazonS3.class);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(content.length());
    String bucketName = resourceIdResolver.resolveToPhysicalResourceId(bucket);
    amazonS3.putObject(bucketName, key, new ByteArrayInputStream(content.getBytes("UTF-8")), objectMetadata);

    try {
        Source source = this.applicationContext.getBean(Source.class);
        MessageCollector messageCollector = this.applicationContext.getBean(MessageCollector.class);
        Message<?> received = messageCollector.forChannel(source.output()).poll(10, TimeUnit.SECONDS);
        assertNotNull(received);
        assertThat(received, hasHeader(FileHeaders.FILENAME, key));
        assertThat(received, hasPayload("Spring Cloud Stream AWS S3 Source test"));
    } finally {
        amazonS3.deleteObject(bucketName, key);
    }
}
Example #26
Source File: MockS3OperationsImpl.java From herd with Apache License 2.0
/**
 * {@inheritDoc}
 * <p>
 * Since a multipart upload in progress does not exist when in-memory, this method simply returns a
 * preconfigured list. Returns a mock {@link MultipartUploadListing} based on the parameters and hints
 * provided. By default returns a mock listing as defined by {@link #getMultipartUploadListing()}.
 * <p>
 * This operation takes the following hints when suffixed in listMultipartUploadsRequest.bucketName:
 * <dl>
 * <dt>MOCK_S3_BUCKET_NAME_SERVICE_EXCEPTION</dt>
 * <dd>Throws an AmazonServiceException</dd>
 * <dt>MOCK_S3_BUCKET_NAME_TRUNCATED_MULTIPART_LISTING</dt>
 * <dd>Returns the listing as if it is truncated. See below for details.</dd>
 * </dl>
 */
@Override
public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest listMultipartUploadsRequest, AmazonS3 s3Client) {
    if (listMultipartUploadsRequest.getBucketName().equals(MOCK_S3_BUCKET_NAME_SERVICE_EXCEPTION)) {
        throw new AmazonServiceException(null);
    } else if (listMultipartUploadsRequest.getBucketName().equals(MOCK_S3_BUCKET_NAME_TRUNCATED_MULTIPART_LISTING)) {
        MultipartUploadListing multipartUploadListing = getMultipartUploadListing();

        // If listing request does not have upload ID marker set, mark the listing as truncated - this is done
        // to truncate the multipart listing just once.
        if (listMultipartUploadsRequest.getUploadIdMarker() == null) {
            multipartUploadListing.setNextUploadIdMarker("TEST_UPLOAD_MARKER_ID");
            multipartUploadListing.setNextKeyMarker("TEST_KEY_MARKER_ID");
            multipartUploadListing.setTruncated(true);
        }
        return multipartUploadListing;
    } else {
        return getMultipartUploadListing();
    }
}
Example #27
Source File: MultiplexingJdbcRecordHandlerTest.java From aws-athena-query-federation with Apache License 2.0
@Before
public void setup() {
    this.mySqlRecordHandler = Mockito.mock(MySqlRecordHandler.class);
    this.recordHandlerMap = Collections.singletonMap("mysql", this.mySqlRecordHandler);
    this.amazonS3 = Mockito.mock(AmazonS3.class);
    this.secretsManager = Mockito.mock(AWSSecretsManager.class);
    this.athena = Mockito.mock(AmazonAthena.class);
    this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class);
    this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
    DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog",
            JdbcConnectionFactory.DatabaseEngine.MYSQL, "mysql://jdbc:mysql://hostname/${testSecret}", "testSecret");
    this.jdbcRecordHandler = new MultiplexingJdbcRecordHandler(this.amazonS3, this.secretsManager, this.athena,
            this.jdbcConnectionFactory, databaseConnectionConfig, this.recordHandlerMap);
}
Example #28
Source File: AthenaAuditWriterTest.java From emodb with Apache License 2.0
@BeforeMethod
public void setUp() {
    _s3 = mock(AmazonS3.class);

    when(_s3.putObject(eq(BUCKET), anyString(), any(File.class))).then(invocationOnMock -> {
        // The file will be deleted after the put object returns successfully, so capture the contents now
        File file = (File) invocationOnMock.getArguments()[2];
        try (FileInputStream fileIn = new FileInputStream(file);
             GzipCompressorInputStream in = new GzipCompressorInputStream(fileIn);
             BufferedReader reader = new BufferedReader(new InputStreamReader(in, Charsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                Map<String, Object> auditJson = JsonHelper.fromJson(line, new TypeReference<Map<String, Object>>() {});
                _uploadedAudits.put((String) invocationOnMock.getArguments()[1], auditJson);
            }
        }
        PutObjectResult result = new PutObjectResult();
        result.setETag(file.getName());
        return result;
    });

    _tempStagingDir = Files.createTempDir();

    // Start with some default time; individual tests can override as necessary
    _now = Instant.from(ZonedDateTime.of(2018, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC));
    _clock = mock(Clock.class);
    when(_clock.millis()).then(ignore -> _now.toEpochMilli());
    when(_clock.instant()).then(ignore -> _now);
}
Example #29
Source File: GeneratePresignedPutUrl.java From aws-doc-sdk-examples with Apache License 2.0
public static void main(String[] args) {
    final String USAGE = "\n" +
            "To run this example, supply the name of an S3 bucket and a file to\n" +
            "upload to it.\n" +
            "\n" +
            "Ex: GeneratePresignedPutUrl <bucketname> <filename>\n";

    if (args.length < 2) {
        System.out.println(USAGE);
        System.exit(1);
    }

    String bucket_name = args[0];
    String key_name = args[1];

    System.out.format("Creating a pre-signed URL for uploading %s to S3 bucket %s...\n", key_name, bucket_name);
    final AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(Regions.DEFAULT_REGION).build();

    // Set the pre-signed URL to expire after 12 hours.
    java.util.Date expiration = new java.util.Date();
    long expirationInMs = expiration.getTime();
    expirationInMs += 1000 * 60 * 60 * 12;
    expiration.setTime(expirationInMs);

    try {
        GeneratePresignedUrlRequest generatePresignedUrlRequest =
                new GeneratePresignedUrlRequest(bucket_name, key_name)
                        .withMethod(HttpMethod.PUT)
                        .withExpiration(expiration);
        URL url = s3.generatePresignedUrl(generatePresignedUrlRequest);

        // Print URL
        System.out.println("\n\rGenerated URL: " + url.toString());

        // Print curl command to consume URL
        System.out.println("\n\rExample command to use URL for file upload: \n\r");
        System.out.println("curl --request PUT --upload-file /path/to/" + key_name + " '" + url.toString() + "' -# > /dev/null");
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
}
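Besides the curl command printed above, a pre-signed PUT URL can be consumed from plain Java. A minimal sketch, assuming a url obtained as in the example and an arbitrary small payload:

// Minimal sketch: PUT a payload to a pre-signed URL with java.net.HttpURLConnection.
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setDoOutput(true);
connection.setRequestMethod("PUT");
try (OutputStream out = connection.getOutputStream()) {
    out.write("file contents".getBytes(StandardCharsets.UTF_8));
}
// S3 responds with 200 OK when the upload succeeds.
System.out.println("Response code: " + connection.getResponseCode());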
Example #30
Source File: S3FileManagerImpl.java From entrada with GNU General Public License v3.0
public S3FileManagerImpl(AmazonS3 amazonS3,
        @Value("${aws.upload.parallelism}") int parallelism,
        @Value("${aws.upload.multipart.mb.size:5}") int multipartSize) {

    this.amazonS3 = amazonS3;
    this.transferManager = TransferManagerBuilder
            .standard()
            .withS3Client(amazonS3)
            .withMultipartUploadThreshold(multipartSize * 1024L * 1024L)
            .withExecutorFactory(() -> Executors.newFixedThreadPool(parallelism))
            .build();
}
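Once built, a TransferManager like the one above splits large uploads into multipart requests and runs them on its thread pool. A hedged usage sketch; the bucket, key, and file path are placeholder assumptions:

// Placeholder bucket/key/file; upload() returns immediately and the transfer runs on the pool.
Upload upload = transferManager.upload("my-bucket", "backups/data.bin", new File("/tmp/data.bin"));
upload.waitForCompletion(); // blocks until done; throws AmazonClientException or InterruptedException on failure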