org.apache.nifi.stream.io.StreamUtils Java Examples
The following examples show how to use org.apache.nifi.stream.io.StreamUtils.
Each example lists its source file, the project it is taken from, and its license.
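Before the full examples, here is a minimal, self-contained sketch of the three StreamUtils calls that appear most often below (fillBuffer, copy, and skip), run against an in-memory stream. The class name and the sample data are illustrative only; just the StreamUtils calls themselves are taken from the examples that follow.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.nifi.stream.io.StreamUtils;

public class StreamUtilsSketch {
    public static void main(final String[] args) throws IOException {
        final byte[] source = "hello, world".getBytes();

        // fillBuffer: read exactly buffer.length bytes, throwing EOFException if the stream is too short
        try (final InputStream in = new ByteArrayInputStream(source)) {
            final byte[] buffer = new byte[5];
            StreamUtils.fillBuffer(in, buffer);
            System.out.println(new String(buffer)); // prints "hello"
        }

        // skip a fixed prefix, then copy the remainder of the stream to an OutputStream
        try (final InputStream in = new ByteArrayInputStream(source)) {
            StreamUtils.skip(in, 7);
            final ByteArrayOutputStream out = new ByteArrayOutputStream();
            final long copied = StreamUtils.copy(in, out);
            System.out.println(copied + " bytes copied: " + new String(out.toByteArray())); // prints "5 bytes copied: world"
        }
    }
}

The examples below use these same calls inside NiFi's content repository, provenance, and processor APIs.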
Example #1
Source File: TestContentRepositoryFlowFileAccess.java From nifi with Apache License 2.0
@Test
public void testInputStreamFromContentRepo() throws IOException {
    final ContentRepository contentRepo = mock(ContentRepository.class);
    final ResourceClaimManager claimManager = new StandardResourceClaimManager();
    final ResourceClaim resourceClaim = new StandardResourceClaim(claimManager, "container", "section", "id", false);
    final ContentClaim contentClaim = new StandardContentClaim(resourceClaim, 5L);

    final FlowFileRecord flowFile = mock(FlowFileRecord.class);
    when(flowFile.getContentClaim()).thenReturn(contentClaim);
    when(flowFile.getSize()).thenReturn(5L);

    final InputStream inputStream = new ByteArrayInputStream("hello".getBytes());
    when(contentRepo.read(contentClaim)).thenReturn(inputStream);

    final ContentRepositoryFlowFileAccess flowAccess = new ContentRepositoryFlowFileAccess(contentRepo);
    final InputStream repoStream = flowAccess.read(flowFile);
    verify(contentRepo, times(1)).read(contentClaim);

    final byte[] buffer = new byte[5];
    StreamUtils.fillBuffer(repoStream, buffer);
    assertEquals(-1, repoStream.read());
    assertArrayEquals("hello".getBytes(), buffer);
}
Example #2
Source File: CipherUtility.java From nifi with Apache License 2.0
public static byte[] readBytesFromInputStream(InputStream in, String label, int limit, byte[] delimiter) throws IOException, ProcessException {
    if (in == null) {
        throw new IllegalArgumentException("Cannot read " + label + " from null InputStream");
    }

    // If the value is not detected within the first n bytes, throw an exception
    in.mark(limit);

    // The first n bytes of the input stream contain the value up to the custom delimiter
    ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
    byte[] stoppedBy = StreamUtils.copyExclusive(in, bytesOut, limit + delimiter.length, delimiter);

    if (stoppedBy != null) {
        byte[] bytes = bytesOut.toByteArray();
        return bytes;
    }

    // If no delimiter was found, reset the cursor
    in.reset();
    return null;
}
Example #3
Source File: EncryptedSchemaRepositoryRecordSerde.java From nifi with Apache License 2.0
/**
 * Returns the deserialized and decrypted {@link RepositoryRecord} from the input stream.
 *
 * @param in stream to read from
 * @param version the version of the SerDe that was used to serialize the record
 * @return the deserialized record
 * @throws IOException if there is a problem reading from the stream
 */
@Override
public SerializedRepositoryRecord deserializeRecord(final DataInputStream in, final int version) throws IOException {
    // Read the expected length of the encrypted record (including the encryption metadata)
    int encryptedRecordLength = in.readInt();
    if (encryptedRecordLength == -1) {
        return null;
    }

    // Read the encrypted record bytes
    byte[] cipherBytes = new byte[encryptedRecordLength];
    StreamUtils.fillBuffer(in, cipherBytes);
    logger.debug("Read {} bytes (encrypted, including length) from actual input stream", encryptedRecordLength + 4);

    // Decrypt the byte[]
    DataInputStream wrappedInputStream = decryptToStream(cipherBytes);

    // Deserialize the plain bytes using the delegate serde
    final SerializedRepositoryRecord deserializedRecord = wrappedSerDe.deserializeRecord(wrappedInputStream, version);
    logger.debug("Deserialized flowfile record {} from temp stream", getRecordIdentifier(deserializedRecord));

    return deserializedRecord;
}
Example #4
Source File: CaptureServlet.java From localization_nifi with Apache License 2.0
@Override
protected void doPost(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException {
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();

    // Capture all the headers for reference. Intentionally not adding special handling for headers with multiple values, for clarity.
    final Enumeration<String> headerNames = request.getHeaderNames();
    lastPostHeaders = new HashMap<>();
    while (headerNames.hasMoreElements()) {
        final String nextHeader = headerNames.nextElement();
        lastPostHeaders.put(nextHeader, request.getHeader(nextHeader));
    }

    try {
        StreamUtils.copy(request.getInputStream(), baos);
        this.lastPost = baos.toByteArray();
    } finally {
        FileUtils.closeQuietly(baos);
    }

    response.setStatus(Status.OK.getStatusCode());
}
Example #5
Source File: SnippetManager.java From localization_nifi with Apache License 2.0
public static List<StandardSnippet> parseBytes(final byte[] bytes) {
    final List<StandardSnippet> snippets = new ArrayList<>();

    try (final InputStream rawIn = new ByteArrayInputStream(bytes);
            final DataInputStream in = new DataInputStream(rawIn)) {
        final int length = in.readInt();
        final byte[] buffer = new byte[length];
        StreamUtils.fillBuffer(in, buffer, true);
        final StandardSnippet snippet = StandardSnippetDeserializer.deserialize(new ByteArrayInputStream(buffer));
        snippets.add(snippet);
    } catch (final IOException e) {
        throw new RuntimeException("Failed to parse bytes", e); // should never happen because of streams being used
    }

    return snippets;
}
Example #6
Source File: FileSystemRepository.java From localization_nifi with Apache License 2.0
@Override
public ContentClaim clone(final ContentClaim original, final boolean lossTolerant) throws IOException {
    if (original == null) {
        return null;
    }

    final ContentClaim newClaim = create(lossTolerant);
    try (final InputStream in = read(original);
            final OutputStream out = write(newClaim)) {
        StreamUtils.copy(in, out);
    } catch (final IOException ioe) {
        decrementClaimantCount(newClaim);
        remove(newClaim);
        throw ioe;
    }
    return newClaim;
}
Example #7
Source File: PutHBaseCell.java From localization_nifi with Apache License 2.0
@Override
protected PutFlowFile createPut(final ProcessSession session, final ProcessContext context, final FlowFile flowFile) {
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String row = context.getProperty(ROW_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String columnFamily = context.getProperty(COLUMN_FAMILY).evaluateAttributeExpressions(flowFile).getValue();
    final String columnQualifier = context.getProperty(COLUMN_QUALIFIER).evaluateAttributeExpressions(flowFile).getValue();

    final byte[] buffer = new byte[(int) flowFile.getSize()];
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, buffer);
        }
    });

    final Collection<PutColumn> columns = Collections.singletonList(new PutColumn(columnFamily.getBytes(StandardCharsets.UTF_8),
            columnQualifier.getBytes(StandardCharsets.UTF_8), buffer));
    byte[] rowKeyBytes = getRow(row, context.getProperty(ROW_ID_ENCODING_STRATEGY).getValue());

    return new PutFlowFile(tableName, rowKeyBytes, columns, flowFile);
}
Example #8
Source File: EncryptedSchemaRecordReader.java From nifi with Apache License 2.0
@Override
protected Optional<StandardProvenanceEventRecord> readToEvent(final long eventId, final DataInputStream dis, final int serializationVersion) throws IOException {
    verifySerializationVersion(serializationVersion);

    while (isData(dis)) {
        final long startOffset = getBytesConsumed();
        final long id = dis.readInt() + getFirstEventId();
        final int recordLength = dis.readInt();

        if (id >= eventId) {
            final StandardProvenanceEventRecord event = readRecord(dis, id, startOffset, recordLength);
            return Optional.ofNullable(event);
        } else {
            // This is not the record we want. Skip over it instead of deserializing it.
            StreamUtils.skip(dis, recordLength);
        }
    }

    return Optional.empty();
}
Example #9
Source File: FileSystemRepository.java From nifi with Apache License 2.0
@Override
public long exportTo(final ContentClaim claim, final OutputStream destination, final long offset, final long length) throws IOException {
    if (offset < 0) {
        throw new IllegalArgumentException("offset cannot be negative");
    }

    final long claimSize = size(claim);
    if (offset > claimSize) {
        throw new IllegalArgumentException("offset of " + offset + " exceeds claim size of " + claimSize);
    }

    if (offset == 0 && length == claimSize) {
        return exportTo(claim, destination);
    }

    try (final InputStream in = read(claim)) {
        StreamUtils.skip(in, offset);
        final byte[] buffer = new byte[8192];

        int len;
        long copied = 0L;
        while ((len = in.read(buffer, 0, (int) Math.min(length - copied, buffer.length))) > 0) {
            destination.write(buffer, 0, len);
            copied += len;
        }

        return copied;
    }
}
Example #10
Source File: FileSystemRepository.java From localization_nifi with Apache License 2.0
@Override
public InputStream read(final ContentClaim claim) throws IOException {
    if (claim == null) {
        return new ByteArrayInputStream(new byte[0]);
    }

    final Path path = getPath(claim, true);
    final FileInputStream fis = new FileInputStream(path.toFile());
    if (claim.getOffset() > 0L) {
        try {
            StreamUtils.skip(fis, claim.getOffset());
        } catch (IOException ioe) {
            IOUtils.closeQuietly(fis);
            throw ioe;
        }
    }

    // see javadocs for claim.getLength() as to why we do this.
    if (claim.getLength() >= 0) {
        return new LimitedInputStream(fis, claim.getLength());
    } else {
        return fis;
    }
}
Example #11
Source File: TestStandardProcessSession.java From nifi with Apache License 2.0
@Test
public void testReadFromInputStream() throws IOException {
    FlowFile flowFile = session.create();
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            out.write("hello, world".getBytes());
        }
    });

    try (InputStream in = session.read(flowFile)) {
        final byte[] buffer = new byte[12];
        StreamUtils.fillBuffer(in, buffer);
        assertEquals("hello, world", new String(buffer));
    }

    session.remove(flowFile);
    session.commit();
}
Example #12
Source File: SiteToSiteRestApiClient.java From localization_nifi with Apache License 2.0
private TransactionResultEntity readResponse(final InputStream inputStream) throws IOException {
    final ByteArrayOutputStream bos = new ByteArrayOutputStream();
    StreamUtils.copy(inputStream, bos);

    String responseMessage = null;
    try {
        responseMessage = new String(bos.toByteArray(), "UTF-8");
        logger.debug("readResponse responseMessage={}", responseMessage);

        final ObjectMapper mapper = new ObjectMapper();
        return mapper.readValue(responseMessage, TransactionResultEntity.class);
    } catch (JsonParseException | JsonMappingException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Failed to parse JSON.", e);
        }

        final TransactionResultEntity entity = new TransactionResultEntity();
        entity.setResponseCode(ResponseCode.ABORT.getCode());
        entity.setMessage(responseMessage);
        return entity;
    }
}
Example #13
Source File: NifiSeekableInputStream.java From nifi with Apache License 2.0
@Override
public void seek(long newPos) throws IOException {
    final long currentPos = getPos();
    if (newPos == currentPos) {
        return;
    }

    if (newPos < currentPos) {
        // seeking backwards so first reset back to beginning of the stream then seek
        input.reset();
        input.mark(Integer.MAX_VALUE);
    }

    // must call getPos() again in case reset was called above
    StreamUtils.skip(input, newPos - getPos());
}
Example #14
Source File: TestFileSystemRepository.java From nifi with Apache License 2.0
@Test
public void testReadClaimThenWriteThenReadMore() throws IOException {
    final ContentClaim claim = repository.create(false);

    final OutputStream out = repository.write(claim);
    out.write("hello".getBytes());
    out.flush();

    final InputStream in = repository.read(claim);
    final byte[] buffer = new byte[5];
    StreamUtils.fillBuffer(in, buffer);

    assertEquals("hello", new String(buffer));

    out.write("good-bye".getBytes());
    out.close();

    final byte[] buffer2 = new byte[8];
    StreamUtils.fillBuffer(in, buffer2);
    assertEquals("good-bye", new String(buffer2));
}
Example #15
Source File: TestStandardProcessSession.java From localization_nifi with Apache License 2.0
@Test
public void testMigrateWithAppendableStream() throws IOException {
    FlowFile flowFile = session.create();
    flowFile = session.append(flowFile, out -> out.write("1".getBytes()));
    flowFile = session.append(flowFile, out -> out.write("2".getBytes()));

    final StandardProcessSession newSession = new StandardProcessSession(context);

    assertTrue(session.isFlowFileKnown(flowFile));
    assertFalse(newSession.isFlowFileKnown(flowFile));

    session.migrate(newSession, Collections.singleton(flowFile));

    assertFalse(session.isFlowFileKnown(flowFile));
    assertTrue(newSession.isFlowFileKnown(flowFile));

    flowFile = newSession.append(flowFile, out -> out.write("3".getBytes()));

    final byte[] buff = new byte[3];
    try (final InputStream in = newSession.read(flowFile)) {
        StreamUtils.fillBuffer(in, buff, true);
        assertEquals(-1, in.read());
    }

    assertTrue(Arrays.equals(new byte[] {'1', '2', '3'}, buff));

    newSession.remove(flowFile);
    newSession.commit();
    session.commit();
}
Example #16
Source File: FileSystemRepository.java From localization_nifi with Apache License 2.0
@Override
public long exportTo(final ContentClaim claim, final Path destination, final boolean append, final long offset, final long length) throws IOException {
    if (claim == null && offset > 0) {
        throw new IllegalArgumentException("Cannot specify an offset of " + offset + " for a null claim");
    }
    if (claim == null) {
        if (append) {
            return 0L;
        }
        Files.createFile(destination);
        return 0L;
    }

    final long claimSize = size(claim);
    if (offset > claimSize) {
        throw new IllegalArgumentException("Offset of " + offset + " exceeds claim size of " + claimSize);
    }

    try (final InputStream in = read(claim);
            final FileOutputStream fos = new FileOutputStream(destination.toFile(), append)) {
        if (offset > 0) {
            StreamUtils.skip(in, offset);
        }
        StreamUtils.copy(in, fos, length);
        if (alwaysSync) {
            fos.getFD().sync();
        }
        return length;
    }
}
Example #17
Source File: VolatileContentRepository.java From nifi with Apache License 2.0
@Override
public long merge(final Collection<ContentClaim> claims, final ContentClaim destination, final byte[] header, final byte[] footer, final byte[] demarcator) throws IOException {
    long bytes = 0L;

    try (final OutputStream out = write(destination)) {
        if (header != null) {
            out.write(header);
            bytes += header.length;
        }

        final Iterator<ContentClaim> itr = claims.iterator();
        while (itr.hasNext()) {
            final ContentClaim readClaim = itr.next();
            try (final InputStream in = read(readClaim)) {
                bytes += StreamUtils.copy(in, out);
            }

            if (itr.hasNext() && demarcator != null) {
                bytes += demarcator.length;
                out.write(demarcator);
            }
        }

        if (footer != null) {
            bytes += footer.length;
            out.write(footer);
        }

        return bytes;
    }
}
Example #18
Source File: TestContentClaimWriteCache.java From localization_nifi with Apache License 2.0
@Test
public void testFlushWriteCorrectData() throws IOException {
    final ContentClaimWriteCache cache = new ContentClaimWriteCache(repository, 4);

    final ContentClaim claim1 = cache.getContentClaim();
    assertNotNull(claim1);

    final OutputStream out = cache.write(claim1);
    assertNotNull(out);
    out.write("hello".getBytes());
    out.write("good-bye".getBytes());

    cache.flush();

    assertEquals(13L, claim1.getLength());
    final InputStream in = repository.read(claim1);
    final byte[] buff = new byte[(int) claim1.getLength()];
    StreamUtils.fillBuffer(in, buff);
    Assert.assertArrayEquals("hellogood-bye".getBytes(), buff);

    final ContentClaim claim2 = cache.getContentClaim();
    final OutputStream out2 = cache.write(claim2);
    assertNotNull(out2);
    out2.write("good-day".getBytes());
    out2.write("hello".getBytes());

    cache.flush();

    assertEquals(13L, claim2.getLength());
    final InputStream in2 = repository.read(claim2);
    final byte[] buff2 = new byte[(int) claim2.getLength()];
    StreamUtils.fillBuffer(in2, buff2);
    Assert.assertArrayEquals("good-dayhello".getBytes(), buff2);
}
Example #19
Source File: CompressableRecordReader.java From nifi with Apache License 2.0
@Override
public void skipToBlock(final int blockIndex) throws IOException {
    if (tocReader == null) {
        throw new IllegalStateException("Cannot skip to block " + blockIndex + " for Provenance Log " + filename + " because no Table-of-Contents file was found for this Log");
    }

    if (blockIndex < 0) {
        throw new IllegalArgumentException("Cannot skip to block " + blockIndex + " because the value is negative");
    }

    if (blockIndex == getBlockIndex()) {
        return;
    }

    final long offset = tocReader.getBlockOffset(blockIndex);
    if (offset < 0) {
        throw new IOException("Unable to find block " + blockIndex + " in Provenance Log " + filename);
    }

    final long curOffset = rawInputStream.getBytesConsumed();

    final long bytesToSkip = offset - curOffset;
    if (bytesToSkip >= 0) {
        try {
            StreamUtils.skip(rawInputStream, bytesToSkip);
            logger.debug("Skipped stream from offset {} to {} ({} bytes skipped)", curOffset, offset, bytesToSkip);
        } catch (final EOFException eof) {
            throw new EOFException("Attempted to skip to byte offset " + offset + " for " + filename + " but file does not have that many bytes (TOC Reader=" + getTocReader() + ")");
        } catch (final IOException e) {
            throw new IOException("Failed to skip to offset " + offset + " for block " + blockIndex + " of Provenance Log " + filename, e);
        }

        resetStreamForNextBlock();
    }
}
Example #20
Source File: VolatileContentRepository.java From localization_nifi with Apache License 2.0
@Override
public long exportTo(ContentClaim claim, OutputStream destination, long offset, long length) throws IOException {
    final InputStream in = read(claim);
    try {
        StreamUtils.skip(in, offset);
        StreamUtils.copy(in, destination, length);
    } finally {
        IOUtils.closeQuietly(in);
    }

    return length;
}
Example #21
Source File: TestStandardProcessSession.java From nifi with Apache License 2.0
@Test
public void testCloneOriginalDataSmaller() throws IOException {
    final byte[] originalContent = "hello".getBytes();
    final byte[] replacementContent = "NEW DATA".getBytes();

    final Connection conn1 = createConnection();
    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder()
        .id(1000L)
        .addAttribute("uuid", "12345678-1234-1234-1234-123456789012")
        .entryDate(System.currentTimeMillis())
        .contentClaim(contentRepo.create(originalContent))
        .size(originalContent.length)
        .build();

    flowFileQueue.put(flowFileRecord);

    when(connectable.getIncomingConnections()).thenReturn(Collections.singletonList(conn1));

    final FlowFile input = session.get();
    assertEquals(originalContent.length, input.getSize());

    final FlowFile modified = session.write(input, (in, out) -> out.write(replacementContent));
    assertEquals(replacementContent.length, modified.getSize());

    // Clone 'input', not 'modified': even when handed the outdated reference,
    // the framework should resolve to the most current version of the content.
    final FlowFile clone = session.clone(input);
    assertEquals(replacementContent.length, clone.getSize());

    final byte[] buffer = new byte[replacementContent.length];
    try (final InputStream in = session.read(clone)) {
        StreamUtils.fillBuffer(in, buffer);
    }

    assertArrayEquals(replacementContent, buffer);
}
Example #22
Source File: StatelessRemoteOutputPort.java From nifi with Apache License 2.0
@Override
public boolean runRecursive(final Queue<InMemoryFlowFile> queue) {
    try {
        final Transaction transaction = client.createTransaction(TransferDirection.RECEIVE);

        if (transaction == null) {
            getLogger().error("Unable to create a transaction for Remote Process Group {} to pull from port {}", new Object[]{url, name});
            return false;
        }

        final Queue<StatelessFlowFile> destinationQueue = new LinkedList<>();

        DataPacket dataPacket;
        while ((dataPacket = transaction.receive()) != null) {
            final Map<String, String> attributes = dataPacket.getAttributes();
            final InputStream in = dataPacket.getData();
            final byte[] buffer = new byte[(int) dataPacket.getSize()];
            StreamUtils.fillBuffer(in, buffer);

            final StatelessFlowFile receivedFlowFile = new StatelessFlowFile(buffer, attributes, true);
            destinationQueue.add(receivedFlowFile);

            for (final StatelessComponent childComponent : getChildren().get(Relationship.ANONYMOUS)) {
                childComponent.enqueueAll(destinationQueue);
                childComponent.runRecursive(queue);
            }

            destinationQueue.clear();
        }

        transaction.confirm();
        transaction.complete();
    } catch (final Exception e) {
        getLogger().error("Failed to receive FlowFile via site-to-site", e);
        return false;
    }

    return true;
}
Example #23
Source File: AbstractHiveQLProcessor.java From localization_nifi with Apache License 2.0
/**
 * Determines the HiveQL statement that should be executed for the given FlowFile.
 *
 * @param session the session that can be used to access the given FlowFile
 * @param flowFile the FlowFile whose HiveQL statement should be executed
 * @param charset the character set in which the FlowFile content is encoded
 * @return the HiveQL that is associated with the given FlowFile
 */
protected String getHiveQL(final ProcessSession session, final FlowFile flowFile, final Charset charset) {
    // Read the HiveQL from the FlowFile's content
    final byte[] buffer = new byte[(int) flowFile.getSize()];
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, buffer);
        }
    });

    // Return the HiveQL content decoded with the configured character set
    return new String(buffer, charset);
}
Example #24
Source File: FTPTransfer.java From nifi with Apache License 2.0
@Override
public FlowFile getRemoteFile(final String remoteFileName, final FlowFile origFlowFile, final ProcessSession session) throws ProcessException, IOException {
    final FTPClient client = getClient(origFlowFile);
    InputStream in = null;
    FlowFile resultFlowFile = null;
    try {
        in = client.retrieveFileStream(remoteFileName);
        if (in == null) {
            final String response = client.getReplyString();
            // FTPClient doesn't throw exception if file not found.
            // Instead, response string will contain: "550 Can't open <absolute_path>: No such file or directory"
            if (response != null && response.trim().endsWith("No such file or directory")) {
                throw new FileNotFoundException(response);
            }
            throw new IOException(response);
        }
        final InputStream remoteIn = in;
        resultFlowFile = session.write(origFlowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                StreamUtils.copy(remoteIn, out);
            }
        });
        client.completePendingCommand();
        return resultFlowFile;
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (final IOException ioe) {
                // do nothing
            }
        }
    }
}
Example #25
Source File: TestJmsConsumer.java From localization_nifi with Apache License 2.0
@Test
public void testMap2FlowFileTextMessage() throws Exception {
    TestRunner runner = TestRunners.newTestRunner(GetJMSQueue.class);
    TextMessage textMessage = new ActiveMQTextMessage();

    String payload = "Hello world!";
    textMessage.setText(payload);

    ProcessContext context = runner.getProcessContext();
    ProcessSession session = runner.getProcessSessionFactory().createSession();
    ProcessorInitializationContext pic = new MockProcessorInitializationContext(runner.getProcessor(), (MockProcessContext) runner.getProcessContext());

    JmsProcessingSummary summary = JmsConsumer.map2FlowFile(context, session, textMessage, true, pic.getLogger());

    assertEquals("TextMessage content length should equal to FlowFile content size", payload.length(), summary.getLastFlowFile().getSize());

    final byte[] buffer = new byte[payload.length()];
    runner.clearTransferState();

    session.read(summary.getLastFlowFile(), new InputStreamCallback() {
        @Override
        public void process(InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, buffer, false);
        }
    });

    String contentString = new String(buffer, "UTF-8");
    assertEquals("", payload, contentString);
}
Example #26
Source File: PutHBaseCell.java From nifi with Apache License 2.0
@Override
protected PutFlowFile createPut(final ProcessSession session, final ProcessContext context, final FlowFile flowFile) {
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String row = context.getProperty(ROW_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String columnFamily = context.getProperty(COLUMN_FAMILY).evaluateAttributeExpressions(flowFile).getValue();
    final String columnQualifier = context.getProperty(COLUMN_QUALIFIER).evaluateAttributeExpressions(flowFile).getValue();
    final String timestampValue = context.getProperty(TIMESTAMP).evaluateAttributeExpressions(flowFile).getValue();

    final String visibilityStringToUse = pickVisibilityString(columnFamily, columnQualifier, flowFile, context);

    final Long timestamp;
    if (!StringUtils.isBlank(timestampValue)) {
        try {
            timestamp = Long.valueOf(timestampValue);
        } catch (Exception e) {
            getLogger().error("Invalid timestamp value: " + timestampValue, e);
            return null;
        }
    } else {
        timestamp = null;
    }

    final byte[] buffer = new byte[(int) flowFile.getSize()];
    session.read(flowFile, in -> StreamUtils.fillBuffer(in, buffer));

    PutColumn column = StringUtils.isEmpty(visibilityStringToUse)
            ? new PutColumn(columnFamily.getBytes(StandardCharsets.UTF_8), columnQualifier.getBytes(StandardCharsets.UTF_8), buffer, timestamp)
            : new PutColumn(columnFamily.getBytes(StandardCharsets.UTF_8), columnQualifier.getBytes(StandardCharsets.UTF_8), buffer, timestamp, visibilityStringToUse);
    final Collection<PutColumn> columns = Collections.singletonList(column);
    byte[] rowKeyBytes = getRow(row, context.getProperty(ROW_ID_ENCODING_STRATEGY).getValue());

    return new PutFlowFile(tableName, rowKeyBytes, columns, flowFile);
}
Example #27
Source File: PublishAMQP.java From localization_nifi with Apache License 2.0
/**
 * Extracts contents of the {@link FlowFile} as byte array.
 */
private byte[] extractMessage(FlowFile flowFile, ProcessSession session) {
    final byte[] messageContent = new byte[(int) flowFile.getSize()];
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, messageContent, true);
        }
    });
    return messageContent;
}
Example #28
Source File: FileSystemRepository.java From nifi with Apache License 2.0
@Override
public long exportTo(final ContentClaim claim, final Path destination, final boolean append, final long offset, final long length) throws IOException {
    if (claim == null && offset > 0) {
        throw new IllegalArgumentException("Cannot specify an offset of " + offset + " for a null claim");
    }
    if (claim == null) {
        if (append) {
            return 0L;
        }
        Files.createFile(destination);
        return 0L;
    }

    final long claimSize = size(claim);
    if (offset > claimSize) {
        throw new IllegalArgumentException("Offset of " + offset + " exceeds claim size of " + claimSize);
    }

    try (final InputStream in = read(claim);
            final FileOutputStream fos = new FileOutputStream(destination.toFile(), append)) {
        if (offset > 0) {
            StreamUtils.skip(in, offset);
        }
        StreamUtils.copy(in, fos, length);
        if (alwaysSync) {
            fos.getFD().sync();
        }
        return length;
    }
}
Example #29
Source File: FileSystemRepository.java From nifi with Apache License 2.0
@Override
public InputStream read(final ContentClaim claim) throws IOException {
    if (claim == null) {
        return new ByteArrayInputStream(new byte[0]);
    }

    final Path path = getPath(claim, true);
    final FileInputStream fis = new FileInputStream(path.toFile());
    if (claim.getOffset() > 0L) {
        try {
            StreamUtils.skip(fis, claim.getOffset());
        } catch (final EOFException eof) {
            final long resourceClaimBytes;
            try {
                resourceClaimBytes = Files.size(path);
            } catch (final IOException e) {
                throw new ContentNotFoundException(claim, "Content Claim has an offset of " + claim.getOffset()
                    + " but Resource Claim has fewer than this many bytes (actual length of the resource claim could not be determined)");
            }

            throw new ContentNotFoundException(claim, "Content Claim has an offset of " + claim.getOffset() + " but Resource Claim " + path + " is only " + resourceClaimBytes + " bytes");
        } catch (final IOException ioe) {
            IOUtils.closeQuietly(fis);
            throw ioe;
        }
    }

    // A claim length of -1 indicates that the claim is still being written to and we don't know
    // the length. In this case, we don't limit the Input Stream. If the Length has been populated, though,
    // it is possible that the Length could then be extended. However, we do want to avoid ever allowing the
    // stream to read past the end of the Content Claim. To accomplish this, we use a LimitedInputStream but
    // provide a LongSupplier for the length instead of a Long value. This allows us to continue reading until
    // we get to the end of the Claim, even if the Claim grows. This may happen, for instance, if we obtain an
    // InputStream for this claim, then read from it, write more to the claim, and then attempt to read again. In
    // such a case, since we have written to that same Claim, we should still be able to read those bytes.
    if (claim.getLength() >= 0) {
        return new LimitedInputStream(fis, claim::getLength);
    } else {
        return fis;
    }
}