Java Code Examples for org.apache.nifi.processor.ProcessSession#penalize()

The following examples show how to use org.apache.nifi.processor.ProcessSession#penalize(). Each example is taken from an open source project; the source file, originating project, and license are noted above the code.
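
Before diving into the examples, here is a minimal sketch of the pattern most of them share: when an operation fails in a way that may be transient, the FlowFile is penalized so that it is not retried immediately (the delay is the processor's configured Penalty Duration), then routed to a failure or retry relationship. Note that penalize(), like the other ProcessSession methods that modify a FlowFile, returns a new FlowFile reference that must be used from that point on. The processor skeleton, relationship names, and log message below are illustrative assumptions rather than code from any one of the examples.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    try {
        // ... perform the work that may fail transiently (remote call, I/O, etc.) ...
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final Exception e) {
        getLogger().error("Operation failed for {}", new Object[]{flowFile}, e);
        // penalize() returns an updated FlowFile; keep using the returned reference
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
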
Example 1
Source File: DeleteAzureDataLakeStorage.java    From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();
    try {
        final String fileSystem = context.getProperty(FILESYSTEM).evaluateAttributeExpressions(flowFile).getValue();
        final String directory = context.getProperty(DIRECTORY).evaluateAttributeExpressions(flowFile).getValue();
        final String fileName = context.getProperty(FILE).evaluateAttributeExpressions(flowFile).getValue();

        if (StringUtils.isBlank(fileSystem)) {
            throw new ProcessException(FILESYSTEM.getDisplayName() + " property evaluated to empty string. " +
                    FILESYSTEM.getDisplayName() + " must be specified as a non-empty string.");
        }
        if (StringUtils.isBlank(fileName)) {
            throw new ProcessException(FILE.getDisplayName() + " property evaluated to empty string. " +
                    FILE.getDisplayName() + " must be specified as a non-empty string.");
        }

        final DataLakeServiceClient storageClient = getStorageClient(context, flowFile);
        final DataLakeFileSystemClient fileSystemClient = storageClient.getFileSystemClient(fileSystem);
        final DataLakeDirectoryClient directoryClient = fileSystemClient.getDirectoryClient(directory);
        final DataLakeFileClient fileClient = directoryClient.getFileClient(fileName);

        fileClient.delete();
        session.transfer(flowFile, REL_SUCCESS);

        final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().invokeRemoteProcess(flowFile, fileClient.getFileUrl(), "File deleted");
    } catch (Exception e) {
        getLogger().error("Failed to delete the specified file from Azure Data Lake Storage", e);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
 
Example 2
Source File: AbstractCouchbaseProcessor.java    From nifi with Apache License 2.0
/**
 * Handles the thrown CouchbaseException accordingly.
 *
 * @param context a process context
 * @param session a process session
 * @param logger a logger
 * @param inFile an input FlowFile
 * @param e the thrown CouchbaseException
 * @param errMsg a message to be logged
 */
protected void handleCouchbaseException(final ProcessContext context, final ProcessSession session,
    final ComponentLog logger, FlowFile inFile, CouchbaseException e,
    String errMsg) {
    logger.error(errMsg, e);
    if (inFile != null) {
        ErrorHandlingStrategy strategy = CouchbaseExceptionMappings.getStrategy(e);
        switch (strategy.penalty()) {
            case Penalize:
                if (logger.isDebugEnabled()) {
                    logger.debug("Penalized: {}", new Object[] {inFile});
                }
                inFile = session.penalize(inFile);
                break;
            case Yield:
                if (logger.isDebugEnabled()) {
                    logger.debug("Yielded context: {}", new Object[] {inFile});
                }
                context.yield();
                break;
            case None:
                break;
        }

        switch (strategy.result()) {
            case ProcessException:
                throw new ProcessException(errMsg, e);
            case Failure:
                inFile = session.putAttribute(inFile, CouchbaseAttributes.Exception.key(), e.getClass().getName());
                session.transfer(inFile, REL_FAILURE);
                break;
            case Retry:
                inFile = session.putAttribute(inFile, CouchbaseAttributes.Exception.key(), e.getClass().getName());
                session.transfer(inFile, REL_RETRY);
                break;
        }
    }
}
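
For context, a concrete Couchbase processor would typically call this helper from the catch block of its onTrigger method. The snippet below is an illustrative assumption based only on the signature shown above; it is not taken from the NiFi source.

try {
    // ... perform the Couchbase operation for the incoming FlowFile ...
} catch (final CouchbaseException e) {
    // Let the shared helper decide whether to penalize or yield, and where to route the FlowFile.
    handleCouchbaseException(context, session, getLogger(), flowFile, e,
        "Couchbase operation failed for " + flowFile);
}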
 
Example 3
Source File: DeleteGCSObject.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET)
                               .evaluateAttributeExpressions(flowFile)
                               .getValue();
    final String key = context.getProperty(KEY)
                               .evaluateAttributeExpressions(flowFile)
                               .getValue();

    final Long generation = context.getProperty(GENERATION)
            .evaluateAttributeExpressions(flowFile)
            .asLong();


    final Storage storage = getCloudService();

    // Deletes a key on Google Cloud
    try {
        storage.delete(BlobId.of(bucket, key, generation));
    } catch (Exception e) {
        getLogger().error(e.getMessage(), e);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    session.transfer(flowFile, REL_SUCCESS);
    final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully deleted GCS Object for {} in {} millis; routing to success", new Object[]{flowFile, millis});
}
 
Example 4
Source File: DeleteGCSObject.java    From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET)
                               .evaluateAttributeExpressions(flowFile)
                               .getValue();
    final String key = context.getProperty(KEY)
                               .evaluateAttributeExpressions(flowFile)
                               .getValue();

    final Long generation = context.getProperty(GENERATION)
            .evaluateAttributeExpressions(flowFile)
            .asLong();


    final Storage storage = getCloudService();

    // Deletes a key on Google Cloud
    try {
        storage.delete(BlobId.of(bucket, key, generation));
    } catch (Exception e) {
        getLogger().error(e.getMessage(), e);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    session.transfer(flowFile, REL_SUCCESS);
    final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully deleted GCS Object for {} in {} millis; routing to success", new Object[]{flowFile, millis});
}
 
Example 5
Source File: AbstractCouchbaseProcessor.java    From localization_nifi with Apache License 2.0
/**
 * Handles the thrown CouchbaseException accordingly.
 *
 * @param context a process context
 * @param session a process session
 * @param logger a logger
 * @param inFile an input FlowFile
 * @param e the thrown CouchbaseException
 * @param errMsg a message to be logged
 */
protected void handleCouchbaseException(final ProcessContext context, final ProcessSession session,
    final ComponentLog logger, FlowFile inFile, CouchbaseException e,
    String errMsg) {
    logger.error(errMsg, e);
    if (inFile != null) {
        ErrorHandlingStrategy strategy = CouchbaseExceptionMappings.getStrategy(e);
        switch (strategy.penalty()) {
            case Penalize:
                if (logger.isDebugEnabled()) {
                    logger.debug("Penalized: {}", new Object[] {inFile});
                }
                inFile = session.penalize(inFile);
                break;
            case Yield:
                if (logger.isDebugEnabled()) {
                    logger.debug("Yielded context: {}", new Object[] {inFile});
                }
                context.yield();
                break;
            case None:
                break;
        }

        switch (strategy.result()) {
            case ProcessException:
                throw new ProcessException(errMsg, e);
            case Failure:
                inFile = session.putAttribute(inFile, CouchbaseAttributes.Exception.key(), e.getClass().getName());
                session.transfer(inFile, REL_FAILURE);
                break;
            case Retry:
                inFile = session.putAttribute(inFile, CouchbaseAttributes.Exception.key(), e.getClass().getName());
                session.transfer(inFile, REL_RETRY);
                break;
        }
    }
}
 
Example 6
Source File: ExceptionHandler.java    From nifi with Apache License 2.0
private static FlowFile penalize(final ProcessContext context, final ProcessSession session,
                                 final FlowFile flowFile, final ErrorTypes.Penalty penalty) {
    switch (penalty) {
        case Penalize:
            return session.penalize(flowFile);
        case Yield:
            context.yield();
    }
    return flowFile;
}
 
Example 7
Source File: PutCloudWatchMetric.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    MetricDatum datum = new MetricDatum();

    try {
        datum.setMetricName(context.getProperty(METRIC_NAME).evaluateAttributeExpressions(flowFile).getValue());
        datum.setValue(Double.parseDouble(context.getProperty(VALUE).evaluateAttributeExpressions(flowFile).getValue()));

        final String timestamp = context.getProperty(TIMESTAMP).evaluateAttributeExpressions(flowFile).getValue();
        if (timestamp != null) {
            datum.setTimestamp(new Date(Long.parseLong(timestamp)));
        }

        final String unit = context.getProperty(UNIT).evaluateAttributeExpressions(flowFile).getValue();
        if (unit != null) {
            datum.setUnit(unit);
        }

        final PutMetricDataRequest metricDataRequest = new PutMetricDataRequest()
                .withNamespace(context.getProperty(NAMESPACE).evaluateAttributeExpressions(flowFile).getValue())
                .withMetricData(datum);

        putMetricData(metricDataRequest);
        session.transfer(flowFile, REL_SUCCESS);
        getLogger().info("Successfully published cloudwatch metric for {}", new Object[]{flowFile});
    } catch (final Exception e) {
        getLogger().error("Failed to publish cloudwatch metric for {} due to {}", new Object[]{flowFile, e});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }

}
 
Example 8
Source File: FetchElasticsearch5.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    synchronized (esClient) {
        if(esClient.get() == null) {
            super.setup(context);
        }
    }

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String index = context.getProperty(INDEX).evaluateAttributeExpressions(flowFile).getValue();
    final String docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    final String docType = context.getProperty(TYPE).evaluateAttributeExpressions(flowFile).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    final ComponentLog logger = getLogger();
    try {

        logger.debug("Fetching {}/{}/{} from Elasticsearch", new Object[]{index, docType, docId});
        GetRequestBuilder getRequestBuilder = esClient.get().prepareGet(index, docType, docId);
        final GetResponse getResponse = getRequestBuilder.execute().actionGet();

        if (getResponse == null || !getResponse.isExists()) {
            logger.warn("Failed to read {}/{}/{} from Elasticsearch: Document not found",
                    new Object[]{index, docType, docId});

            // We couldn't find the document, so penalize it and send it to "not found"
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_NOT_FOUND);
        } else {
            flowFile = session.putAllAttributes(flowFile, new HashMap<String, String>() {{
                put("filename", docId);
                put("es.index", index);
                put("es.type", docType);
            }});
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(OutputStream out) throws IOException {
                    out.write(getResponse.getSourceAsString().getBytes(charset));
                }
            });
            logger.debug("Elasticsearch document " + docId + " fetched, routing to success");
            // The document is JSON, so update the MIME type of the flow file
            flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), "application/json");
            session.getProvenanceReporter().fetch(flowFile, getResponse.remoteAddress().getAddress());
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (NoNodeAvailableException
            | ElasticsearchTimeoutException
            | ReceiveTimeoutTransportException
            | NodeClosedException exceptionToRetry) {
        logger.error("Failed to read into Elasticsearch due to {}, this may indicate an error in configuration "
                        + "(hosts, username/password, etc.), or this issue may be transient. Routing to retry",
                new Object[]{exceptionToRetry.getLocalizedMessage()}, exceptionToRetry);
        session.transfer(flowFile, REL_RETRY);
        context.yield();

    } catch (Exception e) {
        logger.error("Failed to read {} from Elasticsearch due to {}", new Object[]{flowFile, e.getLocalizedMessage()}, e);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 9
Source File: PutSQS.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();
    final AmazonSQSClient client = getClient();
    final SendMessageBatchRequest request = new SendMessageBatchRequest();
    final String queueUrl = context.getProperty(QUEUE_URL).evaluateAttributeExpressions(flowFile).getValue();
    request.setQueueUrl(queueUrl);

    final Set<SendMessageBatchRequestEntry> entries = new HashSet<>();

    final SendMessageBatchRequestEntry entry = new SendMessageBatchRequestEntry();
    entry.setId(flowFile.getAttribute("uuid"));
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(flowFile, baos);
    final String flowFileContent = baos.toString();
    entry.setMessageBody(flowFileContent);

    final Map<String, MessageAttributeValue> messageAttributes = new HashMap<>();

    for (final PropertyDescriptor descriptor : userDefinedProperties) {
        final MessageAttributeValue mav = new MessageAttributeValue();
        mav.setDataType("String");
        mav.setStringValue(context.getProperty(descriptor).evaluateAttributeExpressions(flowFile).getValue());
        messageAttributes.put(descriptor.getName(), mav);
    }

    entry.setMessageAttributes(messageAttributes);
    entry.setDelaySeconds(context.getProperty(DELAY).asTimePeriod(TimeUnit.SECONDS).intValue());
    entries.add(entry);

    request.setEntries(entries);

    try {
        SendMessageBatchResult response = client.sendMessageBatch(request);

        // check for errors
        if (!response.getFailed().isEmpty()) {
            throw new ProcessException(response.getFailed().get(0).toString());
        }
    } catch (final Exception e) {
        getLogger().error("Failed to send messages to Amazon SQS due to {}; routing to failure", new Object[]{e});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    getLogger().info("Successfully published message to Amazon SQS for {}", new Object[]{flowFile});
    session.transfer(flowFile, REL_SUCCESS);
    final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    session.getProvenanceReporter().send(flowFile, queueUrl, transmissionMillis);
}
 
Example 10
Source File: PutHBaseRecord.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final RecordReaderFactory recordParserFactory = context.getProperty(RECORD_READER_FACTORY)
            .asControllerService(RecordReaderFactory.class);
    List<PutFlowFile> flowFiles = new ArrayList<>();
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String rowFieldName = context.getProperty(ROW_FIELD_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String columnFamily = context.getProperty(COLUMN_FAMILY).evaluateAttributeExpressions(flowFile).getValue();
    final String timestampFieldName = context.getProperty(TIMESTAMP_FIELD_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String fieldEncodingStrategy = context.getProperty(FIELD_ENCODING_STRATEGY).getValue();
    final String complexFieldStrategy = context.getProperty(COMPLEX_FIELD_STRATEGY).getValue();
    final String rowEncodingStrategy = context.getProperty(ROW_ID_ENCODING_STRATEGY).getValue();
    final String recordPathText = context.getProperty(VISIBILITY_RECORD_PATH).getValue();

    RecordPath recordPath = null;
    if (recordPathCache != null && !StringUtils.isEmpty(recordPathText)) {
        recordPath = recordPathCache.getCompiled(recordPathText);
    }

    final long start = System.nanoTime();
    int index = 0;
    int columns = 0;
    boolean failed = false;
    String startIndexStr = flowFile.getAttribute("restart.index");
    int startIndex = -1;
    if (startIndexStr != null) {
        startIndex = Integer.parseInt(startIndexStr);
    }

    PutFlowFile last  = null;
    try (final InputStream in = session.read(flowFile);
         final RecordReader reader = recordParserFactory.createRecordReader(flowFile, in, getLogger())) {
        Record record;
        if (startIndex >= 0) {
            while ( index++ < startIndex && (reader.nextRecord()) != null) {}
        }

        while ((record = reader.nextRecord()) != null) {
            PutFlowFile putFlowFile = createPut(context, record, reader.getSchema(), recordPath, flowFile, rowFieldName, columnFamily,
                    timestampFieldName, fieldEncodingStrategy, rowEncodingStrategy, complexFieldStrategy);
            if (putFlowFile.getColumns().size() == 0) {
                continue;
            }
            flowFiles.add(putFlowFile);
            index++;

            if (flowFiles.size() == batchSize) {
                columns += addBatch(tableName, flowFiles);
                last = flowFiles.get(flowFiles.size() - 1);
                flowFiles = new ArrayList<>();
            }
        }
        if (flowFiles.size() > 0) {
            columns += addBatch(tableName, flowFiles);
            last = flowFiles.get(flowFiles.size() - 1);
        }
    } catch (Exception ex) {
        getLogger().error("Failed to put records to HBase.", ex);
        failed = true;
    }

    if (!failed) {
        if (columns > 0) {
            sendProvenance(session, flowFile, columns, System.nanoTime() - start, last);
        }
        flowFile = session.removeAttribute(flowFile, "restart.index");
        session.transfer(flowFile, REL_SUCCESS);
    } else {
        String restartIndex = Integer.toString(index - flowFiles.size());
        flowFile = session.putAttribute(flowFile, "restart.index", restartIndex);
        if (columns > 0) {
            sendProvenance(session, flowFile, columns, System.nanoTime() - start, last);
        }
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }



    session.commit();
}
 
Example 11
Source File: InvokeGRPC.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final FlowFileServiceGrpc.FlowFileServiceBlockingStub blockingStub = blockingStubReference.get();
    final String host = context.getProperty(PROP_SERVICE_HOST).getValue();
    final String port = context.getProperty(PROP_SERVICE_PORT).getValue();
    fileToProcess = session.putAttribute(fileToProcess, SERVICE_HOST, host);
    fileToProcess = session.putAttribute(fileToProcess, SERVICE_PORT, port);
    FlowFile responseFlowFile = null;
    try {
        final FlowFileRequest.Builder requestBuilder = FlowFileRequest.newBuilder()
                .setId(fileToProcess.getId())
                .putAllAttributes(fileToProcess.getAttributes());

        // if the processor is configured to send the content, turn the content into bytes
        // and add it to the request.
        final boolean sendContent = context.getProperty(PROP_SEND_CONTENT).asBoolean();
        if (sendContent) {
            try (final InputStream contents = session.read(fileToProcess)) {
                requestBuilder.setContent(ByteString.readFrom(contents));
            }
            // emit provenance event
            session.getProvenanceReporter().send(fileToProcess, getRemote(host, port), true);
        }
        final FlowFileRequest flowFileRequest = requestBuilder.build();
        logRequest(logger, host, port, flowFileRequest);

        final FlowFileReply flowFileReply = blockingStub.send(flowFileRequest);
        logReply(logger, host, port, flowFileReply);

        final FlowFileReply.ResponseCode responseCode = flowFileReply.getResponseCode();
        final String body = flowFileReply.getBody();

        fileToProcess = session.putAttribute(fileToProcess, RESPONSE_CODE, String.valueOf(responseCode));
        fileToProcess = session.putAttribute(fileToProcess, RESPONSE_BODY, body);

        responseFlowFile = session.create(fileToProcess);
        route(fileToProcess, responseFlowFile, session, context, responseCode);

    } catch (final Exception e) {
        // penalize or yield
        if (fileToProcess != null) {
            logger.error("Routing to {} due to exception: {}", new Object[]{REL_FAILURE.getName(), e}, e);
            fileToProcess = session.penalize(fileToProcess);
            fileToProcess = session.putAttribute(fileToProcess, EXCEPTION_CLASS, e.getClass().getName());
            fileToProcess = session.putAttribute(fileToProcess, EXCEPTION_MESSAGE, e.getMessage());
            // transfer original to failure
            session.transfer(fileToProcess, REL_FAILURE);
        } else {
            logger.error("Yielding processor due to exception encountered as a source processor: {}", e);
            context.yield();
        }

        // cleanup
        try {
            if (responseFlowFile != null) {
                session.remove(responseFlowFile);
            }
        } catch (final Exception e1) {
            logger.error("Could not cleanup response flowfile due to exception: {}", new Object[]{e1}, e1);
        }
    }
}
 
Example 12
Source File: InvokeHTTP.java    From nifi with Apache License 2.0
private void route(FlowFile request, FlowFile response, ProcessSession session, ProcessContext context, int statusCode){
    // check if we should yield the processor
    if (!isSuccess(statusCode) && request == null) {
        context.yield();
    }

    // If the property to output the response flowfile regardless of status code is set then transfer it
    boolean responseSent = false;
    if (context.getProperty(PROP_OUTPUT_RESPONSE_REGARDLESS).asBoolean()) {
        session.transfer(response, REL_RESPONSE);
        responseSent = true;
    }

    // transfer to the correct relationship
    // 2xx -> SUCCESS
    if (isSuccess(statusCode)) {
        // we have two flowfiles to transfer
        if (request != null) {
            session.transfer(request, REL_SUCCESS_REQ);
        }
        if (response != null && !responseSent) {
            session.transfer(response, REL_RESPONSE);
        }

        // 5xx -> RETRY
    } else if (statusCode / 100 == 5) {
        if (request != null) {
            request = session.penalize(request);
            session.transfer(request, REL_RETRY);
        }

        // 1xx, 3xx, 4xx -> NO RETRY
    } else {
        if (request != null) {
            if (context.getProperty(PROP_PENALIZE_NO_RETRY).asBoolean()) {
                request = session.penalize(request);
            }
            session.transfer(request, REL_NO_RETRY);
        }
    }

}
 
Example 13
Source File: PutLambda.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String functionName = context.getProperty(AWS_LAMBDA_FUNCTION_NAME).getValue();

    final String qualifier = context.getProperty(AWS_LAMBDA_FUNCTION_QUALIFIER).getValue();

    // Max size of message is 6 MB
    if ( flowFile.getSize() > MAX_REQUEST_SIZE) {
        getLogger().error("Max size for request body is 6mb but was {} for flow file {} for function {}",
            new Object[]{flowFile.getSize(), flowFile, functionName});
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final AWSLambdaClient client = getClient();

    try {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        session.exportTo(flowFile, baos);

        InvokeRequest invokeRequest = new InvokeRequest()
            .withFunctionName(functionName)
            .withLogType(LogType.Tail).withInvocationType(InvocationType.RequestResponse)
            .withPayload(ByteBuffer.wrap(baos.toByteArray()))
            .withQualifier(qualifier);
        long startTime = System.nanoTime();

        InvokeResult result = client.invoke(invokeRequest);

        flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_STATUS_CODE, result.getStatusCode().toString());

        if ( !StringUtils.isBlank(result.getLogResult() )) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_LOG, new String(Base64.decode(result.getLogResult()),Charset.defaultCharset()));
        }

        if ( result.getPayload() != null ) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_PAYLOAD, new String(result.getPayload().array(),Charset.defaultCharset()));
        }

        if ( ! StringUtils.isBlank(result.getFunctionError()) ){
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_FUNCTION_ERROR, result.getFunctionError());
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
            final long totalTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
            session.getProvenanceReporter().send(flowFile, functionName, totalTimeMillis);
        }
    } catch (final InvalidRequestContentException
        | InvalidParameterValueException
        | RequestTooLargeException
        | ResourceNotFoundException
        | UnsupportedMediaTypeException unrecoverableException) {
            getLogger().error("Failed to invoke lambda {} with unrecoverable exception {} for flow file {}",
                new Object[]{functionName, unrecoverableException, flowFile});
            flowFile = populateExceptionAttributes(session, flowFile, unrecoverableException);
            session.transfer(flowFile, REL_FAILURE);
    } catch (final TooManyRequestsException retryableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}, therefore penalizing flowfile",
            new Object[]{functionName, retryableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, retryableServiceException);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final AmazonServiceException unrecoverableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {} sending to fail",
            new Object[]{functionName, unrecoverableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, unrecoverableServiceException);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final Exception exception) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}",
            new Object[]{functionName, exception, flowFile});
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 14
Source File: PutLambda.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String functionName = context.getProperty(AWS_LAMBDA_FUNCTION_NAME).getValue();

    final String qualifier = context.getProperty(AWS_LAMBDA_FUNCTION_QUALIFIER).getValue();

    // Max size of message is 6 MB
    if ( flowFile.getSize() > MAX_REQUEST_SIZE) {
        getLogger().error("Max size for request body is 6mb but was {} for flow file {} for function {}",
            new Object[]{flowFile.getSize(), flowFile, functionName});
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final AWSLambdaClient client = getClient();

    try {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        session.exportTo(flowFile, baos);

        InvokeRequest invokeRequest = new InvokeRequest()
            .withFunctionName(functionName)
            .withLogType(LogType.Tail).withInvocationType(InvocationType.RequestResponse)
            .withPayload(ByteBuffer.wrap(baos.toByteArray()))
            .withQualifier(qualifier);
        long startTime = System.nanoTime();

        InvokeResult result = client.invoke(invokeRequest);

        flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_STATUS_CODE, result.getStatusCode().toString());

        if ( !StringUtils.isBlank(result.getLogResult() )) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_LOG, new String(Base64.decode(result.getLogResult()),Charset.defaultCharset()));
        }

        if ( result.getPayload() != null ) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_PAYLOAD, new String(result.getPayload().array(),Charset.defaultCharset()));
        }

        if ( ! StringUtils.isBlank(result.getFunctionError()) ){
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_FUNCTION_ERROR, result.getFunctionError());
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
            final long totalTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
            session.getProvenanceReporter().send(flowFile, functionName, totalTimeMillis);
        }
    } catch (final InvalidRequestContentException
        | InvalidParameterValueException
        | RequestTooLargeException
        | ResourceNotFoundException
        | UnsupportedMediaTypeException unrecoverableException) {
            getLogger().error("Failed to invoke lambda {} with unrecoverable exception {} for flow file {}",
                new Object[]{functionName, unrecoverableException, flowFile});
            flowFile = populateExceptionAttributes(session, flowFile, unrecoverableException);
            session.transfer(flowFile, REL_FAILURE);
    } catch (final TooManyRequestsException retryableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}, therefore penalizing flowfile",
            new Object[]{functionName, retryableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, retryableServiceException);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final AmazonServiceException unrecoverableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {} sending to fail",
            new Object[]{functionName, unrecoverableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, unrecoverableServiceException);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final Exception exception) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}",
            new Object[]{functionName, exception, flowFile});
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 15
Source File: AbstractAWSGatewayApiProcessor.java    From nifi with Apache License 2.0
protected void route(FlowFile request, FlowFile response, ProcessSession session,
                     ProcessContext context, int statusCode, Set<Relationship> relationships) {
    // check if we should yield the processor
    if (!isSuccess(statusCode) && request == null) {
        context.yield();
    }

    // If the property to output the response flowfile regardless of status code is set then transfer it
    boolean responseSent = false;
    if (context.getProperty(PROP_OUTPUT_RESPONSE_REGARDLESS).asBoolean()) {
        session.transfer(response, getRelationshipForName(REL_RESPONSE_NAME, relationships));
        responseSent = true;
    }

    // transfer to the correct relationship
    // 2xx -> SUCCESS
    if (isSuccess(statusCode)) {
        // we have two flowfiles to transfer
        if (request != null) {
            session
                .transfer(request, getRelationshipForName(REL_SUCCESS_REQ_NAME, relationships));
        }
        if (response != null && !responseSent) {
            session
                .transfer(response, getRelationshipForName(REL_RESPONSE_NAME, relationships));
        }

        // 5xx -> RETRY
    } else if (statusCode / 100 == 5) {
        if (request != null) {
            request = session.penalize(request);
            session.transfer(request, getRelationshipForName(REL_RETRY_NAME, relationships));
        }

        // 1xx, 3xx, 4xx -> NO RETRY
    } else {
        if (request != null) {
            if (context.getProperty(PROP_PENALIZE_NO_RETRY).asBoolean()) {
                request = session.penalize(request);
            }
            session.transfer(request, getRelationshipForName(REL_NO_RETRY_NAME, relationships));
        }
    }

}
 
Example 16
Source File: FetchDistributedMapCache.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    final String cacheKey = context.getProperty(PROP_CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
    // This block retains the previous behavior when only one Cache Entry Identifier was allowed, so as not to change the expected error message
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[]{flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    List<String> cacheKeys = Arrays.stream(cacheKey.split(",")).filter(path -> !StringUtils.isEmpty(path)).map(String::trim).collect(Collectors.toList());
    for (int i = 0; i < cacheKeys.size(); i++) {
        if (StringUtils.isBlank(cacheKeys.get(i))) {
            // Log first missing identifier, route to failure, and return
            logger.error("FlowFile {} has no attribute for Cache Entry Identifier in position {}", new Object[]{flowFile, i});
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
            return;
        }
    }

    final DistributedMapCacheClient cache = context.getProperty(PROP_DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    try {
        final Map<String, byte[]> cacheValues;
        final boolean singleKey = cacheKeys.size() == 1;
        if (singleKey) {
            cacheValues = new HashMap<>(1);
            cacheValues.put(cacheKeys.get(0), cache.get(cacheKey, keySerializer, valueDeserializer));
        } else {
            cacheValues = cache.subMap(new HashSet<>(cacheKeys), keySerializer, valueDeserializer);
        }
        boolean notFound = false;
        for(Map.Entry<String,byte[]> cacheValueEntry : cacheValues.entrySet()) {
            final byte[] cacheValue = cacheValueEntry.getValue();

            if (cacheValue == null) {
                logger.info("Could not find an entry in cache for {}; routing to not-found", new Object[]{flowFile});
                notFound = true;
                break;
            } else {
                boolean putInAttribute = context.getProperty(PROP_PUT_CACHE_VALUE_IN_ATTRIBUTE).isSet();
                if (putInAttribute) {
                    String attributeName = context.getProperty(PROP_PUT_CACHE_VALUE_IN_ATTRIBUTE).evaluateAttributeExpressions(flowFile).getValue();
                    if (!singleKey) {
                        // Append key to attribute name if multiple keys
                        attributeName += "." + cacheValueEntry.getKey();
                    }
                    String attributeValue = new String(cacheValue, context.getProperty(PROP_CHARACTER_SET).getValue());

                    int maxLength = context.getProperty(PROP_PUT_ATTRIBUTE_MAX_LENGTH).asInteger();
                    if (maxLength < attributeValue.length()) {
                        attributeValue = attributeValue.substring(0, maxLength);
                    }

                    flowFile = session.putAttribute(flowFile, attributeName, attributeValue);

                } else if (cacheKeys.size() > 1) {
                    throw new IOException("Multiple Cache Value Identifiers specified without Put Cache Value In Attribute set");
                } else {
                    // Write single value to content
                    flowFile = session.write(flowFile, out -> out.write(cacheValue));
                }

                if (putInAttribute) {
                    logger.info("Found a cache key of {} and added an attribute to {} with it's value.", new Object[]{cacheKey, flowFile});
                } else {
                    logger.info("Found a cache key of {} and replaced the contents of {} with it's value.", new Object[]{cacheKey, flowFile});
                }
            }
        }
        // If the loop was exited because a cache entry was not found, route to REL_NOT_FOUND; otherwise route to REL_SUCCESS
        if (notFound) {
            session.transfer(flowFile, REL_NOT_FOUND);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[]{flowFile, e});
    }
}
 
Example 17
Source File: FetchGCSObject.java    From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucketName = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final Long generation = context.getProperty(GENERATION).evaluateAttributeExpressions(flowFile).asLong();
    final String encryptionKey = context.getProperty(ENCRYPTION_KEY).evaluateAttributeExpressions(flowFile).getValue();

    final Storage storage = getCloudService();
    final BlobId blobId = BlobId.of(bucketName, key, generation);

    try {
        final List<Storage.BlobSourceOption> blobSourceOptions = new ArrayList<>(2);

        if (encryptionKey != null) {
            blobSourceOptions.add(Storage.BlobSourceOption.decryptionKey(encryptionKey));
        }

        if (generation != null) {
            blobSourceOptions.add(Storage.BlobSourceOption.generationMatch());
        }

        final Blob blob = storage.get(blobId);
        if (blob == null) {
            throw new StorageException(404, "Blob " + blobId + " not found");
        }

        final ReadChannel reader = storage.reader(blobId, blobSourceOptions.toArray(new Storage.BlobSourceOption[0]));
        flowFile = session.importFrom(Channels.newInputStream(reader), flowFile);

        final Map<String, String> attributes = StorageAttributes.createAttributes(blob);
        flowFile = session.putAllAttributes(flowFile, attributes);
    } catch (StorageException e) {
        getLogger().error("Failed to fetch GCS Object due to {}", new Object[] {e}, e);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    session.transfer(flowFile, REL_SUCCESS);

    final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully retrieved GCS Object for {} in {} millis; routing to success", new Object[]{flowFile, millis});
    session.getProvenanceReporter().fetch(flowFile, "https://" + bucketName + ".storage.googleapis.com/" + key, millis);
}
 
Example 18
Source File: FetchDistributedMapCache.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    final String cacheKey = context.getProperty(PROP_CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[]{flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    final DistributedMapCacheClient cache = context.getProperty(PROP_DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    try {
        final byte[] cacheValue = cache.get(cacheKey, keySerializer, valueDeserializer);

        if(cacheValue==null){
            session.transfer(flowFile, REL_NOT_FOUND);
            logger.info("Could not find an entry in cache for {}; routing to not-found", new Object[]{flowFile});

        } else {
            boolean putInAttribute = context.getProperty(PROP_PUT_CACHE_VALUE_IN_ATTRIBUTE).isSet();
            if(putInAttribute){
                String attributeName = context.getProperty(PROP_PUT_CACHE_VALUE_IN_ATTRIBUTE).evaluateAttributeExpressions(flowFile).getValue();
                String attributeValue = new String(cacheValue,context.getProperty(PROP_CHARACTER_SET).getValue());

                int maxLength = context.getProperty(PROP_PUT_ATTRIBUTE_MAX_LENGTH).asInteger();
                if(maxLength < attributeValue.length()){
                    attributeValue = attributeValue.substring(0,maxLength);
                }

                flowFile = session.putAttribute(flowFile, attributeName, attributeValue);

            } else {
                flowFile = session.write(flowFile, new OutputStreamCallback() {
                    @Override
                    public void process(OutputStream out) throws IOException {
                        out.write(cacheValue);
                    }
                });
            }

            session.transfer(flowFile, REL_SUCCESS);
            if(putInAttribute){
                logger.info("Found a cache key of {} and added an attribute to {} with it's value.", new Object[]{cacheKey, flowFile});
            }else {
                logger.info("Found a cache key of {} and replaced the contents of {} with it's value.", new Object[]{cacheKey, flowFile});
            }
        }

    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[]{flowFile, e});
    }
}
 
Example 19
Source File: PutDistributedMapCache.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();

    // cache key is computed from attribute 'CACHE_ENTRY_IDENTIFIER' with expression language support
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();

    // if the computed value is null, or empty, we transfer the flow file to failure relationship
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] {flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    // the cache client used to interact with the distributed cache
    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    try {

        final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue();
        long flowFileSize = flowFile.getSize();

        // too big flow file
        if (flowFileSize > maxCacheEntrySize) {
            logger.warn("Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] {flowFile, flowFileSize, maxCacheEntrySize});
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        if (flowFileSize == 0) {
            logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] {flowFile});
            session.transfer(flowFile, REL_FAILURE);
            return;

        }

        // get flow file content
        final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        session.exportTo(flowFile, byteStream);
        byte[] cacheValue = byteStream.toByteArray();
        final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue();
        boolean cached = false;

        if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) {
            cache.put(cacheKey, cacheValue, keySerializer, valueSerializer);
            cached = true;
        } else if (updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) {
            final byte[] oldValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
            if (oldValue == null) {
                cached = true;
            }
        }

        // set 'cached' attribute
        flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached));

        if (cached) {
            session.transfer(flowFile, REL_SUCCESS);
        } else {
            session.transfer(flowFile, REL_FAILURE);
        }

    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[] {flowFile, e});
    }
}
 
Example 20
Source File: ExecuteSparkInteractive.java    From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, final ProcessSession session) throws ProcessException {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog log = getLogger();
    final LivySessionService livySessionService = context.getProperty(LIVY_CONTROLLER_SERVICE).asControllerService(LivySessionService.class);
    final Map<String, String> livyController;
    try {
        livyController = livySessionService.getSession();
        if (livyController == null || livyController.isEmpty()) {
            log.debug("No Spark session available (yet), routing flowfile to wait");
            session.transfer(flowFile, REL_WAIT);
            context.yield();
            return;
        }
    } catch (SessionManagerException sme) {
        log.error("Error opening spark session, routing flowfile to wait", sme);
        session.transfer(flowFile, REL_WAIT);
        context.yield();
        return;
    }
    final long statusCheckInterval = context.getProperty(STATUS_CHECK_INTERVAL).evaluateAttributeExpressions(flowFile).asTimePeriod(TimeUnit.MILLISECONDS);
    Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    String sessionId = livyController.get("sessionId");
    String livyUrl = livyController.get("livyUrl");
    String code = context.getProperty(CODE).evaluateAttributeExpressions(flowFile).getValue();
    if (StringUtils.isEmpty(code)) {
        try (InputStream inputStream = session.read(flowFile)) {
            // If no code was provided, assume it is in the content of the incoming flow file
            code = IOUtils.toString(inputStream, charset);
        } catch (IOException ioe) {
            log.error("Error reading input flowfile, penalizing and routing to failure", new Object[]{flowFile, ioe.getMessage()}, ioe);
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
            return;
        }
    }

    code = StringEscapeUtils.escapeJson(code);
    String payload = "{\"code\":\"" + code + "\"}";
    try {
        final JSONObject result = submitAndHandleJob(livyUrl, livySessionService, sessionId, payload, statusCheckInterval);
        log.debug("ExecuteSparkInteractive Result of Job Submit: " + result);
        if (result == null) {
            session.transfer(flowFile, REL_FAILURE);
        } else {
            try {
                final JSONObject output = result.getJSONObject("data");
                flowFile = session.write(flowFile, out -> out.write(output.toString().getBytes(charset)));
                flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), LivySessionService.APPLICATION_JSON);
                session.transfer(flowFile, REL_SUCCESS);
            } catch (JSONException je) {
                // The result doesn't contain the data, just send the output object as the flow file content to failure (after penalizing)
                log.error("Spark Session returned an error, sending the output JSON object as the flow file content to failure (after penalizing)");
                flowFile = session.write(flowFile, out -> out.write(result.toString().getBytes(charset)));
                flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), LivySessionService.APPLICATION_JSON);
                flowFile = session.penalize(flowFile);
                session.transfer(flowFile, REL_FAILURE);
            }
        }
    } catch (IOException | SessionManagerException e) {
        log.error("Failure processing flowfile {} due to {}, penalizing and routing to failure", new Object[]{flowFile, e.getMessage()}, e);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}