Java Code Examples for org.apache.nifi.processor.ProcessSession#exportTo()

The following examples show how to use org.apache.nifi.processor.ProcessSession#exportTo(). Each example is taken from an open-source project; the source file and license are noted above the code.
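Nearly all of the snippets below follow the same pattern: copy the FlowFile's content into an in-memory ByteArrayOutputStream with exportTo(), then turn the buffered bytes into a String or byte array. A minimal sketch of that pattern (the class and method names here are illustrative and not taken from any of the projects below):

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessSession;

class ExportToExample {
    // Buffers the entire FlowFile content in memory and returns it as a UTF-8 String.
    // Only appropriate for small FlowFiles, since exportTo() copies the whole payload.
    static String readContentAsString(final ProcessSession session, final FlowFile flowFile) {
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        session.exportTo(flowFile, out);   // write the FlowFile's content to the stream
        return new String(out.toByteArray(), StandardCharsets.UTF_8);
    }
}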
Example 1
Source File: FetchGridFS.java    From nifi with Apache License 2.0
private String getQuery(ProcessSession session, ProcessContext context, FlowFile input) throws IOException {
    String queryString;
    if (context.getProperty(FILE_NAME).isSet()) {
        String fileName = context.getProperty(FILE_NAME).evaluateAttributeExpressions(input).getValue();
        queryString = String.format("{ \"filename\": \"%s\"}", fileName);
    } else if (context.getProperty(QUERY).isSet()) {
        queryString = context.getProperty(QUERY).evaluateAttributeExpressions(input).getValue();
    } else {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        session.exportTo(input, out);
        out.close();
        queryString = new String(out.toByteArray());
    }

    return queryString;
}
 
Example 2
Source File: QueryHelper.java    From nifi with Apache License 2.0
default String readQuery(ProcessContext context, ProcessSession session, PropertyDescriptor queryProp, FlowFile input) throws IOException {
    String queryStr;

    if (context.getProperty(queryProp).isSet()) {
        queryStr = context.getProperty(queryProp).evaluateAttributeExpressions(input).getValue();
    } else if (!context.getProperty(queryProp).isSet() && input == null) {
        queryStr = "{}";
    } else {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        session.exportTo(input, out);
        out.close();
        queryStr = new String(out.toByteArray());
    }

    return queryStr;
}
 
Example 3
Source File: AbstractMongoQueryProcessor.java    From nifi with Apache License 2.0
protected Document getQuery(ProcessContext context, ProcessSession session, FlowFile input) {
    Document query = null;
    if (context.getProperty(QUERY).isSet()) {
        query = Document.parse(context.getProperty(QUERY).evaluateAttributeExpressions(input).getValue());
    } else if (!context.getProperty(QUERY).isSet() && input == null) {
        query = Document.parse("{}");
    } else {
        try {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            session.exportTo(input, out);
            out.close();
            query = Document.parse(new String(out.toByteArray()));
        } catch (Exception ex) {
            getLogger().error("Error reading FlowFile : ", ex);
            throw new ProcessException(ex);
        }
    }

    return query;
}
 
Example 4
Source File: AbstractAWSGatewayApiProcessor.java    From nifi with Apache License 2.0
protected InputStream getRequestBodyToSend(final ProcessSession session,
                                           final ProcessContext context,
                                           final FlowFile requestFlowFile) {

    if (context.getProperty(PROP_SEND_BODY).asBoolean() && requestFlowFile != null) {
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
        session.exportTo(requestFlowFile, outputStream);
        return new ByteArrayInputStream(outputStream.toByteArray());

    } else {
        return new ByteArrayInputStream(new byte[0]);
    }
}
 
Example 5
Source File: ElasticsearchRestProcessor.java    From nifi with Apache License 2.0
default String getQuery(FlowFile input, ProcessContext context, ProcessSession session) throws IOException {
    String retVal = null;
    if (context.getProperty(QUERY).isSet()) {
        retVal = context.getProperty(QUERY).evaluateAttributeExpressions(input).getValue();
    } else if (input != null) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        session.exportTo(input, out);
        out.close();

        retVal = new String(out.toByteArray());
    }

    return retVal;
}
 
Example 6
Source File: ExecuteGraphQuery.java    From nifi with Apache License 2.0
protected String getQuery(ProcessContext context, ProcessSession session, FlowFile input) {
    String query = context.getProperty(QUERY).evaluateAttributeExpressions(input).getValue();
    if (StringUtils.isEmpty(query) && input != null) {
        try {
            if (input.getSize() > (64 * 1024)) {
                throw new Exception("Input bigger than 64kb. Cannot assume this is a valid query for Gremlin Server " +
                        "or Neo4J.");
                /*
                 * Note: Gremlin Server compiles the query down to Java byte code, and queries 64kb and above often
                 * bounce because they violate Java method size limits. We might want to revisit this, but as a starting
                 * point, it is a sane default to assume that if a flowfile is bigger than about 64kb it is either not a
                 * query or it is a query likely to be a very poor fit here.
                 */
            }

            ByteArrayOutputStream out = new ByteArrayOutputStream();
            session.exportTo(input, out);
            out.close();


            query = new String(out.toByteArray());
        } catch (Exception ex) {
            throw new ProcessException(ex);
        }
    }
    return query;
}
 
Example 7
Source File: FlattenJson.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String mode = context.getProperty(FLATTEN_MODE).getValue();
    final FlattenMode flattenMode = getFlattenMode(mode);

    String separator = context.getProperty(SEPARATOR).evaluateAttributeExpressions(flowFile).getValue();


    try {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        session.exportTo(flowFile, bos);
        bos.close();

        String raw = new String(bos.toByteArray());
        final String flattened = new JsonFlattener(raw)
                .withFlattenMode(flattenMode)
                .withSeparator(separator.charAt(0))
                .withStringEscapePolicy(() -> StringEscapeUtils.ESCAPE_JSON)
                .flatten();

        flowFile = session.write(flowFile, os -> os.write(flattened.getBytes()));

        session.transfer(flowFile, REL_SUCCESS);
    } catch (Exception ex) {
        session.transfer(flowFile, REL_FAILURE);
    }
}
 
Example 8
Source File: ExecuteInfluxDBQuery.java    From nifi with Apache License 2.0
protected String getQuery(final ProcessSession session, Charset charset, FlowFile incomingFlowFile)
        throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(incomingFlowFile, baos);
    baos.close();
    return new String(baos.toByteArray(), charset);
}
 
Example 9
Source File: PutAzureQueueStorage.java    From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(flowFile, baos);
    final String flowFileContent = baos.toString();

    CloudQueueMessage message = new CloudQueueMessage(flowFileContent);
    CloudQueueClient cloudQueueClient;
    CloudQueue cloudQueue;

    final int ttl = context.getProperty(TTL).asTimePeriod(TimeUnit.SECONDS).intValue();
    final int delay = context.getProperty(VISIBILITY_DELAY).asTimePeriod(TimeUnit.SECONDS).intValue();
    final String queue = context.getProperty(QUEUE).evaluateAttributeExpressions(flowFile).getValue().toLowerCase();

    try {
        cloudQueueClient = createCloudQueueClient(context, flowFile);
        cloudQueue = cloudQueueClient.getQueueReference(queue);

        final OperationContext operationContext = new OperationContext();
        AzureStorageUtils.setProxy(operationContext, context);

        cloudQueue.addMessage(message, ttl, delay, null, operationContext);
    } catch (URISyntaxException | StorageException e) {
        getLogger().error("Failed to write the message to Azure Queue Storage due to {}", new Object[]{e});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    session.transfer(flowFile, REL_SUCCESS);
    final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    session.getProvenanceReporter().send(flowFile, cloudQueue.getUri().toString(), transmissionMillis);
}
 
Example 10
Source File: PutDistributedMapCache.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();

    // cache key is computed from attribute 'CACHE_ENTRY_IDENTIFIER' with expression language support
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();

    // if the computed value is null, or empty, we transfer the flow file to failure relationship
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] {flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    // the cache client used to interact with the distributed cache
    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    try {

        final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue();
        long flowFileSize = flowFile.getSize();

        // too big flow file
        if (flowFileSize > maxCacheEntrySize) {
            logger.warn("Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] {flowFile, flowFileSize, maxCacheEntrySize});
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        if (flowFileSize == 0) {
            logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] {flowFile});
            session.transfer(flowFile, REL_FAILURE);
            return;

        }

        // get flow file content
        final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        session.exportTo(flowFile, byteStream);
        byte[] cacheValue = byteStream.toByteArray();
        final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue();
        boolean cached = false;

        if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) {
            cache.put(cacheKey, cacheValue, keySerializer, valueSerializer);
            cached = true;
        } else if (updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) {
            final byte[] oldValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
            if (oldValue == null) {
                cached = true;
            }
        }

        // set 'cached' attribute
        flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached));

        if (cached) {
            session.transfer(flowFile, REL_SUCCESS);
        } else {
            session.transfer(flowFile, REL_FAILURE);
        }

    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[] {flowFile, e});
    }
}
 
Example 11
Source File: PutRethinkDB.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    if ( flowFile.getSize() == 0) {
        getLogger().error("Empty message");
        flowFile = session.putAttribute(flowFile, RETHINKDB_ERROR_MESSAGE, "Empty message size " + flowFile.getSize());
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    if ( flowFile.getSize() > maxDocumentsSize) {
        getLogger().error("Message size exceeded {} max allowed is {}", new Object[] { flowFile.getSize(), maxDocumentsSize});
        flowFile = session.putAttribute(flowFile, RETHINKDB_ERROR_MESSAGE, "Max message size exceeded " + flowFile.getSize());
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());
    String conflictStrategy = context.getProperty(CONFLICT_STRATEGY).evaluateAttributeExpressions(flowFile).getValue();
    String durability = context.getProperty(DURABILITY).evaluateAttributeExpressions(flowFile).getValue();

    try {
        long startTimeMillis = System.currentTimeMillis();
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        session.exportTo(flowFile, baos);
        String documents = new String(baos.toByteArray(), charset);
        JSONParser parser = new JSONParser();
        Object jsonDocuments = parser.parse(documents);

        Insert insert = getRdbTable().insert(jsonDocuments)
            .optArg(CONFLICT_OPTION_KEY, conflictStrategy)
            .optArg(DURABILITY_OPTION_KEY, durability);

        HashMap<String,Object> result = runInsert(insert);
        final long endTimeMillis = System.currentTimeMillis();
        getLogger().debug("Json documents {} inserted Result: {}", new Object[] {documents, result});
        flowFile = populateAttributes(session, flowFile, result);

        if ( (Long)result.get(RESULT_ERROR_KEY) != 0 ) {
            getLogger().error("There were errors while inserting data documents {} result {}",
               new Object [] {documents, result});
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
            session.getProvenanceReporter().send(flowFile,
                new StringBuilder("rethinkdb://").append(databaseName).append("/").append(tableName).toString(),
                (endTimeMillis - startTimeMillis));
        }
    } catch (Exception exception) {
        getLogger().error("Failed to insert into RethinkDB due to {}",
                new Object[]{exception.getLocalizedMessage()}, exception);
        flowFile = session.putAttribute(flowFile, RETHINKDB_ERROR_MESSAGE, String.valueOf(exception.getMessage()));
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 12
Source File: PutSNS.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    if (flowFile.getSize() > MAX_SIZE) {
        getLogger().error("Cannot publish {} to SNS because its size exceeds Amazon SNS's limit of 256KB; routing to failure", new Object[]{flowFile});
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final Charset charset = Charset.forName(context.getProperty(CHARACTER_ENCODING).evaluateAttributeExpressions(flowFile).getValue());

    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(flowFile, baos);
    final String message = new String(baos.toByteArray(), charset);

    final AmazonSNSClient client = getClient();
    final PublishRequest request = new PublishRequest();
    request.setMessage(message);

    if (context.getProperty(USE_JSON_STRUCTURE).asBoolean()) {
        request.setMessageStructure("json");
    }

    final String arn = context.getProperty(ARN).evaluateAttributeExpressions(flowFile).getValue();
    final String arnType = context.getProperty(ARN_TYPE).getValue();
    if (arnType.equalsIgnoreCase(ARN_TYPE_TOPIC.getValue())) {
        request.setTopicArn(arn);
    } else {
        request.setTargetArn(arn);
    }

    final String subject = context.getProperty(SUBJECT).evaluateAttributeExpressions(flowFile).getValue();
    if (subject != null) {
        request.setSubject(subject);
    }

    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        if (entry.getKey().isDynamic() && !StringUtils.isEmpty(entry.getValue())) {
            final MessageAttributeValue value = new MessageAttributeValue();
            value.setStringValue(context.getProperty(entry.getKey()).evaluateAttributeExpressions(flowFile).getValue());
            value.setDataType("String");
            request.addMessageAttributesEntry(entry.getKey().getName(), value);
        }
    }

    try {
        client.publish(request);
        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().send(flowFile, arn);
        getLogger().info("Successfully published notification for {}", new Object[]{flowFile});
    } catch (final Exception e) {
        getLogger().error("Failed to publish Amazon SNS message for {} due to {}", new Object[]{flowFile, e});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
 
Example 13
Source File: DeleteMongo.java    From nifi with Apache License 2.0
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final WriteConcern writeConcern = getWriteConcern(context);
    final String deleteMode = context.getProperty(DELETE_MODE).getValue();
    final String deleteAttr = flowFile.getAttribute("mongodb.delete.mode");
    final Boolean failMode  = context.getProperty(FAIL_ON_NO_DELETE).asBoolean();

    if (deleteMode.equals(DELETE_ATTR.getValue())
            && (StringUtils.isEmpty(deleteAttr) || !ALLOWED_DELETE_VALUES.contains(deleteAttr.toLowerCase()) )) {
        getLogger().error(String.format("%s is not an allowed value for mongodb.delete.mode", deleteAttr));
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    try {
        final MongoCollection<Document> collection = getCollection(context, flowFile).withWriteConcern(writeConcern);
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        session.exportTo(flowFile, bos);
        bos.close();

        String json = new String(bos.toByteArray());
        Document query = Document.parse(json);
        DeleteResult result;

        if (deleteMode.equals(DELETE_ONE.getValue())
                || (deleteMode.equals(DELETE_ATTR.getValue()) && deleteAttr.toLowerCase().equals("one") )) {
            result = collection.deleteOne(query);
        } else {
            result = collection.deleteMany(query);
        }

        if (failMode && result.getDeletedCount() == 0) {
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
        }

    } catch (Exception ex) {
        getLogger().error("Could not send a delete to MongoDB, failing...", ex);
        session.transfer(flowFile, REL_FAILURE);
    }
}
 
Example 14
Source File: PutSQS.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();
    final AmazonSQSClient client = getClient();
    final SendMessageBatchRequest request = new SendMessageBatchRequest();
    final String queueUrl = context.getProperty(QUEUE_URL).evaluateAttributeExpressions(flowFile).getValue();
    request.setQueueUrl(queueUrl);

    final Set<SendMessageBatchRequestEntry> entries = new HashSet<>();

    final SendMessageBatchRequestEntry entry = new SendMessageBatchRequestEntry();
    entry.setId(flowFile.getAttribute("uuid"));
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(flowFile, baos);
    final String flowFileContent = baos.toString();
    entry.setMessageBody(flowFileContent);

    final Map<String, MessageAttributeValue> messageAttributes = new HashMap<>();

    for (final PropertyDescriptor descriptor : userDefinedProperties) {
        final MessageAttributeValue mav = new MessageAttributeValue();
        mav.setDataType("String");
        mav.setStringValue(context.getProperty(descriptor).evaluateAttributeExpressions(flowFile).getValue());
        messageAttributes.put(descriptor.getName(), mav);
    }

    entry.setMessageAttributes(messageAttributes);
    entry.setDelaySeconds(context.getProperty(DELAY).asTimePeriod(TimeUnit.SECONDS).intValue());
    entries.add(entry);

    request.setEntries(entries);

    try {
        client.sendMessageBatch(request);
    } catch (final Exception e) {
        getLogger().error("Failed to send messages to Amazon SQS due to {}; routing to failure", new Object[]{e});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    getLogger().info("Successfully published message to Amazon SQS for {}", new Object[]{flowFile});
    session.transfer(flowFile, REL_SUCCESS);
    final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    session.getProvenanceReporter().send(flowFile, queueUrl, transmissionMillis);
}
 
Example 15
Source File: DeleteHBaseRow.java    From nifi with Apache License 2.0
private FlowFile doDeleteFromContent(FlowFile flowFile, ProcessContext context, ProcessSession session, String tableName, int batchSize, String charset, String visibility) throws Exception {
    String keySeparator = context.getProperty(KEY_SEPARATOR).evaluateAttributeExpressions(flowFile).getValue();
    final String restartIndex = flowFile.getAttribute(RESTART_INDEX);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    session.exportTo(flowFile, out);
    out.close();

    String data = new String(out.toByteArray(), charset);

    int restartFrom = -1;
    if (restartIndex != null) {
        restartFrom = Integer.parseInt(restartIndex);
    }

    String first = null, last = null;

    List<byte[]> batch = new ArrayList<>();
    if (data != null && data.length() > 0) {
        String[] parts = data.split(keySeparator);
        int index = 0;
        try {
            for (index = 0; index < parts.length; index++) {
                if (restartFrom > 0 && index < restartFrom) {
                    continue;
                }

                if (first == null) {
                    first = parts[index];
                }

                batch.add(parts[index].getBytes(charset));
                if (batch.size() == batchSize) {
                    clientService.delete(tableName, batch, visibility);
                    batch = new ArrayList<>();
                }
                last = parts[index];
            }
            if (batch.size() > 0) {
                clientService.delete(tableName, batch, visibility);
            }

            flowFile = session.removeAttribute(flowFile, RESTART_INDEX);
            flowFile = session.putAttribute(flowFile, ROWKEY_START, first);
            flowFile = session.putAttribute(flowFile, ROWKEY_END, last);
        } catch (Exception ex) {
            getLogger().error("Error sending delete batch", ex);
            int restartPoint = index - batch.size() > 0 ? index - batch.size() : 0;
            flowFile = session.putAttribute(flowFile, RESTART_INDEX, String.valueOf(restartPoint));
        }
    }

    return flowFile;
}
 
Example 16
Source File: PutLambda.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String functionName = context.getProperty(AWS_LAMBDA_FUNCTION_NAME).getValue();

    final String qualifier = context.getProperty(AWS_LAMBDA_FUNCTION_QUALIFIER).getValue();

    // Max size of message is 6 MB
    if ( flowFile.getSize() > MAX_REQUEST_SIZE) {
        getLogger().error("Max size for request body is 6mb but was {} for flow file {} for function {}",
            new Object[]{flowFile.getSize(), flowFile, functionName});
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final AWSLambdaClient client = getClient();

    try {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        session.exportTo(flowFile, baos);

        InvokeRequest invokeRequest = new InvokeRequest()
            .withFunctionName(functionName)
            .withLogType(LogType.Tail).withInvocationType(InvocationType.RequestResponse)
            .withPayload(ByteBuffer.wrap(baos.toByteArray()))
            .withQualifier(qualifier);
        long startTime = System.nanoTime();

        InvokeResult result = client.invoke(invokeRequest);

        flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_STATUS_CODE, result.getStatusCode().toString());

        if ( !StringUtils.isBlank(result.getLogResult() )) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_LOG, new String(Base64.decode(result.getLogResult()),Charset.defaultCharset()));
        }

        if ( result.getPayload() != null ) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_PAYLOAD, new String(result.getPayload().array(),Charset.defaultCharset()));
        }

        if ( ! StringUtils.isBlank(result.getFunctionError()) ){
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_FUNCTION_ERROR, result.getFunctionError());
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
            final long totalTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
            session.getProvenanceReporter().send(flowFile, functionName, totalTimeMillis);
        }
    } catch (final InvalidRequestContentException
        | InvalidParameterValueException
        | RequestTooLargeException
        | ResourceNotFoundException
        | UnsupportedMediaTypeException unrecoverableException) {
            getLogger().error("Failed to invoke lambda {} with unrecoverable exception {} for flow file {}",
                new Object[]{functionName, unrecoverableException, flowFile});
            flowFile = populateExceptionAttributes(session, flowFile, unrecoverableException);
            session.transfer(flowFile, REL_FAILURE);
    } catch (final TooManyRequestsException retryableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}, therefore penalizing flowfile",
            new Object[]{functionName, retryableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, retryableServiceException);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final AmazonServiceException unrecoverableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {} sending to fail",
            new Object[]{functionName, unrecoverableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, unrecoverableServiceException);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final Exception exception) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}",
            new Object[]{functionName, exception, flowFile});
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 17
Source File: PutLambda.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String functionName = context.getProperty(AWS_LAMBDA_FUNCTION_NAME).getValue();

    final String qualifier = context.getProperty(AWS_LAMBDA_FUNCTION_QUALIFIER).getValue();

    // Max size of message is 6 MB
    if ( flowFile.getSize() > MAX_REQUEST_SIZE) {
        getLogger().error("Max size for request body is 6mb but was {} for flow file {} for function {}",
            new Object[]{flowFile.getSize(), flowFile, functionName});
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final AWSLambdaClient client = getClient();

    try {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        session.exportTo(flowFile, baos);

        InvokeRequest invokeRequest = new InvokeRequest()
            .withFunctionName(functionName)
            .withLogType(LogType.Tail).withInvocationType(InvocationType.RequestResponse)
            .withPayload(ByteBuffer.wrap(baos.toByteArray()))
            .withQualifier(qualifier);
        long startTime = System.nanoTime();

        InvokeResult result = client.invoke(invokeRequest);

        flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_STATUS_CODE, result.getStatusCode().toString());

        if ( !StringUtils.isBlank(result.getLogResult() )) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_LOG, new String(Base64.decode(result.getLogResult()),Charset.defaultCharset()));
        }

        if ( result.getPayload() != null ) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_PAYLOAD, new String(result.getPayload().array(),Charset.defaultCharset()));
        }

        if ( ! StringUtils.isBlank(result.getFunctionError()) ){
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_FUNCTION_ERROR, result.getFunctionError());
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
            final long totalTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
            session.getProvenanceReporter().send(flowFile, functionName, totalTimeMillis);
        }
    } catch (final InvalidRequestContentException
        | InvalidParameterValueException
        | RequestTooLargeException
        | ResourceNotFoundException
        | UnsupportedMediaTypeException unrecoverableException) {
            getLogger().error("Failed to invoke lambda {} with unrecoverable exception {} for flow file {}",
                new Object[]{functionName, unrecoverableException, flowFile});
            flowFile = populateExceptionAttributes(session, flowFile, unrecoverableException);
            session.transfer(flowFile, REL_FAILURE);
    } catch (final TooManyRequestsException retryableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}, therefore penalizing flowfile",
            new Object[]{functionName, retryableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, retryableServiceException);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final AmazonServiceException unrecoverableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {} sending to fail",
            new Object[]{functionName, unrecoverableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, unrecoverableServiceException);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final Exception exception) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}",
            new Object[]{functionName, exception, flowFile});
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 18
Source File: PutSQS.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();
    final AmazonSQSClient client = getClient();
    final SendMessageBatchRequest request = new SendMessageBatchRequest();
    final String queueUrl = context.getProperty(QUEUE_URL).evaluateAttributeExpressions(flowFile).getValue();
    request.setQueueUrl(queueUrl);

    final Set<SendMessageBatchRequestEntry> entries = new HashSet<>();

    final SendMessageBatchRequestEntry entry = new SendMessageBatchRequestEntry();
    entry.setId(flowFile.getAttribute("uuid"));
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(flowFile, baos);
    final String flowFileContent = baos.toString();
    entry.setMessageBody(flowFileContent);

    final Map<String, MessageAttributeValue> messageAttributes = new HashMap<>();

    for (final PropertyDescriptor descriptor : userDefinedProperties) {
        final MessageAttributeValue mav = new MessageAttributeValue();
        mav.setDataType("String");
        mav.setStringValue(context.getProperty(descriptor).evaluateAttributeExpressions(flowFile).getValue());
        messageAttributes.put(descriptor.getName(), mav);
    }

    entry.setMessageAttributes(messageAttributes);
    entry.setDelaySeconds(context.getProperty(DELAY).asTimePeriod(TimeUnit.SECONDS).intValue());
    entries.add(entry);

    request.setEntries(entries);

    try {
        SendMessageBatchResult response = client.sendMessageBatch(request);

        // check for errors
        if (!response.getFailed().isEmpty()) {
            throw new ProcessException(response.getFailed().get(0).toString());
        }
    } catch (final Exception e) {
        getLogger().error("Failed to send messages to Amazon SQS due to {}; routing to failure", new Object[]{e});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    getLogger().info("Successfully published message to Amazon SQS for {}", new Object[]{flowFile});
    session.transfer(flowFile, REL_SUCCESS);
    final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    session.getProvenanceReporter().send(flowFile, queueUrl, transmissionMillis);
}
 
Example 19
Source File: PutSNS.java    From nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    if (flowFile.getSize() > MAX_SIZE) {
        getLogger().error("Cannot publish {} to SNS because its size exceeds Amazon SNS's limit of 256KB; routing to failure", new Object[]{flowFile});
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final Charset charset = Charset.forName(context.getProperty(CHARACTER_ENCODING).evaluateAttributeExpressions(flowFile).getValue());

    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    session.exportTo(flowFile, baos);
    final String message = new String(baos.toByteArray(), charset);

    final AmazonSNSClient client = getClient();
    final PublishRequest request = new PublishRequest();
    request.setMessage(message);

    if (context.getProperty(USE_JSON_STRUCTURE).asBoolean()) {
        request.setMessageStructure("json");
    }

    final String arn = context.getProperty(ARN).evaluateAttributeExpressions(flowFile).getValue();
    final String arnType = context.getProperty(ARN_TYPE).getValue();
    if (arnType.equalsIgnoreCase(ARN_TYPE_TOPIC.getValue())) {
        request.setTopicArn(arn);
    } else {
        request.setTargetArn(arn);
    }

    final String subject = context.getProperty(SUBJECT).evaluateAttributeExpressions(flowFile).getValue();
    if (subject != null) {
        request.setSubject(subject);
    }

    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        if (entry.getKey().isDynamic() && !StringUtils.isEmpty(entry.getValue())) {
            final MessageAttributeValue value = new MessageAttributeValue();
            value.setStringValue(context.getProperty(entry.getKey()).evaluateAttributeExpressions(flowFile).getValue());
            value.setDataType("String");
            request.addMessageAttributesEntry(entry.getKey().getName(), value);
        }
    }

    try {
        client.publish(request);
        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().send(flowFile, arn);
        getLogger().info("Successfully published notification for {}", new Object[]{flowFile});
    } catch (final Exception e) {
        getLogger().error("Failed to publish Amazon SNS message for {} due to {}", new Object[]{flowFile, e});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
 
Example 20
Source File: PutDistributedMapCache.java    From localization_nifi with Apache License 2.0
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final ComponentLog logger = getLogger();

    // cache key is computed from attribute 'CACHE_ENTRY_IDENTIFIER' with expression language support
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();

    // if the computed value is null, or empty, we transfer the flow file to failure relationship
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] {flowFile});
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    // the cache client used to interact with the distributed cache
    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    try {

        final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue();
        long flowFileSize = flowFile.getSize();

        // too big flow file
        if (flowFileSize > maxCacheEntrySize) {
            logger.warn("Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] {flowFile, flowFileSize, maxCacheEntrySize});
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        if (flowFileSize == 0) {
            logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] {flowFile});
            session.transfer(flowFile, REL_FAILURE);
            return;

        }

        // get flow file content
        final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        session.exportTo(flowFile, byteStream);
        byte[] cacheValue = byteStream.toByteArray();
        final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue();
        boolean cached = false;

        if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) {
            cache.put(cacheKey, cacheValue, keySerializer, valueSerializer);
            cached = true;
        } else if (updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) {
            final byte[] oldValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
            if (oldValue == null) {
                cached = true;
            }
        }

        // set 'cached' attribute
        flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached));

        if (cached) {
            session.transfer(flowFile, REL_SUCCESS);
        } else {
            session.transfer(flowFile, REL_FAILURE);
        }

    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[] {flowFile, e});
    }
}