Java Code Examples for org.apache.nifi.processor.ProcessContext#yield()

The following examples show how to use org.apache.nifi.processor.ProcessContext#yield(). Each example is taken from an open source project; the source file and project are noted above it.
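
Before diving into the examples, a quick orientation: ProcessContext#yield() asks the NiFi framework to stop scheduling the processor until its configured Yield Duration has elapsed. Processors call it when there is no work to do, or when work cannot currently succeed (for example, an external system is down), so the component backs off instead of busy-spinning or hammering a failing endpoint. The sketch below shows the basic pattern; the processor class and relationship are hypothetical, assuming the standard NiFi processor API.

import java.util.Collections;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

// Hypothetical processor illustrating the common yield pattern.
public class YieldExampleProcessor extends AbstractProcessor {

    static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
            .description("FlowFiles that were processed successfully")
            .build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        final FlowFile flowFile = session.get();
        if (flowFile == null) {
            // No work available: yield so the framework does not reschedule
            // this processor until the configured Yield Duration elapses.
            context.yield();
            return;
        }

        // Normal processing would happen here. As the examples below show,
        // processors also call context.yield() after external failures to
        // back off before the next attempt.
        session.transfer(flowFile, REL_SUCCESS);
    }
}

Note that yield() affects scheduling of the processor as a whole, whereas ProcessSession.penalize(FlowFile) delays only an individual FlowFile; several of the examples below combine the two.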
Example 1
Source File: AbstractPutEventProcessor.java    From localization_nifi with Apache License 2.0    6 votes
/**
 * Helper method to acquire an available ChannelSender from the pool. If the pool is empty then a new sender is created.
 *
 * @param context
 *            - the current process context.
 *
 * @param session
 *            - the current process session.
 * @param flowFile
 *            - the FlowFile being processed in this session.
 *
 * @return ChannelSender - the sender that has been acquired or null if no sender is available and a new sender cannot be created.
 */
protected ChannelSender acquireSender(final ProcessContext context, final ProcessSession session, final FlowFile flowFile) {
    ChannelSender sender = senderPool.poll();
    if (sender == null) {
        try {
            getLogger().debug("No available connections, creating a new one...");
            sender = createSender(context);
        } catch (IOException e) {
            getLogger().error("No available connections, and unable to create a new one, transferring {} to failure",
                    new Object[]{flowFile}, e);
            session.transfer(flowFile, REL_FAILURE);
            session.commit();
            context.yield();
            sender = null;
        }
    }

    return sender;
}
 
Example 2
Source File: ListenHTTP.java    From nifi with Apache License 2.0    6 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    try {
        if (!initialized.get()) {
            createHttpServerFromService(context);
        }
    } catch (Exception e) {
        getLogger().warn("Failed to start http server during initialization: " + e);
        context.yield();
        throw new ProcessException("Failed to initialize the server", e);
    }

    sessionFactoryReference.compareAndSet(null, sessionFactory);

    for (final String id : findOldFlowFileIds(context)) {
        final FlowFileEntryTimeWrapper wrapper = flowFileMap.remove(id);
        if (wrapper != null) {
            getLogger().warn("failed to received acknowledgment for HOLD with ID {} sent by {}; rolling back session", new Object[] {id, wrapper.getClientIP()});
            wrapper.session.rollback();
        }
    }

    context.yield();
}
 
Example 3
Source File: PublishAMQP.java    From localization_nifi with Apache License 2.0    6 votes
/**
 * Will construct an AMQP message by extracting its body from the incoming
 * {@link FlowFile}. AMQP properties will be extracted from the
 * {@link FlowFile} and converted to {@link BasicProperties} to be sent
 * along with the message. Upon success the incoming {@link FlowFile} is
 * transferred to the 'success' {@link Relationship}; upon failure the
 * FlowFile is penalized and transferred to the 'failure' {@link Relationship}.
 * <br>
 * NOTE: Attributes extracted from the {@link FlowFile} are considered
 * candidates for AMQP properties if their names are prefixed with
 * {@link AMQPUtils#AMQP_PROP_PREFIX} (e.g., amqp$contentType=text/xml).
 */
@Override
protected void rendezvousWithAmqp(ProcessContext context, ProcessSession processSession) throws ProcessException {
    FlowFile flowFile = processSession.get();
    if (flowFile != null) {
        BasicProperties amqpProperties = this.extractAmqpPropertiesFromFlowFile(flowFile);
        String routingKey = context.getProperty(ROUTING_KEY).evaluateAttributeExpressions(flowFile).getValue();
        if (routingKey == null){
            throw new IllegalArgumentException("Failed to determine 'routing key' with provided value '"
                    + context.getProperty(ROUTING_KEY) + "' after evaluating it as expression against incoming FlowFile.");
        }
        String exchange = context.getProperty(EXCHANGE).evaluateAttributeExpressions(flowFile).getValue();

        byte[] messageContent = this.extractMessage(flowFile, processSession);

        try {
            this.targetResource.publish(messageContent, amqpProperties, routingKey, exchange);
            processSession.transfer(flowFile, REL_SUCCESS);
            processSession.getProvenanceReporter().send(flowFile, this.amqpConnection.toString() + "/E:" + exchange + "/RK:" + routingKey);
        } catch (Exception e) {
            processSession.transfer(processSession.penalize(flowFile), REL_FAILURE);
            this.getLogger().error("Failed while sending message to AMQP via " + this.targetResource, e);
            context.yield();
        }
    }
}
 
Example 4
Source File: SpringContextProcessor.java    From localization_nifi with Apache License 2.0    6 votes
/**
 * Sends the payload of the given FlowFile to the Spring Application Context via the message
 * exchanger. On success the FlowFile is removed from the session; on a send timeout it is
 * penalized and routed to 'failure'; on any other error it is routed to 'failure'. In both
 * failure cases the context is yielded.
 */
private void sendToSpring(FlowFile flowFileToProcess, ProcessContext context, ProcessSession processSession) {
    byte[] payload = this.extractMessage(flowFileToProcess, processSession);
    boolean sent = false;

    try {
        sent = this.exchanger.send(payload, flowFileToProcess.getAttributes(), this.sendTimeout);
        if (sent) {
            processSession.getProvenanceReporter().send(flowFileToProcess, this.applicationContextConfigFileName);
            processSession.remove(flowFileToProcess);
        } else {
            processSession.transfer(processSession.penalize(flowFileToProcess), REL_FAILURE);
            this.getLogger().error("Timed out while sending FlowFile to Spring Application Context "
                    + this.applicationContextConfigFileName);
            context.yield();
        }
    } catch (Exception e) {
        processSession.transfer(flowFileToProcess, REL_FAILURE);
        this.getLogger().error("Failed while sending FlowFile to Spring Application Context "
                + this.applicationContextConfigFileName + "; " + e.getMessage(), e);
        context.yield();
    }
}
 
Example 5
Source File: ConsumeAMQP.java    From nifi with Apache License 2.0    5 votes
/**
 * Will construct a {@link FlowFile} containing the body of the consumed AMQP message (if the {@link GetResponse} returned by the {@link AMQPConsumer} is
 * not null) and the AMQP properties that came with the message, which are added to the {@link FlowFile} as attributes, before transferring the
 * {@link FlowFile} to the 'success' {@link Relationship}.
 */
@Override
protected void processResource(final Connection connection, final AMQPConsumer consumer, final ProcessContext context, final ProcessSession session) {
    GetResponse lastReceived = null;

    for (int i = 0; i < context.getProperty(BATCH_SIZE).asInteger(); i++) {
        final GetResponse response = consumer.consume();
        if (response == null) {
            if (lastReceived == null) {
                // If no messages received, then yield.
                context.yield();
            }

            break;
        }

        FlowFile flowFile = session.create();
        flowFile = session.write(flowFile, out -> out.write(response.getBody()));

        final BasicProperties amqpProperties = response.getProps();
        final Map<String, String> attributes = buildAttributes(amqpProperties);
        flowFile = session.putAllAttributes(flowFile, attributes);

        session.getProvenanceReporter().receive(flowFile, connection.toString() + "/" + context.getProperty(QUEUE).getValue());
        session.transfer(flowFile, REL_SUCCESS);
        lastReceived = response;
    }

    session.commit();

    if (lastReceived != null) {
        try {
            consumer.acknowledge(lastReceived);
        } catch (IOException e) {
            throw new ProcessException("Failed to acknowledge message", e);
        }
    }
}
 
Example 6
Source File: PutAzureEventHub.java    From nifi with Apache License 2.0    5 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    try {
        populateSenderQueue(context);
    } catch (ProcessException e) {
        context.yield();
        throw e;
    }

    final StopWatch stopWatch = new StopWatch(true);

    final String partitioningKeyAttributeName = context.getProperty(PARTITIONING_KEY_ATTRIBUTE_NAME).getValue();

    // Get N flow files
    final int maxBatchSize = NumberUtils.toInt(context.getProperty(MAX_BATCH_SIZE).getValue(), 100);
    final List<FlowFile> flowFileList = session.get(maxBatchSize);

    // Convert and send each flow file
    final BlockingQueue<CompletableFuture<FlowFileResultCarrier<Relationship>>> futureQueue = new LinkedBlockingQueue<>();
    for (FlowFile flowFile : flowFileList) {
        if (flowFile == null) {
            continue;
        }

        futureQueue.offer(handleFlowFile(flowFile, partitioningKeyAttributeName, session));
    }

    waitForAllFutures(context, session, stopWatch, futureQueue);
}
 
Example 7
Source File: DistributeLoad.java    From localization_nifi with Apache License 2.0    5 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final DistributionStrategy strategy = strategyRef.get();
    final Set<Relationship> available = context.getAvailableRelationships();
    final int numRelationships = context.getProperty(NUM_RELATIONSHIPS).asInteger();
    final boolean allDestinationsAvailable = (available.size() == numRelationships);
    if (!allDestinationsAvailable && strategy.requiresAllDestinationsAvailable()) {
        // can't transfer the FlowFiles. Roll back and yield
        session.rollback();
        context.yield();
        return;
    }

    final Relationship relationship = strategy.mapToRelationship(context, flowFile);
    if (relationship == null) {
        // can't transfer the FlowFiles. Roll back and yield
        session.rollback();
        context.yield();
        return;
    }

    session.transfer(flowFile, relationship);
    session.getProvenanceReporter().route(flowFile, relationship);
}
 
Example 8
Source File: GetTCP.java    From localization_nifi with Apache License 2.0    5 votes
@Override
public void onTrigger(ProcessContext context, ProcessSessionFactory sessionFactory) throws ProcessException {
    if (this.delegatingMessageHandler == null) {
        this.delegatingMessageHandler = new NiFiDelegatingMessageHandler(sessionFactory);
    }
    this.run(context);
    context.yield();
}
 
Example 9
Source File: AbstractCouchbaseProcessor.java    From nifi with Apache License 2.0    5 votes
/**
 * Handles the thrown CouchbaseException accordingly.
 *
 * @param context a process context
 * @param session a process session
 * @param logger a logger
 * @param inFile an input FlowFile
 * @param e the thrown CouchbaseException
 * @param errMsg a message to be logged
 */
protected void handleCouchbaseException(final ProcessContext context, final ProcessSession session,
    final ComponentLog logger, FlowFile inFile, CouchbaseException e,
    String errMsg) {
    logger.error(errMsg, e);
    if (inFile != null) {
        ErrorHandlingStrategy strategy = CouchbaseExceptionMappings.getStrategy(e);
        switch (strategy.penalty()) {
            case Penalize:
                if (logger.isDebugEnabled()) {
                    logger.debug("Penalized: {}", new Object[] {inFile});
                }
                inFile = session.penalize(inFile);
                break;
            case Yield:
                if (logger.isDebugEnabled()) {
                    logger.debug("Yielded context: {}", new Object[] {inFile});
                }
                context.yield();
                break;
            case None:
                break;
        }

        switch (strategy.result()) {
            case ProcessException:
                throw new ProcessException(errMsg, e);
            case Failure:
                inFile = session.putAttribute(inFile, CouchbaseAttributes.Exception.key(), e.getClass().getName());
                session.transfer(inFile, REL_FAILURE);
                break;
            case Retry:
                inFile = session.putAttribute(inFile, CouchbaseAttributes.Exception.key(), e.getClass().getName());
                session.transfer(inFile, REL_RETRY);
                break;
        }
    }
}
 
Example 10
Source File: ListenSMTP.java    From nifi with Apache License 2.0    5 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    if (smtp == null) {
        try {
            final SMTPServer server = prepareServer(context, sessionFactory);
            server.start();
            getLogger().debug("Started SMTP Server on port " + server.getPort());
            smtp = server;
        } catch (final Exception ex) {//have to catch exception due to awkward exception handling in subethasmtp
            smtp = null;
            getLogger().error("Unable to start SMTP server due to " + ex.getMessage(), ex);
        }
    }
    context.yield();//nothing really to do here since threading managed by smtp server sessions
}
 
Example 11
Source File: PutHiveQL.java    From localization_nifi with Apache License 2.0    4 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final List<FlowFile> flowFiles = session.get(batchSize);

    if (flowFiles.isEmpty()) {
        return;
    }

    final long startNanos = System.nanoTime();
    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());
    final HiveDBCPService dbcpService = context.getProperty(HIVE_DBCP_SERVICE).asControllerService(HiveDBCPService.class);
    final String statementDelimiter = context.getProperty(STATEMENT_DELIMITER).getValue();

    try (final Connection conn = dbcpService.getConnection()) {

        for (FlowFile flowFile : flowFiles) {
            try {
                final String script = getHiveQL(session, flowFile, charset);
                String regex = "(?<!\\\\)" + Pattern.quote(statementDelimiter);

                String[] hiveQLs = script.split(regex);

                int loc = 1;
                for (String hiveQL: hiveQLs) {
                    getLogger().debug("HiveQL: {}", new Object[]{hiveQL});

                    if (!StringUtils.isEmpty(hiveQL.trim())) {
                        final PreparedStatement stmt = conn.prepareStatement(hiveQL.trim());

                        // Get ParameterMetadata
                        // Hive JDBC Doesn't support this yet:
                        // ParameterMetaData pmd = stmt.getParameterMetaData();
                        // int paramCount = pmd.getParameterCount();

                        int paramCount = StringUtils.countMatches(hiveQL, "?");

                        if (paramCount > 0) {
                            loc = setParameters(loc, stmt, paramCount, flowFile.getAttributes());
                        }

                        // Execute the statement
                        stmt.execute();
                    }
                }
                // Emit a Provenance SEND event
                final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);

                session.getProvenanceReporter().send(flowFile, dbcpService.getConnectionURL(), transmissionMillis, true);
                session.transfer(flowFile, REL_SUCCESS);

            } catch (final SQLException e) {

                if (e instanceof SQLNonTransientException) {
                    getLogger().error("Failed to update Hive for {} due to {}; routing to failure", new Object[]{flowFile, e});
                    session.transfer(flowFile, REL_FAILURE);
                } else {
                    getLogger().error("Failed to update Hive for {} due to {}; it is possible that retrying the operation will succeed, so routing to retry", new Object[]{flowFile, e});
                    flowFile = session.penalize(flowFile);
                    session.transfer(flowFile, REL_RETRY);
                }

            }
        }
    } catch (final SQLException sqle) {
        // There was a problem getting the connection, yield and retry the flowfiles
        getLogger().error("Failed to get Hive connection due to {}; it is possible that retrying the operation will succeed, so routing to retry", new Object[]{sqle});
        session.transfer(flowFiles, REL_RETRY);
        context.yield();
    }
}
 
Example 12
Source File: AbstractAWSGatewayApiProcessor.java    From nifi with Apache License 2.0    4 votes
protected void route(FlowFile request, FlowFile response, ProcessSession session,
                     ProcessContext context, int statusCode, Set<Relationship> relationships) {
    // check if we should yield the processor
    if (!isSuccess(statusCode) && request == null) {
        context.yield();
    }

    // If the property to output the response flowfile regardless of status code is set then transfer it
    boolean responseSent = false;
    if (context.getProperty(PROP_OUTPUT_RESPONSE_REGARDLESS).asBoolean()) {
        session.transfer(response, getRelationshipForName(REL_RESPONSE_NAME, relationships));
        responseSent = true;
    }

    // transfer to the correct relationship
    // 2xx -> SUCCESS
    if (isSuccess(statusCode)) {
        // we have two flowfiles to transfer
        if (request != null) {
            session
                .transfer(request, getRelationshipForName(REL_SUCCESS_REQ_NAME, relationships));
        }
        if (response != null && !responseSent) {
            session
                .transfer(response, getRelationshipForName(REL_RESPONSE_NAME, relationships));
        }

        // 5xx -> RETRY
    } else if (statusCode / 100 == 5) {
        if (request != null) {
            request = session.penalize(request);
            session.transfer(request, getRelationshipForName(REL_RETRY_NAME, relationships));
        }

        // 1xx, 3xx, 4xx -> NO RETRY
    } else {
        if (request != null) {
            if (context.getProperty(PROP_PENALIZE_NO_RETRY).asBoolean()) {
                request = session.penalize(request);
            }
            session.transfer(request, getRelationshipForName(REL_NO_RETRY_NAME, relationships));
        }
    }

}
 
Example 13
Source File: GetTwitter.java    From nifi with Apache License 2.0    4 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    if (client == null || client.isDone()) {
        connectNewClient();
        if (client.isDone()) {
            context.yield();
            return;
        }
    }
    final Event event = eventQueue.poll();
    if (event != null) {
        switch (event.getEventType()) {
            case STOPPED_BY_ERROR:
                getLogger().error("Received error {}: {} due to {}. Will not attempt to reconnect", new Object[]{event.getEventType(), event.getMessage(), event.getUnderlyingException()});
                break;
            case CONNECTION_ERROR:
            case HTTP_ERROR:
                getLogger().error("Received error {}: {}. Will attempt to reconnect", new Object[]{event.getEventType(), event.getMessage()});
                client.reconnect();
                break;
            default:
                break;
        }
    }

    final String tweet = messageQueue.poll();
    if (tweet == null) {
        context.yield();
        return;
    }

    FlowFile flowFile = session.create();
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            out.write(tweet.getBytes(StandardCharsets.UTF_8));
        }
    });

    final Map<String, String> attributes = new HashMap<>();
    attributes.put(CoreAttributes.MIME_TYPE.key(), "application/json");
    attributes.put(CoreAttributes.FILENAME.key(), flowFile.getAttribute(CoreAttributes.FILENAME.key()) + ".json");
    flowFile = session.putAllAttributes(flowFile, attributes);

    session.transfer(flowFile, REL_SUCCESS);
    session.getProvenanceReporter().receive(flowFile, Constants.STREAM_HOST + client.getEndpoint().getURI());
}
 
Example 14
Source File: PutLambda.java    From localization_nifi with Apache License 2.0    4 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final String functionName = context.getProperty(AWS_LAMBDA_FUNCTION_NAME).getValue();

    final String qualifier = context.getProperty(AWS_LAMBDA_FUNCTION_QUALIFIER).getValue();

    // Max size of message is 6 MB
    if ( flowFile.getSize() > MAX_REQUEST_SIZE) {
        getLogger().error("Max size for request body is 6mb but was {} for flow file {} for function {}",
            new Object[]{flowFile.getSize(), flowFile, functionName});
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    final AWSLambdaClient client = getClient();

    try {
        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
        session.exportTo(flowFile, baos);

        InvokeRequest invokeRequest = new InvokeRequest()
            .withFunctionName(functionName)
            .withLogType(LogType.Tail).withInvocationType(InvocationType.RequestResponse)
            .withPayload(ByteBuffer.wrap(baos.toByteArray()))
            .withQualifier(qualifier);
        long startTime = System.nanoTime();

        InvokeResult result = client.invoke(invokeRequest);

        flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_STATUS_CODE, result.getStatusCode().toString());

        if ( !StringUtils.isBlank(result.getLogResult() )) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_LOG, new String(Base64.decode(result.getLogResult()),Charset.defaultCharset()));
        }

        if ( result.getPayload() != null ) {
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_PAYLOAD, new String(result.getPayload().array(),Charset.defaultCharset()));
        }

        if ( ! StringUtils.isBlank(result.getFunctionError()) ){
            flowFile = session.putAttribute(flowFile, AWS_LAMBDA_RESULT_FUNCTION_ERROR, result.getFunctionError());
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
            final long totalTimeMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
            session.getProvenanceReporter().send(flowFile, functionName, totalTimeMillis);
        }
    } catch (final InvalidRequestContentException
        | InvalidParameterValueException
        | RequestTooLargeException
        | ResourceNotFoundException
        | UnsupportedMediaTypeException unrecoverableException) {
            getLogger().error("Failed to invoke lambda {} with unrecoverable exception {} for flow file {}",
                new Object[]{functionName, unrecoverableException, flowFile});
            flowFile = populateExceptionAttributes(session, flowFile, unrecoverableException);
            session.transfer(flowFile, REL_FAILURE);
    } catch (final TooManyRequestsException retryableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}, therefore penalizing flowfile",
            new Object[]{functionName, retryableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, retryableServiceException);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final AmazonServiceException unrecoverableServiceException) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {} sending to fail",
            new Object[]{functionName, unrecoverableServiceException, flowFile});
        flowFile = populateExceptionAttributes(session, flowFile, unrecoverableServiceException);
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    } catch (final Exception exception) {
        getLogger().error("Failed to invoke lambda {} with exception {} for flow file {}",
            new Object[]{functionName, exception, flowFile});
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 15
Source File: GetRethinkDB.java    From nifi with Apache License 2.0    4 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());
    String id = context.getProperty(RETHINKDB_DOCUMENT_ID).evaluateAttributeExpressions(flowFile).getValue();
    String readMode = context.getProperty(READ_MODE).evaluateAttributeExpressions(flowFile).getValue();

    if ( StringUtils.isEmpty(id) ) {
        getLogger().error(DOCUMENT_ID_EMPTY_MESSAGE);
        flowFile = session.putAttribute(flowFile, RETHINKDB_ERROR_MESSAGE, DOCUMENT_ID_EMPTY_MESSAGE);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    try {
        long startTimeMillis = System.currentTimeMillis();
        Map<String,Object> document = getDocument(id, readMode);

        if ( document == null ) {
            getLogger().debug("Document with id '" + id + "' not found");
            flowFile = session.putAttribute(flowFile, RETHINKDB_ERROR_MESSAGE, "Document with id '" + id + "' not found");
            session.transfer(flowFile, REL_NOT_FOUND);
            return;
        }

        String json = gson.toJson(document);

        byte [] documentBytes = json.getBytes(charset);

        if ( documentBytes.length > maxDocumentsSize ) {
            getLogger().error("Document too big with size " + documentBytes.length + " and max limit is " + maxDocumentsSize );
            flowFile = session.putAttribute(flowFile, RETHINKDB_ERROR_MESSAGE, "Document too big size " + documentBytes.length + " bytes");
            session.transfer(flowFile, REL_FAILURE);
            return;
        }

        ByteArrayInputStream bais = new ByteArrayInputStream(documentBytes);
        session.importFrom(bais, flowFile);
        final long endTimeMillis = System.currentTimeMillis();

        getLogger().debug("Json document {} retrieved Result: {}", new Object[] {id, document});

        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().fetch(flowFile,
            new StringBuilder("rethinkdb://").append(databaseName).append("/").append(tableName).append("/").append(id).toString(),
            (endTimeMillis - startTimeMillis));

    } catch (Exception exception) {
        getLogger().error("Failed to get document from RethinkDB due to error {}",
                new Object[]{exception.getLocalizedMessage()}, exception);
        flowFile = session.putAttribute(flowFile, RETHINKDB_ERROR_MESSAGE, exception.getMessage() + "");
        session.transfer(flowFile, REL_FAILURE);
        context.yield();
    }
}
 
Example 16
Source File: DeleteHDFS.java    From localization_nifi with Apache License 2.0    4 votes
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    String fileOrDirectoryName = null;
    FlowFile flowFile = session.get();

    // If this processor has an incoming connection, then do not run unless a
    // FlowFile is actually sent through
    if (flowFile == null && context.hasIncomingConnection()) {
        context.yield();
        return;
    }

    if (flowFile != null) {
        fileOrDirectoryName = context.getProperty(FILE_OR_DIRECTORY).evaluateAttributeExpressions(flowFile).getValue();
    } else {
        fileOrDirectoryName = context.getProperty(FILE_OR_DIRECTORY).evaluateAttributeExpressions().getValue();
    }

    final FileSystem fileSystem = getFileSystem();
    try {
        // Check if the user has supplied a file or directory pattern
        List<Path> pathList = Lists.newArrayList();
        if (GLOB_MATCHER.reset(fileOrDirectoryName).find()) {
            FileStatus[] fileStatuses = fileSystem.globStatus(new Path(fileOrDirectoryName));
            if (fileStatuses != null) {
                for (FileStatus fileStatus : fileStatuses) {
                    pathList.add(fileStatus.getPath());
                }
            }
        } else {
            pathList.add(new Path(fileOrDirectoryName));
        }

        Map<String, String> attributes = Maps.newHashMapWithExpectedSize(2);
        for (Path path : pathList) {
            attributes.put("filename", path.getName());
            attributes.put("path", path.getParent().toString());
            if (fileSystem.exists(path)) {
                fileSystem.delete(path, context.getProperty(RECURSIVE).asBoolean());
                if (!context.hasIncomingConnection()) {
                    flowFile = session.create();
                }
                session.transfer(session.putAllAttributes(flowFile, attributes), REL_SUCCESS);
            } else {
                getLogger().warn("File (" + path + ") does not exist");
                if (!context.hasIncomingConnection()) {
                    flowFile = session.create();
                }
                session.transfer(session.putAllAttributes(flowFile, attributes), REL_FAILURE);
            }
        }
    } catch (IOException e) {
        getLogger().warn("Error processing delete for file or directory", e);
        if (flowFile != null) {
            session.rollback(true);
        }
    }
}
 
Example 17
Source File: InvokeHTTP.java    From localization_nifi with Apache License 2.0    4 votes
private void route(FlowFile request, FlowFile response, ProcessSession session, ProcessContext context, int statusCode){
    // check if we should yield the processor
    if (!isSuccess(statusCode) && request == null) {
        context.yield();
    }

    // If the property to output the response flowfile regardless of status code is set then transfer it
    boolean responseSent = false;
    if (context.getProperty(PROP_OUTPUT_RESPONSE_REGARDLESS).asBoolean()) {
        session.transfer(response, REL_RESPONSE);
        responseSent = true;
    }

    // transfer to the correct relationship
    // 2xx -> SUCCESS
    if (isSuccess(statusCode)) {
        // we have two flowfiles to transfer
        if (request != null) {
            session.transfer(request, REL_SUCCESS_REQ);
        }
        if (response != null && !responseSent) {
            session.transfer(response, REL_RESPONSE);
        }

        // 5xx -> RETRY
    } else if (statusCode / 100 == 5) {
        if (request != null) {
            request = session.penalize(request);
            session.transfer(request, REL_RETRY);
        }

        // 1xx, 3xx, 4xx -> NO RETRY
    } else {
        if (request != null) {
            if (context.getProperty(PROP_PENALIZE_NO_RETRY).asBoolean()) {
                request = session.penalize(request);
            }
            session.transfer(request, REL_NO_RETRY);
        }
    }

}
 
Example 18
Source File: DeleteByQueryElasticsearch.java    From nifi with Apache License 2.0    4 votes
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile input = null;
    if (context.hasIncomingConnection()) {
        input = session.get();

        if (input == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    try {
        final String query = getQuery(input, context, session);
        final String index = context.getProperty(INDEX).evaluateAttributeExpressions(input).getValue();
        final String type  = context.getProperty(TYPE).isSet()
                ? context.getProperty(TYPE).evaluateAttributeExpressions(input).getValue()
                : null;
        final String queryAttr = context.getProperty(QUERY_ATTRIBUTE).isSet()
                ? context.getProperty(QUERY_ATTRIBUTE).evaluateAttributeExpressions(input).getValue()
                : null;
        DeleteOperationResponse dor = clientService.deleteByQuery(query, index, type);

        if (input == null) {
            input = session.create();
        }

        Map<String, String> attrs = new HashMap<>();
        attrs.put(TOOK_ATTRIBUTE, String.valueOf(dor.getTook()));
        if (!StringUtils.isBlank(queryAttr)) {
            attrs.put(queryAttr, query);
        }

        input = session.putAllAttributes(input, attrs);

        session.transfer(input, REL_SUCCESS);
    } catch (Exception e) {
        if (input != null) {
            input = session.putAttribute(input, ERROR_ATTRIBUTE, e.getMessage());
            session.transfer(input, REL_FAILURE);
        }
        getLogger().error("Error running delete by query: ", e);
        context.yield();
    }
}
 
Example 19
Source File: PublishMQTT.java    From localization_nifi with Apache License 2.0    4 votes
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowfile = session.get();
    if (flowfile == null) {
        return;
    }

    if(mqttClient == null || !mqttClient.isConnected()){
        logger.info("Was disconnected from client or was never connected, attempting to connect.");
        try {
            reconnect();
        } catch (MqttException e) {
            context.yield();
            session.transfer(flowfile, REL_FAILURE);
            logger.error("MQTT client is disconnected and re-connecting failed. Transferring FlowFile to fail and yielding", e);
            return;
        }
    }

    // get the MQTT topic
    String topic = context.getProperty(PROP_TOPIC).evaluateAttributeExpressions(flowfile).getValue();

    if (topic == null || topic.isEmpty()) {
        logger.warn("Evaluation of the topic property returned null or evaluated to be empty, routing to failure");
        session.transfer(flowfile, REL_FAILURE);
        return;
    }

    // do the read
    final byte[] messageContent = new byte[(int) flowfile.getSize()];
    session.read(flowfile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, messageContent, true);
        }
    });

    int qos = context.getProperty(PROP_QOS).evaluateAttributeExpressions(flowfile).asInteger();
    final MqttMessage mqttMessage = new MqttMessage(messageContent);
    mqttMessage.setQos(qos);
    mqttMessage.setPayload(messageContent);
    mqttMessage.setRetained(context.getProperty(PROP_RETAIN).evaluateAttributeExpressions(flowfile).asBoolean());

    try {
        mqttClientConnectLock.readLock().lock();
        final StopWatch stopWatch = new StopWatch(true);
        try {
            /*
             * Underlying method waits for the message to publish (according to set QoS), so it executes synchronously:
             *     MqttClient.java:361 aClient.publish(topic, message, null, null).waitForCompletion(getTimeToWait());
             */
            mqttClient.publish(topic, mqttMessage);
        } finally {
            mqttClientConnectLock.readLock().unlock();
        }

        session.getProvenanceReporter().send(flowfile, broker, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowfile, REL_SUCCESS);
    } catch(MqttException me) {
        logger.error("Failed to publish message.", me);
        session.transfer(flowfile, REL_FAILURE);
    }
}
 
Example 20
Source File: PutTCP.java    From localization_nifi with Apache License 2.0    2 votes
/**
 * Event handler method to perform the required actions when a failure has occurred. The FlowFile is penalized, forwarded to the failure relationship, and the context is yielded.
 *
 * @param context
 *            - the current process context.
 *
 * @param session
 *            - the current process session.
 * @param flowFile
 *            - the FlowFile that failed to be processed.
 */
protected void onFailure(final ProcessContext context, final ProcessSession session, final FlowFile flowFile) {
    session.transfer(session.penalize(flowFile), REL_FAILURE);
    session.commit();
    context.yield();
}