Java Code Examples for org.elasticsearch.ExceptionsHelper

The following examples show how to use org.elasticsearch.ExceptionsHelper. They are extracted from open source projects; where known, the source project and file are listed above each example.
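Before the examples, here is a minimal sketch of the helper methods that recur below (convertToRuntime, unwrapCause, maybeThrowRuntimeAndSuppress). The class and method names in this sketch are illustrative, not taken from any of the projects listed:

import org.elasticsearch.ExceptionsHelper;

import java.io.IOException;
import java.util.List;

public class ExceptionsHelperSketch {

    // Wrap a checked exception so it can cross APIs that only accept unchecked ones.
    void wrapChecked() {
        try {
            mightThrowIo();
        } catch (IOException e) {
            throw ExceptionsHelper.convertToRuntime(e);
        }
    }

    // Collect failures from a loop, then throw them as one exception (or do nothing if the list is empty).
    void throwCollected(List<Exception> failures) {
        ExceptionsHelper.maybeThrowRuntimeAndSuppress(failures);
    }

    // Strip Elasticsearch wrapper exceptions before branching on the underlying cause.
    boolean isConnectFailure(Throwable t) {
        return ExceptionsHelper.unwrapCause(t) instanceof java.net.ConnectException;
    }

    void mightThrowIo() throws IOException {
        // placeholder for some I/O call
    }
}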
Example 1
Source Project: crate   Source File: MetaDataStateFormat.java    License: Apache License 2.0
/**
 * Tries to load the state of particular generation from the given data-locations. If any of data locations contain state files with
 * given generation, state will be loaded from these state files.
 *
 * @param logger a logger instance.
 * @param generation the generation to be loaded.
 * @param dataLocations the data-locations to try.
 * @return the state of asked generation or <code>null</code> if no state was found.
 */
public T loadGeneration(Logger logger, NamedXContentRegistry namedXContentRegistry, long generation, Path... dataLocations) {
    List<Path> stateFiles = findStateFilesByGeneration(generation, dataLocations);

    final List<Throwable> exceptions = new ArrayList<>();
    for (Path stateFile : stateFiles) {
        try {
            T state = read(namedXContentRegistry, stateFile);
            logger.trace("generation id [{}] read from [{}]", generation, stateFile.getFileName());
            return state;
        } catch (Exception e) {
            exceptions.add(new IOException("failed to read " + stateFile.toAbsolutePath(), e));
            logger.debug(() -> new ParameterizedMessage(
                    "{}: failed to read [{}], ignoring...", stateFile.toAbsolutePath(), prefix), e);
        }
    }
    // if we reach this something went wrong
    ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions);
    if (stateFiles.size() > 0) {
        // We have some state files but none of them gave us a usable state
        throw new IllegalStateException("Could not find a state file to recover from among " +
                stateFiles.stream().map(Path::toAbsolutePath).map(Object::toString).collect(Collectors.joining(", ")));
    }
    return null;
}
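The pattern in Example 1 — accumulate per-file exceptions in a list and hand them to maybeThrowRuntimeAndSuppress at the end — recurs in Examples 3, 6, and 14. A rough sketch of the behaviour the callers rely on (paraphrased, not the Elasticsearch implementation): if the list is empty the call returns normally; otherwise it throws an unchecked exception built from the first entry, with the remaining entries attached as suppressed exceptions.

// Illustrative only: the file names are made up. Both IOExceptions end up on the
// thrown exception, the second one via Throwable#addSuppressed.
List<Throwable> failures = new ArrayList<>();
failures.add(new IOException("failed to read state-1.st"));
failures.add(new IOException("failed to read state-2.st"));
ExceptionsHelper.maybeThrowRuntimeAndSuppress(failures); // throws here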
 
Example 2
protected void checkRequest(final RestRequest request, final RestChannel channel) {
    
    if(SSLRequestHelper.containsBadHeader(threadContext, "_opendistro_security_ssl_")) {
        final ElasticsearchException exception = ExceptionUtils.createBadHeaderException();
        errorHandler.logError(exception, request, 1);
        throw exception;
    }
    
    try {
        if(SSLRequestHelper.getSSLInfo(settings, configPath, request, null) == null) {
            logger.error("Not an SSL request");
            throw new ElasticsearchSecurityException("Not an SSL request", RestStatus.INTERNAL_SERVER_ERROR);
        }
    } catch (SSLPeerUnverifiedException e) {
        logger.error("No client certificates found but such are needed (Security 8).");
        errorHandler.logError(e, request, 0);
        throw ExceptionsHelper.convertToElastic(e);
    }
}
 
Example 3
Source Project: Elasticsearch   Source File: BlobRecoverySource.java    License: Apache License 2.0
synchronized void cancel(IndexShard shard, String reason) {
    final Set<RecoverySourceHandler> shardRecoveryHandlers = ongoingRecoveries.get(shard);
    if (shardRecoveryHandlers != null) {
        final List<Exception> failures = new ArrayList<>();
        for (RecoverySourceHandler handlers : shardRecoveryHandlers) {
            try {
                handlers.cancel(reason);
            } catch (Exception ex) {
                failures.add(ex);
            } finally {
                shard.recoveryStats().decCurrentAsSource();
            }
        }
        ExceptionsHelper.maybeThrowRuntimeAndSuppress(failures);
    }
}
 
Example 4
Source Project: Elasticsearch   Source File: LuceneQueryBuilder.java    License: Apache License 2.0
private Query queryFromInnerFunction(Function function, Context context) {
    for (Symbol symbol : function.arguments()) {
        if (symbol.symbolType() == SymbolType.FUNCTION) {
            String functionName = ((Function) symbol).info().ident().name();
            InnerFunctionToQuery functionToQuery = innerFunctions.get(functionName);
            if (functionToQuery != null) {
                try {
                    Query query = functionToQuery.apply(function, (Function)symbol, context);
                    if (query != null) {
                        return query;
                    }
                } catch (IOException e) {
                    throw ExceptionsHelper.convertToRuntime(e);
                }
            }
        }
    }
    return null;
}
 
Example 5
Source Project: Elasticsearch   Source File: UpsertByIdTask.java    License: Apache License 2.0
private void createIndexAndExecuteUpsertRequest(final UpsertByIdNode.Item item,
                                                final SettableFuture<TaskResult> futureResult) {
    transportCreateIndexAction.execute(
            new CreateIndexRequest(item.index()).cause("upsert single item"),
            new ActionListener<CreateIndexResponse>() {
        @Override
        public void onResponse(CreateIndexResponse createIndexResponse) {
            executeUpsertRequest(item, futureResult);
        }

        @Override
        public void onFailure(Throwable e) {
            e = ExceptionsHelper.unwrapCause(e);
            if (e instanceof IndexAlreadyExistsException) {
                executeUpsertRequest(item, futureResult);
            } else {
                futureResult.setException(e);
            }

        }
    });
}
 
Example 6
Source Project: crate   Source File: PeerRecoverySourceService.java    License: Apache License 2.0
synchronized void cancel(IndexShard shard, String reason) {
    final ShardRecoveryContext shardRecoveryContext = ongoingRecoveries.get(shard);
    if (shardRecoveryContext != null) {
        final List<Exception> failures = new ArrayList<>();
        for (RecoverySourceHandler handlers : shardRecoveryContext.recoveryHandlers) {
            try {
                handlers.cancel(reason);
            } catch (Exception ex) {
                failures.add(ex);
            } finally {
                shard.recoveryStats().decCurrentAsSource();
            }
        }
        ExceptionsHelper.maybeThrowRuntimeAndSuppress(failures);
    }
}
 
Example 7
Source Project: Elasticsearch   Source File: MatchedQueriesFetchSubPhase.java    License: Apache License 2.0
@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
    List<String> matchedQueries = new ArrayList<>(2);

    try {
        addMatchedQueries(hitContext, context.parsedQuery().namedFilters(), matchedQueries);

        if (context.parsedPostFilter() != null) {
            addMatchedQueries(hitContext, context.parsedPostFilter().namedFilters(), matchedQueries);
        }
    } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
    } finally {
        SearchContext.current().clearReleasables(Lifetime.COLLECTION);
    }

    hitContext.hit().matchedQueries(matchedQueries.toArray(new String[matchedQueries.size()]));
}
 
Example 8
Source Project: Elasticsearch   Source File: SearchService.java    License: Apache License 2.0
public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) {
    final SearchContext context = findContext(request.id());
    ShardSearchStats shardSearchStats = context.indexShard().searchService();
    try {
        shardSearchStats.onPreQueryPhase(context);
        long time = System.nanoTime();
        contextProcessing(context);
        processScroll(request, context);
        queryPhase.execute(context);
        contextProcessedSuccessfully(context);
        shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
        return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget());
    } catch (Throwable e) {
        shardSearchStats.onFailedQueryPhase(context);
        logger.trace("Query phase failed", e);
        processFailure(context, e);
        throw ExceptionsHelper.convertToRuntime(e);
    } finally {
        cleanContext(context);
    }
}
 
Example 9
/**
 * {@inheritDoc}
 */
@Override
public void snapshot(SnapshotId snapshotId, ShardId shardId, SnapshotIndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) {
    SnapshotContext snapshotContext = new SnapshotContext(snapshotId, shardId, snapshotStatus);
    snapshotStatus.startTime(System.currentTimeMillis());

    try {
        snapshotContext.snapshot(snapshotIndexCommit);
        snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
        snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE);
    } catch (Throwable e) {
        snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime());
        snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE);
        snapshotStatus.failure(ExceptionsHelper.detailedMessage(e));
        if (e instanceof IndexShardSnapshotFailedException) {
            throw (IndexShardSnapshotFailedException) e;
        } else {
            throw new IndexShardSnapshotFailedException(shardId, e.getMessage(), e);
        }
    }
}
 
Example 10
Source Project: Elasticsearch   Source File: TransportClientNodesService.java    License: Apache License 2.0
@Override
public void onFailure(Throwable e) {
    if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) {
        int i = ++this.i;
        if (i >= nodes.size()) {
            listener.onFailure(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e));
        } else {
            try {
                callback.doWithNode(nodes.get((index + i) % nodes.size()), this);
            } catch(final Throwable t) {
                // this exception can't come from the TransportService as it doesn't throw exceptions at all
                listener.onFailure(t);
            }
        }
    } else {
        listener.onFailure(e);
    }
}
 
Example 11
Source Project: Elasticsearch   Source File: NettyTransport.java    License: Apache License 2.0
/**
 * Disconnects from a node if a channel is found as part of that nodes channels.
 */
protected void disconnectFromNodeChannel(final Channel channel, final Throwable failure) {
    threadPool().generic().execute(new Runnable() {

        @Override
        public void run() {
            for (DiscoveryNode node : connectedNodes.keySet()) {
                if (disconnectFromNode(node, channel, ExceptionsHelper.detailedMessage(failure))) {
                    // if we managed to find this channel and disconnect from it, then break, no need to check on
                    // the rest of the nodes
                    break;
                }
            }
        }
    });
}
 
Example 12
Source Project: crate   Source File: ElasticsearchAssertions.java    License: Apache License 2.0
public static void assertThrows(ActionFuture future, RestStatus status, String extraInfo) {
    boolean fail = false;
    extraInfo = extraInfo == null || extraInfo.isEmpty() ? "" : extraInfo + ": ";
    extraInfo += "expected a " + status + " status exception to be thrown";

    try {
        future.actionGet();
        fail = true;
    } catch (Exception e) {
        assertThat(extraInfo, ExceptionsHelper.status(e), equalTo(status));
    }
    // has to be outside catch clause to get a proper message
    if (fail) {
        throw new AssertionError(extraInfo);
    }
}
 
Example 13
Source Project: Elasticsearch   Source File: TransportDeleteAction.java    License: Apache License 2.0
@Override
protected void doExecute(final Task task, final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
    ClusterState state = clusterService.state();
    if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
        createIndexAction.execute(task, new CreateIndexRequest(request).index(request.index()).cause("auto(delete api)")
            .masterNodeTimeout(request.timeout()), new ActionListener<CreateIndexResponse>() {
            @Override
            public void onResponse(CreateIndexResponse result) {
                innerExecute(task, request, listener);
            }

            @Override
            public void onFailure(Throwable e) {
                if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
                    // we have the index, do it
                    innerExecute(task, request, listener);
                } else {
                    listener.onFailure(e);
                }
            }
        });
    } else {
        innerExecute(task, request, listener);
    }
}
 
Example 14
Source Project: crate   Source File: AbstractScopedSettings.java    License: Apache License 2.0
/**
 * Validates that all settings are registered and valid.
 *
 * @param settings                       the settings
 * @param validateDependencies           true if dependent settings should be validated
 * @param ignorePrivateSettings          true if private settings should be ignored during validation
 * @param ignoreArchivedSettings         true if archived settings should be ignored during validation
 * @param validateInternalOrPrivateIndex true if index internal settings should be validated
 * @see Setting#getSettingsDependencies(String)
 */
public final void validate(
        final Settings settings,
        final boolean validateDependencies,
        final boolean ignorePrivateSettings,
        final boolean ignoreArchivedSettings,
        final boolean validateInternalOrPrivateIndex) {
    final List<RuntimeException> exceptions = new ArrayList<>();
    for (final String key : settings.keySet()) { // settings iterate in deterministic fashion
        final Setting<?> setting = getRaw(key);
        if (((isPrivateSetting(key) || (setting != null && setting.isPrivateIndex())) && ignorePrivateSettings)) {
            continue;
        }
        if (key.startsWith(ARCHIVED_SETTINGS_PREFIX) && ignoreArchivedSettings) {
            continue;
        }
        try {
            validate(key, settings, validateDependencies, validateInternalOrPrivateIndex);
        } catch (final RuntimeException ex) {
            exceptions.add(ex);
        }
    }
    ExceptionsHelper.rethrowAndSuppress(exceptions);
}
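rethrowAndSuppress, used here, is the checked-exception counterpart of the maybeThrowRuntimeAndSuppress calls in Examples 1, 3, and 6: instead of wrapping, it rethrows the first collected exception as-is, with the remaining ones attached as suppressed. A hedged illustration (the setting keys are made up):

// Illustrative only: throws the first IllegalArgumentException unwrapped,
// with the second attached as suppressed.
List<RuntimeException> exceptions = new ArrayList<>();
exceptions.add(new IllegalArgumentException("unknown setting [index.made_up_one]"));
exceptions.add(new IllegalArgumentException("unknown setting [index.made_up_two]"));
ExceptionsHelper.rethrowAndSuppress(exceptions);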
 
Example 15
@Override
protected void innerToXContent(XContentBuilder builder, Params params) throws IOException {
    builder.field("phase", phaseName);
    final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default
    builder.field("grouped", group); // notify that it's grouped
    builder.field("failed_shards");
    builder.startArray();
    ShardOperationFailedException[] failures = group ? ExceptionsHelper.groupBy(shardFailures) : shardFailures;
    for (ShardOperationFailedException failure : failures) {
        builder.startObject();
        failure.toXContent(builder, params);
        builder.endObject();
    }
    builder.endArray();
    super.innerToXContent(builder, params);
}
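Whether ExceptionsHelper.groupBy deduplicates the shard failures is driven entirely by the group_shard_failures parameter. A hedged sketch of how a caller might request ungrouped output when serializing an instance of the enclosing exception class; the params and builder wiring here are an assumption, not code from this project:

// Sketch: serialize the exception without grouping duplicate shard failures.
static XContentBuilder renderUngrouped(ElasticsearchException exception) throws IOException {
    ToXContent.Params noGrouping =
            new ToXContent.MapParams(Collections.singletonMap("group_shard_failures", "false"));
    XContentBuilder builder = XContentFactory.jsonBuilder();
    builder.startObject();
    exception.toXContent(builder, noGrouping); // eventually reaches innerToXContent above
    builder.endObject();
    return builder;
}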
 
Example 16
Source Project: crate   Source File: LuceneQueryBuilder.java    License: Apache License 2.0
private Query queryFromInnerFunction(Function function, Context context) {
    for (Symbol symbol : function.arguments()) {
        if (symbol.symbolType() == SymbolType.FUNCTION) {
            String functionName = ((Function) symbol).name();
            InnerFunctionToQuery functionToQuery = innerFunctions.get(functionName);
            if (functionToQuery != null) {
                try {
                    Query query = functionToQuery.apply(function, (Function) symbol, context);
                    if (query != null) {
                        return query;
                    }
                } catch (IOException e) {
                    throw ExceptionsHelper.convertToRuntime(e);
                }
            }
        }
    }
    return null;
}
 
Example 17
Source Project: crate   Source File: ReplicationOperation.java    License: Apache License 2.0
private void onNoLongerPrimary(Exception failure) {
    final Throwable cause = ExceptionsHelper.unwrapCause(failure);
    final boolean nodeIsClosing =
        cause instanceof NodeClosedException ||
        (cause instanceof TransportException && "TransportService is closed stopped can't send request".equals(cause.getMessage()));

    final String message;
    if (nodeIsClosing) {
        message = String.format(Locale.ROOT,
            "node with primary [%s] is shutting down while failing replica shard", primary.routingEntry());
        // We prefer not to fail the primary to avoid unnecessary warning log
        // when the node with the primary shard is gracefully shutting down.
    } else {
        if (Assertions.ENABLED) {
            if (failure instanceof ShardStateAction.NoLongerPrimaryShardException == false) {
                throw new AssertionError("unexpected failure", failure);
            }
        }
        // we are no longer the primary, fail ourselves and start over
        message = String.format(Locale.ROOT, "primary shard [%s] was demoted while failing replica shard", primary.routingEntry());
        primary.failShard(message, failure);
    }
    finishAsFailed(new RetryOnPrimaryException(primary.routingEntry().shardId(), message, failure));
}
 
Example 18
Source Project: elasticsearch-helper   Source File: TransportClient.java    License: Apache License 2.0
@Override
public void onFailure(Throwable e) {
    if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) {
        int n = ++this.n;
        if (n >= nodes.size()) {
            listener.onFailure(new NoNodeAvailableException("none of the configured nodes were available: "
                    + nodes, e));
        } else {
            try {
                logger.warn("retrying on anoher node (n={}, nodes={})", n, nodes.size());
                callback.doWithNode(nodes.get((index + n) % nodes.size()), this);
            } catch (final Throwable t) {
                listener.onFailure(t);
            }
        }
    } else {
        listener.onFailure(e);
    }
}
 
Example 19
protected static XContentBuilder convertToJson(RestChannel channel, ToXContent toxContent) {
    try {
        XContentBuilder builder = channel.newBuilder();
        toxContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
        return builder;
    } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
    }
}
 
Example 20
protected void response(RestChannel channel, RestStatus status, String message) {
    try {
        final XContentBuilder builder = channel.newBuilder();
        builder.startObject();
        builder.field("status", status.name());
        builder.field("message", message);
        builder.endObject();
        channel.sendResponse(new BytesRestResponse(status, builder));
    } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
    }
}
 
Example 21
public static Map<String, Object> convertJsonToxToStructuredMap(String jsonContent) {
    try (XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, OpenDistroSecurityDeprecationHandler.INSTANCE, jsonContent)) {
        return parser.map();
    } catch (IOException e1) {
        throw ExceptionsHelper.convertToElastic(e1);
    }
}
 
Example 22
public static JsonNode convertJsonToJackson(BytesReference jsonContent) {
    try {
        return DefaultObjectMapper.readTree(jsonContent.utf8ToString());
    } catch (IOException e1) {
        throw ExceptionsHelper.convertToElastic(e1);
    }

}
 
Example 23
public static JsonNode convertJsonToJackson(ToXContent jsonContent, boolean omitDefaults) {
    try {
        Map<String, String> pm = new HashMap<>(1);
        pm.put("omit_defaults", String.valueOf(omitDefaults));
        ToXContent.MapParams params = new ToXContent.MapParams(pm);

        final BytesReference bytes = XContentHelper.toXContent(jsonContent, XContentType.JSON, params, false);
        return DefaultObjectMapper.readTree(bytes.utf8ToString());
    } catch (IOException e1) {
        throw ExceptionsHelper.convertToElastic(e1);
    }

}
 
Example 24
public static <T> T serializeToXContentToPojo(ToXContent jsonContent, Class<T> clazz) {
    try {

        if (jsonContent instanceof BytesReference) {
            return serializeToXContentToPojo(((BytesReference) jsonContent).utf8ToString(), clazz);
        }

        final BytesReference bytes = XContentHelper.toXContent(jsonContent, XContentType.JSON, false);
        return DefaultObjectMapper.readValue(bytes.utf8ToString(), clazz);
    } catch (IOException e1) {
        throw ExceptionsHelper.convertToElastic(e1);
    }

}
 
Example 25
Source Project: crate   Source File: Scheduler.java    License: Apache License 2.0
@Override
protected void afterExecute(Runnable r, Throwable t) {
    if (t != null) return;
    // Scheduler only allows Runnable's so we expect no checked exceptions here. If anyone uses submit directly on `this`, we
    // accept the wrapped exception in the output.
    ExceptionsHelper.reThrowIfNotNull(EsExecutors.rethrowErrors(r));
}
 
Example 26
@Override
public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress, ChannelPromise promise) throws Exception {
    SSLEngine engine = null;
    try {
        if (hostnameVerificationEnabled) {
            final InetSocketAddress inetSocketAddress = (InetSocketAddress) remoteAddress;
            String hostname = null;
            if (hostnameVerificationResovleHostName) {
                hostname = inetSocketAddress.getHostName();
            } else {
                hostname = inetSocketAddress.getHostString();
            }

            if(log.isDebugEnabled()) {
                log.debug("Hostname of peer is {} ({}/{}) with hostnameVerificationResovleHostName: {}", hostname, inetSocketAddress.getHostName(), inetSocketAddress.getHostString(), hostnameVerificationResovleHostName);
            }
            
            engine = odks.createClientTransportSSLEngine(hostname, inetSocketAddress.getPort());
        } else {
            engine = odks.createClientTransportSSLEngine(null, -1);
        }
    } catch (final SSLException e) {
        throw ExceptionsHelper.convertToElastic(e);
    }
    final SslHandler sslHandler = new SslHandler(engine);
    ctx.pipeline().replace(this, "ssl_client", sslHandler);
    super.connect(ctx, remoteAddress, localAddress, promise);
}
 
Example 27
@Test
public void testStaticCRLOk() throws Exception {
    
    File staticCrl = getAbsoluteFilePathFromClassPath("crl/revoked.crl");
    Collection<? extends CRL> crls = null;
    try(FileInputStream crlin = new FileInputStream(staticCrl)) {
        crls = CertificateFactory.getInstance("X.509").generateCRLs(crlin);
    }
    
    Assert.assertEquals(crls.size(), 1);
    
    //trust chain incl intermediate certificates (root + intermediates)
    Collection<? extends Certificate> rootCas;
    final File trustedCas = getAbsoluteFilePathFromClassPath("chain-ca.pem");
    try(FileInputStream trin = new FileInputStream(trustedCas)) {
        rootCas =  (Collection<? extends Certificate>) CertificateFactory.getInstance("X.509").generateCertificates(trin);
    }
    
    Assert.assertEquals(rootCas.size(), 2);

    //certificate chain to validate (client cert + intermediates but without root)
    Collection<? extends Certificate> certsToValidate;
    final File certs = getAbsoluteFilePathFromClassPath("node-0.crt.pem");
    try(FileInputStream trin = new FileInputStream(certs)) {
        certsToValidate =  (Collection<? extends Certificate>) CertificateFactory.getInstance("X.509").generateCertificates(trin);
    }
    
    Assert.assertEquals(certsToValidate.size(), 3);
    
    CertificateValidator validator = new CertificateValidator(rootCas.toArray(new X509Certificate[0]), crls);
    validator.setDate(CRL_DATE);
    try {
        validator.validate(certsToValidate.toArray(new X509Certificate[0]));
    } catch (CertificateException e) {
        Assert.fail(ExceptionsHelper.stackTrace(ExceptionUtils.getRootCause(e)));
    }
}
 
Example 28
@Override
protected void doExecute(Task task, ActionRequest actionRequest, ActionListener<StopDetectorResponse> listener) {
    StopDetectorRequest request = StopDetectorRequest.fromActionRequest(actionRequest);
    String adID = request.getAdID();
    try {
        DiscoveryNode[] dataNodes = nodeFilter.getEligibleDataNodes();
        DeleteModelRequest modelDeleteRequest = new DeleteModelRequest(adID, dataNodes);
        client.execute(DeleteModelAction.INSTANCE, modelDeleteRequest, ActionListener.wrap(response -> {
            if (response.hasFailures()) {
                LOG.warn("Cannot delete all models of detector {}", adID);
                for (FailedNodeException failedNodeException : response.failures()) {
                    LOG.warn("Deleting models of node has exception", failedNodeException);
                }
                // if customers are using an updated detector and we haven't deleted old
                // checkpoints, customer would have trouble
                listener.onResponse(new StopDetectorResponse(false));
            } else {
                LOG.info("models of detector {} get deleted", adID);
                listener.onResponse(new StopDetectorResponse(true));
            }
        }, exception -> {
            LOG.error(new ParameterizedMessage("Deletion of detector [{}] has exception.", adID), exception);
            listener.onResponse(new StopDetectorResponse(false));
        }));
    } catch (Exception e) {
        LOG.error("Fail to stop detector " + adID, e);
        Throwable cause = ExceptionsHelper.unwrapCause(e);
        listener.onFailure(new InternalFailure(adID, cause));
    }
}
 
Example 29
Source Project: anomaly-detection   Source File: AnomalyResultHandler.java    License: Apache License 2.0
public void indexAnomalyResult(AnomalyResult anomalyResult) {
    try {
        if (checkIndicesBlocked(clusterService.state(), ClusterBlockLevel.WRITE, AnomalyResult.ANOMALY_RESULT_INDEX)) {
            LOG.warn(CANNOT_SAVE_ERR_MSG);
            return;
        }
        if (!anomalyDetectionIndices.doesAnomalyResultIndexExist()) {
            anomalyDetectionIndices
                .initAnomalyResultIndexDirectly(
                    ActionListener.wrap(initResponse -> onCreateAnomalyResultIndexResponse(initResponse, anomalyResult), exception -> {
                        if (ExceptionsHelper.unwrapCause(exception) instanceof ResourceAlreadyExistsException) {
                            // It is possible the index has been created while we sending the create request
                            saveDetectorResult(anomalyResult);
                        } else {
                            throw new AnomalyDetectionException(
                                anomalyResult.getDetectorId(),
                                "Unexpected error creating anomaly result index",
                                exception
                            );
                        }
                    })
                );
        } else {
            saveDetectorResult(anomalyResult);
        }
    } catch (Exception e) {
        throw new AnomalyDetectionException(
            anomalyResult.getDetectorId(),
            String
                .format(
                    Locale.ROOT,
                    "Error in saving anomaly index for ID %s from %s to %s",
                    anomalyResult.getDetectorId(),
                    anomalyResult.getDataStartTime(),
                    anomalyResult.getDataEndTime()
                ),
            e
        );
    }
}
 
Example 30
Source Project: anomaly-detection   Source File: AnomalyResultHandler.java    License: Apache License 2.0
void saveDetectorResult(IndexRequest indexRequest, String context, Iterator<TimeValue> backoff) {
    client.index(indexRequest, ActionListener.<IndexResponse>wrap(response -> LOG.debug(SUCCESS_SAVING_MSG + context), exception -> {
        // Elasticsearch has a thread pool and a queue for write per node. A thread
        // pool will have N number of workers ready to handle the requests. When a
        // request comes and if a worker is free , this is handled by the worker. Now by
        // default the number of workers is equal to the number of cores on that CPU.
        // When the workers are full and there are more write requests, the request
        // will go to queue. The size of queue is also limited. If by default size is,
        // say, 200 and if there happens more parallel requests than this, then those
        // requests would be rejected as you can see EsRejectedExecutionException.
        // So EsRejectedExecutionException is the way that Elasticsearch tells us that
        // it cannot keep up with the current indexing rate.
        // When it happens, we should pause indexing a bit before trying again, ideally
        // with randomized exponential backoff.
        Throwable cause = ExceptionsHelper.unwrapCause(exception);
        if (!(cause instanceof EsRejectedExecutionException) || !backoff.hasNext()) {
            LOG.error(FAIL_TO_SAVE_ERR_MSG + context, cause);
        } else {
            TimeValue nextDelay = backoff.next();
            LOG.warn(RETRY_SAVING_ERR_MSG + context, cause);
            // copy original request's source without other information like autoGeneratedTimestamp
            // otherwise, an exception will be thrown indicating autoGeneratedTimestamp should not be set
            // while request id is already set (id is set because we have already sent the request before).
            IndexRequest newRequest = new IndexRequest(AnomalyResult.ANOMALY_RESULT_INDEX);
            newRequest.source(indexRequest.source(), indexRequest.getContentType());
            threadPool.schedule(() -> saveDetectorResult(newReuqest, context, backoff), nextDelay, ThreadPool.Names.SAME);
        }
    }));
}
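The backoff iterator consumed above is created by the caller and is not shown in this example. One plausible way to build it is with Elasticsearch's BackoffPolicy; the initial delay, retry count, and context string below are assumptions, not values taken from the anomaly-detection project:

// Assumption: bounded exponential backoff, e.g. ~50ms initial delay and 5 retries.
// The "anomaly result" context string is illustrative.
Iterator<TimeValue> backoff = BackoffPolicy
        .exponentialBackoff(TimeValue.timeValueMillis(50), 5)
        .iterator();
saveDetectorResult(indexRequest, "anomaly result", backoff);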