Java Code Examples for io.vertx.micrometer.backends.BackendRegistries#setupBackend()

The following examples show how to use io.vertx.micrometer.backends.BackendRegistries#setupBackend(). All examples are taken from the konduit-serving project; you can go to the original source file noted above each example.
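Before the project examples, here is a minimal, self-contained sketch (not taken from konduit-serving) of the usual pattern: build MicrometerMetricsOptions with a Prometheus backend, register it with BackendRegistries.setupBackend(), and later look up the shared registry via BackendRegistries.getDefaultNow(). It assumes a Vert.x Micrometer version that exposes the single-argument setupBackend(MicrometerMetricsOptions) overload used in the examples below; the class and meter names are illustrative only.

import io.micrometer.core.instrument.MeterRegistry;
import io.vertx.micrometer.MicrometerMetricsOptions;
import io.vertx.micrometer.VertxPrometheusOptions;
import io.vertx.micrometer.backends.BackendRegistries;

public class SetupBackendSketch {

    public static void main(String[] args) {
        // Enable Vert.x metrics with a Prometheus-backed Micrometer registry.
        MicrometerMetricsOptions metricsOptions = new MicrometerMetricsOptions()
                .setPrometheusOptions(new VertxPrometheusOptions().setEnabled(true))
                .setEnabled(true);

        // Creates (or reuses) the backend registry associated with these options.
        // In the examples below, the same options object is also passed to
        // VertxOptions.setMetricsOptions(...) before the Vertx instance is created.
        BackendRegistries.setupBackend(metricsOptions);

        // Anywhere else in the application, the shared default registry can be
        // looked up to register custom meters alongside the built-in Vert.x metrics.
        MeterRegistry registry = BackendRegistries.getDefaultNow();
        if (registry != null) {
            registry.counter("example.requests").increment(); // illustrative meter name
        }
    }
}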
Example 1
Source File: KonduitServingLauncher.java    From konduit-serving with Apache License 2.0
@Override
public void beforeStartingVertx(VertxOptions options) {
    MicrometerMetricsOptions micrometerMetricsOptions = new MicrometerMetricsOptions()
            .setMicrometerRegistry(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT))
            .setPrometheusOptions(new VertxPrometheusOptions()
                    .setEnabled(true));

    log.info("Setup micro meter options.");
    BackendRegistries.setupBackend(micrometerMetricsOptions);

    options.setMetricsOptions(micrometerMetricsOptions);
    options.setMaxEventLoopExecuteTime(60);
    options.setMaxEventLoopExecuteTimeUnit(TimeUnit.SECONDS);
}
 
Example 2
Source File: DeployKonduitServing.java    From konduit-serving with Apache License 2.0
public static void deployInference(DeploymentOptions deploymentOptions, Handler<AsyncResult<InferenceConfiguration>> eventHandler) {
    MicrometerMetricsOptions micrometerMetricsOptions = new MicrometerMetricsOptions()
            .setMicrometerRegistry(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT))
            .setPrometheusOptions(new VertxPrometheusOptions()
                    .setEnabled(true));

    log.info("Setup micro meter options.");
    BackendRegistries.setupBackend(micrometerMetricsOptions);

    deployInference(new VertxOptions()
                    .setMaxEventLoopExecuteTime(120)
                    .setMaxEventLoopExecuteTimeUnit(TimeUnit.SECONDS)
                    .setMetricsOptions(micrometerMetricsOptions),
            deploymentOptions, eventHandler);
}
 
Example 3
Source File: DeployKonduitServing.java    From konduit-serving with Apache License 2.0
public static void deployInference(InferenceConfiguration inferenceConfiguration, Handler<AsyncResult<InferenceConfiguration>> eventHandler) {
    MicrometerMetricsOptions micrometerMetricsOptions = new MicrometerMetricsOptions()
            .setMicrometerRegistry(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT))
            .setPrometheusOptions(new VertxPrometheusOptions()
                    .setEnabled(true));

    log.info("Setup micro meter options.");
    BackendRegistries.setupBackend(micrometerMetricsOptions);

    deployInference(new VertxOptions()
                    .setMaxEventLoopExecuteTime(120)
                    .setMaxEventLoopExecuteTimeUnit(TimeUnit.SECONDS)
                    .setMetricsOptions(micrometerMetricsOptions),
            new DeploymentOptions().setConfig(new JsonObject(inferenceConfiguration.toJson())), eventHandler);
}
 
Example 4
Source File: MetricsUtils.java    From konduit-serving with Apache License 2.0
/**
 * Sets up a Prometheus-backed Micrometer registry and returns it together
 * with the corresponding metrics options.
 *
 * @return a pair of the configured {@link MicrometerMetricsOptions} and the {@link MeterRegistry}
 */
public static Pair<MicrometerMetricsOptions,MeterRegistry> setupPrometheus() {
    PrometheusMeterRegistry registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);

    MicrometerMetricsOptions micrometerMetricsOptions = new MicrometerMetricsOptions()
            .setMicrometerRegistry(registry)
            .setPrometheusOptions(new VertxPrometheusOptions()
                    .setEnabled(true));
    BackendRegistries.setupBackend(micrometerMetricsOptions);

    return Pair.of(micrometerMetricsOptions, registry);
}
 
Example 5
Source File: DeployKonduitOrchestration.java    From konduit-serving with Apache License 2.0
public static void deployInferenceClustered(DeploymentOptions deploymentOptions, Handler<AsyncResult<InferenceConfiguration>> eventHandler) {
    MicrometerMetricsOptions micrometerMetricsOptions = new MicrometerMetricsOptions()
            .setMicrometerRegistry(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT))
            .setPrometheusOptions(new VertxPrometheusOptions()
                    .setEnabled(true));

    log.info("Setup micro meter options.");
    BackendRegistries.setupBackend(micrometerMetricsOptions);

    deployInferenceClustered(new VertxOptions()
                    .setMaxEventLoopExecuteTime(120)
                    .setMaxEventLoopExecuteTimeUnit(TimeUnit.SECONDS)
                    .setMetricsOptions(micrometerMetricsOptions),
            deploymentOptions, eventHandler);
}
 
Example 6
Source File: DeployKonduitOrchestration.java    From konduit-serving with Apache License 2.0
public static void deployInferenceClustered(InferenceConfiguration inferenceConfiguration, Handler<AsyncResult<InferenceConfiguration>> eventHandler) {
    MicrometerMetricsOptions micrometerMetricsOptions = new MicrometerMetricsOptions()
            .setMicrometerRegistry(new PrometheusMeterRegistry(PrometheusConfig.DEFAULT))
            .setPrometheusOptions(new VertxPrometheusOptions()
                    .setEnabled(true));

    log.info("Setup micro meter options.");
    BackendRegistries.setupBackend(micrometerMetricsOptions);

    deployInferenceClustered(new VertxOptions()
                    .setMaxEventLoopExecuteTime(120)
                    .setMaxEventLoopExecuteTimeUnit(TimeUnit.SECONDS)
                    .setMetricsOptions(micrometerMetricsOptions),
            new DeploymentOptions().setConfig(new JsonObject(inferenceConfiguration.toJson())), eventHandler);
}
 
Example 7
Source File: InferenceVerticleHttp.java    From konduit-serving with Apache License 2.0
public Router createRouter() {
    InferenceHttpApi inferenceHttpApi = new InferenceHttpApi(pipelineExecutor);

    Router inferenceRouter = Router.router(vertx);
    ServiceLoader<MetricsProvider> sl = ServiceLoader.load(MetricsProvider.class);
    Iterator<MetricsProvider> iterator = sl.iterator();
    MetricsProvider metricsProvider = null;
    if (iterator.hasNext()) {
        metricsProvider = iterator.next();
    }

    Object endpoint = metricsProvider == null ? null : metricsProvider.getEndpoint();
    if (endpoint != null) {
        log.info("MetricsProvider implementation detected, adding endpoint /metrics");
        MicrometerMetricsOptions micrometerMetricsOptions = new MicrometerMetricsOptions()
                .setMicrometerRegistry(MicrometerRegistry.getRegistry())
                .setPrometheusOptions(new VertxPrometheusOptions().setEnabled(true));
        BackendRegistries.setupBackend(micrometerMetricsOptions);

        inferenceRouter.get("/metrics").handler((Handler<RoutingContext>) endpoint)
                .failureHandler(failureHandler -> {
                    // Guard against a null failure to avoid an NPE when building the response.
                    Throwable failure = failureHandler.failure();
                    if (failure != null) {
                        log.error("Failed to scrape metrics", failure);
                    }

                    failureHandler.response()
                            .setStatusCode(500)
                            .end(failure != null ? failure.toString() : "Failed to scrape metrics");
                });
    }

    inferenceRouter.post().handler(BodyHandler.create()
            .setUploadsDirectory(DirectoryFetcher.getFileUploadsDir().getAbsolutePath())
            .setDeleteUploadedFilesOnEnd(true)
            .setMergeFormAttributes(true))
            .failureHandler(failureHandler -> {
                Throwable throwable = failureHandler.failure();
                int statusCode = failureHandler.statusCode();

                if (statusCode == 404) {
                    log.warn("404 at route {}" + failureHandler.request().path());
                } else if (failureHandler.failed()) {
                    if (throwable != null) {
                        log.error("Request failed with cause ", throwable);
                    } else {
                        log.error("Request failed with unknown cause.");
                    }
                }

                if (throwable instanceof KonduitServingHttpException) {
                    sendErrorResponse(failureHandler, ((KonduitServingHttpException) throwable).getErrorResponse());
                } else {
                    failureHandler.response()
                            .setStatusCode(500)
                            .end(throwable != null ? throwable.toString() : "Internal Server Exception");
                }
            });

    inferenceRouter.post("/predict")
            .consumes(APPLICATION_JSON.toString())
            .consumes(APPLICATION_OCTET_STREAM.toString())
            .produces(APPLICATION_JSON.toString())
            .produces(APPLICATION_OCTET_STREAM.toString())
            .handler(inferenceHttpApi::predict);


    //Custom endpoints:
    if (inferenceConfiguration.customEndpoints() != null && !inferenceConfiguration.customEndpoints().isEmpty()) {
        addCustomEndpoints(inferenceHttpApi, inferenceRouter);
    }

    return inferenceRouter;
}