Java Code Examples for org.elasticsearch.action.bulk.BulkRequest

The following examples show how to use org.elasticsearch.action.bulk.BulkRequest. They are extracted from open source projects; where available, the source project, source file, and license are listed above each example.
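Before the project-specific examples, here is a minimal, self-contained sketch of the pattern most of them share: build a BulkRequest, add index and delete actions to it, execute the bulk call, and inspect the per-item results. This sketch is not taken from any of the projects below; the client, index name, document type, and IDs are illustrative assumptions.

import java.io.IOException;

import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

public class BulkRequestSketch {

    // "client" is assumed to be an already configured RestHighLevelClient;
    // "my-index", the "doc" type, and the document IDs are placeholders.
    public BulkResponse indexAndDelete(RestHighLevelClient client) throws IOException {
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("my-index", "doc", "1")
                .source("{\"field\":\"value\"}", XContentType.JSON));
        bulkRequest.add(new DeleteRequest("my-index", "doc", "2"));

        BulkResponse response = client.bulk(bulkRequest, RequestOptions.DEFAULT);
        if (response.hasFailures()) {
            for (BulkItemResponse item : response) {
                if (item.isFailed()) {
                    // each failed item reports its own failure message; callers usually log or retry it
                    System.err.println(item.getFailureMessage());
                }
            }
        }
        return response;
    }
}
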
Example 1
Source Project: conductor   Source File: ElasticSearchRestDAOV5.java    License: Apache License 2.0
private void indexObject(final String index, final String docType, final String docId, final Object doc) {

    byte[] docBytes;
    try {
        docBytes = objectMapper.writeValueAsBytes(doc);
    } catch (JsonProcessingException e) {
        logger.error("Failed to convert {} '{}' to byte string", docType, docId);
        return;
    }

    IndexRequest request = new IndexRequest(index, docType, docId);
    request.source(docBytes, XContentType.JSON);

    if (bulkRequests.get(docType) == null) {
        bulkRequests.put(docType, new BulkRequests(System.currentTimeMillis(), new BulkRequest()));
    }

    bulkRequests.get(docType).getBulkRequest().add(request);
    if (bulkRequests.get(docType).getBulkRequest().numberOfActions() >= this.indexBatchSize) {
        indexBulkRequest(docType);
    }
}
 
Example 2
@Override
protected void populateData(TransportClient tc) {

    try {
        tc.admin().indices().create(new CreateIndexRequest("logs").mapping("_doc", FileHelper.loadFile("dlsfls/masked_field_mapping.json"), XContentType.JSON)).actionGet();


        byte[] data = FileHelper.loadFile("dlsfls/logs_bulk_data.json").getBytes(StandardCharsets.UTF_8);
        BulkRequest br = new BulkRequest().add(data, 0, data.length, XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE);
        if(tc.bulk(br).actionGet().hasFailures()) {
            Assert.fail("bulk import failed");
        }
        Thread.sleep(1000);

    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.toString());
    }

}
 
Example 3
Source Project: james-project   Source File: ElasticSearchIndexer.java    License: Apache License 2.0
public Mono<BulkResponse> update(List<UpdatedRepresentation> updatedDocumentParts, RoutingKey routingKey) {
    Preconditions.checkNotNull(updatedDocumentParts);
    Preconditions.checkNotNull(routingKey);
    BulkRequest request = new BulkRequest();
    updatedDocumentParts.forEach(updatedDocumentPart -> request.add(
        new UpdateRequest(aliasName.getValue(),
            NodeMappingFactory.DEFAULT_MAPPING_NAME,
            updatedDocumentPart.getId().asString())
            .doc(updatedDocumentPart.getUpdatedDocumentPart(), XContentType.JSON)
            .routing(routingKey.asString())));

    return client.bulk(request, RequestOptions.DEFAULT)
        .onErrorResume(ValidationException.class, exception -> {
            LOGGER.warn("Error while updating index", exception);
            return Mono.empty();
        });
}
 
Example 4
Source Project: code   Source File: EsDataAdapter.java    License: Apache License 2.0
public static BulkResponse bulkImport(List<Sku> skuList) throws IOException {
    BulkRequest bulkRequest = new BulkRequest();
    for (Sku sku : skuList) {
        Map<String, Object> skuMap = new HashMap<String, Object>();
        IndexRequest indexRequest = new IndexRequest("sku", "doc", sku.getSkuId());
        skuMap.put("name", sku.getName());
        skuMap.put("price", sku.getPrice());
        skuMap.put("image", sku.getImage());
        skuMap.put("createTime", DateUtils.dateFormat(sku.getCreateTime()));
        skuMap.put("categoryName", sku.getCategoryName());
        skuMap.put("brandName", sku.getBrandName());
        // "{'颜色': '红色', '版本': '8GB+128GB'}"
        Map spec = JSON.parseObject(sku.getSpec());
        skuMap.put("spec", spec);
        skuMap.put("commentNum", sku.getCommentNum());
        skuMap.put("saleNum", sku.getSaleNum());
        skuMap.put("spuId", sku.getSpuId());
        indexRequest.source(skuMap);
        bulkRequest.add(indexRequest);
    }
    return restHighLevelClient.bulk(bulkRequest, RequestOptions.DEFAULT);
}
 
Example 5
Source Project: elasticsearch-helper   Source File: HttpBulkProcessor.java    License: Apache License 2.0
HttpBulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
    this.client = client;
    this.listener = listener;
    this.concurrentRequests = concurrentRequests;
    this.bulkActions = bulkActions;
    this.bulkSize = bulkSize.bytes();

    this.semaphore = new Semaphore(concurrentRequests);
    this.bulkRequest = new BulkRequest();

    if (flushInterval != null) {
        this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
        this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
        this.scheduler.setContinueExistingPeriodicTasksAfterShutdownPolicy(false);
        this.scheduledFuture = this.scheduler.scheduleWithFixedDelay(new Flush(), flushInterval.millis(), flushInterval.millis(), TimeUnit.MILLISECONDS);
    } else {
        this.scheduler = null;
        this.scheduledFuture = null;
    }
}
 
Example 6
Source Project: flink   Source File: ElasticsearchSinkBase.java    License: Apache License 2.0
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
	LOG.error("Failed Elasticsearch bulk request: {}", failure.getMessage(), failure);

	try {
		for (ActionRequest action : request.requests()) {
			failureHandler.onFailure(action, failure, -1, failureRequestIndexer);
		}
	} catch (Throwable t) {
		// fail the sink and skip the rest of the items
		// if the failure handler decides to throw an exception
		failureThrowable.compareAndSet(null, t);
	}

	if (flushOnCheckpoint) {
		numPendingRequests.getAndAdd(-request.numberOfActions());
	}
}
 
Example 7
Source Project: incubator-gobblin   Source File: ElasticsearchWriterBase.java    License: Apache License 2.0
protected Pair<BulkRequest, FutureCallbackHolder> prepareBatch(Batch<Object> batch, WriteCallback callback) {
  BulkRequest bulkRequest = new BulkRequest();
  final StringBuilder stringBuilder = new StringBuilder();
  for (Object record : batch.getRecords()) {
    try {
      byte[] serializedBytes = this.serializer.serializeToJson(record);
      log.debug("serialized record: {}", serializedBytes);
      IndexRequest indexRequest = new IndexRequest(this.indexName, this.indexType)
          .source(serializedBytes, 0, serializedBytes.length, XContentType.JSON);
      if (this.idMappingEnabled) {
        String id = this.typeMapper.getValue(this.idFieldName, record);
        indexRequest.id(id);
        stringBuilder.append(";").append(id);
      }
      bulkRequest.add(indexRequest);
    }
    catch (Exception e) {
      log.error("Encountered exception {}", e);
    }
  }
  FutureCallbackHolder futureCallbackHolder = new FutureCallbackHolder(callback,
      exception -> log.error("Batch: {} failed on ids; {} with exception {}", batch.getId(),
          stringBuilder.toString(), exception),
      this.malformedDocPolicy);
  return new Pair(bulkRequest, futureCallbackHolder);
}
 
Example 8
@Override
public void afterBulk(long executionId, BulkRequest request,
        BulkResponse response) {
    logger.debug("afterBulk {} failures:{}", executionId,
            response.hasFailures());
    if (response.hasFailures()) {
        long succeeded = 0;
        for (Iterator<BulkItemResponse> i = response.iterator(); i
                .hasNext(); ) {
            if (!i.next().isFailed()) {
                succeeded++;
            }
        }
        if (succeeded > 0) {
            succeededDocs.addAndGet(succeeded);
        }
    } else {
        succeededDocs.addAndGet(request.numberOfActions());
    }
    bulkProcessed();
}
 
Example 9
Source Project: garmadon   Source File: ElasticSearchListener.java    License: Apache License 2.0
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
    if (response.hasFailures()) {
        LOGGER.error("Bulk[{}] executed with failures", executionId);
        for (BulkItemResponse item : response.getItems()) {
            if (item.isFailed()) {
                LOGGER.error("Failed on {} due to {}", item.getId(), item.getFailureMessage());
                numberOfEventInError.inc();
            }
        }
    } else {
        LOGGER.info("Successfully completed Bulk[{}] in {} ms", executionId, response.getTook().getMillis());
        latencyIndexingEvents.observe(response.getTook().getMillis());
    }
    CommittableOffset<String, byte[]> lastOffset = (CommittableOffset<String, byte[]>) request.payloads().get(request.payloads().size() - 1);
    lastOffset
            .commitAsync()
            .whenComplete((topicPartitionOffset, exception) -> {
                if (exception != null) {
                    LOGGER.warn("Could not commit kafka offset {}|{}", lastOffset.getOffset(), lastOffset.getPartition());
                    numberOfOffsetCommitError.inc();
                } else {
                    LOGGER.info("Committed kafka offset {}|{}", topicPartitionOffset.getOffset(), topicPartitionOffset.getPartition());
                }
            });
}
 
Example 10
@Test
public void testScenarioAsBulkRequest() throws IOException {
    client.bulk(new BulkRequest()
        .add(new IndexRequest(INDEX, DOC_TYPE, "2").source(
            jsonBuilder()
                .startObject()
                .field(FOO, BAR)
                .endObject()
        ))
        .add(new IndexRequest(INDEX, DOC_TYPE, "3").source(
            jsonBuilder()
                .startObject()
                .field(FOO, BAR)
                .endObject()
        ))
    , RequestOptions.DEFAULT);
}
 
Example 11
@Test
public void testRun() throws IOException {
    AnomalyModel anomalyModel = AnomalyModel.newBuilder()
            .key("key")
            .value(100)
            .level("NORMAL")
            .uuid("test")
            .timestamp("date")
            .anomalyThresholds(null)
            .tags(null)
            .build();
    anomalyModel = AnomalyModel.newBuilder(anomalyModel).build();
    List<AnomalyModel> anomalyModels = new ArrayList<>();
    anomalyModels.add(anomalyModel);
    BulkResponse bulkResponse = buildBulkResponseHappy();

    when(client.bulk(any(BulkRequest.class), any(RequestOptions.class))).thenReturn(bulkResponse);
    when(client.close()).thenReturn(true);
    ElasticSearchBulkService elasticSearchBulkService = new ElasticSearchBulkService(anomalyModels);
    elasticSearchBulkService.setElasticSearchClient(client);
    elasticSearchBulkService.run();
    verify(elasticSearchBulkService.getElasticSearchClient(), times(1))
            .bulk(any(BulkRequest.class), any(RequestOptions.class));

}
 
Example 12
Source Project: conductor   Source File: ElasticSearchRestDAOV6.java    License: Apache License 2.0
private void indexObject(final String index, final String docType, final String docId, final Object doc) {

    byte[] docBytes;
    try {
        docBytes = objectMapper.writeValueAsBytes(doc);
    } catch (JsonProcessingException e) {
        logger.error("Failed to convert {} '{}' to byte string", docType, docId);
        return;
    }

    IndexRequest request = new IndexRequest(index, docType, docId);
    request.source(docBytes, XContentType.JSON);

    if (bulkRequests.get(docType) == null) {
        bulkRequests.put(docType, new BulkRequests(System.currentTimeMillis(), new BulkRequest()));
    }

    bulkRequests.get(docType).getBulkRequest().add(request);
    if (bulkRequests.get(docType).getBulkRequest().numberOfActions() >= this.indexBatchSize) {
        indexBulkRequest(docType);
    }
}
 
Example 13
Source Project: data-generator   Source File: Output.java    License: Apache License 2.0
private static void writeBatchToES(String index, String type, List<Map<String, Object>> list) throws Exception{
    if(list.isEmpty()){
        return;
    }
    BulkRequest request = new BulkRequest();
    for(Map<String, Object> data : list) {
        String id = data.get("id").toString();
        request.add(
                new IndexRequest(index, type, id)
                        .source(data));

    }
    BulkResponse bulkResponse = CLIENT.bulk(request);
    if (bulkResponse.hasFailures()) {
        for (BulkItemResponse bulkItemResponse : bulkResponse) {
            if (bulkItemResponse.isFailed()) {
                BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
                LOGGER.error("ES索引失败: {}", failure.getMessage());
            }
        }
    }
}
 
Example 14
@Test
public void failureHandlerExecutesFailoverForEachBatchItemSeparately() {

    // given
    Builder builder = createTestObjectFactoryBuilder();
    ClientObjectFactory<TransportClient, BulkRequest> config = builder.build();

    FailoverPolicy failoverPolicy = spy(new NoopFailoverPolicy());

    String payload1 = "test1";
    String payload2 = "test2";
    BulkRequest bulk = new BulkRequest()
            .add(spy(new IndexRequest().source(payload1, XContentType.CBOR)))
            .add(spy(new IndexRequest().source(payload2, XContentType.CBOR)));

    // when
    config.createFailureHandler(failoverPolicy).apply(bulk);

    // then
    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
    verify(failoverPolicy, times(2)).deliver(captor.capture());

    assertTrue(captor.getAllValues().contains(payload1));
    assertTrue(captor.getAllValues().contains(payload2));
}
 
Example 15
@Override
public void delete(final long snifferId, final String[] eventIds) {
	clientTpl.executeWithClient(new ClientCallback<Object>() {
		@Override
		public Object execute(final Client client) {
			final BulkRequest deletes = new BulkRequest().refresh(true);
			for (final String id : eventIds) {
				for (final String index : indexNamingStrategy.getRetrievalNames(snifferId)) {
					deletes.add(new DeleteRequest(index, getType(snifferId), id));
				}
			}
			client.bulk(deletes).actionGet();
			logger.info("Deleted events: {}", (Object[]) eventIds);
			return null;
		}
	});
}
 
Example 16
private BulkProcessor build(final RestHighLevelClient client) {
    logger.trace("Bulk processor name: [{}]  bulkActions: [{}], bulkSize: [{}], flush interval time: [{}]," +
                    " concurrent Request: [{}], backoffPolicyTimeInterval: [{}], backoffPolicyRetries: [{}] ",
            new Object[]{bulkProcessorName, bulkActions, bulkSize, flushIntervalTime,
                    concurrentRequest, backoffPolicyTimeInterval, backoffPolicyRetries});
    BiConsumer<BulkRequest, ActionListener<BulkResponse>> bulkConsumer =
            (request, bulkListener) -> client
                    .bulkAsync(request, RequestOptions.DEFAULT, bulkListener);
    return BulkProcessor.builder(bulkConsumer, getListener())
            .setBulkActions(bulkActions)
            .setBulkSize(bulkSize)
            .setFlushInterval(flushIntervalTime)
            .setConcurrentRequests(concurrentRequest)
            .setBackoffPolicy(BackoffPolicy.exponentialBackoff(
                    Util.getTimeValue(backoffPolicyTimeInterval,
                            DEFAULT_ES_BACKOFF_POLICY_START_DELAY),
                    backoffPolicyRetries))
            .build();
}
 
Example 17
@Override
public <T extends Entity> boolean bulkUpdate(String indexName, List<? extends Entity> entities) throws IOException {
    BulkRequest bulkRequest = new BulkRequest();
    entities.stream().map(e -> createUpdateRequest(indexName, getType(e), e.getId(), getJson(e), getParent(e), getRoot(e))).
            forEach(bulkRequest::add);
    bulkRequest.setRefreshPolicy(esCfg.refreshPolicy);

    BulkResponse bulkResponse = client.bulk(bulkRequest);
    if (bulkResponse.hasFailures()) {
        for (BulkItemResponse resp : bulkResponse.getItems()) {
            if (resp.isFailed()) {
                LOGGER.error("bulk update failed : {}", resp.getFailureMessage());
            }
        }
        return false;
    }
    return true;
}
 
Example 18
@Test
public void configReturnsACopyOfServerUrisList() {

    // given
    Builder builder = createTestObjectFactoryBuilder();
    builder.withServerUris("http://localhost:9200;http://localhost:9201;http://localhost:9202");
    ClientObjectFactory<TransportClient, BulkRequest> config = builder.build();

    // when
    Collection<String> serverUrisList = config.getServerList();
    serverUrisList.add("test");

    // then
    assertNotEquals(serverUrisList.size(), config.getServerList().size());

}
 
Example 19
public void bulkDocument(String type, Map<String, Map<String, Object>> sources) {
    try {
        if (this.instance() == null) return;
        BulkRequest requests = new BulkRequest();
        Iterator<String> it = sources.keySet().iterator();
        int count = 0;
        while (it.hasNext()) {
            count++;
            String next = it.next();
            IndexRequest request = new IndexRequest(name, type, next);
            request.source(sources.get(next));
            requests.add(request);
            if (count % 1000 == 0) {
                client.bulk(requests, RequestOptions.DEFAULT);
                requests.requests().clear();
                count = 0;
            }
        }
        if (requests.numberOfActions() > 0) client.bulk(requests, RequestOptions.DEFAULT);
    } catch (IOException e) {
        log.error(e.getMessage());
    }
}
 
Example 20
Source Project: ns4_gear_watchdog   Source File: ElasticSearchHighSink.java    License: Apache License 2.0
public void bulkExecute(List<Event> events) throws Exception {
    // bulk insert the data
    BulkRequest request = new BulkRequest();
    String indexName = null;
    for (Event event : events) {
        // if the day has not rolled over, the index name can be reused instead of being rebuilt
        if (StringUtils.isEmpty(indexName) || !indexName.endsWith(indexNameBuilder.getIndexSuffix(event))) {
            indexName = indexNameBuilder.getIndexName(event);
        }
        request.add(new IndexRequest(indexName, indexType).source(eventSerializer.serializer(event), XContentType.JSON));
    }
    BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
    TimeValue took = bulkResponse.getTook();
    logger.debug("[批量新增花费的毫秒]:" + took + "," + took.getMillis() + "," + took.getSeconds() + ",events[" + events.size() + "]");
}
 
Example 21
Source Project: tunnel   Source File: EsPublisher.java    License: Apache License 2.0
private BulkRequest createBulkRequest(List<DocWriteRequest> doc) {
    BulkRequest br = new BulkRequest();
    br.add(doc);
    br.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
    br.waitForActiveShards(ActiveShardCount.ONE);
    return br;
}
 
Example 22
Source Project: Flink-CEPplus   Source File: ElasticsearchSinkBase.java    License: Apache License 2.0
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
	if (response.hasFailures()) {
		BulkItemResponse itemResponse;
		Throwable failure;
		RestStatus restStatus;

		try {
			for (int i = 0; i < response.getItems().length; i++) {
				itemResponse = response.getItems()[i];
				failure = callBridge.extractFailureCauseFromBulkItemResponse(itemResponse);
				if (failure != null) {
					LOG.error("Failed Elasticsearch item request: {}", itemResponse.getFailureMessage(), failure);

					restStatus = itemResponse.getFailure().getStatus();
					if (restStatus == null) {
						failureHandler.onFailure(request.requests().get(i), failure, -1, failureRequestIndexer);
					} else {
						failureHandler.onFailure(request.requests().get(i), failure, restStatus.getStatus(), failureRequestIndexer);
					}
				}
			}
		} catch (Throwable t) {
			// fail the sink and skip the rest of the items
			// if the failure handler decides to throw an exception
			failureThrowable.compareAndSet(null, t);
		}
	}

	if (flushOnCheckpoint) {
		numPendingRequests.getAndAdd(-request.numberOfActions());
	}
}
 
Example 23
Source Project: flink   Source File: ElasticsearchSinkBase.java    License: Apache License 2.0
@Override
public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
	if (response.hasFailures()) {
		BulkItemResponse itemResponse;
		Throwable failure;
		RestStatus restStatus;

		try {
			for (int i = 0; i < response.getItems().length; i++) {
				itemResponse = response.getItems()[i];
				failure = callBridge.extractFailureCauseFromBulkItemResponse(itemResponse);
				if (failure != null) {
					LOG.error("Failed Elasticsearch item request: {}", itemResponse.getFailureMessage(), failure);

					restStatus = itemResponse.getFailure().getStatus();
					if (restStatus == null) {
						failureHandler.onFailure(request.requests().get(i), failure, -1, failureRequestIndexer);
					} else {
						failureHandler.onFailure(request.requests().get(i), failure, restStatus.getStatus(), failureRequestIndexer);
					}
				}
			}
		} catch (Throwable t) {
			// fail the sink and skip the rest of the items
			// if the failure handler decides to throw an exception
			failureThrowable.compareAndSet(null, t);
		}
	}

	if (flushOnCheckpoint) {
		numPendingRequests.getAndAdd(-request.numberOfActions());
	}
}
 
Example 24
private BatchOperations<BulkRequest> createDefaultTestBulkRequestBatchOperations() {
    BulkProcessorObjectFactory factory = BulkProcessorObjectFactoryTest
            .createTestObjectFactoryBuilder()
            .build();

    return spy(factory.createBatchOperations());
}
 
Example 25
@Test
public void testScenarioAsBulkRequest() throws IOException, ExecutionException, InterruptedException {
    doBulk(new BulkRequest()
        .add(new IndexRequest(INDEX, DOC_TYPE, "2").source(
            jsonBuilder()
                .startObject()
                .field(FOO, BAR)
                .endObject()
        ))
        .add(new DeleteRequest(INDEX, DOC_TYPE, "2")));

    validateSpanContentAfterBulkRequest();
}
 
Example 26
Source Project: SkaETL   Source File: ConfService.java    License: Apache License 2.0
private void callAddES(ConfigurationLogstash cl) throws IOException {
    BulkRequest bulk = new BulkRequest();
    ObjectMapper objectMapper = new ObjectMapper();
    String fluxJson = objectMapper.writeValueAsString(ConfEsSkalogs.builder().configurationLogstash(cl).pipeline(StringEscapeUtils.escapeJava(utilsConfig.generateConfig(cl))).build());
    bulk.add(new IndexRequest(INDEX_STORAGE)
            .type("doc")
            .id(cl.getIdConfiguration())
            .source(fluxJson, XContentType.JSON));
    BulkResponse bulkResponse = restHighLevelClient.bulk(bulk);
    if (bulkResponse.getItems().length == 1) {
        cl.setIdEs(bulkResponse.getItems()[0].getId());
    } else {
        log.error("Problem with return ES {}", bulkResponse);
    }
}
 
Example 27
Source Project: SkaETL   Source File: ErrorToElasticsearchProcessor.java    License: Apache License 2.0
@Override
protected void parseResultErrors(BulkRequest request, BulkResponse bulkItemResponses) {
    for (BulkItemResponse bir : bulkItemResponses) {
        MDC.put("item_error", bir.getFailureMessage());
        log.info("EsError" + bir.getFailureMessage());
        MDC.remove("item_error");
        //TODO ...
    }
}
 
Example 28
Source Project: SkaETL   Source File: AbstractElasticsearchProcessor.java    License: Apache License 2.0
private void parseErrorsTechnical(BulkRequest bulkRequest, Throwable failure) {
    bulkRequest.requests().stream()
            .filter(request -> request.opType() == DocWriteRequest.OpType.INDEX)
            .map(this::toRawMessage)
            .filter(message -> message != null)
            .forEach(rawMessage -> routeErrorTechnical(rawMessage, failure));

}
 
Example 29
Source Project: SkaETL   Source File: AbstractElasticsearchProcessor.java    License: Apache License 2.0
protected void parseResultErrors(BulkRequest request, BulkResponse bulkItemResponses) {
    for (BulkItemResponse bir : bulkItemResponses) {
        DocWriteRequest docWriteRequest = request.requests().get(bir.getItemId());
        if (bir.isFailed()) {
            if (isRetryable(bir)) {
                routeToNextTopic(bir, toRawMessage(docWriteRequest), false);
            } else {
                routeToNextTopic(bir, toRawMessage(docWriteRequest), true);
            }
        }
    }
}