org.elasticsearch.index.shard.IndexShard Java Examples

The following examples show how to use org.elasticsearch.index.shard.IndexShard. They are taken from open source projects; the source file and project license are noted above each example.
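
Before the numbered examples, here is a minimal sketch of the lookup pattern most of them share: resolve the IndexService from the IndicesService, fetch the IndexShard by shard id, and release any searcher you acquire. This sketch is not taken from the projects below; the class name IndexShardLookupSketch and the countLiveDocs helper are illustrative only, and the exact method signatures vary between Elasticsearch and CrateDB versions.

import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;

public final class IndexShardLookupSketch {

    // Hypothetical helper: count the live documents in a single shard.
    public static long countLiveDocs(IndicesService indicesService, ShardId shardId) {
        // indexServiceSafe throws IndexNotFoundException instead of returning null
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        IndexShard shard = indexService.getShard(shardId.id());
        // acquireSearcher gives a point-in-time view; try-with-resources releases it
        try (Engine.Searcher searcher = shard.acquireSearcher("doc-count-sketch")) {
            return searcher.reader().numDocs();
        }
    }
}
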
Example #1
Source File: PeerRecoverySourceService.java    From crate with Apache License 2.0
private void recover(StartRecoveryRequest request, ActionListener<RecoveryResponse> listener) throws IOException {
    final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    final IndexShard shard = indexService.getShard(request.shardId().id());

    final ShardRouting routingEntry = shard.routingEntry();

    if (routingEntry.primary() == false || routingEntry.active() == false) {
        throw new DelayRecoveryException("source shard [" + routingEntry + "] is not an active primary");
    }

    if (request.isPrimaryRelocation()
        && (
            routingEntry.relocating() == false
            || routingEntry.relocatingNodeId().equals(request.targetNode().getId()) == false)) {
        LOGGER.debug(
            "delaying recovery of {} as source shard is not marked yet as relocating to {}",
            request.shardId(), request.targetNode());
        throw new DelayRecoveryException("source shard is not marked yet as relocating to [" + request.targetNode() + "]");
    }

    RecoverySourceHandler handler = ongoingRecoveries.addNewRecovery(request, shard);
    LOGGER.trace(
        "[{}][{}] starting recovery to {}",
        request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode());
    handler.recoverToTarget(ActionListener.runAfter(listener, () -> ongoingRecoveries.remove(shard, handler)));
}
 
Example #2
Source File: TransportShardUpsertAction.java    From Elasticsearch with Apache License 2.0
private Engine.IndexingOperation prepareIndexOnPrimary(IndexShard indexShard,
                                                       long version,
                                                       ShardUpsertRequest request,
                                                       ShardUpsertRequest.Item item) {
    SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, item.source())
            .type(request.type())
            .id(item.id())
            .routing(request.routing());

    if (logger.isTraceEnabled()) {
        logger.trace("[{}] shard operation with opType={} id={} version={}  source={}",
                indexShard.shardId(), item.opType(), item.id(), version, item.source().toUtf8());
    }
    if (item.opType() == IndexRequest.OpType.INDEX) {
        return indexShard.prepareIndexOnPrimary(sourceToParse, version, item.versionType(), request.canHaveDuplicates());
    }
    return indexShard.prepareCreateOnPrimary(
            sourceToParse, version, item.versionType(), request.canHaveDuplicates(), false);
}
 
Example #3
Source File: BlobRecoveryHandler.java    From crate with Apache License 2.0
public BlobRecoveryHandler(IndexShard shard,
                           RecoveryTargetHandler recoveryTarget,
                           StartRecoveryRequest request,
                           int fileChunkSizeInBytes,
                           int maxConcurrentFileChunks,
                           final TransportService transportService,
                           BlobTransferTarget blobTransferTarget,
                           BlobIndicesService blobIndicesService) {
    super(shard, recoveryTarget, request, fileChunkSizeInBytes, maxConcurrentFileChunks);
    assert BlobIndex.isBlobIndex(shard.shardId().getIndexName()) : "Shard must belong to a blob index";
    this.blobShard = blobIndicesService.blobShardSafe(request.shardId());
    this.request = request;
    this.transportService = transportService;
    this.blobTransferTarget = blobTransferTarget;
    this.shard = shard;
    String property = System.getProperty("tests.short_timeouts");
    if (property == null) {
        GET_HEAD_TIMEOUT = 30;
    } else {
        GET_HEAD_TIMEOUT = 2;
    }
}
 
Example #4
Source File: RecoverySource.java    From Elasticsearch with Apache License 2.0
synchronized void cancel(IndexShard shard, String reason) {
    final Set<RecoverySourceHandler> shardRecoveryHandlers = ongoingRecoveries.get(shard);
    if (shardRecoveryHandlers != null) {
        final List<Exception> failures = new ArrayList<>();
        for (RecoverySourceHandler handlers : shardRecoveryHandlers) {
            try {
                handlers.cancel(reason);
            } catch (Exception ex) {
                failures.add(ex);
            } finally {
                shard.recoveryStats().decCurrentAsSource();
            }
        }
        ExceptionsHelper.maybeThrowRuntimeAndSuppress(failures);
    }
}
 
Example #5
Source File: TransportShardDeleteActionTest.java    From crate with Apache License 2.0
@Before
public void prepare() throws Exception {
    indexUUID = UUIDs.randomBase64UUID();
    IndicesService indicesService = mock(IndicesService.class);
    IndexService indexService = mock(IndexService.class);
    when(indicesService.indexServiceSafe(new Index(TABLE_IDENT.indexNameOrAlias(), indexUUID))).thenReturn(indexService);
    indexShard = mock(IndexShard.class);
    when(indexService.getShard(0)).thenReturn(indexShard);


    transportShardDeleteAction = new TransportShardDeleteAction(
        MockTransportService.createNewService(
            Settings.EMPTY, Version.CURRENT, THREAD_POOL, clusterService.getClusterSettings()),
        mock(IndexNameExpressionResolver.class),
        mock(ClusterService.class),
        indicesService,
        mock(ThreadPool.class),
        mock(ShardStateAction.class),
        mock(SchemaUpdateClient.class)
    );
}
 
Example #6
Source File: SyncedFlushService.java    From crate with Apache License 2.0
@Override
public void onShardInactive(final IndexShard indexShard) {
    // we only want to call sync flush once, so only trigger it when we are on a primary
    if (indexShard.routingEntry().primary()) {
        attemptSyncedFlush(indexShard.shardId(), new ActionListener<ShardsSyncedFlushResult>() {
            @Override
            public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                LOGGER.trace("{} sync flush on inactive shard returned successfully for sync_id: {}", syncedFlushResult.getShardId(), syncedFlushResult.syncId());
            }

            @Override
            public void onFailure(Exception e) {
                LOGGER.debug(() -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e);
            }
        });
    }
}
 
Example #7
Source File: BlobRecoverySourceHandler.java    From Elasticsearch with Apache License 2.0
public BlobRecoverySourceHandler(final IndexShard shard, final StartRecoveryRequest request, final RecoverySettings recoverySettings,
                                 final TransportService transportService, final ESLogger logger,
                                 BlobTransferTarget blobTransferTarget, BlobIndices blobIndices) {
    super(shard, request, recoverySettings, transportService, logger);
    this.shard = shard;
    this.request = request;
    this.recoverySettings = recoverySettings;
    this.logger = logger;
    this.transportService = transportService;
    this.indexName = this.request.shardId().index().name();
    this.shardId = this.request.shardId().id();

    this.response = new RecoveryResponse();
    if (BlobIndices.isBlobIndex(shard.shardId().getIndex())) {
        blobRecoveryHandler = new BlobRecoveryHandler(
                transportService, recoverySettings, blobTransferTarget, blobIndices, shard, request);
    } else {
        blobRecoveryHandler = null;
    }
}
 
Example #8
Source File: RecoverySource.java    From Elasticsearch with Apache License 2.0
@Inject
public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,
                      RecoverySettings recoverySettings, ClusterService clusterService) {
    super(settings);
    this.transportService = transportService;
    this.indicesService = indicesService;
    this.clusterService = clusterService;
    this.indicesService.indicesLifecycle().addListener(new IndicesLifecycle.Listener() {
        @Override
        public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard,
                                           Settings indexSettings) {
            if (indexShard != null) {
                ongoingRecoveries.cancel(indexShard, "shard is closed");
            }
        }
    });

    this.recoverySettings = recoverySettings;

    transportService.registerRequestHandler(Actions.START_RECOVERY, StartRecoveryRequest.class, ThreadPool.Names.GENERIC, new StartRecoveryTransportRequestHandler());
}
 
Example #9
Source File: TransportWriteAction.java    From crate with Apache License 2.0
public WritePrimaryResult(ReplicaRequest request,
                          @Nullable Response finalResponse,
                          @Nullable Location location,
                          @Nullable Exception operationFailure,
                          IndexShard primary) {
    super(request, finalResponse, operationFailure);
    this.location = location;
    this.primary = primary;
    assert location == null || operationFailure == null
            : "expected either failure to be null or translog location to be null, " +
            "but found: [" + location + "] translog location and [" + operationFailure + "] failure";
    if (operationFailure != null) {
        this.finishedAsyncActions = true;
    } else {
        /*
         * We call this before replication because this might wait for a refresh and that can take a while.
         * This way we wait for the refresh in parallel on the primary and on the replica.
         */
        new AsyncAfterWriteAction(primary, location, this).run();
    }
}
 
Example #10
Source File: PercolatorQueriesRegistry.java    From Elasticsearch with Apache License 2.0
private int loadQueries(IndexShard shard) {
    shard.refresh("percolator_load_queries");
    // NOTE: we acquire the searcher via the engine directly here since this is executed right
    // before the shard is marked as POST_RECOVERY
    try (Engine.Searcher searcher = shard.engine().acquireSearcher("percolator_load_queries")) {
        Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
        QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
        IndexSearcher indexSearcher = new IndexSearcher(searcher.reader());
        indexSearcher.setQueryCache(null);
        indexSearcher.search(query, queryCollector);
        Map<BytesRef, Query> queries = queryCollector.queries();
        for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
            Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
            shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
        }
        return queries.size();
    } catch (Exception e) {
        throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
    }
}
 
Example #11
Source File: IndicesService.java    From crate with Apache License 2.0
@Override
public IndexShard createShard(ShardRouting shardRouting, RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService,
                              PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService,
                              Consumer<IndexShard.ShardFailure> onShardFailure,
                              Consumer<ShardId> globalCheckpointSyncer) throws IOException {
    ensureChangesAllowed();
    IndexService indexService = indexService(shardRouting.index());
    IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer);
    indexShard.addShardFailureCallback(onShardFailure);
    indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService,
        (type, mapping) -> {
            assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS :
                "mapping update consumer only required by local shards recovery";
            client.admin().indices().preparePutMapping()
                .setConcreteIndex(shardRouting.index()) // concrete index - no name clash, it uses uuid
                .setType(type)
                .setSource(mapping.source().string(), XContentType.JSON)
                .get();
        }, this);
    return indexShard;
}
 
Example #12
Source File: InternalCountOperation.java    From crate with Apache License 2.0
@Override
public long count(TransactionContext txnCtx, Index index, int shardId, Symbol filter) throws IOException, InterruptedException {
    IndexService indexService;
    try {
        indexService = indicesService.indexServiceSafe(index);
    } catch (IndexNotFoundException e) {
        if (IndexParts.isPartitioned(index.getName())) {
            return 0L;
        }
        throw e;
    }

    IndexShard indexShard = indexService.getShard(shardId);
    try (Engine.Searcher searcher = indexShard.acquireSearcher("count-operation")) {
        String indexName = indexShard.shardId().getIndexName();
        var relationName = RelationName.fromIndexName(indexName);
        DocTableInfo table = schemas.getTableInfo(relationName, Operation.READ);
        LuceneQueryBuilder.Context queryCtx = queryBuilder.convert(
            filter,
            txnCtx,
            indexService.mapperService(),
            indexName,
            indexService.newQueryShardContext(),
            table,
            indexService.cache()
        );
        if (Thread.interrupted()) {
            throw new InterruptedException("thread interrupted during count-operation");
        }
        return searcher.searcher().count(queryCtx.query());
    }
}
 
Example #13
Source File: SyncedFlushService.java    From Elasticsearch with Apache License 2.0
private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) {
    IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    IndexShard indexShard = indexService.shardSafe(request.shardId().id());
    if (indexShard.routingEntry().primary() == false) {
        throw new IllegalStateException("[" + request.shardId() + "] expected a primary shard");
    }
    int opCount = indexShard.getOperationsCount();
    logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
    return new InFlightOpsResponse(opCount);
}
 
Example #14
Source File: IndexVersionShardService.java    From siren-join with GNU Affero General Public License v3.0
@Inject
public IndexVersionShardService(ShardId shardId, Settings indexSettings, IndexShard indexShard) {
  super(shardId, indexSettings);
  this.indexShard = indexShard;
  this.versioningIndexingOperationListener = new VersioningIndexingOperationListener();
  indexShard.indexingService().addListener(versioningIndexingOperationListener);
  version = new AtomicLong(System.nanoTime()); // initialise version number based on time to ensure uniqueness even if shard restarted
}
 
Example #15
Source File: IndicesTTLService.java    From Elasticsearch with Apache License 2.0
/**
 * Returns the shards to purge, i.e. the local started primary shards that have ttl enabled and disable_purge to false
 */
private List<IndexShard> getShardsToPurge() {
    List<IndexShard> shardsToPurge = new ArrayList<>();
    MetaData metaData = clusterService.state().metaData();
    for (IndexService indexService : indicesService) {
        // check the value of disable_purge for this index
        IndexMetaData indexMetaData = metaData.index(indexService.index().name());
        if (indexMetaData == null) {
            continue;
        }
        boolean disablePurge = indexMetaData.getSettings().getAsBoolean(INDEX_TTL_DISABLE_PURGE, false);
        if (disablePurge) {
            continue;
        }

        // check if ttl is enabled for at least one type of this index
        boolean hasTTLEnabled = false;
        for (String type : indexService.mapperService().types()) {
            DocumentMapper documentType = indexService.mapperService().documentMapper(type);
            if (documentType.TTLFieldMapper().enabled()) {
                hasTTLEnabled = true;
                break;
            }
        }
        if (hasTTLEnabled) {
            for (IndexShard indexShard : indexService) {
                if (indexShard.state() == IndexShardState.STARTED && indexShard.routingEntry().primary() && indexShard.routingEntry().started()) {
                    shardsToPurge.add(indexShard);
                }
            }
        }
    }
    return shardsToPurge;
}
 
Example #16
Source File: IndicesService.java    From Elasticsearch with Apache License 2.0
@Override
public synchronized void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard,
                                                Settings indexSettings) {
    if (indexShard != null) {
        getStats.addTotals(indexShard.getStats());
        indexingStats.addTotals(indexShard.indexingStats());
        searchStats.addTotals(indexShard.searchStats());
        mergeStats.addTotals(indexShard.mergeStats());
        refreshStats.addTotals(indexShard.refreshStats());
        flushStats.addTotals(indexShard.flushStats());
        recoveryStats.addTotals(indexShard.recoveryStats());
    }
}
 
Example #17
Source File: TransportShardMultiGetAction.java    From Elasticsearch with Apache License 2.0
@Override
protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, ShardId shardId) {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardId.id());

    if (request.refresh() && !request.realtime()) {
        indexShard.refresh("refresh_flag_mget");
    }

    MultiGetShardResponse response = new MultiGetShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
        MultiGetRequest.Item item = request.items.get(i);
        try {
            GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(), item.versionType(), item.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
            response.add(request.locations.get(i), new GetResponse(getResult));
        } catch (Throwable t) {
            if (TransportActions.isShardNotAvailableException(t)) {
                throw (ElasticsearchException) t;
            } else {
                logger.debug("{} failed to execute multi_get for [{}]/[{}]", t, shardId, item.type(), item.id());
                response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), t));
            }
        }
    }

    return response;
}
 
Example #18
Source File: PeerRecoveryTargetService.java    From crate with Apache License 2.0
public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) {
    // create a new recovery status, and process...
    final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
    // we fork off quickly here and go async but this is called from the cluster state applier
    // thread too and that can cause assertions to trip if we executed it on the same thread
    // hence we fork off to the generic threadpool.
    threadPool.generic().execute(new RecoveryRunner(recoveryId));
}
 
Example #19
Source File: SQLTransportIntegrationTest.java    From crate with Apache License 2.0
public void waitUntilShardOperationsFinished() throws Exception {
    assertBusy(() -> {
        Iterable<IndicesService> indexServices = internalCluster().getInstances(IndicesService.class);
        for (IndicesService indicesService : indexServices) {
            for (IndexService indexService : indicesService) {
                for (IndexShard indexShard : indexService) {
                    assertThat(indexShard.getActiveOperationsCount(), equalTo(0));
                }
            }
        }
    }, 5, TimeUnit.SECONDS);
}
 
Example #20
Source File: InternalTestCluster.java    From crate with Apache License 2.0
private void assertSameSyncIdSameDocs() {
    Map<String, Long> docsOnShards = new HashMap<>();
    final Collection<NodeAndClient> nodesAndClients = nodes.values();
    for (NodeAndClient nodeAndClient : nodesAndClients) {
        IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
        for (IndexService indexService : indexServices) {
            for (IndexShard indexShard : indexService) {
                try {
                    CommitStats commitStats = indexShard.commitStats();
                    String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
                    if (syncId != null) {
                        long liveDocsOnShard = commitStats.getNumDocs();
                        if (docsOnShards.get(syncId) != null) {
                            assertThat("sync id is equal but number of docs does not match on node "
                                + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got "
                                + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard));
                        } else {
                            docsOnShards.put(syncId, liveDocsOnShard);
                        }
                    }
                } catch (AlreadyClosedException e) {
                    // ignore: the engine is closed or the shard is recovering
                }
            }
        }
    }
}
 
Example #21
Source File: RecoveryStatus.java    From Elasticsearch with Apache License 2.0
public RecoveryStatus(IndexShard indexShard, DiscoveryNode sourceNode, RecoveryTarget.RecoveryListener listener) {
    super("recovery_status");
    this.recoveryId = idGenerator.incrementAndGet();
    this.listener = listener;
    this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings(), indexShard.shardId());
    this.indexShard = indexShard;
    this.sourceNode = sourceNode;
    this.shardId = indexShard.shardId();
    this.tempFilePrefix = RECOVERY_PREFIX + indexShard.recoveryState().getTimer().startTime() + ".";
    this.store = indexShard.store();
    // make sure the store is not released until we are done.
    store.incRef();
    indexShard.recoveryStats().incCurrentAsTarget();
}
 
Example #22
Source File: InternalIndicesLifecycle.java    From Elasticsearch with Apache License 2.0
public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) {
    for (Listener listener : listeners) {
        try {
            listener.shardRoutingChanged(indexShard, oldRouting, newRouting);
        } catch (Throwable t) {
            logger.warn("{} failed to invoke shard routing changed callback", t, indexShard.shardId());
        }
    }
}
 
Example #23
Source File: IndexService.java    From crate with Apache License 2.0
private void maybeRefreshEngine() {
    if (indexSettings.getRefreshInterval().millis() > 0) {
        for (IndexShard shard : this.shards.values()) {
            if (shard.isReadAllowed()) {
                try {
                    if (shard.isRefreshNeeded()) {
                        shard.refresh("schedule");
                    }
                } catch (IndexShardClosedException | AlreadyClosedException ex) {
                    // fine - continue;
                }
            }
        }
    }
}
 
Example #24
Source File: AppendLuceneTransportAction.java    From ES-Fastloader with Apache License 2.0
private void doExecuteCore(AppendLuceneRequest request, ActionListener<AppendLuceneResponse> listener) {
    try {
        // Validate the request
        request.check();

        // Resolve the target shard
        ShardId shardId = new ShardId(request.indexName, request.uuid, request.shardId);
        IndexShard shard = indicesService.getShardOrNull(shardId);
        if (shard == null) {
            throw new Exception("shard not found, indexName:" + request.indexName + ", shardId:" + request.shardId);
        }

        // Obtain Lucene's IndexWriter for the shard
        /* FIXME: this requires modifying Elasticsearch's code so the Lucene IndexWriter is exposed to plugins */
        InternalEngine engine = (InternalEngine) shard.getEngineOrNull();
        IndexWriter indexWriter = engine.getIndexWriter();

        // Handle primary-key conflicts
        long deleteCount = -1;
        List<String> appendDirs = request.getAppendDirs();
        if (request.primeKey != null && request.primeKey.length() > 0) {
            deleteCount = doPrimerKey(appendDirs, indexWriter, request.primeKey);
        }

        // Add the new Lucene files to the shard
        Directory[] indexes = new Directory[appendDirs.size()];
        for (int i = 0; i < appendDirs.size(); i++) {
            indexes[i] = FSDirectory.open(Paths.get(appendDirs.get(i)));
        }
        indexWriter.addIndexes(indexes);
        indexWriter.commit();

        // Build the response
        AppendLuceneResponse response = new AppendLuceneResponse();
        response.deleteCount = deleteCount;
        listener.onResponse(response);
    } catch (Exception e) {
        listener.onFailure(e);
    }
}
 
Example #25
Source File: PeerRecoverySourceService.java    From crate with Apache License 2.0
private RecoverySourceHandler createRecoverySourceHandler(StartRecoveryRequest request, IndexShard shard) {
    RecoverySourceHandler handler;
    final RemoteRecoveryTargetHandler recoveryTarget = new RemoteRecoveryTargetHandler(
        request.recoveryId(),
        request.shardId(),
        transportService,
        request.targetNode(),
        recoverySettings,
        throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime));

    // CRATE_PATCH: used to inject BlobRecoveryHandler
    int recoveryChunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt();
    handler = getCustomRecoverySourceHandler(
        shard,
        recoveryTarget,
        request,
        recoveryChunkSizeInBytes
    );

    if (handler != null) {
        return handler;
    } else {
        return new RecoverySourceHandler(
            shard,
            recoveryTarget,
            request,
            recoveryChunkSizeInBytes,
            recoverySettings.getMaxConcurrentFileChunks());
    }
}
 
Example #26
Source File: ShardSegments.java    From crate with Apache License 2.0
private Stream<ShardSegment> buildShardSegment(IndexShard indexShard) {
    try {
        List<Segment> segments = indexShard.segments(false);
        ShardId shardId = indexShard.shardId();
        return segments.stream().map(
            sgmt -> new ShardSegment(shardId.getId(),
                                     shardId.getIndexName(),
                                     sgmt,
                                     indexShard.routingEntry().primary()));
    } catch (AlreadyClosedException ignored) {
        return Stream.empty();
    }
}
 
Example #27
Source File: ShardCollectorProvider.java    From crate with Apache License 2.0
ShardCollectorProvider(ClusterService clusterService,
                       Schemas schemas,
                       NodeJobsCounter nodeJobsCounter,
                       Functions functions,
                       ThreadPool threadPool,
                       Settings settings,
                       TransportActionProvider transportActionProvider,
                       IndexShard indexShard,
                       ShardRowContext shardRowContext) {
    this.shardRowContext = shardRowContext;
    shardNormalizer = new EvaluatingNormalizer(
        functions,
        RowGranularity.SHARD,
        new ShardReferenceResolver(schemas, shardRowContext),
        null
    );
    projectorFactory = new ProjectionToProjectorVisitor(
        clusterService,
        nodeJobsCounter,
        functions,
        threadPool,
        settings,
        transportActionProvider,
        new InputFactory(functions),
        shardNormalizer,
        t -> null,
        t -> null,
        indexShard.indexSettings().getIndexVersionCreated(),
        indexShard.shardId()
    );
}
 
Example #28
Source File: RecoverySourceHandlerTests.java    From crate with Apache License 2.0
@Test
public void testSendSnapshotStopOnError() throws Exception {
    final int fileChunkSizeInBytes = between(1, 10 * 1024);
    final StartRecoveryRequest request = getStartRecoveryRequest();
    final IndexShard shard = mock(IndexShard.class);
    when(shard.state()).thenReturn(IndexShardState.STARTED);
    final List<Translog.Operation> ops = new ArrayList<>();
    for (int numOps = between(1, 256), i = 0; i < numOps; i++) {
        final Engine.Index index = getIndex(Integer.toString(i));
        ops.add(new Translog.Index(index, new Engine.IndexResult(1, 1, i, true)));
    }
    final AtomicBoolean wasFailed = new AtomicBoolean();
    RecoveryTargetHandler recoveryTarget = new TestRecoveryTargetHandler() {
        @Override
        public void indexTranslogOperations(List<Translog.Operation> operations, int totalTranslogOps, long timestamp,
                                            long msu, ActionListener<Long> listener) {
            if (randomBoolean()) {
                maybeExecuteAsync(() -> listener.onResponse(SequenceNumbers.NO_OPS_PERFORMED));
            } else {
                maybeExecuteAsync(() -> listener.onFailure(new RuntimeException("test - failed to index")));
                wasFailed.set(true);
            }
        }
    };
    RecoverySourceHandler handler = new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes, between(1, 10));
    PlainActionFuture<RecoverySourceHandler.SendSnapshotResult> future = new PlainActionFuture<>();
    final long startingSeqNo = randomLongBetween(0, ops.size() - 1L);
    final long endingSeqNo = randomLongBetween(startingSeqNo, ops.size() - 1L);
    handler.phase2(startingSeqNo, startingSeqNo, endingSeqNo, newTranslogSnapshot(ops, Collections.emptyList()),
                   randomNonNegativeLong(), randomNonNegativeLong(), future);
    if (wasFailed.get()) {
        assertThat(expectThrows(RuntimeException.class, future::actionGet).getMessage(), equalTo("test - failed to index"));
    }
}
 
Example #29
Source File: CrateSearchContext.java    From Elasticsearch with Apache License 2.0
private CrateSearchShardRequest(long nowInMillis, Optional<Scroll> scroll,
                                IndexShard indexShard) {
    this.nowInMillis = nowInMillis;
    this.scroll = scroll.orNull();
    this.index = indexShard.indexService().index().name();
    this.shardId = indexShard.shardId().id();
}
 
Example #30
Source File: TransportShardFlushAction.java    From Elasticsearch with Apache License 2.0
@Override
protected Tuple<ActionWriteResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
    IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).shardSafe(shardRequest.shardId().id());
    indexShard.flush(shardRequest.getRequest());
    logger.trace("{} flush request executed on primary", indexShard.shardId());
    return new Tuple<>(new ActionWriteResponse(), shardRequest);
}