org.apache.lucene.store.RateLimiter Java Examples

The following examples show how to use org.apache.lucene.store.RateLimiter. They are taken from open source projects (Apache Lucene/Solr, Elasticsearch, and crate); the source file and project are listed above each example.
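
Before the project-specific examples, a minimal sketch of the common usage pattern may be useful. The ThrottledCopy class and its copy method below are illustrative names, not taken from any of the projects listed; only the RateLimiter calls (SimpleRateLimiter, getMinPauseCheckBytes(), pause()) are the Lucene API that the examples below rely on.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.lucene.store.RateLimiter;

// Minimal sketch: accumulate bytes and only call pause() once the running total
// exceeds getMinPauseCheckBytes(), so tiny reads do not pay the pause overhead.
public class ThrottledCopy {

    public static long copy(InputStream in, OutputStream out, double mbPerSec) throws IOException {
        RateLimiter limiter = new RateLimiter.SimpleRateLimiter(mbPerSec);
        byte[] buffer = new byte[8192];
        long bytesSinceLastPause = 0;
        long totalPausedNanos = 0;
        int read;
        while ((read = in.read(buffer)) != -1) {
            out.write(buffer, 0, read);
            bytesSinceLastPause += read;
            if (bytesSinceLastPause > limiter.getMinPauseCheckBytes()) {
                // pause() returns how long it actually slept, in nanoseconds
                totalPausedNanos += limiter.pause(bytesSinceLastPause);
                bytesSinceLastPause = 0;
            }
        }
        return totalPausedNanos;
    }
}

The same accumulate-then-pause pattern, with project-specific wiring, appears throughout the recovery and snapshot examples below.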
Example #1
Source File: SimpleReplicaNode.java    From lucene-solr with Apache License 2.0
public SimpleReplicaNode(Random random, int id, int tcpPort, Path indexPath, long curPrimaryGen, int primaryTCPPort,
                         SearcherFactory searcherFactory, boolean doCheckIndexOnClose) throws IOException {
  super(id, getDirectory(random, id, indexPath, doCheckIndexOnClose), searcherFactory, System.out);
  this.tcpPort = tcpPort;
  this.random = new Random(random.nextLong());

  // Random IO throttling on file copies: 5 - 20 MB/sec:
  double mbPerSec = 5 * (1.0 + 3*random.nextDouble());
  message(String.format(Locale.ROOT, "top: will rate limit file fetch to %.2f MB/sec", mbPerSec));
  fetchRateLimiter = new RateLimiter.SimpleRateLimiter(mbPerSec);
  this.curPrimaryTCPPort = primaryTCPPort;
  
  start(curPrimaryGen);

  // Handles fetching files from primary:
  jobs = new Jobs(this);
  jobs.setName("R" + id + ".copyJobs");
  jobs.setDaemon(true);
  jobs.start();
}
 
Example #2
Source File: ReplicationHandler.java    From lucene-solr with Apache License 2.0
public DirectoryFileStream(SolrParams solrParams) {
  params = solrParams;
  delPolicy = core.getDeletionPolicy();

  fileName = validateFilenameOrError(params.get(FILE));
  cfileName = validateFilenameOrError(params.get(CONF_FILE_SHORT));
  tlogFileName = validateFilenameOrError(params.get(TLOG_FILE));
  
  sOffset = params.get(OFFSET);
  sLen = params.get(LEN);
  compress = params.get(COMPRESSION);
  useChecksum = params.getBool(CHECKSUM, false);
  indexGen = params.getLong(GENERATION);
  if (useChecksum) {
    checksum = new Adler32();
  }
  //No throttle if MAX_WRITE_PER_SECOND is not specified
  double maxWriteMBPerSec = params.getDouble(MAX_WRITE_PER_SECOND, Double.MAX_VALUE);
  rateLimiter = new RateLimiter.SimpleRateLimiter(maxWriteMBPerSec);
}
 
Example #3
Source File: BlobStoreRepository.java    From Elasticsearch with Apache License 2.0
/**
 * Configures RateLimiter based on repository and global settings
 *
 * @param repositorySettings repository settings
 * @param setting            setting to use to configure rate limiter
 * @param defaultRate        default limiting rate
 * @return rate limiter, or null if no throttling is needed
 */
private RateLimiter getRateLimiter(RepositorySettings repositorySettings, String setting, ByteSizeValue defaultRate) {
    ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.settings().getAsBytesSize(setting,
            settings.getAsBytesSize(setting, defaultRate));
    if (maxSnapshotBytesPerSec.bytes() <= 0) {
        return null;
    } else {
        return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.mbFrac());
    }
}
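
As a standalone illustration of the convention above (a size of zero or less means no throttling, anything else becomes a SimpleRateLimiter), the following sketch uses class and method names that are purely illustrative, not from the Elasticsearch source. It also uses the older ByteSizeValue accessors bytes() and mbFrac() seen in this example; Example #7 below shows the newer getBytes()/getMbFrac() variants.

import org.apache.lucene.store.RateLimiter;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class SnapshotThrottling {

    // null means "do not throttle", mirroring getRateLimiter(...) above
    static RateLimiter limiterFor(ByteSizeValue maxBytesPerSec) {
        if (maxBytesPerSec.bytes() <= 0) {
            return null;
        }
        return new RateLimiter.SimpleRateLimiter(maxBytesPerSec.mbFrac());
    }

    public static void main(String[] args) {
        System.out.println(limiterFor(new ByteSizeValue(0, ByteSizeUnit.BYTES)));            // null -> unthrottled
        System.out.println(limiterFor(new ByteSizeValue(40, ByteSizeUnit.MB)).getMBPerSec()); // 40.0
    }
}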
 
Example #4
Source File: BlobStoreIndexShardRepository.java    From Elasticsearch with Apache License 2.0
/**
 * Called by {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} on repository startup
 *
 * @param blobStore blob store
 * @param basePath  base path to blob store
 * @param chunkSize chunk size
 */
public void initialize(BlobStore blobStore, BlobPath basePath, ByteSizeValue chunkSize,
                       RateLimiter snapshotRateLimiter, RateLimiter restoreRateLimiter,
                       final RateLimiterListener rateLimiterListener, boolean compress) {
    this.blobStore = blobStore;
    this.basePath = basePath;
    this.chunkSize = chunkSize;
    this.snapshotRateLimiter = snapshotRateLimiter;
    this.restoreRateLimiter = restoreRateLimiter;
    this.rateLimiterListener = rateLimiterListener;
    this.snapshotThrottleListener = new RateLimitingInputStream.Listener() {
        @Override
        public void onPause(long nanos) {
            rateLimiterListener.onSnapshotPause(nanos);
        }
    };
    this.restoreThrottleListener = new RateLimitingInputStream.Listener() {
        @Override
        public void onPause(long nanos) {
            rateLimiterListener.onRestorePause(nanos);
        }
    };
    this.compress = compress;
    indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress());
    indexShardSnapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher);
    indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, BlobStoreIndexShardSnapshots.PROTO, parseFieldMatcher, isCompress());
}
 
Example #5
Source File: RemoteRecoveryTargetHandler.java    From crate with Apache License 2.0
@Override
public void writeFileChunk(StoreFileMetaData fileMetaData,
                           long position,
                           BytesReference content,
                           boolean lastChunk,
                           int totalTranslogOps,
                           ActionListener<Void> listener) {
    // Pause using the rate limiter, if desired, to throttle the recovery
    final long throttleTimeInNanos;
    // always fetch the ratelimiter - it might be updated in real-time on the recovery settings
    final RateLimiter rl = recoverySettings.rateLimiter();
    if (rl != null) {
        long bytes = bytesSinceLastPause.addAndGet(content.length());
        if (bytes > rl.getMinPauseCheckBytes()) {
            // Time to pause
            bytesSinceLastPause.addAndGet(-bytes);
            try {
                throttleTimeInNanos = rl.pause(bytes);
                onSourceThrottle.accept(throttleTimeInNanos);
            } catch (IOException e) {
                throw new ElasticsearchException("failed to pause recovery", e);
            }
        } else {
            throttleTimeInNanos = 0;
        }
    } else {
        throttleTimeInNanos = 0;
    }

    transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.FILE_CHUNK,
        new RecoveryFileChunkRequest(recoveryId, shardId, fileMetaData, position, content, lastChunk,
            totalTranslogOps,
            /* We send estimateTotalOperations with every request since we collect stats on the target; that way we can
             * see how many translog ops accumulate while copying files across the network. A future optimization
             * would be to restart the file copy (new deltas) if too many translog ops pile up.
             */
            throttleTimeInNanos), fileChunkRequestOptions, new ActionListenerResponseHandler<>(
            ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure), in -> TransportResponse.Empty.INSTANCE));
}
 
Example #6
Source File: PeerRecoveryTargetService.java    From crate with Apache License 2.0
@Override
public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel, Task task) throws Exception {
    try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) {
        final RecoveryTarget recoveryTarget = recoveryRef.target();
        final RecoveryState.Index indexState = recoveryTarget.state().getIndex();
        if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
            indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
        }

        RateLimiter rateLimiter = recoverySettings.rateLimiter();
        if (rateLimiter != null) {
            long bytes = bytesSinceLastPause.addAndGet(request.content().length());
            if (bytes > rateLimiter.getMinPauseCheckBytes()) {
                // Time to pause
                bytesSinceLastPause.addAndGet(-bytes);
                long throttleTimeInNanos = rateLimiter.pause(bytes);
                indexState.addTargetThrottling(throttleTimeInNanos);
                recoveryTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
            }
        }

        final ActionListener<TransportResponse> listener =
            new HandledTransportAction.ChannelActionListener<>(channel, Actions.FILE_CHUNK, request);
        recoveryTarget.writeFileChunk(
            request.metadata(),
            request.position(),
            request.content(),
            request.lastChunk(),
            request.totalTranslogOps(),
            ActionListener.wrap(
                nullVal -> listener.onResponse(TransportResponse.Empty.INSTANCE),
                listener::onFailure)
        );
    }
    // The ChannelActionListener above sends the transport response once the chunk write completes,
    // so no additional channel.sendResponse(...) call is needed here.
}
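
Examples #5 and #6 share the same pattern: chunk sizes from several threads are added to one AtomicLong, and only the thread that pushes the running total past getMinPauseCheckBytes() actually pays the pause. A stripped-down sketch of just that pattern (class and method names are illustrative, not from the crate source) could look like this:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.lucene.store.RateLimiter;

public class ChunkThrottle {

    private final AtomicLong bytesSinceLastPause = new AtomicLong();
    private final RateLimiter rateLimiter;

    public ChunkThrottle(RateLimiter rateLimiter) {
        this.rateLimiter = rateLimiter;
    }

    /** Returns the nanoseconds actually spent pausing for this chunk (0 if none). */
    public long maybePause(int chunkLength) throws IOException {
        long bytes = bytesSinceLastPause.addAndGet(chunkLength);
        if (bytes > rateLimiter.getMinPauseCheckBytes()) {
            // Reset the counter before pausing, exactly as in the examples above
            bytesSinceLastPause.addAndGet(-bytes);
            return rateLimiter.pause(bytes);
        }
        return 0;
    }
}

Resetting the counter by subtracting bytes (rather than setting it to zero) preserves any additions made by other threads in the meantime.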
 
Example #7
Source File: BlobStoreRepository.java    From crate with Apache License 2.0
/**
 * Configures RateLimiter based on repository and global settings
 *
 * @param repositorySettings repository settings
 * @param setting            setting to use to configure rate limiter
 * @param defaultRate        default limiting rate
 * @return rate limiter, or null if no throttling is needed
 */
private RateLimiter getRateLimiter(Settings repositorySettings, String setting, ByteSizeValue defaultRate) {
    ByteSizeValue maxSnapshotBytesPerSec = repositorySettings.getAsBytesSize(setting,
            settings.getAsBytesSize(setting, defaultRate));
    if (maxSnapshotBytesPerSec.getBytes() <= 0) {
        return null;
    } else {
        return new RateLimiter.SimpleRateLimiter(maxSnapshotBytesPerSec.getMbFrac());
    }
}
 
Example #8
Source File: RecoverySettings.java    From Elasticsearch with Apache License 2.0
public RateLimiter rateLimiter() {
    return rateLimiter;
}
 
Example #9
Source File: RateLimitingInputStream.java    From Elasticsearch with Apache License 2.0
public RateLimitingInputStream(InputStream delegate, RateLimiter rateLimiter, Listener listener) {
    super(delegate);
    this.rateLimiter = rateLimiter;
    this.listener = listener;
}
 
Example #10
Source File: RecoverySettings.java    From crate with Apache License 2.0
public RateLimiter rateLimiter() {
    return rateLimiter;
}
 
Example #11
Source File: RateLimitingInputStream.java    From crate with Apache License 2.0
public RateLimitingInputStream(InputStream delegate, RateLimiter rateLimiter, Listener listener) {
    super(delegate);
    this.rateLimiter = rateLimiter;
    this.listener = listener;
}