Java Code Examples for org.elasticsearch.common.settings.Settings#getAsDouble()

The following examples show how to use org.elasticsearch.common.settings.Settings#getAsDouble(). They are drawn from open-source projects; the source file and license are noted above each snippet.
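
Before the project examples, here is a minimal self-contained sketch of the method's behavior. It assumes the 2.x-era Settings API that the examples below use (Settings.settingsBuilder(); later versions renamed this to Settings.builder()), and the setting keys are purely illustrative: when the key is present its string value is parsed as a double, and when it is absent the supplied default (which may be null) is returned.

import org.elasticsearch.common.settings.Settings;

public class GetAsDoubleDemo {
    public static void main(String[] args) {
        // Build an in-memory Settings object with one double-valued key.
        Settings settings = Settings.settingsBuilder()
                .put("indices.breaker.query.overhead", "1.09")
                .build();

        // Present key: the string value is parsed into a double.
        double overhead = settings.getAsDouble("indices.breaker.query.overhead", 1.0);

        // Absent key: the supplied default is returned; a null default lets
        // callers distinguish "not configured" from any real value.
        Double limit = settings.getAsDouble("indices.breaker.query.limit", null);

        System.out.println(overhead); // 1.09
        System.out.println(limit);    // null
    }
}
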
Example 1
Source File: CrateCircuitBreakerService.java    From Elasticsearch with Apache License 2.0
@Inject
public CrateCircuitBreakerService(Settings settings,
                                  NodeSettingsService nodeSettingsService,
                                  CircuitBreakerService esCircuitBreakerService) {
    super(settings);
    this.esCircuitBreakerService = esCircuitBreakerService;

    long memoryLimit = settings.getAsMemory(
            QUERY_CIRCUIT_BREAKER_LIMIT_SETTING,
            DEFAULT_QUERY_CIRCUIT_BREAKER_LIMIT).bytes();
    double overhead = settings.getAsDouble(
            QUERY_CIRCUIT_BREAKER_OVERHEAD_SETTING,
            DEFAULT_QUERY_CIRCUIT_BREAKER_OVERHEAD_CONSTANT);

    queryBreakerSettings = new BreakerSettings(QUERY, memoryLimit, overhead,
            CircuitBreaker.Type.parseValue(
                    settings.get(QUERY_CIRCUIT_BREAKER_TYPE_SETTING,
                    DEFAULT_QUERY_CIRCUIT_BREAKER_TYPE)));

    registerBreaker(queryBreakerSettings);
    nodeSettingsService.addListener(new ApplySettings());
}
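
The overhead read via getAsDouble here is a multiplier: the circuit breaker multiplies each memory estimate by it before comparing against the limit. A sketch of that arithmetic with illustrative numbers (this is not the actual BreakerSettings implementation):

// Illustrative arithmetic only; not the real BreakerSettings internals.
long memoryLimit = 100L * 1024L * 1024L; // e.g. "100mb" resolved by getAsMemory(...)
double overhead = 1.09;                  // e.g. from getAsDouble(...)

long estimatedBytes = 95L * 1024L * 1024L;
boolean wouldTrip = estimatedBytes * overhead > memoryLimit; // true: 95mb * 1.09 > 100mb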
 
Example 2
Source File: CrateCircuitBreakerService.java    From Elasticsearch with Apache License 2.0
@Override
public void onRefreshSettings(Settings settings) {

    // Query breaker settings
    long newQueryMax = settings.getAsMemory(
            QUERY_CIRCUIT_BREAKER_LIMIT_SETTING,
            CrateCircuitBreakerService.this.settings.getAsMemory(
                    QUERY_CIRCUIT_BREAKER_LIMIT_SETTING,
                    DEFAULT_QUERY_CIRCUIT_BREAKER_LIMIT
            ).toString()).bytes();
    Double newQueryOverhead = settings.getAsDouble(
            QUERY_CIRCUIT_BREAKER_OVERHEAD_SETTING,
            CrateCircuitBreakerService.this.settings.getAsDouble(
                    QUERY_CIRCUIT_BREAKER_OVERHEAD_SETTING,
                    DEFAULT_QUERY_CIRCUIT_BREAKER_OVERHEAD_CONSTANT
            ));
    if (newQueryMax != CrateCircuitBreakerService.this.queryBreakerSettings.getLimit()
            || newQueryOverhead != CrateCircuitBreakerService.this.queryBreakerSettings.getOverhead()) {

        BreakerSettings newQuerySettings = new BreakerSettings(
                QUERY, newQueryMax, newQueryOverhead,
                CrateCircuitBreakerService.this.queryBreakerSettings.getType());
        registerBreaker(newQuerySettings);
    }
}
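
The nested getAsDouble call above implements a two-level fallback: the dynamically refreshed settings win, then the node-level settings, then the compiled-in constant. A hypothetical helper (not part of CrateCircuitBreakerService) distilling that chain:

static double resolveOverhead(Settings updated, Settings nodeSettings,
                              String key, double hardDefault) {
    // Inner call: node-level value, or the constant if unset.
    // Outer call: dynamically updated value, or the inner result if unset.
    return updated.getAsDouble(key, nodeSettings.getAsDouble(key, hardDefault));
}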
 
Example 3
Source File: MergePolicyConfig.java    From Elasticsearch with Apache License 2.0
public MergePolicyConfig(ESLogger logger, Settings indexSettings) {
    this.logger = logger;
    this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO)));
    double forceMergeDeletesPctAllowed = indexSettings.getAsDouble("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED); // percentage
    ByteSizeValue floorSegment = indexSettings.getAsBytesSize("index.merge.policy.floor_segment", DEFAULT_FLOOR_SEGMENT);
    int maxMergeAtOnce = indexSettings.getAsInt("index.merge.policy.max_merge_at_once", DEFAULT_MAX_MERGE_AT_ONCE);
    int maxMergeAtOnceExplicit = indexSettings.getAsInt("index.merge.policy.max_merge_at_once_explicit", DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
    // TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments?
    ByteSizeValue maxMergedSegment = indexSettings.getAsBytesSize("index.merge.policy.max_merged_segment", DEFAULT_MAX_MERGED_SEGMENT);
    double segmentsPerTier = indexSettings.getAsDouble("index.merge.policy.segments_per_tier", DEFAULT_SEGMENTS_PER_TIER);
    double reclaimDeletesWeight = indexSettings.getAsDouble("index.merge.policy.reclaim_deletes_weight", DEFAULT_RECLAIM_DELETES_WEIGHT);
    this.mergesEnabled = indexSettings.getAsBoolean(INDEX_MERGE_ENABLED, true);
    if (mergesEnabled == false) {
        logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
    }
    maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
    mergePolicy.setNoCFSRatio(noCFSRatio);
    mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed);
    mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
    mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
    mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
    mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
    mergePolicy.setSegmentsPerTier(segmentsPerTier);
    mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
    logger.debug("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}]",
            forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, reclaimDeletesWeight);
}
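
All of the index.merge.policy.* keys read above are ordinary string settings, so overriding them only requires putting string values into the index settings. A minimal sketch with illustrative values (the defaults shown in the constructor still apply to any key left out):

// Illustrative values only.
Settings indexSettings = Settings.settingsBuilder()
        .put("index.merge.policy.expunge_deletes_allowed", "15.0") // percentage
        .put("index.merge.policy.segments_per_tier", "5.0")
        .put("index.merge.policy.reclaim_deletes_weight", "2.0")
        .build();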
 
Example 4
Source File: DecompoundTokenFilterFactory.java    From elasticsearch-plugin-bundle with GNU Affero General Public License v3.0
private Decompounder createDecompounder(Settings settings) {
    try {
        String forward = settings.get("forward", "kompVVic.tree");
        String backward = settings.get("backward", "kompVHic.tree");
        String reduce = settings.get("reduce", "grfExt.tree");
        double threshold = settings.getAsDouble("threshold", 0.51d);
        return new Decompounder(getClass().getResourceAsStream(forward),
                getClass().getResourceAsStream(backward),
                getClass().getResourceAsStream(reduce),
                threshold);
    } catch (Exception e) {
        throw new ElasticsearchException("decompounder resources in settings not found: " + settings, e);
    }
}
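
Note that getAsDouble does not fall back to the default when a value is present but malformed; in the 2.x-era implementation (stated here as an assumption about this version) an unparseable string raises org.elasticsearch.common.settings.SettingsException, which is one reason this factory wraps its setup in a try/catch:

// Assumed 2.x behavior: a malformed value throws rather than defaulting.
Settings bad = Settings.settingsBuilder().put("threshold", "half").build();
try {
    bad.getAsDouble("threshold", 0.51d);
} catch (SettingsException e) {
    // "half" cannot be parsed as a double; the 0.51d default is NOT used.
}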
 
Example 5
Source File: DoubleSetting.java    From Elasticsearch with Apache License 2.0
@Override
public Double extract(Settings settings) {
    return settings.getAsDouble(settingName(), defaultValue());
}
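
DoubleSetting pairs a setting name with a default, and extract() simply delegates to getAsDouble. A sketch of wiring one up, assuming (this is an inference from the snippet, not confirmed API) that settingName() and defaultValue() are the members a concrete subclass supplies:

// Assumed shape, inferred from the extract() implementation above.
DoubleSetting queryOverhead = new DoubleSetting() {
    @Override
    public String settingName() {
        return "indices.breaker.query.overhead"; // illustrative key
    }

    @Override
    public Double defaultValue() {
        return 1.09d;
    }
};

Double overhead = queryOverhead.extract(settings);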
 
Example 6
Source File: HierarchyCircuitBreakerService.java    From Elasticsearch with Apache License 2.0
@Inject
public HierarchyCircuitBreakerService(Settings settings, NodeSettingsService nodeSettingsService) {
    super(settings);

    // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING
    // setting to keep backwards compatibility with 1.3, it can be safely
    // removed when compatibility with 1.3 is no longer needed
    String compatibilityFielddataLimitDefault = DEFAULT_FIELDDATA_BREAKER_LIMIT;
    ByteSizeValue compatibilityFielddataLimit = settings.getAsMemory(OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING, null);
    if (compatibilityFielddataLimit != null) {
        compatibilityFielddataLimitDefault = compatibilityFielddataLimit.toString();
    }

    // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING
    // setting to keep backwards compatibility with 1.3, it can be safely
    // removed when compatibility with 1.3 is no longer needed
    double compatibilityFielddataOverheadDefault = DEFAULT_FIELDDATA_OVERHEAD_CONSTANT;
    Double compatibilityFielddataOverhead = settings.getAsDouble(OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING, null);
    if (compatibilityFielddataOverhead != null) {
        compatibilityFielddataOverheadDefault = compatibilityFielddataOverhead;
    }

    this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA,
            settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, compatibilityFielddataLimitDefault).bytes(),
            settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, compatibilityFielddataOverheadDefault),
            CircuitBreaker.Type.parseValue(settings.get(FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE))
    );

    this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST,
            settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_REQUEST_BREAKER_LIMIT).bytes(),
            settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0),
            CircuitBreaker.Type.parseValue(settings.get(REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE))
    );

    this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT,
            settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT).bytes(), 1.0, CircuitBreaker.Type.PARENT);
    if (logger.isTraceEnabled()) {
        logger.trace("parent circuit breaker with settings {}", this.parentSettings);
    }

    registerBreaker(this.requestSettings);
    registerBreaker(this.fielddataSettings);

    nodeSettingsService.addListener(new ApplySettings());
}
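
The null-default probe above is the key move: getAsDouble(OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING, null) answers "is the deprecated key set at all?", and only if it is does its value become the effective default for the current key. A hypothetical helper (not part of HierarchyCircuitBreakerService) distilling the compatibility chain:

static double withLegacyFallback(Settings s, String currentKey,
                                 String legacyKey, double constantDefault) {
    Double legacy = s.getAsDouble(legacyKey, null);          // null => not set
    double effectiveDefault = legacy != null ? legacy : constantDefault;
    return s.getAsDouble(currentKey, effectiveDefault);
}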
 
Example 7
Source File: HierarchyCircuitBreakerService.java    From Elasticsearch with Apache License 2.0
@Override
public void onRefreshSettings(Settings settings) {

    // Fielddata settings
    ByteSizeValue newFielddataMax = settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, null);
    Double newFielddataOverhead = settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, null);
    if (newFielddataMax != null || newFielddataOverhead != null) {
        long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes();
        newFielddataOverhead = newFielddataOverhead == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead;

        BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead,
                HierarchyCircuitBreakerService.this.fielddataSettings.getType());
        registerBreaker(newFielddataSettings);
        HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings;
        logger.info("Updated breaker settings fielddata: {}", newFielddataSettings);
    }

    // Request settings
    ByteSizeValue newRequestMax = settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, null);
    Double newRequestOverhead = settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, null);
    if (newRequestMax != null || newRequestOverhead != null) {
        long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes();
        newRequestOverhead = newRequestOverhead == null ? HierarchyCircuitBreakerService.this.requestSettings.getOverhead() : newRequestOverhead;

        BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestLimitBytes, newRequestOverhead,
                HierarchyCircuitBreakerService.this.requestSettings.getType());
        registerBreaker(newRequestSettings);
        HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings;
        logger.info("Updated breaker settings request: {}", newRequestSettings);
    }

    // Parent settings
    long oldParentMax = HierarchyCircuitBreakerService.this.parentSettings.getLimit();
    ByteSizeValue newParentMax = settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, null);
    if (newParentMax != null && (newParentMax.bytes() != oldParentMax)) {
        BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, newParentMax.bytes(), 1.0, CircuitBreaker.Type.PARENT);
        validateSettings(new BreakerSettings[]{newParentSettings});
        HierarchyCircuitBreakerService.this.parentSettings = newParentSettings;
        logger.info("Updated breaker settings parent: {}", newParentSettings);
    }
}
 
Example 8
Source File: MergePolicyConfig.java    From Elasticsearch with Apache License 2.0
@Override
public void onRefreshSettings(Settings settings) {
    final double oldExpungeDeletesPctAllowed = mergePolicy.getForceMergeDeletesPctAllowed();
    final double expungeDeletesPctAllowed = settings.getAsDouble(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, oldExpungeDeletesPctAllowed);
    if (expungeDeletesPctAllowed != oldExpungeDeletesPctAllowed) {
        logger.info("updating [expunge_deletes_allowed] from [{}] to [{}]", oldExpungeDeletesPctAllowed, expungeDeletesPctAllowed);
        mergePolicy.setForceMergeDeletesPctAllowed(expungeDeletesPctAllowed);
    }

    final double oldFloorSegmentMB = mergePolicy.getFloorSegmentMB();
    final ByteSizeValue floorSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_FLOOR_SEGMENT, null);
    if (floorSegment != null && floorSegment.mbFrac() != oldFloorSegmentMB) {
        logger.info("updating [floor_segment] from [{}mb] to [{}]", oldFloorSegmentMB, floorSegment);
        mergePolicy.setFloorSegmentMB(floorSegment.mbFrac());
    }

    final double oldSegmentsPerTier = mergePolicy.getSegmentsPerTier();
    final double segmentsPerTier = settings.getAsDouble(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, oldSegmentsPerTier);
    if (segmentsPerTier != oldSegmentsPerTier) {
        logger.info("updating [segments_per_tier] from [{}] to [{}]", oldSegmentsPerTier, segmentsPerTier);
        mergePolicy.setSegmentsPerTier(segmentsPerTier);
    }

    final int oldMaxMergeAtOnce = mergePolicy.getMaxMergeAtOnce();
    int maxMergeAtOnce = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, oldMaxMergeAtOnce);
    if (maxMergeAtOnce != oldMaxMergeAtOnce) {
        logger.info("updating [max_merge_at_once] from [{}] to [{}]", oldMaxMergeAtOnce, maxMergeAtOnce);
        maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
        mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce);
    }

    final int oldMaxMergeAtOnceExplicit = mergePolicy.getMaxMergeAtOnceExplicit();
    final int maxMergeAtOnceExplicit = settings.getAsInt(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT, oldMaxMergeAtOnceExplicit);
    if (maxMergeAtOnceExplicit != oldMaxMergeAtOnceExplicit) {
        logger.info("updating [max_merge_at_once_explicit] from [{}] to [{}]", oldMaxMergeAtOnceExplicit, maxMergeAtOnceExplicit);
        mergePolicy.setMaxMergeAtOnceExplicit(maxMergeAtOnceExplicit);
    }

    final double oldMaxMergedSegmentMB = mergePolicy.getMaxMergedSegmentMB();
    final ByteSizeValue maxMergedSegment = settings.getAsBytesSize(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT, null);
    if (maxMergedSegment != null && maxMergedSegment.mbFrac() != oldMaxMergedSegmentMB) {
        logger.info("updating [max_merged_segment] from [{}mb] to [{}]", oldMaxMergedSegmentMB, maxMergedSegment);
        mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.mbFrac());
    }

    final double oldReclaimDeletesWeight = mergePolicy.getReclaimDeletesWeight();
    final double reclaimDeletesWeight = settings.getAsDouble(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, oldReclaimDeletesWeight);
    if (reclaimDeletesWeight != oldReclaimDeletesWeight) {
        logger.info("updating [reclaim_deletes_weight] from [{}] to [{}]", oldReclaimDeletesWeight, reclaimDeletesWeight);
        mergePolicy.setReclaimDeletesWeight(reclaimDeletesWeight);
    }

    double noCFSRatio = parseNoCFSRatio(settings.get(INDEX_COMPOUND_FORMAT, Double.toString(MergePolicyConfig.this.noCFSRatio)));
    if (noCFSRatio != MergePolicyConfig.this.noCFSRatio) {
        logger.info("updating index.compound_format from [{}] to [{}]", formatNoCFSRatio(MergePolicyConfig.this.noCFSRatio), formatNoCFSRatio(noCFSRatio));
        mergePolicy.setNoCFSRatio(noCFSRatio);
        MergePolicyConfig.this.noCFSRatio = noCFSRatio;
    }
}
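
Every block above follows the same idiom: the current value is passed as the default, so "setting absent" and "setting unchanged" both compare equal and no update fires. A hypothetical Java 8 helper (not part of MergePolicyConfig) capturing that shape:

// Hypothetical helper capturing the update-if-changed idiom above.
static void updateIfChanged(Settings settings, String key, double current,
                            java.util.function.DoubleConsumer setter) {
    double updated = settings.getAsDouble(key, current);
    if (updated != current) {
        setter.accept(updated);
    }
}

// Usage, e.g.:
// updateIfChanged(settings, INDEX_MERGE_POLICY_SEGMENTS_PER_TIER,
//         mergePolicy.getSegmentsPerTier(), mergePolicy::setSegmentsPerTier);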