Java Code Examples for org.elasticsearch.common.settings.Settings#EMPTY

The following examples show how to use org.elasticsearch.common.settings.Settings#EMPTY. They are drawn from open source projects; the source file, project, and license for each example are noted in its header.
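Before the project-specific examples, here is a minimal, self-contained sketch of what Settings.EMPTY represents: a shared, immutable Settings instance that contains no keys, commonly passed to constructors in tests when no configuration overrides are needed. The class name SettingsEmptyDemo is made up for illustration; the accessors shown (isEmpty, get) reflect the commonly available Settings API, though exact signatures can vary between Elasticsearch versions.

import org.elasticsearch.common.settings.Settings;

public class SettingsEmptyDemo {

    public static void main(String[] args) {
        // Settings.EMPTY is a shared instance that holds no settings at all.
        Settings empty = Settings.EMPTY;
        System.out.println(empty.isEmpty());            // true
        System.out.println(empty.get("cluster.name"));  // null, nothing configured

        // Building settings without any put(...) calls yields an equivalent empty object;
        // Settings.EMPTY simply reuses one shared instance instead of allocating a new one.
        Settings alsoEmpty = Settings.builder().build();
        System.out.println(alsoEmpty.isEmpty());        // true

        // Lookups with a default still work, which is why components under test can be
        // constructed with Settings.EMPTY and silently fall back to their default values.
        String nodeName = empty.get("node.name", "default-node");
        System.out.println(nodeName);                   // "default-node"
    }
}
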
Example 1
Source File: TableStatsServiceTest.java    From crate with Apache License 2.0
@Test
public void testNoUpdateIfLocalNodeNotAvailable() {
    final ClusterService clusterService = Mockito.mock(ClusterService.class);
    Mockito.when(clusterService.localNode()).thenReturn(null);
    Mockito.when(clusterService.getClusterSettings()).thenReturn(this.clusterService.getClusterSettings());
    SQLOperations sqlOperations = Mockito.mock(SQLOperations.class);
    Session session = Mockito.mock(Session.class);
    Mockito.when(sqlOperations.createSession(ArgumentMatchers.anyString(), ArgumentMatchers.any(), ArgumentMatchers.any())).thenReturn(session);

    TableStatsService statsService = new TableStatsService(
        Settings.EMPTY,
        THREAD_POOL,
        clusterService,
        sqlOperations
    );

    statsService.run();
    Mockito.verify(session, Mockito.times(0)).sync();
}
 
Example 2
Source File: CoordinatorTests.java    From crate with Apache License 2.0
/**
 * Simulates a situation where a follower becomes disconnected from the leader, but only for such a short time where
 * it becomes candidate and puts up a NO_MASTER_BLOCK, but then receives a follower check from the leader. If the leader
 * does not notice the node disconnecting, it is important for the node not to be turned back into a follower but try
 * and join the leader again.
 */
public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() {
    final Cluster cluster = new Cluster(2, false, Settings.EMPTY);
    cluster.runRandomly();
    cluster.stabilise();

    final ClusterNode leader = cluster.getAnyLeader();
    final ClusterNode nonLeader = cluster.getAnyNodeExcept(leader);
    nonLeader.onNode(() -> {
        logger.debug("forcing {} to become candidate", nonLeader.getId());
        synchronized (nonLeader.coordinator.mutex) {
            nonLeader.coordinator.becomeCandidate("forced");
        }
        logger.debug("simulate follower check coming through from {} to {}", leader.getId(), nonLeader.getId());
        expectThrows(CoordinationStateRejectedException.class, () -> nonLeader.coordinator.onFollowerCheckRequest(
            new FollowersChecker.FollowerCheckRequest(leader.coordinator.getCurrentTerm(), leader.getLocalNode())));
        assertThat(nonLeader.coordinator.getMode(), equalTo(CANDIDATE));
    }).run();
    cluster.stabilise();
}
 
Example 3
Source File: WebhookAuditLogTest.java    From deprecated-security-advanced-modules with Apache License 2.0
@Test
public void invalidConfFallbackTest() throws Exception {
    AuditMessage msg = MockAuditMessageFactory.validAuditMessage();

    // provide no webhook endpoint settings, so the fallback sink must be used
    Settings settings = Settings.builder()
            .put("path.home", ".")
            .put("opendistro_security.ssl.transport.truststore_filepath",
                 FileHelper.getAbsoluteFilePathFromClassPath("auditlog/truststore.jks"))
            .build();
    LoggingSink fallback = new LoggingSink("test", Settings.EMPTY, null, null);
    MockWebhookAuditLog auditlog = new MockWebhookAuditLog(settings, ConfigConstants.OPENDISTRO_SECURITY_AUDIT_CONFIG_DEFAULT, fallback);
    auditlog.store(msg);
    // Webhook sink has failed ...
    Assert.assertNull(auditlog.webhookFormat);
    // ... so the message must be stored in the fallback sink
    Assert.assertEquals(1, fallback.messages.size());
    Assert.assertEquals(msg, fallback.messages.get(0));
}
 
Example 4
Source File: PostgresNettyPublishPortTest.java    From crate with Apache License 2.0
@Test
public void testBindAndPublishAddressDefault() {
    // First check that binding to a local address works
    NetworkService networkService = new NetworkService(Collections.emptyList());
    PostgresNetty psql = new PostgresNetty(
        Settings.EMPTY,
        mock(SQLOperations.class),
        new StubUserManager(),
        networkService,
        new AlwaysOKNullAuthentication(),
        mock(SslContextProvider.class));
    try {
        psql.doStart();
    } finally {
        psql.doStop();
    }
}
 
Example 5
Source File: TermsSetTest.java    From siren-join with GNU Affero General Public License v3.0
@Test
public void testCircuitBreakerAdjustmentOnIntTermsSet() {
  HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService(
          Settings.builder().build(),
          new NodeSettingsService(Settings.EMPTY));

  CircuitBreaker breaker = hcbs.getBreaker(CircuitBreaker.REQUEST);
  assertThat(breaker.getUsed(), is(equalTo(0L)));

  IntegerTermsSet termsSet = new IntegerTermsSet(8, hcbs.getBreaker(CircuitBreaker.REQUEST));
  long usedMem = breaker.getUsed();
  assertThat(usedMem, greaterThan(0L));

  for (int i = 0; i < 16; i++) {
    termsSet.add(i);
  }

  assertThat(breaker.getUsed(), greaterThan(usedMem));

  termsSet.release();

  assertThat(breaker.getUsed(), is(equalTo(0L)));
}
 
Example 6
Source File: CrateCircuitBreakerServiceTest.java    From crate with Apache License 2.0
@Test
public void testStats() throws Exception {
    CircuitBreakerService breakerService = new HierarchyCircuitBreakerService(
        Settings.EMPTY, clusterSettings);

    CircuitBreakerStats queryBreakerStats = breakerService.stats(HierarchyCircuitBreakerService.QUERY);
    assertThat(queryBreakerStats.getUsed(), is(0L));
}
 
Example 7
Source File: SysNodeChecksTest.java    From crate with Apache License 2.0
@Test
public void testRecoveryExpectedNodesCheckWithCorrectSetting() {
    RecoveryExpectedNodesSysCheck recoveryExpectedNodesCheck =
        new RecoveryExpectedNodesSysCheck(clusterService, Settings.EMPTY);

    assertThat(recoveryExpectedNodesCheck.id(), is(1));
    assertThat(recoveryExpectedNodesCheck.severity(), is(SysCheck.Severity.HIGH));
    assertThat(recoveryExpectedNodesCheck.validate(3, 3), is(true));
}
 
Example 8
Source File: SysNodeChecksTest.java    From crate with Apache License 2.0
@Test
public void testValidationLowDiskWatermarkCheck() {
    DiskWatermarkNodesSysCheck low = new LowDiskWatermarkNodesSysCheck(
        clusterService,
        Settings.EMPTY,
        mock(NodeService.class, Answers.RETURNS_MOCKS)
    );

    assertThat(low.id(), is(6));
    assertThat(low.severity(), is(SysCheck.Severity.HIGH));

    // default threshold is: 85% used
    assertThat(low.isValid(15, 100), is(true));
    assertThat(low.isValid(14, 100), is(false));
}
 
Example 9
Source File: ClusterServiceUtils.java    From crate with Apache License 2.0
public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) {
    MasterService masterService = new MasterService("test_master_node", Settings.EMPTY, threadPool);
    AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
    masterService.setClusterStatePublisher((event, publishListener, ackListener) -> {
        clusterStateRef.set(event.state());
        publishListener.onResponse(null);
    });
    masterService.setClusterStateSupplier(clusterStateRef::get);
    masterService.start();
    return masterService;
}
 
Example 10
Source File: TestingBlobTableInfoFactory.java    From crate with Apache License 2.0
TestingBlobTableInfoFactory(Map<RelationName, BlobTableInfo> blobTables,
                            IndexNameExpressionResolver indexNameExpressionResolver,
                            File dataPath) {
    this.tables = blobTables;
    internalFactory = new InternalBlobTableInfoFactory(
        Settings.EMPTY, indexNameExpressionResolver, new Path[]{Path.of(dataPath.toURI())});
}
 
Example 11
Source File: SqlHttpHandlerTest.java    From crate with Apache License 2.0
@Test
public void testUserIfHttpBasicAuthIsPresent() {
    SqlHttpHandler handler = new SqlHttpHandler(
        Settings.EMPTY,
        mock(SQLOperations.class),
        (s) -> new NoopCircuitBreaker("dummy"),
        User::of,
        sessionContext -> AccessControl.DISABLED,
        Netty4CorsConfigBuilder.forAnyOrigin().build()
    );

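    // "QWxhZGRpbjpPcGVuU2VzYW1l" is the Base64 encoding of "Aladdin:OpenSesame"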
    User user = handler.userFromAuthHeader("Basic QWxhZGRpbjpPcGVuU2VzYW1l");
    assertThat(user.name(), is("Aladdin"));
}
 
Example 12
Source File: S3RepositoryTests.java    From crate with Apache License 2.0
private S3Repository createS3Repo(RepositoryMetaData metadata) {
    return new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service(), mock(ThreadPool.class)) {
        @Override
        protected void assertSnapshotOrGenericThread() {
            // eliminate thread name check as we create repo manually on test/main threads
        }
    };
}
 
Example 13
Source File: IndexWriterProjectorUnitTest.java    From crate with Apache License 2.0
@Test
public void testNullPKValue() throws Throwable {
    InputCollectExpression sourceInput = new InputCollectExpression(0);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);

    TransportCreatePartitionsAction transportCreatePartitionsAction = mock(TransportCreatePartitionsAction.class);
    IndexWriterProjector indexWriter = new IndexWriterProjector(
        clusterService,
        new NodeJobsCounter(),
        scheduler,
        executor,
        CoordinatorTxnCtx.systemTransactionContext(),
        TestingHelpers.getFunctions(),
        Settings.EMPTY,
        5,
        1,
        transportCreatePartitionsAction,
        (request, listener) -> {},
        IndexNameResolver.forTable(BULK_IMPORT_IDENT),
        RAW_SOURCE_REFERENCE,
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(1)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount());

    RowN rowN = new RowN(new Object[]{new BytesRef("{\"y\": \"x\"}"), null});
    BatchIterator<Row> batchIterator = InMemoryBatchIterator.of(Collections.singletonList(rowN), SENTINEL, true);
    batchIterator = indexWriter.apply(batchIterator);

    TestingRowConsumer testingBatchConsumer = new TestingRowConsumer();
    testingBatchConsumer.accept(batchIterator, null);

    List<Object[]> result = testingBatchConsumer.getResult();
    // Zero affected rows as a NULL as a PK value will result in an exception.
    // It must never bubble up as other rows might already have been written.
    assertThat(result.get(0)[0], is(0L));
}
 
Example 14
Source File: IndexShardTestCase.java    From crate with Apache License 2.0
public Settings threadPoolSettings() {
    return Settings.EMPTY;
}
 
Example 15
Source File: DiskThresholdDeciderUnitTests.java    From crate with Apache License 2.0
@Test
public void testCannotAllocateDueToLackOfDiskResources() {
    ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);

    MetaData metaData = MetaData.builder()
        .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
        .build();

    final Index index = metaData.index("test").getIndex();

    ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, EmptyStoreRecoverySource.INSTANCE,
                                                     new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(),
                                             new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
    DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(),
                                             new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);

    RoutingTable routingTable = RoutingTable.builder()
        .addAsNew(metaData.index("test"))
        .build();

    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
        .metaData(metaData).routingTable(routingTable).build();

    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
                                                                .add(node_0)
                                                                .add(node_1)
    ).build();

    // actual test -- after all that bloat :)

    ImmutableOpenMap.Builder<String, DiskUsage> leastAvailableUsages = ImmutableOpenMap.builder();
    leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, 0)); // all full
    ImmutableOpenMap.Builder<String, DiskUsage> mostAvailableUsage = ImmutableOpenMap.builder();
    final int freeBytes = randomIntBetween(20, 100);
    mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, freeBytes));

    ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
    // way bigger than available space
    final long shardSize = randomIntBetween(110, 1000);
    shardSizes.put("[test][0][p]", shardSize);
    ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(),
                                              shardSizes.build(), ImmutableOpenMap.of());
    RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.singleton(decider)),
                                                         clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
    allocation.debugDecision(true);
    Decision decision = decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation);
    assertEquals(Decision.Type.NO, decision.type());

    assertThat(decision.getExplanation(), containsString(
        "allocating the shard to this node will bring the node above the high watermark cluster setting "
        +"[cluster.routing.allocation.disk.watermark.high=90%] "
        + "and cause it to have less than the minimum required [0b] of free space "
        + "(free: [" + freeBytes + "b], estimated shard size: [" + shardSize + "b])"));
}
 
Example 16
Source File: SQLExecutor.java    From crate with Apache License 2.0
private Builder(ClusterService clusterService,
                int numNodes,
                Random random,
                List<AnalysisPlugin> analysisPlugins) {
    if (numNodes < 1) {
        throw new IllegalArgumentException("Must have at least 1 node");
    }
    this.random = random;
    this.clusterService = clusterService;
    addNodesToClusterState(numNodes);
    functions = getFunctions();
    UserDefinedFunctionService udfService = new UserDefinedFunctionService(clusterService, functions);
    Map<String, SchemaInfo> schemaInfoByName = new HashMap<>();
    CrateSettings crateSettings = new CrateSettings(clusterService, clusterService.getSettings());
    schemaInfoByName.put("sys", new SysSchemaInfo(clusterService, crateSettings, new CeLicenseService()));
    schemaInfoByName.put("information_schema", new InformationSchemaInfo());
    schemaInfoByName.put(PgCatalogSchemaInfo.NAME, new PgCatalogSchemaInfo(udfService, tableStats));
    IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
    schemaInfoByName.put(
        BlobSchemaInfo.NAME,
        new BlobSchemaInfo(
            clusterService,
            new TestingBlobTableInfoFactory(
                Collections.emptyMap(), indexNameExpressionResolver, createTempDir())));


    Map<RelationName, DocTableInfo> docTables = new HashMap<>();
    DocTableInfoFactory tableInfoFactory = new TestingDocTableInfoFactory(
        docTables, functions, indexNameExpressionResolver);
    ViewInfoFactory testingViewInfoFactory = (ident, state) -> null;

    schemas = new Schemas(
        schemaInfoByName,
        clusterService,
        new DocSchemaInfoFactory(tableInfoFactory, testingViewInfoFactory, functions, udfService)
    );
    schemas.start();  // start listen to cluster state changes

    File homeDir = createTempDir();
    Environment environment = new Environment(
        Settings.builder().put(PATH_HOME_SETTING.getKey(), homeDir.getAbsolutePath()).build(),
        homeDir.toPath().resolve("config")
    );
    try {
        analysisRegistry = new AnalysisModule(environment, analysisPlugins).getAnalysisRegistry();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    fulltextAnalyzerResolver = new FulltextAnalyzerResolver(clusterService, analysisRegistry);
    createTableStatementAnalyzer = new CreateTableStatementAnalyzer(functions);
    createBlobTableAnalyzer = new CreateBlobTableAnalyzer(
        schemas,
        functions
    );
    allocationService = new AllocationService(
        new AllocationDeciders(
            Arrays.asList(
                new SameShardAllocationDecider(Settings.EMPTY, clusterService.getClusterSettings()),
                new ReplicaAfterPrimaryActiveAllocationDecider()
            )
        ),
        new TestGatewayAllocator(),
        new BalancedShardsAllocator(Settings.EMPTY),
        EmptyClusterInfoService.INSTANCE
    );

    publishInitialClusterState();
}
 
Example 17
Source File: TestHelpers.java    From anomaly-detection with Apache License 2.0
public static NamedXContentRegistry xContentRegistry() {
    SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
    return new NamedXContentRegistry(searchModule.getNamedXContents());
}
 
Example 18
Source File: IntegrationTest.java    From ElasticUtils with MIT License
@Test
public void bulkProcessingTest() throws Exception {

    // Weather Data Simulation between 2013-01-01 and 2013-01-03 in 15 Minute intervals:
    LocalWeatherDataSimulator simulator = new LocalWeatherDataSimulator(
            LocalDateTime.of(2013, 1, 1, 0, 0),
            LocalDateTime.of(2013, 1, 3, 0, 0),
            Duration.ofMinutes(15));

    // Index to work on:
    String indexName = "weather_data";

    // Describes how to build the Index:
    LocalWeatherDataMapper mapping = new LocalWeatherDataMapper();

    // Bulk Options for the Wrapped Client:
    BulkProcessorConfiguration bulkConfiguration = new BulkProcessorConfiguration(BulkProcessingOptions.builder()
            .setBulkActions(100)
            .build());

    // Create a new TransportClient with the default options:
    try (TransportClient transportClient = new PreBuiltTransportClient(Settings.EMPTY)) {

        // Add the Transport Address to the TransportClient:
        transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300));

        // Create the Index, if it doesn't exist yet:
        createIndex(transportClient, indexName);

        // Create the Mapping, if it doesn't exist yet:
        createMapping(transportClient, indexName, mapping);

        // Now wrap the Elastic client in our bulk processing client:
        ElasticSearchClient<LocalWeatherData> client = new ElasticSearchClient<>(transportClient, indexName, mapping, bulkConfiguration);

        // Create some data to work with:
        try (Stream<LocalWeatherData> stream = simulator.generate()) {
            // Consume the Stream with the ElasticSearchClient:
            client.index(stream);
        }

        // The Bulk Insert is asynchronous, we give ElasticSearch some time to do the insert:
        client.awaitClose(1, TimeUnit.SECONDS);
    }
}
 
Example 19
Source File: CoordinationStateTests.java    From crate with Apache License 2.0
ClusterNode(DiscoveryNode localNode) {
    this.localNode = localNode;
    persistedState = new InMemoryPersistedState(0L,
        clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L));
    state = new CoordinationState(Settings.EMPTY, localNode, persistedState);
}
 
Example 20
Source File: ESClusterUpdateSettingsNode.java    From Elasticsearch with Apache License 2.0
public ESClusterUpdateSettingsNode(Set<String> persistentSettingsToRemove, Set<String> transientSettingsToRemove) {
    this.persistentSettingsToRemove = persistentSettingsToRemove;
    this.transientSettingsToRemove = transientSettingsToRemove;
    persistentSettings = Settings.EMPTY;
    transientSettings = Settings.EMPTY;
}