Java Code Examples for org.elasticsearch.transport.TransportService#start()

The following examples show how to use org.elasticsearch.transport.TransportService#start(). They are taken from open-source projects; you can go to the original project or source file by following the links above each example.
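
Most of the examples follow the same lifecycle pattern: build a TransportService, call start() to start the underlying transport, and then call acceptIncomingRequests() so that inbound requests are actually dispatched to the registered handlers. The sketch below shows that pattern in isolation, using the test-framework helpers that Examples 3–5 rely on (CapturingTransport, DeterministicTaskQueue); the test method name is hypothetical and the exact factory signatures differ between Elasticsearch versions, so treat it as an illustration rather than a drop-in snippet.

public void testTransportServiceLifecycle() {
    // minimal sketch: start a TransportService backed by a CapturingTransport
    DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(
        Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), random());
    DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
    TransportService transportService = new CapturingTransport().createTransportService(
        Settings.EMPTY,
        deterministicTaskQueue.getThreadPool(),
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        boundAddress -> localNode,
        null // no ClusterSettings needed for this sketch
    );
    try {
        transportService.start();                  // starts and binds the underlying transport
        transportService.acceptIncomingRequests(); // inbound requests are only handled after this call
        // ... register request handlers, send requests, make assertions ...
    } finally {
        transportService.stop();
        transportService.close();
    }
}
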
Example 1
Source File: TransportClient.java    From Elasticsearch with Apache License 2.0
/**
 * Builds a new instance of the transport client.
 */
public TransportClient build() {
    Settings settings = InternalSettingsPreparer.prepareSettings(this.settings);
    settings = settingsBuilder()
            .put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval
            .put(settings)
            .put("network.server", false)
            .put("node.client", true)
            .put(CLIENT_TYPE_SETTING, CLIENT_TYPE)
            .build();

    PluginsService pluginsService = new PluginsService(settings, null, null, pluginClasses);
    this.settings = pluginsService.updatedSettings();

    Version version = Version.CURRENT;

    final ThreadPool threadPool = new ThreadPool(settings);
    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();

    boolean success = false;
    try {
        ModulesBuilder modules = new ModulesBuilder();
        modules.add(new Version.Module(version));
        // plugin modules must be added here, before others or we can get crazy injection errors...
        for (Module pluginModule : pluginsService.nodeModules()) {
            modules.add(pluginModule);
        }
        modules.add(new PluginsModule(pluginsService));
        modules.add(new SettingsModule(this.settings));
        modules.add(new NetworkModule(namedWriteableRegistry));
        modules.add(new ClusterNameModule(this.settings));
        modules.add(new ThreadPoolModule(threadPool));
        modules.add(new TransportModule(this.settings, namedWriteableRegistry));
        modules.add(new SearchModule() {
            @Override
            protected void configure() {
                // noop
            }
        });
        modules.add(new ActionModule(true));
        modules.add(new ClientTransportModule());
        modules.add(new CircuitBreakerModule(this.settings));

        pluginsService.processModules(modules);

        Injector injector = modules.createInjector();
        final TransportService transportService = injector.getInstance(TransportService.class);
        transportService.start();
        transportService.acceptIncomingRequests();

        TransportClient transportClient = new TransportClient(injector);
        success = true;
        return transportClient;
    } finally {
        if (!success) {
            ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
        }
    }
}
 
Example 2
Source File: Node.java    From Elasticsearch with Apache License 2.0
/**
 * Start the node. If the node is already started, this method is a no-op.
 */
public Node start() {
    if (!lifecycle.moveToStarted()) {
        return this;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("starting ...");
    // hack around dependency injection problem (for now...)
    injector.getInstance(Discovery.class).setRoutingService(injector.getInstance(RoutingService.class));
    for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
        injector.getInstance(plugin).start();
    }

    injector.getInstance(MappingUpdatedAction.class).setClient(client);
    injector.getInstance(IndicesService.class).start();
    injector.getInstance(IndexingMemoryController.class).start();
    injector.getInstance(IndicesClusterStateService.class).start();
    injector.getInstance(IndicesTTLService.class).start();
    injector.getInstance(SnapshotsService.class).start();
    injector.getInstance(SnapshotShardsService.class).start();
    injector.getInstance(RoutingService.class).start();
    injector.getInstance(SearchService.class).start();
    injector.getInstance(MonitorService.class).start();
    injector.getInstance(RestController.class).start();

    // TODO hack around circular dependencies problems
    injector.getInstance(GatewayAllocator.class).setReallocation(injector.getInstance(ClusterService.class), injector.getInstance(RoutingService.class));

    injector.getInstance(ResourceWatcherService.class).start();
    injector.getInstance(GatewayService.class).start();
    injector.getInstance(TenantManagementService.class).start();

    // Start the transport service now so the publish address will be added to the local disco node in ClusterService
    TransportService transportService = injector.getInstance(TransportService.class);
    transportService.start();
    injector.getInstance(ClusterService.class).start();

    // start after cluster service so the local disco is known
    DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();

    transportService.acceptIncomingRequests();
    discoService.joinClusterAndWaitForInitialState();

    if (settings.getAsBoolean("http.enabled", true)) {
        injector.getInstance(HttpServer.class).start();
    }
    injector.getInstance(TribeService.class).start();
    if (settings.getAsBoolean("node.portsfile", false)) {
        if (settings.getAsBoolean("http.enabled", true)) {
            HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
            writePortsFile("http", http.boundAddress());
        }
        TransportService transport = injector.getInstance(TransportService.class);
        writePortsFile("transport", transport.boundAddress());
    }
    logger.info("started");

    return this;
}
 
Example 3
Source File: NodeJoinTests.java    From crate with Apache License 2.0
private void setupMasterServiceAndCoordinator(long term, ClusterState initialState, MasterService masterService,
                                              ThreadPool threadPool, Random random) {
    if (this.masterService != null || coordinator != null) {
        throw new IllegalStateException("method setupMasterServiceAndCoordinator can only be called once");
    }
    this.masterService = masterService;
    // a CapturingTransport that auto-acknowledges handshake and join-validation requests,
    // so the coordinator can make progress without a real network
    CapturingTransport capturingTransport = new CapturingTransport() {
        @Override
        protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode destination) {
            if (action.equals(HANDSHAKE_ACTION_NAME)) {
                handleResponse(requestId, new TransportService.HandshakeResponse(destination, initialState.getClusterName(),
                    destination.getVersion()));
            } else if (action.equals(JoinHelper.VALIDATE_JOIN_ACTION_NAME)) {
                handleResponse(requestId, new TransportResponse.Empty());
            } else {
                super.onSendRequest(requestId, action, request, destination);
            }
        }
    };
    final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    TransportService transportService = capturingTransport.createTransportService(
        Settings.EMPTY,
        threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        x -> initialState.nodes().getLocalNode(),
        clusterSettings
    );
    coordinator = new Coordinator("test_node", Settings.EMPTY, clusterSettings,
        transportService, writableRegistry(),
        ESAllocationTestCase.createAllocationService(Settings.EMPTY),
        masterService,
        () -> new InMemoryPersistedState(term, initialState), r -> emptyList(),
        new NoOpClusterApplier(),
        Collections.emptyList(),
        random);
    transportService.start();
    transportService.acceptIncomingRequests();
    transport = capturingTransport;
    coordinator.start();
    coordinator.startInitialJoin();
}
 
Example 4
Source File: PublicationTransportHandlerTests.java    From crate with Apache License 2.0
public void testDiffSerializationFailure() {
    DeterministicTaskQueue deterministicTaskQueue =
        new DeterministicTaskQueue(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), random());
    final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    final DiscoveryNode localNode = new DiscoveryNode("localNode", buildNewFakeTransportAddress(), Version.CURRENT);
    final TransportService transportService = new CapturingTransport().createTransportService(
        Settings.EMPTY,
        deterministicTaskQueue.getThreadPool(),
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        x -> localNode,
        clusterSettings
    );
    final PublicationTransportHandler handler = new PublicationTransportHandler(transportService,
        writableRegistry(), pu -> null, (pu, l) -> {});
    transportService.start();
    transportService.acceptIncomingRequests();

    final DiscoveryNode otherNode = new DiscoveryNode("otherNode", buildNewFakeTransportAddress(), Version.CURRENT);
    final ClusterState clusterState = CoordinationStateTests.clusterState(2L, 1L,
        DiscoveryNodes.builder().add(localNode).add(otherNode).localNodeId(localNode.getId()).build(),
        VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L);

    // a cluster state whose diff deliberately fails to serialize, to exercise the failure path
    final ClusterState unserializableClusterState = new ClusterState(clusterState.version(),
        clusterState.stateUUID(), clusterState) {
        @Override
        public Diff<ClusterState> diff(ClusterState previousState) {
            return new Diff<ClusterState>() {
                @Override
                public ClusterState apply(ClusterState part) {
                    fail("this diff shouldn't be applied");
                    return part;
                }

                @Override
                public void writeTo(StreamOutput out) throws IOException {
                    throw new IOException("Simulated failure of diff serialization");
                }
            };
        }
    };

    ElasticsearchException e = expectThrows(ElasticsearchException.class, () ->
        handler.newPublicationContext(new ClusterChangedEvent("test", unserializableClusterState, clusterState)));
    assertNotNull(e.getCause());
    assertThat(e.getCause(), instanceOf(IOException.class));
    assertThat(e.getCause().getMessage(), containsString("Simulated failure of diff serialization"));
}
 
Example 5
Source File: JoinHelperTests.java    From crate with Apache License 2.0
public void testJoinDeduplication() {
    DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(
        Settings.builder().put(NODE_NAME_SETTING.getKey(), "node0").build(), random());
    CapturingTransport capturingTransport = new CapturingTransport();
    DiscoveryNode localNode = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT);
    TransportService transportService = capturingTransport.createTransportService(
        Settings.EMPTY,
        deterministicTaskQueue.getThreadPool(),
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        x -> localNode,
        null
    );
    // the join/start-join callbacks must never fire in this test, hence the AssertionErrors
    JoinHelper joinHelper = new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> null,
        (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); },
        Collections.emptyList());
    transportService.start();

    DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT);
    DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT);

    assertFalse(joinHelper.isJoinPending());

    // check that sending a join to node1 works
    Optional<Join> optionalJoin1 = randomBoolean() ? Optional.empty() :
        Optional.of(new Join(localNode, node1, randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()));
    joinHelper.sendJoinRequest(node1, optionalJoin1);
    CapturedRequest[] capturedRequests1 = capturingTransport.getCapturedRequestsAndClear();
    assertThat(capturedRequests1.length, equalTo(1));
    CapturedRequest capturedRequest1 = capturedRequests1[0];
    assertEquals(node1, capturedRequest1.node);

    assertTrue(joinHelper.isJoinPending());

    // check that sending a join to node2 works
    Optional<Join> optionalJoin2 = randomBoolean() ? Optional.empty() :
        Optional.of(new Join(localNode, node2, randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()));
    joinHelper.sendJoinRequest(node2, optionalJoin2);
    CapturedRequest[] capturedRequests2 = capturingTransport.getCapturedRequestsAndClear();
    assertThat(capturedRequests2.length, equalTo(1));
    CapturedRequest capturedRequest2 = capturedRequests2[0];
    assertEquals(node2, capturedRequest2.node);

    // check that sending another join to node1 is a noop as the previous join is still in progress
    joinHelper.sendJoinRequest(node1, optionalJoin1);
    assertThat(capturingTransport.getCapturedRequestsAndClear().length, equalTo(0));

    // complete the previous join to node1
    if (randomBoolean()) {
        capturingTransport.handleResponse(capturedRequest1.requestId, TransportResponse.Empty.INSTANCE);
    } else {
        capturingTransport.handleRemoteError(capturedRequest1.requestId, new CoordinationStateRejectedException("dummy"));
    }

    // check that sending another join to node1 now works again
    joinHelper.sendJoinRequest(node1, optionalJoin1);
    CapturedRequest[] capturedRequests1a = capturingTransport.getCapturedRequestsAndClear();
    assertThat(capturedRequests1a.length, equalTo(1));
    CapturedRequest capturedRequest1a = capturedRequests1a[0];
    assertEquals(node1, capturedRequest1a.node);

    // check that sending another join to node2 works if the optionalJoin is different
    Optional<Join> optionalJoin2a = optionalJoin2.isPresent() && randomBoolean() ? Optional.empty() :
        Optional.of(new Join(localNode, node2, randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()));
    joinHelper.sendJoinRequest(node2, optionalJoin2a);
    CapturedRequest[] capturedRequests2a = capturingTransport.getCapturedRequestsAndClear();
    assertThat(capturedRequests2a.length, equalTo(1));
    CapturedRequest capturedRequest2a = capturedRequests2a[0];
    assertEquals(node2, capturedRequest2a.node);

    // complete all the joins and check that isJoinPending is updated
    assertTrue(joinHelper.isJoinPending());
    capturingTransport.handleRemoteError(capturedRequest2.requestId, new CoordinationStateRejectedException("dummy"));
    capturingTransport.handleRemoteError(capturedRequest1a.requestId, new CoordinationStateRejectedException("dummy"));
    capturingTransport.handleRemoteError(capturedRequest2a.requestId, new CoordinationStateRejectedException("dummy"));
    assertFalse(joinHelper.isJoinPending());
}