Java Code Examples for org.apache.cassandra.utils.FBUtilities#getBroadcastAddress()

The following examples show how to use org.apache.cassandra.utils.FBUtilities#getBroadcastAddress(). Each example is taken from an open-source project; the project and source file are noted above the code.
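FBUtilities.getBroadcastAddress() returns the InetAddress that identifies the local Cassandra node to the rest of the cluster. Before the project examples, here is a minimal sketch of the most common usage pattern, comparing the broadcast address against another endpoint to decide whether work is local or remote; the class and method names here are hypothetical and not taken from any of the projects below.

import java.net.InetAddress;

import org.apache.cassandra.utils.FBUtilities;

public final class BroadcastAddressSketch
{
    /**
     * Returns true if the given endpoint is this node. getBroadcastAddress()
     * resolves to the configured broadcast_address, falling back to the node's
     * local (listen) address when no broadcast address is set.
     */
    public static boolean isLocalEndpoint(InetAddress endpoint)
    {
        return FBUtilities.getBroadcastAddress().equals(endpoint);
    }
}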
Example 1
Source File: SerializationsTest.java    From stratio-cassandra with Apache License 2.0
private void testValidationCompleteWrite() throws IOException
{
    IPartitioner p = new RandomPartitioner();
    // empty validation
    MerkleTree mt = new MerkleTree(p, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, (int) Math.pow(2, 15));
    Validator v0 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c0 = new ValidationComplete(DESC, mt);

    // validation with a tree
    mt = new MerkleTree(p, FULL_RANGE, MerkleTree.RECOMMENDED_DEPTH, Integer.MAX_VALUE);
    for (int i = 0; i < 10; i++)
        mt.split(p.getRandomToken());
    Validator v1 = new Validator(DESC, FBUtilities.getBroadcastAddress(), -1);
    ValidationComplete c1 = new ValidationComplete(DESC, mt);

    // validation failed
    ValidationComplete c3 = new ValidationComplete(DESC);

    testRepairMessageWrite("service.ValidationComplete.bin", c0, c1, c3);
}
 
Example 2
Source File: StorageService.java    From stratio-cassandra with Apache License 2.0
/**
 * Finds living endpoints responsible for the given ranges
 *
 * @param keyspaceName the keyspace ranges belong to
 * @param ranges the ranges to find sources for
 * @return multimap of addresses to ranges the address is responsible for
 */
private Multimap<InetAddress, Range<Token>> getNewSourceRanges(String keyspaceName, Set<Range<Token>> ranges)
{
    InetAddress myAddress = FBUtilities.getBroadcastAddress();
    Multimap<Range<Token>, InetAddress> rangeAddresses = Keyspace.open(keyspaceName).getReplicationStrategy().getRangeAddresses(tokenMetadata.cloneOnlyTokenMap());
    Multimap<InetAddress, Range<Token>> sourceRanges = HashMultimap.create();
    IFailureDetector failureDetector = FailureDetector.instance;

    // find alive sources for our new ranges
    for (Range<Token> range : ranges)
    {
        Collection<InetAddress> possibleRanges = rangeAddresses.get(range);
        IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
        List<InetAddress> sources = snitch.getSortedListByProximity(myAddress, possibleRanges);

        assert (!sources.contains(myAddress));

        for (InetAddress source : sources)
        {
            if (failureDetector.isAlive(source))
            {
                sourceRanges.put(source, range);
                break;
            }
        }
    }
    return sourceRanges;
}
 
Example 3
Source File: Differencer.java    From stratio-cassandra with Apache License 2.0
/**
 * Starts sending/receiving our list of differences to/from the remote endpoint: creates a callback
 * that will be called out of band once the streams complete.
 */
void performStreamingRepair()
{
    InetAddress local = FBUtilities.getBroadcastAddress();
    // Either node can act as the source or the destination, but if one of them is the local node we make it the source to avoid an extra forwarding hop
    InetAddress src = r2.endpoint.equals(local) ? r2.endpoint : r1.endpoint;
    InetAddress dst = r2.endpoint.equals(local) ? r1.endpoint : r2.endpoint;

    SyncRequest request = new SyncRequest(desc, local, src, dst, differences);
    StreamingRepairTask task = new StreamingRepairTask(desc, request);
    task.run();
}
 
Example 4
Source File: ConnectionHandler.java    From stratio-cassandra with Apache License 2.0
public void sendInitMessage(Socket socket, boolean isForOutgoing) throws IOException
{
    StreamInitMessage message = new StreamInitMessage(
            FBUtilities.getBroadcastAddress(),
            session.sessionIndex(),
            session.planId(),
            session.description(),
            isForOutgoing);
    ByteBuffer messageBuf = message.createMessage(false, protocolVersion);
    getWriteChannel(socket).write(messageBuf);
}
 
Example 5
Source File: LeveledCompactionStrategyTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testValidationMultipleSSTablePerLevel() throws Exception
{
    byte [] b = new byte[100 * 1024];
    new Random().nextBytes(b);
    ByteBuffer value = ByteBuffer.wrap(b); // 100 KB value, make it easy to have multiple files

    // Enough data to have a level 1 and 2
    int rows = 20;
    int columns = 10;

    // Adds enough data to trigger multiple sstable per level
    for (int r = 0; r < rows; r++)
    {
        DecoratedKey key = Util.dk(String.valueOf(r));
        Mutation rm = new Mutation(ksname, key.getKey());
        for (int c = 0; c < columns; c++)
        {
            rm.add(cfname, Util.cellname("column" + c), value, 0);
        }
        rm.apply();
        cfs.forceBlockingFlush();
    }

    waitForLeveling(cfs);
    WrappingCompactionStrategy strategy = (WrappingCompactionStrategy) cfs.getCompactionStrategy();
    // Checking we're not completely bad at math
    assertTrue(strategy.getSSTableCountPerLevel()[1] > 0);
    assertTrue(strategy.getSSTableCountPerLevel()[2] > 0);

    Range<Token> range = new Range<>(Util.token(""), Util.token(""));
    int gcBefore = keyspace.getColumnFamilyStore(cfname).gcBefore(System.currentTimeMillis());
    UUID parentRepSession = UUID.randomUUID();
    ActiveRepairService.instance.registerParentRepairSession(parentRepSession, Arrays.asList(cfs), Arrays.asList(range));
    RepairJobDesc desc = new RepairJobDesc(parentRepSession, UUID.randomUUID(), ksname, cfname, range);
    Validator validator = new Validator(desc, FBUtilities.getBroadcastAddress(), gcBefore);
    CompactionManager.instance.submitValidation(cfs, validator).get();
}
 
Example 6
Source File: InternalMetadataFactory.java    From cassandra-exporter with Apache License 2.0
@Override
public InetAddress localBroadcastAddress() {
    return FBUtilities.getBroadcastAddress();
}
 
Example 7
Source File: StorageService.java    From stratio-cassandra with Apache License 2.0
/**
 * Move this node to a new token.
 *
 * @param newToken the token to move to; must not be null
 *
 * @throws IOException on any I/O operation error
 */
private void move(Token newToken) throws IOException
{
    if (newToken == null)
        throw new IOException("Can't move to the undefined (null) token.");

    if (tokenMetadata.sortedTokens().contains(newToken))
        throw new IOException("target token " + newToken + " is already owned by another node.");

    // address of the current node
    InetAddress localAddress = FBUtilities.getBroadcastAddress();

    // This doesn't make any sense in a vnodes environment.
    if (getTokenMetadata().getTokens(localAddress).size() > 1)
    {
        logger.error("Invalid request to move(Token); This node has more than one token and cannot be moved thusly.");
        throw new UnsupportedOperationException("This node has more than one token and cannot be moved thusly.");
    }

    List<String> keyspacesToProcess = Schema.instance.getNonSystemKeyspaces();

    PendingRangeCalculatorService.instance.blockUntilFinished();
    // checking if data is moving to this node
    for (String keyspaceName : keyspacesToProcess)
    {
        if (tokenMetadata.getPendingRanges(keyspaceName, localAddress).size() > 0)
            throw new UnsupportedOperationException("data is currently moving to this node; unable to leave the ring");
    }

    Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS, valueFactory.moving(newToken));
    setMode(Mode.MOVING, String.format("Moving %s from %s to %s.", localAddress, getLocalTokens().iterator().next(), newToken), true);

    setMode(Mode.MOVING, String.format("Sleeping %s ms before start streaming/fetching ranges", RING_DELAY), true);
    Uninterruptibles.sleepUninterruptibly(RING_DELAY, TimeUnit.MILLISECONDS);

    RangeRelocator relocator = new RangeRelocator(Collections.singleton(newToken), keyspacesToProcess);

    if (relocator.streamsNeeded())
    {
        setMode(Mode.MOVING, "fetching new ranges and streaming old ranges", true);
        try
        {
            relocator.stream().get();
        }
        catch (ExecutionException | InterruptedException e)
        {
            throw new RuntimeException("Interrupted while waiting for stream/fetch ranges to finish: " + e.getMessage());
        }
    }
    else
    {
        setMode(Mode.MOVING, "No ranges to fetch/stream", true);
    }

    setTokens(Collections.singleton(newToken)); // setting new token as we have everything settled

    if (logger.isDebugEnabled())
        logger.debug("Successfully moved to new token {}", getLocalTokens().iterator().next());
}
 
Example 8
Source File: StorageService.java    From stratio-cassandra with Apache License 2.0
/**
 * Remove a node that has died, attempting to restore the replica count.
 * If the node is alive, decommission should be attempted.  If decommission
 * fails, then removeToken should be called.  If we fail while trying to
 * restore the replica count, finally forceRemoveCompletion should be
 * called to forcibly remove the node without regard to replica count.
 *
 * @param hostIdString Host ID of the node to remove
 */
public void removeNode(String hostIdString)
{
    InetAddress myAddress = FBUtilities.getBroadcastAddress();
    UUID localHostId = tokenMetadata.getHostId(myAddress);
    UUID hostId = UUID.fromString(hostIdString);
    InetAddress endpoint = tokenMetadata.getEndpointForHostId(hostId);

    if (endpoint == null)
        throw new UnsupportedOperationException("Host ID not found.");

    Collection<Token> tokens = tokenMetadata.getTokens(endpoint);

    if (endpoint.equals(myAddress))
         throw new UnsupportedOperationException("Cannot remove self");

    if (Gossiper.instance.getLiveMembers().contains(endpoint))
        throw new UnsupportedOperationException("Node " + endpoint + " is alive and owns this ID. Use decommission command to remove it from the ring");

    // A leaving endpoint that is dead is already being removed.
    if (tokenMetadata.isLeaving(endpoint))
        logger.warn("Node {} is already being removed, continuing removal anyway", endpoint);

    if (!replicatingNodes.isEmpty())
        throw new UnsupportedOperationException("This node is already processing a removal. Wait for it to complete, or use 'removenode force' if this has failed.");

    // Find the endpoints that are going to become responsible for data
    for (String keyspaceName : Schema.instance.getNonSystemKeyspaces())
    {
        // if the replication factor is 1 the data is lost so we shouldn't wait for confirmation
        if (Keyspace.open(keyspaceName).getReplicationStrategy().getReplicationFactor() == 1)
            continue;

        // get all ranges that change ownership (that is, a node needs
        // to take responsibility for new range)
        Multimap<Range<Token>, InetAddress> changedRanges = getChangedRangesForLeaving(keyspaceName, endpoint);
        IFailureDetector failureDetector = FailureDetector.instance;
        for (InetAddress ep : changedRanges.values())
        {
            if (failureDetector.isAlive(ep))
                replicatingNodes.add(ep);
            else
                logger.warn("Endpoint {} is down and will not receive data for re-replication of {}", ep, endpoint);
        }
    }
    removingNode = endpoint;

    tokenMetadata.addLeavingEndpoint(endpoint);
    PendingRangeCalculatorService.instance.update();

    // the gossiper will handle spoofing this node's state to REMOVING_TOKEN for us
    // we add our own token so other nodes can let us know when they're done
    Gossiper.instance.advertiseRemoving(endpoint, hostId, localHostId);

    // kick off streaming commands
    restoreReplicaCount(endpoint, myAddress);

    // wait for ReplicationFinishedVerbHandler to signal we're done
    while (!replicatingNodes.isEmpty())
    {
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }

    excise(tokens, endpoint);

    // gossiper will indicate the token has left
    Gossiper.instance.advertiseTokenRemoved(endpoint, hostId);

    replicatingNodes.clear();
    removingNode = null;
}
 
Example 9
Source File: ExpiredTraceState.java    From stratio-cassandra with Apache License 2.0
public ExpiredTraceState(UUID sessionId)
{
    super(FBUtilities.getBroadcastAddress(), sessionId);
}
 
Example 10
Source File: MessageOut.java    From stratio-cassandra with Apache License 2.0
private MessageOut(MessagingService.Verb verb, T payload, IVersionedSerializer<T> serializer, Map<String, byte[]> parameters)
{
    this(FBUtilities.getBroadcastAddress(), verb, payload, serializer, parameters);
}
 
Example 11
Source File: DynamicEndpointSnitchTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testSnitch() throws InterruptedException, IOException, ConfigurationException
{
    // do this because SS needs to be initialized before DES can work properly.
    StorageService.instance.initClient(0);
    SimpleSnitch ss = new SimpleSnitch();
    DynamicEndpointSnitch dsnitch = new DynamicEndpointSnitch(ss, String.valueOf(ss.hashCode()));
    InetAddress self = FBUtilities.getBroadcastAddress();
    InetAddress host1 = InetAddress.getByName("127.0.0.2");
    InetAddress host2 = InetAddress.getByName("127.0.0.3");
    InetAddress host3 = InetAddress.getByName("127.0.0.4");
    List<InetAddress> hosts = Arrays.asList(host1, host2, host3);

    // first, make all hosts equal
    setScores(dsnitch, 1, hosts, 10, 10, 10);
    List<InetAddress> order = Arrays.asList(host1, host2, host3);
    assertEquals(order, dsnitch.getSortedListByProximity(self, Arrays.asList(host1, host2, host3)));

    // make host1 a little worse
    setScores(dsnitch, 1, hosts, 20, 10, 10);
    order = Arrays.asList(host2, host3, host1);
    assertEquals(order, dsnitch.getSortedListByProximity(self, Arrays.asList(host1, host2, host3)));

    // make host2 as bad as host1
    setScores(dsnitch, 2, hosts, 15, 20, 10);
    order = Arrays.asList(host3, host1, host2);
    assertEquals(order, dsnitch.getSortedListByProximity(self, Arrays.asList(host1, host2, host3)));

    // make host3 the worst
    setScores(dsnitch, 3, hosts, 10, 10, 30);
    order = Arrays.asList(host1, host2, host3);
    assertEquals(order, dsnitch.getSortedListByProximity(self, Arrays.asList(host1, host2, host3)));

    // make host3 equal to the others
    setScores(dsnitch, 5, hosts, 10, 10, 10);
    order = Arrays.asList(host1, host2, host3);
    assertEquals(order, dsnitch.getSortedListByProximity(self, Arrays.asList(host1, host2, host3)));

    // Tests CASSANDRA-6683 improvements
    // make the scores differ enough from the ideal order that we sort by score; under the old
    // dynamic snitch behavior (where we only compared neighbors), these wouldn't get sorted
    setScores(dsnitch, 20, hosts, 10, 70, 20);
    order = Arrays.asList(host1, host3, host2);
    assertEquals(order, dsnitch.getSortedListByProximity(self, Arrays.asList(host1, host2, host3)));
}
 
Example 12
Source File: StreamTransferTaskTest.java    From stratio-cassandra with Apache License 2.0
@Test
public void testScheduleTimeout() throws Exception
{
    String ks = "Keyspace1";
    String cf = "Standard1";

    InetAddress peer = FBUtilities.getBroadcastAddress();
    StreamSession session = new StreamSession(peer, peer, null, 0);
    ColumnFamilyStore cfs = Keyspace.open(ks).getColumnFamilyStore(cf);

    // create two sstables
    for (int i = 0; i < 2; i++)
    {
        insertData(ks, cf, i, 1);
        cfs.forceBlockingFlush();
    }

    // create streaming task that streams those two sstables
    StreamTransferTask task = new StreamTransferTask(session, cfs.metadata.cfId);
    for (SSTableReader sstable : cfs.getSSTables())
    {
        List<Range<Token>> ranges = new ArrayList<>();
        ranges.add(new Range<>(sstable.first.getToken(), sstable.last.getToken()));
        task.addTransferFile(sstable, sstable.selfRef(), 1, sstable.getPositionsForRanges(ranges), 0);
    }
    assertEquals(2, task.getTotalNumberOfFiles());

    // with a zero delay, the timeout fires right away and completes the first file
    Future f = task.scheduleTimeout(0, 0, TimeUnit.NANOSECONDS);
    f.get();

    // completing the second file before its timeout fires cancels the scheduled timeout, and the task as a whole completes
    f = task.scheduleTimeout(1, 1, TimeUnit.MILLISECONDS);
    task.complete(1);
    try
    {
        f.get();
        Assert.assertTrue(false);
    }
    catch (CancellationException ex)
    {
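        // expected: the timeout future was cancelled when the file completed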
    }
    assertEquals(StreamSession.State.WAIT_COMPLETE, session.state());

    // once all streaming is done, no timeout task should be scheduled
    assertNull(task.scheduleTimeout(1, 1, TimeUnit.SECONDS));
}