Java Code Examples for org.apache.cassandra.dht.Range#contains()
The following examples show how to use org.apache.cassandra.dht.Range#contains().
Each example lists the source file and the project it was taken from.
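Before the project examples, here is a minimal sketch (not taken from any project) of the three contains() forms exercised below: the instance overloads Range#contains(Token) and Range#contains(Range), and the static helper Range.contains(left, right, point) used by the MerkleTree examples. The class name RangeContainsSketch, the sample keys, and the direct instantiation of Murmur3Partitioner are illustrative assumptions against the Cassandra 2.1-era API that stratio-cassandra is based on.

import org.apache.cassandra.dht.Murmur3Partitioner;
import org.apache.cassandra.dht.Range;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.utils.ByteBufferUtil;

public class RangeContainsSketch
{
    public static void main(String[] args)
    {
        // Illustrative partitioner and keys (assumptions); any IPartitioner would do.
        Murmur3Partitioner partitioner = new Murmur3Partitioner();
        Token a = partitioner.getToken(ByteBufferUtil.bytes("apple"));
        Token b = partitioner.getToken(ByteBufferUtil.bytes("banana"));
        Token c = partitioner.getToken(ByteBufferUtil.bytes("cherry"));

        // A Range is start-exclusive and end-inclusive: (left, right].
        Range<Token> range = new Range<Token>(a, b);

        // Instance form: does this range cover a single token?
        boolean coversToken = range.contains(c);

        // Instance form: is another range fully contained in this one?
        boolean coversRange = range.contains(new Range<Token>(a, c));

        // Static form used by the MerkleTree examples: contains(left, right, point).
        boolean staticForm = Range.contains(a, b, c);

        // The actual values depend on where the hashed keys land on the ring.
        System.out.printf("token=%b range=%b static=%b%n", coversToken, coversRange, staticForm);
    }
}

Note that because ranges are start-exclusive, a range whose start and end tokens are equal is meaningless; the splitHelper example below explicitly guards against creating one.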
Example 1
Source File: TokenMetadata.java From stratio-cassandra with Apache License 2.0
/** @return the number of nodes bootstrapping into source's primary range */
public int pendingRangeChanges(InetAddress source)
{
    int n = 0;
    Collection<Range<Token>> sourceRanges = getPrimaryRangesFor(getTokens(source));
    lock.readLock().lock();
    try
    {
        for (Token token : bootstrapTokens.keySet())
            for (Range<Token> range : sourceRanges)
                if (range.contains(token))
                    n++;
    }
    finally
    {
        lock.readLock().unlock();
    }
    return n;
}
Example 2
Source File: MerkleTree.java From stratio-cassandra with Apache License 2.0
TreeRange getHelper(Hashable hashable, Token pleft, Token pright, byte depth, Token t)
{
    if (hashable instanceof Leaf)
    {
        // we've reached a hash: wrap it up and deliver it
        return new TreeRange(this, pleft, pright, depth, hashable);
    }
    // else: node.
    Inner node = (Inner)hashable;
    if (Range.contains(pleft, node.token, t))
        // left child contains token
        return getHelper(node.lchild, pleft, node.token, inc(depth), t);
    // else: right child contains token
    return getHelper(node.rchild, node.token, pright, inc(depth), t);
}
Example 3
Source File: MerkleTree.java From stratio-cassandra with Apache License 2.0
private void invalidateHelper(Hashable hashable, Token pleft, Token t)
{
    hashable.hash(null);
    if (hashable instanceof Leaf)
        return;
    // else: node.
    Inner node = (Inner)hashable;
    if (Range.contains(pleft, node.token, t))
        // left child contains token
        invalidateHelper(node.lchild, pleft, t);
    else
        // right child contains token
        invalidateHelper(node.rchild, node.token, t);
}
Example 4
Source File: MerkleTree.java From stratio-cassandra with Apache License 2.0
/**
 * @throws StopRecursion If no match could be found for the range.
 */
private Hashable findHelper(Hashable current, Range<Token> activeRange, Range<Token> find) throws StopRecursion
{
    if (current instanceof Leaf)
    {
        if (!find.contains(activeRange))
            // we are not fully contained in this range!
            throw new StopRecursion.BadRange();
        return current;
    }
    // else: node.
    Inner node = (Inner)current;
    Range<Token> leftRange = new Range<Token>(activeRange.left, node.token);
    Range<Token> rightRange = new Range<Token>(node.token, activeRange.right);

    if (find.contains(activeRange))
        // this node is fully contained in the range
        return node.calc();

    // else: one of our children contains the range
    if (leftRange.contains(find))
        // left child contains/matches the range
        return findHelper(node.lchild, leftRange, find);
    else if (rightRange.contains(find))
        // right child contains/matches the range
        return findHelper(node.rchild, rightRange, find);
    else
        throw new StopRecursion.BadRange();
}
Example 5
Source File: MerkleTree.java From stratio-cassandra with Apache License 2.0
private Hashable splitHelper(Hashable hashable, Token pleft, Token pright, byte depth, Token t) throws StopRecursion.TooDeep
{
    if (depth >= hashdepth)
        throw new StopRecursion.TooDeep();
    if (hashable instanceof Leaf)
    {
        Token midpoint = partitioner.midpoint(pleft, pright);

        // We should not create a non-sensical range where start and end are the same token (this is non-sensical because
        // ranges are start exclusive). Note that we shouldn't hit that unless the full range is very small or we are fairly deep
        if (midpoint.equals(pleft) || midpoint.equals(pright))
            throw new StopRecursion.TooDeep();

        // split
        size++;
        return new Inner(midpoint, new Leaf(), new Leaf());
    }
    // else: node.

    // recurse on the matching child
    Inner node = (Inner)hashable;
    if (Range.contains(pleft, node.token, t))
        // left child contains token
        node.lchild(splitHelper(node.lchild, pleft, node.token, inc(depth), t));
    else
        // else: right child contains token
        node.rchild(splitHelper(node.rchild, node.token, pright, inc(depth), t));
    return node;
}
Example 6
Source File: RingCache.java From stratio-cassandra with Apache License 2.0
public Range<Token> getRange(ByteBuffer key)
{
    // TODO: naive linear search of the token map
    Token t = partitioner.getToken(key);
    for (Range<Token> range : rangeMap.keySet())
        if (range.contains(t))
            return range;

    throw new RuntimeException("Invalid token information returned by describe_ring: " + rangeMap);
}
Example 7
Source File: ActiveRepairService.java From stratio-cassandra with Apache License 2.0
/**
 * Return all of the neighbors with whom we share the provided range.
 *
 * @param keyspaceName keyspace to repair
 * @param toRepair token to repair
 * @param dataCenters the data centers to involve in the repair
 *
 * @return neighbors with whom we share the provided range
 */
public static Set<InetAddress> getNeighbors(String keyspaceName, Range<Token> toRepair, Collection<String> dataCenters, Collection<String> hosts)
{
    StorageService ss = StorageService.instance;
    Map<Range<Token>, List<InetAddress>> replicaSets = ss.getRangeToAddressMap(keyspaceName);
    Range<Token> rangeSuperSet = null;
    for (Range<Token> range : ss.getLocalRanges(keyspaceName))
    {
        if (range.contains(toRepair))
        {
            rangeSuperSet = range;
            break;
        }
        else if (range.intersects(toRepair))
        {
            throw new IllegalArgumentException("Requested range intersects a local range but is not fully contained in one; this would lead to imprecise repair");
        }
    }
    if (rangeSuperSet == null || !replicaSets.containsKey(rangeSuperSet))
        return Collections.emptySet();

    Set<InetAddress> neighbors = new HashSet<>(replicaSets.get(rangeSuperSet));
    neighbors.remove(FBUtilities.getBroadcastAddress());

    if (dataCenters != null)
    {
        TokenMetadata.Topology topology = ss.getTokenMetadata().cloneOnlyTokenMap().getTopology();
        Set<InetAddress> dcEndpoints = Sets.newHashSet();
        Multimap<String, InetAddress> dcEndpointsMap = topology.getDatacenterEndpoints();
        for (String dc : dataCenters)
        {
            Collection<InetAddress> c = dcEndpointsMap.get(dc);
            if (c != null)
                dcEndpoints.addAll(c);
        }
        return Sets.intersection(neighbors, dcEndpoints);
    }
    else if (hosts != null)
    {
        Set<InetAddress> specifiedHost = new HashSet<>();
        for (final String host : hosts)
        {
            try
            {
                final InetAddress endpoint = InetAddress.getByName(host.trim());
                if (endpoint.equals(FBUtilities.getBroadcastAddress()) || neighbors.contains(endpoint))
                    specifiedHost.add(endpoint);
            }
            catch (UnknownHostException e)
            {
                throw new IllegalArgumentException("Unknown host specified " + host, e);
            }
        }

        if (!specifiedHost.contains(FBUtilities.getBroadcastAddress()))
            throw new IllegalArgumentException("The current host must be part of the repair");

        if (specifiedHost.size() <= 1)
        {
            String msg = "Repair requires at least two endpoints that are neighbours before it can continue, the endpoint used for this repair is %s, " +
                         "other available neighbours are %s but these neighbours were not part of the supplied list of hosts to use during the repair (%s).";
            throw new IllegalArgumentException(String.format(msg, specifiedHost, neighbors, hosts));
        }

        specifiedHost.remove(FBUtilities.getBroadcastAddress());
        return specifiedHost;
    }

    return neighbors;
}
Example 8
Source File: LeveledManifest.java From stratio-cassandra with Apache License 2.0
/**
 * If we do something that makes many levels contain too little data (cleanup, change sstable size) we will "never"
 * compact the high levels.
 *
 * This method finds if we have gone many compaction rounds without doing any high-level compaction, if so
 * we start bringing in one sstable from the highest level until that level is either empty or is doing compaction.
 *
 * @param targetLevel the level the candidates will be compacted into
 * @param candidates the original sstables to compact
 * @return
 */
private Collection<SSTableReader> getOverlappingStarvedSSTables(int targetLevel, Collection<SSTableReader> candidates)
{
    Set<SSTableReader> withStarvedCandidate = new HashSet<>(candidates);

    for (int i = generations.length - 1; i > 0; i--)
        compactionCounter[i]++;
    compactionCounter[targetLevel] = 0;

    if (logger.isDebugEnabled())
    {
        for (int j = 0; j < compactionCounter.length; j++)
            logger.debug("CompactionCounter: {}: {}", j, compactionCounter[j]);
    }

    for (int i = generations.length - 1; i > 0; i--)
    {
        if (getLevelSize(i) > 0)
        {
            if (compactionCounter[i] > NO_COMPACTION_LIMIT)
            {
                // we try to find an sstable that is fully contained within the boundaries we are compacting;
                // say we are compacting 3 sstables: 0->30 in L1 and 0->12, 12->33 in L2
                // this means that we will not create overlap in L2 if we add an sstable
                // contained within 0 -> 33 to the compaction
                RowPosition max = null;
                RowPosition min = null;
                for (SSTableReader candidate : candidates)
                {
                    if (min == null || candidate.first.compareTo(min) < 0)
                        min = candidate.first;
                    if (max == null || candidate.last.compareTo(max) > 0)
                        max = candidate.last;
                }
                Set<SSTableReader> compacting = cfs.getDataTracker().getCompacting();
                Range<RowPosition> boundaries = new Range<>(min, max);
                for (SSTableReader sstable : getLevel(i))
                {
                    Range<RowPosition> r = new Range<RowPosition>(sstable.first, sstable.last);
                    if (boundaries.contains(r) && !compacting.contains(sstable))
                    {
                        logger.info("Adding high-level (L{}) {} to candidates", sstable.getSSTableLevel(), sstable);
                        withStarvedCandidate.add(sstable);
                        return withStarvedCandidate;
                    }
                }
            }
            return candidates;
        }
    }

    return candidates;
}
Example 9
Source File: CompactionManager.java From stratio-cassandra with Apache License 2.0
/**
 * Make sure the {validatedForRepair} are marked for compaction before calling this.
 *
 * Caller must reference the validatedForRepair sstables (via ParentRepairSession.getAndReferenceSSTables(..)).
 *
 * @param cfs
 * @param ranges Ranges that the repair was carried out on
 * @param validatedForRepair SSTables containing the repaired ranges. Should be referenced before passing them.
 * @throws InterruptedException, ExecutionException, IOException
 */
public void performAnticompaction(ColumnFamilyStore cfs,
                                  Collection<Range<Token>> ranges,
                                  Refs<SSTableReader> validatedForRepair,
                                  long repairedAt) throws InterruptedException, ExecutionException, IOException
{
    logger.info("Starting anticompaction for {}.{} on {}/{} sstables", cfs.keyspace.getName(), cfs.getColumnFamilyName(), validatedForRepair.size(), cfs.getSSTables().size());
    logger.debug("Starting anticompaction for ranges {}", ranges);
    Set<SSTableReader> sstables = new HashSet<>(validatedForRepair);
    Set<SSTableReader> mutatedRepairStatuses = new HashSet<>();
    Set<SSTableReader> nonAnticompacting = new HashSet<>();
    Iterator<SSTableReader> sstableIterator = sstables.iterator();
    try
    {
        while (sstableIterator.hasNext())
        {
            SSTableReader sstable = sstableIterator.next();
            for (Range<Token> r : Range.normalize(ranges))
            {
                Range<Token> sstableRange = new Range<>(sstable.first.getToken(), sstable.last.getToken(), sstable.partitioner);
                if (r.contains(sstableRange))
                {
                    logger.info("SSTable {} fully contained in range {}, mutating repairedAt instead of anticompacting", sstable, r);
                    sstable.descriptor.getMetadataSerializer().mutateRepairedAt(sstable.descriptor, repairedAt);
                    sstable.reloadSSTableMetadata();
                    mutatedRepairStatuses.add(sstable);
                    sstableIterator.remove();
                    break;
                }
                else if (!sstableRange.intersects(r))
                {
                    logger.info("SSTable {} ({}) does not intersect repaired range {}, not touching repairedAt.", sstable, sstableRange, r);
                    nonAnticompacting.add(sstable);
                    sstableIterator.remove();
                    break;
                }
                else
                {
                    logger.info("SSTable {} ({}) will be anticompacted on range {}", sstable, sstableRange, r);
                }
            }
        }
        cfs.getDataTracker().notifySSTableRepairedStatusChanged(mutatedRepairStatuses);
        cfs.getDataTracker().unmarkCompacting(Sets.union(nonAnticompacting, mutatedRepairStatuses));
        validatedForRepair.release(Sets.union(nonAnticompacting, mutatedRepairStatuses));
        if (!sstables.isEmpty())
            doAntiCompaction(cfs, ranges, sstables, repairedAt);
    }
    finally
    {
        validatedForRepair.release();
        cfs.getDataTracker().unmarkCompacting(sstables);
    }
    logger.info(String.format("Completed anticompaction successfully"));
}