Java Code Examples for java.util.SortedSet#removeAll()

The following examples show how to use java.util.SortedSet#removeAll(). They are extracted from open source projects; the source project, file, and license are noted above each example where available.
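Before the project examples, a quick illustration of the core semantics: removeAll performs an in-place set difference, ignores argument elements that are not present, and returns true only if the receiving set changed. A minimal, self-contained sketch (class and value names here are illustrative, not from any of the projects below):

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class RemoveAllDemo {
    public static void main(String[] args) {
        SortedSet<Integer> versions = new TreeSet<>(Arrays.asList(47, 107, 335, 340));
        // 999 is not in the set and is simply ignored.
        boolean changed = versions.removeAll(Arrays.asList(107, 340, 999));
        System.out.println(changed);  // true -- the set was modified
        System.out.println(versions); // [47, 335] -- iteration stays in sorted order
    }
}
 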
Example 1
Source Project: netbeans   File: SuiteProperties.java    License: Apache License 2.0
/**
 * Creates a new instance of SuiteProperties
 */
public SuiteProperties(SuiteProject project, AntProjectHelper helper,
        PropertyEvaluator evaluator, Set<NbModuleProject> subModules) {
    super(helper, evaluator);
    this.project = project;
    refresh(subModules);
    this.disabledModules = getArrayProperty(evaluator, DISABLED_MODULES_PROPERTY);
    this.enabledClusters = getArrayProperty(evaluator, ENABLED_CLUSTERS_PROPERTY);
    if (enabledClusters.length == 0 && activePlatform != null) {
        // Compatibility.
        SortedSet<String> clusters = new TreeSet<String>();
        for (ModuleEntry module : activePlatform.getModules()) {
            clusters.add(module.getClusterDirectory().getName());
        }
        clusters.removeAll(Arrays.asList(getArrayProperty(evaluator, DISABLED_CLUSTERS_PROPERTY)));
        enabledClusters = new String[clusters.size()];
        int i = 0;
        for (String cluster : clusters) {
            enabledClusters[i++] = SingleModuleProperties.clusterBaseName(cluster);
        }
    }
    brandingModel = new SuiteBrandingModel(this);
    brandingModel.init();
}
 
Example 2
Source Project: ignite   File: GridConsistentHash.java    License: Apache License 2.0
/**
 * Removes the given nodes and all their replicas from the consistent hash algorithm
 * (if {@code nodes} is {@code null} or empty, this is a no-op).
 *
 * @param nodes Nodes to remove.
 */
public void removeNodes(@Nullable Collection<N> nodes) {
    if (F.isEmpty(nodes))
        return;

    rw.writeLock().lock();

    try {
        if (!this.nodes.removeAll(nodes))
            return;

        for (Iterator<SortedSet<N>> it = circle.values().iterator(); it.hasNext(); ) {
            SortedSet<N> set = it.next();

            if (!set.removeAll(nodes))
                continue;

            if (set.isEmpty())
                it.remove();
        }
    }
    finally {
        rw.writeLock().unlock();
    }
}
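Both removeAll calls above lean on the boolean return value: the method reports whether the set actually changed, which lets the code return early when no tracked node was removed and skip buckets of the circle that were untouched.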
 
Example 3
Source Project: ViaFabric   File: VRViaAPI.java    License: MIT License
@Override
public SortedSet<Integer> getSupportedVersions() {
    SortedSet<Integer> outputSet = new TreeSet<>(ProtocolRegistry.getSupportedVersions());
    outputSet.removeAll(Via.getPlatform().getConf().getBlockedProtocols());

    return outputSet;
}
 
The same implementation appears verbatim in ViaVersion's BukkitViaAPI.java, BungeeViaAPI.java, SpongeViaAPI.java, and VelocityViaAPI.java (all MIT License).
 
Example 4
@Override
public boolean onChange(SortedSet<String> oldLiveNodes, SortedSet<String> newLiveNodes) {
  onChangeLatch.countDown();
  Set<String> old = new HashSet<>(oldLiveNodes);
  old.removeAll(newLiveNodes);
  if (!old.isEmpty()) {
    lostNodes.addAll(old);
  }
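  // Note: removeAll below mutates the caller's newLiveNodes set in place.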
  newLiveNodes.removeAll(oldLiveNodes);
  if (!newLiveNodes.isEmpty()) {
    addedNodes.addAll(newLiveNodes);
  }
  return false;
}
 
Example 5
/**
 * Get the set of Java packages that are referenced by this bundle
 * but not provided by it.
 *
 * @return The set of un-provided referenced Java packages.
 */
public SortedSet<String> getUnprovidedReferencedPackages()
{
  final SortedSet<String> res = new TreeSet<String>(referencedPackages);
  res.removeAll(providedPackages);

  return res;
}
 
Example 6
Source Project: heroic   File: Features.java    License: Apache License 2.0
/**
 * Apply the given feature set.
 *
 * @param featureSet Feature set to apply.
 * @return A new Features instance with the given set applied.
 */
public Features applySet(final FeatureSet featureSet) {
    final SortedSet<Feature> features = new TreeSet<>(this.features());
    features.addAll(featureSet.getEnabled());
    features.removeAll(featureSet.getDisabled());
    return Features.create(features);
}
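Note the order of operations: getDisabled() is applied after getEnabled(), so a feature listed in both sets ends up disabled.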
 
Example 7
Source Project: ohmdb   File: LinksBuilder.java    License: Apache License 2.0
public Links build() {
	SortedSet<Long> rights = links.get(-1L);

	if (rights != null) {
		rights.removeAll(linkedTos);
	}

	return UTILS.linksFrom(links);
}
 
Example 8
@NonNull
SortedSet<Integer> findChannels(@NonNull String countryCode) {
    SortedSet<Integer> results = new TreeSet<>(channels);
    SortedSet<Integer> exclude = channelsToExclude.get(StringUtils.capitalize(countryCode));
    if (exclude != null) {
        results.removeAll(exclude);
    }
    return results;
}
 
Example 9
Source Project: iceberg   File: TestTableMetadata.java    License: Apache License 2.0
@Test
public void testAddPreviousMetadataRemoveOne() {
  long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
  Snapshot previousSnapshot = new BaseSnapshot(
      ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, ImmutableList.of(
      new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
  long currentSnapshotId = System.currentTimeMillis();
  Snapshot currentSnapshot = new BaseSnapshot(
      ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, ImmutableList.of(
      new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));

  List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
  long currentTimestamp = System.currentTimeMillis();
  List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 100,
      "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 90,
      "/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 80,
      "/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 70,
      "/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 60,
      "/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json"));

  MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry(currentTimestamp - 50,
      "/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json");

  TableMetadata base = new TableMetadata(localInput(latestPreviousMetadata.file()), 1, UUID.randomUUID().toString(),
      TEST_LOCATION, 0, currentTimestamp - 50, 3, TEST_SCHEMA, 5,
      ImmutableList.of(SPEC_5), ImmutableMap.of("property", "value"), currentSnapshotId,
      Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog,
      ImmutableList.copyOf(previousMetadataLog));

  previousMetadataLog.add(latestPreviousMetadata);

  TableMetadata metadata = base.replaceProperties(
      ImmutableMap.of(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "5"));

  SortedSet<MetadataLogEntry> removedPreviousMetadata =
      Sets.newTreeSet(Comparator.comparingLong(MetadataLogEntry::timestampMillis));
  removedPreviousMetadata.addAll(base.previousFiles());
  removedPreviousMetadata.removeAll(metadata.previousFiles());

  Assert.assertEquals("Metadata logs should match", previousMetadataLog.subList(1, 6),
      metadata.previousFiles());
  Assert.assertEquals("Removed Metadata logs should contain 1", previousMetadataLog.subList(0, 1),
      ImmutableList.copyOf(removedPreviousMetadata));
}
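One subtlety worth knowing when calling removeAll on a TreeSet built with a custom comparator, as this test does: in JDKs where AbstractSet.removeAll picks its iteration strategy by comparing collection sizes (the long-standing quirk tracked as JDK-6394757), membership is tested through the comparator in one direction but through equals in the other, so a comparator inconsistent with equals can give size-dependent results. A minimal sketch of the asymmetry (names are illustrative):

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class ComparatorRemoveAllQuirk {
    public static void main(String[] args) {
        // A case-insensitive set treats "a" and "A" as the same element.
        SortedSet<String> big = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        big.addAll(Arrays.asList("a", "b", "c"));
        big.removeAll(Arrays.asList("A"));             // set larger than argument: comparator used, "a" removed
        System.out.println(big);                       // [b, c]

        SortedSet<String> small = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        small.add("a");
        small.removeAll(Arrays.asList("A", "X", "Y")); // set smaller: argument's equals used, "a" kept
        System.out.println(small);                     // [a]
    }
}
 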
 
Example 10
Source Project: iceberg   File: TestTableMetadata.java    License: Apache License 2.0
@Test
public void testAddPreviousMetadataRemoveMultiple() {
  long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
  Snapshot previousSnapshot = new BaseSnapshot(
      ops.io(), previousSnapshotId, null, previousSnapshotId, null, null, ImmutableList.of(
      new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));
  long currentSnapshotId = System.currentTimeMillis();
  Snapshot currentSnapshot = new BaseSnapshot(
      ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null, ImmutableList.of(
      new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));

  List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
  long currentTimestamp = System.currentTimeMillis();
  List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 100,
      "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 90,
      "/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 80,
      "/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 70,
      "/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json"));
  previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 60,
      "/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json"));

  MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry(currentTimestamp - 50,
      "/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json");

  TableMetadata base = new TableMetadata(localInput(latestPreviousMetadata.file()), 1, UUID.randomUUID().toString(),
      TEST_LOCATION, 0, currentTimestamp - 50, 3, TEST_SCHEMA, 2,
      ImmutableList.of(SPEC_5), ImmutableMap.of("property", "value"), currentSnapshotId,
      Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog,
      ImmutableList.copyOf(previousMetadataLog));

  previousMetadataLog.add(latestPreviousMetadata);

  TableMetadata metadata = base.replaceProperties(
      ImmutableMap.of(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "2"));

  SortedSet<MetadataLogEntry> removedPreviousMetadata =
      Sets.newTreeSet(Comparator.comparingLong(MetadataLogEntry::timestampMillis));
  removedPreviousMetadata.addAll(base.previousFiles());
  removedPreviousMetadata.removeAll(metadata.previousFiles());

  Assert.assertEquals("Metadata logs should match", previousMetadataLog.subList(4, 6),
      metadata.previousFiles());
  Assert.assertEquals("Removed Metadata logs should contain 4", previousMetadataLog.subList(0, 4),
      ImmutableList.copyOf(removedPreviousMetadata));
}
 
Example 11
/**
 * Optimize the broker if the disk usage of the broker is not within the required range.
 *
 * @param allBrokers a sorted set of all the alive brokers in the cluster.
 * @param toOptimize the broker to optimize
 * @param clusterModel the cluster model
 * @param meanDiskUsage the average disk usage of the cluster
 * @param lowerThreshold the lower limit of the disk usage for a broker
 * @param upperThreshold the upper limit of the disk usage for a broker
 * @param excludedTopics the topics to exclude from movement.
 *
 * @return True if an action has been taken to improve the disk usage of the broker, false when the broker
 * cannot be, or does not need to be, improved further.
 */
private boolean checkAndOptimize(SortedSet<BrokerAndSortedReplicas> allBrokers,
                                 BrokerAndSortedReplicas toOptimize,
                                 ClusterModel clusterModel,
                                 double meanDiskUsage,
                                 double lowerThreshold,
                                 double upperThreshold,
                                 Set<String> excludedTopics) {
  if (LOG.isTraceEnabled()) {
    LOG.trace("Optimizing broker {}. BrokerDiskUsage = {}, meanDiskUsage = {}",
              toOptimize.broker(), dWrap(diskUsage(toOptimize.broker())), dWrap(meanDiskUsage));
  }
  double brokerDiskUsage = diskUsage(toOptimize.broker());
  boolean improved = false;
  List<BrokerAndSortedReplicas> candidateBrokersToSwapWith;

  if (brokerDiskUsage > upperThreshold) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Broker {} disk usage {} is above upper threshold of {}",
                toOptimize.broker().id(), dWrap(brokerDiskUsage), dWrap(upperThreshold));
    }
    // Get the brokers whose disk usage is less than the broker to optimize. The list is in ascending order based on
    // broker disk usage.
    candidateBrokersToSwapWith = new ArrayList<>(allBrokers.headSet(toOptimize));

  } else if (brokerDiskUsage < lowerThreshold) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Broker {} disk usage {} is below lower threshold of {}",
                toOptimize.broker().id(), dWrap(brokerDiskUsage), dWrap(lowerThreshold));
    }
    // Get the brokers whose disk usage is more than the broker to optimize. The list is in descending order based on
    // broker disk usage.
    candidateBrokersToSwapWith = new ArrayList<>(allBrokers.tailSet(toOptimize));
    Collections.reverse(candidateBrokersToSwapWith);
  } else {
    // Nothing to optimize.
    return false;
  }

  for (BrokerAndSortedReplicas toSwapWith : candidateBrokersToSwapWith) {
    if (toSwapWith == toOptimize || Math.abs(diskUsage(toSwapWith) - diskUsage(toOptimize)) < USAGE_EQUALITY_DELTA) {
      continue;
    }
    // Remove the brokers involved in swap from the tree set before swap.
    allBrokers.removeAll(Arrays.asList(toOptimize, toSwapWith));
    try {
      if (swapReplicas(toOptimize, toSwapWith, meanDiskUsage, clusterModel, excludedTopics)) {
        improved = true;
        break;
      }
    } finally {
      // Add the brokers back to the tree set after the swap.
      allBrokers.addAll(Arrays.asList(toOptimize, toSwapWith));
    }
  }
  return improved;
}
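The headSet/tailSet calls above rely on SortedSet's range-view semantics: headSet(e) contains the elements strictly less than e, and tailSet(e) the elements greater than or equal to e, both iterated in ascending order. A minimal sketch (names are illustrative):

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class HeadTailDemo {
    public static void main(String[] args) {
        SortedSet<Double> usages = new TreeSet<>(Arrays.asList(0.2, 0.4, 0.6, 0.8));
        System.out.println(usages.headSet(0.6)); // [0.2, 0.4] -- strictly less than 0.6
        System.out.println(usages.tailSet(0.6)); // [0.6, 0.8] -- greater than or equal to 0.6
    }
}

Because tailSet includes the boundary element itself, the loop above has to skip the toSwapWith == toOptimize case explicitly.
 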
 
Example 12
Source Project: j2objc   File: SortedSetRelation.java    License: Apache License 2.0
/**
 * Utility that could be on SortedSet. Allows faster implementation than
 * what is in Java for doing addAll, removeAll, retainAll, (complementAll).
 * @param a first set
 * @param relation the relation filter, using ANY, CONTAINS, etc.
 * @param b second set
 * @return the new set
 */    
public static <T extends Object & Comparable<? super T>> SortedSet<? extends T> doOperation(SortedSet<T> a, int relation, SortedSet<T> b) {
    // TODO: optimize this as above
    TreeSet<? extends T> temp;
    switch (relation) {
        case ADDALL:
            a.addAll(b); 
            return a;
        case A:
            return a; // no action
        case B:
            a.clear(); 
            a.addAll(b); 
            return a;
        case REMOVEALL: 
            a.removeAll(b);
            return a;
        case RETAINALL: 
            a.retainAll(b);
            return a;
        // the following is the only case not really supported by Java
        // although all could be optimized
        case COMPLEMENTALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.removeAll(b);
            a.addAll(temp);
            return a;
        case B_REMOVEALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.clear();
            a.addAll(temp);
            return a;
        case NONE:
            a.clear();
            return a;
        default: 
            throw new IllegalArgumentException("Relation " + relation + " out of range");
    }
}
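The COMPLEMENTALL branch above computes the symmetric difference of a and b in place (the integer relation constants such as ADDALL and COMPLEMENTALL are assumed to be defined elsewhere in the class and are not shown here). The same three-step dance with plain JDK collections, as an illustrative sketch:

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class SymmetricDifference {
    public static void main(String[] args) {
        SortedSet<String> a = new TreeSet<>(Arrays.asList("x", "y", "z"));
        SortedSet<String> b = new TreeSet<>(Arrays.asList("w", "y", "z"));
        SortedSet<String> onlyInB = new TreeSet<>(b);
        onlyInB.removeAll(a); // elements of b not in a: [w]
        a.removeAll(b);       // elements of a not in b: [x]
        a.addAll(onlyInB);    // union of the two one-sided differences: [w, x]
        System.out.println(a);
    }
}
 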
 
Example 13
/**
 * Update the gathering of sheet pages into scores.
 * <p>
 * The question is which scores we should update.
 * Clearing and rebuilding everything would correctly restore the pageRefs of all scores
 * without loading sheets, but we would lose the logicalPart information of <b>all</b> scores,
 * and rebuilding that would require reloading every valid sheet.
 * <p>
 * A better approach is to check the stub before and the stub after the current one.
 * This may result in the addition or the removal of scores.
 *
 * @param currentStub the current stub
 */
public synchronized void updateScores (SheetStub currentStub)
{
    if (scores.isEmpty()) {
        // Easy: allocate scores based on all book stubs
        createScores();
    } else {
        try {
            // Determine just the impacted pageRefs
            final SortedSet<PageRef> impactedRefs = new TreeSet<>();
            final int stubNumber = currentStub.getNumber();

            if (!currentStub.getPageRefs().isEmpty()) {
                // Look in stub before current stub?
                final PageRef firstPageRef = currentStub.getFirstPageRef();

                if (!firstPageRef.isMovementStart()) {
                    final SheetStub prevStub = (stubNumber > 1) ? stubs.get(stubNumber - 2)
                            : null;

                    if (prevStub != null) {
                        final PageRef prevPageRef = prevStub.getLastPageRef();

                        if (prevPageRef != null) {
                            impactedRefs.addAll(getScore(prevPageRef).getPageRefs()); // NPE
                        }
                    }
                }

                // Take pages of current stub
                impactedRefs.addAll(currentStub.getPageRefs());

                // Look in stub after current stub?
                final SheetStub nextStub = (stubNumber < stubs.size()) ? stubs.get(stubNumber)
                        : null;

                if (nextStub != null) {
                    final PageRef nextPageRef = nextStub.getFirstPageRef();

                    if ((nextPageRef != null) && !nextPageRef.isMovementStart()) {
                        impactedRefs.addAll(getScore(nextPageRef).getPageRefs()); // NPE
                    }
                }
            }

            // Determine and remove the impacted scores
            final List<Score> impactedScores = scoresOf(impactedRefs);
            Integer scoreIndex = null;

            if (!impactedScores.isEmpty()) {
                scoreIndex = scores.indexOf(impactedScores.get(0));
            } else {
                for (Score score : scores) {
                    if (score.getFirstPageRef().getSheetNumber() > stubNumber) {
                        scoreIndex = scores.indexOf(score);

                        break;
                    }
                }
            }

            if (scoreIndex == null) {
                scoreIndex = scores.size();
            }

            logger.debug("Impacted pages:{} scores:{}", impactedRefs, impactedScores);
            scores.removeAll(impactedScores);

            // Insert new score(s) to replace the impacted one(s)?
            if (!currentStub.isValid()) {
                impactedRefs.removeAll(currentStub.getPageRefs());
            }

            insertScores(currentStub, impactedRefs, scoreIndex);
        } catch (Exception ex) {
            // This seems to result from inconsistency between scores info and stubs info.
            // Initial cause can be a sheet not marshalled (because of use by another process)
            // followed by a reload of now non-consistent book.xml

            // Workaround: Clear all scores and rebuild them from stubs info
            // (Doing so, we may lose logical-part information)
            logger.warn("Error updating scores " + ex, ex);
            logger.warn("Rebuilding them from stubs info.");
            scores.clear();
            createScores();
        }
    }
}
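The // NPE markers flag calls that can throw when score and stub information have drifted out of sync; the enclosing try deliberately catches that case and falls back to a full rebuild from the stubs.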
 
Example 14
/** Validates that all headers contain the same set of genotyped samples and that files are in order by position of first record. */
private static void assertSameSamplesAndValidOrdering(final List<Path> inputFiles, final boolean disableContigOrderingCheck) {
    final VCFHeader firstHeader = getHeader(inputFiles.get(0));
    final SAMSequenceDictionary dict = firstHeader.getSequenceDictionary();
    if ( dict == null) {
        throw new UserException.BadInput("The first VCF specified is missing the required sequence dictionary. " +
                                                 "This is required to perform validation.  You can skip this validation " +
                                                 "using --"+IGNORE_SAFETY_CHECKS_LONG_NAME +" but ignoring safety checks " +
                                                 "can result in invalid output.");
    }
    final VariantContextComparator comparator = new VariantContextComparator(dict);
    final List<String> samples = firstHeader.getGenotypeSamples();

    Path lastFile = null;
    VariantContext lastContext = null;

    for (final Path f : inputFiles) {
        final FeatureReader<VariantContext> in = getReaderFromVCFUri(f, 0);
        final VCFHeader header = (VCFHeader)in.getHeader();
        dict.assertSameDictionary(header.getSequenceDictionary());
        final List<String> theseSamples = header.getGenotypeSamples();

        if (!samples.equals(theseSamples)) {
            final SortedSet<String> s1 = new TreeSet<>(samples);
            final SortedSet<String> s2 = new TreeSet<>(theseSamples);
            s1.removeAll(theseSamples);
            s2.removeAll(samples);

            throw new IllegalArgumentException("VCFs do not have identical sample lists." +
                    " Samples unique to first file: " + s1 + ". Samples unique to " + f.toUri().toString() + ": " + s2 + ".");
        }

        try(final CloseableIterator<VariantContext> variantIterator = in.iterator()) {
            if (variantIterator.hasNext()) {
                final VariantContext currentContext = variantIterator.next();
                if (lastContext != null) {
                    if ( disableContigOrderingCheck ) {
                        if ( lastContext.getContig().equals(currentContext.getContig()) && lastContext.getStart() >= currentContext.getStart() ) {
                            throw new IllegalArgumentException(
                                    "First record in file " + f.toUri().toString() + " is not after first record in " +
                                            "previous file " + lastFile.toUri().toString());
                        }
                    }
                    else {
                        if ( comparator.compare(lastContext, currentContext) >= 0 ) {
                            throw new IllegalArgumentException(
                                    "First record in file " + f.toUri().toString() + " is not after first record in " +
                                            "previous file " + lastFile.toUri().toString());
                        }
                    }
                }

                lastContext = currentContext;
                lastFile = f;
            }
        } catch (final IOException e) {
            throw new UserException.CouldNotReadInputFile(f, e.getMessage(), e);
        }

        CloserUtil.close(in);
    }
}
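The two one-sided removeAll calls compute both asymmetric differences of the sample sets, so the exception message can name exactly which samples are unique to each file.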
 