Java Code Examples for java.util.SortedSet#removeAll()
The following examples show how to use java.util.SortedSet#removeAll().
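Before the project examples, here is a minimal, self-contained sketch of the call itself (the class name and values are illustrative, not taken from any project below). removeAll(Collection) mutates the receiving set in place and returns true only if the set actually changed, a return value that several of the examples below branch on.

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class RemoveAllDemo {
    public static void main(String[] args) {
        SortedSet<Integer> versions = new TreeSet<>(Arrays.asList(5, 47, 107, 340, 754));
        SortedSet<Integer> blocked = new TreeSet<>(Arrays.asList(47, 107));

        boolean changed = versions.removeAll(blocked); // mutates the receiver
        System.out.println(changed);  // true -- the set was modified
        System.out.println(versions); // [5, 340, 754] -- still sorted
    }
}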
Example 1
Source File: GridConsistentHash.java From ignite with Apache License 2.0
/**
 * Removes given nodes and all their replicas from consistent hash algorithm
 * (if nodes are {@code null} or empty, then no-op).
 *
 * @param nodes Nodes to remove.
 */
public void removeNodes(@Nullable Collection<N> nodes) {
    if (F.isEmpty(nodes))
        return;

    rw.writeLock().lock();

    try {
        if (!this.nodes.removeAll(nodes))
            return;

        for (Iterator<SortedSet<N>> it = circle.values().iterator(); it.hasNext(); ) {
            SortedSet<N> set = it.next();

            if (!set.removeAll(nodes))
                continue;

            if (set.isEmpty())
                it.remove();
        }
    }
    finally {
        rw.writeLock().unlock();
    }
}
Example 2
Source File: SuiteProperties.java From netbeans with Apache License 2.0
/**
 * Creates a new instance of SuiteProperties.
 */
public SuiteProperties(SuiteProject project, AntProjectHelper helper,
        PropertyEvaluator evaluator, Set<NbModuleProject> subModules) {
    super(helper, evaluator);
    this.project = project;
    refresh(subModules);
    this.disabledModules = getArrayProperty(evaluator, DISABLED_MODULES_PROPERTY);
    this.enabledClusters = getArrayProperty(evaluator, ENABLED_CLUSTERS_PROPERTY);
    if (enabledClusters.length == 0 && activePlatform != null) {
        // Compatibility.
        SortedSet<String> clusters = new TreeSet<String>();
        for (ModuleEntry module : activePlatform.getModules()) {
            clusters.add(module.getClusterDirectory().getName());
        }
        clusters.removeAll(Arrays.asList(getArrayProperty(evaluator, DISABLED_CLUSTERS_PROPERTY)));
        enabledClusters = new String[clusters.size()];
        int i = 0;
        for (String cluster : clusters) {
            enabledClusters[i++] = SingleModuleProperties.clusterBaseName(cluster);
        }
    }
    brandingModel = new SuiteBrandingModel(this);
    brandingModel.init();
}
Example 3
Source File: VelocityViaAPI.java From ViaVersion with MIT License
@Override
public SortedSet<Integer> getSupportedVersions() {
    SortedSet<Integer> outputSet = new TreeSet<>(ProtocolRegistry.getSupportedVersions());
    outputSet.removeAll(Via.getPlatform().getConf().getBlockedProtocols());
    return outputSet;
}
Example 4
Source File: WiFiChannelCountryGHZ5.java From WiFiAnalyzer with GNU General Public License v3.0
@NonNull
SortedSet<Integer> findChannels(@NonNull String countryCode) {
    SortedSet<Integer> results = new TreeSet<>(channels);
    SortedSet<Integer> exclude = channelsToExclude.get(StringUtils.capitalize(countryCode));
    if (exclude != null) {
        results.removeAll(exclude);
    }
    return results;
}
Example 5
Source File: LinksBuilder.java From ohmdb with Apache License 2.0
public Links build() {
    SortedSet<Long> rights = links.get(-1L);
    if (rights != null) {
        rights.removeAll(linkedTos);
    }
    return UTILS.linksFrom(links);
}
Example 6
Source File: Features.java From heroic with Apache License 2.0
/**
 * Apply the given feature set.
 *
 * @param featureSet Feature set to apply.
 * @return A new Features instance with the given set applied.
 */
public Features applySet(final FeatureSet featureSet) {
    final SortedSet<Feature> features = new TreeSet<>(this.features());
    features.addAll(featureSet.getEnabled());
    features.removeAll(featureSet.getDisabled());
    return Features.create(features);
}
Example 7
Source File: BundlePackagesInfo.java From knopflerfish.org with BSD 3-Clause "New" or "Revised" License
/**
 * Get the set of Java packages that are referenced by this bundle
 * but not provided by it.
 *
 * @return The set of un-provided referenced Java packages.
 */
public SortedSet<String> getUnprovidedReferencedPackages() {
    final SortedSet<String> res = new TreeSet<String>(referencedPackages);
    res.removeAll(providedPackages);
    return res;
}
Example 8
Source File: NodeMarkersRegistrationTest.java From lucene-solr with Apache License 2.0
@Override
public boolean onChange(SortedSet<String> oldLiveNodes, SortedSet<String> newLiveNodes) {
    onChangeLatch.countDown();
    Set<String> old = new HashSet<>(oldLiveNodes);
    old.removeAll(newLiveNodes);
    if (!old.isEmpty()) {
        lostNodes.addAll(old);
    }
    newLiveNodes.removeAll(oldLiveNodes);
    if (!newLiveNodes.isEmpty()) {
        addedNodes.addAll(newLiveNodes);
    }
    return false;
}
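One detail worth noticing in this listener: it copies oldLiveNodes before the first removeAll but mutates the newLiveNodes argument in place for the second difference. A non-destructive variant (a sketch, not Solr code) copies both sides:

import java.util.SortedSet;
import java.util.TreeSet;

public final class SetDiff {
    /** Returns the elements of left that are not in right, mutating neither argument. */
    public static <T> SortedSet<T> difference(SortedSet<T> left, SortedSet<T> right) {
        SortedSet<T> result = new TreeSet<>(left); // copy, so the input is untouched
        result.removeAll(right);
        return result;
    }
}

// Usage in a listener like the one above:
//   lostNodes.addAll(SetDiff.difference(oldLiveNodes, newLiveNodes));
//   addedNodes.addAll(SetDiff.difference(newLiveNodes, oldLiveNodes));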
Example 9
Source File: VRViaAPI.java From ViaFabric with MIT License
@Override
public SortedSet<Integer> getSupportedVersions() {
    SortedSet<Integer> outputSet = new TreeSet<>(ProtocolRegistry.getSupportedVersions());
    outputSet.removeAll(Via.getPlatform().getConf().getBlockedProtocols());
    return outputSet;
}
Example 10
Source File: SpongeViaAPI.java From ViaVersion with MIT License
@Override
public SortedSet<Integer> getSupportedVersions() {
    SortedSet<Integer> outputSet = new TreeSet<>(ProtocolRegistry.getSupportedVersions());
    outputSet.removeAll(Via.getPlatform().getConf().getBlockedProtocols());
    return outputSet;
}
Example 11
Source File: BungeeViaAPI.java From ViaVersion with MIT License
@Override
public SortedSet<Integer> getSupportedVersions() {
    SortedSet<Integer> outputSet = new TreeSet<>(ProtocolRegistry.getSupportedVersions());
    outputSet.removeAll(Via.getPlatform().getConf().getBlockedProtocols());
    return outputSet;
}
Example 12
Source File: BukkitViaAPI.java From ViaVersion with MIT License
@Override
public SortedSet<Integer> getSupportedVersions() {
    SortedSet<Integer> outputSet = new TreeSet<>(ProtocolRegistry.getSupportedVersions());
    outputSet.removeAll(Via.getPlatform().getConf().getBlockedProtocols());
    return outputSet;
}
Example 13
Source File: GatherVcfsCloud.java From gatk with BSD 3-Clause "New" or "Revised" License
/**
 * Validates that all headers contain the same set of genotyped samples and that
 * files are in order by position of first record.
 */
private static void assertSameSamplesAndValidOrdering(final List<Path> inputFiles,
                                                      final boolean disableContigOrderingCheck) {
    final VCFHeader firstHeader = getHeader(inputFiles.get(0));
    final SAMSequenceDictionary dict = firstHeader.getSequenceDictionary();
    if (dict == null) {
        throw new UserException.BadInput("The first VCF specified is missing the required sequence dictionary. "
                + "This is required to perform validation. You can skip this validation "
                + "using --" + IGNORE_SAFETY_CHECKS_LONG_NAME + " but ignoring safety checks "
                + "can result in invalid output.");
    }
    final VariantContextComparator comparator = new VariantContextComparator(dict);
    final List<String> samples = firstHeader.getGenotypeSamples();

    Path lastFile = null;
    VariantContext lastContext = null;
    for (final Path f : inputFiles) {
        final FeatureReader<VariantContext> in = getReaderFromVCFUri(f, 0);
        final VCFHeader header = (VCFHeader) in.getHeader();
        dict.assertSameDictionary(header.getSequenceDictionary());

        final List<String> theseSamples = header.getGenotypeSamples();
        if (!samples.equals(theseSamples)) {
            final SortedSet<String> s1 = new TreeSet<>(samples);
            final SortedSet<String> s2 = new TreeSet<>(theseSamples);
            s1.removeAll(theseSamples);
            s2.removeAll(samples);
            throw new IllegalArgumentException("VCFs do not have identical sample lists."
                    + " Samples unique to first file: " + s1 + ". Samples unique to "
                    + f.toUri().toString() + ": " + s2 + ".");
        }

        try (final CloseableIterator<VariantContext> variantIterator = in.iterator()) {
            if (variantIterator.hasNext()) {
                final VariantContext currentContext = variantIterator.next();
                if (lastContext != null) {
                    if (disableContigOrderingCheck) {
                        if (lastContext.getContig().equals(currentContext.getContig())
                                && lastContext.getStart() >= currentContext.getStart()) {
                            throw new IllegalArgumentException(
                                    "First record in file " + f.toUri().toString()
                                            + " is not after first record in "
                                            + "previous file " + lastFile.toUri().toString());
                        }
                    } else {
                        if (comparator.compare(lastContext, currentContext) >= 0) {
                            throw new IllegalArgumentException(
                                    "First record in file " + f.toUri().toString()
                                            + " is not after first record in "
                                            + "previous file " + lastFile.toUri().toString());
                        }
                    }
                }
                lastContext = currentContext;
                lastFile = f;
            }
        } catch (final IOException e) {
            throw new UserException.CouldNotReadInputFile(f, e.getMessage(), e);
        }
        CloserUtil.close(in);
    }
}
Example 14
Source File: Book.java From audiveris with GNU Affero General Public License v3.0
/**
 * Update the gathering of sheet pages into scores.
 * <p>
 * The question is which scores we should update.
 * Clearing and rebuilding everything is OK for the pageRefs of all scores, without
 * loading sheets. But doing so, we lose the logicalPart information of <b>all</b>
 * scores, and to rebuild it we would need to reload all valid sheets.
 * <p>
 * A better approach is to check the stub before and the stub after the current one.
 * This may result in the addition or the removal of scores.
 *
 * @param currentStub the current stub
 */
public synchronized void updateScores (SheetStub currentStub)
{
    if (scores.isEmpty()) {
        // Easy: allocate scores based on all book stubs
        createScores();
    } else {
        try {
            // Determine just the impacted pageRefs
            final SortedSet<PageRef> impactedRefs = new TreeSet<>();
            final int stubNumber = currentStub.getNumber();

            if (!currentStub.getPageRefs().isEmpty()) {
                // Look in stub before current stub?
                final PageRef firstPageRef = currentStub.getFirstPageRef();

                if (!firstPageRef.isMovementStart()) {
                    final SheetStub prevStub = (stubNumber > 1) ? stubs.get(stubNumber - 2) : null;

                    if (prevStub != null) {
                        final PageRef prevPageRef = prevStub.getLastPageRef();

                        if (prevPageRef != null) {
                            impactedRefs.addAll(getScore(prevPageRef).getPageRefs()); // NPE
                        }
                    }
                }

                // Take pages of current stub
                impactedRefs.addAll(currentStub.getPageRefs());

                // Look in stub after current stub?
                final SheetStub nextStub = (stubNumber < stubs.size()) ? stubs.get(stubNumber) : null;

                if (nextStub != null) {
                    final PageRef nextPageRef = nextStub.getFirstPageRef();

                    if ((nextPageRef != null) && !nextPageRef.isMovementStart()) {
                        impactedRefs.addAll(getScore(nextPageRef).getPageRefs()); // NPE
                    }
                }
            }

            // Determine and remove the impacted scores
            final List<Score> impactedScores = scoresOf(impactedRefs);
            Integer scoreIndex = null;

            if (!impactedScores.isEmpty()) {
                scoreIndex = scores.indexOf(impactedScores.get(0));
            } else {
                for (Score score : scores) {
                    if (score.getFirstPageRef().getSheetNumber() > stubNumber) {
                        scoreIndex = scores.indexOf(score);
                        break;
                    }
                }
            }

            if (scoreIndex == null) {
                scoreIndex = scores.size();
            }

            logger.debug("Impacted pages:{} scores:{}", impactedRefs, impactedScores);
            scores.removeAll(impactedScores);

            // Insert new score(s) to replace the impacted one(s)?
            if (!currentStub.isValid()) {
                impactedRefs.removeAll(currentStub.getPageRefs());
            }

            insertScores(currentStub, impactedRefs, scoreIndex);
        } catch (Exception ex) {
            // This seems to result from inconsistency between scores info and stubs info.
            // Initial cause can be a sheet not marshalled (because of use by another process)
            // followed by a reload of a now non-consistent book.xml.
            // Workaround: clear all scores and rebuild them from stubs info
            // (doing so, we may lose logical-part information).
            logger.warn("Error updating scores " + ex, ex);
            logger.warn("Rebuilding them from stubs info.");
            scores.clear();
            createScores();
        }
    }
}
Example 15
Source File: SortedSetRelation.java From fitnotifications with Apache License 2.0
/**
 * Utility that could be on SortedSet. Allows faster implementation than
 * what is in Java for doing addAll, removeAll, retainAll, (complementAll).
 * @param a first set
 * @param relation the relation filter, using ANY, CONTAINS, etc.
 * @param b second set
 * @return the new set
 */
public static <T extends Object & Comparable<? super T>> SortedSet<? extends T>
        doOperation(SortedSet<T> a, int relation, SortedSet<T> b) {
    // TODO: optimize this as above
    TreeSet<? extends T> temp;
    switch (relation) {
        case ADDALL:
            a.addAll(b);
            return a;
        case A:
            return a; // no action
        case B:
            a.clear();
            a.addAll(b);
            return a;
        case REMOVEALL:
            a.removeAll(b);
            return a;
        case RETAINALL:
            a.retainAll(b);
            return a;
        // the following is the only case not really supported by Java,
        // although all could be optimized
        case COMPLEMENTALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.removeAll(b);
            a.addAll(temp);
            return a;
        case B_REMOVEALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.clear();
            a.addAll(temp);
            return a;
        case NONE:
            a.clear();
            return a;
        default:
            throw new IllegalArgumentException("Relation " + relation + " out of range");
    }
}
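A brief usage sketch for the utility above. The relation constants are the ones visible in the switch cases; this assumes they are publicly accessible (as they are in ICU4J, from which this class derives), and the import of SortedSetRelation is omitted since its package differs per project. Note that doOperation mutates and returns its first argument.

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class DoOperationDemo {
    public static void main(String[] args) {
        SortedSet<String> a = new TreeSet<>(Arrays.asList("x", "y", "z"));
        SortedSet<String> b = new TreeSet<>(Arrays.asList("w", "y", "z"));

        // COMPLEMENTALL yields the symmetric difference: elements that occur
        // in exactly one of the two sets, built from two removeAll calls.
        SortedSet<? extends String> sym =
                SortedSetRelation.doOperation(a, SortedSetRelation.COMPLEMENTALL, b);
        System.out.println(sym); // [w, x]
    }
}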
Example 16
Source File: KafkaAssignerDiskUsageDistributionGoal.java From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Optimize the broker if the disk usage of the broker is not within the required range.
 *
 * @param allBrokers a sorted set of all the alive brokers in the cluster.
 * @param toOptimize the broker to optimize.
 * @param clusterModel the cluster model.
 * @param meanDiskUsage the average disk usage of the cluster.
 * @param lowerThreshold the lower limit of the disk usage for a broker.
 * @param upperThreshold the upper limit of the disk usage for a broker.
 * @param excludedTopics the topics to exclude from movement.
 * @return True if an action has been taken to improve the disk usage of the broker,
 *         false when a broker cannot or does not need to be improved further.
 */
private boolean checkAndOptimize(SortedSet<BrokerAndSortedReplicas> allBrokers,
                                 BrokerAndSortedReplicas toOptimize,
                                 ClusterModel clusterModel,
                                 double meanDiskUsage,
                                 double lowerThreshold,
                                 double upperThreshold,
                                 Set<String> excludedTopics) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Optimizing broker {}. BrokerDiskUsage = {}, meanDiskUsage = {}",
                  toOptimize.broker(), dWrap(diskUsage(toOptimize.broker())), dWrap(meanDiskUsage));
    }
    double brokerDiskUsage = diskUsage(toOptimize.broker());
    boolean improved = false;
    List<BrokerAndSortedReplicas> candidateBrokersToSwapWith;

    if (brokerDiskUsage > upperThreshold) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Broker {} disk usage {} is above upper threshold of {}",
                      toOptimize.broker().id(), dWrap(brokerDiskUsage), dWrap(upperThreshold));
        }
        // Get the brokers whose disk usage is less than the broker to optimize.
        // The list is in ascending order based on broker disk usage.
        candidateBrokersToSwapWith = new ArrayList<>(allBrokers.headSet(toOptimize));
    } else if (brokerDiskUsage < lowerThreshold) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Broker {} disk usage {} is below lower threshold of {}",
                      toOptimize.broker().id(), dWrap(brokerDiskUsage), dWrap(lowerThreshold));
        }
        // Get the brokers whose disk usage is more than the broker to optimize.
        // The list is in descending order based on broker disk usage.
        candidateBrokersToSwapWith = new ArrayList<>(allBrokers.tailSet(toOptimize));
        Collections.reverse(candidateBrokersToSwapWith);
    } else {
        // Nothing to optimize.
        return false;
    }

    for (BrokerAndSortedReplicas toSwapWith : candidateBrokersToSwapWith) {
        if (toSwapWith == toOptimize
                || Math.abs(diskUsage(toSwapWith) - diskUsage(toOptimize)) < USAGE_EQUALITY_DELTA) {
            continue;
        }
        // Remove the brokers involved in swap from the tree set before swap.
        allBrokers.removeAll(Arrays.asList(toOptimize, toSwapWith));
        try {
            if (swapReplicas(toOptimize, toSwapWith, meanDiskUsage, clusterModel, excludedTopics)) {
                improved = true;
                break;
            }
        } finally {
            // Add the brokers back to the tree set after the swap.
            allBrokers.addAll(Arrays.asList(toOptimize, toSwapWith));
        }
    }
    return improved;
}
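The removeAll/addAll dance around swapReplicas is the point to notice here: a TreeSet positions an element once, at insertion time, so mutating state that the comparator reads while the element is still in the set silently corrupts the set's ordering. A generic sketch of the pattern (the Node class and its load field are hypothetical, not Cruise Control types):

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

public class MutableKeyDemo {
    static final class Node {
        final int id;
        double load; // the comparator reads this mutable field
        Node(int id, double load) { this.id = id; this.load = load; }
    }

    public static void main(String[] args) {
        SortedSet<Node> byLoad = new TreeSet<>(
                Comparator.comparingDouble((Node n) -> n.load).thenComparingInt(n -> n.id));
        byLoad.add(new Node(1, 3.0));
        byLoad.add(new Node(2, 9.0));

        Node n = byLoad.first();
        byLoad.remove(n);    // take it out BEFORE changing the sort key
        try {
            n.load += 10.0;  // mutate the key while the node is outside the set
        } finally {
            byLoad.add(n);   // re-insert so it lands at its new position
        }
        System.out.println(byLoad.first().id); // 2 -- ordering stays correct
    }
}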
Example 17
Source File: SortedSetRelation.java From trekarta with GNU General Public License v3.0
/**
 * Utility that could be on SortedSet. Allows faster implementation than
 * what is in Java for doing addAll, removeAll, retainAll, (complementAll).
 * @param a first set
 * @param relation the relation filter, using ANY, CONTAINS, etc.
 * @param b second set
 * @return the new set
 */
public static <T extends Object & Comparable<? super T>> SortedSet<? extends T>
        doOperation(SortedSet<T> a, int relation, SortedSet<T> b) {
    // TODO: optimize this as above
    TreeSet<? extends T> temp;
    switch (relation) {
        case ADDALL:
            a.addAll(b);
            return a;
        case A:
            return a; // no action
        case B:
            a.clear();
            a.addAll(b);
            return a;
        case REMOVEALL:
            a.removeAll(b);
            return a;
        case RETAINALL:
            a.retainAll(b);
            return a;
        // the following is the only case not really supported by Java,
        // although all could be optimized
        case COMPLEMENTALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.removeAll(b);
            a.addAll(temp);
            return a;
        case B_REMOVEALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.clear();
            a.addAll(temp);
            return a;
        case NONE:
            a.clear();
            return a;
        default:
            throw new IllegalArgumentException("Relation " + relation + " out of range");
    }
}
Example 18
Source File: TestTableMetadata.java From iceberg with Apache License 2.0
@Test
public void testAddPreviousMetadataRemoveMultiple() {
    long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
    Snapshot previousSnapshot = new BaseSnapshot(
        ops.io(), previousSnapshotId, null, previousSnapshotId, null, null,
        ImmutableList.of(
            new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));

    long currentSnapshotId = System.currentTimeMillis();
    Snapshot currentSnapshot = new BaseSnapshot(
        ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null,
        ImmutableList.of(
            new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));

    List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
    long currentTimestamp = System.currentTimeMillis();
    List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 100,
        "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 90,
        "/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 80,
        "/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 70,
        "/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 60,
        "/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json"));
    MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry(currentTimestamp - 50,
        "/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json");

    TableMetadata base = new TableMetadata(localInput(latestPreviousMetadata.file()), 1,
        UUID.randomUUID().toString(), TEST_LOCATION, 0, currentTimestamp - 50, 3,
        TEST_SCHEMA, 2, ImmutableList.of(SPEC_5), ImmutableMap.of("property", "value"),
        currentSnapshotId, Arrays.asList(previousSnapshot, currentSnapshot),
        reversedSnapshotLog, ImmutableList.copyOf(previousMetadataLog));

    previousMetadataLog.add(latestPreviousMetadata);

    TableMetadata metadata = base.replaceProperties(
        ImmutableMap.of(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "2"));

    SortedSet<MetadataLogEntry> removedPreviousMetadata =
        Sets.newTreeSet(Comparator.comparingLong(MetadataLogEntry::timestampMillis));
    removedPreviousMetadata.addAll(base.previousFiles());
    removedPreviousMetadata.removeAll(metadata.previousFiles());

    Assert.assertEquals("Metadata logs should match",
        previousMetadataLog.subList(4, 6), metadata.previousFiles());
    Assert.assertEquals("Removed Metadata logs should contain 4",
        previousMetadataLog.subList(0, 4), ImmutableList.copyOf(removedPreviousMetadata));
}
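The copy-then-removeAll idiom in this test is the standard way to compute "what was dropped" without mutating either source collection. One general caveat worth knowing when the TreeSet uses a custom comparator, as it does here: removeAll is inherited from AbstractSet, whose documented strategy iterates whichever collection is smaller, so matching happens via the set's comparator in one case and via the argument's equals in the other. A minimal sketch of the asymmetry (illustrative values, not from the Iceberg test):

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class RemoveAllCaveat {
    public static void main(String[] args) {
        SortedSet<String> set = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        set.addAll(Arrays.asList("a", "b", "c"));

        // Set (size 3) larger than argument (size 1): each argument element is
        // removed via the set's comparator, so "A" matches "a".
        set.removeAll(Arrays.asList("A"));
        System.out.println(set); // [b, c]

        // Set (size 2) not larger than argument (size 3): the set is iterated
        // and the argument's equals() decides, so "B" does NOT match "b".
        set.removeAll(Arrays.asList("B", "X", "Y"));
        System.out.println(set); // [b, c] -- unchanged
    }
}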
Example 19
Source File: SortedSetRelation.java From j2objc with Apache License 2.0
/**
 * Utility that could be on SortedSet. Allows faster implementation than
 * what is in Java for doing addAll, removeAll, retainAll, (complementAll).
 * @param a first set
 * @param relation the relation filter, using ANY, CONTAINS, etc.
 * @param b second set
 * @return the new set
 */
public static <T extends Object & Comparable<? super T>> SortedSet<? extends T>
        doOperation(SortedSet<T> a, int relation, SortedSet<T> b) {
    // TODO: optimize this as above
    TreeSet<? extends T> temp;
    switch (relation) {
        case ADDALL:
            a.addAll(b);
            return a;
        case A:
            return a; // no action
        case B:
            a.clear();
            a.addAll(b);
            return a;
        case REMOVEALL:
            a.removeAll(b);
            return a;
        case RETAINALL:
            a.retainAll(b);
            return a;
        // the following is the only case not really supported by Java,
        // although all could be optimized
        case COMPLEMENTALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.removeAll(b);
            a.addAll(temp);
            return a;
        case B_REMOVEALL:
            temp = new TreeSet<T>(b);
            temp.removeAll(a);
            a.clear();
            a.addAll(temp);
            return a;
        case NONE:
            a.clear();
            return a;
        default:
            throw new IllegalArgumentException("Relation " + relation + " out of range");
    }
}
Example 20
Source File: TestTableMetadata.java From iceberg with Apache License 2.0
@Test
public void testAddPreviousMetadataRemoveOne() {
    long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
    Snapshot previousSnapshot = new BaseSnapshot(
        ops.io(), previousSnapshotId, null, previousSnapshotId, null, null,
        ImmutableList.of(
            new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), SPEC_5.specId())));

    long currentSnapshotId = System.currentTimeMillis();
    Snapshot currentSnapshot = new BaseSnapshot(
        ops.io(), currentSnapshotId, previousSnapshotId, currentSnapshotId, null, null,
        ImmutableList.of(
            new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), SPEC_5.specId())));

    List<HistoryEntry> reversedSnapshotLog = Lists.newArrayList();
    long currentTimestamp = System.currentTimeMillis();
    List<MetadataLogEntry> previousMetadataLog = Lists.newArrayList();
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 100,
        "/tmp/000001-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 90,
        "/tmp/000002-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 80,
        "/tmp/000003-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 70,
        "/tmp/000004-" + UUID.randomUUID().toString() + ".metadata.json"));
    previousMetadataLog.add(new MetadataLogEntry(currentTimestamp - 60,
        "/tmp/000005-" + UUID.randomUUID().toString() + ".metadata.json"));
    MetadataLogEntry latestPreviousMetadata = new MetadataLogEntry(currentTimestamp - 50,
        "/tmp/000006-" + UUID.randomUUID().toString() + ".metadata.json");

    TableMetadata base = new TableMetadata(localInput(latestPreviousMetadata.file()), 1,
        UUID.randomUUID().toString(), TEST_LOCATION, 0, currentTimestamp - 50, 3,
        TEST_SCHEMA, 5, ImmutableList.of(SPEC_5), ImmutableMap.of("property", "value"),
        currentSnapshotId, Arrays.asList(previousSnapshot, currentSnapshot),
        reversedSnapshotLog, ImmutableList.copyOf(previousMetadataLog));

    previousMetadataLog.add(latestPreviousMetadata);

    TableMetadata metadata = base.replaceProperties(
        ImmutableMap.of(TableProperties.METADATA_PREVIOUS_VERSIONS_MAX, "5"));

    SortedSet<MetadataLogEntry> removedPreviousMetadata =
        Sets.newTreeSet(Comparator.comparingLong(MetadataLogEntry::timestampMillis));
    removedPreviousMetadata.addAll(base.previousFiles());
    removedPreviousMetadata.removeAll(metadata.previousFiles());

    Assert.assertEquals("Metadata logs should match",
        previousMetadataLog.subList(1, 6), metadata.previousFiles());
    Assert.assertEquals("Removed Metadata logs should contain 1",
        previousMetadataLog.subList(0, 1), ImmutableList.copyOf(removedPreviousMetadata));
}