Java Code Examples for com.google.common.collect.Multimap#size()

The following examples show how to use com.google.common.collect.Multimap#size(). They are drawn from open source projects; the original project and source file are credited above each example.
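As a quick orientation before the examples: Multimap#size() returns the number of key-value pairs in the multimap, not the number of distinct keys. A minimal standalone sketch (not taken from any of the projects below):

Multimap<String, Integer> scores = ArrayListMultimap.create();
scores.put("alice", 90);
scores.put("alice", 85);
scores.put("bob", 70);

System.out.println(scores.size());           // 3 -- one per key-value pair
System.out.println(scores.keySet().size());  // 2 -- distinct keys
System.out.println(scores.isEmpty());        // false; prefer isEmpty() over size() == 0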
Example 1
Source File: AnomaliesResource.java    From incubator-pinot with Apache License 2.0
public static Multimap<String, String> generateFilterSetWithDimensionMap(DimensionMap dimensionMap,
    Multimap<String, String> filterSet) {

  Multimap<String, String> newFilterSet = HashMultimap.create();

  // The dimension map gives more specific dimension information than the filter set (i.e., the dimension
  // map should be a subset of filterSet), so it needs to be processed first.
  if (MapUtils.isNotEmpty(dimensionMap)) {
    for (Map.Entry<String, String> dimensionMapEntry : dimensionMap.entrySet()) {
      newFilterSet.put(dimensionMapEntry.getKey(), dimensionMapEntry.getValue());
    }
  }

  if (filterSet != null && filterSet.size() != 0) {
    for (String key : filterSet.keySet()) {
      if (!newFilterSet.containsKey(key)) {
        newFilterSet.putAll(key, filterSet.get(key));
      }
    }
  }

  return newFilterSet;
}
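A hypothetical usage sketch of the helper above (DimensionMap is a ThirdEye class; the helper only relies on it behaving like a Map<String, String>, and the no-arg constructor shown here is an assumption):

Multimap<String, String> filterSet = HashMultimap.create();
filterSet.put("country", "US");
filterSet.put("country", "CA");
filterSet.put("device", "mobile");

DimensionMap dimensionMap = new DimensionMap();  // assumed constructor
dimensionMap.put("country", "US");

// Keeps {country=[US]} from the dimension map (it wins over same-key filter
// entries) plus {device=[mobile]} carried over from the filter set.
Multimap<String, String> merged = generateFilterSetWithDimensionMap(dimensionMap, filterSet);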
 
Example 2
Source File: MutliMapTest.java    From fast-family-master with Apache License 2.0
public static void main(String[] args) {
    Multimap<String, String> myMultimap = ArrayListMultimap.create();
    myMultimap.put("Fruits", "Banana");
    myMultimap.put("Fruits", "Apple");
    myMultimap.put("Fruits", "Pear");
    myMultimap.put("Vegetables", "Carrot");

    // size() counts key-value pairs, not keys, so this prints 4
    int size = myMultimap.size();
    System.out.println(size);

    Collection<String> fruits = myMultimap.get("Fruits");
    System.out.println(fruits);

    Collection<String> vegetables = myMultimap.get("Vegetables");
    System.out.println(vegetables);

    for (String value : myMultimap.values()) {
        System.out.println(value);
    }

    myMultimap.removeAll("Fruits");
    System.out.println(myMultimap.get("Fruits"));
}
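For reference, a likely run of the snippet above (the order in which values() interleaves keys is not guaranteed, since ArrayListMultimap is backed by a HashMap):

4
[Banana, Apple, Pear]
[Carrot]
Banana
Apple
Pear
Carrot
[]

Note the first line: size() is 4 because it counts key-value pairs across all keys.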
 
Example 3
Source File: IndexWriter.java    From phoenix with Apache License 2.0
/**
 * Convert the passed index updates to {@link HTableInterfaceReference}s.
 * @param indexUpdates from the index builder
 * @return pairs that can then be written by an {@link IndexWriter}.
 */
public static Multimap<HTableInterfaceReference, Mutation> resolveTableReferences(
    Collection<Pair<Mutation, byte[]>> indexUpdates) {
  Multimap<HTableInterfaceReference, Mutation> updates = ArrayListMultimap
      .<HTableInterfaceReference, Mutation> create();
  // simple map to make lookups easy while we build the map of tables to create
  // (note: updates is still empty here, so the size() capacity hint is simply zero)
  Map<ImmutableBytesPtr, HTableInterfaceReference> tables =
      new HashMap<ImmutableBytesPtr, HTableInterfaceReference>(updates.size());
  for (Pair<Mutation, byte[]> entry : indexUpdates) {
    byte[] tableName = entry.getSecond();
    ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
    HTableInterfaceReference table = tables.get(ptr);
    if (table == null) {
      table = new HTableInterfaceReference(ptr);
      tables.put(ptr, table);
    }
    updates.put(table, entry.getFirst());
  }

  return updates;
}
 
Example 4
Source File: ResolvedFeatures.java    From xtext-extras with Eclipse Public License 2.0
protected List<IResolvedOperation> computeAllOperations() {
	JvmType rawType = getRawType();
	if (!(rawType instanceof JvmDeclaredType)) {
		return Collections.emptyList();
	}
	Multimap<String, AbstractResolvedOperation> processedOperations = LinkedHashMultimap.create();
	for (IResolvedOperation resolvedOperation : getDeclaredOperations()) {
		processedOperations.put(resolvedOperation.getDeclaration().getSimpleName(), (AbstractResolvedOperation) resolvedOperation);
	}
	if (targetVersion.isAtLeast(JavaVersion.JAVA8)) {
		computeAllOperationsFromSortedSuperTypes((JvmDeclaredType) rawType, processedOperations);
	} else {
		Set<JvmType> processedTypes = Sets.newHashSet(rawType);
		computeAllOperationsFromSuperTypes((JvmDeclaredType) rawType, processedOperations, processedTypes);
	}
	// make sure the declared operations are the first in the list
	List<IResolvedOperation> result = new ArrayList<IResolvedOperation>(processedOperations.size());
	result.addAll(getDeclaredOperations());
	for (AbstractResolvedOperation operation : processedOperations.values()) {
		if (operation.getDeclaration().getDeclaringType() != rawType) {
			result.add(operation);
		}
	}
	return Collections.unmodifiableList(result);
}
 
Example 5
Source File: AbstractXtendInstallDebugInfoMojo.java    From xtext-xtend with Eclipse Public License 2.0
protected void logStatus(String folder, Multimap<File, File> trace2class) {
	String p = xtendAsPrimaryDebugSource ? "primary" : "secondary (via SMAP)";
	int n = trace2class.size();
	getLog().info("Installing Xtend files into " + n + " class files as " + p + " debug sources in: " + folder);
	getLog().debug("xtendAsPrimaryDebugSource=" + xtendAsPrimaryDebugSource);
	getLog().debug("hideSyntheticVariables=" + hideSyntheticVariables);
}
 
Example 6
Source File: SqlStageExecution.java    From presto with Apache License 2.0
public synchronized Set<RemoteTask> scheduleSplits(InternalNode node, Multimap<PlanNodeId, Split> splits, Multimap<PlanNodeId, Lifespan> noMoreSplitsNotification)
{
    requireNonNull(node, "node is null");
    requireNonNull(splits, "splits is null");

    if (stateMachine.getState().isDone()) {
        return ImmutableSet.of();
    }
    splitsScheduled.set(true);

    checkArgument(stateMachine.getFragment().getPartitionedSources().containsAll(splits.keySet()), "Invalid splits");

    ImmutableSet.Builder<RemoteTask> newTasks = ImmutableSet.builder();
    Collection<RemoteTask> tasks = this.tasks.get(node);
    RemoteTask task;
    if (tasks == null) {
        // The output buffer depends on the task id starting from 0 and being sequential, since each
        // task is assigned a private buffer based on task id.
        TaskId taskId = new TaskId(stateMachine.getStageId(), nextTaskId.getAndIncrement());
        task = scheduleTask(node, taskId, splits, OptionalInt.empty());
        newTasks.add(task);
    }
    else {
        task = tasks.iterator().next();
        task.addSplits(splits);
    }
    if (noMoreSplitsNotification.size() > 1) {
        // The assumption that `noMoreSplitsNotification.size() <= 1` currently holds.
        // If this assumption no longer holds, we should consider calling task.noMoreSplits with multiple entries in one shot.
        // These kinds of methods can be expensive since they are grabbing locks and/or sending HTTP requests on change.
        throw new UnsupportedOperationException("This assumption no longer holds: noMoreSplitsNotification.size() <= 1");
    }
    for (Entry<PlanNodeId, Lifespan> entry : noMoreSplitsNotification.entries()) {
        task.noMoreSplits(entry.getKey(), entry.getValue());
    }
    return newTasks.build();
}
 
Example 7
Source File: ReasonerUtils.java    From grakn with GNU Affero General Public License v3.0
/**
 * Computes the map of compatible RelationTypes for a given set of Types
 * (the intersection of the allowed relation-type sets across the entry types), together with the compatible Roles.
 * NB: assumes MATCH semantics - all types and their subs are considered.
 * @param types types for which the set of compatible RelationTypes is to be computed
 * @param schemaConceptConverter converter between SchemaConcept and relation type-role entries
 * @param <T> type generic
 * @return map of compatible RelationTypes and their corresponding Roles
 */
public static <T extends SchemaConcept> Multimap<RelationType, Role> compatibleRelationTypesWithRoles(Set<T> types, SchemaConceptConverter<T> schemaConceptConverter) {
    Multimap<RelationType, Role> compatibleTypes = HashMultimap.create();
    if (types.isEmpty()) return compatibleTypes;
    Iterator<T> typeIterator = types.iterator();
    compatibleTypes.putAll(schemaConceptConverter.toRelationMultimap(typeIterator.next()));

    while (typeIterator.hasNext() && compatibleTypes.size() > 1) {
        compatibleTypes = multimapIntersection(compatibleTypes, schemaConceptConverter.toRelationMultimap(typeIterator.next()));
    }
    return compatibleTypes;
}
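The multimapIntersection helper is not shown on this page; a minimal sketch of an entry-wise intersection (an assumption about its semantics, not the actual Grakn implementation) could look like:

private static Multimap<RelationType, Role> multimapIntersection(Multimap<RelationType, Role> a,
                                                                 Multimap<RelationType, Role> b) {
    Multimap<RelationType, Role> intersection = HashMultimap.create();
    for (Map.Entry<RelationType, Role> entry : a.entries()) {
        // keep only the (relation type, role) pairs present in both multimaps
        if (b.containsEntry(entry.getKey(), entry.getValue())) {
            intersection.put(entry.getKey(), entry.getValue());
        }
    }
    return intersection;
}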
 
Example 8
Source File: GrammaticalLabelFileTest.java    From grammaticus with BSD 3-Clause "New" or "Revised" License
public void testDifferenceInRenameTabs() throws Exception {
    StringBuilder diffs = new StringBuilder();
    for (HumanLanguage language : LanguageProviderFactory.get().getAll()) {
        if (!LanguageDeclensionFactory.get().getDeclension(language).hasPlural()) continue;
        LanguageDictionary dictionary = loadDictionary(language);
        Multimap<String,String> nowHasPlural = TreeMultimap.create();
        Multimap<String,String> missingPlural = TreeMultimap.create();

        for (String entity : new TreeSet<String>(dictionary.getNounsByEntity().keySet())) {
            for (Noun n : dictionary.getNounsByEntity().get(entity)) {
                if (n.getNounType() == NounType.ENTITY) continue;
                boolean hasPluralForm = false;
                for (NounForm form : n.getAllDefinedValues().keySet()) {
                    if (form.getNumber() == LanguageNumber.PLURAL && form.getCase() == LanguageCase.NOMINATIVE) {
                        hasPluralForm = true;
                        break;
                    }
                }
                if (hasPluralForm != (n.getNounType() == NounType.FIELD)) {
                    if (hasPluralForm) {
                        nowHasPlural.put(entity, n.getName());
                    } else {
                        missingPlural.put(entity, n.getName());
                    }
                }
            }
        }
        if (nowHasPlural.size() > 0) {
            diffs.append(language).append(" can rename plural fields for: ").append(nowHasPlural).append('\n');
        }
        if (missingPlural.size() > 0) {
            diffs.append(language).append(" has these plural fields removed for rename: ").append(missingPlural).append('\n');
        }
    }
    System.out.println(diffs.toString());
}
 
Example 9
Source File: EasyCCG.java    From easyccg with MIT License
public static void printDetailedTiming(final Parser parser, DecimalFormat format)
{
  // Prints statistics about how long sentences of each length took to parse.
  int sentencesCovered = 0;
  Multimap<Integer, Long> sentenceLengthToParseTimeInNanos = parser.getSentenceLengthToParseTimeInNanos();
  int binNumber = 0;
  int binSize = 10;
  while (sentencesCovered < sentenceLengthToParseTimeInNanos.size()) {
    double totalTimeForBinInMillis = 0;
    int totalSentencesInBin = 0;
    for (int sentenceLength=binNumber*binSize + 1; sentenceLength < 1 + (binNumber+1) * binSize; sentenceLength=sentenceLength+1) {
      for (long time : sentenceLengthToParseTimeInNanos.get(sentenceLength)) {
        totalTimeForBinInMillis += ((double) time / 1000000);
        totalSentencesInBin++;
      }
    }
    sentencesCovered += totalSentencesInBin;
    double averageTimeInMillis = totalTimeForBinInMillis / totalSentencesInBin;
    if (totalSentencesInBin > 0) {
      System.err.println("Average time for sentences of length " + (1 + binNumber*binSize) + "-" + (binNumber+1) * binSize + " (" + totalSentencesInBin + "): " + format.format(averageTimeInMillis) + "ms");
    }
    
    binNumber++;
  }
  
  int totalSentencesTimes1000 = sentenceLengthToParseTimeInNanos.size() * 1000;
  long totalMillis = parser.getParsingTimeOnlyInMillis() + parser.getTaggingTimeOnlyInMillis();
  System.err.println("Just Parsing Time: " + parser.getParsingTimeOnlyInMillis() + "ms " + (totalSentencesTimes1000 / parser.getParsingTimeOnlyInMillis()) + " per second");
  System.err.println("Just Tagging Time: " + parser.getTaggingTimeOnlyInMillis() + "ms " + (totalSentencesTimes1000 / parser.getTaggingTimeOnlyInMillis()) + " per second");
  System.err.println("Total Time:        " + totalMillis + "ms " + (totalSentencesTimes1000 / totalMillis) + " per second");
}
 
Example 10
Source File: ApiHandler.java    From datamill with ISC License
private static JSONObject toJson(Multimap<String, String> headers) {
    if (headers != null && headers.size() > 0) {
        JSONObject json = new JSONObject();
        // note: JSONObject keeps one value per key, so a repeated header name ends up with its last value only
        for (Map.Entry<String, String> entry : headers.entries()) {
            json.put(entry.getKey(), entry.getValue());
        }

        return json;
    }

    return null;
}
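Because JSONObject stores a single value per key, the helper above keeps only the last value of a repeated header name. A hedged alternative sketch that preserves duplicates by grouping each key's values into a JSONArray (using org.json's JSONArray(Collection) constructor; this is not the datamill implementation):

private static JSONObject toJsonPreservingDuplicates(Multimap<String, String> headers) {
    JSONObject json = new JSONObject();
    for (Map.Entry<String, Collection<String>> entry : headers.asMap().entrySet()) {
        // asMap() exposes all values collected under one key
        json.put(entry.getKey(), new JSONArray(entry.getValue()));
    }
    return json;
}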
 
Example 11
Source File: ExtendedContentIndexingColumnBasedHandler.java    From datawave with Apache License 2.0
@Override
public long process(KEYIN key, RawRecordContainer event, Multimap<String,NormalizedContentInterface> eventFields,
                TaskInputOutputContext<KEYIN,? extends RawRecordContainer,KEYOUT,VALUEOUT> context, ContextWriter<KEYOUT,VALUEOUT> contextWriter)
                throws IOException, InterruptedException {
    
    // Hold some event-specific variables to avoid re-processing
    this.shardId = getShardId(event);
    
    if (tokenHelper.isVerboseShardCounters()) {
        context.getCounter("EVENT_SHARD_ID", new String(this.shardId)).increment(1);
    }
    
    this.eventDataTypeName = event.getDataType().outputName();
    this.eventUid = event.getId().toString();
    
    // write the standard set of keys
    Multimap<BulkIngestKey,Value> keys = super.processBulk(key, event, eventFields, new ContextWrappedStatusReporter(context));
    long count = keys.size();
    contextWriter.write(keys, context);
    
    StatusReporter reporter = new ContextWrappedStatusReporter(context);
    
    // gc before we get into the tokenization piece
    keys = null;
    
    // stream the tokens to the context writer here
    count += tokenizeEvent(event, context, contextWriter, reporter);
    
    // return the number of records written
    return count;
}
 
Example 12
Source File: Indexer.java    From phoenix with Apache License 2.0
@Override
public void postOpen(final ObserverContext<RegionCoprocessorEnvironment> c) {
  Multimap<HTableInterfaceReference, Mutation> updates = failedIndexEdits.getEdits(c.getEnvironment().getRegion());
  
  if (this.disabled) {
      return;
  }

  long start = EnvironmentEdgeManager.currentTimeMillis();
  try {
      //if we have no pending edits to complete, then we are done
      if (updates == null || updates.size() == 0) {
        return;
      }

      LOGGER.info("Found some outstanding index updates that didn't succeed during"
              + " WAL replay - attempting to replay now.");

      // do the usual writer stuff, killing the server again, if we can't manage to make the index
      // writes succeed again
      try {
          writer.writeAndHandleFailure(updates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
      } catch (IOException e) {
              LOGGER.error("During WAL replay of outstanding index updates, "
                      + "Exception is thrown instead of killing server during index writing", e);
      }
  } finally {
      long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
      if (duration >= slowPostOpenThreshold) {
          if (LOGGER.isDebugEnabled()) {
              LOGGER.debug(getCallTooSlowMessage("postOpen", duration, slowPostOpenThreshold));
          }
          metricSource.incrementNumSlowPostOpenCalls();
      }
      metricSource.updatePostOpenTime(duration);
  }
}
 
Example 13
Source File: ContextFinder.java    From xtext-core with Eclipse Public License 2.0
@Override
public Set<ISerializationContext> findByContents(EObject semanticObject, Iterable<ISerializationContext> contextCandidates) {
	if (semanticObject == null)
		throw new NullPointerException();

	initConstraints();

	Multimap<IConstraint, ISerializationContext> constraints;
	if (contextCandidates != null)
		constraints = getConstraints(semanticObject, contextCandidates);
	else
		constraints = getConstraints(semanticObject);

	if (constraints.size() < 2)
		return Sets.newLinkedHashSet(constraints.values());

	for (IConstraint cand : Lists.newArrayList(constraints.keySet()))
		if (!isValidValueQuantity(cand, semanticObject))
			constraints.removeAll(cand);

	if (constraints.size() < 2)
		return Sets.newLinkedHashSet(constraints.values());

	LinkedHashSet<ISerializationContext> result = Sets.newLinkedHashSet(constraints.values());
	for (EStructuralFeature feat : semanticObject.eClass().getEAllStructuralFeatures()) {
		if (transientValueUtil.isTransient(semanticObject, feat) != ValueTransient.NO)
			continue;
		if (feat.isMany() && ((List<?>) semanticObject.eGet(feat)).isEmpty())
			continue;
		Multimap<AbstractElement, ISerializationContext> assignments = collectAssignments(constraints, feat);
		Set<AbstractElement> assignedElements = findAssignedElements(semanticObject, feat, assignments);
		Set<ISerializationContext> keep = Sets.newHashSet();
		for (AbstractElement ele : assignedElements)
			keep.addAll(assignments.get(ele));
		result.retainAll(keep);
	}
	return result;
}
 
Example 14
Source File: ExpandCompositeTerms.java    From datawave with Apache License 2.0
/**
 * Rebuilds the current 'and' node, and attempts to create the best composites from the leaf and ancestor anded nodes available. First, we descend into the
 * non-leaf nodes, and keep track of which leaf and anded nodes are used. We then attempt to create composites from the remaining leaf and anded nodes.
 * Finally, any leftover, unused leaf nodes are anded at this level, while the used leaf nodes are passed down to the descendants and anded where
 * appropriate.
 *
 * @param node
 *            An 'and' node from the original script
 * @param data
 *            ExpandData, containing ancestor anded nodes, used anded nodes, and a flag indicating whether composites were found
 * @return An expanded version of the 'and' node containing composite nodes, if found, or the original node if none are found
 */
@Override
public Object visit(ASTAndNode node, Object data) {
    ExpandData parentData = (ExpandData) data;
    
    // only process delayed predicates
    if (QueryPropertyMarkerVisitor.instanceOfAnyExcept(node, Arrays.asList(ASTDelayedPredicate.class))) {
        return node;
    }
    
    // if we only have one child, just pass through
    // this shouldn't ever really happen, but it could
    if (node.jjtGetNumChildren() == 1)
        return super.visit(node, data);
    
    // first, find all leaf nodes
    // note: an 'and' node defining a range over a single term is considered a leaf node for our purposes
    List<JexlNode> nonLeafNodes = new ArrayList<>();
    Multimap<String,JexlNode> leafNodes = getLeafNodes(node, nonLeafNodes);
    
    // if this is a 'leaf' range node, check to see if a composite can be made
    if (leafNodes.size() == 1 && leafNodes.containsValue(node)) {
        // attempt to build a composite
        return visitLeafNode(node, parentData);
    }
    // otherwise, process the 'and' node as usual
    else {
        
        Multimap<String,JexlNode> usedLeafNodes = LinkedHashMultimap.create();
        
        // process the non-leaf nodes first
        List<JexlNode> processedNonLeafNodes = processNonLeafNodes(parentData, nonLeafNodes, leafNodes, usedLeafNodes);
        
        // remove the used nodes from the leaf and anded nodes
        leafNodes.values().removeAll(usedLeafNodes.values());
        parentData.andedNodes.values().removeAll(parentData.usedAndedNodes.values());
        
        // next, process the remaining leaf nodes
        List<JexlNode> processedLeafNodes = processUnusedLeafNodes(parentData, leafNodes, usedLeafNodes);
        
        // again, remove the used nodes from the leaf and anded nodes
        leafNodes.values().removeAll(usedLeafNodes.values());
        parentData.andedNodes.values().removeAll(parentData.usedAndedNodes.values());
        
        // rebuild the node if composites are found
        if (parentData.foundComposite) {
            List<JexlNode> processedNodes = new ArrayList<>();
            processedNodes.addAll(processedLeafNodes);
            processedNodes.addAll(processedNonLeafNodes);
            
            // rebuild the node
            JexlNode rebuiltNode = createUnwrappedAndNode(processedNodes);
            
            // distribute the used nodes into the rebuilt node
            if (!usedLeafNodes.values().isEmpty()) {
                // first we need to trim the used nodes to eliminate any wrapping nodes
                // i.e. reference, reference expression, or single child and/or nodes
                List<JexlNode> leafNodesToDistribute = usedLeafNodes.values().stream().map(this::getLeafNode).collect(Collectors.toList());
                rebuiltNode = DistributeAndedNodes.distributeAndedNode(rebuiltNode, leafNodesToDistribute, jexlNodeToCompMap);
            }
            
            return rebuiltNode;
        }
        
        return node;
    }
}
 
Example 15
Source File: EventMapper.java    From datawave with Apache License 2.0
@SuppressWarnings("unchecked")
public void executeHandler(K1 key, RawRecordContainer event, Multimap<String,NormalizedContentInterface> fields, DataTypeHandler<K1> handler,
                Context context) throws Exception {
    long count = 0;
    
    TraceStopwatch handlerTimer = null;
    
    // Handler based metrics
    if (metricsEnabled) {
        
        handlerTimer = new TraceStopwatch("Time in handler");
        handlerTimer.start();
    }
    
    // In the setup we determined whether or not we were performing bulk ingest. This tells us which
    // method to call on the DataTypeHandler interface.
    Multimap<BulkIngestKey,Value> r;
    
    if (!(handler instanceof ExtendedDataTypeHandler)) {
        r = handler.processBulk(key, event, fields, new ContextWrappedStatusReporter(getContext(context)));
        if (r == null) {
            getCounter(context, IngestInput.EVENT_FATAL_ERROR).increment(1);
            getCounter(context, IngestInput.EVENT_FATAL_ERROR.name(), "NullMultiMap").increment(1);
        } else {
            contextWriter.write(r, context);
            count = r.size();
        }
    } else {
        count = ((ExtendedDataTypeHandler<K1,K2,V2>) handler).process(key, event, fields, context, contextWriter);
        if (count == -1) {
            getCounter(context, IngestInput.EVENT_FATAL_ERROR).increment(1);
            getCounter(context, IngestInput.EVENT_FATAL_ERROR.name(), "NegOneCount").increment(1);
        }
    }
    
    // Update the counters
    if (count > 0) {
        getCounter(context, IngestOutput.ROWS_CREATED.name(), handler.getClass().getSimpleName()).increment(count);
        getCounter(context, IngestOutput.ROWS_CREATED).increment(count);
    }
    
    if (handler.getMetadata() != null) {
        handler.getMetadata().addEvent(handler.getHelper(event.getDataType()), event, fields, now.get());
    }
    
    if (metricsEnabled && handlerTimer != null) {
        handlerTimer.stop();
        long handlerTime = handlerTimer.elapsed(TimeUnit.MILLISECONDS);
        
        metricsLabels.clear();
        metricsLabels.put("dataType", event.getDataType().typeName());
        metricsLabels.put("handler", handler.getClass().getName());
        metricsService.collect(Metric.MILLIS_IN_HANDLER, metricsLabels.get(), fields, handlerTime);
        
        if (contextWriter instanceof KeyValueCountingContextWriter) {
            ((KeyValueCountingContextWriter) contextWriter).writeMetrics(event, fields, handler);
        }
    }
}
 
Example 16
Source File: Destacker.java    From securify with Apache License 2.0
private void handleStackMerging(Stack<Variable> localStack, int jumpsrc, int jumpdest) {
	// destination already destacked, may need to map current stack
	if (!canonicalStackForBranchJoinJumpdest.containsKey(jumpdest)) {
		throw new IllegalStateException("target jumpdest processed, but no canonical stack defined");
	}
	Stack<Variable> canonicalStack = canonicalStackForBranchJoinJumpdest.get(jumpdest);
	if (localStack.size() != canonicalStack.size()) {
		sawMergeWithDiffStackSize = true;
		// can apparently happen for shared error handling code
		/*if (controlFlowGraph.containsKey(jumpdest)) {
			// find out if all paths from `jumpdest` lead to error
			Set<Integer> reachable = ControlFlowDetector.getAllReachableBranches(controlFlowGraph, jumpdest);
			// check that it does not contain a valid exit point
			boolean allPathsError = !reachable.contains(ControlFlowDetector.DEST_EXIT);
			if (allPathsError) {
				// ignore stack mismatch
				log.println("[ignored] Branch merge: stack size mismatch: canonical @" + toHex(jumpdest) +
						" with size " + canonicalStack.size() + " vs local @" + toHex(jumpsrc) + " with size " + localStack.size());
				return;
			}
		}*/
		// so check if all paths lead to error, and if so just don't merge the stacks, since it doesn't matter anyway (?)
		log.println("Branch merge: stack size mismatch: canonical @" + toHex(jumpdest) +
				" with size " + canonicalStack.size() + " vs local @" + toHex(jumpsrc) + " with size " + localStack.size());
	}
	Multimap<Variable, Variable> mapToCanonical = HashMultimap.create();

	for (int i = 1, n = Math.min(localStack.size(), canonicalStack.size()); i <= n; ++i) {
		mapToCanonical.put(localStack.get(localStack.size() - i), canonicalStack.get(canonicalStack.size() - i));
	}
	log.println("stack merging from @" + toHex(jumpsrc) + " into @" + toHex(jumpdest));
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.forEach(canonical -> log.println(" " + canonical + " <- " + variable)));

	if (mapToCanonical.size() != mapToCanonical.values().stream().distinct().count()) {
		throw new IllegalStateException("a canonical variable is assigned multiple times");
	}

	// create re-assignment instructions
	if (variableReassignments.containsKey(jumpsrc)) {
		throw new IllegalStateException("reassignment does already exist");
	}
	Map<Variable, Variable> reassignments = new LinkedHashMap<>();
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.stream().filter(canonical -> variable != canonical)
					.forEach(canonical -> reassignments.put(canonical, variable)));

	// create temporary variables if we have conflicting variable swaps
	Map<Variable, Variable> temporaries = new LinkedHashMap<>();
	Set<Variable> wasAssignedTo = new HashSet<>();
	// search all variables that are reassigned before they get assigned
	reassignments.forEach((canonical, local) -> {
		if (wasAssignedTo.contains(local)) {
			Variable tmpVar = new Variable();
			temporaries.put(local, tmpVar);
			log.println("swap conflict for: " + canonical + " <- " + local + "; created temp variable: " + tmpVar);
		}
		wasAssignedTo.add(canonical);
	});

	if (temporaries.size() > 0) {
		// replace locals with temporaries, if there is a temporary
		reassignments.replaceAll((canonical, local) -> temporaries.getOrDefault(local, local));
		// add assignments to temporaries at the beginning
		Map<Variable, Variable> reassignmentsWithTemps = new LinkedHashMap<>();
		temporaries.forEach((local, canonical) -> reassignmentsWithTemps.put(canonical, local));
		reassignmentsWithTemps.putAll(reassignments);
		reassignments.clear();
		reassignments.putAll(reassignmentsWithTemps);
	}

	variableReassignments.put(jumpsrc, reassignments);
}
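The temporary-variable logic above resolves swap conflicts: whenever a value would be overwritten before it is read, it is first saved into a fresh variable. A self-contained sketch of the same idea, using plain strings in place of securify's Variable class:

Map<String, String> reassignments = new LinkedHashMap<>();
reassignments.put("a", "b");  // a <- b
reassignments.put("b", "a");  // b <- a conflicts: a is written before it is read

Map<String, String> temporaries = new LinkedHashMap<>();
Set<String> assignedSoFar = new HashSet<>();
reassignments.forEach((target, source) -> {
    if (assignedSoFar.contains(source)) {
        temporaries.put(source, "tmp_" + source);  // snapshot the old value first
    }
    assignedSoFar.add(target);
});
// Resulting execution order: tmp_a <- a, then a <- b, then b <- tmp_a.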
 
Example 17
Source File: MultimapSerializer.java    From jackson-datatypes-collections with Apache License 2.0
@Override
public boolean hasSingleElement(Multimap<?,?> map) {
    return map.size() == 1;
}
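In this serializer, size() == 1 holds exactly when the multimap contains a single key-value pair; a quick illustration of what "single element" means here:

Multimap<String, String> m = ArrayListMultimap.create();
m.put("k", "v1");   // hasSingleElement(m) -> true
m.put("k", "v2");   // hasSingleElement(m) -> false: size() counts entries, so it is now 2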
 
Example 18
Source File: TSet.java    From Clusion with GNU General Public License v3.0
public static void constructEMMPar(final byte[] key1, final byte[] key2, final byte[] keyENC,
		final Multimap<String, String> lookup, final Multimap<String, String> encryptedIdToRealId)
		throws InterruptedException, ExecutionException, IOException {

	// Instantiation of the B buckets of the secure inverted index
	// and initialization of the free set

	// Determination of the bucket size B
	bucketSize = lookup.size() * spaceOverhead;
	int count = 2;
	for (int j = 1; j < 1000; j++) {
		if (bucketSize > Math.pow(2, count)) {
			count = 2 * j;
		} else {
			break;
		}
	}

	bucketSize = (int) Math.pow(2, count);

	for (int i = 0; i < bucketSize; i++) {
		secureIndex.add(new ArrayList<Record>());
		free.add(new ArrayList<Integer>());
		// For each bucket initialize to S sub-buckets
		for (int j = 0; j < subBucketSize; j++) {
			// initialize all buckets with random values
			secureIndex.get(i).add(new Record(new byte[16], new byte[16]));
			free.get(i).add(j);
		}
	}

	List<String> listOfKeyword = new ArrayList<String>(lookup.keySet());
	int threads = 0;
	if (Runtime.getRuntime().availableProcessors() > listOfKeyword.size()) {
		threads = listOfKeyword.size();
	} else {
		threads = Runtime.getRuntime().availableProcessors();
	}

	ExecutorService service = Executors.newFixedThreadPool(threads);
	ArrayList<String[]> inputs = new ArrayList<String[]>(threads);

	for (int i = 0; i < threads; i++) {
		String[] tmp;
		if (i == threads - 1) {
			tmp = new String[listOfKeyword.size() / threads + listOfKeyword.size() % threads];
			for (int j = 0; j < listOfKeyword.size() / threads + listOfKeyword.size() % threads; j++) {
				tmp[j] = listOfKeyword.get((listOfKeyword.size() / threads) * i + j);
			}
		} else {
			tmp = new String[listOfKeyword.size() / threads];
			for (int j = 0; j < listOfKeyword.size() / threads; j++) {

				tmp[j] = listOfKeyword.get((listOfKeyword.size() / threads) * i + j);
			}
		}
		inputs.add(i, tmp);
	}

	List<Future<Integer>> futures = new ArrayList<Future<Integer>>();
	for (final String[] input : inputs) {
		Callable<Integer> callable = new Callable<Integer>() {
			public Integer call() throws Exception {

				int output = setup(key1, key2, keyENC, input, lookup, encryptedIdToRealId);
				return 1;
			}
		};
		futures.add(service.submit(callable));
	}

	service.shutdown();

}
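One caveat in the listing above: the futures returned by service.submit(...) are collected but never awaited, so constructEMMPar may return before every worker has finished building its share of the index. A caller that needs to block until completion could, under that assumption, wait explicitly:

for (Future<Integer> future : futures) {
    future.get();  // blocks; rethrows any worker failure as an ExecutionException
}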
 
Example 19
Source File: DestackerFallback.java    From securify with Apache License 2.0
private void handleStackMerging(Stack<Variable> localStack, int jumpsrc, Instruction jumpI, int jumpdest) {
	// destination already destacked, may need to map current stack
	if (!canonicalStackForBranchJoinJumpdest.containsKey(jumpdest)) {
		throw new IllegalStateException("target jumpdest processed, but no canonical stack defined");
	}
	Stack<Variable> canonicalStack = canonicalStackForBranchJoinJumpdest.get(jumpdest);
	if (localStack.size() != canonicalStack.size()) {
		log.println("Branch merge: stack size mismatch: canonical @" + HexPrinter.toHex(jumpdest) +
				" with size " + canonicalStack.size() + " vs local @" + HexPrinter.toHex(jumpsrc) + " with size " + localStack.size());
		sawMergeWithDiffStackSize = true;
	}
	Multimap<Variable, Variable> mapToCanonical = HashMultimap.create();

	int mergeSize = Math.min(localStack.size(), canonicalStack.size());
	if (mergeSize == 0) {
		log.println("Branch merge: skipped merger for empty stack");
		return;
	}
	for (int i = 1; i <= mergeSize; ++i) {
		mapToCanonical.put(localStack.get(localStack.size() - i), canonicalStack.get(canonicalStack.size() - i));
	}
	log.println("stack merging from @" + HexPrinter.toHex(jumpsrc) + " into @" + HexPrinter.toHex(jumpdest));
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.forEach(canonical -> log.println(" " + canonical + " <- " + variable)));

	if (mapToCanonical.size() != mapToCanonical.values().stream().distinct().count()) {
		throw new IllegalStateException("a canonical variable is assigned multiple times");
	}

	boolean jumpCondition = findJumpCondition(jumpI, jumpdest);
	
	// create re-assignment instructions
	if (variableReassignments.containsKey(new Pair<>(jumpI, jumpCondition))) {
		throw new IllegalStateException("reassignment does already exist");
	}
	Map<Variable, Variable> reassignments = new LinkedHashMap<>();
	mapToCanonical.asMap().forEach((variable, canonicals) ->
			canonicals.stream().filter(canonical -> variable != canonical)
					.forEach(canonical -> reassignments.put(canonical, variable)));

	// create temporary variables if we have conflicting variable swaps
	Map<Variable, Variable> temporaries = new LinkedHashMap<>();
	Set<Variable> wasAssignedTo = new HashSet<>();
	// search all variables that are reassigned before they get assigned
	reassignments.forEach((canonical, local) -> {
		if (wasAssignedTo.contains(local)) {
			Variable tmpVar = new Variable();
			temporaries.put(local, tmpVar);
			//if (isVirtualCanonicalVar(canonical) && isVirtualCanonicalVar(local)) {
			if (isVirtualCanonicalVar(local)) {
				virtualCanonicalVars.add(tmpVar);
			}
			log.println("swap conflict for: " + canonical + " <- " + local + "; created temp variable: " + tmpVar);
		}
		wasAssignedTo.add(canonical);
	});

	if (temporaries.size() > 0) {
		// replace locals with temporaries, if there is a temporary
		reassignments.replaceAll((canonical, local) -> temporaries.getOrDefault(local, local));
		// add assignments to temporaries at the beginning
		Map<Variable, Variable> reassignmentsWithTemps = new LinkedHashMap<>();
		temporaries.forEach((local, canonical) -> reassignmentsWithTemps.put(canonical, local));
		reassignmentsWithTemps.putAll(reassignments);
		reassignments.clear();
		reassignments.putAll(reassignmentsWithTemps);
	}

	variableReassignments.put(new Pair<>(jumpI, jumpCondition), reassignments);
}