Java Code Examples for com.google.common.collect.Sets#newLinkedHashSetWithExpectedSize()

The following examples show how to use com.google.common.collect.Sets#newLinkedHashSetWithExpectedSize(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
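Before the project examples, here is a minimal, self-contained sketch of the call itself (the class name ExpectedSizeDemo and the sample data are illustrative only, not taken from any project below): newLinkedHashSetWithExpectedSize(n) returns a LinkedHashSet pre-sized so that adding n elements should not force a resize, while insertion order is still preserved.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

import com.google.common.collect.Sets;

public class ExpectedSizeDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "alpha", "gamma");

        // Pre-size the set for the number of elements we expect to add;
        // this avoids intermediate rehashing while preserving insertion order.
        LinkedHashSet<String> unique = Sets.newLinkedHashSetWithExpectedSize(names.size());
        unique.addAll(names);

        System.out.println(unique); // prints [alpha, beta, gamma]
    }
}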
Example 1
Source File: InstrumentedEhCacheCacheManager.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
protected Collection<Cache> loadCaches() {
    Assert.notNull(this.cacheManager, "A backing EhCache CacheManager is required");
    Status status = this.cacheManager.getStatus();
    Assert.isTrue(Status.STATUS_ALIVE.equals(status),
            "An 'alive' EhCache CacheManager is required - current cache is " + status.toString());

    String[] names = this.cacheManager.getCacheNames();
    Collection<Cache> caches = Sets.newLinkedHashSetWithExpectedSize(names.length);
    for (String name : names) {
        if (enableMetrics) {
            caches.add(new InstrumentedEhCacheCache(this.cacheManager.getEhcache(name)));
        } else {
            caches.add(new EhCacheCache(this.cacheManager.getEhcache(name)));
        }
    }
    return caches;
}
 
Example 2
Source File: OrderedResourceDescriptionsData.java    From n4js with Eclipse Public License 1.0
@SuppressWarnings("unchecked")
@Override
protected void registerDescription(final IResourceDescription description,
		final Map<QualifiedName, Object> target) {

	for (final IEObjectDescription object : description.getExportedObjects()) {
		final QualifiedName lowerCase = object.getName().toLowerCase();
		final Object existing = target.put(lowerCase, description);
		if (existing != null && existing != description) {
			Set<IResourceDescription> set = null;
			if (existing instanceof IResourceDescription) {
				// Using a LinkedHashSet here is the difference from the super class.
				set = Sets.newLinkedHashSetWithExpectedSize(2);
				set.add((IResourceDescription) existing);
			} else {
				set = (Set<IResourceDescription>) existing;
			}
			set.add(description);
			target.put(lowerCase, set);
		}
	}
}
 
Example 3
Source File: ResourceDescriptionsData.java    From xtext-core with Eclipse Public License 2.0
@SuppressWarnings("unchecked")
protected void registerDescription(IResourceDescription description, Map<QualifiedName, Object> target) {
	for(IEObjectDescription object: description.getExportedObjects()) {
		QualifiedName lowerCase = object.getName().toLowerCase();
		Object existing = target.put(lowerCase, description);
		if (existing != null && existing != description) {
			Set<IResourceDescription> set = null;
			if (existing instanceof IResourceDescription) {
				set = Sets.newLinkedHashSetWithExpectedSize(2);
				set.add((IResourceDescription)existing);
			} else {
				set = (Set<IResourceDescription>) existing;
			}
			set.add(description);
			target.put(lowerCase, set);
		}
	}
}
 
Example 4
Source File: BashFunctionDefImpl.java    From BashSupport with Apache License 2.0
@NotNull
@Override
public Set<String> findLocalScopeVariables() {
    if (localScopeVariables == null) {
        synchronized (stateLock) {
            if (localScopeVariables == null) {
                localScopeVariables = Sets.newLinkedHashSetWithExpectedSize(10);

                Collection<BashVarDef> varDefs = PsiTreeUtil.findChildrenOfType(this, BashVarDef.class);
                for (BashVarDef varDef : varDefs) {
                    if (varDef.isLocalVarDef() && this.isEquivalentTo(BashPsiUtils.findNextVarDefFunctionDefScope(varDef))) {
                        localScopeVariables.add(varDef.getReferenceName());
                    }
                }
            }
        }
    }

    return localScopeVariables;
}
 
Example 5
Source File: InListExpression.java    From phoenix with Apache License 2.0
@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    input.readBoolean(); // Unused, but left for b/w compat. TODO: remove in next major release
    fixedWidth = WritableUtils.readVInt(input);
    byte[] valuesBytes = Bytes.readByteArray(input);
    valuesByteLength = valuesBytes.length;
    int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth;
    // TODO: consider using a regular HashSet as we never serialize from the server-side
    values = Sets.newLinkedHashSetWithExpectedSize(len);
    int offset = 0;
    int i  = 0;
    if (i < len) {
        offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
        while (++i < len-1) {
            offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
        }
        if (i < len) {
            offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
        } else {
            maxValue = minValue;
        }
    } else {
        minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
    }
}
 
Example 6
Source File: IndexMaintainer.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private IndexMaintainer(RowKeySchema dataRowKeySchema, boolean isDataTableSalted, byte[] indexTableName,
        int nIndexColumns, int nIndexPKColumns, Integer nIndexSaltBuckets, List<PColumnFamily> cfs, boolean indexWALDisabled) {
    this(dataRowKeySchema, isDataTableSalted);
    int nDataPKColumns = dataRowKeySchema.getFieldCount() - (isDataTableSalted ? 1 : 0);
    this.indexTableName = indexTableName;
    this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexPKColumns-nDataPKColumns);
    this.indexedColumnTypes = Lists.<PDataType>newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns);
    this.indexedColumnByteSizes = Lists.<Integer>newArrayListWithExpectedSize(nIndexPKColumns-nDataPKColumns);
    this.coveredColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexColumns-nIndexPKColumns);
    this.allColumns = Sets.newLinkedHashSetWithExpectedSize(nDataPKColumns + nIndexColumns);
    this.allColumns.addAll(indexedColumns);
    this.allColumns.addAll(coveredColumns);
    this.rowKeyMetaData = newRowKeyMetaData(nIndexPKColumns);
    this.nIndexSaltBuckets  = nIndexSaltBuckets == null ? 0 : nIndexSaltBuckets;
    this.dataEmptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(cfs);
    this.nDataCFs = cfs.size();
    this.indexWALDisabled = indexWALDisabled;
}
 
Example 7
Source File: InListExpression.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    containsNull = input.readBoolean();
    fixedWidth = WritableUtils.readVInt(input);
    byte[] valuesBytes = Bytes.readByteArray(input);
    valuesByteLength = valuesBytes.length;
    int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth;
    values = Sets.newLinkedHashSetWithExpectedSize(len);
    int offset = 0;
    int i  = 0;
    if (i < len) {
        offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
        while (++i < len-1) {
            offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
        }
        if (i < len) {
            offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
        } else {
            maxValue = minValue;
        }
    } else {
        minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
    }
}
 
Example 8
Source File: HashSetCodec.java    From bazel with Apache License 2.0
@Override
public LinkedHashSet<E> deserialize(DeserializationContext context, CodedInputStream codedIn)
    throws SerializationException, IOException {
  int size = codedIn.readInt32();
  LinkedHashSet<E> set = Sets.newLinkedHashSetWithExpectedSize(size);
  for (int i = 0; i < size; i++) {
    set.add(context.deserialize(codedIn));
  }
  return set;
}
 
Example 9
Source File: PackageFunction.java    From bazel with Apache License 2.0
@Override
public Token runAsync(
    List<String> includes, List<String> excludes, boolean excludeDirs, boolean allowEmpty)
    throws BadGlobException, InterruptedException {
  LinkedHashSet<SkyKey> globKeys = Sets.newLinkedHashSetWithExpectedSize(includes.size());
  Map<SkyKey, String> globKeyToPatternMap = Maps.newHashMapWithExpectedSize(includes.size());

  for (String pattern : includes) {
    SkyKey globKey = getGlobKey(pattern, excludeDirs);
    globKeys.add(globKey);
    globKeyToPatternMap.put(globKey, pattern);
  }

  globDepsRequested.addAll(globKeys);

  Map<SkyKey, ValueOrException2<IOException, BuildFileNotFoundException>> globValueMap =
      env.getValuesOrThrow(globKeys, IOException.class, BuildFileNotFoundException.class);

  // For each missing glob, evaluate it asynchronously via the delegate.
  Collection<SkyKey> missingKeys = getMissingKeys(globKeys, globValueMap);
  List<String> globsToDelegate = new ArrayList<>(missingKeys.size());
  for (SkyKey missingKey : missingKeys) {
    String missingPattern = globKeyToPatternMap.get(missingKey);
    if (missingPattern != null) {
      globsToDelegate.add(missingPattern);
      globKeys.remove(missingKey);
    }
  }
  Token legacyIncludesToken =
      globsToDelegate.isEmpty()
          ? null
          : legacyGlobber.runAsync(
              globsToDelegate, ImmutableList.of(), excludeDirs, allowEmpty);
  return new HybridToken(globValueMap, globKeys, legacyIncludesToken, excludes, allowEmpty);
}
 
Example 10
Source File: InListExpression.java    From phoenix with Apache License 2.0
@Override
public void readFields(DataInput input) throws IOException {
    super.readFields(input);
    input.readBoolean(); // Unused, but left for b/w compat. TODO: remove in next major release
    fixedWidth = WritableUtils.readVInt(input);
    byte[] valuesBytes = Bytes.readByteArray(input);
    valuesByteLength = valuesBytes.length;
    int len = fixedWidth == -1 ? WritableUtils.readVInt(input) : valuesByteLength / fixedWidth;
    // TODO: consider using a regular HashSet as we never serialize from the server-side
    values = Sets.newLinkedHashSetWithExpectedSize(len);
    hashCodeSet = false;
    int offset = 0;
    int i  = 0;
    if (i < len) {
        offset = readValue(input, valuesBytes, offset, minValue = new ImmutableBytesPtr());
        while (++i < len-1) {
            offset = readValue(input, valuesBytes, offset, new ImmutableBytesPtr());
        }
        if (i < len) {
            offset = readValue(input, valuesBytes, offset, maxValue = new ImmutableBytesPtr());
        } else {
            maxValue = minValue;
        }
    } else {
        minValue = maxValue = new ImmutableBytesPtr(ByteUtil.EMPTY_BYTE_ARRAY);
    }
}
 
Example 11
Source File: Round.java    From immutables with Apache License 2.0
private Set<Element> allAnnotatedElements() {
  Set<Element> elements = Sets.newLinkedHashSetWithExpectedSize(100);
  for (TypeElement annotation : annotations()) {
    Set<? extends Element> annotatedElements = round().getElementsAnnotatedWith(annotation);
    checkAnnotation(annotation, annotatedElements);
    elements.addAll(annotatedElements);
  }
  return elements;
}
 
Example 12
Source File: OrderByCompiler.java    From phoenix with Apache License 2.0
/**
 * Gets a list of columns in the ORDER BY clause
 * @param context the query context for tracking various states
 * associated with the given select statement
 * @param statement TODO
 * @param groupBy the list of columns in the GROUP BY clause
 * @param limit the row limit or null if no limit
 * @return the compiled ORDER BY clause
 * @throws SQLException
 */
public static OrderBy compile(StatementContext context,
                              FilterableStatement statement,
                              GroupBy groupBy, Integer limit, 
                              boolean isInRowKeyOrder) throws SQLException {
    List<OrderByNode> orderByNodes = statement.getOrderBy();
    if (orderByNodes.isEmpty()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    // accumulate columns in ORDER BY
    TrackOrderPreservingExpressionCompiler visitor = 
            new TrackOrderPreservingExpressionCompiler(context, groupBy, 
                    orderByNodes.size(), Ordering.ORDERED, null);
    LinkedHashSet<OrderByExpression> orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
    for (OrderByNode node : orderByNodes) {
        boolean isAscending = node.isAscending();
        Expression expression = node.getNode().accept(visitor);
        if (!expression.isStateless() && visitor.addEntry(expression, isAscending ? SortOrder.ASC : SortOrder.DESC)) {
            // Detect mix of aggregate and non-aggregate expressions (e.g. ORDER BY txns, SUM(txns))
            if (!visitor.isAggregate()) {
                if (statement.isAggregate() || statement.isDistinct()) {
                    // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x
                    if (statement.isDistinct()) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT)
                        .setMessage(expression.toString()).build().buildException();
                    }
                    ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                }
            }
            if (expression.getSortOrder() == SortOrder.DESC) {
                isAscending = !isAscending;
            }
            OrderByExpression orderByExpression = new OrderByExpression(expression, node.isNullsLast(), isAscending);
            orderByExpressions.add(orderByExpression);
        }
        visitor.reset();
    }
   
    if (orderByExpressions.isEmpty()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    // If we're ordering by the order returned by the scan, we don't need an order by
    if (isInRowKeyOrder && visitor.isOrderPreserving()) {
        if (visitor.isReverse()) {
            // Don't use reverse scan if we're using a skip scan, as our skip scan doesn't support this yet.
            // REV_ROW_KEY_ORDER_BY scan would not take effect for a projected table, so don't return it for such table types.
            if (context.getConnection().getQueryServices().getProps().getBoolean(QueryServices.USE_REVERSE_SCAN_ATTRIB, QueryServicesOptions.DEFAULT_USE_REVERSE_SCAN)
                    && !context.getScanRanges().useSkipScanFilter()
                    && context.getCurrentTable().getTable().getType() != PTableType.JOIN
                    && context.getCurrentTable().getTable().getType() != PTableType.SUBQUERY) {
                return OrderBy.REV_ROW_KEY_ORDER_BY;
            }
        } else {
            return OrderBy.FWD_ROW_KEY_ORDER_BY;
        }
    }

    return new OrderBy(Lists.newArrayList(orderByExpressions.iterator()));
}
 
Example 13
Source File: OrderByCompiler.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Gets a list of columns in the ORDER BY clause
 * @param context the query context for tracking various states
 * associated with the given select statement
 * @param statement TODO
 * @param groupBy the list of columns in the GROUP BY clause
 * @param limit the row limit or null if no limit
 * @return the compiled ORDER BY clause
 * @throws SQLException
 */
public static OrderBy compile(StatementContext context,
                              FilterableStatement statement,
                              GroupBy groupBy, Integer limit) throws SQLException {
    List<OrderByNode> orderByNodes = statement.getOrderBy();
    if (orderByNodes.isEmpty()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    // accumulate columns in ORDER BY
    TrackOrderPreservingExpressionCompiler visitor = 
            new TrackOrderPreservingExpressionCompiler(context, groupBy, 
                    orderByNodes.size(), Ordering.ORDERED);
    LinkedHashSet<OrderByExpression> orderByExpressions = Sets.newLinkedHashSetWithExpectedSize(orderByNodes.size());
    for (OrderByNode node : orderByNodes) {
        boolean isAscending = node.isAscending();
        Expression expression = node.getNode().accept(visitor);
        if (!expression.isStateless() && visitor.addEntry(expression, isAscending ? null : ColumnModifier.SORT_DESC)) {
            // Detect mix of aggregate and non-aggregate expressions (e.g. ORDER BY txns, SUM(txns))
            if (!visitor.isAggregate()) {
                if (statement.isAggregate() || statement.isDistinct()) {
                    // Detect ORDER BY not in SELECT DISTINCT: SELECT DISTINCT count(*) FROM t ORDER BY x
                    if (statement.isDistinct()) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_NOT_IN_SELECT_DISTINCT)
                        .setMessage(expression.toString()).build().buildException();
                    }
                    ExpressionCompiler.throwNonAggExpressionInAggException(expression.toString());
                }
            }
            if (expression.getDataType().isArrayType()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.ORDER_BY_ARRAY_NOT_SUPPORTED)
                .setMessage(expression.toString()).build().buildException();
            }
            if (expression.getColumnModifier() == ColumnModifier.SORT_DESC) {
                isAscending = !isAscending;
            }
            OrderByExpression orderByExpression = new OrderByExpression(expression, node.isNullsLast(), isAscending);
            orderByExpressions.add(orderByExpression);
        }
        visitor.reset();
    }
   
    if (orderByExpressions.isEmpty()) {
        return OrderBy.EMPTY_ORDER_BY;
    }
    // If we're ordering by the order returned by the scan, we don't need an order by
    if (visitor.isOrderPreserving()) {
        if (visitor.isReverse()) {
            if (context.getConnection().getQueryServices().supportsFeature(Feature.REVERSE_SCAN)) {
                return OrderBy.REV_ROW_KEY_ORDER_BY;
            }
        } else {
            return OrderBy.FWD_ROW_KEY_ORDER_BY;
        }
    }

    return new OrderBy(Lists.newArrayList(orderByExpressions.iterator()));
}
 
Example 14
Source File: TransactionExceptionTranslatorRegistry.java    From molgenis with GNU Lesser General Public License v3.0
public TransactionExceptionTranslatorRegistry() {
  transactionExceptionTranslators = Sets.newLinkedHashSetWithExpectedSize(1);
}
 
Example 15
Source File: CollectionUtils.java    From fastjgame with Apache License 2.0
/**
 * Creates a Set with enough initial capacity to reduce resizing; useful when the maximum
 * number of elements can be estimated in advance.
 *
 * @param expectedSize the expected number of elements to be added
 * @param <E>          the type of element
 * @return a new LinkedHashSet sized for the expected number of elements
 */
public static <E> LinkedHashSet<E> newLinkedHashSetWithExpectedSize(int expectedSize) {
    return Sets.newLinkedHashSetWithExpectedSize(expectedSize);
}