Java Code Examples for java.util.concurrent.ConcurrentMap#size()

The following examples show how to use java.util.concurrent.ConcurrentMap#size(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
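Before looking at the project examples, here is a minimal, self-contained sketch of the ConcurrentMap#size() contract (the class and variable names are illustrative, not taken from any of the projects below): the returned count reflects the map at some moment during the call and may already be stale by the time you act on it, so most of the examples below treat it as an approximate value for soft capacity limits or cache eviction rather than as an exact count.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapSizeDemo {

    private static final int MAX_ENTRIES = 2;

    public static void main(String[] args) {
        ConcurrentMap<String, Integer> cache = new ConcurrentHashMap<>();
        System.out.println("initial size = " + cache.size()); // 0 on a fresh map

        // Typical check-then-act pattern used by several examples below:
        // consult size() first, then insert with putIfAbsent. The two calls
        // are not atomic, so under contention the limit is only approximate.
        for (String key : new String[] {"a", "b", "c"}) {
            if (cache.size() >= MAX_ENTRIES) {
                System.out.println("limit reached, rejecting " + key);
                continue;
            }
            cache.putIfAbsent(key, key.length());
        }

        System.out.println("final size = " + cache.size()); // 2
    }
}

Running this single-threaded sketch prints an initial size of 0 and a final size of 2, because the third insertion is rejected once the soft limit is reached.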
Example 1
Source File: FileBasedRegistrationService.java    From hono with Eclipse Public License 2.0
/**
 * Adds a device to this registry.
 *
 * @param tenantId The tenant the device belongs to.
 * @param deviceId The ID of the device to add.
 * @param device Additional data to register with the device (may be {@code null}).
 * @param span The tracing span to use.
 * @return The outcome of the operation indicating success or failure.
 */
public OperationResult<Id> processCreateDevice(final String tenantId, final Optional<String> deviceId,
        final Device device, final Span span) {

    Objects.requireNonNull(tenantId);
    final String deviceIdValue = deviceId.orElseGet(() -> generateDeviceId(tenantId));

    final ConcurrentMap<String, Versioned<Device>> devices = getDevicesForTenant(tenantId);
    if (devices.size() >= getConfig().getMaxDevicesPerTenant()) {
        TracingHelper.logError(span, "Maximum devices number limit reached for tenant");
        return Result.from(HttpURLConnection.HTTP_FORBIDDEN, OperationResult::empty);
    }

    final Versioned<Device> newDevice = new Versioned<>(device);
    if (devices.putIfAbsent(deviceIdValue, newDevice) == null) {
        dirty.set(true);
        return OperationResult.ok(HttpURLConnection.HTTP_CREATED,
                Id.of(deviceIdValue), Optional.empty(), Optional.of(newDevice.getVersion()));
    } else {
        TracingHelper.logError(span, "Device already exists for tenant");
        return Result.from(HttpURLConnection.HTTP_CONFLICT, OperationResult::empty);
    }

}
 
Example 2
Source File: RandomWeightedStrategy.java    From spring-boot-starter-micro-job with Apache License 2.0
/**
 * @param jobTriggerKey the job trigger key
 * @return the selected node, or {@code null} if no node is bound to the trigger
 * @throws JobException if node selection fails
 */
@Override
public Node select(String jobTriggerKey) throws JobException {
    // if no executing node exists for this trigger, return null directly
    ConcurrentMap<String, Node> nodeMap = NodeLoadBalanceService.getLoadBalanceNode(jobTriggerKey);
    if (nodeMap == null || nodeMap.size() == 0) {
        return null;
    }
    nodeMap.values().stream().forEach(
            node -> {
                double lastWeight = this.nodes.size() == 0 ? 0 : this.nodes.lastKey().doubleValue();
                this.nodes.put(node.getWeight() + lastWeight, node);
            }
    );
    Double randomWeight = this.nodes.lastKey() * Math.random();
    SortedMap<Double, Node> tailMap = this.nodes.tailMap(randomWeight, false);
    return this.nodes.get(tailMap.firstKey());
}
 
Example 3
Source File: LocalFileStore.java    From joyqueue with Apache License 2.0
/**
 * Clones a deep copy of the consume position cache in a thread-safe way.
 *
 * @param consumePositionCache the consume position cache to copy
 * @return a deep copy of the cache entries, grouped by topic and app
 */
private Map<Joint, List<ConsumeBill>> cloneIndexCache(ConcurrentMap<ConsumePartition, Position> consumePositionCache) {
    Map<Joint, List<ConsumeBill>> copyOfConsumeBills = new HashMap<>(consumePositionCache.size());
    for (Map.Entry<ConsumePartition, Position> entry : consumePositionCache.entrySet()) {
        ConsumePartition consumePartition = entry.getKey();
        String topic = consumePartition.getTopic();
        String app = consumePartition.getApp();
        short partition = consumePartition.getPartition();
        int partitionGroup = consumePartition.getPartitionGroup();
        Position position = entry.getValue();
        Joint joint = new Joint(topic, app);
        List<ConsumeBill> consumeBills = copyOfConsumeBills.get(joint);
        if (consumeBills == null) {
            consumeBills = new ArrayList<>();
            copyOfConsumeBills.put(joint, consumeBills);
        }
        ConsumeBill consumeBill = new ConsumeBill(partitionGroup, partition, position);
        consumeBills.add(consumeBill);
    }
    return copyOfConsumeBills;
}
 
Example 4
Source File: RgxOpManager.java    From V8LogScanner with MIT License
/**
 * Obtains the desired property from a single event block.
 * The method signature is suitable for the RgxGrouper function.
 */
public static String getEventProperty(String input, ConcurrentMap<RegExp, Pattern> eventPatterns,
                                      ConcurrentMap<RegExp, List<String>> cleanProps, ConcurrentMap<RegExp, List<String>> groupProps) {

    RegExp foundEvent = anyMatch(input, eventPatterns);
    if (foundEvent == null)
        return input;

    if (cleanProps.size() > 0)
        input = cleanEventProperty(input, foundEvent, cleanProps);

    List<String> propsRgx = groupProps.get(foundEvent);

    if (propsRgx == null)
        return input;

    List<String> receivedProps = new ArrayList<>(5);

    for (String currRgx : propsRgx) {
        Pattern pattern = Pattern.compile(currRgx, Pattern.DOTALL);
        Matcher matcher = pattern.matcher(input);
        if (matcher.find())
            receivedProps.add(matcher.group());
    }
    return String.join(",", receivedProps);
}
 
Example 5
Source File: OtpService.java    From localization_nifi with Apache License 2.0
/**
 * Generates a token and stores it in the specified cache.
 *
 * @param cache                     The cache
 * @param authenticationToken       The authentication token
 * @return                          The one time use token
 */
private String generateToken(final ConcurrentMap<CacheKey, String> cache, final OtpAuthenticationToken authenticationToken) {
    if (cache.size() >= MAX_CACHE_SOFT_LIMIT) {
        throw new IllegalStateException("The maximum number of single use tokens have been issued.");
    }

    // hash the authentication and build a cache key
    final CacheKey cacheKey = new CacheKey(hash(authenticationToken));

    // store the token unless it is already stored, so that its original timestamp is not updated
    cache.putIfAbsent(cacheKey, authenticationToken.getName());

    // return the token
    return cacheKey.getKey();
}
 
Example 6
Source File: AbstractBfnRule.java    From light with Apache License 2.0
protected void clearTagCache(String host, List<String> tags) {
    Map<String, Object> categoryMap = ServiceLocator.getInstance().getMemoryImage("categoryMap");
    ConcurrentMap<Object, Object> listCache = (ConcurrentMap<Object, Object>)categoryMap.get("listCache");
    if(listCache != null && listCache.size() > 0) {
        // clear tagList for this host
        if(tags != null && tags.size() > 0) {
            for(String tag: tags) {
                listCache.remove(host + tag);
            }
        }
    }
}
 
Example 7
Source File: ClientPool.java    From xio with Apache License 2.0
public void release(Client client) {
  log.debug("recycling client {}", client);
  client.recycle();
  ConcurrentMap<Client, Meta> pool = getPool(client.remoteAddress());
  if (pool.size() < maxSizePerAddress && !pool.containsKey(client)) {
    log.debug("releasing client to pool {}", client);
    pool.put(client, new Meta(client));
  } else {
    Meta meta = pool.get(client);
    if (meta != null) {
      log.debug("setting client available in pool {}", client);
      meta.available.set(true);
    }
  }
}
 
Example 8
Source File: StandardSchemaCache.java    From grakn with GNU Affero General Public License v3.0
@Override
public EntryList getSchemaRelations(long schemaId, BaseRelationType type, Direction dir) {
    Preconditions.checkArgument(IDManager.VertexIDType.Schema.is(schemaId));
    Preconditions.checkArgument((Long.MAX_VALUE >>> (SCHEMAID_TOTALFORW_SHIFT - SCHEMAID_BACK_SHIFT)) >= schemaId);

    long typePlusRelation = getIdentifier(schemaId, type, dir);
    ConcurrentMap<Long, EntryList> types = schemaRelations;
    EntryList entries;
    if (types == null) {
        entries = schemaRelationsBackup.getIfPresent(typePlusRelation);
        if (entries == null) {
            entries = retriever.retrieveSchemaRelations(schemaId, type, dir);
            if (!entries.isEmpty()) { //only cache if type exists
                schemaRelationsBackup.put(typePlusRelation, entries);
            }
        }
    } else {
        entries = types.get(typePlusRelation);
        if (entries == null) { //Retrieve it
            if (types.size() > maxCachedRelations) {
                /* Safeguard against the concurrent hash map growing too large - this would be a VERY rare event,
                as it only happens for graph databases with thousands of types.
                 */
                schemaRelations = null;
                return getSchemaRelations(schemaId, type, dir);
            } else {
                //Expand map
                entries = retriever.retrieveSchemaRelations(schemaId, type, dir);
                types.put(typePlusRelation, entries);
            }
        }
    }
    return entries;
}
 
Example 9
Source File: MapCache.java    From component-runtime with Apache License 2.0
public <A, B> void evictIfNeeded(final ConcurrentMap<A, B> cache, final int maxSize) {
    if (maxSize < 0) {
        cache.clear();
        return;
    }
    while (cache.size() > maxSize) {
        final Iterator<Map.Entry<A, B>> iterator = cache.entrySet().iterator();
        if (iterator.hasNext()) {
            iterator.next(); // position on an entry first; remove() without next() throws IllegalStateException
            iterator.remove();
        }
    }
}
 
Example 10
Source File: CsCaching107Provider.java    From demo_cache with Apache License 2.0
public void releaseCacheManager(URI uri, ClassLoader classLoader) {
	if (uri == null || classLoader == null) {
		throw new NullPointerException("uri or classLoader should not be null");
	}

	ConcurrentMap<URI, CacheManager> cacheManagersByURI = cacheManagers.get(classLoader);
	if (cacheManagersByURI != null) {
		cacheManagersByURI.remove(uri);

		if (cacheManagersByURI.size() == 0) {
			cacheManagers.remove(classLoader);
		}
	}
}
 
Example 11
Source File: WebSocketConnectionRegistry.java    From actframework with Apache License 2.0
/**
 * Returns the connection count in this registry.
 *
 * Note that it might count connections that are closed but not yet removed from the registry.
 *
 * @return the connection count
 */
public int count() {
    int n = 0;
    for (ConcurrentMap<?, ?> bag : registry.values()) {
        n += bag.size();
    }
    return n;
}
 
Example 12
Source File: AbstractCommentRule.java    From light with Apache License 2.0
protected void clearCommentCache(String entityRid) {
    Map<String, Object> categoryMap = ServiceLocator.getInstance().getMemoryImage("categoryMap");
    ConcurrentMap<Object, Object> commentCache = (ConcurrentMap<Object, Object>)categoryMap.get("commentCache");
    if(commentCache != null && commentCache.size() > 0) {
        commentCache.remove(entityRid + "createDate" + "desc");
        commentCache.remove(entityRid + "createDate" + "asc");
        commentCache.remove(entityRid + "rank" + "desc");
    }
}
 
Example 13
Source File: YarnTestBase.java    From Flink-CEPplus with Apache License 2.0
public static int getRunningContainers() {
	int count = 0;
	for (int nmId = 0; nmId < NUM_NODEMANAGERS; nmId++) {
		NodeManager nm = yarnCluster.getNodeManager(nmId);
		ConcurrentMap<ContainerId, Container> containers = nm.getNMContext().getContainers();
		count += containers.size();
	}
	return count;
}
 
Example 14
Source File: XmlTransformer.java    From lutece-core with BSD 3-Clause "New" or "Revised" License
/**
 * Releases a Templates instance into the cache. Previously (before 6.0.0) the cache stored transformers; now it stores templates.
 * 
 * @param templates
 *            The XML templates
 * @param strStyleSheetId
 *            The StyleSheet Id
 */
private void releaseTemplates( Templates templates, String strStyleSheetId )
{
    if ( TRANSFORMER_POOL_SIZE > 0 )
    {
        Templates result = null;
        ConcurrentMap<String, Templates> transformerList = null;
        int nTransformerListIndex = 0;

        do
        {
            transformerList = transformersPoolList.get( nTransformerListIndex );
            nTransformerListIndex++;

            // This set of action is not performed atomically but it can not cause problems
            if ( transformerList.size( ) < MAX_TRANSFORMER_SIZE )
            {
                result = transformerList.putIfAbsent( strStyleSheetId, templates );
            }
            else
            {
                // Aggressive release ( speed up GC )
                transformerList.clear( );

                AppLogService.info( "XmlTransformer : cache is full, you may need to increase cache size." );
            }
        }
        while ( ( result != null ) && ( nTransformerListIndex < TRANSFORMER_POOL_SIZE ) );
    }
}
 
Example 15
Source File: JCasHashMapCompareTest.java    From uima-uimaj with Apache License 2.0
private int runConCur(int numberOfThreads) throws Exception {
    final ConcurrentMap<Integer, TOP> m =
        new ConcurrentHashMap<>(200, 0.75F, numberOfThreads);
    concurrentMap = m;
    
    final int numberOfWaiters = numberOfThreads*2;
    final Object[] waiters = new Object[numberOfWaiters];
    for (int i = 0; i < numberOfWaiters; i++) {
      waiters[i] = new Object();
    }
    MultiThreadUtils.Run2isb run2isb= new MultiThreadUtils.Run2isb() {
      
      public void call(int threadNumber, int repeatNumber, StringBuilder sb) {
//        int founds = 0, puts = 0;
        for (int i = 0; i < sizeOfTest*threadNumber; i++) {
          final int key = hash(i, threadNumber) / 2;
          final Object waiter = waiters[key & (numberOfWaiters - 1)];
          TOP newFs = TOP._createSearchKey(key);
          TOP fs = m.putIfAbsent(key, newFs);
//          while (fs != null && fs._isJCasHashMapReserve()) {
//            // someone else reserved this
//
//            // wait for notify
//            synchronized (waiter) {
//              fs = m.get(key);
//              if (fs._isJCasHashMapReserve()) {
//                try {
//                  waiter.wait();
//                } catch (InterruptedException e) {
//                }
//              }
//            }
//          }
//            
////          TOP fs = m.get(key);
//          if (null == fs) {
////            puts ++;
//            TOP prev = m.put(key,  TOP._createSearchKey(key));
//            if (prev._isJCasHashMapReserve()) {
//              synchronized (waiter) {
//                waiter.notifyAll();
//              }
//            }
////              puts --;  // someone beat us 
////              founds ++;
//          }
//          
        } // end of for loop
////        System.out.println("concur Puts = " + puts + ", founds = " + founds);
      }  
    };  
    long start = System.currentTimeMillis();
    MultiThreadUtils.tstMultiThread("JCasHashMapTestCompConcur",  numberOfThreads, 10, run2isb,
        new Runnable() {
          public void run() {
            m.clear();
        }});
    System.out.format("JCasCompTest - using ConcurrentHashMap, threads = %d, time = %,f seconds%n", numberOfThreads, ((double)(System.currentTimeMillis() - start)) / 1000.d);
    return m.size();
  }
 
Example 16
Source File: RollbackExecutor.java    From heisenberg with Apache License 2.0
/**
 * Transaction rollback.
 */
public void rollback(final BlockingSession session) {
    final ServerConnection source = session.getSource();
    final ConcurrentMap<RouteResultsetNode, Channel> target = session.getTarget();
    final int initNodeCount = target.size();
    if (initNodeCount <= 0) {
        ByteBuffer buffer = source.allocate();
        source.write(source.writeToBuffer(OkPacket.OK, buffer));
        return;
    }

    // initialization
    final ReentrantLock lock = this.lock;
    lock.lock();
    try {
        this.isFail.set(false);
        this.nodeCount = initNodeCount;
    } finally {
        lock.unlock();
    }

    if (source.isClosed()) {
        decrementCountToZero();
        return;
    }

    // execute
    Executor exec = source.getProcessor().getExecutor();

    int started = 0;
    for (RouteResultsetNode rrn : target.keySet()) {
        final MySQLChannel mc = (MySQLChannel) target.get(rrn);
        if (mc != null) {
            mc.setRunning(true);
            exec.execute(new Runnable() {
                @Override
                public void run() {
                    _rollback(mc, session);
                }
            });
            ++started;
        }
    }

    if (started < initNodeCount) {
        decrementCountBy(initNodeCount - started);
    }
}
 
Example 17
Source File: JCasHashMapCompareTest.java    From uima-uimaj with Apache License 2.0
private int runConCur(int numberOfThreads) throws Exception {
    final ConcurrentMap<Integer, FeatureStructureImpl> m = 
        new ConcurrentHashMap<Integer, FeatureStructureImpl>(200, 0.75F, numberOfThreads);
    concurrentMap = m;
    
    final int numberOfWaiters = numberOfThreads*2;
    final Object[] waiters = new Object[numberOfWaiters];
    for (int i = 0; i < numberOfWaiters; i++) {
      waiters[i] = new Object();
    }
    MultiThreadUtils.Run2isb run2isb= new MultiThreadUtils.Run2isb() {
      
      public void call(int threadNumber, int repeatNumber, StringBuilder sb) {
//        int founds = 0, puts = 0;
        for (int i = 0; i < sizeOfTest*threadNumber; i++) {
          final int key = hash(i, threadNumber) / 2;
          final Object waiter = waiters[key & (numberOfWaiters - 1)];
          FeatureStructureImpl fs = m.putIfAbsent(key, new TOP(key, JCasHashMapSubMap.RESERVE_TOP_TYPE_INSTANCE));
          while (fs != null && ((TOP)fs).jcasType == JCasHashMapSubMap.RESERVE_TOP_TYPE_INSTANCE) {
            // someone else reserved this

            // wait for notify
            synchronized (waiter) {
              fs = m.get(key);
              if (((TOP)fs).jcasType == JCasHashMapSubMap.RESERVE_TOP_TYPE_INSTANCE) {
                try {
                  waiter.wait();
                } catch (InterruptedException e) {
                }
              }
            }
          }
            
//          FeatureStructureImpl fs = m.get(key);
          if (null == fs) {
//            puts ++;
            FeatureStructureImpl prev = m.put(key,  new TOP(key, FAKE_TOP_TYPE_INSTANCE));
            if (((TOP)prev).jcasType == JCasHashMapSubMap.RESERVE_TOP_TYPE_INSTANCE) {
              synchronized (waiter) {
                waiter.notifyAll();
              }
            }
//              puts --;  // someone beat us 
//              founds ++;
          }
          
        }
//        System.out.println("concur Puts = " + puts + ", founds = " + founds);
      }
    };  
    long start = System.currentTimeMillis();
    MultiThreadUtils.tstMultiThread("JCasHashMapTestCompConcur",  numberOfThreads, 10, run2isb,
        new Runnable() {
          public void run() {
            m.clear();
        }});
    System.out.format("JCasCompTest - using ConcurrentHashMap, threads = %d, time = %,f seconds%n", numberOfThreads, (System.currentTimeMillis() - start) / 1000.f);
    return m.size();
  }
 
Example 18
Source File: ExecutionGraphSchedulingTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that all slots are being returned to the {@link SlotOwner} if the
 * {@link ExecutionGraph} is being cancelled. See FLINK-9908
 */
@Test
public void testCancellationOfIncompleteScheduling() throws Exception {
	final int parallelism = 10;

	final JobVertex jobVertex = new JobVertex("Test job vertex");
	jobVertex.setInvokableClass(NoOpInvokable.class);
	jobVertex.setParallelism(parallelism);

	final JobGraph jobGraph = new JobGraph(jobVertex);
	jobGraph.setAllowQueuedScheduling(true);
	jobGraph.setScheduleMode(ScheduleMode.EAGER);

	final TestingSlotOwner slotOwner = new TestingSlotOwner();
	final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();

	final ConcurrentMap<SlotRequestId, Integer> slotRequestIds = new ConcurrentHashMap<>(parallelism);

	final TestingSlotProvider slotProvider = new TestingSlotProvider(
		(SlotRequestId slotRequestId) -> {
			slotRequestIds.put(slotRequestId, 1);
			// return 50/50 fulfilled and unfulfilled requests
			return slotRequestIds.size() % 2 == 0 ?
				CompletableFuture.completedFuture(
					createSingleLogicalSlot(slotOwner, taskManagerGateway, slotRequestId)) :
				new CompletableFuture<>();
		});

	final ExecutionGraph executionGraph = createExecutionGraph(jobGraph, slotProvider);

	executionGraph.start(TestingComponentMainThreadExecutorServiceAdapter.forMainThread());
	final Set<SlotRequestId> slotRequestIdsToReturn = ConcurrentHashMap.newKeySet(slotRequestIds.size());

	executionGraph.scheduleForExecution();

	slotRequestIdsToReturn.addAll(slotRequestIds.keySet());

	slotOwner.setReturnAllocatedSlotConsumer(logicalSlot -> {
		slotRequestIdsToReturn.remove(logicalSlot.getSlotRequestId());
	});

	slotProvider.setSlotCanceller(slotRequestIdsToReturn::remove);

	// make sure that we complete cancellations of deployed tasks
	taskManagerGateway.setCancelConsumer(
		(ExecutionAttemptID executionAttemptId) -> {
			final Execution execution = executionGraph.getRegisteredExecutions().get(executionAttemptId);

			// if the execution was cancelled in state SCHEDULING, then it might already have been removed
			if (execution != null) {
				execution.completeCancelling();
			}
		}
	);

	executionGraph.cancel();
	assertThat(slotRequestIdsToReturn, is(empty()));
}
 
Example 19
Source File: RetryExecuteJobRunnable.java    From spring-boot-starter-micro-job with Apache License 2.0
/**
 * Job retry loop.
 */
@Override
public void run() {
    while (true) {
        try {
            logger.debug("Current retry queue job count:{}", JobConstants.JOB_RECOVERY_RETRY_QUEUE.size());
            // take a job from the queue
            JobNodeExecuteDetailRecord executeDetailRecord = JobConstants.JOB_RECOVERY_RETRY_QUEUE.take();
            if (executeDetailRecord != null) {
                // the set of bound nodes
                ConcurrentMap bindNodes = JobConstants.TRIGGER_NODE_BIND.get(executeDetailRecord.getNedTriggerKey());
                // still within the retry count limit
                // but no executing node exists, so put the job back into the retry queue
                if (executeDetailRecord.getNedRetryCount() < configRetryMaxCount || bindNodes == null || bindNodes.size() == 0) {
                    JobConstants.JOB_RECOVERY_RETRY_QUEUE.put(executeDetailRecord);
                    continue;
                }
                // within the retry count limit & the trigger has executing nodes
                if (executeDetailRecord.getNedRetryCount() < configRetryMaxCount && bindNodes != null && bindNodes.size() > 0) {

                    // set the retry count
                    executeDetailRecord.setNedRetryCount(executeDetailRecord.getNedRetryCount() + 1);

                    logger.warn("Job:[{}],{} retry", executeDetailRecord.getNedId(), executeDetailRecord.getNedRetryCount());
                    // publish the remote job execution event
                    applicationContext.publishEvent(new JobClusterExecuteEvent(this, executeDetailRecord));
                }
                // retry count exceeded & executing nodes exist
                // update the execution record status to ERROR
                else if (executeDetailRecord.getNedRetryCount() >= configRetryMaxCount && bindNodes != null && bindNodes.size() > 0) {
                    jobNodeExecuteDetailService.updateStatus(executeDetailRecord.getNedId(), JobExecuteStatusEnum.ERROR.toString());
                }
                // update the retry count
                jobNodeExecuteDetailService.updateRetryCount(executeDetailRecord.getNedId(), executeDetailRecord.getNedRetryCount());
            }
        } catch (Exception e) {
            if (e instanceof InterruptedException) {
                logger.error("task from job retry queue error");
            }
            e.printStackTrace();
        }
    }
}
 
Example 20
Source File: WebSocketConnectionRegistry.java    From actframework with Apache License 2.0
/**
 * Returns the connection count for the specified key in this registry.
 *
 * Note that it might count connections that are closed but not yet removed from the registry.
 *
 * @param key
 *         the key
 * @return the connection count for the key
 */
public int count(String key) {
    ConcurrentMap<WebSocketConnection, WebSocketConnection> bag = registry.get(key);
    return null == bag ? 0 : bag.size();
}