Java Code Examples for java.util.concurrent.ConcurrentHashMap#values()

The following examples show how to use java.util.concurrent.ConcurrentHashMap#values(). They are taken from open-source projects; the original project and source file are named above each example.
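Before the project examples, here is a minimal, self-contained sketch of the method itself (the class and variable names below are illustrative, not taken from any of the projects listed): values() returns a Collection view backed by the map, so its iterator is weakly consistent and can be traversed safely while other threads update the map.

import java.util.concurrent.ConcurrentHashMap;

public class ValuesExample {
    public static void main(String[] args) {
        ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<String, Integer>();
        counts.put("a", 1);
        counts.put("b", 2);

        // values() is a live view: it reflects later changes to the map and
        // never throws ConcurrentModificationException during iteration.
        int sum = 0;
        for (Integer value : counts.values()) {
            sum += value;
        }
        System.out.println("sum = " + sum); // prints "sum = 3"
    }
}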
Example 1
Source File: DefaultStatusManager.java    From chaosblade-exec-jvm with Apache License 2.0
@Override
public Map<String, List<StatusMetric>> listExps() {
    HashMap<String, List<StatusMetric>> map = new HashMap<String, List<StatusMetric>>();
    Set<Entry<String, ConcurrentHashMap<String, StatusMetric>>> entries = models.entrySet();
    for (Entry<String, ConcurrentHashMap<String, StatusMetric>> entry : entries) {
        ConcurrentHashMap<String, StatusMetric> metricMap = entry.getValue();
        String targetName = entry.getKey();
        List<StatusMetric> statusMetrics = map.get(targetName);
        if (statusMetrics == null) {
            statusMetrics = new LinkedList<StatusMetric>();
            map.put(targetName, statusMetrics);
        }
        for (StatusMetric statusMetric : metricMap.values()) {
            statusMetrics.add(statusMetric);
        }
    }
    return map;
}
 
Example 2
Source File: DefaultMessageStore.java    From rocketmq_trans_message with Apache License 2.0
@Override
public int cleanUnusedTopic(Set<String> topics) {
    Iterator<Entry<String, ConcurrentHashMap<Integer, ConsumeQueue>>> it = this.consumeQueueTable.entrySet().iterator();
    while (it.hasNext()) {
        Entry<String, ConcurrentHashMap<Integer, ConsumeQueue>> next = it.next();
        String topic = next.getKey();

        if (!topics.contains(topic) && !topic.equals(ScheduleMessageService.SCHEDULE_TOPIC)) {
            ConcurrentHashMap<Integer, ConsumeQueue> queueTable = next.getValue();
            for (ConsumeQueue cq : queueTable.values()) {
                cq.destroy();
                log.info("cleanUnusedTopic: {} {} ConsumeQueue cleaned", //
                        cq.getTopic(), //
                        cq.getQueueId() //
                );

                this.commitLog.removeQueueFromTopicQueueTable(cq.getTopic(), cq.getQueueId());
            }
            it.remove();

            log.info("cleanUnusedTopic: {},topic destroyed", topic);
        }
    }

    return 0;
}
 
Example 3
Source File: RegistryRepository.java    From artemis with Apache License 2.0
public Service getService(String serviceId) {
    ConcurrentHashMap<String, Lease<Instance>> serviceInstances = _leases.get(serviceId);
    if (serviceInstances == null || serviceInstances.size() == 0)
        return null;

    List<Instance> instances = new ArrayList<Instance>();
    for (Lease<Instance> lease : serviceInstances.values()) {
        instances.add(lease.data());
    }

    if (instances.size() == 0)
        return null;

    Service service = getApplicationInternal(serviceId);
    service.setInstances(instances);
    return service;
}
 
Example 4
Source File: SysstatementsTableManager.java    From herddb with Apache License 2.0
@Override
protected Iterable<Record> buildVirtualRecordList() {
    ConcurrentHashMap<Long, RunningStatementInfo> runningStatements = tableSpaceManager.getDbmanager().getRunningStatements().getRunningStatements();
    List<Record> result = new ArrayList<>();
    long now = System.currentTimeMillis();
    for (RunningStatementInfo info : runningStatements.values()) {

        result.add(RecordSerializer.makeRecord(
                table,
                "id", info.getId(),
                "tablespace", info.getTablespace(),
                "query", info.getQuery(),
                "startts", new java.sql.Timestamp(info.getStartTimestamp()),
                "runningtime", (now - info.getStartTimestamp()),
                "batches", info.getNumBatches(),
                "info", info.getInfo())
        );
    }
    return result;
}
 
Example 5
Source File: DefaultMessageStore.java    From rocketmq_trans_message with Apache License 2.0
private void recoverTopicQueueTable() {
    HashMap<String/* topic-queueid */, Long/* offset */> table = new HashMap<String, Long>(1024);
    long minPhyOffset = this.commitLog.getMinOffset();
    for (ConcurrentHashMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
        for (ConsumeQueue logic : maps.values()) {
            String key = logic.getTopic() + "-" + logic.getQueueId();
            table.put(key, logic.getMaxOffsetInQueue());
            logic.correctMinOffset(minPhyOffset);
        }
    }

    this.commitLog.setTopicQueueTable(table);
}
 
Example 6
Source File: DefaultMessageStore.java    From RocketMQ-Master-analyze with Apache License 2.0
private void deleteExpiredFiles() {
    int deleteLogicsFilesInterval =
            DefaultMessageStore.this.getMessageStoreConfig().getDeleteConsumeQueueFilesInterval();

    long minOffset = DefaultMessageStore.this.commitLog.getMinOffset();
    if (minOffset > this.lastPhysicalMinOffset) {
        this.lastPhysicalMinOffset = minOffset;

// Delete expired consume queue (logic queue) files
        ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables =
                DefaultMessageStore.this.consumeQueueTable;

        for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
            for (ConsumeQueue logic : maps.values()) {
                int deleteCount = logic.deleteExpiredFile(minOffset);

                if (deleteCount > 0 && deleteLogicsFilesInterval > 0) {
                    try {
                        Thread.sleep(deleteLogicsFilesInterval);
                    }
                    catch (InterruptedException e) {
                        // interrupted while pacing deletions; ignore and continue
                    }
                }
            }
        }

// Delete expired index files
        DefaultMessageStore.this.indexService.deleteExpiredFile(minOffset);
    }
}
 
Example 7
Source File: Consumer.java    From spring-kafka-demo with Apache License 2.0
@SuppressWarnings({ "unchecked", "rawtypes" })
public static void main(String[] args) {
	ch.qos.logback.classic.Logger rootLogger = (ch.qos.logback.classic.Logger)LoggerFactory.getLogger(ch.qos.logback.classic.Logger.ROOT_LOGGER_NAME);
	rootLogger.setLevel(Level.toLevel("info"));
	
	final ClassPathXmlApplicationContext ctx = new ClassPathXmlApplicationContext(CONFIG, Consumer.class);
	ctx.start();

	final QueueChannel channel = ctx.getBean("inputFromKafka", QueueChannel.class);
	Message msg;		
	while((msg = channel.receive()) != null) {
		HashMap map = (HashMap)msg.getPayload();
		Set<Map.Entry> set = map.entrySet();
		for (Map.Entry entry : set) {
			String topic = (String)entry.getKey();
			System.out.println("Topic:" + topic);
			ConcurrentHashMap<Integer,List<byte[]>> messages = (ConcurrentHashMap<Integer,List<byte[]>>)entry.getValue();
			Collection<List<byte[]>> values = messages.values();
			for (Iterator<List<byte[]>> iterator = values.iterator(); iterator.hasNext();) {
				List<byte[]> list = iterator.next();
				for (byte[] object : list) {
					String message = new String(object);
					System.out.println("\tMessage: " + message);
				}
				
			}
		
		}
		
	}
	
	try {
		Thread.sleep(100000);
	} catch (InterruptedException e) {
		e.printStackTrace();
	}
	ctx.close();
}
 
Example 8
Source File: AwEventObserver.java    From AndroidWear-OpenWear with MIT License
@Override
public void onPeerDisconnected(Node node) {
    // TODO Auto-generated method stub
    ConcurrentHashMap<String, AbsEventHandler> eventHandlers = ClientManager.getInstance().getEventHandlers();

    String displayName = node.getDisplayName();
    String nodeId = node.getId();
    for (AbsEventHandler eventHandler : eventHandlers.values()) {
        eventHandler.handlePeerDisconnected(displayName, nodeId);
    }
}
 
Example 9
Source File: DefaultMessageStore.java    From rocketmq_trans_message with Apache License 2.0
private void deleteExpiredFiles() {
    int deleteLogicsFilesInterval = DefaultMessageStore.this.getMessageStoreConfig().getDeleteConsumeQueueFilesInterval();

    long minOffset = DefaultMessageStore.this.commitLog.getMinOffset();
    if (minOffset > this.lastPhysicalMinOffset) {
        this.lastPhysicalMinOffset = minOffset;

        ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;

        for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
            for (ConsumeQueue logic : maps.values()) {
                int deleteCount = logic.deleteExpiredFile(minOffset);

                if (deleteCount > 0 && deleteLogicsFilesInterval > 0) {
                    try {
                        Thread.sleep(deleteLogicsFilesInterval);
                    } catch (InterruptedException ignored) {
                    }
                }
            }
        }
        ConsumeQueue tranRedoLog = DefaultMessageStore.this.transactionStateService.getTranRedoLog();
        if (tranRedoLog != null) {
            tranRedoLog.deleteExpiredFile(minOffset);
        }
        DefaultMessageStore.this.transactionStateService.deleteExpiredStateFile(minOffset);
        DefaultMessageStore.this.indexService.deleteExpiredFile(minOffset);
    }
}
 
Example 10
Source File: DefaultMessageStore.java    From rocketmq with Apache License 2.0
/**
 * Runs once every second.
 *
 * @param retryTimes 1 by default, 3 on shutdown
 */
private void doFlush(int retryTimes) {
    int flushConsumeQueueLeastPages = DefaultMessageStore.this.getMessageStoreConfig().getFlushConsumeQueueLeastPages(); // flush at least 2 pages by default

    // When retryTimes == RETRY_TIMES_OVER, force a flush; mainly used on shutdown.
    if (retryTimes == RETRY_TIMES_OVER) {
        flushConsumeQueueLeastPages = 0;
    }
    long logicsMsgTimestamp = 0;
    int flushConsumeQueueThoroughInterval = DefaultMessageStore.this.getMessageStoreConfig().getFlushConsumeQueueThoroughInterval(); // 60s by default
    long currentTimeMillis = System.currentTimeMillis();
    // If at least 60s have passed since the last flush, flush even if fewer than flushConsumeQueueLeastPages pages have been written.
    if (currentTimeMillis >= (this.lastFlushTimestamp + flushConsumeQueueThoroughInterval)) {
        this.lastFlushTimestamp = currentTimeMillis;
        flushConsumeQueueLeastPages = 0;
        logicsMsgTimestamp = DefaultMessageStore.this.getStoreCheckpoint().getLogicsMsgTimestamp();
    }
    // Flush the consume queues; by default only a single flush attempt is made.
    ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;
    for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
        for (ConsumeQueue cq : maps.values()) {
            boolean result = false;
            for (int i = 0; i < retryTimes && !result; i++) {
                result = cq.flush(flushConsumeQueueLeastPages);
            }
        }
    }
    // Flush the store checkpoint.
    if (0 == flushConsumeQueueLeastPages) {
        if (logicsMsgTimestamp > 0) {
            DefaultMessageStore.this.getStoreCheckpoint().setLogicsMsgTimestamp(logicsMsgTimestamp);
        }
        DefaultMessageStore.this.getStoreCheckpoint().flush();
    }
}
 
Example 11
Source File: ConcurrentHashMapTest.java    From openjdk-jdk9 with GNU General Public License v2.0
/**
 * values collection contains all values
 */
public void testValues() {
    ConcurrentHashMap map = map5();
    Collection s = map.values();
    assertEquals(5, s.size());
    assertTrue(s.contains("A"));
    assertTrue(s.contains("B"));
    assertTrue(s.contains("C"));
    assertTrue(s.contains("D"));
    assertTrue(s.contains("E"));
}
 
Example 12
Source File: DefaultMessageStore.java    From rocketmq with Apache License 2.0
public void destroyLogics() {
    for (ConcurrentHashMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
        for (ConsumeQueue logic : maps.values()) {
            logic.destroy();
        }
    }
}
 
Example 13
Source File: DefaultMessageStore.java    From reading-and-annotate-rocketmq-3.4.6 with GNU General Public License v3.0
private void doFlush(int retryTimes) {

    int flushConsumeQueueLeastPages = DefaultMessageStore.this.getMessageStoreConfig().getFlushConsumeQueueLeastPages();

    if (retryTimes == RetryTimesOver) {
        flushConsumeQueueLeastPages = 0;
    }

    long logicsMsgTimestamp = 0;

    int flushConsumeQueueThoroughInterval = DefaultMessageStore.this.getMessageStoreConfig().getFlushConsumeQueueThoroughInterval();
    long currentTimeMillis = System.currentTimeMillis();
    if (currentTimeMillis >= (this.lastFlushTimestamp + flushConsumeQueueThoroughInterval)) {
        this.lastFlushTimestamp = currentTimeMillis;
        flushConsumeQueueLeastPages = 0;
        logicsMsgTimestamp = DefaultMessageStore.this.getStoreCheckpoint().getLogicsMsgTimestamp();
    }

    ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;

    for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
        for (ConsumeQueue cq : maps.values()) {
            boolean result = false;
            for (int i = 0; i < retryTimes && !result; i++) {
                result = cq.commit(flushConsumeQueueLeastPages);
            }
        }
    }

    if (0 == flushConsumeQueueLeastPages) {
        if (logicsMsgTimestamp > 0) {
            DefaultMessageStore.this.getStoreCheckpoint().setLogicsMsgTimestamp(logicsMsgTimestamp);
        }
        DefaultMessageStore.this.getStoreCheckpoint().flush();
    }
}
 
Example 14
Source File: DefaultMessageStore.java    From rocketmq with Apache License 2.0
private void recoverTopicQueueTable() {
    HashMap<String/* topic-queueid */, Long/* offset */> table = new HashMap<String, Long>(1024);
    long minPhyOffset = this.commitLog.getMinOffset();
    for (ConcurrentHashMap<Integer, ConsumeQueue> maps : this.consumeQueueTable.values()) {
        for (ConsumeQueue logic : maps.values()) {
            String key = logic.getTopic() + "-" + logic.getQueueId();
            table.put(key, logic.getMaxOffsetInQuque());
            logic.correctMinOffset(minPhyOffset);
        }
    }

    this.commitLog.setTopicQueueTable(table);
}
 
Example 15
Source File: DefaultMessageStore.java    From reading-and-annotate-rocketmq-3.4.6 with GNU General Public License v3.0
public void truncateDirtyLogicFiles(long phyOffset) {
    ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables = DefaultMessageStore.this.consumeQueueTable;

    for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
        for (ConsumeQueue logic : maps.values()) {
            logic.truncateDirtyLogicFiles(phyOffset);
        }
    }
}
 
Example 16
Source File: DefaultMessageStore.java    From RocketMQ-Master-analyze with Apache License 2.0
public void truncateDirtyLogicFiles(long phyOffset) {
    ConcurrentHashMap<String, ConcurrentHashMap<Integer, ConsumeQueue>> tables =
            DefaultMessageStore.this.consumeQueueTable;

    for (ConcurrentHashMap<Integer, ConsumeQueue> maps : tables.values()) {
        for (ConsumeQueue logic : maps.values()) {
            logic.truncateDirtyLogicFiles(phyOffset);
        }
    }
}
 
Example 17
Source File: DupPubRelMessageStoreServiceImpl.java    From netty-learning-example with Apache License 2.0
@Override
public List<DupPubRelMessageStore> get(String clientId) {
    if (grozaDupPubRelMessageCache.containsKey(clientId)){
        ConcurrentHashMap<Integer, DupPubRelMessageStore> map = grozaDupPubRelMessageCache.get(clientId);
        Collection<DupPubRelMessageStore> collection = map.values();
        return new ArrayList<>(collection);
    }
    return new ArrayList<>();
}
 
Example 18
Source File: DuwearEventObserver.java    From AndroidWear-OpenWear with MIT License
@Override
public void onPeerConnected(Node node) {
    // TODO Auto-generated method stub

    ConcurrentHashMap<String, AbsEventHandler> eventHandlers = ClientManager.getInstance().getEventHandlers();

    String displayName = node.getDisplayName();
    String nodeId = node.getId();
    for (AbsEventHandler eventHandler : eventHandlers.values()) {
        eventHandler.handlePeerConnected(displayName, nodeId);
    }
}
 
Example 19
Source File: ScenarioTestListener.java    From JGiven with Apache License 2.0
@Override
public void onFinish( ITestContext paramITestContext ) {
    ConcurrentHashMap<String, ReportModel> reportModels = getReportModels(paramITestContext);
    for( ReportModel reportModel : reportModels.values() ) {
        new CommonReportHelper().finishReport( reportModel );
    }
}
 
Example 20
Source File: DefaultStatusManager.java    From chaosblade-exec-jvm with Apache License 2.0
@Override
public List<StatusMetric> getExpByTarget(String targetName) {
    ConcurrentHashMap<String, StatusMetric> metricMap = models.get(targetName);
    if (metricMap == null) {
        return Collections.emptyList();
    }
    LinkedList<StatusMetric> statusMetrics = new LinkedList<StatusMetric>();
    for (StatusMetric statusMetric : metricMap.values()) {
        statusMetrics.add(statusMetric);
    }
    return statusMetrics;
}