Java Code Examples for java.util.concurrent.ConcurrentMap#Entry

The following examples show how to use java.util.concurrent.ConcurrentMap#Entry. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
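Before looking at the project examples, here is a minimal, self-contained sketch (class and variable names are hypothetical) of the pattern they all share. ConcurrentMap declares no Entry type of its own, so ConcurrentMap.Entry resolves to the Map.Entry member inherited from Map; iteration over a ConcurrentHashMap's entrySet() is weakly consistent and never throws ConcurrentModificationException.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class EntrySketch {
    public static void main(String[] args) {
        ConcurrentMap<String, Integer> counts = new ConcurrentHashMap<>();
        counts.put("alpha", 1);
        counts.put("beta", 2);

        // ConcurrentMap.Entry is simply the Map.Entry interface inherited from Map,
        // so it can be used to name the entries returned by entrySet().
        for (ConcurrentMap.Entry<String, Integer> entry : counts.entrySet()) {
            System.out.println(entry.getKey() + " = " + entry.getValue());
        }
    }
}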
Example 1
Source File: SessionCleanTask.java    From easy-httpserver with Apache License 2.0    6 votes
@Override
public void run() {
	log.info("Cleaning up sessions...");
	ConcurrentMap<String, HttpSession> sessionMap = ApplicationContext.getApplicationContext()
			.getAllSession();
	
	Iterator<Map.Entry<String, HttpSession>> it = sessionMap.entrySet().iterator();
	while (it.hasNext()) {
		ConcurrentMap.Entry<String, HttpSession> entry = it.next();
		HttpSession httpSession = entry.getValue();
		
		Date nowDate = new Date();
		int diff = (int) ((nowDate.getTime() - httpSession.getLastVisitTime().getTime())/1000/60);
		
		if (diff > Integer.parseInt(Constants.OTHER_CONFIG_INFO.get(Constants.SESSION_TIMEOUT))) {
			it.remove();
		}
	}

	log.info("Finished cleaning up sessions");
}
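Note the design choice here: assuming the usual ConcurrentHashMap implementation behind getAllSession(), the entrySet() iterator supports remove(), so expired sessions can be evicted while the map is being traversed without risking a ConcurrentModificationException.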
 
Example 2
Source File: StatsManager.java    From connector-sdk with Apache License 2.0    5 votes
private static synchronized void resetStatsManager() {
  // Not clearing out or reinitializing stats map here. There may have been static
  // references initialized for OperationStats. We are just clearing out values recorded under
  // OperationStats.
  for (ConcurrentMap.Entry<String, OperationStats> entry : getInstance().stats.entrySet()) {
    entry.getValue().clear();
  }
}
 
Example 3
Source File: AppConfigService.java    From haven-platform with Apache License 2.0    5 votes
/**
 * Writes the current application config to the given output stream in the requested MIME type.
 */
public void write(String mimeType, OutputStream os) throws IOException {
    Assert.hasText(mimeType, "MimeType string is null or empty.");
    Assert.notNull(os, "OutputStream is null.");
    MimeType mimeTypeObj = MimeTypeUtils.parseMimeType(mimeType);
    // only JSON output is supported
    Assert.isTrue(MimeTypeUtils.APPLICATION_JSON.equals(mimeTypeObj),
            "MimeType '" + mimeType + "' is not supported.");
    AppConfigObject aco = new AppConfigObject();
    aco.setDate(LocalDateTime.now());
    aco.setVersion(VERSION);
    Map<String, Object> map = new HashMap<>();
    aco.setData(map);
    ConfigWriteContext ctx = ConfigWriteContext.builder()
      .mimeType(mimeTypeObj)
      .build();
    for(ConcurrentMap.Entry<String, ReConfigurableAdapter> cae : adapters.entrySet()) {
        ReConfigurableAdapter ca = cae.getValue();
        Object o = ca.getConfig(ctx);
        if(o == null) {
            continue;
        }
        String name = cae.getKey();
        map.put(name, o);
    }
    objectMapper.writeValue(os, aco);
}
 
Example 4
Source File: SubscriptionManager.java    From HAP-Java with MIT License    5 votes
public synchronized void completeUpdateBatch() {
  if (--this.nestedBatches == 0 && !pendingNotifications.isEmpty()) {
    LOGGER.trace("Publishing batched changes");
    for (ConcurrentMap.Entry<HomekitClientConnection, ArrayList<PendingNotification>> entry :
        pendingNotifications.entrySet()) {
      try {
        HttpResponse message = new EventController().getMessage(entry.getValue());
        entry.getKey().outOfBand(message);
      } catch (Exception e) {
        LOGGER.warn("Failed to create new event message", e);
      }
    }
    pendingNotifications.clear();
  }
}
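The decrement-and-test on nestedBatches lets update batches nest: pending notifications accumulate per connection until the outermost batch completes, and are then flushed once per HomekitClientConnection over the out-of-band channel.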
 
Example 5
Source File: SiddhiBufferedEventsMetric.java    From siddhi with Apache License 2.0    5 votes
@Override
public void enableEventBufferHolderMetrics() {
    for (ConcurrentMap.Entry<Object, ObjectMetric> entry :
            registeredObjects.entrySet()) {
        if (!metricRegistry.getNames().contains(entry.getValue().getName())) {
            metricRegistry.register(entry.getValue().getName(), entry.getValue().getGauge());
        }
    }
}
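The getNames() lookup before register() is deliberate: Dropwizard's MetricRegistry.register() throws an IllegalArgumentException when a metric with the same name already exists, so re-enabling metrics must skip gauges that are still registered.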
 
Example 6
Source File: SiddhiBufferedEventsMetric.java    From siddhi with Apache License 2.0    5 votes
@Override
public void disableEventBufferHolderMetrics() {
    for (ConcurrentMap.Entry<Object, ObjectMetric> entry :
            registeredObjects.entrySet()) {
        metricRegistry.remove(entry.getValue().getName());
    }
}
 
Example 7
Source File: SiddhiMemoryUsageMetric.java    From siddhi with Apache License 2.0    5 votes
@Override
public void enableMemoryUsageMetrics() {
    for (ConcurrentMap.Entry<Object, ObjectMetric> entry :
            registeredObjects.entrySet()) {
        if (!metricRegistry.getNames().contains(entry.getValue().getName())) {
            metricRegistry.register(entry.getValue().getName(), entry.getValue().getGauge());
        }
    }
}
 
Example 8
Source File: DefaultSpeculator.java    From hadoop with Apache License 2.0    4 votes
private int maybeScheduleASpeculation(TaskType type) {
  int successes = 0;

  long now = clock.getTime();

  ConcurrentMap<JobId, AtomicInteger> containerNeeds
      = type == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;

  for (ConcurrentMap.Entry<JobId, AtomicInteger> jobEntry : containerNeeds.entrySet()) {
    // This race condition is okay.  If we skip a speculation attempt that we
    //  should have tried because the event that lowers the number of
    //  containers needed to zero hasn't come through yet, it will next time.
    // Also, if we miss the fact that the number of containers needed was
    //  zero but increased due to a failure, it's not too bad to launch one
    //  container prematurely.
    if (jobEntry.getValue().get() > 0) {
      continue;
    }

    int numberSpeculationsAlready = 0;
    int numberRunningTasks = 0;

    // loop through the tasks of the kind
    Job job = context.getJob(jobEntry.getKey());

    Map<TaskId, Task> tasks = job.getTasks(type);

    int numberAllowedSpeculativeTasks
        = (int) Math.max(minimumAllowedSpeculativeTasks,
            proportionTotalTasksSpeculatable * tasks.size());

    TaskId bestTaskID = null;
    long bestSpeculationValue = -1L;

    // this loop is potentially pricey.
    // TODO track the tasks that are potentially worth looking at
    for (Map.Entry<TaskId, Task> taskEntry : tasks.entrySet()) {
      long mySpeculationValue = speculationValue(taskEntry.getKey(), now);

      if (mySpeculationValue == ALREADY_SPECULATING) {
        ++numberSpeculationsAlready;
      }

      if (mySpeculationValue != NOT_RUNNING) {
        ++numberRunningTasks;
      }

      if (mySpeculationValue > bestSpeculationValue) {
        bestTaskID = taskEntry.getKey();
        bestSpeculationValue = mySpeculationValue;
      }
    }
    numberAllowedSpeculativeTasks
        = (int) Math.max(numberAllowedSpeculativeTasks,
            proportionRunningTasksSpeculatable * numberRunningTasks);

    // If we found a speculation target, fire it off
    if (bestTaskID != null
        && numberAllowedSpeculativeTasks > numberSpeculationsAlready) {
      addSpeculativeAttempt(bestTaskID);
      ++successes;
    }
  }

  return successes;
}
 
Example 9
Source File: SiddhiMemoryUsageMetric.java    From siddhi with Apache License 2.0    4 votes
@Override
public void disableMemoryUsageMetrics() {
    for (ConcurrentMap.Entry<Object, ObjectMetric> entry : registeredObjects.entrySet()) {
        metricRegistry.remove(entry.getValue().getName());
    }
}
 
Example 10
Source File: TopologyMetricContext.java    From jstorm with Apache License 2.0    4 votes
public TopologyMetric mergeMetrics() {
    long start = System.currentTimeMillis();

    if (getMemCache().size() == 0) {
        //LOG.info("topology:{}, metric size is 0, skip...", topologyId);
        return null;
    }
    if (isMerging()) {
        LOG.info("topology {} is already merging, skip...", topologyId);
        return null;
    }

    setMerging(true);

    try {
        Map<String, MetricInfo> workerMetricMap = this.memCache;
        // reset mem cache
        this.memCache = new ConcurrentHashMap<>();

        MetricInfo topologyMetrics = MetricUtils.mkMetricInfo();
        MetricInfo componentMetrics = MetricUtils.mkMetricInfo();
        MetricInfo compStreamMetrics = MetricUtils.mkMetricInfo();
        MetricInfo taskMetrics = MetricUtils.mkMetricInfo();
        MetricInfo streamMetrics = MetricUtils.mkMetricInfo();
        MetricInfo workerMetrics = MetricUtils.mkMetricInfo();
        MetricInfo nettyMetrics = MetricUtils.mkMetricInfo();
        TopologyMetric tpMetric = new TopologyMetric(
                topologyMetrics, componentMetrics, workerMetrics, taskMetrics, streamMetrics, nettyMetrics);
        tpMetric.set_compStreamMetric(compStreamMetrics);

        // metric name => worker count
        Map<String, Integer> histogramMetricNameCounters = new HashMap<>();

        // Special case for histograms & timers: we merge the points to get new snapshot data.
        Map<String, Map<Integer, Histogram>> histograms = new HashMap<>();

        // iterate metrics of all workers within the same topology
        for (ConcurrentMap.Entry<String, MetricInfo> metricEntry : workerMetricMap.entrySet()) {
            MetricInfo metricInfo = metricEntry.getValue();

            // merge counters: add old and new values, note we only add incoming new metrics and overwrite
            // existing data, same for all below.
            Map<String, Map<Integer, MetricSnapshot>> metrics = metricInfo.get_metrics();
            for (Map.Entry<String, Map<Integer, MetricSnapshot>> metric : metrics.entrySet()) {
                String metricName = metric.getKey();
                Map<Integer, MetricSnapshot> data = metric.getValue();
                MetaType metaType = MetricUtils.metaType(metricName);

                MetricType metricType = MetricUtils.metricType(metricName);
                if (metricType == MetricType.COUNTER) {
                    mergeCounters(tpMetric, metaType, metricName, data);
                } else if (metricType == MetricType.GAUGE) {
                    mergeGauges(tpMetric, metaType, metricName, data);
                } else if (metricType == MetricType.METER) {
                    mergeMeters(getMetricInfoByType(tpMetric, metaType), metricName, data);
                } else if (metricType == MetricType.HISTOGRAM) {
                    mergeHistograms(getMetricInfoByType(tpMetric, metaType),
                            metricName, data, histogramMetricNameCounters, histograms);
                }
            }
        }
        adjustHistogramTimerMetrics(tpMetric, histogramMetricNameCounters, histograms);
        // for counters, we only report delta data every time, need to sum with old data
        //adjustCounterMetrics(tpMetric, oldTpMetric);

        LOG.info("merge topology metrics:{}, cost:{}", topologyId, System.currentTimeMillis() - start);
        LOG.debug("tp:{}, comp:{}, comp_stream:{}, task:{}, stream:{}, worker:{}, netty:{}",
                topologyMetrics.get_metrics_size(), componentMetrics.get_metrics_size(),
                compStreamMetrics.get_metrics_size(), taskMetrics.get_metrics_size(),
                streamMetrics.get_metrics_size(), workerMetrics.get_metrics_size(),
                nettyMetrics.get_metrics_size());
        return tpMetric;
    } finally {
        setMerging(false);
    }
}
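Two details make this merge tolerant of live reporting: the worker metric map is snapshotted and replaced with a fresh ConcurrentHashMap up front, so workers keep writing to the new cache while the old one is merged, and the merging flag, reset in the finally block, guards against overlapping merge runs without holding a lock for the whole merge.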