org.rrd4j.core.RrdDef Java Examples

The following examples show how to use org.rrd4j.core.RrdDef. They are drawn from open-source projects; the original project, source file, and license are noted above each example.
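Before the project-specific examples, here is a minimal end-to-end sketch of the typical workflow: build an RrdDef with a step, datasources, and archives, open an RrdDb from it, and push a sample. The file path, datasource name, and values are illustrative only.

import org.rrd4j.ConsolFun;
import org.rrd4j.DsType;
import org.rrd4j.core.RrdDb;
import org.rrd4j.core.RrdDef;
import org.rrd4j.core.Sample;

public class RrdDefQuickStart {
    public static void main(String[] args) throws Exception {
        long start = System.currentTimeMillis() / 1000;

        // 60-second step; start one second early so the first sample is accepted
        RrdDef def = new RrdDef("/tmp/demo.rrd", start - 1, 60);
        // one GAUGE datasource: 120-second heartbeat, no min/max limits
        def.addDatasource("temperature", DsType.GAUGE, 120, Double.NaN, Double.NaN);
        // one averaged archive: 1 step per row, 1440 rows = 24 hours of data
        def.addArchive(ConsolFun.AVERAGE, 0.5, 1, 1440);

        // create the database file from the definition and store a single sample
        RrdDb db = new RrdDb(def);
        Sample sample = db.createSample();
        sample.setTime(start);
        sample.setValue("temperature", 21.5);
        sample.update();
        db.close();
    }
}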
Example #1
Source File: MetricsHistoryHandler.java    From lucene-solr with Apache License 2.0
private RrdDef createDef(String registry, Group group) {
  registry = SolrMetricManager.enforcePrefix(registry);

  // base sampling period is collectPeriod - samples more frequent than
  // that will be dropped, samples less frequent will be interpolated
  RrdDef def = new RrdDef(URI_PREFIX + registry, collectPeriod);
  // set the start time early enough so that the first sample is always later
  // than the start of the archive
  def.setStartTime(TimeUnit.SECONDS.convert(timeSource.getEpochTimeNs(), TimeUnit.NANOSECONDS) - def.getStep());

  // add datasources
  List<Group> groups = new ArrayList<>();
  groups.add(group);
  if (group == Group.collection) {
    groups.add(Group.core);
  }
  for (Group g : groups) {
    // use NaN when more than 1 sample is missing
    counters.get(g.toString()).forEach(name ->
        def.addDatasource(name, DsType.COUNTER, collectPeriod * 2, Double.NaN, Double.NaN));
    gauges.get(g.toString()).forEach(name ->
        def.addDatasource(name, DsType.GAUGE, collectPeriod * 2, Double.NaN, Double.NaN));
  }
  if (groups.contains(Group.node)) {
    // add numNodes gauge
    def.addDatasource(NUM_NODES_KEY, DsType.GAUGE, collectPeriod * 2, Double.NaN, Double.NaN);
  }

  // add archives

  // use AVERAGE consolidation,
  // use NaN when >50% samples are missing
  def.addArchive(ConsolFun.AVERAGE, 0.5, 1, 240); // 4 hours
  def.addArchive(ConsolFun.AVERAGE, 0.5, 10, 288); // 48 hours
  def.addArchive(ConsolFun.AVERAGE, 0.5, 60, 336); // 2 weeks
  def.addArchive(ConsolFun.AVERAGE, 0.5, 240, 180); // 2 months
  def.addArchive(ConsolFun.AVERAGE, 0.5, 1440, 365); // 1 year
  return def;
}
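Each archive's time span is steps × rows × collectPeriod. Assuming the default 60-second collect period, 1 × 240 × 60 s = 4 hours, 10 × 288 × 60 s = 48 hours, 60 × 336 × 60 s = 2 weeks, and 1440 × 365 × 60 s = 1 year, matching the comments; the fourth archive, 240 × 180 × 60 s, works out to 30 days, so its "2 months" comment appears to assume a different collect period.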
 
Example #2
Source File: MetricsHistoryHandler.java    From lucene-solr with Apache License 2.0
private RrdDb getOrCreateDb(String registry, Group group) {
  RrdDb db = knownDbs.computeIfAbsent(registry, r -> {
    RrdDef def = createDef(r, group);
    try {
      RrdDb newDb = new RrdDb(def, factory);
      return newDb;
    } catch (IOException e) {
      log.warn("Can't create RrdDb for registry {}, group {}: {}", registry, group, e);
      return null;
    }
  });
  return db;
}
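The computeIfAbsent lambda returns null when the database cannot be created, so callers must null-check the result. A hedged sketch of how the returned handle might then be used (the registry, group, and datasource name are illustrative):

RrdDb db = getOrCreateDb("solr.node", Group.node);
if (db != null) {
    Sample sample = db.createSample();   // timestamped "now" by default
    sample.setValue("numNodes", 1.0);    // hypothetical datasource name
    sample.update();
}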
 
Example #3
Source File: SolrRrdBackendFactoryTest.java    From lucene-solr with Apache License 2.0
private RrdDef createDef(long startTime) {
  RrdDef def = new RrdDef("solr:foo", 60);
  def.setStartTime(startTime);
  def.addDatasource("one", DsType.COUNTER, 120, Double.NaN, Double.NaN);
  def.addDatasource("two", DsType.GAUGE, 120, Double.NaN, Double.NaN);
  def.addArchive(ConsolFun.AVERAGE, 0.5, 1, 120); // 2 hours
  def.addArchive(ConsolFun.AVERAGE, 0.5, 10, 288); // 48 hours
  def.addArchive(ConsolFun.AVERAGE, 0.5, 60, 336); // 2 weeks
  def.addArchive(ConsolFun.AVERAGE, 0.5, 240, 180); // 2 months
  return def;
}
 
Example #4
Source File: RrdGraphController.java    From plow with Apache License 2.0
private void createThreadRrd() {

    logger.info("Initializing Thread RRD data at: {}", rrdPath(THREAD_RRD));

    RrdDef rrdDef = new RrdDef(rrdPath(THREAD_RRD), 60);
    rrdDef.addArchive(AVERAGE, 0.5, 1, 1440);
    rrdDef.addArchive(AVERAGE, 0.5, 5, 288);
    rrdDef.addArchive(MAX, 0.5, 1, 1440);
    rrdDef.addArchive(MAX, 0.5, 5, 288);

    rrdDef.addDatasource("nodeThreads", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("nodeActiveThreads", GAUGE, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("nodeExecuted", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("nodeWaiting", GAUGE, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("nodeCapacity", GAUGE, 600, Double.NaN, Double.NaN);

    rrdDef.addDatasource("procThreads", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procActiveThreads", GAUGE, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procExecuted", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procWaiting", GAUGE, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procCapacity", GAUGE, 600, Double.NaN, Double.NaN);

    rrdDef.addDatasource("asyncThreads", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("asyncActiveThreads", GAUGE, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("asyncExecuted", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("asyncWaiting", GAUGE, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("asyncCapacity", GAUGE, 600, Double.NaN, Double.NaN);

    saveRrdDef(rrdDef);
}
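Two datasource types appear here: COUNTER datasources are for monotonically increasing values (RRD4J stores the derived per-second rate), while GAUGE datasources store the sampled value as-is. The 600-second heartbeat means a datasource becomes unknown (NaN) if no update arrives within 10 minutes, and the NaN min/max arguments disable range checking.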
 
Example #5
Source File: RrdGraphController.java    From plow with Apache License 2.0
private void createPlowRrd() {

    logger.info("Initializing Plow RRD data at: {}", rrdPath(PLOW_RRD));

    RrdDef rrdDef = new RrdDef(rrdPath(PLOW_RRD), 60);
    rrdDef.addArchive(AVERAGE, 0.5, 1, 1440);
    rrdDef.addArchive(AVERAGE, 0.5, 5, 288);
    rrdDef.addArchive(MAX, 0.5, 1, 1440);
    rrdDef.addArchive(MAX, 0.5, 5, 288);

    rrdDef.addDatasource("nodeDispatchHit", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("nodeDispatchMiss", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("nodeDispatchFail", COUNTER, 600, Double.NaN, Double.NaN);

    rrdDef.addDatasource("procDispatchHit", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procDispatchMiss", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procDispatchFail", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procAllocCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procUnallocCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procAllocFailCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procUnallocFailCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("procOrphan", COUNTER, 600, Double.NaN, Double.NaN);

    rrdDef.addDatasource("taskStartedCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("taskStartedFailCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("taskStoppedCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("taskStoppedFailCount", COUNTER, 600, Double.NaN, Double.NaN);

    rrdDef.addDatasource("jobLaunchCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("jobLaunchFailCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("jobFinishCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("jobKillCount", COUNTER, 600, Double.NaN, Double.NaN);

    rrdDef.addDatasource("rndPingCount", COUNTER, 600, Double.NaN, Double.NaN);
    rrdDef.addDatasource("rndTaskComplete", COUNTER, 600, Double.NaN, Double.NaN);

    saveRrdDef(rrdDef);
}
 
Example #6
Source File: DefaultMetricsDatabase.java    From onos with Apache License 2.0
@Override
public MetricsDatabase build() {
    checkNotNull(metricName, METRIC_NAME_MSG);
    checkNotNull(resourceName, RESOURCE_NAME_MSG);
    checkArgument(!dsDefs.isEmpty(), METRIC_TYPE_MSG);

    // define the resolution of monitored metrics
    rrdDef = new RrdDef(DB_PATH + SPLITTER + metricName +
             SPLITTER + resourceName, RESOLUTION_IN_SECOND);

    try {
        DsDef[] dsDefArray = new DsDef[dsDefs.size()];
        IntStream.range(0, dsDefs.size()).forEach(i -> dsDefArray[i] = dsDefs.get(i));

        rrdDef.addDatasource(dsDefArray);
        rrdDef.setStep(RESOLUTION_IN_SECOND);

        // raw archive, no aggregation is required
        ArcDef rawArchive = new ArcDef(CONSOL_FUNCTION, XFF_VALUE,
                STEP_VALUE, ROW_VALUE);
        rrdDef.addArchive(rawArchive);

        // always store the metric data in memory...
        rrdDb = new RrdDb(rrdDef, RrdBackendFactory.getFactory(STORING_METHOD));
    } catch (IOException e) {
        log.warn("Failed to create a new round-robin database due to {}", e);
    }

    return new DefaultMetricsDatabase(metricName, resourceName, rrdDb);
}
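RrdBackendFactory.getFactory selects where the round-robin data is stored. A minimal sketch, assuming the stock in-memory backend that RRD4J registers under the name "MEMORY" (the value of the ONOS STORING_METHOD constant is not shown in this excerpt):

// keep the whole database in memory instead of on disk
RrdBackendFactory memoryFactory = RrdBackendFactory.getFactory("MEMORY");
RrdDb inMemoryDb = new RrdDb(rrdDef, memoryFactory);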
 
Example #7
Source File: RRD4jService.java    From openhab1-addons with Eclipse Public License 2.0
private RrdDef getRrdDef(String itemName, File file) {
    RrdDef rrdDef = new RrdDef(file.getAbsolutePath());
    RrdDefConfig useRdc = getRrdDefConfig(itemName);

    rrdDef.setStep(useRdc.step);
    rrdDef.setStartTime(System.currentTimeMillis() / 1000 - 1);
    rrdDef.addDatasource(DATASOURCE_STATE, useRdc.dsType, useRdc.heartbeat, useRdc.min, useRdc.max);
    for (RrdArchiveDef rad : useRdc.archives) {
        rrdDef.addArchive(rad.fcn, rad.xff, rad.steps, rad.rows);
    }
    return rrdDef;
}
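Setting the start time to one second before "now" ensures that an update issued immediately after creation still falls after the database start time, which RRD4J requires. RrdDefConfig and RrdArchiveDef are helper classes internal to the openHAB persistence service, holding the per-item step, datasource type, heartbeat, min/max, and archive settings; they are not part of the RRD4J API.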
 
Example #8
Source File: MetricsHistoryHandler.java    From lucene-solr with Apache License 2.0
private NamedList<Object> getDbData(RrdDb db, String[] dsNames, Format format, SolrParams params) throws IOException {
  NamedList<Object> res = new SimpleOrderedMap<>();
  if (dsNames == null || dsNames.length == 0) {
    dsNames = db.getDsNames();
  }
  StringBuilder str = new StringBuilder();
  RrdDef def = db.getRrdDef();
  ArcDef[] arcDefs = def.getArcDefs();
  for (ArcDef arcDef : arcDefs) {
    SimpleOrderedMap<Object> map = new SimpleOrderedMap<>();
    res.add(arcDef.dump(), map);
    Archive a = db.getArchive(arcDef.getConsolFun(), arcDef.getSteps());
    // startTime / endTime, arcStep are in seconds
    FetchRequest fr = db.createFetchRequest(arcDef.getConsolFun(),
        a.getStartTime() - a.getArcStep(),
        a.getEndTime() + a.getArcStep());
    FetchData fd = fr.fetchData();
    if (format != Format.GRAPH) {
      // add timestamps separately from values
      long[] timestamps = fd.getTimestamps();
      if (format == Format.LIST) {
        // Arrays.asList works only on arrays of Objects
        map.add("timestamps", Arrays.stream(timestamps).boxed().collect(Collectors.toList()));
      } else {
        str.setLength(0);
        for (int i = 0; i < timestamps.length; i++) {
          if (i > 0) {
            str.append('\n');
          }
          str.append(String.valueOf(timestamps[i]));
        }
        map.add("timestamps", str.toString());
      }
    }
    SimpleOrderedMap<Object> values = new SimpleOrderedMap<>();
    map.add("values", values);
    for (String name : dsNames) {
      double[] vals = fd.getValues(name);
      switch (format) {
        case GRAPH:
          RrdGraphDef graphDef = new RrdGraphDef();
          graphDef.setTitle(name);
          graphDef.datasource(name, fd);
          graphDef.setStartTime(a.getStartTime() - a.getArcStep());
          graphDef.setEndTime(a.getEndTime() + a.getArcStep());
          graphDef.setPoolUsed(false);
          graphDef.setAltAutoscale(true);
          graphDef.setAltYGrid(true);
          graphDef.setAltYMrtg(true);
          graphDef.setSignature("Apache Solr " + versionString);
          graphDef.setNoLegend(true);
          graphDef.setAntiAliasing(true);
          graphDef.setTextAntiAliasing(true);
          graphDef.setWidth(500);
          graphDef.setHeight(175);
          graphDef.setTimeZone(TimeZone.getDefault());
          graphDef.setLocale(Locale.ROOT);
          // redraw immediately
          graphDef.setLazy(false);
          // area with a border
          graphDef.area(name, new Color(0xffb860), null);
          graphDef.line(name, Color.RED, null, 1.0f);
          RrdGraph graph = new RrdGraph(graphDef);
          BufferedImage bi = new BufferedImage(
              graph.getRrdGraphInfo().getWidth(),
              graph.getRrdGraphInfo().getHeight(),
              BufferedImage.TYPE_INT_RGB);
          graph.render(bi.getGraphics());
          ByteArrayOutputStream baos = new ByteArrayOutputStream();
          ImageIO.write(bi, "png", baos);
          values.add(name, Base64.byteArrayToBase64(baos.toByteArray()));
          break;
        case STRING:
          str.setLength(0);
          for (int i = 0; i < vals.length; i++) {
            if (i > 0) {
              str.append('\n');
            }
            str.append(String.valueOf(vals[i]));
          }
          values.add(name, str.toString());
          break;
        case LIST:
          values.add(name, Arrays.stream(vals).boxed().collect(Collectors.toList()));
          break;
      }
    }
  }
  return res;
}
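Stripped of the Solr response plumbing and PNG rendering, the underlying fetch pattern is small: open the database, build a FetchRequest for a consolidation function and time range, and read timestamps plus per-datasource values from the FetchData. A minimal sketch; the path and datasource name are illustrative:

RrdDb db = new RrdDb("/tmp/demo.rrd");              // open an existing database
long end = Util.getTime();                          // current time in epoch seconds
FetchRequest request = db.createFetchRequest(ConsolFun.AVERAGE, end - 3600, end);
FetchData data = request.fetchData();
long[] timestamps = data.getTimestamps();
double[] values = data.getValues("temperature");
for (int i = 0; i < timestamps.length; i++) {
    System.out.println(timestamps[i] + " -> " + values[i]);
}
db.close();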
 
Example #9
Source File: RRDDataStore.java    From scheduling with GNU Affero General Public License v3.0
protected void initDatabase() throws IOException {
    if (!new File(dataBaseFile).exists()) {
        if (step <= 0) {
            logger.debug("Provided step is invalid, forcing it to " + DEFAULT_STEP_IN_SECONDS);
            step = DEFAULT_STEP_IN_SECONDS;
        }
        logger.info("Node's statistics are saved in " + dataBaseFile);

        RrdDef rrdDef = new RrdDef(dataBaseFile, System.currentTimeMillis() / 1000, step);

        for (String dataSource : dataSources.keySet()) {
            rrdDef.addDatasource(dataSource, DsType.GAUGE, 600, 0, Double.NaN);
        }

        // for step equals 4 seconds
        // Archive of 10 minutes = 600 seconds (4 * 1 * 150) of completely detailed data
        ConsolFun[] consolidationFunctions = new ConsolFun[] { ConsolFun.AVERAGE, ConsolFun.MIN, ConsolFun.MAX,
                                                               ConsolFun.LAST, ConsolFun.FIRST, ConsolFun.TOTAL };
        addArchives(rrdDef, consolidationFunctions, 0.5, 1, 150);

        // An archive of 1 hour = 3600 seconds (4 * 5 * 180) i.e. 180 averages of 5 steps
        addArchives(rrdDef, consolidationFunctions, 0.5, 5, 180);

        // An archive of 4 hours = 14400 seconds (4 * 10 * 360) i.e. 360 averages of 10 steps
        addArchives(rrdDef, consolidationFunctions, 0.5, 10, 360);

        // An archive of 8 hours = 28800 seconds (4 * 20 * 360) i.e. 360 averages of 20 steps
        addArchives(rrdDef, consolidationFunctions, 0.5, 20, 360);

        // An archive of 24 hours = 86400 seconds (4 * 30 * 720) i.e. 720 averages of 30 steps
        addArchives(rrdDef, consolidationFunctions, 0.5, 30, 720);

        // An archive of 1 week = 604800 seconds (4 * 210 * 720) i.e. 720 averages of 210 steps
        addArchives(rrdDef, consolidationFunctions, 0.5, 210, 720);

        // An archive of 1 month ~= 28 days = 2419200 seconds (4 * 840 * 720) i.e. 720 averages of 840 steps
        addArchives(rrdDef, consolidationFunctions, 0.5, 840, 720);

        // An archive of 1 year = 364 days = 31449600 seconds (4 * 10920 * 720) i.e. 720 averages of 10920 steps
        addArchives(rrdDef, consolidationFunctions, 0.5, 10920, 720);

        RrdDb dataBase = new RrdDb(rrdDef);
        dataBase.close();
    } else {
        logger.info("Using existing RRD database: " + new File(dataBaseFile).getAbsolutePath());
    }
}
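Once the file exists, a collector would typically reopen it and append one sample per step. A hedged sketch, reusing the dataBaseFile field from the example above; the datasource name is hypothetical and must match one added in initDatabase():

RrdDb db = new RrdDb(dataBaseFile);      // opens the existing file
Sample sample = db.createSample();       // timestamped "now" by default
sample.setValue("UsedMemory", 42.0);     // hypothetical datasource name
sample.update();
db.close();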
 
Example #10
Source File: RRDDataStore.java    From scheduling with GNU Affero General Public License v3.0
private void addArchives(RrdDef rrdDef, ConsolFun[] consolFunctions, double xff, int steps, int rows) {
    for (ConsolFun consolFun : consolFunctions) {
        rrdDef.addArchive(consolFun, xff, steps, rows);
    }
}