com.yammer.metrics.core.TimerContext Java Examples

The following examples show how to use com.yammer.metrics.core.TimerContext. The examples are drawn from open source projects; you can go to the original project or source file by following the links above each example.
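Nearly all of the examples follow the same basic pattern: obtain a TimerContext from Timer.time(), do the work being measured, and call stop(), typically in a finally block so the timing is recorded even when the work throws. A minimal, self-contained sketch of that pattern (the metric scope and name here are illustrative, not taken from any of the projects below):

import java.util.concurrent.TimeUnit;

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Timer;
import com.yammer.metrics.core.TimerContext;

public class TimerContextSketch {
    // Durations are reported in milliseconds, rates per second.
    private static final Timer WORK_TIMER = Metrics.newTimer(
            TimerContextSketch.class, "work", TimeUnit.MILLISECONDS, TimeUnit.SECONDS);

    public static void main(String[] args) throws InterruptedException {
        final TimerContext context = WORK_TIMER.time();
        try {
            Thread.sleep(100); // stand-in for the work being measured
        } finally {
            context.stop(); // records the elapsed time even if the work throws
        }
    }
}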
Example #1
Source File: CaliperRunner.java    From netty-4.1.22 with Apache License 2.0
/**
 * Verify measure publication manually.
 */
public static void main(final String[] args) throws Exception {
    final Run run = newRun("test-main");
    for (int param = 0; param < 5; param++) {
        final CaliperMeasure measure = new CaliperMeasure();
        measure.variables().put("param", String.valueOf(param));
        for (int step = 0; step < 5; step++) {
            measure.rate().mark(50 + step);
            final TimerContext time = measure.time().time();
            Thread.sleep(15);
            time.stop();
            measure.size().value(50 + step);
            measure.mark();
        }
        measure.appendTo(run);
    }
    final Result result = newResult(run);
    publish(result);
    System.out.println(json(result));
}
 
Example #2
Source File: KafkaProducerWrapper.java    From common-kafka with Apache License 2.0
/**
 * Synchronously sends all {@code records}. The underlying {@link Producer} is immediately flushed and the call will block until
 * the {@code records} have been sent.
 *
 * @param records
 *            the records to send to Kafka
 * @throws IOException
 *             indicates that there was a Kafka error that led to one of the messages not being sent.
 * @throws org.apache.kafka.common.KafkaException
 *             if there is an issue sending the messages to Kafka
 */
public void sendSynchronously(List<ProducerRecord<K, T>> records) throws IOException {
    // Disregard empty batches.
    if (records.isEmpty()) {
        LOGGER.debug("records was empty; nothing to process");
        return;
    }

    BATCH_SIZE_HISTOGRAM.update(records.size());

    TimerContext context = SYNC_SEND_TIMER.time();
    try {
        List<Future<RecordMetadata>> futures = records.stream().map(kafkaProducer::send).collect(Collectors.toList());

        handlePendingWrites(futures);
    } finally {
        context.stop();
    }
}
 
Example #3
Source File: KafkaProducerWrapper.java    From common-kafka with Apache License 2.0
/**
 * Flushes the underlying pending writes created by calls to {@link #send(ProducerRecord)}
 * to the {@link Producer}, ensuring they are persisted.
 *
 * If an exception occurs when flushing, all pending writes should be
 * {@link #send(ProducerRecord) sent again}.
 *
 * @throws IOException If there was an error persisting one of the pending writes
 */
@Override
public void flush() throws IOException {
    if (pendingWrites.isEmpty()) {
        LOGGER.debug("nothing to flush");
        return;
    }

    BATCH_SIZE_HISTOGRAM.update(pendingWrites.size());

    TimerContext context = FLUSH_TIMER.time();
    try {
        handlePendingWrites(pendingWrites);
    } finally {
        pendingWrites.clear();
        context.stop();
    }
}
 
Example #4
Source File: CaliperRunner.java    From netty4.0.27Learn with Apache License 2.0
/**
 * Verify measure publication manually.
 */
public static void main(final String[] args) throws Exception {
    final Run run = newRun("test-main");
    for (int param = 0; param < 5; param++) {
        final CaliperMeasure measure = new CaliperMeasure();
        measure.variables().put("param", String.valueOf(param));
        for (int step = 0; step < 5; step++) {
            measure.rate().mark(50 + step);
            final TimerContext time = measure.time().time();
            Thread.sleep(15);
            time.stop();
            measure.size().value(50 + step);
            measure.mark();
        }
        measure.appendTo(run);
    }
    final Result result = newResult(run);
    publish(result);
    System.out.println(json(result));
}
 
Example #5
Source File: Slf4JBatchSubmitter.java    From usergrid with Apache License 2.0
@Override
public Future submit( final Collection<Count> counts ) {
    return executor.submit( new Callable<Object>() {
        // The timer starts when the Callable is constructed, so the measurement
        // includes time spent queued in the executor as well as execution time.
        final TimerContext timer = addTimer.time();


        @Override
        public Object call() throws Exception {
            // TODO perhaps this could be pushed down further into CountProducer Impl?
            // - this would leave generic submitter class
            for ( Count c : counts ) {
                logger.info( "found count {}", c );
            }
            timer.stop();
            return true;
        }
    } );
}
 
Example #6
Source File: S3Archiver.java    From chancery with Apache License 2.0
private void upload(@NotNull File src, @NotNull String key, @NotNull CallbackPayload payload) {
    log.info("Uploading {} to {} in {}", src, key, bucketName);
    final PutObjectRequest request = new PutObjectRequest(bucketName, key, src);
    final ObjectMetadata metadata = request.getMetadata();
    final String commitId = payload.getAfter();
    if (commitId != null) {
        metadata.addUserMetadata("commit-id", commitId);
    }
    final DateTime timestamp = payload.getTimestamp();
    if (timestamp != null) {
        metadata.addUserMetadata("hook-timestamp",
                ISODateTimeFormat.basicTime().print(timestamp));
    }

    final TimerContext time = uploadTimer.time();
    try {
        s3Client.putObject(request);
    } catch (Exception e) {
        log.error("Couldn't upload to {} in {}", key, bucketName, e);
        throw e;
    } finally {
        time.stop();
    }
    log.info("Uploaded to {} in {}", key, bucketName);
}
 
Example #7
Source File: GithubClient.java    From chancery with Apache License 2.0
public void createReference(String owner, String repository, String ref, String id)
        throws GithubFailure.forReferenceCreation {
    final URI uri = UriBuilder.
            fromPath("/repos/{a}/{b}/git/refs").
            build(owner, repository);

    final ReferenceCreationRequest req = new ReferenceCreationRequest(ref, id);

    final TimerContext time = referenceCreationTimer.time();
    try {
        /* Github wants a Content-Length, and Jersey doesn't fancy doing that */
        final byte[] payload = mapper.writeValueAsBytes(req);

        resource.uri(uri).
                type(MediaType.APPLICATION_JSON_TYPE).
                post(payload);
    } catch (JsonProcessingException | UniformInterfaceException e) {
        throw new GithubFailure.forReferenceCreation(e);
    } finally {
        time.stop();
    }
}
 
Example #8
Source File: GithubClient.java    From chancery with Apache License 2.0
public Path download(String owner, String repository, String id)
        throws IOException, GithubFailure.forDownload {
    final Path tempPath = Files.createTempFile("com.airbnb.chancery-githubdownload-", null);
    tempPath.toFile().deleteOnExit();

    final URI uri = UriBuilder.
            fromPath("/repos/{a}/{b}/tarball/{c}").
            build(owner, repository, id);

    log.info("Downloading {}", uri);

    final TimerContext time = downloadTimer.time();
    try {
        final InputStream inputStream = resource.uri(uri).
                accept(MediaType.WILDCARD_TYPE).
                get(InputStream.class);

        Files.copy(inputStream, tempPath, StandardCopyOption.REPLACE_EXISTING);
        log.info("Downloaded {}", uri);
        return tempPath;
    } catch (UniformInterfaceException e) {
        throw new GithubFailure.forDownload(e);
    } finally {
        time.stop();
    }
}
 
Example #9
Source File: KafkaProducerWrapper.java    From common-kafka with Apache License 2.0
/**
 * Sends the specified {@code record}.  Data is not guaranteed to be written until {@link #flush()} is called.
 *
 * @param record the record to send to Kafka
 * @throws IllegalArgumentException the {@code record} cannot be {@code null}.
 * @throws org.apache.kafka.common.KafkaException if there is an issue sending the message to Kafka
 */
public void send(ProducerRecord<K, T> record) {
    if (record == null) {
        throw new IllegalArgumentException("The 'record' cannot be 'null'.");
    }

    TimerContext context = SEND_TIMER.time();
    try {
        pendingWrites.add(kafkaProducer.send(record));
    } finally {
        context.stop();
    }
}
 
Example #10
Source File: CassandraSubmitter.java    From usergrid with Apache License 2.0
@Override
public Future submit( final Collection<Count> counts ) {
    return executor.submit( new Callable<Object>() {
        final TimerContext timer = addTimer.time();


        @Override
        public Object call() throws Exception {
            cassandraCounterStore.save( counts );
            timer.stop();
            return true;
        }
    } );
}
 
Example #11
Source File: AbstractBatcher.java    From usergrid with Apache License 2.0
/** Add a count object to this batcher */
public void add( Count count ) throws CounterProcessingUnavailableException {
    invocationCounter.inc();
    final TimerContext context = addTimer.time();
    try {
        if ( batchSize == 1 ) {
            getBatch().addSerial( count );
        }
        else {
            getBatch().add( count );
        }
    } finally {
        // Stop in finally so the timer is recorded even if the batch add throws.
        context.stop();
    }
}
 
Example #12
Source File: Indexer.java    From hbase-indexer with Apache License 2.0
private Result readRow(RowData rowData) throws IOException {
    TimerContext timerContext = rowReadTimer.time();
    try {
        Table table = tablePool.getTable(TableName.valueOf(rowData.getTable()));
        try {
            Get get = mapper.getGet(rowData.getRow());
            return table.get(get);
        } finally {
            table.close();
        }
    } finally {
        timerContext.stop();
    }
}
 
Example #13
Source File: DefaultResultToSolrMapper.java    From hbase-indexer with Apache License 2.0
@Override
public void map(Result result, SolrUpdateWriter solrUpdateWriter) {
    TimerContext timerContext = mappingTimer.time();
    try {
        SolrInputDocument solrInputDocument = new SolrInputDocument();
        for (SolrDocumentExtractor documentExtractor : resultDocumentExtractors) {
            documentExtractor.extractDocument(result, solrInputDocument);
        }
        solrUpdateWriter.add(solrInputDocument);
    } finally {
        timerContext.stop();
    }
}
 
Example #14
Source File: S3Archiver.java    From chancery with Apache License 2.0
private void delete(@NotNull String key) {
    log.info("Removing key {} from {}", key, bucketName);
    final TimerContext time = deleteTimer.time();
    try {
        s3Client.deleteObject(bucketName, key);
    } catch (Exception e) {
        log.error("Couldn't delete {} from {}", key, bucketName, e);
        throw e;
    } finally {
        time.stop();
    }

    log.info("Deleted {} from {}", key, bucketName);
}
 
Example #15
Source File: CQLExecutor.java    From Rhombus with MIT License
public PreparedStatement prepareStatement(Session session, CQLStatement cql) {
    if (preparedStatementCache.containsKey(cql.getQuery())) {
        // When pre-preparing statements, we can send the same one multiple times;
        // in that case, just return the cached statement and do not prepare again.
        return preparedStatementCache.get(cql.getQuery());
    } else {
        TimerContext prepareTimer = Metrics.defaultRegistry().newTimer(CQLExecutor.class, "statement.prepared").time();
        PreparedStatement ret = session.prepare(cql.getQuery());
        prepareTimer.stop();
        ret.setConsistencyLevel(consistencyLevel);
        preparedStatementCache.put(cql.getQuery(), ret);
        return ret;
    }
}
 
Example #16
Source File: WaitQueue.java    From stratio-cassandra with Apache License 2.0
/**
 * The calling thread MUST be the thread that uses the signal.
 * If the Signal is waited on, context.stop() will be called when the wait times out, the Signal is signalled,
 * or the waiting thread is interrupted.
 * @return a Signal registered on this queue and bound to the given timer context
 */
public Signal register(TimerContext context)
{
    assert context != null;
    RegisteredSignal signal = new TimedSignal(context);
    queue.add(signal);
    return signal;
}
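A hypothetical caller pairs register() with a wait on the returned Signal; assuming the Signal's await() method from the same WaitQueue class, usage looks roughly like this (waitQueue and waitTimer are illustrative fields, not part of the example above):

TimerContext context = waitTimer.time();
WaitQueue.Signal signal = waitQueue.register(context);
signal.await(); // may throw InterruptedException; context.stop() fires on timeout, signal, or interrupt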
 
Example #17
Source File: StatementIteratorConsumer.java    From Rhombus with MIT License
protected void handle(CQLStatement statement) {
	String methodName = "NULL";
	String cql = statement.getQuery();
	int firstSpace = cql.indexOf(" ");
	if(firstSpace > 0) {
		methodName = cql.substring(0, firstSpace);
	}
	String timerName = "asyncExec." + methodName + "." + statement.getObjectName();
	final Timer asyncExecTimer = Metrics.defaultRegistry().newTimer(StatementIteratorConsumer.class, timerName);
	final TimerContext asyncExecTimerContext = asyncExecTimer.time();
	final long startTime = System.nanoTime();
	ResultSetFuture future = null;
	try {
		future = this.cqlExecutor.executeAsync(statement);
	} catch (RuntimeException re) {
		logger.error("RuntimeException while executing statement {}\n {}", statement.getQuery(), re);
		shutdownLatch.countDown();
		return;
	}
	futures.add(future);
	Futures.addCallback(future, new FutureCallback<ResultSet>() {
		@Override
		public void onSuccess(final ResultSet result) {
			Host queriedHost = result.getExecutionInfo().getQueriedHost();
			Metrics.defaultRegistry().newMeter(StatementIteratorConsumer.class, "queriedhost." + queriedHost.getDatacenter(), queriedHost.getDatacenter(), TimeUnit.SECONDS).mark();
			asyncExecTimerContext.stop();
			logger.debug("Async exec time {}us", (System.nanoTime() - startTime) / 1000);
			shutdownLatch.countDown();
		}

		@Override
		public void onFailure(final Throwable t) {
			asyncExecTimerContext.stop();
			logger.debug("Async failure time {}us", (System.nanoTime() - startTime) / 1000);
			executionExceptions.add(t);
			shutdownLatch.countDown();
		}
	}, executorService);
}
 
Example #18
Source File: YammerExample.java    From signalfx-java with Apache License 2.0
public static void main(String[] args) throws Exception {
    System.out.println("Running example...");

    Properties prop = new Properties();
    prop.load(new FileInputStream("auth.properties"));
    final String auth_token = prop.getProperty("auth");
    final String hostUrlStr = prop.getProperty("host");
    final URL hostUrl = new URL(hostUrlStr);
    System.out.println("Auth=" + auth_token + " .. host=" + hostUrl);
    SignalFxReceiverEndpoint endpoint = new SignalFxEndpoint(hostUrl.getProtocol(),
            hostUrl.getHost(), hostUrl.getPort());

    MetricsRegistry metricsRegistry = new MetricsRegistry();
    SignalFxReporter reporter = new SignalFxReporter.Builder(metricsRegistry,
            new StaticAuthToken(auth_token),
            hostUrlStr).setEndpoint(endpoint)
            .setOnSendErrorHandlerCollection(
                    Collections.<OnSendErrorHandler>singleton(new OnSendErrorHandler() {
                        public void handleError(MetricError error) {
                            System.out.println("" + error.getMessage());
                        }
                    }))
            .setDetailsToAdd(ImmutableSet.of(SignalFxReporter.MetricDetails.COUNT,
                    SignalFxReporter.MetricDetails.MIN,
                    SignalFxReporter.MetricDetails.MAX))
            .build();

    final MetricMetadata metricMetadata = reporter.getMetricMetadata();

    Counter counter = getCounter(metricsRegistry, metricMetadata);

    Metric cumulativeCounter = getCumulativeCounter(metricsRegistry, metricMetadata);

    Gauge gauge1 = getGauge(metricsRegistry, metricMetadata);

    Timer timer = getTimer(metricsRegistry, metricMetadata);

    // main body generating data and sending it in a loop
    while (true) {
        final TimerContext context = timer.time();
        try {
            System.out.println("Sending data...");
            Thread.sleep(500);
            counter.inc();
        } finally {
            context.stop();
        }
        reporter.report(); // Report all metrics
    }
}
 
Example #19
Source File: LongBTreeTest.java    From stratio-cassandra with Apache License 2.0
private static ListenableFutureTask<List<ListenableFuture<?>>> doOneTestInsertions(final int upperBound, final int maxRunLength, final int averageModsPerIteration, final int iterations, final boolean quickEquality)
{
    ListenableFutureTask<List<ListenableFuture<?>>> f = ListenableFutureTask.create(new Callable<List<ListenableFuture<?>>>()
    {
        @Override
        public List<ListenableFuture<?>> call()
        {
            final List<ListenableFuture<?>> r = new ArrayList<>();
            NavigableMap<Integer, Integer> canon = new TreeMap<>();
            Object[] btree = BTree.empty();
            final TreeMap<Integer, Integer> buffer = new TreeMap<>();
            final Random rnd = new Random();
            for (int i = 0 ; i < iterations ; i++)
            {
                buffer.clear();
                int mods = (averageModsPerIteration >> 1) + 1 + rnd.nextInt(averageModsPerIteration);
                while (mods > 0)
                {
                    int v = rnd.nextInt(upperBound);
                    int rc = Math.max(0, Math.min(mods, maxRunLength) - 1);
                    int c = 1 + (rc <= 0 ? 0 : rnd.nextInt(rc));
                    for (int j = 0 ; j < c ; j++)
                    {
                        buffer.put(v, v);
                        v++;
                    }
                    mods -= c;
                }
                TimerContext ctxt;
                ctxt = TREE_TIMER.time();
                canon.putAll(buffer);
                ctxt.stop();
                ctxt = BTREE_TIMER.time();
                Object[] next = null;
                while (next == null)
                    next = BTree.update(btree, ICMP, buffer.keySet(), true, SPORADIC_ABORT);
                btree = next;
                ctxt.stop();

                if (!BTree.isWellFormed(btree, ICMP))
                {
                    System.out.println("ERROR: Not well formed");
                    throw new AssertionError("Not well formed!");
                }
                if (quickEquality)
                    testEqual("", BTree.<Integer>slice(btree, true), canon.keySet().iterator());
                else
                    r.addAll(testAllSlices("RND", btree, new TreeSet<>(canon.keySet())));
            }
            return r;
        }
    });
    MODIFY.execute(f);
    return f;
}
 
Example #20
Source File: WaitQueue.java    From stratio-cassandra with Apache License 2.0
private TimedSignal(TimerContext context)
{
    this.context = context;
}
 
Example #21
Source File: EsDatasetDeleterCallback.java    From occurrence with Apache License 2.0
@Override
public void handleMessage(DeleteDatasetOccurrencesMessage message) {
  MDC.put("datasetKey", message.getDatasetUuid().toString());

  if (OccurrenceDeletionReason.DATASET_MANUAL != message.getDeletionReason()) {
    LOG.warn("In Pipelines we only support DATASET_MANUAL deletion events");
    return;
  }

  final String datasetKey = message.getDatasetUuid().toString();
  // find the indexes where the dataset is indexed
  Set<String> datasetIndexes =
    EsHelper.findExistingIndexesInAliases(esClient, datasetKey, config.esIndex);

  if (datasetIndexes == null || datasetIndexes.isEmpty()) {
    LOG.info("No indexes found in aliases {} for dataset {}", config.esIndex, datasetKey);
    return;
  }

  final TimerContext contextDeleteIndex = processTimerDeleteIndex.time();
  // remove independent indexes for this dataset
  datasetIndexes.stream()
    .filter(i -> i.startsWith(datasetKey))
    .forEach(idx -> EsHelper.deleteIndex(esClient, idx));
  contextDeleteIndex.stop();

  final TimerContext contextDeleteByQuery = processTimerDeleteByQuery.time();
  // delete documents of this dataset in non-independent indexes
  datasetIndexes.stream()
    .filter(i -> !i.startsWith(datasetKey))
    .forEach(idx -> EsHelper.deleteByDatasetKey(esClient, datasetKey, idx));
  contextDeleteByQuery.stop();

  // Delete dataset from ingest folder
  String deleteIngestPath = String.join(Path.SEPARATOR, config.ingestDirPath, datasetKey);
  deleteByPattern(fs, deleteIngestPath);

  // Delete dataset from hdfs view directory
  String viewFileName = HdfsView.VIEW_OCCURRENCE + "_" + datasetKey + "_*";
  String deleteHdfsPath = String.join(Path.SEPARATOR, config.hdfsViewDirPath, viewFileName);
  deleteByPattern(fs, deleteHdfsPath);
}
 
Example #22
Source File: BcryptCommandTest.java    From usergrid with Apache License 2.0
/**
 * Tests bcrypt hashing with a default number of rounds. As the console output shows, this test should take
 * about 5 seconds to run, since we want to force roughly 500 ms per authentication attempt with bcrypt.
 */
@Test
public void testHashRoundSpeed() throws UnsupportedEncodingException {

    int cryptIterations = 1 << 11; // 2048 rounds; note that "2 ^ 11" would be bitwise XOR (9) in Java
    int numberOfTests = 10;

    BcryptCommand command = new BcryptCommand();
    command.setDefaultIterations( cryptIterations );

    String baseString = "I am a test password for hashing";

    CredentialsInfo info = new CredentialsInfo();


    UUID user = UUID.randomUUID();
    UUID applicationId = UUID.randomUUID();

    byte[] result = command.hash( baseString.getBytes( "UTF-8" ), info, user, applicationId );


    String stringResults = encodeBase64URLSafeString( result );

    info.setSecret( stringResults );

    Timer timer = Metrics.newTimer( BcryptCommandTest.class, "hashtimer" );

    for ( int i = 0; i < numberOfTests; i++ ) {
        TimerContext timerCtx = timer.time();

        //now check we can auth with the same phrase
        byte[] authed = command.auth( baseString.getBytes( "UTF-8" ), info, user, applicationId );

        timerCtx.stop();


        assertArrayEquals( result, authed );
    }

    // Print out the data
    ConsoleReporter reporter = new ConsoleReporter( Metrics.defaultRegistry(), System.out, MetricPredicate.ALL );

    reporter.run();
}