org.apache.htrace.Span Java Examples
The following examples show how to use
org.apache.htrace.Span.
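All of the examples below follow the same HTrace 3.x pattern: start a TraceScope, annotate the Span it wraps, then close the scope so that registered receivers get the finished span. As a quick orientation, here is a minimal, self-contained sketch of that pattern. It is a composite of calls that appear in the examples (Trace.startSpan with Sampler.ALWAYS, addTimelineAnnotation, addKVAnnotation), not code from any of the listed projects; the class name, span name, and annotation keys are made up for illustration.

    import java.nio.charset.StandardCharsets;
    import org.apache.htrace.Sampler;
    import org.apache.htrace.Span;
    import org.apache.htrace.Trace;
    import org.apache.htrace.TraceScope;

    public class SpanUsageSketch {
        public static void main(String[] args) {
            // start a new trace scope; Sampler.ALWAYS traces every request
            TraceScope scope = Trace.startSpan("example-operation", Sampler.ALWAYS);
            try {
                Span span = scope.getSpan();
                if (span != null) {
                    // timeline annotations record "what happened when"
                    span.addTimelineAnnotation("starting work");
                    // key/value annotations attach arbitrary byte[] metadata
                    span.addKVAnnotation("user".getBytes(StandardCharsets.UTF_8),
                        "alice".getBytes(StandardCharsets.UTF_8));
                }
                // ... traced work goes here ...
            } finally {
                // closing the scope stops the span and hands it to any registered SpanReceivers
                scope.close();
            }
        }
    }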
Example #1
Source File: PhoenixTraceReaderIT.java From phoenix with Apache License 2.0 | 6 votes |
@Test
public void singleSpan() throws Exception {
    PhoenixMetricsSink sink = new PhoenixMetricsSink();
    Properties props = new Properties(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    sink.initForTesting(conn);

    // create a simple metrics record
    long traceid = 987654;
    MetricsRecord record =
            createAndFlush(sink, traceid, Span.ROOT_SPAN_ID, 10, "root", 12, 13,
                "host-name.value", "test annotation for a span");

    // start a reader
    validateTraces(Collections.singletonList(record), conn, traceid);
}
Example #2
Source File: TraceWriter.java From phoenix with Apache License 2.0 | 6 votes |
@Override
public void run() {
    if (conn == null) return;
    while (!traceSpanReceiver.isSpanAvailable()) {
        Span span = traceSpanReceiver.getSpan();
        if (null == span) break;
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Span received: " + span.toJson());
        }
        addToBatch(span);
        counter++;
        if (counter >= batchSize) {
            commitBatch(conn);
            counter = 0;
        }
    }
}
Example #3
Source File: TraceReader.java From phoenix with Apache License 2.0 | 6 votes |
/**
 * Do the same sorting that we would get from reading the table with a {@link TraceReader},
 * specifically, by trace and then by start/end. However, these are only ever stored in a
 * single trace, so we can just sort on start/end times.
 */
@Override
public int compareTo(SpanInfo o) {
    // root span always comes first
    if (this.parentId == Span.ROOT_SPAN_ID) {
        return -1;
    } else if (o.parentId == Span.ROOT_SPAN_ID) {
        return 1;
    }
    int compare = Longs.compare(start, o.start);
    if (compare == 0) {
        compare = Longs.compare(end, o.end);
        if (compare == 0) {
            return Longs.compare(id, o.id);
        }
    }
    return compare;
}
Example #4
Source File: TraceReader.java From phoenix with Apache License 2.0 | 6 votes |
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("Span: " + id + "\n");
    sb.append("\tdescription=" + description);
    sb.append("\n");
    sb.append("\tparent=" + (parent == null
            ? (parentId == Span.ROOT_SPAN_ID ? "ROOT" : "[orphan - id: " + parentId + "]")
            : parent.id));
    sb.append("\n");
    sb.append("\tstart,end=" + start + "," + end);
    sb.append("\n");
    sb.append("\telapsed=" + (end - start));
    sb.append("\n");
    sb.append("\thostname=" + hostname);
    sb.append("\n");
    sb.append("\ttags=(" + tagCount + ") " + tags);
    sb.append("\n");
    sb.append("\tannotations=(" + annotationCount + ") " + annotations);
    sb.append("\n");
    sb.append("\tchildren=");
    for (SpanInfo child : children) {
        sb.append(child.id + ", ");
    }
    sb.append("\n");
    return sb.toString();
}
Example #5
Source File: DFSInputStream.java From big-c with Apache License 2.0 | 6 votes |
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
        final LocatedBlock block, final long start, final long end,
        final ByteBuffer bb,
        final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
        final int hedgedReadId) {
    final Span parentSpan = Trace.currentSpan();
    return new Callable<ByteBuffer>() {
        @Override
        public ByteBuffer call() throws Exception {
            byte[] buf = bb.array();
            int offset = bb.position();
            TraceScope scope = Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
            try {
                actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
                    corruptedBlockMap);
                return bb;
            } finally {
                scope.close();
            }
        }
    };
}
Example #6
Source File: TraceReader.java From phoenix with Apache License 2.0 | 6 votes |
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("Span: " + id + "\n");
    sb.append("\tdescription=" + description);
    sb.append("\n");
    sb.append("\tparent=" + (parent == null
            ? (parentId == Span.ROOT_SPAN_ID ? "ROOT" : "[orphan - id: " + parentId + "]")
            : parent.id));
    sb.append("\n");
    sb.append("\tstart,end=" + start + "," + end);
    sb.append("\n");
    sb.append("\telapsed=" + (end - start));
    sb.append("\n");
    sb.append("\thostname=" + hostname);
    sb.append("\n");
    sb.append("\ttags=(" + tagCount + ") " + tags);
    sb.append("\n");
    sb.append("\tannotations=(" + annotationCount + ") " + annotations);
    sb.append("\n");
    sb.append("\tchildren=");
    for (SpanInfo child : children) {
        sb.append(child.id + ", ");
    }
    sb.append("\n");
    return sb.toString();
}
Example #7
Source File: ProtoUtil.java From hadoop with Apache License 2.0 | 6 votes |
public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
        RpcRequestHeaderProto.OperationProto operation, int callId,
        int retryCount, byte[] uuid) {
    RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
        .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));

    // Add tracing info if we are currently tracing.
    if (Trace.isTracing()) {
        Span s = Trace.currentSpan();
        result.setTraceInfo(RPCTraceInfoProto.newBuilder()
            .setParentId(s.getSpanId())
            .setTraceId(s.getTraceId()).build());
    }

    return result.build();
}
Example #8
Source File: TraceReader.java From phoenix with Apache License 2.0 | 6 votes |
/**
 * Do the same sorting that we would get from reading the table with a {@link TraceReader},
 * specifically, by trace and then by start/end. However, these are only ever stored in a
 * single trace, so we can just sort on start/end times.
 */
@Override
public int compareTo(SpanInfo o) {
    // root span always comes first
    if (this.parentId == Span.ROOT_SPAN_ID) {
        return -1;
    } else if (o.parentId == Span.ROOT_SPAN_ID) {
        return 1;
    }
    int compare = Longs.compare(start, o.start);
    if (compare == 0) {
        compare = Longs.compare(end, o.end);
        if (compare == 0) {
            return Longs.compare(id, o.id);
        }
    }
    return compare;
}
Example #9
Source File: ProtoUtil.java From big-c with Apache License 2.0 | 6 votes |
public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
        RpcRequestHeaderProto.OperationProto operation, int callId,
        int retryCount, byte[] uuid) {
    RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
        .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));

    // Add tracing info if we are currently tracing.
    if (Trace.isTracing()) {
        Span s = Trace.currentSpan();
        result.setTraceInfo(RPCTraceInfoProto.newBuilder()
            .setParentId(s.getSpanId())
            .setTraceId(s.getTraceId()).build());
    }

    return result.build();
}
Example #10
Source File: TraceSpanReceiverTest.java From phoenix with Apache License 2.0 | 6 votes |
/**
 * For PHOENIX-1126, Phoenix originally assumed all the annotation values were integers,
 * but HBase writes some strings as well, so we need to be able to handle that too
 */
@Test
public void testNonIntegerAnnotations() {
    Span span = getSpan();
    // make sure it's less than the length of an integer
    byte[] value = Bytes.toBytes("a");
    byte[] someInt = Bytes.toBytes(1);
    assertTrue(someInt.length > value.length);

    // an annotation that is not an integer
    span.addKVAnnotation(Bytes.toBytes("key"), value);

    // Create the sink and write the span
    TraceSpanReceiver source = new TraceSpanReceiver();
    Trace.addReceiver(source);

    Tracer.getInstance().deliver(span);

    assertTrue(source.getNumSpans() == 1);
}
Example #11
Source File: DFSInputStream.java From hadoop with Apache License 2.0 | 6 votes |
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
        final LocatedBlock block, final long start, final long end,
        final ByteBuffer bb,
        final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
        final int hedgedReadId) {
    final Span parentSpan = Trace.currentSpan();
    return new Callable<ByteBuffer>() {
        @Override
        public ByteBuffer call() throws Exception {
            byte[] buf = bb.array();
            int offset = bb.position();
            TraceScope scope = Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
            try {
                actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
                    corruptedBlockMap);
                return bb;
            } finally {
                scope.close();
            }
        }
    };
}
Example #12
Source File: TraceMetricsSourceTest.java From phoenix with Apache License 2.0 | 6 votes |
/**
 * For PHOENIX-1126, Phoenix originally assumed all the annotation values were integers,
 * but HBase writes some strings as well, so we need to be able to handle that too
 */
@Test
public void testNonIntegerAnnotations() {
    Span span = getSpan();
    // make sure it's less than the length of an integer
    byte[] value = Bytes.toBytes("a");
    byte[] someInt = Bytes.toBytes(1);
    assertTrue(someInt.length > value.length);

    // an annotation that is not an integer
    span.addKVAnnotation(Bytes.toBytes("key"), value);

    // Create the sink and write the span
    TraceMetricSource source = new TraceMetricSource();
    source.receiveSpan(span);
}
Example #13
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0 | 6 votes |
private void doIndexWritesWithExceptions(BatchMutateContext context, boolean post)
        throws IOException {
    ListMultimap<HTableInterfaceReference, Mutation> indexUpdates =
            post ? context.postIndexUpdates : context.preIndexUpdates;
    // short circuit, if we don't need to do any work
    if (context == null || indexUpdates == null || indexUpdates.isEmpty()) {
        return;
    }

    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope =
            Trace.startSpan("Completing " + (post ? "post" : "pre") + " index writes")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }
        current.addTimelineAnnotation("Actually doing " + (post ? "post" : "pre")
                + " index update for first time");
        if (post) {
            postWriter.write(indexUpdates, false, context.clientVersion);
        } else {
            preWriter.write(indexUpdates, false, context.clientVersion);
        }
    }
}
Example #14
Source File: Sender.java From big-c with Apache License 2.0 | 5 votes |
@Override
public void requestShortCircuitShm(String clientName) throws IOException {
    ShortCircuitShmRequestProto.Builder builder =
            ShortCircuitShmRequestProto.newBuilder().setClientName(clientName);
    if (Trace.isTracing()) {
        Span s = Trace.currentSpan();
        builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
            .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
    }
    ShortCircuitShmRequestProto proto = builder.build();
    send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);
}
Example #15
Source File: TestTracing.java From big-c with Apache License 2.0 | 5 votes |
public void readWithTracing() throws Exception {
    String fileName = "testReadTraceHooks.dat";
    writeTestFile(fileName);
    long startTime = System.currentTimeMillis();
    TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
    readTestFile(fileName);
    ts.close();
    long endTime = System.currentTimeMillis();

    String[] expectedSpanNames = {
        "testReadTraceHooks",
        "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
        "ClientNamenodeProtocol#getBlockLocations",
        "OpReadBlockProto"
    };
    assertSpanNamesFound(expectedSpanNames);

    // The trace should last about the same amount of time as the test
    Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
    Span s = map.get("testReadTraceHooks").get(0);
    Assert.assertNotNull(s);
    long spanStart = s.getStartTimeMillis();
    long spanEnd = s.getStopTimeMillis();
    Assert.assertTrue(spanStart - startTime < 100);
    Assert.assertTrue(spanEnd - endTime < 100);

    // There should only be one trace id as it should all be homed in the
    // top trace.
    for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
        Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
    }
    SetSpanReceiver.SetHolder.spans.clear();
}
Example #16
Source File: TestTracing.java From big-c with Apache License 2.0 | 5 votes |
public static Map<String, List<Span>> getMap() {
    Map<String, List<Span>> map = new HashMap<String, List<Span>>();
    for (Span s : spans.values()) {
        List<Span> l = map.get(s.getDescription());
        if (l == null) {
            l = new LinkedList<Span>();
            map.put(s.getDescription(), l);
        }
        l.add(s);
    }
    return map;
}
Example #17
Source File: Sender.java From big-c with Apache License 2.0 | 5 votes |
@Override
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
    ReleaseShortCircuitAccessRequestProto.Builder builder =
            ReleaseShortCircuitAccessRequestProto.newBuilder()
                .setSlotId(PBHelper.convert(slotId));
    if (Trace.isTracing()) {
        Span s = Trace.currentSpan();
        builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
            .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
    }
    ReleaseShortCircuitAccessRequestProto proto = builder.build();
    send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
}
Example #18
Source File: Server.java From hadoop with Apache License 2.0 | 5 votes |
public Call(int id, int retryCount, Writable param, Connection connection,
        RPC.RpcKind kind, byte[] clientId, Span span) {
    this.callId = id;
    this.retryCount = retryCount;
    this.rpcRequest = param;
    this.connection = connection;
    this.timestamp = Time.now();
    this.rpcResponse = null;
    this.rpcKind = kind;
    this.clientId = clientId;
    this.traceSpan = span;
}
Example #19
Source File: BlockStorageLocationUtil.java From hadoop with Apache License 2.0 | 5 votes |
VolumeBlockLocationCallable(Configuration configuration, DatanodeInfo datanode,
        String poolId, long[] blockIds, List<Token<BlockTokenIdentifier>> dnTokens,
        int timeout, boolean connectToDnViaHostname, Span parentSpan) {
    this.configuration = configuration;
    this.timeout = timeout;
    this.datanode = datanode;
    this.poolId = poolId;
    this.blockIds = blockIds;
    this.dnTokens = dnTokens;
    this.connectToDnViaHostname = connectToDnViaHostname;
    this.parentSpan = parentSpan;
}
Example #20
Source File: TestTracing.java From hadoop with Apache License 2.0 | 5 votes |
public static Map<String, List<Span>> getMap() {
    Map<String, List<Span>> map = new HashMap<String, List<Span>>();
    for (Span s : spans.values()) {
        List<Span> l = map.get(s.getDescription());
        if (l == null) {
            l = new LinkedList<Span>();
            map.put(s.getDescription(), l);
        }
        l.add(s);
    }
    return map;
}
Example #21
Source File: Sender.java From hadoop with Apache License 2.0 | 5 votes |
@Override
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
    ReleaseShortCircuitAccessRequestProto.Builder builder =
            ReleaseShortCircuitAccessRequestProto.newBuilder()
                .setSlotId(PBHelper.convert(slotId));
    if (Trace.isTracing()) {
        Span s = Trace.currentSpan();
        builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
            .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
    }
    ReleaseShortCircuitAccessRequestProto proto = builder.build();
    send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
}
Example #22
Source File: Server.java From big-c with Apache License 2.0 | 5 votes |
public Call(int id, int retryCount, Writable param, Connection connection,
        RPC.RpcKind kind, byte[] clientId, Span span) {
    this.callId = id;
    this.retryCount = retryCount;
    this.rpcRequest = param;
    this.connection = connection;
    this.timestamp = Time.now();
    this.rpcResponse = null;
    this.rpcKind = kind;
    this.clientId = clientId;
    this.traceSpan = span;
}
Example #23
Source File: Tracing.java From phoenix with Apache License 2.0 | 5 votes |
private static void addCustomAnnotationsToSpan(@Nullable Span span,
        @NotNull PhoenixConnection conn) {
    Preconditions.checkNotNull(conn);
    if (span == null) {
        return;
    }
    Map<String, String> annotations = conn.getCustomTracingAnnotations();
    // copy over the annotations as bytes
    for (Map.Entry<String, String> annotation : annotations.entrySet()) {
        span.addKVAnnotation(toBytes(annotation.getKey()), toBytes(annotation.getValue()));
    }
}
Example #24
Source File: TraceMetricSource.java From phoenix with Apache License 2.0 | 5 votes |
@Override
public void receiveSpan(Span span) {
    Metric builder = new Metric(span);
    // add all the metrics for the span
    builder.addCounter(Interns.info(SPAN.traceName, EMPTY_STRING), span.getSpanId());
    builder.addCounter(Interns.info(PARENT.traceName, EMPTY_STRING), span.getParentId());
    builder.addCounter(Interns.info(START.traceName, EMPTY_STRING), span.getStartTimeMillis());
    builder.addCounter(Interns.info(END.traceName, EMPTY_STRING), span.getStopTimeMillis());
    // add the tags to the span. They were written in order received so we mark them as such
    for (TimelineAnnotation ta : span.getTimelineAnnotations()) {
        builder.add(new MetricsTag(Interns.info(TAG.traceName, Long.toString(ta.getTime())),
                ta.getMessage()));
    }
    // add the annotations. We assume they are serialized as strings and integers, but that can
    // change in the future
    Map<byte[], byte[]> annotations = span.getKVAnnotations();
    for (Entry<byte[], byte[]> annotation : annotations.entrySet()) {
        Pair<String, String> val =
                TracingUtils.readAnnotation(annotation.getKey(), annotation.getValue());
        builder.add(new MetricsTag(Interns.info(ANNOTATION.traceName, val.getFirst()),
                val.getSecond()));
    }
    // add the span to the list we care about
    synchronized (this) {
        spans.add(builder);
    }
}
Example #25
Source File: TraceSpanReceiver.java From phoenix with Apache License 2.0 | 5 votes |
@Override
public void receiveSpan(Span span) {
    if (span.getTraceId() != 0 && spanQueue.offer(span)) {
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("Span buffered to queue " + span.toJson());
        }
    } else if (span.getTraceId() != 0 && LOGGER.isDebugEnabled()) {
        LOGGER.debug("Span NOT buffered due to overflow in queue " + span.toJson());
    }
}
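A receiver like the one above only sees spans once it has been registered with HTrace. The following sketch shows that wiring, mirroring the Trace.addReceiver(...) call used in Example #10; it is illustrative only, the fully qualified Phoenix package for TraceSpanReceiver is assumed, and the span name is made up.

    import org.apache.htrace.Sampler;
    import org.apache.htrace.Trace;
    import org.apache.htrace.TraceScope;

    public class ReceiverWiringSketch {
        public static void main(String[] args) {
            // register the Phoenix receiver once, e.g. at client startup
            // (TraceSpanReceiver is the class from Example #25; package assumed
            //  to be org.apache.phoenix.trace)
            org.apache.phoenix.trace.TraceSpanReceiver receiver =
                    new org.apache.phoenix.trace.TraceSpanReceiver();
            Trace.addReceiver(receiver);

            // any span closed after registration is handed to receiveSpan(...)
            try (TraceScope scope = Trace.startSpan("client-query", Sampler.ALWAYS)) {
                // traced work ...
            } // closing the scope delivers the finished Span to the receiver's queue
        }
    }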
Example #26
Source File: PhoenixTraceReaderIT.java From phoenix with Apache License 2.0 | 5 votes |
/**
 * Test multiple spans, within the same trace. Some spans are independent of the parent span,
 * some are child spans
 * @throws Exception on failure
 */
@Test
public void testMultipleSpans() throws Exception {
    // hook up a phoenix sink
    PhoenixMetricsSink sink = new PhoenixMetricsSink();
    Connection conn = getConnectionWithoutTracing();
    sink.initForTesting(conn);

    // create a simple metrics record
    long traceid = 12345;
    List<MetricsRecord> records = new ArrayList<MetricsRecord>();
    MetricsRecord record =
            createAndFlush(sink, traceid, Span.ROOT_SPAN_ID, 7777, "root", 10, 30,
                "hostname.value", "root-span tag");
    records.add(record);

    // then create a child record
    record = createAndFlush(sink, traceid, 7777, 6666, "c1", 11, 15, "hostname.value",
        "first child");
    records.add(record);

    // create a different child
    record = createAndFlush(sink, traceid, 7777, 5555, "c2", 11, 18, "hostname.value",
        "second child");
    records.add(record);

    // create a child of the second child
    record = createAndFlush(sink, traceid, 5555, 4444, "c3", 12, 16, "hostname.value",
        "third child");
    records.add(record);

    // flush all the values to the table
    sink.flush();

    // start a reader
    validateTraces(records, conn, traceid);
}
Example #27
Source File: PhoenixTraceReaderIT.java From phoenix with Apache License 2.0 | 5 votes |
/**
 * @param records
 * @param trace
 */
private void validateTrace(List<MetricsRecord> records, TraceHolder trace) {
    // drop each span into a sorted list so we get the expected ordering
    Iterator<SpanInfo> spanIter = trace.spans.iterator();
    for (MetricsRecord record : records) {
        SpanInfo spanInfo = spanIter.next();
        LOG.info("Checking span:\n" + spanInfo);

        Iterator<AbstractMetric> metricIter = record.metrics().iterator();
        assertEquals("Got an unexpected span id", metricIter.next().value(), spanInfo.id);
        long parentId = (Long) metricIter.next().value();
        if (parentId == Span.ROOT_SPAN_ID) {
            assertNull("Got a parent, but it was a root span!", spanInfo.parent);
        } else {
            assertEquals("Got an unexpected parent span id", parentId, spanInfo.parent.id);
        }
        assertEquals("Got an unexpected start time", metricIter.next().value(), spanInfo.start);
        assertEquals("Got an unexpected end time", metricIter.next().value(), spanInfo.end);

        Iterator<MetricsTag> tags = record.tags().iterator();

        int annotationCount = 0;
        while (tags.hasNext()) {
            // hostname is a tag, so we differentiate it
            MetricsTag tag = tags.next();
            if (tag.name().equals(MetricInfo.HOSTNAME.traceName)) {
                assertEquals("Didn't store correct hostname value", tag.value(),
                    spanInfo.hostname);
            } else {
                int count = annotationCount++;
                assertEquals("Didn't get expected annotation", count + " - " + tag.value(),
                    spanInfo.annotations.get(count));
            }
        }
        assertEquals("Didn't get expected number of annotations", annotationCount,
            spanInfo.annotationCount);
    }
}
Example #28
Source File: TraceMetricsSourceTest.java From phoenix with Apache License 2.0 | 5 votes |
@Test
public void testIntegerAnnotations() {
    Span span = getSpan();

    // add annotation through the phoenix interfaces
    TracingUtils.addAnnotation(span, "message", 10);

    TraceMetricSource source = new TraceMetricSource();
    source.receiveSpan(span);
}
Example #29
Source File: PhoenixTransactionalIndexer.java From phoenix with Apache License 2.0 | 5 votes |
@Override
public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp, final boolean success)
        throws IOException {
    BatchMutateContext context = getBatchMutateContext(c);
    if (context == null || context.indexUpdates == null) {
        return;
    }
    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Starting to write index updates")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }

        if (success) { // if miniBatchOp was successfully written, write index updates
            if (!context.indexUpdates.isEmpty()) {
                this.writer.write(context.indexUpdates, false, context.clientVersion);
            }
            current.addTimelineAnnotation("Wrote index updates");
        }
    } catch (Throwable t) {
        String msg = "Failed to write index updates:" + context.indexUpdates;
        LOGGER.error(msg, t);
        ServerUtil.throwIOException(msg, t);
    } finally {
        removeBatchMutateContext(c);
    }
}
Example #30
Source File: Indexer.java From phoenix with Apache License 2.0 | 5 votes |
private void doPostWithExceptions(ObserverContext<RegionCoprocessorEnvironment> c,
        BatchMutateContext context) throws IOException {
    // short circuit, if we don't need to do any work
    if (context == null || context.indexUpdates.isEmpty()) {
        return;
    }

    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Completing index writes")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }
        long start = EnvironmentEdgeManager.currentTimeMillis();

        current.addTimelineAnnotation("Actually doing index update for first time");
        writer.writeAndHandleFailure(context.indexUpdates, false, context.clientVersion);

        long duration = EnvironmentEdgeManager.currentTimeMillis() - start;
        if (duration >= slowIndexWriteThreshold) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug(getCallTooSlowMessage("indexWrite", duration,
                    slowIndexWriteThreshold));
            }
            metricSource.incrementNumSlowIndexWriteCalls();
        }
        metricSource.updateIndexWriteTime(duration);
    }
}