org.apache.htrace.Sampler Java Examples

The following examples show how to use org.apache.htrace.Sampler. They are drawn from open-source projects (accumulo-examples, hadoop, big-c, and phoenix); the originating project and source file are noted above each example.
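For orientation, the snippet below is a minimal, self-contained sketch of the Sampler API that all of the examples rely on. It assumes HTrace 3.x, where ProbabilitySampler lives in org.apache.htrace.impl; the class name SamplerSketch, the span names, and the 0.1 fraction are purely illustrative. Sampler.ALWAYS samples every span, Sampler.NEVER starts a new span only when a trace is already active, and ProbabilitySampler keeps a configurable fraction of spans.

import java.util.Collections;

import org.apache.htrace.HTraceConfiguration;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.impl.ProbabilitySampler;

public class SamplerSketch {
  public static void main(String[] args) {
    // Sampler.ALWAYS: startSpan always creates a real, sampled span.
    TraceScope always = Trace.startSpan("demo-always", Sampler.ALWAYS);
    always.close();

    // Sampler.NEVER: no new trace is started here; a span is created only
    // if a trace is already running, so instrumented code stays cheap.
    TraceScope never = Trace.startSpan("demo-never", Sampler.NEVER);
    never.close();

    // ProbabilitySampler: keeps a fraction of spans, configured through
    // SAMPLER_FRACTION_CONF_KEY (here roughly 10%).
    Sampler<?> tenPercent = new ProbabilitySampler(HTraceConfiguration.fromMap(
        Collections.singletonMap(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, "0.1")));
    TraceScope sometimes = Trace.startSpan("demo-sometimes", tenPercent);
    sometimes.close();
  }
}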
Example #1
Source File: TracingExample.java    From accumulo-examples with Apache License 2.0
private void readEntries(Opts opts) throws TableNotFoundException {

    Scanner scanner = client.createScanner(opts.getTableName(), opts.auths);

    // Trace the read operation.
    TraceScope readScope = Trace.startSpan("Client Read", Sampler.ALWAYS);
    System.out.println("TraceID: " + Long.toHexString(readScope.getSpan().getTraceId()));

    int numberOfEntriesRead = 0;
    for (Entry<Key,Value> entry : scanner) {
      System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
      ++numberOfEntriesRead;
    }
    // You can add additional metadata (key/value pairs) to spans, which can be viewed in the
    // Monitor.
    readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8),
        String.valueOf(numberOfEntriesRead).getBytes(UTF_8));

    readScope.close();
  }
 
Example #2
Source File: TracingExample.java    From accumulo-examples with Apache License 2.0
private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException {

    // Trace the write operation. Note: unless you flush the BatchWriter, you will not capture
    // the write operation, as it occurs asynchronously. You can optionally create additional
    // spans within a given trace, as seen below around the flush.
    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);

    System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
    try (BatchWriter batchWriter = client.createBatchWriter(opts.getTableName())) {
      Mutation m = new Mutation("row");
      m.put("cf", "cq", "value");

      batchWriter.addMutation(m);
      // You can add timeline annotations to spans, which can be viewed in the Monitor
      scope.getSpan().addTimelineAnnotation("Initiating Flush");
      batchWriter.flush();
    }
    scope.close();
  }
 
Example #3
Source File: RemoteBlockReader2.java    From hadoop with Apache License 2.0
@Override
public int read(ByteBuffer buf) throws IOException {
  if (curDataSlice == null || (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
    TraceScope scope = Trace.startSpan(
        "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
    try {
      readNextPacket();
    } finally {
      scope.close();
    }
  }
  if (curDataSlice.remaining() == 0) {
    // we're at EOF now
    return -1;
  }

  int nRead = Math.min(curDataSlice.remaining(), buf.remaining());
  ByteBuffer writeSlice = curDataSlice.duplicate();
  writeSlice.limit(writeSlice.position() + nRead);
  buf.put(writeSlice);
  curDataSlice.position(writeSlice.position());

  return nRead;
}
 
Example #4
Source File: BlockReaderLocalLegacy.java    From hadoop with Apache License 2.0
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
    throws IOException {
  TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" +
      blockId + ")", Sampler.NEVER);
  try {
    int bytesRead = stream.getChannel().read(buf);
    if (bytesRead < 0) {
      //EOF
      return bytesRead;
    }
    while (buf.remaining() > 0) {
      int n = stream.getChannel().read(buf);
      if (n < 0) {
        //EOF
        return bytesRead;
      }
      bytesRead += n;
    }
    return bytesRead;
  } finally {
    scope.close();
  }
}
 
Example #5
Source File: BlockReaderLocalLegacy.java    From big-c with Apache License 2.0
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
    throws IOException {
  TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" +
      blockId + ")", Sampler.NEVER);
  try {
    int bytesRead = stream.getChannel().read(buf);
    if (bytesRead < 0) {
      //EOF
      return bytesRead;
    }
    while (buf.remaining() > 0) {
      int n = stream.getChannel().read(buf);
      if (n < 0) {
        //EOF
        return bytesRead;
      }
      bytesRead += n;
    }
    return bytesRead;
  } finally {
    scope.close();
  }
}
 
Example #6
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
@Override
public int read(ByteBuffer buf) throws IOException {
  if (curDataSlice == null || (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
    TraceScope scope = Trace.startSpan(
        "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
    try {
      readNextPacket();
    } finally {
      scope.close();
    }
  }
  if (curDataSlice.remaining() == 0) {
    // we're at EOF now
    return -1;
  }

  int nRead = Math.min(curDataSlice.remaining(), buf.remaining());
  ByteBuffer writeSlice = curDataSlice.duplicate();
  writeSlice.limit(writeSlice.position() + nRead);
  buf.put(writeSlice);
  curDataSlice.position(writeSlice.position());

  return nRead;
}
 
Example #7
Source File: RemoteBlockReader.java    From hadoop with Apache License 2.0
@Override
protected synchronized int readChunk(long pos, byte[] buf, int offset, 
                                     int len, byte[] checksumBuf) 
                                     throws IOException {
  TraceScope scope =
      Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")",
          Sampler.NEVER);
  try {
    return readChunkImpl(pos, buf, offset, len, checksumBuf);
  } finally {
    scope.close();
  }
}
 
Example #8
Source File: TestTracing.java    From big-c with Apache License 2.0
public void readWithTracing() throws Exception {
  String fileName = "testReadTraceHooks.dat";
  writeTestFile(fileName);
  long startTime = System.currentTimeMillis();
  TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
  readTestFile(fileName);
  ts.close();
  long endTime = System.currentTimeMillis();

  String[] expectedSpanNames = {
    "testReadTraceHooks",
    "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
    "ClientNamenodeProtocol#getBlockLocations",
    "OpReadBlockProto"
  };
  assertSpanNamesFound(expectedSpanNames);

  // The trace should last about the same amount of time as the test
  Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
  Span s = map.get("testReadTraceHooks").get(0);
  Assert.assertNotNull(s);

  long spanStart = s.getStartTimeMillis();
  long spanEnd = s.getStopTimeMillis();
  Assert.assertTrue(spanStart - startTime < 100);
  Assert.assertTrue(spanEnd - endTime < 100);

  // There should only be one trace id as it should all be homed in the
  // top trace.
  for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
    Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
  }
  SetSpanReceiver.SetHolder.spans.clear();
}
 
Example #9
Source File: TestTracingShortCircuitLocalRead.java    From big-c with Apache License 2.0
@Test
public void testShortCircuitTraceHooks() throws IOException {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
      SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
      TestTracing.SetSpanReceiver.class.getName());
  conf.setLong("dfs.blocksize", 100 * 1024);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "testShortCircuitTraceHooks._PORT");
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .build();
  dfs = cluster.getFileSystem();

  try {
    DFSTestUtil.createFile(dfs, TEST_PATH, TEST_LENGTH, (short)1, 5678L);

    TraceScope ts = Trace.startSpan("testShortCircuitTraceHooks", Sampler.ALWAYS);
    FSDataInputStream stream = dfs.open(TEST_PATH);
    byte[] buf = new byte[TEST_LENGTH];
    IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
    stream.close();
    ts.close();

    String[] expectedSpanNames = {
      "OpRequestShortCircuitAccessProto",
      "ShortCircuitShmRequestProto"
    };
    TestTracing.assertSpanNamesFound(expectedSpanNames);
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Example #10
Source File: DFSInotifyEventInputStream.java    From big-c with Apache License 2.0
DFSInotifyEventInputStream(Sampler traceSampler, ClientProtocol namenode,
      long lastReadTxid) throws IOException {
  this.traceSampler = traceSampler;
  this.namenode = namenode;
  this.it = Iterators.emptyIterator();
  this.lastReadTxid = lastReadTxid;
}
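The constructor above only stores the sampler; it is consumed each time the stream polls the NameNode for new events. The sketch below illustrates that pattern only; the method body, return value, and span name are assumptions modeled on the other examples, not code quoted from Hadoop.

// Illustrative only: how a Sampler kept in a field is typically used per call.
public EventBatch poll() throws IOException {
  // Whether this scope carries a real span is decided by the sampler
  // that was passed to the constructor above.
  TraceScope scope = Trace.startSpan("inotifyPoll", traceSampler);
  try {
    // ... fetch and decode the next batch of edit-log events ...
    return null; // placeholder
  } finally {
    scope.close();
  }
}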
 
Example #11
Source File: DFSOutputStream.java    From hadoop with Apache License 2.0
private void waitForAckedSeqno(long seqno) throws IOException {
  TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
  try {
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Waiting for ack for: " + seqno);
    }
    long begin = Time.monotonicNow();
    try {
      synchronized (dataQueue) {
        while (!isClosed()) {
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            dataQueue.wait(1000); // when we receive an ack, we notify on dataQueue
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      checkClosed();
    } catch (ClosedChannelException e) {
    }
    long duration = Time.monotonicNow() - begin;
    if (duration > dfsclientSlowLogThresholdMs) {
      DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration
          + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
    }
  } finally {
    scope.close();
  }
}
 
Example #12
Source File: CacheDirectiveIterator.java    From big-c with Apache License 2.0
public CacheDirectiveIterator(ClientProtocol namenode,
    CacheDirectiveInfo filter, Sampler<?> traceSampler) {
  super(0L);
  this.namenode = namenode;
  this.filter = filter;
  this.traceSampler = traceSampler;
}
 
Example #13
Source File: RemoteBlockReader2.java    From big-c with Apache License 2.0
@Override
public synchronized int read(byte[] buf, int off, int len) 
                             throws IOException {

  UUID randomId = null;
  if (LOG.isTraceEnabled()) {
    randomId = UUID.randomUUID();
    LOG.trace(String.format("Starting read #%s file %s from datanode %s",
      randomId.toString(), this.filename,
      this.datanodeID.getHostName()));
  }

  if (curDataSlice == null || (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
    TraceScope scope = Trace.startSpan(
        "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
    try {
      readNextPacket();
    } finally {
      scope.close();
    }
  }

  if (LOG.isTraceEnabled()) {
    LOG.trace("Finishing read #" + randomId);
  }

  if (curDataSlice.remaining() == 0) {
    // we're at EOF now
    return -1;
  }
  
  int nRead = Math.min(curDataSlice.remaining(), len);
  curDataSlice.get(buf, off, nRead);
  
  return nRead;
}
 
Example #14
Source File: RemoteBlockReader.java    From big-c with Apache License 2.0
@Override
protected synchronized int readChunk(long pos, byte[] buf, int offset, 
                                     int len, byte[] checksumBuf) 
                                     throws IOException {
  TraceScope scope =
      Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")",
          Sampler.NEVER);
  try {
    return readChunkImpl(pos, buf, offset, len, checksumBuf);
  } finally {
    scope.close();
  }
}
 
Example #15
Source File: DFSOutputStream.java    From big-c with Apache License 2.0
private void waitForAckedSeqno(long seqno) throws IOException {
  TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
  try {
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Waiting for ack for: " + seqno);
    }
    long begin = Time.monotonicNow();
    try {
      synchronized (dataQueue) {
        while (!isClosed()) {
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            dataQueue.wait(1000); // when we receive an ack, we notify on dataQueue
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      checkClosed();
    } catch (ClosedChannelException e) {
    }
    long duration = Time.monotonicNow() - begin;
    if (duration > dfsclientSlowLogThresholdMs) {
      DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration
          + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
    }
  } finally {
    scope.close();
  }
}
 
Example #16
Source File: TestTracing.java    From hadoop with Apache License 2.0
public void readWithTracing() throws Exception {
  String fileName = "testReadTraceHooks.dat";
  writeTestFile(fileName);
  long startTime = System.currentTimeMillis();
  TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
  readTestFile(fileName);
  ts.close();
  long endTime = System.currentTimeMillis();

  String[] expectedSpanNames = {
    "testReadTraceHooks",
    "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
    "ClientNamenodeProtocol#getBlockLocations",
    "OpReadBlockProto"
  };
  assertSpanNamesFound(expectedSpanNames);

  // The trace should last about the same amount of time as the test
  Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
  Span s = map.get("testReadTraceHooks").get(0);
  Assert.assertNotNull(s);

  long spanStart = s.getStartTimeMillis();
  long spanEnd = s.getStopTimeMillis();
  Assert.assertTrue(spanStart - startTime < 100);
  Assert.assertTrue(spanEnd - endTime < 100);

  // There should only be one trace id as it should all be homed in the
  // top trace.
  for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
    Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
  }
  SetSpanReceiver.SetHolder.spans.clear();
}
 
Example #17
Source File: RemoteBlockReader2.java    From hadoop with Apache License 2.0
@Override
public synchronized int read(byte[] buf, int off, int len) 
                             throws IOException {

  UUID randomId = null;
  if (LOG.isTraceEnabled()) {
    randomId = UUID.randomUUID();
    LOG.trace(String.format("Starting read #%s file %s from datanode %s",
      randomId.toString(), this.filename,
      this.datanodeID.getHostName()));
  }

  if (curDataSlice == null || (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
    TraceScope scope = Trace.startSpan(
        "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
    try {
      readNextPacket();
    } finally {
      scope.close();
    }
  }

  if (LOG.isTraceEnabled()) {
    LOG.trace("Finishing read #" + randomId);
  }

  if (curDataSlice.remaining() == 0) {
    // we're at EOF now
    return -1;
  }
  
  int nRead = Math.min(curDataSlice.remaining(), len);
  curDataSlice.get(buf, off, nRead);
  
  return nRead;
}
 
Example #18
Source File: CacheDirectiveIterator.java    From hadoop with Apache License 2.0
public CacheDirectiveIterator(ClientProtocol namenode,
    CacheDirectiveInfo filter, Sampler<?> traceSampler) {
  super(0L);
  this.namenode = namenode;
  this.filter = filter;
  this.traceSampler = traceSampler;
}
 
Example #19
Source File: Tracing.java    From phoenix with Apache License 2.0
public static Sampler<?> getConfiguredSampler(TraceStatement traceStatement) {
  double samplingRate = traceStatement.getSamplingRate();
  if (samplingRate >= 1.0) {
      return Sampler.ALWAYS;
  } else if (samplingRate < 1.0 && samplingRate > 0.0) {
      Map<String, String> items = new HashMap<String, String>();
      items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, Double.toString(samplingRate));
      return new ProbabilitySampler(HTraceConfiguration.fromMap(items));
  } else {
      return Sampler.NEVER;
  }
}
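A sampler obtained this way is used exactly like the Sampler.ALWAYS and Sampler.NEVER constants in the earlier examples: pass it to Trace.startSpan and close the resulting scope when the work completes. A minimal, hypothetical helper (the name runTraced is illustrative):

// Hypothetical helper: run a task inside a span gated by the given sampler.
static void runTraced(String description, Sampler<?> sampler, Runnable task) {
  TraceScope scope = Trace.startSpan(description, sampler);
  try {
    task.run();
  } finally {
    scope.close(); // always close the scope, as the examples above do
  }
}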
 
Example #20
Source File: DFSInotifyEventInputStream.java    From hadoop with Apache License 2.0
DFSInotifyEventInputStream(Sampler traceSampler, ClientProtocol namenode,
      long lastReadTxid) throws IOException {
  this.traceSampler = traceSampler;
  this.namenode = namenode;
  this.it = Iterators.emptyIterator();
  this.lastReadTxid = lastReadTxid;
}
 
Example #21
Source File: TestTracingShortCircuitLocalRead.java    From hadoop with Apache License 2.0
@Test
public void testShortCircuitTraceHooks() throws IOException {
  assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
      SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
      TestTracing.SetSpanReceiver.class.getName());
  conf.setLong("dfs.blocksize", 100 * 1024);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "testShortCircuitTraceHooks._PORT");
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .build();
  dfs = cluster.getFileSystem();

  try {
    DFSTestUtil.createFile(dfs, TEST_PATH, TEST_LENGTH, (short)1, 5678L);

    TraceScope ts = Trace.startSpan("testShortCircuitTraceHooks", Sampler.ALWAYS);
    FSDataInputStream stream = dfs.open(TEST_PATH);
    byte[] buf = new byte[TEST_LENGTH];
    IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
    stream.close();
    ts.close();

    String[] expectedSpanNames = {
      "OpRequestShortCircuitAccessProto",
      "ShortCircuitShmRequestProto"
    };
    TestTracing.assertSpanNamesFound(expectedSpanNames);
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Example #22
Source File: Tracing.java    From phoenix with Apache License 2.0
public static Sampler<?> getConfiguredSampler(PhoenixConnection connection) {
    String tracelevel = connection.getQueryServices().getProps().get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ);
    return getSampler(tracelevel, new ConfigurationAdapter.ConnectionConfigurationAdapter(
            connection));
}
 
Example #23
Source File: PhoenixConnection.java    From phoenix with Apache License 2.0
public Sampler<?> getSampler() {
    return this.sampler;
}
 
Example #24
Source File: TraceQueryPlan.java    From phoenix with Apache License 2.0
@Override
public Tuple next() throws SQLException {
    if (!first) return null;
    TraceScope traceScope = conn.getTraceScope();
    if (traceStatement.isTraceOn()) {
        conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
        if (conn.getSampler() == Sampler.NEVER) {
            closeTraceScope(conn);
        }
        if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
            traceScope = Tracing.startNewSpan(conn, "Enabling trace");
            if (traceScope.getSpan() != null) {
                conn.setTraceScope(traceScope);
            } else {
                closeTraceScope(conn);
            }
        }
    } else {
        closeTraceScope(conn);
        conn.setSampler(Sampler.NEVER);
    }
    if (traceScope == null || traceScope.getSpan() == null) return null;
    first = false;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ParseNodeFactory factory = new ParseNodeFactory();
    LiteralParseNode literal =
            factory.literal(traceScope.getSpan().getTraceId());
    LiteralExpression expression =
            LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE,
                Determinism.ALWAYS);
    expression.evaluate(null, ptr);
    byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
    Cell cell =
            PhoenixKeyValueUtil
                    .newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY,
                        HConstants.EMPTY_BYTE_ARRAY,
                        EnvironmentEdgeManager.currentTimeMillis(),
                        HConstants.EMPTY_BYTE_ARRAY);
    List<Cell> cells = new ArrayList<Cell>(1);
    cells.add(cell);
    return new ResultTuple(Result.create(cells));
}
 
Example #25
Source File: Tracing.java    From phoenix with Apache License 2.0
public static Sampler<?> getConfiguredSampler(Configuration conf) {
    String tracelevel = conf.get(QueryServices.TRACING_FREQ_ATTRIB, QueryServicesOptions.DEFAULT_TRACING_FREQ);
    return getSampler(tracelevel, new ConfigurationAdapter.HadoopConfigConfigurationAdapter(
            conf));
}
 
Example #26
Source File: Tracing.java    From phoenix with Apache License 2.0
private static Sampler<?> getSampler(String traceLevel, ConfigurationAdapter conf) {
    return Frequency.getSampler(traceLevel).builder.apply(conf);
}
 
Example #27
Source File: Tracing.java    From phoenix with Apache License 2.0
private Frequency(String key, Function<ConfigurationAdapter, Sampler<?>> builder) {
    this.key = key;
    this.builder = builder;
}
 