Java Code Examples for org.apache.htrace.Sampler#NEVER

The following examples show how to use org.apache.htrace.Sampler#NEVER. You can vote up the examples you find useful or vote down the ones you don't, and follow the links above each example to the original project or source file. Related API usage can be found on the sidebar.
Example 1
Source File: Tracing.java — from the phoenix project (Apache License 2.0)
/**
 * Maps the sampling rate requested by a TRACE statement onto an HTrace sampler.
 *
 * <p>rate &gt;= 1.0 yields {@code Sampler.ALWAYS}; 0.0 &lt; rate &lt; 1.0 yields a
 * {@code ProbabilitySampler} configured at that fraction; anything else
 * (rate &lt;= 0.0, including NaN) yields {@code Sampler.NEVER}.
 *
 * @param traceStatement statement carrying the requested sampling rate
 * @return a sampler implementing the requested rate
 */
public static Sampler<?> getConfiguredSampler(TraceStatement traceStatement) {
  double samplingRate = traceStatement.getSamplingRate();
  if (samplingRate >= 1.0) {
      return Sampler.ALWAYS;
  } else if (samplingRate > 0.0) {
      // 0 < rate < 1: sample probabilistically. The upper bound (< 1.0) is
      // implied by the first branch having failed, so only the lower bound
      // needs checking here.
      Map<String, String> items = new HashMap<String, String>();
      items.put(ProbabilitySampler.SAMPLER_FRACTION_CONF_KEY, Double.toString(samplingRate));
      return new ProbabilitySampler(HTraceConfiguration.fromMap(items));
  } else {
      // rate <= 0.0 (or NaN): tracing disabled.
      return Sampler.NEVER;
  }
}
 
Example 2
Source File: TestCacheDirectives.java — from the hadoop project (Apache License 2.0)
// End-to-end check of the HDFS cache-directive lifecycle: wait for datanode
// cache capacity to register, confirm the NameNode tolerates a bogus cache
// report, cache each test file and verify block counts, validate per-datanode
// cache accounting, then uncache each directive and re-verify.
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  // Block until all datanodes have reported their cache capacity and nothing
  // is cached yet (poll every 500 ms, up to 60 s).
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
            (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);

  // Send a cache report referring to a bogus block.  It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long> ();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i=0; i<numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
        (int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence; after each directive the expected
  // cached-block count grows by the file's block count.
  int expected = 0;
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().
          setPath(new Path(paths.get(i))).
          setPool(pool).
          build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }

  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  // Aggregate used bytes across datanodes must match the cached block total.
  assertEquals(expected*BLOCK_SIZE, totalUsed);

  // Uncache and check each path in sequence.  Sampler.NEVER disables HTrace
  // sampling for the directive-listing RPCs made by the iterator.
  RemoteIterator<CacheDirectiveEntry> entries =
    new CacheDirectiveIterator(nnRpc, null, Sampler.NEVER);
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
 
Example 3
Source File: TestCacheDirectives.java — from the big-c project (Apache License 2.0)
// End-to-end check of the HDFS cache-directive lifecycle (identical to the
// hadoop variant above in behavior): wait for datanode cache capacity to
// register, confirm the NameNode tolerates a bogus cache report, cache each
// test file and verify block counts, validate per-datanode cache accounting,
// then uncache each directive and re-verify.
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  // Block until all datanodes have reported their cache capacity and nothing
  // is cached yet (poll every 500 ms, up to 60 s).
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() ==
          (NUM_DATANODES * CACHE_CAPACITY)) &&
            (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);

  // Send a cache report referring to a bogus block.  It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long> ();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i=0; i<numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
        (int)BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence; after each directive the expected
  // cached-block count grows by the file's block count.
  int expected = 0;
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveInfo directive =
        new CacheDirectiveInfo.Builder().
          setPath(new Path(paths.get(i))).
          setPool(pool).
          build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:1");
  }

  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining",
        cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used",
        cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  // Aggregate used bytes across datanodes must match the cached block total.
  assertEquals(expected*BLOCK_SIZE, totalUsed);

  // Uncache and check each path in sequence.  Sampler.NEVER disables HTrace
  // sampling for the directive-listing RPCs made by the iterator.
  RemoteIterator<CacheDirectiveEntry> entries =
    new CacheDirectiveIterator(nnRpc, null, Sampler.NEVER);
  for (int i=0; i<numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected,
        "testWaitForCachedReplicas:2");
  }
}
 
Example 4
Source File: TraceQueryPlan.java — from the phoenix project (Apache License 2.0)
/**
 * Produces at most one result row for a TRACE statement: a tuple whose row key
 * encodes the trace id of the newly started span, or {@code null} when there is
 * nothing to report (already consumed, tracing turned off, or sampler NEVER).
 *
 * <p>Side effects on the connection: installs the configured sampler, and
 * opens or closes the connection's trace scope depending on whether tracing
 * is being enabled or disabled.
 */
@Override
public Tuple next() throws SQLException {
    // Emit the trace-id row only once; subsequent calls return null.
    if(!first) return null;
    TraceScope traceScope = conn.getTraceScope();
    if (traceStatement.isTraceOn()) {
        conn.setSampler(Tracing.getConfiguredSampler(traceStatement));
        // A NEVER sampler means tracing is effectively off: drop any open scope.
        if (conn.getSampler() == Sampler.NEVER) {
            closeTraceScope(conn);
        }
        // No scope yet and sampling enabled: start a new span for this trace.
        if (traceScope == null && !conn.getSampler().equals(Sampler.NEVER)) {
            traceScope = Tracing.startNewSpan(conn, "Enabling trace");
            if (traceScope.getSpan() != null) {
                conn.setTraceScope(traceScope);
            } else {
                // Span was not sampled; nothing to track.
                closeTraceScope(conn);
            }
        }
    } else {
        // TRACE OFF: tear down the scope and disable sampling entirely.
        closeTraceScope(conn);
        conn.setSampler(Sampler.NEVER);
    }
    // No live span -> no trace id to return.
    if (traceScope == null || traceScope.getSpan() == null) return null;
    first = false;
    // Encode the span's trace id as a PLong row key and wrap it in a
    // single-cell result tuple.
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ParseNodeFactory factory = new ParseNodeFactory();
    LiteralParseNode literal =
            factory.literal(traceScope.getSpan().getTraceId());
    LiteralExpression expression =
            LiteralExpression.newConstant(literal.getValue(), PLong.INSTANCE,
                Determinism.ALWAYS);
    expression.evaluate(null, ptr);
    byte[] rowKey = ByteUtil.copyKeyBytesIfNecessary(ptr);
    Cell cell =
            PhoenixKeyValueUtil
                    .newKeyValue(rowKey, HConstants.EMPTY_BYTE_ARRAY,
                        HConstants.EMPTY_BYTE_ARRAY,
                        EnvironmentEdgeManager.currentTimeMillis(),
                        HConstants.EMPTY_BYTE_ARRAY);
    List<Cell> cells = new ArrayList<Cell>(1);
    cells.add(cell);
    return new ResultTuple(Result.create(cells));
}