Java Code Examples for org.apache.htrace.Trace

The following examples show how to use org.apache.htrace.Trace. They are extracted from open source projects; the source project and file for each example are listed above it.
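All of the examples share one core pattern. Here is a minimal sketch of it, assuming an htrace 3.x SpanReceiver has already been registered elsewhere; the class, span name, and traced work are placeholders, not taken from any of the projects below.

import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class TraceSketch {
  void tracedOperation() {
    // Sampler.ALWAYS forces a new trace even when no parent span is
    // active on this thread.
    TraceScope scope = Trace.startSpan("exampleOperation", Sampler.ALWAYS);
    try {
      // ... the work being traced ...
    } finally {
      scope.close(); // an unclosed scope is never reported
    }
  }
}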
Example 1
Source Project: hadoop   Source File: DFSInputStream.java    License: Apache License 2.0
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
    final LocatedBlock block, final long start, final long end,
    final ByteBuffer bb,
    final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
    final int hedgedReadId) {
  final Span parentSpan = Trace.currentSpan();
  return new Callable<ByteBuffer>() {
    @Override
    public ByteBuffer call() throws Exception {
      byte[] buf = bb.array();
      int offset = bb.position();
      TraceScope scope =
          Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
      try {
        actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
            corruptedBlockMap);
        return bb;
      } finally {
        scope.close();
      }
    }
  };
}
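Note how the parent span is captured with Trace.currentSpan() on the submitting thread and then passed to Trace.startSpan() inside the Callable: the htrace span context is thread-local, so without this explicit hand-off the hedged read running on an executor thread would not be linked into the caller's trace.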
 
Example 2
Source Project: hadoop   Source File: BlockReaderLocalLegacy.java    License: Apache License 2.0
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
    throws IOException {
  TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" +
      blockId + ")", Sampler.NEVER);
  try {
    int bytesRead = stream.getChannel().read(buf);
    if (bytesRead < 0) {
      //EOF
      return bytesRead;
    }
    while (buf.remaining() > 0) {
      int n = stream.getChannel().read(buf);
      if (n < 0) {
        //EOF
        return bytesRead;
      }
      bytesRead += n;
    }
    return bytesRead;
  } finally {
    scope.close();
  }
}
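Note the Sampler.NEVER argument: in htrace 3.x the sampler only decides whether to begin a new top-level trace, so a span is still created here whenever the calling thread is already tracing. fillBuffer therefore shows up as a child span in traced reads and costs almost nothing otherwise.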
 
Example 3
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Decrypts an EDEK by consulting the KeyProvider.
 */
private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
    feInfo) throws IOException {
  TraceScope scope = Trace.startSpan("decryptEDEK", traceSampler);
  try {
    KeyProvider provider = getKeyProvider();
    if (provider == null) {
      throw new IOException("No KeyProvider is configured, cannot access" +
          " an encrypted file");
    }
    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
        feInfo.getKeyName(), feInfo.getEzKeyVersionName(), feInfo.getIV(),
        feInfo.getEncryptedDataEncryptionKey());
    try {
      KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
          .createKeyProviderCryptoExtension(provider);
      return cryptoProvider.decryptEncryptedKey(ekv);
    } catch (GeneralSecurityException e) {
      throw new IOException(e);
    }
  } finally {
    scope.close();
  }
}
 
Example 4
Source Project: phoenix   Source File: IndexRegionObserver.java    License: Apache License 2.0
private void doIndexWritesWithExceptions(BatchMutateContext context, boolean post)
          throws IOException {
    // Short circuit if there is no work to do. Check context for null before
    // dereferencing it to pick the update list.
    if (context == null) {
        return;
    }
    ListMultimap<HTableInterfaceReference, Mutation> indexUpdates = post ? context.postIndexUpdates : context.preIndexUpdates;
    if (indexUpdates == null || indexUpdates.isEmpty()) {
        return;
    }

    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Completing " + (post ? "post" : "pre") + " index writes")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }
        current.addTimelineAnnotation("Actually doing " + (post ? "post" : "pre") + " index update for first time");
        if (post) {
            postWriter.write(indexUpdates, false, context.clientVersion);
        } else {
            preWriter.write(indexUpdates, false, context.clientVersion);
        }
    }
}
 
Example 5
Source Project: accumulo-examples   Source File: TracingExample.java    License: Apache License 2.0
private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException {

    // Trace the write operation. Note that unless you flush the BatchWriter, you will not
    // capture the write operation, as it occurs asynchronously. You can optionally create
    // additional Spans within a given Trace, as seen below around the flush.
    try (TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS)) {
      System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
      try (BatchWriter batchWriter = client.createBatchWriter(opts.getTableName())) {
        Mutation m = new Mutation("row");
        m.put("cf", "cq", "value");

        batchWriter.addMutation(m);
        // You can add timeline annotations to Spans, which can be viewed in the Monitor
        scope.getSpan().addTimelineAnnotation("Initiating Flush");
        batchWriter.flush();
      }
    }
  }
 
Example 6
Source Project: accumulo-examples   Source File: TracingExample.java    License: Apache License 2.0
private void readEntries(Opts opts) throws TableNotFoundException {

    Scanner scanner = client.createScanner(opts.getTableName(), opts.auths);

    // Trace the read operation.
    try (TraceScope readScope = Trace.startSpan("Client Read", Sampler.ALWAYS)) {
      System.out.println("TraceID: " + Long.toHexString(readScope.getSpan().getTraceId()));

      int numberOfEntriesRead = 0;
      for (Entry<Key,Value> entry : scanner) {
        System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
        ++numberOfEntriesRead;
      }
      // You can add additional metadata (key/value pairs) to Spans, which can be viewed
      // in the Monitor
      readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8),
          String.valueOf(numberOfEntriesRead).getBytes(UTF_8));
    }
  }
 
Example 7
Source Project: big-c   Source File: WritableRpcEngine.java    License: Apache License 2.0
@Override
public Object invoke(Object proxy, Method method, Object[] args)
  throws Throwable {
  long startTime = 0;
  if (LOG.isDebugEnabled()) {
    startTime = Time.now();
  }
  TraceScope traceScope = null;
  if (Trace.isTracing()) {
    traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
  }
  ObjectWritable value;
  try {
    value = (ObjectWritable)
      client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args),
        remoteId, fallbackToSimpleAuth);
  } finally {
    if (traceScope != null) traceScope.close();
  }
  if (LOG.isDebugEnabled()) {
    long callTime = Time.now() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
 
Example 8
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
public void removeDefaultAcl(String src) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("removeDefaultAcl", traceSampler);
  try {
    namenode.removeDefaultAcl(src);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   AclException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   SafeModeException.class,
                                   SnapshotAccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example 9
Source Project: hadoop   Source File: BlockStorageLocationUtil.java    License: Apache License 2.0
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  TraceScope scope =
      Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    scope.close();
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
 
Example 10
Source Project: hadoop   Source File: ProtoUtil.java    License: Apache License 2.0
public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
    RpcRequestHeaderProto.OperationProto operation, int callId,
    int retryCount, byte[] uuid) {
  RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
  result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
      .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));

  // Add tracing info if we are currently tracing.
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    result.setTraceInfo(RPCTraceInfoProto.newBuilder()
        .setParentId(s.getSpanId())
        .setTraceId(s.getTraceId()).build());
  }

  return result.build();
}
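For the receiving side, here is a hedged sketch of how a server might rejoin the trace from those IDs. Only Trace.startSpan(String, TraceInfo) and the proto getters matching the builder above are real htrace/protobuf API; the header variable and the span name are hypothetical.

// "header" is an assumed RpcRequestHeaderProto decoded from the wire.
if (header.hasTraceInfo()) {
  // Rebuild the parent linkage from the IDs set by makeRpcRequestHeader.
  TraceInfo parent = new TraceInfo(
      header.getTraceInfo().getTraceId(),
      header.getTraceInfo().getParentId());
  TraceScope scope = Trace.startSpan("handleRpcCall", parent);
  try {
    // ... dispatch the call ...
  } finally {
    scope.close();
  }
}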
 
Example 11
Source Project: hadoop   Source File: WritableRpcEngine.java    License: Apache License 2.0
@Override
public Object invoke(Object proxy, Method method, Object[] args)
  throws Throwable {
  long startTime = 0;
  if (LOG.isDebugEnabled()) {
    startTime = Time.now();
  }
  TraceScope traceScope = null;
  if (Trace.isTracing()) {
    traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
  }
  ObjectWritable value;
  try {
    value = (ObjectWritable)
      client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args),
        remoteId, fallbackToSimpleAuth);
  } finally {
    if (traceScope != null) traceScope.close();
  }
  if (LOG.isDebugEnabled()) {
    long callTime = Time.now() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
 
Example 12
Source Project: big-c   Source File: DFSInputStream.java    License: Apache License 2.0
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
    final LocatedBlock block, final long start, final long end,
    final ByteBuffer bb,
    final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
    final int hedgedReadId) {
  final Span parentSpan = Trace.currentSpan();
  return new Callable<ByteBuffer>() {
    @Override
    public ByteBuffer call() throws Exception {
      byte[] buf = bb.array();
      int offset = bb.position();
      TraceScope scope =
          Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
      try {
        actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
            corruptedBlockMap);
        return bb;
      } finally {
        scope.close();
      }
    }
  };
}
 
Example 13
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
public void removeAclEntries(String src, List<AclEntry> aclSpec)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("removeAclEntries", traceSampler);
  try {
    namenode.removeAclEntries(src, aclSpec);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   AclException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   SafeModeException.class,
                                   SnapshotAccessControlException.class,
                                   UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
 
Example 14
Source Project: big-c   Source File: BlockReaderLocalLegacy.java    License: Apache License 2.0
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
    throws IOException {
  TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" +
      blockId + ")", Sampler.NEVER);
  try {
    int bytesRead = stream.getChannel().read(buf);
    if (bytesRead < 0) {
      //EOF
      return bytesRead;
    }
    while (buf.remaining() > 0) {
      int n = stream.getChannel().read(buf);
      if (n < 0) {
        //EOF
        return bytesRead;
      }
      bytesRead += n;
    }
    return bytesRead;
  } finally {
    scope.close();
  }
}
 
Example 15
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Decrypts an EDEK by consulting the KeyProvider.
 */
private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
    feInfo) throws IOException {
  TraceScope scope = Trace.startSpan("decryptEDEK", traceSampler);
  try {
    KeyProvider provider = getKeyProvider();
    if (provider == null) {
      throw new IOException("No KeyProvider is configured, cannot access" +
          " an encrypted file");
    }
    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
        feInfo.getKeyName(), feInfo.getEzKeyVersionName(), feInfo.getIV(),
        feInfo.getEncryptedDataEncryptionKey());
    try {
      KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
          .createKeyProviderCryptoExtension(provider);
      return cryptoProvider.decryptEncryptedKey(ekv);
    } catch (GeneralSecurityException e) {
      throw new IOException(e);
    }
  } finally {
    scope.close();
  }
}
 
Example 16
Source Project: hadoop   Source File: DFSOutputStream.java    License: Apache License 2.0
private void queueCurrentPacket() {
  synchronized (dataQueue) {
    if (currentPacket == null) return;
    currentPacket.addTraceParent(Trace.currentSpan());
    dataQueue.addLast(currentPacket);
    lastQueuedSeqno = currentPacket.getSeqno();
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Queued packet " + currentPacket.getSeqno());
    }
    currentPacket = null;
    dataQueue.notifyAll();
  }
}
 
Example 17
Source Project: hadoop   Source File: DFSOutputStream.java    License: Apache License 2.0
private void waitForAckedSeqno(long seqno) throws IOException {
  TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
  try {
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Waiting for ack for: " + seqno);
    }
    long begin = Time.monotonicNow();
    try {
      synchronized (dataQueue) {
        while (!isClosed()) {
          checkClosed();
          if (lastAckedSeqno >= seqno) {
            break;
          }
          try {
            dataQueue.wait(1000); // when we receive an ack, we notify on
            // dataQueue
          } catch (InterruptedException ie) {
            throw new InterruptedIOException(
                "Interrupted while waiting for data to be acknowledged by pipeline");
          }
        }
      }
      checkClosed();
    } catch (ClosedChannelException e) {
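      // Swallowed deliberately: ClosedChannelException here just means the
      // stream was closed while waiting, so there is nothing left to ack.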
    }
    long duration = Time.monotonicNow() - begin;
    if (duration > dfsclientSlowLogThresholdMs) {
      DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration
          + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
    }
  } finally {
    scope.close();
  }
}
 
Example 18
Source Project: big-c   Source File: TestTracing.java    License: Apache License 2.0
public void readWithTracing() throws Exception {
  String fileName = "testReadTraceHooks.dat";
  writeTestFile(fileName);
  long startTime = System.currentTimeMillis();
  TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
  readTestFile(fileName);
  ts.close();
  long endTime = System.currentTimeMillis();

  String[] expectedSpanNames = {
    "testReadTraceHooks",
    "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
    "ClientNamenodeProtocol#getBlockLocations",
    "OpReadBlockProto"
  };
  assertSpanNamesFound(expectedSpanNames);

  // The trace should last about the same amount of time as the test
  Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
  Span s = map.get("testReadTraceHooks").get(0);
  Assert.assertNotNull(s);

  long spanStart = s.getStartTimeMillis();
  long spanEnd = s.getStopTimeMillis();
  Assert.assertTrue(spanStart - startTime < 100);
  Assert.assertTrue(spanEnd - endTime < 100);

  // There should only be one trace id, as all spans should be rooted in
  // the top trace.
  for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
    Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
  }
  SetSpanReceiver.SetHolder.spans.clear();
}
 
Example 19
Source Project: hadoop   Source File: RemoteBlockReader2.java    License: Apache License 2.0
@Override
public synchronized int read(byte[] buf, int off, int len) 
                             throws IOException {

  UUID randomId = null;
  if (LOG.isTraceEnabled()) {
    randomId = UUID.randomUUID();
    LOG.trace(String.format("Starting read #%s file %s from datanode %s",
      randomId.toString(), this.filename,
      this.datanodeID.getHostName()));
  }

  if (curDataSlice == null || (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
    TraceScope scope = Trace.startSpan(
        "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
    try {
      readNextPacket();
    } finally {
      scope.close();
    }
  }

  if (LOG.isTraceEnabled()) {
    LOG.trace("Finishing read #" + randomId);
  }

  if (curDataSlice.remaining() == 0) {
    // we're at EOF now
    return -1;
  }
  
  int nRead = Math.min(curDataSlice.remaining(), len);
  curDataSlice.get(buf, off, nRead);
  
  return nRead;
}
 
Example 20
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Create one snapshot.
 * 
 * @param snapshotRoot The directory where the snapshot is to be taken
 * @param snapshotName Name of the snapshot
 * @return the snapshot path.
 * @see ClientProtocol#createSnapshot(String, String)
 */
public String createSnapshot(String snapshotRoot, String snapshotName)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("createSnapshot", traceSampler);
  try {
    return namenode.createSnapshot(snapshotRoot, snapshotName);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
 
Example 21
Source Project: hadoop   Source File: Sender.java    License: Apache License 2.0
@Override
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
  ReleaseShortCircuitAccessRequestProto.Builder builder =
      ReleaseShortCircuitAccessRequestProto.newBuilder().
      setSlotId(PBHelper.convert(slotId));
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
        .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
  }
  ReleaseShortCircuitAccessRequestProto proto = builder.build();
  send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
}
 
Example 22
Source Project: hadoop   Source File: Sender.java    License: Apache License 2.0
@Override
public void requestShortCircuitShm(String clientName) throws IOException {
  ShortCircuitShmRequestProto.Builder builder =
      ShortCircuitShmRequestProto.newBuilder().
      setClientName(clientName);
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
        .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
  }
  ShortCircuitShmRequestProto proto = builder.build();
  send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);
}
 
Example 23
Source Project: hadoop   Source File: DataTransferProtoUtil.java    License: Apache License 2.0
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
    Token<BlockTokenIdentifier> blockToken) {
  BaseHeaderProto.Builder builder =  BaseHeaderProto.newBuilder()
    .setBlock(PBHelper.convert(blk))
    .setToken(PBHelper.convert(blockToken));
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
        .setTraceId(s.getTraceId())
        .setParentId(s.getSpanId()));
  }
  return builder.build();
}
 
Example 24
Source Project: pentaho-hadoop-shims   Source File: HadoopShim.java    License: Apache License 2.0
@Override
public Class[] getHbaseDependencyClasses() {
  return new Class[] {
    HConstants.class, ClientProtos.class, Put.class, CompatibilityFactory.class, TableMapper.class,
    ZooKeeper.class, Channel.class, Message.class, Lists.class, Trace.class, MetricsRegistry.class
  };
}
 
Example 25
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Get all the current snapshottable directories.
 * @return All the current snapshottable directories
 * @throws IOException
 * @see ClientProtocol#getSnapshottableDirListing()
 */
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("getSnapshottableDirListing",
      traceSampler);
  try {
    return namenode.getSnapshottableDirListing();
  } catch(RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
 
Example 26
Source Project: hadoop   Source File: CachePoolIterator.java    License: Apache License 2.0
@Override
public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
    throws IOException {
  TraceScope scope = Trace.startSpan("listCachePools", traceSampler);
  try {
    return namenode.listCachePools(prevKey);
  } finally {
    scope.close();
  }
}
 
Example 27
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
public void removeCacheDirective(long id)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("removeCacheDirective", traceSampler);
  try {
    namenode.removeCacheDirective(id);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
 
Example 28
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Allow snapshot on a directory.
 * 
 * @see ClientProtocol#allowSnapshot(String snapshotRoot)
 */
public void allowSnapshot(String snapshotRoot) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("allowSnapshot", traceSampler);
  try {
    namenode.allowSnapshot(snapshotRoot);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
 
Example 29
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
private long[] callGetStats() throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("getStats", traceSampler);
  try {
    return namenode.getStats();
  } finally {
    scope.close();
  }
}
 
Example 30
Source Project: big-c   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Requests the namenode to tell all datanodes to use a new, non-persistent
 * bandwidth value for dfs.balance.bandwidthPerSec.
 * See {@link ClientProtocol#setBalancerBandwidth(long)} 
 * for more details.
 * 
 * @see ClientProtocol#setBalancerBandwidth(long)
 */
public void setBalancerBandwidth(long bandwidth) throws IOException {
  TraceScope scope = Trace.startSpan("setBalancerBandwidth", traceSampler);
  try {
    namenode.setBalancerBandwidth(bandwidth);
  } finally {
    scope.close();
  }
}