Java Code Examples for org.apache.hadoop.hdfs.protocol.ExtendedBlock#setNumBytes()

The following examples show how to use org.apache.hadoop.hdfs.protocol.ExtendedBlock#setNumBytes(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
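Before looking at the project examples, here is a minimal, self-contained sketch of the method itself. The block pool ID, block ID, and sizes below are invented for illustration; setNumBytes() simply records the block's length in bytes on the ExtendedBlock instance.

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class SetNumBytesSketch {
  public static void main(String[] args) {
    // Hypothetical block pool ID and block ID, with length 0 and generation stamp 1000.
    ExtendedBlock block = new ExtendedBlock("BP-0000000000-127.0.0.1-0", 42L, 0, 1000);

    // Record the block's actual length once the data has been written.
    block.setNumBytes(134217728L); // 128 MB

    System.out.println(block.getNumBytes()); // prints 134217728
  }
}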
Example 1
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * Update replica with the new generation stamp, block ID, and length.
 */
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
    final long recoveryId, final long newBlockId, final long newLength)
    throws IOException {
  final String storageID = data.updateReplicaUnderRecovery(oldBlock,
      recoveryId, newBlockId, newLength);
  // Notify the namenode of the updated block info. This is important
  // for HA, since otherwise the standby node may lose track of the
  // block locations until the next block report.
  ExtendedBlock newBlock = new ExtendedBlock(oldBlock);
  newBlock.setGenerationStamp(recoveryId);
  newBlock.setBlockId(newBlockId);
  newBlock.setNumBytes(newLength);
  notifyNamenodeReceivedBlock(newBlock, "", storageID);
  return storageID;
}
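A note on the pattern above: the datanode first applies the new generation stamp, block ID, and length to its local replica, then mirrors the same three updates onto a copy of the old block (via the ExtendedBlock copy constructor) before reporting it. As the inline comment explains, notifying the namenode immediately matters for HA: a standby namenode would otherwise not learn the replica's new identity and size until the next block report.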
 
Example 2
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * Transfer a replica to the datanode targets.
 * @param b the block to transfer.
 *          The corresponding replica must be an RBW or a Finalized.
 *          Its GS and numBytes will be set to
 *          the stored GS and the visible length. 
 * @param targets targets to transfer the block to
 * @param client client name
 */
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
    final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
    final String client) throws IOException {
  final long storedGS;
  final long visible;
  final BlockConstructionStage stage;

  //get replica information
  synchronized(data) {
    Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
        b.getBlockId());
    if (null == storedBlock) {
      throw new IOException(b + " not found in datanode.");
    }
    storedGS = storedBlock.getGenerationStamp();
    if (storedGS < b.getGenerationStamp()) {
      throw new IOException(storedGS
          + " = storedGS < b.getGenerationStamp(), b=" + b);
    }
    // Update the genstamp with storedGS
    b.setGenerationStamp(storedGS);
    if (data.isValidRbw(b)) {
      stage = BlockConstructionStage.TRANSFER_RBW;
    } else if (data.isValidBlock(b)) {
      stage = BlockConstructionStage.TRANSFER_FINALIZED;
    } else {
      final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
      throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
    }
    visible = data.getReplicaVisibleLength(b);
  }
  //set visible length
  b.setNumBytes(visible);

  if (targets.length > 0) {
    new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
  }
}
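Here setNumBytes() pins the block's length to the replica's visible length, read under the dataset lock, before the block is handed to DataTransfer. For an RBW replica that is still being written, this presumably keeps the transferred copy limited to the bytes the pipeline has already acknowledged, matching the javadoc's promise that the GS and numBytes are set to the stored GS and the visible length.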
 
Example 3
Source File: TestSimulatedFSDataset.java    From hadoop with Apache License 2.0
int addSomeBlocks(SimulatedFSDataset fsdataset, int startingBlockId)
    throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
    ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0); 
    // We pass the expected length as zero; fsdataset should use the size of
    // the actual data written.
    ReplicaInPipelineInterface bInfo = fsdataset.createRbw(
        StorageType.DEFAULT, b, false).getReplica();
    ReplicaOutputStreams out = bInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut  = out.getDataOut();
      assertEquals(0, fsdataset.getLength(b));
      for (int j=1; j <= blockIdToLen(i); ++j) {
        dataOut.write(j);
        assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
        bytesAdded++;
      }
    } finally {
      out.close();
    }
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(b);
    assertEquals(blockIdToLen(i), fsdataset.getLength(b));
  }
  return bytesAdded;  
}
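Distilled, the test's core pattern is: create the block with an expected length of zero, write the data, then fix the length with setNumBytes() before finalizing. A fragment of that pattern, reusing the fsdataset from the test above with a hypothetical block pool ID and a 10-byte payload:

// Sketch only: "bp-test" and the 10-byte length are invented for illustration.
ExtendedBlock b = new ExtendedBlock("bp-test", 1L, 0, 0); // expected length 0
// ... write 10 bytes through the replica's output streams, as above ...
b.setNumBytes(10);          // record the real length before finalizing
fsdataset.finalizeBlock(b); // the dataset now reports getLength(b) == 10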
 
Example 4
Source File: FanOutOneBlockAsyncDFSOutputHelper.java    From hbase with Apache License 2.0
private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
    String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
    BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
    Class<? extends Channel> channelClass) {
  StorageType[] storageTypes = locatedBlock.getStorageTypes();
  DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
  boolean connectToDnViaHostname =
      conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
  int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
  ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
  blockCopy.setNumBytes(locatedBlock.getBlockSize());
  ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
    .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy))
      .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
    .setClientName(clientName).build();
  ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
  OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
      .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
      .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
      .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
      .setRequestedChecksum(checksumProto)
      .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
  List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
  for (int i = 0; i < datanodeInfos.length; i++) {
    DatanodeInfo dnInfo = datanodeInfos[i];
    StorageType storageType = storageTypes[i];
    Promise<Channel> promise = eventLoopGroup.next().newPromise();
    futureList.add(promise);
    String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
    new Bootstrap().group(eventLoopGroup).channel(channelClass)
        .option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

          @Override
          protected void initChannel(Channel ch) throws Exception {
            // We need the remote address of the channel, so we can only move on after the
            // channel is connected. Leave an empty implementation here because netty does
            // not allow a null handler.
          }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

          @Override
          public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
              initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder,
                timeoutMs, client, locatedBlock.getBlockToken(), promise);
            } else {
              promise.tryFailure(future.cause());
            }
          }
        });
  }
  return futureList;
}
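Note that the block is cloned (new ExtendedBlock(locatedBlock.getBlock())) before setNumBytes() is called, so the LocatedBlock's own block object stays untouched while the copy carries the current block size (locatedBlock.getBlockSize()) into the OpWriteBlock request header. A plausible reading is that this tells each datanode how many bytes the block already holds when the output pipeline is established.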