org.apache.hadoop.hdfs.server.protocol.BlockCommand Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.protocol.BlockCommand. A BlockCommand is the DatanodeCommand a name-node sends to a datanode, typically in reply to a heartbeat or block report, instructing it to act on a set of blocks: transfer (replicate) them to other datanodes, invalidate (delete) them, or recover them. The examples come from several Hadoop-derived projects (hadoop, big-c, RDFS, hadoop-gpu); each header names the original source file, project, and license.
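
Before the project examples, here is a minimal, self-contained sketch of the basic pattern: build a BlockCommand and dispatch on its action code. It is not taken from any project below; it uses the three-argument constructor seen in Examples #15 and #18, and the block pool id "bp1", the block ids, and the class name are illustrative only.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class BlockCommandSketch {

  // Build a command telling a datanode that these blocks are obsolete
  // and may be deleted (the same form used in Examples #15 and #18).
  static BlockCommand invalidate(String bpid, Block[] blocks) {
    return new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, bpid, blocks);
  }

  // Dispatch on the action code, as the datanode-side processCommand
  // methods in Examples #21 and #27 do.
  static void dispatch(DatanodeCommand cmd) {
    if (!(cmd instanceof BlockCommand)) {
      return; // other DatanodeCommand subtypes are handled elsewhere
    }
    BlockCommand bcmd = (BlockCommand) cmd;
    switch (bcmd.getAction()) {
    case DatanodeProtocol.DNA_TRANSFER:
      // replicate bcmd.getBlocks() to the datanodes in bcmd.getTargets()
      break;
    case DatanodeProtocol.DNA_INVALIDATE:
      // delete bcmd.getBlocks() locally
      break;
    default:
      // DNA_SHUTDOWN, DNA_RECOVERBLOCK, ... (see Example #21)
      break;
    }
  }

  public static void main(String[] args) {
    Block[] blocks = { new Block(21), new Block(22) };
    dispatch(invalidate("bp1", blocks));
  }
}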
Example #1
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
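The inverse direction, DatanodeCommand to DatanodeCommandProto, appears in Example #25; the BlockCommand-specific conversions are shown in Examples #16, #19, and #28.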
 
Example #2
Source File: NNThroughputBenchmark.java    From hadoop-gpu with Apache License 2.0
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Example #3
Source File: TestDatanodeDescriptor.java    From hadoop-gpu with Apache License 2.0
/**
 * Test that getInvalidateBlocks observes the max limit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  
  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
}
 
Example #4
Source File: NNThroughputBenchmark.java    From hadoop with Apache License 2.0
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // report local storage and send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Example #5
Source File: AvatarNode.java    From RDFS with Apache License 2.0
public DatanodeCommand blockReportNew(DatanodeRegistration nodeReg, BlockReport rep) throws IOException {
  if (runInfo.shutdown || !runInfo.isRunning) {
    return null;
  }
  if (ignoreDatanodes()) {
    LOG.info("Standby fell behind. Telling " + nodeReg.toString() +
              " to back off");
    // Do not process block reports yet as the ingest thread is catching up
    return AvatarDatanodeCommand.BACKOFF;
  }

  if (currentAvatar == Avatar.STANDBY) {
    Collection<Block> failed = super.blockReportWithRetries(nodeReg, rep);

    BlockCommand bCmd = new BlockCommand(DatanodeProtocols.DNA_RETRY,
        failed.toArray(new Block[failed.size()]));
    return bCmd;
  } else {
    return super.blockReport(nodeReg, rep);
  }
}
 
Example #6
Source File: NNThroughputBenchmark.java    From RDFS with Apache License 2.0
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Example #7
Source File: NNThroughputBenchmark.java    From RDFS with Apache License 2.0
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // send a heartbeat to the name-node
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
 
Example #8
Source File: FsDatasetAsyncDiskService.java    From big-c with Apache License 2.0
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
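    // Blocks whose length is the BlockCommand.NO_ACK sentinel need no deletion report to the name-node.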
    if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
 
Example #9
Source File: PBHelper.java    From big-c with Apache License 2.0
public static DatanodeCommand convert(DatanodeCommandProto proto) {
  switch (proto.getCmdType()) {
  case BalancerBandwidthCommand:
    return PBHelper.convert(proto.getBalancerCmd());
  case BlockCommand:
    return PBHelper.convert(proto.getBlkCmd());
  case BlockRecoveryCommand:
    return PBHelper.convert(proto.getRecoveryCmd());
  case FinalizeCommand:
    return PBHelper.convert(proto.getFinalizeCmd());
  case KeyUpdateCommand:
    return PBHelper.convert(proto.getKeyUpdateCmd());
  case RegisterCommand:
    return REG_CMD;
  case BlockIdCommand:
    return PBHelper.convert(proto.getBlkIdCmd());
  default:
    return null;
  }
}
 
Example #10
Source File: TestDatanodeDescriptor.java    From RDFS with Apache License 2.0
/**
 * Test that getInvalidateBlocks observes the max limit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  
  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
}
 
Example #11
Source File: FsDatasetAsyncDiskService.java    From hadoop with Apache License 2.0
@Override
public void run() {
  long dfsBytes = blockFile.length() + metaFile.length();
  boolean result;

  result = (trashDirectory == null) ? deleteFiles() : moveFiles();

  if (!result) {
    LOG.warn("Unexpected error trying to "
        + (trashDirectory == null ? "delete" : "move")
        + " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
        + " at file " + blockFile + ". Ignored.");
  } else {
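    // Blocks whose length is the BlockCommand.NO_ACK sentinel need no deletion report to the name-node.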
    if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
      datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
    }
    volume.decDfsUsed(block.getBlockPoolId(), dfsBytes);
    LOG.info("Deleted " + block.getBlockPoolId() + " "
        + block.getLocalBlock() + " file " + blockFile);
  }
  updateDeletedBlockId(block);
  IOUtils.cleanup(null, volumeRef);
}
 
Example #12
Source File: NNThroughputBenchmark.java    From big-c with Apache License 2.0
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // report local storage and send a heartbeat to the name-node
  StorageReport[] rep = { new StorageReport(storage,
      false, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration,
      rep, 0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand)cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
                              bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
 
Example #13
Source File: TestPBHelper.java    From big-c with Apache License 2.0
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
  StorageType[][] storageTypes = {{StorageType.DEFAULT},
      {StorageType.DEFAULT, StorageType.DEFAULT}};
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
 
Example #14
Source File: DatanodeDescriptor.java    From RDFS with Apache License 2.0
/**
 * Remove up to the specified number of blocks from the set of blocks
 * to be invalidated, returning them as a BlockCommand.
 */
BlockCommand getInvalidateBlocks(int maxblocks) {
  Block[] deleteList = null;
  synchronized (invalidateBlocks) {
    deleteList = invalidateBlocks.pollToArray(new Block[Math.min(
        invalidateBlocks.size(), maxblocks)]);
  }
  return (deleteList == null || deleteList.length == 0) ? 
      null: new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, deleteList);
}
 
Example #15
Source File: TestBPOfferService.java    From big-c with Apache License 2.0
/**
 * Test that DNA_INVALIDATE commands from the standby are ignored.
 */
@Test
public void testIgnoreDeletionsFromNonActive() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);

  // Ask to invalidate FAKE_BLOCK when block report hits the
  // standby
  Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
      FAKE_BPID, new Block[] { FAKE_BLOCK.getLocalBlock() }))
      .when(mockNN2).blockReport(
          Mockito.<DatanodeRegistration>anyObject(),  
          Mockito.eq(FAKE_BPID),
          Mockito.<StorageBlockReport[]>anyObject(),
          Mockito.<BlockReportContext>anyObject());

  bpos.start();
  try {
    waitForInitialization(bpos);
    
    // Should get block reports from both NNs
    waitForBlockReport(mockNN1);
    waitForBlockReport(mockNN2);

  } finally {
    bpos.stop();
  }
  
  // Should ignore the delete command from the standby
  Mockito.verify(mockFSDataset, Mockito.never())
    .invalidate(Mockito.eq(FAKE_BPID),
        (Block[]) Mockito.anyObject());
}
 
Example #16
Source File: PBHelper.java    From big-c with Apache License 2.0
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  StorageType[][] types = cmd.getTargetStorageTypes();
  if (types != null) {
    builder.addAllTargetStorageTypes(convert(types));
  }
  return builder.build();
}
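Example #13 above round-trips this conversion together with the inverse, BlockCommandProto-to-BlockCommand direction shown in Example #28.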
 
Example #17
Source File: TestPBHelper.java    From hadoop with Apache License 2.0
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
  StorageType[][] storageTypes = {{StorageType.DEFAULT},
      {StorageType.DEFAULT, StorageType.DEFAULT}};
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
 
Example #18
Source File: TestBPOfferService.java    From hadoop with Apache License 2.0
/**
 * Test that DNA_INVALIDATE commands from the standby are ignored.
 */
@Test
public void testIgnoreDeletionsFromNonActive() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);

  // Ask to invalidate FAKE_BLOCK when block report hits the
  // standby
  Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
      FAKE_BPID, new Block[] { FAKE_BLOCK.getLocalBlock() }))
      .when(mockNN2).blockReport(
          Mockito.<DatanodeRegistration>anyObject(),  
          Mockito.eq(FAKE_BPID),
          Mockito.<StorageBlockReport[]>anyObject(),
          Mockito.<BlockReportContext>anyObject());

  bpos.start();
  try {
    waitForInitialization(bpos);
    
    // Should get block reports from both NNs
    waitForBlockReport(mockNN1);
    waitForBlockReport(mockNN2);

  } finally {
    bpos.stop();
  }
  
  // Should ignore the delete command from the standby
  Mockito.verify(mockFSDataset, Mockito.never())
    .invalidate(Mockito.eq(FAKE_BPID),
        (Block[]) Mockito.anyObject());
}
 
Example #19
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static BlockCommandProto convert(BlockCommand cmd) {
  BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    builder.setAction(BlockCommandProto.Action.TRANSFER);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    builder.setAction(BlockCommandProto.Action.INVALIDATE);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setAction(BlockCommandProto.Action.SHUTDOWN);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  Block[] blocks = cmd.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    builder.addBlocks(PBHelper.convert(blocks[i]));
  }
  builder.addAllTargets(convert(cmd.getTargets()))
         .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
  StorageType[][] types = cmd.getTargetStorageTypes();
  if (types != null) {
    builder.addAllTargetStorageTypes(convert(types));
  }
  return builder.build();
}
 
Example #20
Source File: DatanodeDescriptor.java    From RDFS with Apache License 2.0
BlockCommand getLeaseRecoveryCommand(int maxTransfers) {
  List<BlockTargetPair> blocktargetlist = recoverBlocks.poll(maxTransfers);
  return blocktargetlist == null? null:
      new BlockCommand(DatanodeProtocol.DNA_RECOVERBLOCK, blocktargetlist);
}
 
Example #21
Source File: DataNode.java    From hadoop-gpu with Apache License 2.0
/**
 * Process a single command sent by the name-node.
 * @param cmd the command to process
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd) throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null;

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block toDelete[] = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(toDelete);
      }
      data.invalidate(toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.blocksRemoved.inc(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    this.shutdown();
    return false;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizeUpgrade();
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(bcmd.getBlocks(), bcmd.getTargets());
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  return true;
}
 
Example #22
Source File: DatanodeDescriptor.java    From hadoop-gpu with Apache License 2.0
/**
 * Remove up to the specified number of blocks from the set of blocks
 * to be invalidated, returning them as a BlockCommand.
 */
BlockCommand getInvalidateBlocks(int maxblocks) {
  Block[] deleteList = getBlockArray(invalidateBlocks, maxblocks); 
  return deleteList == null? 
      null: new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, deleteList);
}
 
Example #23
Source File: DatanodeDescriptor.java    From hadoop-gpu with Apache License 2.0
BlockCommand getLeaseRecoveryCommand(int maxTransfers) {
  List<BlockTargetPair> blocktargetlist = recoverBlocks.poll(maxTransfers);
  return blocktargetlist == null? null:
      new BlockCommand(DatanodeProtocol.DNA_RECOVERBLOCK, blocktargetlist);
}
 
Example #24
Source File: DatanodeDescriptor.java    From hadoop-gpu with Apache License 2.0
BlockCommand getReplicationCommand(int maxTransfers) {
  List<BlockTargetPair> blocktargetlist = replicateBlocks.poll(maxTransfers);
  return blocktargetlist == null? null:
      new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blocktargetlist);
}
 
Example #25
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
  DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
  if (datanodeCommand == null) {
    return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
        .build();
  }
  switch (datanodeCommand.getAction()) {
  case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
    builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
        .setBalancerCmd(
            PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
    builder
        .setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
        .setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
        .setRecoveryCmd(
            PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
        .setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_REGISTER:
    builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
        .setRegisterCmd(REG_CMD_PROTO);
    break;
  case DatanodeProtocol.DNA_TRANSFER:
  case DatanodeProtocol.DNA_INVALIDATE:
  case DatanodeProtocol.DNA_SHUTDOWN:
    builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
      setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_CACHE:
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
      setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
    break;
  case DatanodeProtocol.DNA_UNKNOWN: //Not expected
  default:
    builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
  }
  return builder.build();
}
 
Example #26
Source File: TestHeartbeatHandling.java    From hadoop-gpu with Apache License 2.0
/**
 * Test if {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, int, int)}
 * can pick up replication and/or invalidate requests and
 * observe the max limit.
 */
public void testHeartbeat() throws Exception {
  final Configuration conf = new Configuration();
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    final DatanodeRegistration nodeReg = cluster.getDataNodes().get(0).dnRegistration;
    DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
    
    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT = conf.getInt("dfs.max-repl-streams", 2);
    final int MAX_INVALIDATE_LIMIT = FSNamesystem.BLOCK_INVALIDATE_CHUNK;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
    final DatanodeDescriptor[] ONE_TARGET = new DatanodeDescriptor[1];

    synchronized (namesystem.heartbeats) {
      for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
        dd.addBlockToBeReplicated(
            new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
      }
      DatanodeCommand[] cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

      ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
      for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
        blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
      }
      dd.addBlocksToBeInvalidated(blockList);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(null, cmds);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #27
Source File: DataNode.java    From RDFS with Apache License 2.0
/**
 * Process a single command sent by the name-node.
 * @param cmd the command to process
 * @return true if further processing may be required or false otherwise.
 * @throws IOException
 */
private boolean processCommand(DatanodeCommand cmd) throws IOException {
  if (cmd == null)
    return true;
  final BlockCommand bcmd = cmd instanceof BlockCommand? (BlockCommand)cmd: null;

  boolean retValue = true;
  long startTime = System.currentTimeMillis();

  switch(cmd.getAction()) {
  case DatanodeProtocol.DNA_TRANSFER:
    // Send a copy of a block to another datanode
    transferBlocks(namespaceId,
        bcmd.getBlocks(), bcmd.getTargets());
    myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
    break;
  case DatanodeProtocol.DNA_INVALIDATE:
    //
    // Some local block(s) are obsolete and can be 
    // safely garbage-collected.
    //
    Block toDelete[] = bcmd.getBlocks();
    try {
      if (blockScanner != null) {
        blockScanner.deleteBlocks(namespaceId, toDelete);
      }        
      data.invalidate(namespaceId, toDelete);
    } catch(IOException e) {
      checkDiskError();
      throw e;
    }
    myMetrics.blocksRemoved.inc(toDelete.length);
    break;
  case DatanodeProtocol.DNA_SHUTDOWN:
    // shut down the data node
    shouldServiceRun = false;
    retValue = false;
    break;
  case DatanodeProtocol.DNA_REGISTER:
    // namenode requested a registration - at start or if NN lost contact
    LOG.info("DatanodeCommand action: DNA_REGISTER");
    if (shouldRun) {
      register();
      firstBlockReportSent = false;
    }
    break;
  case DatanodeProtocol.DNA_FINALIZE:
    storage.finalizedUpgrade(namespaceId);
    break;
  case UpgradeCommand.UC_ACTION_START_UPGRADE:
    // start distributed upgrade here
    processDistributedUpgradeCommand((UpgradeCommand)cmd);
    break;
  case DatanodeProtocol.DNA_RECOVERBLOCK:
    recoverBlocks(namespaceId, bcmd.getBlocks(), bcmd.getTargets());
    break;
  default:
    LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
  }
  long endTime = System.currentTimeMillis();
  if (endTime - startTime > 1000) {
    LOG.info("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  } else if (LOG.isDebugEnabled()) {
    LOG.debug("processCommand() took " + (endTime - startTime)
        + " msec to process command " + cmd.getAction() + " from " + nnAddr);
  }
  return retValue;
}
 
Example #28
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static BlockCommand convert(BlockCommandProto blkCmd) {
  List<BlockProto> blockProtoList = blkCmd.getBlocksList();
  Block[] blocks = new Block[blockProtoList.size()];
  for (int i = 0; i < blockProtoList.size(); i++) {
    blocks[i] = PBHelper.convert(blockProtoList.get(i));
  }
  List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
  DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
  for (int i = 0; i < targetList.size(); i++) {
    targets[i] = PBHelper.convert(targetList.get(i));
  }

  StorageType[][] targetStorageTypes = new StorageType[targetList.size()][];
  List<StorageTypesProto> targetStorageTypesList = blkCmd.getTargetStorageTypesList();
  if (targetStorageTypesList.isEmpty()) { // missing storage types
    for(int i = 0; i < targetStorageTypes.length; i++) {
      targetStorageTypes[i] = new StorageType[targets[i].length];
      Arrays.fill(targetStorageTypes[i], StorageType.DEFAULT);
    }
  } else {
    for(int i = 0; i < targetStorageTypes.length; i++) {
      List<StorageTypeProto> p = targetStorageTypesList.get(i).getStorageTypesList();
      targetStorageTypes[i] = convertStorageTypes(p, targets[i].length);
    }
  }

  List<StorageUuidsProto> targetStorageUuidsList = blkCmd.getTargetStorageUuidsList();
  String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
  for(int i = 0; i < targetStorageIDs.length; i++) {
    List<String> storageIDs = targetStorageUuidsList.get(i).getStorageUuidsList();
    targetStorageIDs[i] = storageIDs.toArray(new String[storageIDs.size()]);
  }

  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkCmd.getAction()) {
  case TRANSFER:
    action = DatanodeProtocol.DNA_TRANSFER;
    break;
  case INVALIDATE:
    action = DatanodeProtocol.DNA_INVALIDATE;
    break;
  case SHUTDOWN:
    action = DatanodeProtocol.DNA_SHUTDOWN;
    break;
  default:
    throw new AssertionError("Unknown action type: " + blkCmd.getAction());
  }
  return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets,
      targetStorageTypes, targetStorageIDs);
}
 
Example #29
Source File: TestHeartbeatHandling.java    From big-c with Apache License 2.0
/**
 * Test if
 * {@link FSNamesystem#handleHeartbeat}
 * can pick up replication and/or invalidate requests and observe the max
 * limit.
 */
@Test
public void testHeartbeat() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm = namesystem.getBlockManager(
        ).getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg =
      DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
    final String storageID = DatanodeStorage.generateUuid();
    dd.updateStorage(new DatanodeStorage(storageID));

    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
    final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
    final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};

    try {
      namesystem.writeLock();
      synchronized(hm) {
        for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
          dd.addBlockToBeReplicated(
              new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
              ONE_TARGET);
        }
        DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
            namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

        ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
        for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
          blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
        }
        dd.addBlocksToBeInvalidated(blockList);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(0, cmds.length);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #30
Source File: DatanodeDescriptor.java    From RDFS with Apache License 2.0
BlockCommand getReplicationCommand(int maxTransfers) {
  List<BlockTargetPair> blocktargetlist = replicateBlocks.poll(maxTransfers);
  return blocktargetlist == null? null:
      new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blocktargetlist);
}
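
The DatanodeDescriptor getters above (Examples #14, #20, #22, #23, #24, and #30) are what the name-node drains when assembling a heartbeat reply. The sketch below shows that pattern under stated assumptions: DatanodeDescriptorLike is a stand-in interface invented here because the real methods are package-private, and the limits are supplied by the caller as in the tests of Examples #26 and #29.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;

public class HeartbeatReplySketch {

  // Hypothetical stand-in for the package-private DatanodeDescriptor methods.
  interface DatanodeDescriptorLike {
    BlockCommand getReplicationCommand(int maxTransfers);
    BlockCommand getInvalidateBlocks(int maxblocks);
  }

  // Drain pending work into at most one DNA_TRANSFER and one DNA_INVALIDATE
  // command, each capped by its limit; transfer commands come first, matching
  // the command order asserted in Examples #26 and #29.
  static DatanodeCommand[] buildCommands(DatanodeDescriptorLike dd,
                                         int maxReplicate, int maxInvalidate) {
    List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
    BlockCommand transfer = dd.getReplicationCommand(maxReplicate);
    if (transfer != null) {
      cmds.add(transfer);
    }
    BlockCommand invalidate = dd.getInvalidateBlocks(maxInvalidate);
    if (invalidate != null) {
      cmds.add(invalidate);
    }
    // Older name-nodes reply with null when nothing is pending (Example #26).
    return cmds.isEmpty() ? null : cmds.toArray(new DatanodeCommand[cmds.size()]);
  }
}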
}