org.apache.hadoop.hdfs.server.common.GenerationStamp Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.common.GenerationStamp. They are drawn from several Hadoop forks (hadoop, big-c, RDFS, hadoop-gpu); the source file, project, and license for each example are noted above its code.
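For orientation: a generation stamp is a monotonically increasing number that versions an HDFS block, letting stale replicas be told apart from current ones. The sketch below uses only members that appear in the examples on this page; note that the older forks (RDFS, hadoop-gpu) expose WILDCARD_STAMP and FIRST_VALID_STAMP while the newer ones (hadoop, big-c) expose LAST_RESERVED_STAMP and GRANDFATHER_GENERATION_STAMP, so no single snippet compiles against every fork. This is a minimal sketch against the newer forks, and the no-argument constructor is an assumption based on the counter usage shown in Examples #11, #14, #21 and #22.

import org.apache.hadoop.hdfs.server.common.GenerationStamp;

public class GenerationStampTour {
  public static void main(String[] args) {
    // In the newer forks GenerationStamp is a sequential counter
    // (see BlockIdManager in Examples #11, #14, #21 and #22).
    GenerationStamp gs = new GenerationStamp();            // assumed to start at LAST_RESERVED_STAMP
    gs.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);  // reset, as in clear()
    gs.skipTo(GenerationStamp.LAST_RESERVED_STAMP + 100);     // jump forward, as in the V2 upgrade
    System.out.println(gs.getCurrentValue());                 // LAST_RESERVED_STAMP + 100
  }
}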
Example #1
Source File: TestGetBlocks.java    From RDFS with Apache License 2.0
public void testGenerationStampWildCard() {
  Map<Block, Long> map = new HashMap<Block, Long>();
  final Random RAN = new Random();
  final long seed = RAN.nextLong();
  System.out.println("seed=" +  seed);
  RAN.setSeed(seed);

  long[] blkids = new long[10]; 
  for(int i = 0; i < blkids.length; i++) {
    blkids[i] = 1000L + RAN.nextInt(100000);
    map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
  }
  System.out.println("map=" + map.toString().replace(",", "\n  "));
  
  for(int i = 0; i < blkids.length; i++) {
    Block b = new Block(blkids[i], 0, GenerationStamp.WILDCARD_STAMP);
    Long v = map.get(b);
    System.out.println(b + " => " + v);
    assertEquals(blkids[i], v.longValue());
  }
}
 
Example #2
Source File: TestDatanodeDescriptor.java    From big-c with Apache License 2.0
/**
 * Test that getInvalidateBlocks observes the maxlimit.
 */
@Test
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  
  DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, REMAINING_BLOCKS);
}
 
Example #3
Source File: FsDatasetUtil.java    From hadoop with Apache License 2.0
/**
 * Find the meta-file for the specified block file
 * and then return the generation stamp from the name of the meta-file.
 */
static long getGenerationStampFromFile(File[] listdir, File blockFile) {
  String blockName = blockFile.getName();
  for (int j = 0; j < listdir.length; j++) {
    String path = listdir[j].getName();
    if (!path.startsWith(blockName)) {
      continue;
    }
    if (blockFile == listdir[j]) {
      continue;
    }
    return Block.getGenerationStamp(listdir[j].getName());
  }
  FsDatasetImpl.LOG.warn("Block " + blockFile + " does not have a metafile!");
  return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
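This helper leans on the datanode's on-disk naming convention: a block is stored as a data file "blk_<id>" plus a checksum meta-file "blk_<id>_<genStamp>.meta", with the generation stamp embedded in the meta-file name (Example #17 parses the same convention by hand). A minimal usage sketch with hypothetical file names, as it would look from within the same package:

// Hypothetical directory listing for one block with generation stamp 1001.
File[] listdir = {
    new File("blk_1073741825"),            // block data file
    new File("blk_1073741825_1001.meta")   // its meta-file
};
long gs = FsDatasetUtil.getGenerationStampFromFile(listdir, listdir[0]);
// gs == 1001; if no meta-file had matched, the grandfather stamp is returned.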
 
Example #4
Source File: TestDatanodeDescriptor.java    From hadoop-gpu with Apache License 2.0
/**
 * Test that getInvalidateBlocks observes the maxlimit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  
  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
}
 
Example #5
Source File: TestGetBlocks.java    From hadoop with Apache License 2.0
@Test
public void testBlockKey() {
  Map<Block, Long> map = new HashMap<Block, Long>();
  final Random RAN = new Random();
  final long seed = RAN.nextLong();
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);

  long[] blkids = new long[10];
  for (int i = 0; i < blkids.length; i++) {
    blkids[i] = 1000L + RAN.nextInt(100000);
    map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
  }
  System.out.println("map=" + map.toString().replace(",", "\n  "));

  for (int i = 0; i < blkids.length; i++) {
    Block b = new Block(blkids[i], 0,
        GenerationStamp.GRANDFATHER_GENERATION_STAMP);
    Long v = map.get(b);
    System.out.println(b + " => " + v);
    assertEquals(blkids[i], v.longValue());
  }
}
 
Example #6
Source File: TestDatanodeDescriptor.java    From RDFS with Apache License 2.0
/**
 * Test that getInvalidateBlocks observes the maxlimit.
 */
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  
  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
}
 
Example #7
Source File: TestGetBlocks.java    From big-c with Apache License 2.0
@Test
public void testBlockKey() {
  Map<Block, Long> map = new HashMap<Block, Long>();
  final Random RAN = new Random();
  final long seed = RAN.nextLong();
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);

  long[] blkids = new long[10];
  for (int i = 0; i < blkids.length; i++) {
    blkids[i] = 1000L + RAN.nextInt(100000);
    map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
  }
  System.out.println("map=" + map.toString().replace(",", "\n  "));

  for (int i = 0; i < blkids.length; i++) {
    Block b = new Block(blkids[i], 0,
        GenerationStamp.GRANDFATHER_GENERATION_STAMP);
    Long v = map.get(b);
    System.out.println(b + " => " + v);
    assertEquals(blkids[i], v.longValue());
  }
}
 
Example #8
Source File: TestDatanodeDescriptor.java    From hadoop with Apache License 2.0
/**
 * Test that getInvalidateBlocks observes the maxlimit.
 */
@Test
public void testGetInvalidateBlocks() throws Exception {
  final int MAX_BLOCKS = 10;
  final int REMAINING_BLOCKS = 2;
  final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
  
  DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  for (int i=0; i<MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
  }
  dd.addBlocksToBeInvalidated(blockList);
  Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, MAX_LIMIT);
  bc = dd.getInvalidateBlocks(MAX_LIMIT);
  assertEquals(bc.length, REMAINING_BLOCKS);
}
 
Example #9
Source File: FsDatasetUtil.java    From big-c with Apache License 2.0
/**
 * Find the meta-file for the specified block file
 * and then return the generation stamp from the name of the meta-file.
 */
static long getGenerationStampFromFile(File[] listdir, File blockFile) {
  String blockName = blockFile.getName();
  for (int j = 0; j < listdir.length; j++) {
    String path = listdir[j].getName();
    if (!path.startsWith(blockName)) {
      continue;
    }
    if (blockFile == listdir[j]) {
      continue;
    }
    return Block.getGenerationStamp(listdir[j].getName());
  }
  FsDatasetImpl.LOG.warn("Block " + blockFile + " does not have a metafile!");
  return GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
 
Example #10
Source File: TestGetBlocks.java    From hadoop-gpu with Apache License 2.0
public void testGenerationStampWildCard() {
  Map<Block, Long> map = new HashMap<Block, Long>();
  final Random RAN = new Random();
  final long seed = RAN.nextLong();
  System.out.println("seed=" +  seed);
  RAN.setSeed(seed);

  long[] blkids = new long[10]; 
  for(int i = 0; i < blkids.length; i++) {
    blkids[i] = 1000L + RAN.nextInt(100000);
    map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
  }
  System.out.println("map=" + map.toString().replace(",", "\n  "));
  
  for(int i = 0; i < blkids.length; i++) {
    Block b = new Block(blkids[i], 0, GenerationStamp.WILDCARD_STAMP);
    Long v = map.get(b);
    System.out.println(b + " => " + v);
    assertEquals(blkids[i], v.longValue());
  }
}
 
Example #11
Source File: BlockIdManager.java    From big-c with Apache License 2.0
/**
 * Upgrades the generation stamp for the filesystem
 * by reserving a sufficient range for all existing blocks.
 * Should be invoked only during the first upgrade to
 * sequential block IDs.
 */
public long upgradeGenerationStampToV2() {
  Preconditions.checkState(generationStampV2.getCurrentValue() ==
    GenerationStamp.LAST_RESERVED_STAMP);
  generationStampV2.skipTo(generationStampV1.getCurrentValue() +
    HdfsConstants.RESERVED_GENERATION_STAMPS_V1);

  generationStampV1Limit = generationStampV2.getCurrentValue();
  return generationStampV2.getCurrentValue();
}
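To see the invariant this method establishes in concrete numbers (the values below are assumptions for illustration only; the method above defines the actual logic):

// Suppose the V1 generator has reached 5000 when the upgrade runs,
// and RESERVED_GENERATION_STAMPS_V1 is some fixed reserve R.
long v1Current = 5000L;
long R = 1024L * 1024L;            // assumed value, for illustration
long v1Limit = v1Current + R;      // what upgradeGenerationStampToV2() returns
// Stamps below v1Limit belong to the legacy V1 range; every stamp the V2
// generator issues afterwards is >= v1Limit, so the reserved gap keeps the
// two ranges from colliding even while V1 stamps are still being handed out
// for blocks written before the upgrade.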
 
Example #12
Source File: Block.java    From RDFS with Apache License 2.0
/** {@inheritDoc} */
public int compareTo(Block b) {
  //Wildcard generationStamp is NOT ALLOWED here
  validateGenerationStamp(this.generationStamp);
  validateGenerationStamp(b.generationStamp);

  if (blockId < b.blockId) {
    return -1;
  } else if (blockId == b.blockId) {
    return GenerationStamp.compare(generationStamp, b.generationStamp);
  } else {
    return 1;
  }
}
 
Example #13
Source File: TestBlockReplicationQueue.java    From RDFS with Apache License 2.0
protected void setUp(){
  blockList.clear();
  LOG.info("Generating blocks...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new BlockInfo(new Block(i, 0,
        GenerationStamp.FIRST_VALID_STAMP), 3));
  }
}
 
Example #14
Source File: BlockIdManager.java    From big-c with Apache License 2.0
public void clear() {
  generationStampV1.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
  generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
  getBlockIdGenerator().setCurrentValue(SequentialBlockIdGenerator
    .LAST_RESERVED_BLOCK_ID);
  generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
 
Example #15
Source File: TestComputeInvalidateWork.java    From big-c with Apache License 2.0
/**
 * Test if {@link BlockManager#computeInvalidateWork(int)}
 * can schedule invalidate work correctly 
 */
@Test(timeout=120000)
public void testCompInvalidate() throws Exception {
  final int blockInvalidateLimit = bm.getDatanodeManager()
      .blockInvalidateLimit;
  namesystem.writeLock();
  try {
    for (int i=0; i<nodes.length; i++) {
      for(int j=0; j<3*blockInvalidateLimit+1; j++) {
        Block block = new Block(i*(blockInvalidateLimit+1)+j, 0,
            GenerationStamp.LAST_RESERVED_STAMP);
        bm.addToInvalidates(block, nodes[i]);
      }
    }
    
    assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
        bm.computeInvalidateWork(NUM_OF_DATANODES+1));
    assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
        bm.computeInvalidateWork(NUM_OF_DATANODES));
    assertEquals(blockInvalidateLimit*(NUM_OF_DATANODES-1),
        bm.computeInvalidateWork(NUM_OF_DATANODES-1));
    int workCount = bm.computeInvalidateWork(1);
    if (workCount == 1) {
      assertEquals(blockInvalidateLimit+1, bm.computeInvalidateWork(2));
    } else {
      assertEquals(workCount, blockInvalidateLimit);
      assertEquals(2, bm.computeInvalidateWork(2));
    }
  } finally {
    namesystem.writeUnlock();
  }
}
 
Example #16
Source File: TestComputeInvalidateWork.java    From RDFS with Apache License 2.0
/**
 * Test if {@link FSNamesystem#computeInvalidateWork(int)}
 * can schedule invalidate work correctly 
 */
public void testCompInvalidate() throws Exception {
  final Configuration conf = new Configuration();
  final int NUM_OF_DATANODES = 3;
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    DatanodeDescriptor[] nodes =
      namesystem.heartbeats.toArray(new DatanodeDescriptor[NUM_OF_DATANODES]);
    assertEquals(nodes.length, NUM_OF_DATANODES);
    
    synchronized (namesystem) {
      for (int i=0; i<nodes.length; i++) {
        for(int j=0; j<3*namesystem.blockInvalidateLimit+1; j++) {
          Block block = new Block(i*(namesystem.blockInvalidateLimit+1)+j, 0,
              GenerationStamp.FIRST_VALID_STAMP);
          namesystem.addToInvalidatesNoLog(block, nodes[i], true);
        }
      }

      assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES,
          namesystem.computeInvalidateWork(NUM_OF_DATANODES+1));
      assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES,
          namesystem.computeInvalidateWork(NUM_OF_DATANODES));
      assertEquals(namesystem.blockInvalidateLimit*(NUM_OF_DATANODES-1),
          namesystem.computeInvalidateWork(NUM_OF_DATANODES-1));
      int workCount = namesystem.computeInvalidateWork(1);
      if (workCount == 1) {
        assertEquals(namesystem.blockInvalidateLimit+1,
            namesystem.computeInvalidateWork(2));
      } else {
        assertEquals(workCount, namesystem.blockInvalidateLimit);
        assertEquals(2, namesystem.computeInvalidateWork(2));
      }
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #17
Source File: TestFileCorruption.java    From RDFS with Apache License 2.0
private Block getBlock(File dataDir) {
  assertTrue("data directory does not exist", dataDir.exists());
  File[] blocks = dataDir.listFiles();
  assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));

  // Locate the first block data file, e.g. "blk_1073741825".
  File blockFile = null;
  String blockFileName = null;
  for (int idx = 0; idx < blocks.length; idx++) {
    String name = blocks[idx].getName();
    if (name.startsWith("blk_") && !name.endsWith(".meta")) {
      blockFile = blocks[idx];
      blockFileName = name;
      break;
    }
  }
  if (blockFileName == null) {
    return null;
  }
  long blockId = Long.parseLong(blockFileName.substring("blk_".length()));
  // Recover the generation stamp from the matching meta-file name,
  // e.g. "blk_1073741825_1001.meta"; fall back to the wildcard stamp.
  long blockTimeStamp = GenerationStamp.WILDCARD_STAMP;
  for (int idx = 0; idx < blocks.length; idx++) {
    String fileName = blocks[idx].getName();
    if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) {
      int startIndex = blockFileName.length() + 1;
      int endIndex = fileName.length() - ".meta".length();
      blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex));
      break;
    }
  }
  // Take the length from the data file; the original indexed blocks[idx]
  // here, which pointed at the meta-file (or past the array when no
  // meta-file matched).
  return new Block(blockId, blockFile.length(), blockTimeStamp);
}
 
Example #18
Source File: Block.java    From RDFS with Apache License 2.0
/** {@inheritDoc} */
public boolean equals(Object o) {
  if (!(o instanceof Block)) {
    return false;
  }
  final Block that = (Block)o;
  //Wildcard generationStamp is ALLOWED here
  return this.blockId == that.blockId
    && GenerationStamp.equalsWithWildcard(
        this.generationStamp, that.generationStamp);
}
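Read together with compareTo in Example #12, the two contracts split cleanly: equality tolerates a wildcard stamp (and, judging by the HashMap lookups in Examples #1 and #10, hashing must depend only on the block ID), while ordering rejects wildcards outright via validateGenerationStamp (Example #25). A small sketch with hypothetical values:

Block stored   = new Block(42L, 0, 1001L);
Block wildcard = new Block(42L, 0, GenerationStamp.WILDCARD_STAMP);

assert stored.equals(wildcard);   // equals() matches on block ID despite the wildcard

try {
  stored.compareTo(wildcard);     // validateGenerationStamp rejects the wildcard
} catch (IllegalStateException expected) {
  // wildcard stamps must never reach sorted collections
}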
 
Example #19
Source File: TestFileCorruption.java    From hadoop-gpu with Apache License 2.0
private Block getBlock(File dataDir) {
  assertTrue("data directory does not exist", dataDir.exists());
  File[] blocks = dataDir.listFiles();
  assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));

  // Locate the first block data file, e.g. "blk_1073741825".
  File blockFile = null;
  String blockFileName = null;
  for (int idx = 0; idx < blocks.length; idx++) {
    String name = blocks[idx].getName();
    if (name.startsWith("blk_") && !name.endsWith(".meta")) {
      blockFile = blocks[idx];
      blockFileName = name;
      break;
    }
  }
  if (blockFileName == null) {
    return null;
  }
  long blockId = Long.parseLong(blockFileName.substring("blk_".length()));
  // Recover the generation stamp from the matching meta-file name,
  // e.g. "blk_1073741825_1001.meta"; fall back to the wildcard stamp.
  long blockTimeStamp = GenerationStamp.WILDCARD_STAMP;
  for (int idx = 0; idx < blocks.length; idx++) {
    String fileName = blocks[idx].getName();
    if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) {
      int startIndex = blockFileName.length() + 1;
      int endIndex = fileName.length() - ".meta".length();
      blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex));
      break;
    }
  }
  // Take the length from the data file; the original indexed blocks[idx]
  // here, which pointed at the meta-file (or past the array when no
  // meta-file matched).
  return new Block(blockId, blockFile.length(), blockTimeStamp);
}
 
Example #20
Source File: TestComputeInvalidateWork.java    From hadoop with Apache License 2.0
/**
 * Test if {@link BlockManager#computeInvalidateWork(int)}
 * can schedule invalidate work correctly 
 */
@Test(timeout=120000)
public void testCompInvalidate() throws Exception {
  final int blockInvalidateLimit = bm.getDatanodeManager()
      .blockInvalidateLimit;
  namesystem.writeLock();
  try {
    for (int i=0; i<nodes.length; i++) {
      for(int j=0; j<3*blockInvalidateLimit+1; j++) {
        Block block = new Block(i*(blockInvalidateLimit+1)+j, 0,
            GenerationStamp.LAST_RESERVED_STAMP);
        bm.addToInvalidates(block, nodes[i]);
      }
    }
    
    assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
        bm.computeInvalidateWork(NUM_OF_DATANODES+1));
    assertEquals(blockInvalidateLimit*NUM_OF_DATANODES,
        bm.computeInvalidateWork(NUM_OF_DATANODES));
    assertEquals(blockInvalidateLimit*(NUM_OF_DATANODES-1),
        bm.computeInvalidateWork(NUM_OF_DATANODES-1));
    int workCount = bm.computeInvalidateWork(1);
    if (workCount == 1) {
      assertEquals(blockInvalidateLimit+1, bm.computeInvalidateWork(2));
    } else {
      assertEquals(workCount, blockInvalidateLimit);
      assertEquals(2, bm.computeInvalidateWork(2));
    }
  } finally {
    namesystem.writeUnlock();
  }
}
 
Example #21
Source File: BlockIdManager.java    From hadoop with Apache License 2.0
public void clear() {
  generationStampV1.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
  generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
  getBlockIdGenerator().setCurrentValue(SequentialBlockIdGenerator
    .LAST_RESERVED_BLOCK_ID);
  generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
}
 
Example #22
Source File: BlockIdManager.java    From hadoop with Apache License 2.0
/**
 * Upgrades the generation stamp for the filesystem
 * by reserving a sufficient range for all existing blocks.
 * Should be invoked only during the first upgrade to
 * sequential block IDs.
 */
public long upgradeGenerationStampToV2() {
  Preconditions.checkState(generationStampV2.getCurrentValue() ==
    GenerationStamp.LAST_RESERVED_STAMP);
  generationStampV2.skipTo(generationStampV1.getCurrentValue() +
    HdfsConstants.RESERVED_GENERATION_STAMPS_V1);

  generationStampV1Limit = generationStampV2.getCurrentValue();
  return generationStampV2.getCurrentValue();
}
 
Example #23
Source File: Block.java    From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public int compareTo(Block b) {
  //Wildcard generationStamp is NOT ALLOWED here
  validateGenerationStamp(this.generationStamp);
  validateGenerationStamp(b.generationStamp);

  if (blockId < b.blockId) {
    return -1;
  } else if (blockId == b.blockId) {
    return GenerationStamp.compare(generationStamp, b.generationStamp);
  } else {
    return 1;
  }
}
 
Example #24
Source File: Block.java    From hadoop-gpu with Apache License 2.0
/** {@inheritDoc} */
public boolean equals(Object o) {
  if (!(o instanceof Block)) {
    return false;
  }
  final Block that = (Block)o;
  //Wildcard generationStamp is ALLOWED here
  return this.blockId == that.blockId
    && GenerationStamp.equalsWithWildcard(
        this.generationStamp, that.generationStamp);
}
 
Example #25
Source File: Block.java    From RDFS with Apache License 2.0
static void validateGenerationStamp(long generationstamp) {
  if (generationstamp == GenerationStamp.WILDCARD_STAMP) {
    throw new IllegalStateException("generationStamp (=" + generationstamp
        + ") == GenerationStamp.WILDCARD_STAMP");
  }    
}
 
Example #26
Source File: TestHeartbeatHandling.java    From big-c with Apache License 2.0
/**
 * Test if
 * {@link FSNamesystem#handleHeartbeat}
 * can pick up replication and/or invalidate requests and observes the max
 * limit
 */
@Test
public void testHeartbeat() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final HeartbeatManager hm = namesystem.getBlockManager(
        ).getDatanodeManager().getHeartbeatManager();
    final String poolId = namesystem.getBlockPoolId();
    final DatanodeRegistration nodeReg =
      DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
    final String storageID = DatanodeStorage.generateUuid();
    dd.updateStorage(new DatanodeStorage(storageID));

    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 2);
    final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
    final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};

    try {
      namesystem.writeLock();
      synchronized(hm) {
        for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
          dd.addBlockToBeReplicated(
              new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
              ONE_TARGET);
        }
        DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
            namesystem).getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

        ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
        for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
          blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
        }
        dd.addBlocksToBeInvalidated(blockList);
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(2, cmds.length);
        assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
        assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
        
        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(1, cmds.length);
        assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
        assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

        cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
            .getCommands();
        assertEquals(0, cmds.length);
      }
    } finally {
      namesystem.writeUnlock();
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #27
Source File: TestBlockInfo.java    From RDFS with Apache License 2.0
public void testBlockListMoveToHead() throws Exception {
  LOG.info("BlockInfo moveToHead tests...");

  final int MAX_BLOCKS = 10;

  DatanodeDescriptor dd = new DatanodeDescriptor();
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  ArrayList<BlockInfo> blockInfoList = new ArrayList<BlockInfo>();

  BlockInfo head = null;

  LOG.info("Building block list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
    blockInfoList.add(new BlockInfo(blockList.get(i), 3));
    blockInfoList.get(i).addNode(dd);
    head = blockInfoList.get(i).listInsert(head, dd, -1);

    // index of the datanode should be 0
    assertEquals("Find datanode should be 0", 0, blockInfoList.get(i)
        .findDatanode(dd));
  }

  // list length should be equal to the number of blocks we inserted
  LOG.info("Checking list length...");
  assertEquals("Length should be MEX_BLOCK", MAX_BLOCKS, head.listCount(dd));

  DatanodeIndex ind = new DatanodeIndex();
  ind.headIndex = head.findDatanode(dd);

  LOG.info("Moving each block to the head of the list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    ind.currentIndex = blockInfoList.get(i).findDatanode(dd);
    head = dd.listMoveToHead(blockInfoList.get(i), head, ind);
    // the moved element must be at the head of the list
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(i), head);
    // list length must not change
    assertEquals("List size should not change", MAX_BLOCKS,
        head.listCount(dd));
  }

  // move head of the list to the head - this should not change the list
  LOG.info("Moving head to the head...");
  BlockInfo temp = head;
  ind.currentIndex = 0;
  ind.headIndex = 0;
  head = dd.listMoveToHead(head, head, ind);
  assertEquals(
      "Moving head to the head of the list shopuld not change the list",
      temp, head);

  // check all elements of the list against the original blockInfoList
  LOG.info("Checking elements of the list...");
  BlockInfo it = head;
  assertNotNull("Head should not be null", head);
  int c = MAX_BLOCKS - 1;
  while (it != null) {
    assertEquals("Expected element is not on the list",
        blockInfoList.get(c--), it);
    it = it.getNext(0);
  }

  ind.headIndex = head.findDatanode(dd);

  LOG.info("Moving random blocks to the head of the list...");
  Random rand = new Random();
  for (int i = 0; i < MAX_BLOCKS; i++) {
    int j = rand.nextInt(MAX_BLOCKS);
    ind.currentIndex = blockInfoList.get(j).findDatanode(dd);
    head = dd.listMoveToHead(blockInfoList.get(j), head, ind);
    // the moved element must be at the head of the list
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(j), head);
    // list length must not change
    assertEquals("List size should not change", MAX_BLOCKS,
        head.listCount(dd));
  }

}
 
Example #28
Source File: TestHeartbeatHandling.java    From hadoop-gpu with Apache License 2.0
/**
 * Test if {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, int, int)}
 * can pick up replication and/or invalidate requests and 
 * observes the max limit
 */
public void testHeartbeat() throws Exception {
  final Configuration conf = new Configuration();
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    final DatanodeRegistration nodeReg = cluster.getDataNodes().get(0).dnRegistration;
    DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
    
    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT = conf.getInt("dfs.max-repl-streams", 2);
    final int MAX_INVALIDATE_LIMIT = FSNamesystem.BLOCK_INVALIDATE_CHUNK;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
    final DatanodeDescriptor[] ONE_TARGET = new DatanodeDescriptor[1];

    synchronized (namesystem.heartbeats) {
      for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
        dd.addBlockToBeReplicated(
            new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
      }
      DatanodeCommand[] cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

      ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
      for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
        blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
      }
      dd.addBlocksToBeInvalidated(blockList);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
      assertEquals(null, cmds);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #29
Source File: Block.java    From hadoop-gpu with Apache License 2.0
static void validateGenerationStamp(long generationstamp) {
  if (generationstamp == GenerationStamp.WILDCARD_STAMP) {
    throw new IllegalStateException("generationStamp (=" + generationstamp
        + ") == GenerationStamp.WILDCARD_STAMP");
  }    
}
 
Example #30
Source File: TestHeartbeatHandling.java    From RDFS with Apache License 2.0
/**
 * Test if {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, int, int)}
 * can pick up replication and/or invalidate requests and 
 * observes the max limit
 */
public void testHeartbeat() throws Exception {
  final Configuration conf = new Configuration();
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
    final DatanodeRegistration nodeReg = cluster.getDataNodes().get(0)
        .getDNRegistrationForNS(cluster.getNameNode().getNamespaceID());
    DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
    
    final int REMAINING_BLOCKS = 1;
    final int MAX_REPLICATE_LIMIT = conf.getInt("dfs.max-repl-streams", 2);
    final int MAX_INVALIDATE_LIMIT = FSNamesystem.BLOCK_INVALIDATE_CHUNK;
    final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
    final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
    final DatanodeDescriptor[] ONE_TARGET = new DatanodeDescriptor[1];

    synchronized (namesystem.heartbeats) {
      for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
        dd.addBlockToBeReplicated(
            new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
      }
      DatanodeCommand[] cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), dd.getNamespaceUsed(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

      ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
      for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
        blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
      }
      dd.addBlocksToBeInvalidated(blockList);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), dd.getNamespaceUsed(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), dd.getNamespaceUsed(), 0, 0);
      assertEquals(2, cmds.length);
      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), dd.getNamespaceUsed(), 0, 0);
      assertEquals(1, cmds.length);
      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);

      cmds = namesystem.handleHeartbeat(
          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), dd.getNamespaceUsed(), 0, 0);
      assertEquals(null, cmds);
    }
  } finally {
    cluster.shutdown();
  }
}